From 83cec3793bb2300c76f1aeb88aac1e1a96309f34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?jos=C3=A9=20v?= <52646071+Peponks9@users.noreply.github.com> Date: Mon, 6 Oct 2025 15:33:12 -0600 Subject: [PATCH 001/371] docs: yellowpaper sections in consensus implementation (#18881) --- crates/consensus/consensus/src/lib.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index a267dfe902f..b3dfa30e61b 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -62,8 +62,9 @@ pub trait Consensus: HeaderValidator { /// Validate a block disregarding world state, i.e. things that can be checked before sender /// recovery and execution. /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and - /// 11.1 "Ommer Validation". + /// See the Yellow Paper sections 4.4.2 "Holistic Validity", 4.4.4 "Block Header Validity". + /// Note: Ommer Validation (previously section 11.1) has been deprecated since the Paris hard + /// fork transition to proof of stake. /// /// **This should not be called for the genesis block**. /// From 2f3e2c6c97ebf23231ac8b043eb5d407893ea35a Mon Sep 17 00:00:00 2001 From: Forostovec Date: Tue, 7 Oct 2025 14:23:54 +0300 Subject: [PATCH 002/371] fix(era-utils): fix off-by-one for Excluded end bound in process_iter (#18731) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Roman Hodulák --- crates/era-utils/src/history.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/era-utils/src/history.rs b/crates/era-utils/src/history.rs index 12bafed6113..b1c3cd309c0 100644 --- a/crates/era-utils/src/history.rs +++ b/crates/era-utils/src/history.rs @@ -286,12 +286,12 @@ where { let mut last_header_number = match block_numbers.start_bound() { Bound::Included(&number) => number, - Bound::Excluded(&number) => number.saturating_sub(1), + Bound::Excluded(&number) => number.saturating_add(1), Bound::Unbounded => 0, }; let target = match block_numbers.end_bound() { Bound::Included(&number) => Some(number), - Bound::Excluded(&number) => Some(number.saturating_add(1)), + Bound::Excluded(&number) => Some(number.saturating_sub(1)), Bound::Unbounded => None, }; From 029509cc4209fbc37dfcc2c85ddcc133a052f39f Mon Sep 17 00:00:00 2001 From: Skylar Ray <137945430+sky-coderay@users.noreply.github.com> Date: Tue, 7 Oct 2025 14:34:35 +0300 Subject: [PATCH 003/371] refactor: eliminate redundant allocation in precompile cache example (#18886) --- examples/precompile-cache/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/precompile-cache/src/main.rs b/examples/precompile-cache/src/main.rs index dcaa886d736..69aaf7b4035 100644 --- a/examples/precompile-cache/src/main.rs +++ b/examples/precompile-cache/src/main.rs @@ -176,7 +176,7 @@ where async fn build_evm(self, ctx: &BuilderContext) -> eyre::Result { let evm_config = EthEvmConfig::new_with_evm_factory( ctx.chain_spec(), - MyEvmFactory { precompile_cache: self.precompile_cache.clone() }, + MyEvmFactory { precompile_cache: self.precompile_cache }, ); Ok(evm_config) } From 319a8dceb4fe5718e8b05600808cac65cbe6687a Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 7 Oct 2025 21:54:25 +0400 Subject: [PATCH 004/371] chore: relax `ChainSpec` impls (#18894) --- crates/chainspec/src/api.rs | 5 ++--- crates/chainspec/src/spec.rs | 4 ++-- crates/ethereum/consensus/src/lib.rs 
| 11 ++++++----- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index 80327d38b6d..ce035518bba 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,7 +1,6 @@ use crate::{ChainSpec, DepositContract}; use alloc::{boxed::Box, vec::Vec}; use alloy_chains::Chain; -use alloy_consensus::Header; use alloy_eips::{calc_next_block_base_fee, eip1559::BaseFeeParams, eip7840::BlobParams}; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; @@ -75,8 +74,8 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { } } -impl EthChainSpec for ChainSpec { - type Header = Header; +impl EthChainSpec for ChainSpec { + type Header = H; fn chain(&self) -> Chain { self.chain diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 089b6c1c6c9..88e5a370d6d 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -266,7 +266,7 @@ impl From for BaseFeeParamsKind { #[derive(Clone, Debug, PartialEq, Eq, From)] pub struct ForkBaseFeeParams(Vec<(Box, BaseFeeParams)>); -impl core::ops::Deref for ChainSpec { +impl core::ops::Deref for ChainSpec { type Target = ChainHardforks; fn deref(&self) -> &Self::Target { @@ -1033,7 +1033,7 @@ impl From<&Arc> for ChainSpecBuilder { } } -impl EthExecutorSpec for ChainSpec { +impl EthExecutorSpec for ChainSpec { fn deposit_contract_address(&self) -> Option
{ self.deposit_contract.map(|deposit_contract| deposit_contract.address) } diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 3c0021fc2d2..5aef1393032 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -196,6 +196,7 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Header; use alloy_primitives::B256; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_consensus_common::validation::validate_against_parent_gas_limit; @@ -215,7 +216,7 @@ mod tests { let child = header_with_gas_limit((parent.gas_limit + 5) as u64); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::
::default()), Ok(()) ); } @@ -226,7 +227,7 @@ mod tests { let child = header_with_gas_limit(MINIMUM_GAS_LIMIT - 1); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::
::default()), Err(ConsensusError::GasLimitInvalidMinimum { child_gas_limit: child.gas_limit as u64 }) ); } @@ -239,7 +240,7 @@ mod tests { ); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::
::default()), Err(ConsensusError::GasLimitInvalidIncrease { parent_gas_limit: parent.gas_limit, child_gas_limit: child.gas_limit, @@ -253,7 +254,7 @@ mod tests { let child = header_with_gas_limit(parent.gas_limit - 5); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::
::default()), Ok(()) ); } @@ -266,7 +267,7 @@ mod tests { ); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::
::default()), Err(ConsensusError::GasLimitInvalidDecrease { parent_gas_limit: parent.gas_limit, child_gas_limit: child.gas_limit, From b82ad0777551a806b5110ba686dbf900adc8bd01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Wed, 8 Oct 2025 12:18:49 +0200 Subject: [PATCH 005/371] chore: make clippy happy (#18900) --- crates/storage/provider/src/providers/blockchain_provider.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 890b98124a5..75e276b3c42 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -2272,7 +2272,7 @@ mod tests { // Invalid/Non-existent argument should return `None` { - call_method!($arg_count, provider, $method, |_,_,_,_| ( ($invalid_args, None)), tx_num, tx_hash, &in_memory_blocks[0], &receipts); + call_method!($arg_count, provider, $method, |_,_,_,_| ($invalid_args, None), tx_num, tx_hash, &in_memory_blocks[0], &receipts); } // Check that the item is only in memory and not in database @@ -2283,7 +2283,7 @@ mod tests { call_method!($arg_count, provider, $method, |_,_,_,_| (args.clone(), expected_item), tx_num, tx_hash, last_mem_block, &receipts); // Ensure the item is not in storage - call_method!($arg_count, provider.database, $method, |_,_,_,_| ( (args, None)), tx_num, tx_hash, last_mem_block, &receipts); + call_method!($arg_count, provider.database, $method, |_,_,_,_| (args, None), tx_num, tx_hash, last_mem_block, &receipts); } )* }}; From 273ee08443b4daa039bd472f911beb0451a5e58d Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Wed, 8 Oct 2025 13:05:27 +0200 Subject: [PATCH 006/371] fix(trie): Reveal extension child when extension is last remaining child of a branch (#18891) --- crates/trie/sparse-parallel/src/trie.rs | 309 +++++++++++++++++++++--- crates/trie/sparse/src/trie.rs | 120 ++++++--- 2 files changed, 360 insertions(+), 69 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index d973d705de2..e1cfe84cdf9 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -623,51 +623,19 @@ impl SparseTrieInterface for ParallelSparseTrie { "Branch node has only one child", ); - let remaining_child_subtrie = self.subtrie_for_path_mut(&remaining_child_path); - // If the remaining child node is not yet revealed then we have to reveal it here, // otherwise it's not possible to know how to collapse the branch. - let remaining_child_node = - match remaining_child_subtrie.nodes.get(&remaining_child_path).unwrap() { - SparseNode::Hash(_) => { - debug!( - target: "trie::parallel_sparse", - child_path = ?remaining_child_path, - leaf_full_path = ?full_path, - "Branch node child not revealed in remove_leaf, falling back to db", - ); - if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.trie_node(&remaining_child_path)? 
- { - let decoded = TrieNode::decode(&mut &node[..])?; - trace!( - target: "trie::parallel_sparse", - ?remaining_child_path, - ?decoded, - ?tree_mask, - ?hash_mask, - "Revealing remaining blinded branch child" - ); - remaining_child_subtrie.reveal_node( - remaining_child_path, - &decoded, - TrieMasks { hash_mask, tree_mask }, - )?; - remaining_child_subtrie.nodes.get(&remaining_child_path).unwrap() - } else { - return Err(SparseTrieErrorKind::NodeNotFoundInProvider { - path: remaining_child_path, - } - .into()) - } - } - node => node, - }; + let remaining_child_node = self.reveal_remaining_child_on_leaf_removal( + provider, + full_path, + &remaining_child_path, + true, // recurse_into_extension + )?; let (new_branch_node, remove_child) = Self::branch_changes_on_leaf_removal( branch_path, &remaining_child_path, - remaining_child_node, + &remaining_child_node, ); if remove_child { @@ -1228,6 +1196,90 @@ impl ParallelSparseTrie { } } + /// Called when a leaf is removed on a branch which has only one other remaining child. That + /// child must be revealed in order to properly collapse the branch. + /// + /// If `recurse_into_extension` is true, and the remaining child is an extension node, then its + /// child will be ensured to be revealed as well. + /// + /// ## Returns + /// + /// The node of the remaining child, whether it was already revealed or not. + fn reveal_remaining_child_on_leaf_removal( + &mut self, + provider: P, + full_path: &Nibbles, // only needed for logs + remaining_child_path: &Nibbles, + recurse_into_extension: bool, + ) -> SparseTrieResult { + let remaining_child_subtrie = self.subtrie_for_path_mut(remaining_child_path); + + let remaining_child_node = + match remaining_child_subtrie.nodes.get(remaining_child_path).unwrap() { + SparseNode::Hash(_) => { + debug!( + target: "trie::parallel_sparse", + child_path = ?remaining_child_path, + leaf_full_path = ?full_path, + "Node child not revealed in remove_leaf, falling back to db", + ); + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.trie_node(remaining_child_path)? + { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::parallel_sparse", + ?remaining_child_path, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing remaining blinded branch child" + ); + remaining_child_subtrie.reveal_node( + *remaining_child_path, + &decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + remaining_child_subtrie.nodes.get(remaining_child_path).unwrap().clone() + } else { + return Err(SparseTrieErrorKind::NodeNotFoundInProvider { + path: *remaining_child_path, + } + .into()) + } + } + node => node.clone(), + }; + + // If `recurse_into_extension` is true, and the remaining child is an extension node, then + // its child will be ensured to be revealed as well. This is required for generation of + // trie updates; without revealing the grandchild branch it's not always possible to know + // if the tree mask bit should be set for the child extension on its parent branch. + if let SparseNode::Extension { key, .. 
} = &remaining_child_node && + recurse_into_extension + { + let mut remaining_grandchild_path = *remaining_child_path; + remaining_grandchild_path.extend(key); + + trace!( + target: "trie::parallel_sparse", + remaining_grandchild_path = ?remaining_grandchild_path, + child_path = ?remaining_child_path, + leaf_full_path = ?full_path, + "Revealing child of extension node, which is the last remaining child of the branch" + ); + + self.reveal_remaining_child_on_leaf_removal( + provider, + full_path, + &remaining_grandchild_path, + false, // recurse_into_extension + )?; + } + + Ok(remaining_child_node) + } + /// Drains any [`SparseTrieUpdatesAction`]s from the given subtrie, and applies each action to /// the given `updates` set. If the given set is None then this is a no-op. fn apply_subtrie_update_actions( @@ -4076,6 +4128,185 @@ mod tests { ); } + #[test] + fn test_remove_leaf_remaining_extension_node_child_is_revealed() { + let branch_path = Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7]); + let removed_branch_path = Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2]); + + // Convert the logs into reveal_nodes call on a fresh ParallelSparseTrie + let nodes = vec![ + // Branch at 0x4f8807 + RevealedSparseNode { + path: branch_path, + node: { + TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::from(hex!( + "dede882d52f0e0eddfb5b89293a10c87468b4a73acd0d4ae550054a92353f6d5" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "8746f18e465e2eed16117306b6f2eef30bc9d2978aee4a7838255e39c41a3222" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "35a4ea861548af5f0262a9b6d619b4fc88fce6531cbd004eab1530a73f34bbb1" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "47d5c2bf9eea5c1ee027e4740c2b86159074a27d52fd2f6a8a8c86c77e48006f" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "eb76a359b216e1d86b1f2803692a9fe8c3d3f97a9fe6a82b396e30344febc0c1" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "437656f2697f167b23e33cb94acc8550128cfd647fc1579d61e982cb7616b8bc" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "45a1ac2faf15ea8a4da6f921475974e0379f39c3d08166242255a567fa88ce6c" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "7dbb299d714d3dfa593f53bc1b8c66d5c401c30a0b5587b01254a56330361395" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "ae407eb14a74ed951c9949c1867fb9ee9ba5d5b7e03769eaf3f29c687d080429" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "768d0fe1003f0e85d3bc76e4a1fa0827f63b10ca9bca52d56c2b1cceb8eb8b08" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "e5127935143493d5094f4da6e4f7f5a0f62d524fbb61e7bb9fb63d8a166db0f3" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "7f3698297308664fbc1b9e2c41d097fbd57d8f364c394f6ad7c71b10291fbf42" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "4a2bc7e19cec63cb5ef5754add0208959b50bcc79f13a22a370f77b277dbe6db" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "40764b8c48de59258e62a3371909a107e76e1b5e847cfa94dbc857e9fd205103" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "2985dca29a7616920d95c43ab62eb013a40e6a0c88c284471e4c3bd22f3b9b25" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "1b6511f7a385e79477239f7dd4a49f52082ecac05aa5bd0de18b1d55fe69d10c" + ))), + ], + TrieMask::new(0b1111111111111111), + )) + }, + masks: TrieMasks { + hash_mask: Some(TrieMask::new(0b1111111111111111)), + tree_mask: Some(TrieMask::new(0b0011110100100101)), + }, + }, + // Branch at 0x4f88072 + RevealedSparseNode { + path: removed_branch_path, + node: { + let stack = vec![ + RlpNode::word_rlp(&B256::from(hex!( + "15fd4993a41feff1af3b629b32572ab05acddd97c681d82ec2eb89c8a8e3ab9e" + ))), + 
RlpNode::word_rlp(&B256::from(hex!( + "a272b0b94ced4e6ec7adb41719850cf4a167ad8711d0dda6a810d129258a0d94" + ))), + ]; + let branch_node = BranchNode::new(stack, TrieMask::new(0b0001000000000100)); + TrieNode::Branch(branch_node) + }, + masks: TrieMasks { + hash_mask: Some(TrieMask::new(0b0000000000000000)), + tree_mask: Some(TrieMask::new(0b0000000000000100)), + }, + }, + // Extension at 0x4f880722 + RevealedSparseNode { + path: Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0x2]), + node: { + let extension_node = ExtensionNode::new( + Nibbles::from_nibbles([0x6]), + RlpNode::word_rlp(&B256::from(hex!( + "56fab2b106a97eae9c7197f86d03bca292da6e0ac725b783082f7d950cc4e0fc" + ))), + ); + TrieNode::Extension(extension_node) + }, + masks: TrieMasks { hash_mask: None, tree_mask: None }, + }, + // Leaf at 0x4f88072c + RevealedSparseNode { + path: Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0xc]), + node: { + let leaf_node = LeafNode::new( + Nibbles::from_nibbles([ + 0x0, 0x7, 0x7, 0xf, 0x8, 0x6, 0x6, 0x1, 0x3, 0x0, 0x8, 0x8, 0xd, 0xf, + 0xc, 0xa, 0xe, 0x6, 0x4, 0x8, 0xa, 0xb, 0xe, 0x8, 0x3, 0x1, 0xf, 0xa, + 0xd, 0xc, 0xa, 0x5, 0x5, 0xa, 0xd, 0x4, 0x3, 0xa, 0xb, 0x1, 0x6, 0x5, + 0xd, 0x1, 0x6, 0x8, 0x0, 0xd, 0xd, 0x5, 0x6, 0x7, 0xb, 0x5, 0xd, 0x6, + ]), + hex::decode("8468d3971d").unwrap(), + ); + TrieNode::Leaf(leaf_node) + }, + masks: TrieMasks { hash_mask: None, tree_mask: None }, + }, + ]; + + // Create a fresh ParallelSparseTrie + let mut trie = ParallelSparseTrie::from_root( + TrieNode::Extension(ExtensionNode::new( + Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7]), + RlpNode::word_rlp(&B256::from(hex!( + "56fab2b106a97eae9c7197f86d03bca292da6e0ac725b783082f7d950cc4e0fc" + ))), + )), + TrieMasks::none(), + true, + ) + .unwrap(); + + // Call reveal_nodes + trie.reveal_nodes(nodes).unwrap(); + + // Remove the leaf at "0x4f88072c077f86613088dfcae648abe831fadca55ad43ab165d1680dd567b5d6" + let leaf_key = Nibbles::from_nibbles([ + 0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0xc, 0x0, 0x7, 0x7, 0xf, 0x8, 0x6, 0x6, 0x1, 0x3, + 0x0, 0x8, 0x8, 0xd, 0xf, 0xc, 0xa, 0xe, 0x6, 0x4, 0x8, 0xa, 0xb, 0xe, 0x8, 0x3, 0x1, + 0xf, 0xa, 0xd, 0xc, 0xa, 0x5, 0x5, 0xa, 0xd, 0x4, 0x3, 0xa, 0xb, 0x1, 0x6, 0x5, 0xd, + 0x1, 0x6, 0x8, 0x0, 0xd, 0xd, 0x5, 0x6, 0x7, 0xb, 0x5, 0xd, 0x6, + ]); + + let mut provider = MockTrieNodeProvider::new(); + let revealed_branch = create_branch_node_with_children(&[], []); + let mut encoded = Vec::new(); + revealed_branch.encode(&mut encoded); + provider.add_revealed_node( + Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0x2, 0x6]), + RevealedNode { + node: encoded.into(), + tree_mask: None, + // Give it a fake hashmask so that it appears like it will be stored in the db + hash_mask: Some(TrieMask::new(0b1111)), + }, + ); + + trie.remove_leaf(&leaf_key, provider).unwrap(); + + // Calculate root so that updates are calculated. 
+ trie.root(); + + // Take updates and assert they are correct + let updates = trie.take_updates(); + assert_eq!( + updates.removed_nodes.into_iter().collect::>(), + vec![removed_branch_path] + ); + assert_eq!(updates.updated_nodes.len(), 1); + let updated_node = updates.updated_nodes.get(&branch_path).unwrap(); + + // Second bit must be set, indicating that the extension's child is in the db + assert_eq!(updated_node.tree_mask, TrieMask::new(0b011110100100101),) + } + #[test] fn test_parallel_sparse_trie_root() { // Step 1: Create the trie structure diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 76dadc8fc9c..36bcbe50e3a 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -821,38 +821,17 @@ impl SparseTrieInterface for SerialSparseTrie { trace!(target: "trie::sparse", ?removed_path, ?child_path, "Branch node has only one child"); - if self.nodes.get(&child_path).unwrap().is_hash() { - debug!( - target: "trie::sparse", - ?child_path, - leaf_full_path = ?full_path, - "Branch node child not revealed in remove_leaf, falling back to db", - ); - if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.trie_node(&child_path)? - { - let decoded = TrieNode::decode(&mut &node[..])?; - trace!( - target: "trie::sparse", - ?child_path, - ?decoded, - ?tree_mask, - ?hash_mask, - "Revealing remaining blinded branch child" - ); - self.reveal_node( - child_path, - decoded, - TrieMasks { hash_mask, tree_mask }, - )?; - } - } - - // Get the only child node. - let child = self.nodes.get(&child_path).unwrap(); + // If the remaining child node is not yet revealed then we have to reveal + // it here, otherwise it's not possible to know how to collapse the branch. + let child = self.reveal_remaining_child_on_leaf_removal( + &provider, + full_path, + &child_path, + true, // recurse_into_extension + )?; let mut delete_child = false; - let new_node = match child { + let new_node = match &child { SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), &SparseNode::Hash(hash) => { return Err(SparseTrieErrorKind::BlindedNode { @@ -1256,6 +1235,87 @@ impl SerialSparseTrie { Ok(nodes) } + /// Called when a leaf is removed on a branch which has only one other remaining child. That + /// child must be revealed in order to properly collapse the branch. + /// + /// If `recurse_into_extension` is true, and the remaining child is an extension node, then its + /// child will be ensured to be revealed as well. + /// + /// ## Returns + /// + /// The node of the remaining child, whether it was already revealed or not. + fn reveal_remaining_child_on_leaf_removal( + &mut self, + provider: P, + full_path: &Nibbles, // only needed for logs + remaining_child_path: &Nibbles, + recurse_into_extension: bool, + ) -> SparseTrieResult { + let remaining_child_node = match self.nodes.get(remaining_child_path).unwrap() { + SparseNode::Hash(_) => { + debug!( + target: "trie::parallel_sparse", + child_path = ?remaining_child_path, + leaf_full_path = ?full_path, + "Node child not revealed in remove_leaf, falling back to db", + ); + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.trie_node(remaining_child_path)? 
+ { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::parallel_sparse", + ?remaining_child_path, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing remaining blinded branch child" + ); + self.reveal_node( + *remaining_child_path, + decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + self.nodes.get(remaining_child_path).unwrap().clone() + } else { + return Err(SparseTrieErrorKind::NodeNotFoundInProvider { + path: *remaining_child_path, + } + .into()) + } + } + node => node.clone(), + }; + + // If `recurse_into_extension` is true, and the remaining child is an extension node, then + // its child will be ensured to be revealed as well. This is required for generation of + // trie updates; without revealing the grandchild branch it's not always possible to know + // if the tree mask bit should be set for the child extension on its parent branch. + if let SparseNode::Extension { key, .. } = &remaining_child_node && + recurse_into_extension + { + let mut remaining_grandchild_path = *remaining_child_path; + remaining_grandchild_path.extend(key); + + trace!( + target: "trie::parallel_sparse", + remaining_grandchild_path = ?remaining_grandchild_path, + child_path = ?remaining_child_path, + leaf_full_path = ?full_path, + "Revealing child of extension node, which is the last remaining child of the branch" + ); + + self.reveal_remaining_child_on_leaf_removal( + provider, + full_path, + &remaining_grandchild_path, + false, // recurse_into_extension + )?; + } + + Ok(remaining_child_node) + } + /// Recalculates and updates the RLP hashes of nodes deeper than or equal to the specified /// `depth`. /// From 1aa312c12be46980e85a7fd2b5f02bc352c3dc75 Mon Sep 17 00:00:00 2001 From: radik878 Date: Wed, 8 Oct 2025 14:46:20 +0300 Subject: [PATCH 007/371] chore(node): simplify EngineApiExt bounds by removing redundant constraints (#18905) --- crates/node/builder/src/engine_api_ext.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/node/builder/src/engine_api_ext.rs b/crates/node/builder/src/engine_api_ext.rs index 936a2e19051..33d1d3e63ad 100644 --- a/crates/node/builder/src/engine_api_ext.rs +++ b/crates/node/builder/src/engine_api_ext.rs @@ -5,7 +5,6 @@ use crate::rpc::EngineApiBuilder; use eyre::Result; use reth_node_api::{AddOnsContext, FullNodeComponents}; -use reth_rpc_api::IntoEngineApiRpcModule; /// Provides access to an `EngineApi` instance with a callback #[derive(Debug)] @@ -27,7 +26,7 @@ impl EngineApiBuilder for EngineApiExt where B: EngineApiBuilder, N: FullNodeComponents, - B::EngineApi: IntoEngineApiRpcModule + Send + Sync + Clone + 'static, + B::EngineApi: Clone, F: FnOnce(B::EngineApi) + Send + Sync + 'static, { type EngineApi = B::EngineApi; From bed26238dc01281d85786b81ca1e8ec09a141e9d Mon Sep 17 00:00:00 2001 From: William Nwoke Date: Wed, 8 Oct 2025 12:54:59 +0100 Subject: [PATCH 008/371] refactor(engine): separate concerns in on_forkchoice_updated for better maintainability (#18661) Co-authored-by: Nathaniel Bajo Co-authored-by: YK Co-authored-by: Brian Picciano --- crates/engine/tree/src/tree/mod.rs | 259 +++++++++++++++-------- crates/engine/tree/src/tree/tests.rs | 303 +++++++++++++++++++++++++++ 2 files changed, 477 insertions(+), 85 deletions(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 24bdc069f09..2ea4b552e88 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1015,23 +1015,79 @@ where version: EngineApiMessageVersion, ) -> 
ProviderResult> { trace!(target: "engine::tree", ?attrs, "invoked forkchoice update"); + + // Record metrics + self.record_forkchoice_metrics(&attrs); + + // Pre-validation of forkchoice state + if let Some(early_result) = self.validate_forkchoice_state(state)? { + return Ok(TreeOutcome::new(early_result)); + } + + // Return early if we are on the correct fork + if let Some(result) = self.handle_canonical_head(state, &attrs, version)? { + return Ok(result); + } + + // Attempt to apply a chain update when the head differs from our canonical chain. + // This handles reorgs and chain extensions by making the specified head canonical. + if let Some(result) = self.apply_chain_update(state, &attrs, version)? { + return Ok(result); + } + + // Fallback that ensures to catch up to the network's state. + self.handle_missing_block(state) + } + + /// Records metrics for forkchoice updated calls + fn record_forkchoice_metrics(&self, attrs: &Option) { self.metrics.engine.forkchoice_updated_messages.increment(1); if attrs.is_some() { self.metrics.engine.forkchoice_with_attributes_updated_messages.increment(1); } self.canonical_in_memory_state.on_forkchoice_update_received(); + } - if let Some(on_updated) = self.pre_validate_forkchoice_update(state)? { - return Ok(TreeOutcome::new(on_updated)) + /// Pre-validates the forkchoice state and returns early if validation fails. + /// + /// Returns `Some(OnForkChoiceUpdated)` if validation fails and an early response should be + /// returned. Returns `None` if validation passes and processing should continue. + fn validate_forkchoice_state( + &mut self, + state: ForkchoiceState, + ) -> ProviderResult> { + if state.head_block_hash.is_zero() { + return Ok(Some(OnForkChoiceUpdated::invalid_state())); } - let valid_outcome = |head| { - TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( - PayloadStatusEnum::Valid, - Some(head), - ))) - }; + // Check if the new head hash is connected to any ancestor that we previously marked as + // invalid + let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); + if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? { + return Ok(Some(OnForkChoiceUpdated::with_invalid(status))); + } + if !self.backfill_sync_state.is_idle() { + // We can only process new forkchoice updates if the pipeline is idle, since it requires + // exclusive access to the database + trace!(target: "engine::tree", "Pipeline is syncing, skipping forkchoice update"); + return Ok(Some(OnForkChoiceUpdated::syncing())); + } + + Ok(None) + } + + /// Handles the case where the forkchoice head is already canonical. + /// + /// Returns `Some(TreeOutcome)` if the head is already canonical and + /// processing is complete. Returns `None` if the head is not canonical and processing + /// should continue. + fn handle_canonical_head( + &self, + state: ForkchoiceState, + attrs: &Option, // Changed to reference + version: EngineApiMessageVersion, + ) -> ProviderResult>> { // Process the forkchoice update by trying to make the head block canonical // // We can only process this forkchoice update if: @@ -1046,34 +1102,58 @@ where // - emitting a canonicalization event for the new chain (including reorg) // - if we have payload attributes, delegate them to the payload service - // 1. 
ensure we have a new head block - if self.state.tree_state.canonical_block_hash() == state.head_block_hash { - trace!(target: "engine::tree", "fcu head hash is already canonical"); + if self.state.tree_state.canonical_block_hash() != state.head_block_hash { + return Ok(None); + } - // update the safe and finalized blocks and ensure their values are valid - if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) { - // safe or finalized hashes are invalid - return Ok(TreeOutcome::new(outcome)) - } + trace!(target: "engine::tree", "fcu head hash is already canonical"); - // we still need to process payload attributes if the head is already canonical - if let Some(attr) = attrs { - let tip = self - .sealed_header_by_hash(self.state.tree_state.canonical_block_hash())? - .ok_or_else(|| { - // If we can't find the canonical block, then something is wrong and we need - // to return an error - ProviderError::HeaderNotFound(state.head_block_hash.into()) - })?; - let updated = self.process_payload_attributes(attr, &tip, state, version); - return Ok(TreeOutcome::new(updated)) - } + // Update the safe and finalized blocks and ensure their values are valid + if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) { + // safe or finalized hashes are invalid + return Ok(Some(TreeOutcome::new(outcome))); + } - // the head block is already canonical - return Ok(valid_outcome(state.head_block_hash)) + // Process payload attributes if the head is already canonical + if let Some(attr) = attrs { + let tip = self + .sealed_header_by_hash(self.state.tree_state.canonical_block_hash())? + .ok_or_else(|| { + // If we can't find the canonical block, then something is wrong and we need + // to return an error + ProviderError::HeaderNotFound(state.head_block_hash.into()) + })?; + // Clone only when we actually need to process the attributes + let updated = self.process_payload_attributes(attr.clone(), &tip, state, version); + return Ok(Some(TreeOutcome::new(updated))); } - // 2. check if the head is already part of the canonical chain + // The head block is already canonical + let outcome = TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( + PayloadStatusEnum::Valid, + Some(state.head_block_hash), + ))); + Ok(Some(outcome)) + } + + /// Applies chain update for the new head block and processes payload attributes. + /// + /// This method handles the case where the forkchoice head differs from our current canonical + /// head. It attempts to make the specified head block canonical by: + /// - Checking if the head is already part of the canonical chain + /// - Applying chain reorganizations (reorgs) if necessary + /// - Processing payload attributes if provided + /// - Returning the appropriate forkchoice update response + /// + /// Returns `Some(TreeOutcome)` if a chain update was successfully applied. + /// Returns `None` if no chain update was needed or possible. 
+ fn apply_chain_update( + &mut self, + state: ForkchoiceState, + attrs: &Option, + version: EngineApiMessageVersion, + ) -> ProviderResult>> { + // Check if the head is already part of the canonical chain if let Ok(Some(canonical_header)) = self.find_canonical_header(state.head_block_hash) { debug!(target: "engine::tree", head = canonical_header.number(), "fcu head block is already canonical"); @@ -1084,9 +1164,14 @@ where { if let Some(attr) = attrs { debug!(target: "engine::tree", head = canonical_header.number(), "handling payload attributes for canonical head"); - let updated = - self.process_payload_attributes(attr, &canonical_header, state, version); - return Ok(TreeOutcome::new(updated)) + // Clone only when we actually need to process the attributes + let updated = self.process_payload_attributes( + attr.clone(), + &canonical_header, + state, + version, + ); + return Ok(Some(TreeOutcome::new(updated))); } // At this point, no alternative block has been triggered, so we need effectively @@ -1095,52 +1180,75 @@ where // canonical ancestor. This ensures that state providers and the // transaction pool operate with the correct chain state after // forkchoice update processing. + if self.config.unwind_canonical_header() { self.update_latest_block_to_canonical_ancestor(&canonical_header)?; } } - // 2. Client software MAY skip an update of the forkchoice state and MUST NOT begin a - // payload build process if `forkchoiceState.headBlockHash` references a `VALID` - // ancestor of the head of canonical chain, i.e. the ancestor passed payload - // validation process and deemed `VALID`. In the case of such an event, client - // software MUST return `{payloadStatus: {status: VALID, latestValidHash: - // forkchoiceState.headBlockHash, validationError: null}, payloadId: null}` + // According to the Engine API specification, client software MAY skip an update of the + // forkchoice state and MUST NOT begin a payload build process if + // `forkchoiceState.headBlockHash` references a `VALID` ancestor of the head + // of canonical chain, i.e. the ancestor passed payload validation process + // and deemed `VALID`. In the case of such an event, client software MUST + // return `{payloadStatus: {status: VALID, latestValidHash: + // forkchoiceState.headBlockHash, validationError: null}, payloadId: null}` + + // The head block is already canonical and we're not processing payload attributes, + // so we're not triggering a payload job and can return right away - // the head block is already canonical, so we're not triggering a payload job and can - // return right away - return Ok(valid_outcome(state.head_block_hash)) + let outcome = TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( + PayloadStatusEnum::Valid, + Some(state.head_block_hash), + ))); + return Ok(Some(outcome)); } - // 3. ensure we can apply a new chain update for the head block + // Ensure we can apply a new chain update for the head block if let Some(chain_update) = self.on_new_head(state.head_block_hash)? 
{ let tip = chain_update.tip().clone_sealed_header(); self.on_canonical_chain_update(chain_update); - // update the safe and finalized blocks and ensure their values are valid + // Update the safe and finalized blocks and ensure their values are valid if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) { // safe or finalized hashes are invalid - return Ok(TreeOutcome::new(outcome)) + return Ok(Some(TreeOutcome::new(outcome))); } if let Some(attr) = attrs { - let updated = self.process_payload_attributes(attr, &tip, state, version); - return Ok(TreeOutcome::new(updated)) + // Clone only when we actually need to process the attributes + let updated = self.process_payload_attributes(attr.clone(), &tip, state, version); + return Ok(Some(TreeOutcome::new(updated))); } - return Ok(valid_outcome(state.head_block_hash)) + let outcome = TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( + PayloadStatusEnum::Valid, + Some(state.head_block_hash), + ))); + return Ok(Some(outcome)); } - // 4. we don't have the block to perform the update - // we assume the FCU is valid and at least the head is missing, + Ok(None) + } + + /// Handles the case where the head block is missing and needs to be downloaded. + /// + /// This is the fallback case when all other forkchoice update scenarios have been exhausted. + /// Returns a `TreeOutcome` with syncing status and download event. + fn handle_missing_block( + &self, + state: ForkchoiceState, + ) -> ProviderResult> { + // We don't have the block to perform the forkchoice update + // We assume the FCU is valid and at least the head is missing, // so we need to start syncing to it // // find the appropriate target to sync to, if we don't have the safe block hash then we // start syncing to the safe block via backfill first let target = if self.state.forkchoice_state_tracker.is_empty() && - // check that safe block is valid and missing - !state.safe_block_hash.is_zero() && - self.find_canonical_header(state.safe_block_hash).ok().flatten().is_none() + // check that safe block is valid and missing + !state.safe_block_hash.is_zero() && + self.find_canonical_header(state.safe_block_hash).ok().flatten().is_none() { debug!(target: "engine::tree", "missing safe block on initial FCU, downloading safe block"); state.safe_block_hash @@ -1929,8 +2037,18 @@ where fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult> { // check if the head was previously marked as invalid let Some(header) = self.state.invalid_headers.get(&head) else { return Ok(None) }; - // populate the latest valid hash field - Ok(Some(self.prepare_invalid_response(header.parent)?)) + + // Try to prepare invalid response, but handle errors gracefully + match self.prepare_invalid_response(header.parent) { + Ok(status) => Ok(Some(status)), + Err(err) => { + debug!(target: "engine::tree", %err, "Failed to prepare invalid response for ancestor check"); + // Return a basic invalid status without latest valid hash + Ok(Some(PayloadStatus::from_status(PayloadStatusEnum::Invalid { + validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(), + }))) + } + } } /// Validate if block is correct and satisfies all the consensus rules that concern the header @@ -2753,35 +2871,6 @@ where self.update_safe_block(state.safe_block_hash) } - /// Pre-validate forkchoice update and check whether it can be processed. - /// - /// This method returns the update outcome if validation fails or - /// the node is syncing and the update cannot be processed at the moment. 
- fn pre_validate_forkchoice_update( - &mut self, - state: ForkchoiceState, - ) -> ProviderResult> { - if state.head_block_hash.is_zero() { - return Ok(Some(OnForkChoiceUpdated::invalid_state())) - } - - // check if the new head hash is connected to any ancestor that we previously marked as - // invalid - let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); - if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? { - return Ok(Some(OnForkChoiceUpdated::with_invalid(status))) - } - - if !self.backfill_sync_state.is_idle() { - // We can only process new forkchoice updates if the pipeline is idle, since it requires - // exclusive access to the database - trace!(target: "engine::tree", "Pipeline is syncing, skipping forkchoice update"); - return Ok(Some(OnForkChoiceUpdated::syncing())) - } - - Ok(None) - } - /// Validates the payload attributes with respect to the header and fork choice state. /// /// Note: At this point, the fork choice update is considered to be VALID, however, we can still diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index b2774b8b17e..17b5950e077 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -56,6 +56,7 @@ impl reth_engine_primitives::PayloadValidator for MockEngineVali reth_payload_primitives::NewPayloadError::Other(format!("{e:?}").into()) })?; let sealed = block.seal_slow(); + sealed.try_recover().map_err(|e| reth_payload_primitives::NewPayloadError::Other(e.into())) } } @@ -1705,3 +1706,305 @@ mod payload_execution_tests { } } } + +/// Test suite for the refactored `on_forkchoice_updated` helper methods +#[cfg(test)] +mod forkchoice_updated_tests { + use super::*; + use alloy_primitives::Address; + + /// Test that validates the forkchoice state pre-validation logic + #[tokio::test] + async fn test_validate_forkchoice_state() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Test 1: Zero head block hash should return early with invalid state + let zero_state = ForkchoiceState { + head_block_hash: B256::ZERO, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.validate_forkchoice_state(zero_state).unwrap(); + assert!(result.is_some(), "Zero head block hash should return early"); + let outcome = result.unwrap(); + // For invalid state, we expect an error response + assert!(matches!(outcome, OnForkChoiceUpdated { .. 
})); + + // Test 2: Valid state with backfill active should return syncing + test_harness.tree.backfill_sync_state = BackfillSyncState::Active; + let valid_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.validate_forkchoice_state(valid_state).unwrap(); + assert!(result.is_some(), "Backfill active should return early"); + let outcome = result.unwrap(); + // We need to await the outcome to check the payload status + let fcu_result = outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing()); + + // Test 3: Valid state with idle backfill should continue processing + test_harness.tree.backfill_sync_state = BackfillSyncState::Idle; + let valid_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.validate_forkchoice_state(valid_state).unwrap(); + assert!(result.is_none(), "Valid state should continue processing"); + } + + /// Test that verifies canonical head handling + #[tokio::test] + async fn test_handle_canonical_head() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Create test blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..3).collect(); + test_harness = test_harness.with_blocks(blocks); + + let canonical_head = test_harness.tree.state.tree_state.canonical_block_hash(); + + // Test 1: Head is already canonical, no payload attributes + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .handle_canonical_head(state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_some(), "Should return outcome for canonical head"); + let outcome = result.unwrap(); + let fcu_result = outcome.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_valid()); + + // Test 2: Head is not canonical - should return None to continue processing + let non_canonical_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .handle_canonical_head(non_canonical_state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_none(), "Non-canonical head should return None"); + } + + /// Test that verifies chain update application + #[tokio::test] + async fn test_apply_chain_update() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Create a chain of blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..5).collect(); + test_harness = test_harness.with_blocks(blocks.clone()); + + let new_head = blocks[2].recovered_block().hash(); + + // Test 1: Apply chain update to a new head + let state = ForkchoiceState { + head_block_hash: new_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .apply_chain_update(state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_some(), "Should apply chain update for new head"); + let outcome = result.unwrap(); + let fcu_result = outcome.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_valid()); + + // Test 2: Try to apply chain update to missing block + let missing_state = ForkchoiceState { + 
head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .apply_chain_update(missing_state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_none(), "Missing block should return None"); + } + + /// Test that verifies missing block handling + #[tokio::test] + async fn test_handle_missing_block() { + let chain_spec = MAINNET.clone(); + let test_harness = TestHarness::new(chain_spec); + + let state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.handle_missing_block(state).unwrap(); + + // Should return syncing status with download event + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing()); + assert!(result.event.is_some()); + + if let Some(TreeEvent::Download(download_request)) = result.event { + match download_request { + DownloadRequest::BlockSet(block_set) => { + assert_eq!(block_set.len(), 1); + } + _ => panic!("Expected single block download request"), + } + } + } + + /// Test the complete `on_forkchoice_updated` flow with all helper methods + #[tokio::test] + async fn test_on_forkchoice_updated_integration() { + reth_tracing::init_test_tracing(); + + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Create test blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..3).collect(); + test_harness = test_harness.with_blocks(blocks.clone()); + + let canonical_head = test_harness.tree.state.tree_state.canonical_block_hash(); + + // Test Case 1: FCU to existing canonical head + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: canonical_head, + finalized_block_hash: canonical_head, + }; + + let result = test_harness + .tree + .on_forkchoice_updated(state, None, EngineApiMessageVersion::default()) + .unwrap(); + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_valid()); + + // Test Case 2: FCU to missing block + let missing_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .on_forkchoice_updated(missing_state, None, EngineApiMessageVersion::default()) + .unwrap(); + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing()); + assert!(result.event.is_some(), "Should trigger download event for missing block"); + + // Test Case 3: FCU during backfill sync + test_harness.tree.backfill_sync_state = BackfillSyncState::Active; + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .on_forkchoice_updated(state, None, EngineApiMessageVersion::default()) + .unwrap(); + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing(), "Should return syncing during backfill"); + } + + /// Test metrics recording in forkchoice updated + #[tokio::test] + async fn test_record_forkchoice_metrics() { + let chain_spec = MAINNET.clone(); + let test_harness = TestHarness::new(chain_spec); + + // Get initial metrics state by checking if metrics are recorded + // We can't directly get counter values, but we can verify the methods are called + + // Test without attributes + let attrs_none = 
None; + test_harness.tree.record_forkchoice_metrics(&attrs_none); + + // Test with attributes + let attrs_some = Some(alloy_rpc_types_engine::PayloadAttributes { + timestamp: 1000, + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: None, + parent_beacon_block_root: None, + }); + test_harness.tree.record_forkchoice_metrics(&attrs_some); + + // We can't directly verify counter values since they're private metrics + // But we can verify the methods don't panic and execute successfully + } + + /// Test edge case: FCU with invalid ancestor + #[tokio::test] + async fn test_fcu_with_invalid_ancestor() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Mark a block as invalid + let invalid_block_hash = B256::random(); + test_harness.tree.state.invalid_headers.insert(BlockWithParent { + block: NumHash::new(1, invalid_block_hash), + parent: B256::ZERO, + }); + + // Test FCU that points to a descendant of the invalid block + // This is a bit tricky to test directly, but we can verify the check_invalid_ancestor + // method + let result = test_harness.tree.check_invalid_ancestor(invalid_block_hash).unwrap(); + assert!(result.is_some(), "Should detect invalid ancestor"); + } + + /// Test `OpStack` specific behavior with canonical head + #[tokio::test] + async fn test_opstack_canonical_head_behavior() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Set engine kind to OpStack + test_harness.tree.engine_kind = EngineApiKind::OpStack; + + // Create test blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..3).collect(); + test_harness = test_harness.with_blocks(blocks); + + let canonical_head = test_harness.tree.state.tree_state.canonical_block_hash(); + + // For OpStack, even if head is already canonical, we should still process payload + // attributes + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .handle_canonical_head(state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_some(), "OpStack should handle canonical head"); + } +} From 6770ba9eed6126f00ae1ff28fa86c39cbaae0112 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 8 Oct 2025 09:11:16 -0400 Subject: [PATCH 009/371] feat(provider): add get_account_before_block to ChangesetReader (#18898) --- .../src/providers/blockchain_provider.rs | 8 ++++ .../provider/src/providers/consistent.rs | 46 +++++++++++++++++++ .../src/providers/database/provider.rs | 13 ++++++ .../src/providers/state/historical.rs | 36 ++++++++------- .../storage/provider/src/test_utils/mock.rs | 8 ++++ crates/storage/rpc-provider/src/lib.rs | 8 ++++ crates/storage/storage-api/src/account.rs | 9 ++++ crates/storage/storage-api/src/noop.rs | 8 ++++ 8 files changed, 120 insertions(+), 16 deletions(-) diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 75e276b3c42..69e77079c55 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -716,6 +716,14 @@ impl ChangeSetReader for BlockchainProvider { ) -> ProviderResult> { self.consistent_provider()?.account_block_changeset(block_number) } + + fn get_account_before_block( + &self, + block_number: 
BlockNumber, + address: Address, + ) -> ProviderResult> { + self.consistent_provider()?.get_account_before_block(block_number, address) + } } impl AccountReader for BlockchainProvider { diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 03615d5357b..93415e8e347 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1422,6 +1422,52 @@ impl ChangeSetReader for ConsistentProvider { self.storage_provider.account_block_changeset(block_number) } } + + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + // Search in-memory state for the account changeset + let changeset = state + .block_ref() + .execution_output + .bundle + .reverts + .clone() + .to_plain_state_reverts() + .accounts + .into_iter() + .flatten() + .find(|(addr, _)| addr == &address) + .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }); + Ok(changeset) + } else { + // Perform checks on whether or not changesets exist for the block. + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let account_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::AccountHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. + // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. + checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !account_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + // Delegate to the storage provider for database lookups + self.storage_provider.get_account_before_block(block_number, address) + } + } } impl AccountReader for ConsistentProvider { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 16b463be1e8..55739bbe915 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -939,6 +939,19 @@ impl ChangeSetReader for DatabaseProvider { }) .collect() } + + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult> { + self.tx + .cursor_dup_read::()? + .seek_by_key_subkey(block_number, address)? 
+ .filter(|acc| acc.address == address) + .map(Ok) + .transpose() + } } impl HeaderSyncGapProvider diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 9a22a527ccb..f3e69bf7d91 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -1,6 +1,6 @@ use crate::{ providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, - HashedPostStateProvider, ProviderError, StateProvider, StateRootProvider, + ChangeSetReader, HashedPostStateProvider, ProviderError, StateProvider, StateRootProvider, }; use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; @@ -241,23 +241,23 @@ impl HistoricalStateProviderRef<'_, Provi } } -impl AccountReader +impl AccountReader for HistoricalStateProviderRef<'_, Provider> { /// Get basic account information. fn basic_account(&self, address: &Address) -> ProviderResult> { match self.account_history_lookup(*address)? { HistoryInfo::NotYetWritten => Ok(None), - HistoryInfo::InChangeset(changeset_block_number) => Ok(self - .tx() - .cursor_dup_read::()? - .seek_by_key_subkey(changeset_block_number, *address)? - .filter(|acc| &acc.address == address) - .ok_or(ProviderError::AccountChangesetNotFound { - block_number: changeset_block_number, - address: *address, - })? - .info), + HistoryInfo::InChangeset(changeset_block_number) => { + // Use ChangeSetReader trait method to get the account from changesets + self.provider + .get_account_before_block(changeset_block_number, *address)? + .ok_or(ProviderError::AccountChangesetNotFound { + block_number: changeset_block_number, + address: *address, + }) + .map(|account_before| account_before.info) + } HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => { Ok(self.tx().get_by_encoded_key::(address)?) } @@ -394,7 +394,7 @@ impl HashedPostStateProvider for HistoricalStateProviderRef<'_, } } -impl StateProvider +impl StateProvider for HistoricalStateProviderRef<'_, Provider> { /// Get storage. @@ -485,7 +485,7 @@ impl HistoricalStateProvider { } // Delegates all provider impls to [HistoricalStateProviderRef] -delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader ]); +delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. 
@@ -530,7 +530,9 @@ mod tests { BlockNumberList, }; use reth_primitives_traits::{Account, StorageEntry}; - use reth_storage_api::{BlockHashReader, BlockNumReader, DBProvider, DatabaseProviderFactory}; + use reth_storage_api::{ + BlockHashReader, BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory, + }; use reth_storage_errors::provider::ProviderError; const ADDRESS: Address = address!("0x0000000000000000000000000000000000000001"); @@ -540,7 +542,9 @@ mod tests { const fn assert_state_provider() {} #[expect(dead_code)] - const fn assert_historical_state_provider() { + const fn assert_historical_state_provider< + T: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader, + >() { assert_state_provider::>(); } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index d5e3fe4da7b..1024312ead9 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -984,6 +984,14 @@ impl ChangeSetReader for MockEthProvi ) -> ProviderResult> { Ok(Vec::default()) } + + fn get_account_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + ) -> ProviderResult> { + Ok(None) + } } impl StateReader for MockEthProvider { diff --git a/crates/storage/rpc-provider/src/lib.rs b/crates/storage/rpc-provider/src/lib.rs index 76e511d52d4..ed6e49eefbd 100644 --- a/crates/storage/rpc-provider/src/lib.rs +++ b/crates/storage/rpc-provider/src/lib.rs @@ -1764,6 +1764,14 @@ where ) -> Result, ProviderError> { Err(ProviderError::UnsupportedProvider) } + + fn get_account_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + ) -> ProviderResult> { + Err(ProviderError::UnsupportedProvider) + } } impl StateProviderFactory for RpcBlockchainStateProvider diff --git a/crates/storage/storage-api/src/account.rs b/crates/storage/storage-api/src/account.rs index 1692c4c21f4..270bfd1226c 100644 --- a/crates/storage/storage-api/src/account.rs +++ b/crates/storage/storage-api/src/account.rs @@ -54,4 +54,13 @@ pub trait ChangeSetReader { &self, block_number: BlockNumber, ) -> ProviderResult>; + + /// Search the block's changesets for the given address, and return the result. + /// + /// Returns `None` if the account was not changed in this block. 
+ fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult>; } diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 44e499ae006..e0c57d5226b 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -399,6 +399,14 @@ impl ChangeSetReader for NoopProvider { ) -> ProviderResult> { Ok(Vec::default()) } + + fn get_account_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + ) -> ProviderResult> { + Ok(None) + } } impl StateRootProvider for NoopProvider { From c0caaa17be5335ab6993a6bb2bd6845dffedd420 Mon Sep 17 00:00:00 2001 From: Merkel Tranjes <140164174+rnkrtt@users.noreply.github.com> Date: Wed, 8 Oct 2025 16:20:39 +0200 Subject: [PATCH 010/371] refactor: replace collect().is_empty() with next().is_none() in tests (#18902) Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- crates/chain-state/src/in_memory.rs | 3 +-- crates/transaction-pool/src/pool/pending.rs | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index cd194db81e3..dd78b6cf5fe 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -1380,8 +1380,7 @@ mod tests { #[test] fn test_canonical_in_memory_state_canonical_chain_empty() { let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); - let chain: Vec<_> = state.canonical_chain().collect(); - assert!(chain.is_empty()); + assert!(state.canonical_chain().next().is_none()); } #[test] diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 9bd1d092b4f..317066137da 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -921,8 +921,7 @@ mod tests { assert!(removed.is_empty()); // Verify that retrieving transactions from an empty pool yields nothing - let all_txs: Vec<_> = pool.all().collect(); - assert!(all_txs.is_empty()); + assert!(pool.all().next().is_none()); } #[test] From c78378a8cef062bf74fed2aca8d8ff1294b90315 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 8 Oct 2025 16:53:25 +0200 Subject: [PATCH 011/371] ci: cache hive simulator images to reduce prepare-hive job time (#18899) --- .github/workflows/hive.yml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 5263eb76deb..13a952e6875 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -34,14 +34,39 @@ jobs: repository: ethereum/hive path: hivetests + - name: Get hive commit hash + id: hive-commit + run: echo "hash=$(cd hivetests && git rev-parse HEAD)" >> $GITHUB_OUTPUT + - uses: actions/setup-go@v6 with: go-version: "^1.13.1" - run: go version + - name: Restore hive assets cache + id: cache-hive + uses: actions/cache@v4 + with: + path: ./hive_assets + key: hive-assets-${{ steps.hive-commit.outputs.hash }}-${{ hashFiles('.github/assets/hive/build_simulators.sh') }} + - name: Build hive assets + if: steps.cache-hive.outputs.cache-hit != 'true' run: .github/assets/hive/build_simulators.sh + - name: Load cached Docker images + if: steps.cache-hive.outputs.cache-hit == 'true' + run: | + cd hive_assets + for tar_file in *.tar; do + if [ -f "$tar_file" ]; then + echo "Loading $tar_file..." 
+ docker load -i "$tar_file" + fi + done + # Make hive binary executable + chmod +x hive + - name: Upload hive assets uses: actions/upload-artifact@v4 with: From df6afe9daad02283b37c1f431cfad788907646c9 Mon Sep 17 00:00:00 2001 From: stevencartavia <112043913+stevencartavia@users.noreply.github.com> Date: Wed, 8 Oct 2025 09:03:44 -0600 Subject: [PATCH 012/371] docs: duplicate comment in Eip4844PoolTransactionError (#18858) --- crates/transaction-pool/src/error.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 74d92fb3e6b..6360817caa1 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -157,7 +157,7 @@ pub enum Eip4844PoolTransactionError { /// Thrown if an EIP-4844 transaction without any blobs arrives #[error("blobless blob transaction")] NoEip4844Blobs, - /// Thrown if an EIP-4844 transaction without any blobs arrives + /// Thrown if an EIP-4844 transaction arrives with too many blobs #[error("too many blobs in transaction: have {have}, permitted {permitted}")] TooManyEip4844Blobs { /// Number of blobs the transaction has From 6f96a328128eb79a3009669b0a32f232971a99c9 Mon Sep 17 00:00:00 2001 From: emmmm <155267286+eeemmmmmm@users.noreply.github.com> Date: Thu, 9 Oct 2025 07:18:49 -0400 Subject: [PATCH 013/371] chore: align node_config threshold constant (#18914) --- crates/node/core/src/node_config.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 96fa8cc8dfa..bb5beda1d0c 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -36,12 +36,9 @@ use tracing::*; use crate::args::EraArgs; pub use reth_engine_primitives::{ DEFAULT_MAX_PROOF_TASK_CONCURRENCY, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, - DEFAULT_RESERVED_CPU_CORES, + DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, }; -/// Triggers persistence when the number of canonical blocks in memory exceeds this threshold. -pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; - /// Default size of cross-block cache in megabytes. 
pub const DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB: u64 = 4 * 1024; From d2070f4de34f523f6097ebc64fa9d63a04878055 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Thu, 9 Oct 2025 18:42:59 +0200 Subject: [PATCH 014/371] feat: wait for new blocks when build is in progress (#18831) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Roman Hodulák --- crates/optimism/flashblocks/src/lib.rs | 5 +- crates/optimism/flashblocks/src/service.rs | 38 +++++- crates/optimism/rpc/src/eth/mod.rs | 121 +++++++++++++------ crates/optimism/rpc/src/eth/pending_block.rs | 4 +- crates/optimism/rpc/src/eth/transaction.rs | 4 +- 5 files changed, 127 insertions(+), 45 deletions(-) diff --git a/crates/optimism/flashblocks/src/lib.rs b/crates/optimism/flashblocks/src/lib.rs index e818e9cb538..582cbca633f 100644 --- a/crates/optimism/flashblocks/src/lib.rs +++ b/crates/optimism/flashblocks/src/lib.rs @@ -4,7 +4,7 @@ pub use payload::{ ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashBlock, FlashBlockDecoder, Metadata, }; -pub use service::FlashBlockService; +pub use service::{FlashBlockBuildInfo, FlashBlockService}; pub use ws::{WsConnect, WsFlashBlockStream}; mod consensus; @@ -28,3 +28,6 @@ pub type PendingBlockRx = tokio::sync::watch::Receiver; + +/// Receiver that signals whether a [`FlashBlock`] is currently being built. +pub type InProgressFlashBlockRx = tokio::sync::watch::Receiver>; diff --git a/crates/optimism/flashblocks/src/service.rs b/crates/optimism/flashblocks/src/service.rs index f4cf7f18450..7e442470d98 100644 --- a/crates/optimism/flashblocks/src/service.rs +++ b/crates/optimism/flashblocks/src/service.rs @@ -1,7 +1,8 @@ use crate::{ sequence::FlashBlockPendingSequence, worker::{BuildArgs, FlashBlockBuilder}, - ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequenceRx, PendingFlashBlock, + ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequenceRx, InProgressFlashBlockRx, + PendingFlashBlock, }; use alloy_eips::eip2718::WithEncoded; use alloy_primitives::B256; @@ -21,7 +22,10 @@ use std::{ task::{ready, Context, Poll}, time::Instant, }; -use tokio::{pin, sync::oneshot}; +use tokio::{ + pin, + sync::{oneshot, watch}, +}; use tracing::{debug, trace, warn}; pub(crate) const FB_STATE_ROOT_FROM_INDEX: usize = 9; @@ -48,11 +52,25 @@ pub struct FlashBlockService< /// when fb received on top of the same block. Avoid redundant I/O across multiple /// executions within the same block. cached_state: Option<(B256, CachedReads)>, + /// Signals when a block build is in progress + in_progress_tx: watch::Sender>, + /// `FlashBlock` service's metrics metrics: FlashBlockServiceMetrics, /// Enable state root calculation from flashblock with index [`FB_STATE_ROOT_FROM_INDEX`] compute_state_root: bool, } +/// Information for a flashblock currently built +#[derive(Debug, Clone, Copy)] +pub struct FlashBlockBuildInfo { + /// Parent block hash + pub parent_hash: B256, + /// Flashblock index within the current block's sequence + pub index: u64, + /// Block number of the flashblock being built. + pub block_number: u64, +} + impl FlashBlockService where N: NodePrimitives, @@ -73,6 +91,7 @@ where { /// Constructs a new `FlashBlockService` that receives [`FlashBlock`]s from `rx` stream. 
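The service above reports build progress through a `tokio::sync::watch` channel whose value is `None` while idle and `Some(..)` while a flashblock is being assembled. A self-contained sketch of that signaling pattern, with a stand-in for the `FlashBlockBuildInfo` struct added in this patch (tokio with the `sync` feature assumed):

```rust
use tokio::sync::watch;

/// Simplified stand-in for `FlashBlockBuildInfo` (the real struct also carries the
/// parent hash).
#[derive(Debug, Clone, Copy)]
struct BuildInfo {
    index: u64,
    block_number: u64,
}

fn main() {
    // `None` means "no build in progress"; `Some(info)` means a build has started.
    let (in_progress_tx, in_progress_rx) = watch::channel::<Option<BuildInfo>>(None);

    // Producer: announce the start of a build, do the work, then clear the flag.
    // A send error only means every receiver is gone, so it can be ignored.
    let _ = in_progress_tx.send(Some(BuildInfo { index: 3, block_number: 100 }));
    // ... build the flashblock ...
    let _ = in_progress_tx.send(None);

    // Consumer: a cheap, non-blocking peek at the most recent value.
    assert!(in_progress_rx.borrow().is_none());
}
```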
pub fn new(rx: S, evm_config: EvmConfig, provider: Provider, spawner: TaskExecutor) -> Self { + let (in_progress_tx, _) = watch::channel(None); Self { rx, current: None, @@ -83,6 +102,7 @@ where spawner, job: None, cached_state: None, + in_progress_tx, metrics: FlashBlockServiceMetrics::default(), compute_state_root: false, } @@ -99,6 +119,11 @@ where self.blocks.subscribe_block_sequence() } + /// Returns a receiver that signals when a flashblock is being built. + pub fn subscribe_in_progress(&self) -> InProgressFlashBlockRx { + self.in_progress_tx.subscribe() + } + /// Drives the services and sends new blocks to the receiver /// /// Note: this should be spawned @@ -218,6 +243,8 @@ where }; // reset job this.job.take(); + // No build in progress + let _ = this.in_progress_tx.send(None); if let Some((now, result)) = result { match result { @@ -293,6 +320,13 @@ where if let Some(args) = this.build_args() { let now = Instant::now(); + let fb_info = FlashBlockBuildInfo { + parent_hash: args.base.parent_hash, + index: args.last_flashblock_index, + block_number: args.base.block_number, + }; + // Signal that a flashblock build has started with build metadata + let _ = this.in_progress_tx.send(Some(fb_info)); let (tx, rx) = oneshot::channel(); let builder = this.builder.clone(); diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index fdd06d224bc..a2226e0cbf3 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -13,7 +13,7 @@ use crate::{ OpEthApiError, SequencerClient, }; use alloy_consensus::BlockHeader; -use alloy_primitives::U256; +use alloy_primitives::{B256, U256}; use eyre::WrapErr; use op_alloy_network::Optimism; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; @@ -23,8 +23,8 @@ use reth_evm::ConfigureEvm; use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ - ExecutionPayloadBaseV1, FlashBlockCompleteSequenceRx, FlashBlockService, PendingBlockRx, - WsFlashBlockStream, + ExecutionPayloadBaseV1, FlashBlockBuildInfo, FlashBlockCompleteSequenceRx, FlashBlockService, + InProgressFlashBlockRx, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ @@ -43,10 +43,18 @@ use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, }; -use std::{fmt, fmt::Formatter, marker::PhantomData, sync::Arc, time::Instant}; -use tokio::sync::watch; +use std::{ + fmt::{self, Formatter}, + marker::PhantomData, + sync::Arc, + time::Duration, +}; +use tokio::{sync::watch, time}; use tracing::info; +/// Maximum duration to wait for a fresh flashblock when one is being built. +const MAX_FLASHBLOCK_WAIT_DURATION: Duration = Duration::from_millis(50); + /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. pub type EthApiNodeBackend = EthApiInner; @@ -79,6 +87,7 @@ impl OpEthApi { min_suggested_priority_fee: U256, pending_block_rx: Option>, flashblock_rx: Option, + in_progress_rx: Option, ) -> Self { let inner = Arc::new(OpEthApiInner { eth_api, @@ -86,6 +95,7 @@ impl OpEthApi { min_suggested_priority_fee, pending_block_rx, flashblock_rx, + in_progress_rx, }); Self { inner } } @@ -109,15 +119,57 @@ impl OpEthApi { self.inner.flashblock_rx.as_ref().map(|rx| rx.resubscribe()) } + /// Returns information about the flashblock currently being built, if any. 
+ fn flashblock_build_info(&self) -> Option { + self.inner.in_progress_rx.as_ref().and_then(|rx| *rx.borrow()) + } + + /// Extracts pending block if it matches the expected parent hash. + fn extract_matching_block( + &self, + block: Option<&PendingFlashBlock>, + parent_hash: B256, + ) -> Option> { + block.filter(|b| b.block().parent_hash() == parent_hash).map(|b| b.pending.clone()) + } + /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. pub const fn builder() -> OpEthApiBuilder { OpEthApiBuilder::new() } + /// Awaits a fresh flashblock if one is being built, otherwise returns current. + async fn flashblock( + &self, + parent_hash: B256, + ) -> eyre::Result>> { + let Some(rx) = self.inner.pending_block_rx.as_ref() else { return Ok(None) }; + + // Check if a flashblock is being built + if let Some(build_info) = self.flashblock_build_info() { + let current_index = rx.borrow().as_ref().map(|b| b.last_flashblock_index); + + // Check if this is the first flashblock or the next consecutive index + let is_next_index = current_index.is_none_or(|idx| build_info.index == idx + 1); + + // Wait only for relevant flashblocks: matching parent and next in sequence + if build_info.parent_hash == parent_hash && is_next_index { + let mut rx_clone = rx.clone(); + // Wait up to MAX_FLASHBLOCK_WAIT_DURATION for a new flashblock to arrive + let _ = time::timeout(MAX_FLASHBLOCK_WAIT_DURATION, rx_clone.changed()).await; + } + } + + // Fall back to current block + Ok(self.extract_matching_block(rx.borrow().as_ref(), parent_hash)) + } + /// Returns a [`PendingBlock`] that is built out of flashblocks. /// /// If flashblocks receiver is not set, then it always returns `None`. - pub fn pending_flashblock(&self) -> eyre::Result>> + /// + /// It may wait up to 50ms for a fresh flashblock if one is currently being built. + pub async fn pending_flashblock(&self) -> eyre::Result>> where OpEthApiError: FromEvmError, Rpc: RpcConvert, @@ -128,21 +180,7 @@ impl OpEthApi { PendingBlockEnvOrigin::DerivedFromLatest(parent) => parent, }; - let Some(rx) = self.inner.pending_block_rx.as_ref() else { return Ok(None) }; - let pending_block = rx.borrow(); - let Some(pending_block) = pending_block.as_ref() else { return Ok(None) }; - - let now = Instant::now(); - - // Is the pending block not expired and latest is its parent? - if pending.evm_env.block_env.number == U256::from(pending_block.block().number()) && - parent.hash() == pending_block.block().parent_hash() && - now <= pending_block.expires_at - { - return Ok(Some(pending_block.pending.clone())); - } - - Ok(None) + self.flashblock(parent.hash()).await } } @@ -330,6 +368,8 @@ pub struct OpEthApiInner { /// /// If set, then it provides sequences of flashblock built. 
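`flashblock` above waits at most `MAX_FLASHBLOCK_WAIT_DURATION` (50ms) for the in-flight build to land, then serves whatever pending block is current. The bounded wait boils down to a timeout on `watch::Receiver::changed`, sketched here as a standalone helper (tokio `sync` and `time` features assumed):

```rust
use std::time::Duration;
use tokio::{sync::watch, time};

/// Waits briefly for a newer value on a watch channel, then reads whatever is current.
///
/// The timeout result is deliberately ignored: whether or not a fresh value arrived in
/// time, the latest one is read out of `borrow()` and returned.
async fn latest_after_short_wait<T: Clone>(rx: &watch::Receiver<T>, wait: Duration) -> T {
    let mut rx = rx.clone();
    let _ = time::timeout(wait, rx.changed()).await;
    rx.borrow().clone()
}
```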
flashblock_rx: Option, + /// Receiver that signals when a flashblock is being built + in_progress_rx: Option, } impl fmt::Debug for OpEthApiInner { @@ -465,24 +505,28 @@ where None }; - let rxs = if let Some(ws_url) = flashblocks_url { - info!(target: "reth:cli", %ws_url, "Launching flashblocks service"); - let (tx, pending_block_rx) = watch::channel(None); - let stream = WsFlashBlockStream::new(ws_url); - let service = FlashBlockService::new( - stream, - ctx.components.evm_config().clone(), - ctx.components.provider().clone(), - ctx.components.task_executor().clone(), - ); - let flashblock_rx = service.subscribe_block_sequence(); - ctx.components.task_executor().spawn(Box::pin(service.run(tx))); - Some((pending_block_rx, flashblock_rx)) - } else { - None - }; + let (pending_block_rx, flashblock_rx, in_progress_rx) = + if let Some(ws_url) = flashblocks_url { + info!(target: "reth:cli", %ws_url, "Launching flashblocks service"); + + let (tx, pending_rx) = watch::channel(None); + let stream = WsFlashBlockStream::new(ws_url); + let service = FlashBlockService::new( + stream, + ctx.components.evm_config().clone(), + ctx.components.provider().clone(), + ctx.components.task_executor().clone(), + ); + + let flashblock_rx = service.subscribe_block_sequence(); + let in_progress_rx = service.subscribe_in_progress(); + + ctx.components.task_executor().spawn(Box::pin(service.run(tx))); - let (pending_block_rx, flashblock_rx) = rxs.unzip(); + (Some(pending_rx), Some(flashblock_rx), Some(in_progress_rx)) + } else { + (None, None, None) + }; let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner(); @@ -492,6 +536,7 @@ where U256::from(min_suggested_priority_fee), pending_block_rx, flashblock_rx, + in_progress_rx, )) } } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 8857b89b021..151668f4039 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -42,7 +42,7 @@ where async fn local_pending_block( &self, ) -> Result>, Self::Error> { - if let Ok(Some(pending)) = self.pending_flashblock() { + if let Ok(Some(pending)) = self.pending_flashblock().await { return Ok(Some(pending.into_block_and_receipts())); } @@ -70,7 +70,7 @@ where where Self: SpawnBlocking, { - let Ok(Some(pending_block)) = self.pending_flashblock() else { + let Ok(Some(pending_block)) = self.pending_flashblock().await else { return Ok(None); }; diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index fb98569db10..aa7e8ea60bd 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -127,7 +127,7 @@ where } } => { // Check flashblocks for faster confirmation (Optimism-specific) - if let Ok(Some(pending_block)) = this.pending_flashblock() { + if let Ok(Some(pending_block)) = this.pending_flashblock().await { let block_and_receipts = pending_block.into_block_and_receipts(); if block_and_receipts.block.body().contains_transaction(&hash) && let Some(receipt) = this.transaction_receipt(hash).await? 
{ @@ -168,7 +168,7 @@ where if tx_receipt.is_none() { // if flashblocks are supported, attempt to find id from the pending block - if let Ok(Some(pending_block)) = this.pending_flashblock() { + if let Ok(Some(pending_block)) = this.pending_flashblock().await { let block_and_receipts = pending_block.into_block_and_receipts(); if let Some((tx, receipt)) = block_and_receipts.find_transaction_and_receipt_by_hash(hash) From 397a30defbc7d577dd31dfbc269ad764a5e00449 Mon Sep 17 00:00:00 2001 From: YK Date: Fri, 10 Oct 2025 15:58:15 +0800 Subject: [PATCH 015/371] perf(tree): worker pooling for storage in multiproof generation (#18887) Co-authored-by: Brian Picciano Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- Cargo.lock | 1 + crates/engine/primitives/src/config.rs | 32 +- crates/engine/tree/benches/state_root_task.rs | 26 +- .../tree/src/tree/payload_processor/mod.rs | 48 +- .../src/tree/payload_processor/multiproof.rs | 4 +- .../engine/tree/src/tree/payload_validator.rs | 42 +- crates/node/core/src/args/engine.rs | 16 +- crates/trie/parallel/Cargo.toml | 1 + crates/trie/parallel/src/proof.rs | 3 +- crates/trie/parallel/src/proof_task.rs | 647 +++++++++++++----- docs/vocs/docs/pages/cli/reth/node.mdx | 3 + 11 files changed, 626 insertions(+), 197 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8350347b6b4..fde6f2dc3aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10739,6 +10739,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "codspeed-criterion-compat", + "crossbeam-channel", "dashmap 6.1.0", "derive_more", "itertools 0.14.0", diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index e5f58523d03..b2f8da4d424 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -6,9 +6,21 @@ pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; /// How close to the canonical head we persist blocks. pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0; -/// Default maximum concurrency for proof tasks +/// Default maximum concurrency for on-demand proof tasks (blinded nodes) pub const DEFAULT_MAX_PROOF_TASK_CONCURRENCY: u64 = 256; +/// Returns the default number of storage worker threads based on available parallelism. +fn default_storage_worker_count() -> usize { + #[cfg(feature = "std")] + { + std::thread::available_parallelism().map(|n| (n.get() * 2).clamp(2, 64)).unwrap_or(8) + } + #[cfg(not(feature = "std"))] + { + 8 + } +} + /// The size of proof targets chunk to spawn in one multiproof calculation. pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 10; @@ -109,6 +121,8 @@ pub struct TreeConfig { prewarm_max_concurrency: usize, /// Whether to unwind canonical header to ancestor during forkchoice updates. allow_unwind_canonical_header: bool, + /// Number of storage proof worker threads. 
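The new default sizes the storage worker pool at twice the detected parallelism, clamped to `[2, 64]`, with 8 as the fallback when parallelism cannot be detected. Worked out for a few machine sizes:

```rust
fn main() {
    // 2x the core count, clamped to the [2, 64] range used by the new default.
    for cores in [1usize, 4, 8, 32, 48] {
        let workers = (cores * 2).clamp(2, 64);
        println!("{cores} cores -> {workers} storage workers");
    }
    // yields 2, 8, 16, 64 and 64 workers respectively
}
```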
+ storage_worker_count: usize, } impl Default for TreeConfig { @@ -135,6 +149,7 @@ impl Default for TreeConfig { always_process_payload_attributes_on_canonical_head: false, prewarm_max_concurrency: DEFAULT_PREWARM_MAX_CONCURRENCY, allow_unwind_canonical_header: false, + storage_worker_count: default_storage_worker_count(), } } } @@ -164,7 +179,9 @@ impl TreeConfig { always_process_payload_attributes_on_canonical_head: bool, prewarm_max_concurrency: usize, allow_unwind_canonical_header: bool, + storage_worker_count: usize, ) -> Self { + assert!(max_proof_task_concurrency > 0, "max_proof_task_concurrency must be at least 1"); Self { persistence_threshold, memory_block_buffer_target, @@ -187,6 +204,7 @@ impl TreeConfig { always_process_payload_attributes_on_canonical_head, prewarm_max_concurrency, allow_unwind_canonical_header, + storage_worker_count, } } @@ -394,6 +412,7 @@ impl TreeConfig { mut self, max_proof_task_concurrency: u64, ) -> Self { + assert!(max_proof_task_concurrency > 0, "max_proof_task_concurrency must be at least 1"); self.max_proof_task_concurrency = max_proof_task_concurrency; self } @@ -452,4 +471,15 @@ impl TreeConfig { pub const fn prewarm_max_concurrency(&self) -> usize { self.prewarm_max_concurrency } + + /// Return the number of storage proof worker threads. + pub const fn storage_worker_count(&self) -> usize { + self.storage_worker_count + } + + /// Setter for the number of storage proof worker threads. + pub const fn with_storage_worker_count(mut self, storage_worker_count: usize) -> Self { + self.storage_worker_count = storage_worker_count; + self + } } diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs index 9f61e62d2f9..70d9e037e9d 100644 --- a/crates/engine/tree/benches/state_root_task.rs +++ b/crates/engine/tree/benches/state_root_task.rs @@ -228,16 +228,22 @@ fn bench_state_root(c: &mut Criterion) { }, |(genesis_hash, mut payload_processor, provider, state_updates)| { black_box({ - let mut handle = payload_processor.spawn( - Default::default(), - core::iter::empty::< - Result, core::convert::Infallible>, - >(), - StateProviderBuilder::new(provider.clone(), genesis_hash, None), - ConsistentDbView::new_with_latest_tip(provider).unwrap(), - TrieInput::default(), - &TreeConfig::default(), - ); + let mut handle = payload_processor + .spawn( + Default::default(), + core::iter::empty::< + Result< + Recovered, + core::convert::Infallible, + >, + >(), + StateProviderBuilder::new(provider.clone(), genesis_hash, None), + ConsistentDbView::new_with_latest_tip(provider).unwrap(), + TrieInput::default(), + &TreeConfig::default(), + ) + .map_err(|(err, ..)| err) + .expect("failed to spawn payload processor"); let mut state_hook = handle.state_hook(); diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 8d9bd1ba2e0..d449031606e 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -45,7 +45,7 @@ use std::sync::{ mpsc::{self, channel, Sender}, Arc, }; -use tracing::{debug, instrument}; +use tracing::{debug, instrument, warn}; mod configured_sparse_trie; pub mod executor; @@ -166,6 +166,10 @@ where /// /// This returns a handle to await the final state root and to interact with the tasks (e.g. /// canceling) + /// + /// Returns an error with the original transactions iterator if the proof task manager fails to + /// initialize. 
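With the new setter the pool size can be tuned next to the existing proof-task concurrency knob. A hedged configuration sketch (values are illustrative, and `TreeConfig` is assumed to be reachable from the `reth-engine-primitives` crate root, as the re-exports in this patch suggest):

```rust
use reth_engine_primitives::TreeConfig;

fn main() {
    // `with_max_proof_task_concurrency` now asserts a non-zero value; passing 0 panics.
    let config = TreeConfig::default()
        .with_max_proof_task_concurrency(128)
        .with_storage_worker_count(16);

    assert_eq!(config.storage_worker_count(), 16);
}
```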
+ #[allow(clippy::type_complexity)] pub fn spawn>( &mut self, env: ExecutionEnv, @@ -174,7 +178,10 @@ where consistent_view: ConsistentDbView

, trie_input: TrieInput, config: &TreeConfig, - ) -> PayloadHandle, I::Tx>, I::Error> + ) -> Result< + PayloadHandle, I::Tx>, I::Error>, + (reth_provider::ProviderError, I, ExecutionEnv, StateProviderBuilder), + > where P: DatabaseProviderFactory + BlockReader @@ -196,12 +203,19 @@ where state_root_config.prefix_sets.clone(), ); let max_proof_task_concurrency = config.max_proof_task_concurrency() as usize; - let proof_task = ProofTaskManager::new( + let storage_worker_count = config.storage_worker_count(); + let proof_task = match ProofTaskManager::new( self.executor.handle().clone(), state_root_config.consistent_view.clone(), task_ctx, max_proof_task_concurrency, - ); + storage_worker_count, + ) { + Ok(task) => task, + Err(error) => { + return Err((error, transactions, env, provider_builder)); + } + }; // We set it to half of the proof task concurrency, because often for each multiproof we // spawn one Tokio task for the account proof, and one Tokio task for the storage proof. @@ -252,12 +266,12 @@ where } }); - PayloadHandle { + Ok(PayloadHandle { to_multi_proof, prewarm_handle, state_root: Some(state_root_rx), transactions: execution_rx, - } + }) } /// Spawns a task that exclusively handles cache prewarming for transaction execution. @@ -857,14 +871,20 @@ mod tests { PrecompileCacheMap::default(), ); let provider = BlockchainProvider::new(factory).unwrap(); - let mut handle = payload_processor.spawn( - Default::default(), - core::iter::empty::, core::convert::Infallible>>(), - StateProviderBuilder::new(provider.clone(), genesis_hash, None), - ConsistentDbView::new_with_latest_tip(provider).unwrap(), - TrieInput::from_state(hashed_state), - &TreeConfig::default(), - ); + let mut handle = + payload_processor + .spawn( + Default::default(), + core::iter::empty::< + Result, core::convert::Infallible>, + >(), + StateProviderBuilder::new(provider.clone(), genesis_hash, None), + ConsistentDbView::new_with_latest_tip(provider).unwrap(), + TrieInput::from_state(hashed_state), + &TreeConfig::default(), + ) + .map_err(|(err, ..)| err) + .expect("failed to spawn payload processor"); let mut state_hook = handle.state_hook(); diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 6c7f5de40a3..18d394477fb 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -1236,7 +1236,9 @@ mod tests { config.consistent_view.clone(), task_ctx, 1, - ); + 1, + ) + .expect("Failed to create ProofTaskManager"); let channel = channel(); MultiProofTask::new(config, executor, proof_task.handle(), channel.0, 1, None) diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index cd2c37d1e91..1e63d29bf79 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -877,17 +877,37 @@ where // too expensive because it requires walking all paths in every proof. 
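The new `spawn` signature above hands its not-yet-consumed inputs back inside the error, which is what lets the validator hunk below retry on the cache-only path without cloning anything up front. A toy version of that ownership round-trip (all names here are illustrative, not the real reth types):

```rust
/// Toy stand-ins for the real handle, transaction iterator, and execution env.
struct Handle;
struct Inputs {
    transactions: Vec<u8>,
}

/// Fallible spawn: on failure, ownership of the inputs travels back through the error.
fn spawn_with_state_root_task(inputs: Inputs) -> Result<Handle, (String, Inputs)> {
    Err(("proof task manager failed to initialize".to_string(), inputs))
}

fn spawn_cache_exclusive(inputs: Inputs) -> Handle {
    println!("prewarming {} transactions without a state-root task", inputs.transactions.len());
    Handle
}

fn main() {
    let inputs = Inputs { transactions: vec![1, 2, 3] };
    let _handle = match spawn_with_state_root_task(inputs) {
        Ok(handle) => handle,
        Err((error, inputs)) => {
            eprintln!("falling back to cache-only prewarming: {error}");
            spawn_cache_exclusive(inputs)
        }
    };
}
```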
let spawn_start = Instant::now(); let (handle, strategy) = if trie_input.prefix_sets.is_empty() { - ( - self.payload_processor.spawn( - env, - txs, - provider_builder, - consistent_view, - trie_input, - &self.config, - ), - StateRootStrategy::StateRootTask, - ) + match self.payload_processor.spawn( + env, + txs, + provider_builder, + consistent_view, + trie_input, + &self.config, + ) { + Ok(handle) => { + // Successfully spawned with state root task support + (handle, StateRootStrategy::StateRootTask) + } + Err((error, txs, env, provider_builder)) => { + // Failed to initialize proof task manager, fallback to parallel state + // root + error!( + target: "engine::tree", + block=?block_num_hash, + ?error, + "Failed to initialize proof task manager, falling back to parallel state root" + ); + ( + self.payload_processor.spawn_cache_exclusive( + env, + txs, + provider_builder, + ), + StateRootStrategy::Parallel, + ) + } + } // if prefix sets are not empty, we spawn a task that exclusively handles cache // prewarming for transaction execution } else { diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index 88179a6b40e..2298b28f9ce 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -108,6 +108,11 @@ pub struct EngineArgs { /// See `TreeConfig::unwind_canonical_header` for more details. #[arg(long = "engine.allow-unwind-canonical-header", default_value = "false")] pub allow_unwind_canonical_header: bool, + + /// Configure the number of storage proof workers in the Tokio blocking pool. + /// If not specified, defaults to 2x available parallelism, clamped between 2 and 64. + #[arg(long = "engine.storage-worker-count")] + pub storage_worker_count: Option, } #[allow(deprecated)] @@ -134,6 +139,7 @@ impl Default for EngineArgs { state_root_fallback: false, always_process_payload_attributes_on_canonical_head: false, allow_unwind_canonical_header: false, + storage_worker_count: None, } } } @@ -141,7 +147,7 @@ impl Default for EngineArgs { impl EngineArgs { /// Creates a [`TreeConfig`] from the engine arguments. 
pub fn tree_config(&self) -> TreeConfig { - TreeConfig::default() + let mut config = TreeConfig::default() .with_persistence_threshold(self.persistence_threshold) .with_memory_block_buffer_target(self.memory_block_buffer_target) .with_legacy_state_root(self.legacy_state_root_task_enabled) @@ -159,7 +165,13 @@ impl EngineArgs { .with_always_process_payload_attributes_on_canonical_head( self.always_process_payload_attributes_on_canonical_head, ) - .with_unwind_canonical_header(self.allow_unwind_canonical_header) + .with_unwind_canonical_header(self.allow_unwind_canonical_header); + + if let Some(count) = self.storage_worker_count { + config = config.with_storage_worker_count(count); + } + + config } } diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index c9f625a1500..b4463d9ede3 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -36,6 +36,7 @@ derive_more.workspace = true rayon.workspace = true itertools.workspace = true tokio = { workspace = true, features = ["rt-multi-thread"] } +crossbeam-channel.workspace = true # `metrics` feature reth-metrics = { workspace = true, optional = true } diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index d6e1b57ed9b..4a2738fd38e 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -448,7 +448,8 @@ mod tests { let task_ctx = ProofTaskCtx::new(Default::default(), Default::default(), Default::default()); let proof_task = - ProofTaskManager::new(rt.handle().clone(), consistent_view.clone(), task_ctx, 1); + ProofTaskManager::new(rt.handle().clone(), consistent_view.clone(), task_ctx, 1, 1) + .unwrap(); let proof_task_handle = proof_task.handle(); // keep the join handle around to make sure it does not return any errors diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 9bb96d4b19e..0c513c55763 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -10,17 +10,18 @@ use crate::root::ParallelStateRootError; use alloy_primitives::{map::B256Set, B256}; +use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; use reth_db_api::transaction::DbTx; -use reth_execution_errors::SparseTrieError; +use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, FactoryTx, ProviderResult, }; use reth_trie::{ - hashed_cursor::HashedPostStateCursorFactory, + hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, prefix_set::TriePrefixSetsMut, proof::{ProofTrieNodeProviderFactory, StorageProof}, - trie_cursor::InMemoryTrieCursorFactory, + trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdatesSorted, DecodedStorageMultiProof, HashedPostStateSorted, Nibbles, }; @@ -40,7 +41,7 @@ use std::{ time::Instant, }; use tokio::runtime::Handle; -use tracing::{debug, trace}; +use tracing::trace; #[cfg(feature = "metrics")] use crate::proof_task_metrics::ProofTaskMetrics; @@ -48,65 +49,333 @@ use crate::proof_task_metrics::ProofTaskMetrics; type StorageProofResult = Result; type TrieNodeProviderResult = Result, SparseTrieError>; -/// A task that manages sending multiproof requests to a number of tasks that have longer-running -/// database transactions +/// Internal message for storage workers. +/// +/// This is NOT exposed publicly. 
External callers use `ProofTaskKind::StorageProof` or +/// `ProofTaskKind::BlindedStorageNode` which are routed through the manager's `std::mpsc` channel. +#[derive(Debug)] +enum StorageWorkerJob { + /// Storage proof computation request + StorageProof { + /// Storage proof input parameters + input: StorageProofInput, + /// Channel to send result back to original caller + result_sender: Sender, + }, + /// Blinded storage node retrieval request + BlindedStorageNode { + /// Target account + account: B256, + /// Path to the storage node + path: Nibbles, + /// Channel to send result back to original caller + result_sender: Sender, + }, +} + +impl StorageWorkerJob { + /// Sends an error back to the caller when worker pool is unavailable. + /// + /// Returns `Ok(())` if the error was sent successfully, or `Err(())` if the receiver was + /// dropped. + fn send_worker_unavailable_error(&self) -> Result<(), ()> { + let error = + ParallelStateRootError::Other("Storage proof worker pool unavailable".to_string()); + + match self { + Self::StorageProof { result_sender, .. } => { + result_sender.send(Err(error)).map_err(|_| ()) + } + Self::BlindedStorageNode { result_sender, .. } => result_sender + .send(Err(SparseTrieError::from(SparseTrieErrorKind::Other(Box::new(error))))) + .map_err(|_| ()), + } + } +} + +/// Manager for coordinating proof request execution across different task types. +/// +/// # Architecture +/// +/// This manager handles two distinct execution paths: +/// +/// 1. **Storage Worker Pool** (for storage trie operations): +/// - Pre-spawned workers with dedicated long-lived transactions +/// - Handles `StorageProof` and `BlindedStorageNode` requests +/// - Tasks queued via crossbeam unbounded channel +/// - Workers continuously process without transaction overhead +/// - Unbounded queue ensures all storage proofs benefit from transaction reuse +/// +/// 2. **On-Demand Execution** (for account trie operations): +/// - Lazy transaction creation for `BlindedAccountNode` requests +/// - Transactions returned to pool after use for reuse +/// +/// # Public Interface +/// +/// The public interface through `ProofTaskManagerHandle` allows external callers to: +/// - Submit tasks via `queue_task(ProofTaskKind)` +/// - Use standard `std::mpsc` message passing +/// - Receive consistent return types and error handling #[derive(Debug)] pub struct ProofTaskManager { - /// Max number of database transactions to create + /// Sender for storage worker jobs to worker pool. + storage_work_tx: CrossbeamSender, + + /// Number of storage workers successfully spawned. + /// + /// May be less than requested if concurrency limits reduce the worker budget. + storage_worker_count: usize, + + /// Max number of database transactions to create for on-demand account trie operations. max_concurrency: usize, - /// Number of database transactions created + + /// Number of database transactions created for on-demand operations. total_transactions: usize, - /// Consistent view provider used for creating transactions on-demand - view: ConsistentDbView, - /// Proof task context shared across all proof tasks - task_ctx: ProofTaskCtx, - /// Proof tasks pending execution + + /// Proof tasks pending execution (account trie operations only). pending_tasks: VecDeque, - /// The underlying handle from which to spawn proof tasks - executor: Handle, + /// The proof task transactions, containing owned cursor factories that are reused for proof - /// calculation. + /// calculation (account trie operations only). 
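The manager described above splits work between a pre-spawned storage worker pool fed by an unbounded crossbeam channel and on-demand tasks, with every job carrying its own reply channel. A stripped-down sketch of that pool shape, using plain threads and toy payloads in place of `spawn_blocking`, database transactions, and real proof inputs (the `crossbeam-channel` dependency is the one added to the workspace in this patch):

```rust
use crossbeam_channel::unbounded;
use std::{sync::mpsc, thread};

/// Each job carries its own result channel, so workers reply directly to the caller,
/// mirroring the `StorageWorkerJob` shape above.
struct Job {
    input: u64,
    result_sender: mpsc::Sender<u64>,
}

fn main() {
    let (work_tx, work_rx) = unbounded::<Job>();

    // Pre-spawn a small pool; in the real code each worker owns a long-lived
    // database transaction, created once and reused for every job it processes.
    let workers: Vec<_> = (0..4)
        .map(|_| {
            let work_rx = work_rx.clone();
            thread::spawn(move || {
                // The loop ends once every sender is dropped (graceful shutdown).
                while let Ok(job) = work_rx.recv() {
                    let _ = job.result_sender.send(job.input * 2);
                }
            })
        })
        .collect();

    // Dispatch a job and wait for the reply on its dedicated channel.
    let (result_tx, result_rx) = mpsc::channel();
    work_tx.send(Job { input: 21, result_sender: result_tx }).unwrap();
    assert_eq!(result_rx.recv().unwrap(), 42);

    // Dropping the last sender closes the queue and lets the workers exit.
    drop(work_tx);
    for worker in workers {
        worker.join().unwrap();
    }
}
```

Because the queue is unbounded, every storage job eventually reaches a worker that already holds a transaction, which is the point of the pool: no per-proof transaction setup and no fallback to on-demand execution.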
proof_task_txs: Vec>>, - /// A receiver for new proof tasks. + + /// Consistent view provider used for creating transactions on-demand. + view: ConsistentDbView, + + /// Proof task context shared across all proof tasks. + task_ctx: ProofTaskCtx, + + /// The underlying handle from which to spawn proof tasks. + executor: Handle, + + /// Receives proof task requests from [`ProofTaskManagerHandle`]. proof_task_rx: Receiver>>, - /// A sender for sending back transactions. + + /// Internal channel for on-demand tasks to return transactions after use. tx_sender: Sender>>, + /// The number of active handles. /// /// Incremented in [`ProofTaskManagerHandle::new`] and decremented in /// [`ProofTaskManagerHandle::drop`]. active_handles: Arc, - /// Metrics tracking blinded node fetches. + + /// Metrics tracking proof task operations. #[cfg(feature = "metrics")] metrics: ProofTaskMetrics, } -impl ProofTaskManager { - /// Creates a new [`ProofTaskManager`] with the given max concurrency, creating that number of - /// cursor factories. +/// Worker loop for storage trie operations. +/// +/// # Lifecycle +/// +/// Each worker: +/// 1. Receives `StorageWorkerJob` from crossbeam unbounded channel +/// 2. Computes result using its dedicated long-lived transaction +/// 3. Sends result directly to original caller via `std::mpsc` +/// 4. Repeats until channel closes (graceful shutdown) +/// +/// # Transaction Reuse +/// +/// Reuses the same transaction and cursor factories across multiple operations +/// to avoid transaction creation and cursor factory setup overhead. +/// +/// # Panic Safety +/// +/// If this function panics, the worker thread terminates but other workers +/// continue operating and the system degrades gracefully. +/// +/// # Shutdown +/// +/// Worker shuts down when the crossbeam channel closes (all senders dropped). +fn storage_worker_loop( + proof_tx: ProofTaskTx, + work_rx: CrossbeamReceiver, + worker_id: usize, +) where + Tx: DbTx, +{ + tracing::debug!( + target: "trie::proof_task", + worker_id, + "Storage worker started" + ); + + // Create factories once at worker startup to avoid recreation overhead. 
+ let (trie_cursor_factory, hashed_cursor_factory) = proof_tx.create_factories(); + + // Create blinded provider factory once for all blinded node requests + let blinded_provider_factory = ProofTrieNodeProviderFactory::new( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + proof_tx.task_ctx.prefix_sets.clone(), + ); + + let mut storage_proofs_processed = 0u64; + let mut storage_nodes_processed = 0u64; + + while let Ok(job) = work_rx.recv() { + match job { + StorageWorkerJob::StorageProof { input, result_sender } => { + let hashed_address = input.hashed_address; + + trace!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + prefix_set_len = input.prefix_set.len(), + target_slots = input.target_slots.len(), + "Processing storage proof" + ); + + let proof_start = Instant::now(); + let result = proof_tx.compute_storage_proof( + input, + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + ); + + let proof_elapsed = proof_start.elapsed(); + storage_proofs_processed += 1; + + if result_sender.send(result).is_err() { + tracing::debug!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + storage_proofs_processed, + "Storage proof receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + proof_time_us = proof_elapsed.as_micros(), + total_processed = storage_proofs_processed, + "Storage proof completed" + ); + } + + StorageWorkerJob::BlindedStorageNode { account, path, result_sender } => { + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + "Processing blinded storage node" + ); + + let start = Instant::now(); + let result = + blinded_provider_factory.storage_node_provider(account).trie_node(&path); + let elapsed = start.elapsed(); + + storage_nodes_processed += 1; + + if result_sender.send(result).is_err() { + tracing::debug!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + storage_nodes_processed, + "Blinded storage node receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + elapsed_us = elapsed.as_micros(), + total_processed = storage_nodes_processed, + "Blinded storage node completed" + ); + } + } + } + + tracing::debug!( + target: "trie::proof_task", + worker_id, + storage_proofs_processed, + storage_nodes_processed, + "Storage worker shutting down" + ); +} + +impl ProofTaskManager +where + Factory: DatabaseProviderFactory, +{ + /// Creates a new [`ProofTaskManager`] with pre-spawned storage proof workers. /// - /// Returns an error if the consistent view provider fails to create a read-only transaction. + /// The `storage_worker_count` determines how many storage workers to spawn, and + /// `max_concurrency` determines the limit for on-demand operations (blinded account nodes). + /// These are now independent - storage workers are spawned as requested, and on-demand + /// operations use a separate concurrency pool for blinded account nodes. + /// Returns an error if the underlying provider fails to create the transactions required for + /// spawning workers. pub fn new( executor: Handle, view: ConsistentDbView, task_ctx: ProofTaskCtx, max_concurrency: usize, - ) -> Self { + storage_worker_count: usize, + ) -> ProviderResult { let (tx_sender, proof_task_rx) = channel(); - Self { + + // Use unbounded channel to ensure all storage operations are queued to workers. 
+ // This maintains transaction reuse benefits and avoids fallback to on-demand execution. + let (storage_work_tx, storage_work_rx) = unbounded::(); + + tracing::info!( + target: "trie::proof_task", + storage_worker_count, + max_concurrency, + "Initializing storage worker pool with unbounded queue" + ); + + let mut spawned_workers = 0; + for worker_id in 0..storage_worker_count { + let provider_ro = view.provider_ro()?; + + let tx = provider_ro.into_tx(); + let proof_task_tx = ProofTaskTx::new(tx, task_ctx.clone(), worker_id); + let work_rx = storage_work_rx.clone(); + + executor.spawn_blocking(move || storage_worker_loop(proof_task_tx, work_rx, worker_id)); + + spawned_workers += 1; + + tracing::debug!( + target: "trie::proof_task", + worker_id, + spawned_workers, + "Storage worker spawned successfully" + ); + } + + Ok(Self { + storage_work_tx, + storage_worker_count: spawned_workers, max_concurrency, total_transactions: 0, + pending_tasks: VecDeque::new(), + proof_task_txs: Vec::with_capacity(max_concurrency), view, task_ctx, - pending_tasks: VecDeque::new(), executor, - proof_task_txs: Vec::new(), proof_task_rx, tx_sender, active_handles: Arc::new(AtomicUsize::new(0)), + #[cfg(feature = "metrics")] metrics: ProofTaskMetrics::default(), - } + }) } /// Returns a handle for sending new proof tasks to the [`ProofTaskManager`]. @@ -158,14 +427,12 @@ where let tx_sender = self.tx_sender.clone(); self.executor.spawn_blocking(move || match task { - ProofTaskKind::StorageProof(input, sender) => { - proof_task_tx.storage_proof(input, sender, tx_sender); - } ProofTaskKind::BlindedAccountNode(path, sender) => { proof_task_tx.blinded_account_node(path, sender, tx_sender); } - ProofTaskKind::BlindedStorageNode(account, path, sender) => { - proof_task_tx.blinded_storage_node(account, path, sender, tx_sender); + // Storage trie operations should never reach here as they're routed to worker pool + ProofTaskKind::BlindedStorageNode(_, _, _) | ProofTaskKind::StorageProof(_, _) => { + unreachable!("Storage trie operations should be routed to worker pool") } }); @@ -173,42 +440,121 @@ where } /// Loops, managing the proof tasks, and sending new tasks to the executor. + /// + /// # Task Routing + /// + /// - **Storage Trie Operations** (`StorageProof` and `BlindedStorageNode`): Routed to + /// pre-spawned worker pool via unbounded channel. + /// - **Account Trie Operations** (`BlindedAccountNode`): Queued for on-demand execution via + /// `pending_tasks`. + /// + /// # Shutdown + /// + /// On termination, `storage_work_tx` is dropped, closing the channel and + /// signaling all workers to shut down gracefully. 
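In the `run` loop that follows, a failed dispatch is recovered from `crossbeam_channel::SendError`, which returns the unsent job so the caller blocked on `result_sender` can still be told the pool is gone. A toy version of that recovery:

```rust
use crossbeam_channel::{unbounded, SendError};
use std::sync::mpsc;

/// Job shaped like `StorageWorkerJob`: the reply channel travels with the request.
struct Job {
    result_sender: mpsc::Sender<Result<u64, String>>,
}

fn main() {
    let (work_tx, work_rx) = unbounded::<Job>();
    // Simulate a dead worker pool: dropping the only receiver disconnects the channel.
    drop(work_rx);

    let (result_tx, result_rx) = mpsc::channel();
    if let Err(SendError(job)) = work_tx.send(Job { result_sender: result_tx }) {
        // The job was never consumed, so its reply channel is still ours to use.
        let _ = job
            .result_sender
            .send(Err("Storage proof worker pool unavailable".to_string()));
    }

    assert!(result_rx.recv().unwrap().is_err());
}
```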
pub fn run(mut self) -> ProviderResult<()> { loop { match self.proof_task_rx.recv() { - Ok(message) => match message { - ProofTaskMessage::QueueTask(task) => { - // Track metrics for blinded node requests - #[cfg(feature = "metrics")] - match &task { - ProofTaskKind::BlindedAccountNode(_, _) => { - self.metrics.account_nodes += 1; + Ok(message) => { + match message { + ProofTaskMessage::QueueTask(task) => match task { + ProofTaskKind::StorageProof(input, sender) => { + match self.storage_work_tx.send(StorageWorkerJob::StorageProof { + input, + result_sender: sender, + }) { + Ok(_) => { + tracing::trace!( + target: "trie::proof_task", + "Storage proof dispatched to worker pool" + ); + } + Err(crossbeam_channel::SendError(job)) => { + tracing::error!( + target: "trie::proof_task", + storage_worker_count = self.storage_worker_count, + "Worker pool disconnected, cannot process storage proof" + ); + + // Send error back to caller + let _ = job.send_worker_unavailable_error(); + } + } } - ProofTaskKind::BlindedStorageNode(_, _, _) => { - self.metrics.storage_nodes += 1; + + ProofTaskKind::BlindedStorageNode(account, path, sender) => { + #[cfg(feature = "metrics")] + { + self.metrics.storage_nodes += 1; + } + + match self.storage_work_tx.send( + StorageWorkerJob::BlindedStorageNode { + account, + path, + result_sender: sender, + }, + ) { + Ok(_) => { + tracing::trace!( + target: "trie::proof_task", + ?account, + ?path, + "Blinded storage node dispatched to worker pool" + ); + } + Err(crossbeam_channel::SendError(job)) => { + tracing::warn!( + target: "trie::proof_task", + storage_worker_count = self.storage_worker_count, + ?account, + ?path, + "Worker pool disconnected, cannot process blinded storage node" + ); + + // Send error back to caller + let _ = job.send_worker_unavailable_error(); + } + } } - _ => {} + + ProofTaskKind::BlindedAccountNode(_, _) => { + // Route account trie operations to pending_tasks + #[cfg(feature = "metrics")] + { + self.metrics.account_nodes += 1; + } + self.queue_proof_task(task); + } + }, + ProofTaskMessage::Transaction(tx) => { + // Return transaction to pending_tasks pool + self.proof_task_txs.push(tx); + } + ProofTaskMessage::Terminate => { + // Drop storage_work_tx to signal workers to shut down + drop(self.storage_work_tx); + + tracing::debug!( + target: "trie::proof_task", + storage_worker_count = self.storage_worker_count, + "Shutting down proof task manager, signaling workers to terminate" + ); + + // Record metrics before terminating + #[cfg(feature = "metrics")] + self.metrics.record(); + + return Ok(()) } - // queue the task - self.queue_proof_task(task) - } - ProofTaskMessage::Transaction(tx) => { - // return the transaction to the pool - self.proof_task_txs.push(tx); - } - ProofTaskMessage::Terminate => { - // Record metrics before terminating - #[cfg(feature = "metrics")] - self.metrics.record(); - return Ok(()) } - }, + } // All senders are disconnected, so we can terminate // However this should never happen, as this struct stores a sender Err(_) => return Ok(()), }; - // try spawning the next task + // Try spawning pending account trie tasks self.try_spawn_next()?; } } @@ -246,6 +592,7 @@ impl ProofTaskTx where Tx: DbTx, { + #[inline] fn create_factories(&self) -> ProofFactories<'_, Tx> { let trie_cursor_factory = InMemoryTrieCursorFactory::new( DatabaseTrieCursorFactory::new(&self.tx), @@ -260,82 +607,70 @@ where (trie_cursor_factory, hashed_cursor_factory) } - /// Calculates a storage proof for the given hashed address, and desired prefix set. 
- fn storage_proof( - self, + /// Compute storage proof with pre-created factories. + /// + /// Accepts cursor factories as parameters to allow reuse across multiple proofs. + /// Used by storage workers in the worker pool to avoid factory recreation + /// overhead on each proof computation. + #[inline] + fn compute_storage_proof( + &self, input: StorageProofInput, - result_sender: Sender, - tx_sender: Sender>, - ) { - trace!( - target: "trie::proof_task", - hashed_address=?input.hashed_address, - "Starting storage proof task calculation" - ); + trie_cursor_factory: impl TrieCursorFactory, + hashed_cursor_factory: impl HashedCursorFactory, + ) -> StorageProofResult { + // Consume the input so we can move large collections (e.g. target slots) without cloning. + let StorageProofInput { + hashed_address, + prefix_set, + target_slots, + with_branch_node_masks, + multi_added_removed_keys, + } = input; - let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - let multi_added_removed_keys = input - .multi_added_removed_keys - .unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); - let added_removed_keys = multi_added_removed_keys.get_storage(&input.hashed_address); + // Get or create added/removed keys context + let multi_added_removed_keys = + multi_added_removed_keys.unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); + let added_removed_keys = multi_added_removed_keys.get_storage(&hashed_address); let span = tracing::trace_span!( target: "trie::proof_task", "Storage proof calculation", - hashed_address=?input.hashed_address, - // Add a unique id because we often have parallel storage proof calculations for the - // same hashed address, and we want to differentiate them during trace analysis. - span_id=self.id, + hashed_address = ?hashed_address, + worker_id = self.id, ); - let span_guard = span.enter(); + let _span_guard = span.enter(); - let target_slots_len = input.target_slots.len(); let proof_start = Instant::now(); - let raw_proof_result = StorageProof::new_hashed( - trie_cursor_factory, - hashed_cursor_factory, - input.hashed_address, - ) - .with_prefix_set_mut(PrefixSetMut::from(input.prefix_set.iter().copied())) - .with_branch_node_masks(input.with_branch_node_masks) - .with_added_removed_keys(added_removed_keys) - .storage_multiproof(input.target_slots) - .map_err(|e| ParallelStateRootError::Other(e.to_string())); - - drop(span_guard); + // Compute raw storage multiproof + let raw_proof_result = + StorageProof::new_hashed(trie_cursor_factory, hashed_cursor_factory, hashed_address) + .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().copied())) + .with_branch_node_masks(with_branch_node_masks) + .with_added_removed_keys(added_removed_keys) + .storage_multiproof(target_slots) + .map_err(|e| ParallelStateRootError::Other(e.to_string())); + // Decode proof into DecodedStorageMultiProof let decoded_result = raw_proof_result.and_then(|raw_proof| { raw_proof.try_into().map_err(|e: alloy_rlp::Error| { ParallelStateRootError::Other(format!( "Failed to decode storage proof for {}: {}", - input.hashed_address, e + hashed_address, e )) }) }); trace!( target: "trie::proof_task", - hashed_address=?input.hashed_address, - prefix_set = ?input.prefix_set.len(), - target_slots = ?target_slots_len, - proof_time = ?proof_start.elapsed(), - "Completed storage proof task calculation" + hashed_address = ?hashed_address, + proof_time_us = proof_start.elapsed().as_micros(), + worker_id = self.id, + "Completed storage proof calculation" ); - // send the result back - 
if let Err(error) = result_sender.send(decoded_result) { - debug!( - target: "trie::proof_task", - hashed_address = ?input.hashed_address, - ?error, - task_time = ?proof_start.elapsed(), - "Storage proof receiver is dropped, discarding the result" - ); - } - - // send the tx back - let _ = tx_sender.send(ProofTaskMessage::Transaction(self)); + decoded_result } /// Retrieves blinded account node by path. @@ -380,53 +715,6 @@ where // send the tx back let _ = tx_sender.send(ProofTaskMessage::Transaction(self)); } - - /// Retrieves blinded storage node of the given account by path. - fn blinded_storage_node( - self, - account: B256, - path: Nibbles, - result_sender: Sender, - tx_sender: Sender>, - ) { - trace!( - target: "trie::proof_task", - ?account, - ?path, - "Starting blinded storage node retrieval" - ); - - let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - - let blinded_provider_factory = ProofTrieNodeProviderFactory::new( - trie_cursor_factory, - hashed_cursor_factory, - self.task_ctx.prefix_sets.clone(), - ); - - let start = Instant::now(); - let result = blinded_provider_factory.storage_node_provider(account).trie_node(&path); - trace!( - target: "trie::proof_task", - ?account, - ?path, - elapsed = ?start.elapsed(), - "Completed blinded storage node retrieval" - ); - - if let Err(error) = result_sender.send(result) { - tracing::error!( - target: "trie::proof_task", - ?account, - ?path, - ?error, - "Failed to send blinded storage node result" - ); - } - - // send the tx back - let _ = tx_sender.send(ProofTaskMessage::Transaction(self)); - } } /// This represents an input for a storage proof. @@ -607,3 +895,48 @@ impl TrieNodeProvider for ProofTaskTrieNodeProvider { rx.recv().unwrap() } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::map::B256Map; + use reth_provider::{providers::ConsistentDbView, test_utils::create_test_provider_factory}; + use reth_trie_common::{ + prefix_set::TriePrefixSetsMut, updates::TrieUpdatesSorted, HashedAccountsSorted, + HashedPostStateSorted, + }; + use std::sync::Arc; + use tokio::{runtime::Builder, task}; + + fn test_ctx() -> ProofTaskCtx { + ProofTaskCtx::new( + Arc::new(TrieUpdatesSorted::default()), + Arc::new(HashedPostStateSorted::new( + HashedAccountsSorted::default(), + B256Map::default(), + )), + Arc::new(TriePrefixSetsMut::default()), + ) + } + + /// Ensures `max_concurrency` is independent of storage workers. + #[test] + fn proof_task_manager_independent_pools() { + let runtime = Builder::new_multi_thread().worker_threads(1).enable_all().build().unwrap(); + runtime.block_on(async { + let handle = tokio::runtime::Handle::current(); + let factory = create_test_provider_factory(); + let view = ConsistentDbView::new(factory, None); + let ctx = test_ctx(); + + let manager = ProofTaskManager::new(handle.clone(), view, ctx, 1, 5).unwrap(); + // With storage_worker_count=5, we get exactly 5 workers + assert_eq!(manager.storage_worker_count, 5); + // max_concurrency=1 is for on-demand operations only + assert_eq!(manager.max_concurrency, 1); + + drop(manager); + task::yield_now().await; + }); + } +} diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 2021b342d62..394854f7246 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -864,6 +864,9 @@ Engine: --engine.allow-unwind-canonical-header Allow unwinding canonical header to ancestor during forkchoice updates. 
See `TreeConfig::unwind_canonical_header` for more details + --engine.storage-worker-count + Configure the number of storage proof workers in the Tokio blocking pool. If not specified, defaults to 2x available parallelism, clamped between 2 and 64 + ERA: --era.enable Enable import from ERA1 files From aec3e3dcc5953ea48e19de1f5a0f2549752cdc32 Mon Sep 17 00:00:00 2001 From: MIHAO PARK Date: Fri, 10 Oct 2025 14:26:47 +0200 Subject: [PATCH 016/371] chore(grafana): use precompile address as legend (#18913) --- etc/grafana/dashboards/overview.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 5b271d7ea8e..46a465ca4a4 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -3931,7 +3931,7 @@ "hide": false, "includeNullMetadata": true, "instant": false, - "legendFormat": "Precompile cache hits", + "legendFormat": "{{address}}", "range": true, "refId": "A", "useBackend": false From 5c18df9889941837e61929be4b51abb75f07f152 Mon Sep 17 00:00:00 2001 From: emmmm <155267286+eeemmmmmm@users.noreply.github.com> Date: Fri, 10 Oct 2025 12:23:10 -0400 Subject: [PATCH 017/371] refactor: remove needless collect() calls in trie tests (#18937) --- crates/trie/sparse-parallel/src/trie.rs | 4 ++-- crates/trie/sparse/src/trie.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index e1cfe84cdf9..50c9a79bd05 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -4995,7 +4995,7 @@ mod tests { state.clone(), trie_cursor.account_trie_cursor().unwrap(), Default::default(), - state.keys().copied().collect::>(), + state.keys().copied(), ); // Write trie updates to the database @@ -5040,7 +5040,7 @@ mod tests { .iter() .map(|nibbles| B256::from_slice(&nibbles.pack())) .collect(), - state.keys().copied().collect::>(), + state.keys().copied(), ); // Write trie updates to the database diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 36bcbe50e3a..89a23851e28 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -3031,7 +3031,7 @@ mod tests { state.clone(), trie_cursor.account_trie_cursor().unwrap(), Default::default(), - state.keys().copied().collect::>(), + state.keys().copied(), ); // Write trie updates to the database @@ -3073,7 +3073,7 @@ mod tests { .iter() .map(|nibbles| B256::from_slice(&nibbles.pack())) .collect(), - state.keys().copied().collect::>(), + state.keys().copied(), ); // Write trie updates to the database From b1d6c90fbbf0ec0c4f2d06ac722f1fcb3d7a7503 Mon Sep 17 00:00:00 2001 From: Tilak Madichetti Date: Sat, 11 Oct 2025 16:20:31 +0530 Subject: [PATCH 018/371] fix(examples): change method to launch with debug capabilities (#18946) --- examples/custom-dev-node/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/custom-dev-node/src/main.rs b/examples/custom-dev-node/src/main.rs index f700cf9e89a..c5441a2b388 100644 --- a/examples/custom-dev-node/src/main.rs +++ b/examples/custom-dev-node/src/main.rs @@ -33,7 +33,7 @@ async fn main() -> eyre::Result<()> { let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) .testing_node(tasks.executor()) .node(EthereumNode::default()) - .launch() + .launch_with_debug_capabilities() .await?; let mut notifications = node.provider.canonical_state_stream(); From 
99a5da2f91188fdb8b63caf42a9163db9616dc43 Mon Sep 17 00:00:00 2001 From: Tilak Madichetti Date: Sat, 11 Oct 2025 16:20:52 +0530 Subject: [PATCH 019/371] fix(example): launch with debug capabilities (#18947) --- examples/node-custom-rpc/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/node-custom-rpc/src/main.rs b/examples/node-custom-rpc/src/main.rs index 3c7c9269f58..2af789a989c 100644 --- a/examples/node-custom-rpc/src/main.rs +++ b/examples/node-custom-rpc/src/main.rs @@ -53,7 +53,7 @@ fn main() { Ok(()) }) // launch the node with custom rpc - .launch() + .launch_with_debug_capabilities() .await?; handle.wait_for_node_exit().await From 16e79888ae24f15d688a95b76fcb73b7e9acb643 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Mon, 13 Oct 2025 11:36:17 +0200 Subject: [PATCH 020/371] fix(testsuite): Fix unused updates in e2e-test-utils (#18953) --- .../e2e-test-utils/src/testsuite/actions/produce_blocks.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs index 9d2088c11a4..74a5e2ba1d5 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs @@ -510,7 +510,7 @@ where Box::pin(async move { let mut accepted_check: bool = false; - let mut latest_block = env + let latest_block = env .current_block_info() .ok_or_else(|| eyre::eyre!("No latest block information available"))?; @@ -603,10 +603,6 @@ where rpc_latest_header.inner.timestamp; env.active_node_state_mut()?.latest_fork_choice_state.head_block_hash = rpc_latest_header.hash; - - // update local copy for any further usage in this scope - latest_block.hash = rpc_latest_header.hash; - latest_block.number = rpc_latest_header.inner.number; } } From 16ba9e8979b869eb8b02a6f89cadc2ca9f68094e Mon Sep 17 00:00:00 2001 From: radik878 Date: Mon, 13 Oct 2025 13:19:28 +0300 Subject: [PATCH 021/371] fix(payload): correct Debug label for PayloadTimestamp in PayloadServiceCommand (#18954) --- crates/payload/builder/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index f9530d003f5..f3f1b03ab2e 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -512,7 +512,7 @@ where f.debug_tuple("BestPayload").field(&f0).field(&f1).finish() } Self::PayloadTimestamp(f0, f1) => { - f.debug_tuple("PayloadAttributes").field(&f0).field(&f1).finish() + f.debug_tuple("PayloadTimestamp").field(&f0).field(&f1).finish() } Self::Resolve(f0, f1, _f2) => f.debug_tuple("Resolve").field(&f0).field(&f1).finish(), Self::Subscribe(f0) => f.debug_tuple("Subscribe").field(&f0).finish(), From 0f14980d88abebeb46330c371af23e60e8e2d2e2 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Mon, 13 Oct 2025 12:24:55 +0200 Subject: [PATCH 022/371] chore(rpc): Moves `SequencerMetrics` into `reth-optimism-rpc` (#18921) --- crates/optimism/rpc/src/lib.rs | 2 ++ crates/optimism/rpc/src/metrics.rs | 21 +++++++++++++++++++ crates/optimism/rpc/src/sequencer.rs | 3 +-- .../optimism/txpool/src/supervisor/metrics.rs | 18 +--------------- 4 files changed, 25 insertions(+), 19 deletions(-) create mode 100644 crates/optimism/rpc/src/metrics.rs diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index 1c9b5d1c39e..10f8ad5dccd 100644 --- a/crates/optimism/rpc/src/lib.rs 
+++ b/crates/optimism/rpc/src/lib.rs @@ -12,6 +12,7 @@ pub mod engine; pub mod error; pub mod eth; pub mod historical; +pub mod metrics; pub mod miner; pub mod sequencer; pub mod witness; @@ -21,4 +22,5 @@ pub use engine::OpEngineApiClient; pub use engine::{OpEngineApi, OpEngineApiServer, OP_ENGINE_CAPABILITIES}; pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError}; pub use eth::{OpEthApi, OpEthApiBuilder, OpReceiptBuilder}; +pub use metrics::SequencerMetrics; pub use sequencer::SequencerClient; diff --git a/crates/optimism/rpc/src/metrics.rs b/crates/optimism/rpc/src/metrics.rs new file mode 100644 index 00000000000..5aa5e3eff3d --- /dev/null +++ b/crates/optimism/rpc/src/metrics.rs @@ -0,0 +1,21 @@ +//! RPC metrics unique for OP-stack. + +use core::time::Duration; +use metrics::Histogram; +use reth_metrics::Metrics; + +/// Optimism sequencer metrics +#[derive(Metrics, Clone)] +#[metrics(scope = "optimism_rpc.sequencer")] +pub struct SequencerMetrics { + /// How long it takes to forward a transaction to the sequencer + pub(crate) sequencer_forward_latency: Histogram, +} + +impl SequencerMetrics { + /// Records the duration it took to forward a transaction + #[inline] + pub fn record_forward_latency(&self, duration: Duration) { + self.sequencer_forward_latency.record(duration.as_secs_f64()); + } +} diff --git a/crates/optimism/rpc/src/sequencer.rs b/crates/optimism/rpc/src/sequencer.rs index c3b543638bb..86ed000e863 100644 --- a/crates/optimism/rpc/src/sequencer.rs +++ b/crates/optimism/rpc/src/sequencer.rs @@ -1,12 +1,11 @@ //! Helpers for optimism specific RPC implementations. -use crate::SequencerClientError; +use crate::{SequencerClientError, SequencerMetrics}; use alloy_json_rpc::{RpcRecv, RpcSend}; use alloy_primitives::{hex, B256}; use alloy_rpc_client::{BuiltInConnectionString, ClientBuilder, RpcClient as Client}; use alloy_rpc_types_eth::erc4337::TransactionConditional; use alloy_transport_http::Http; -use reth_optimism_txpool::supervisor::metrics::SequencerMetrics; use std::{str::FromStr, sync::Arc, time::Instant}; use thiserror::Error; use tracing::warn; diff --git a/crates/optimism/txpool/src/supervisor/metrics.rs b/crates/optimism/txpool/src/supervisor/metrics.rs index 23eec843025..cb51a52bfc5 100644 --- a/crates/optimism/txpool/src/supervisor/metrics.rs +++ b/crates/optimism/txpool/src/supervisor/metrics.rs @@ -1,4 +1,4 @@ -//! Optimism supervisor and sequencer metrics +//! Optimism supervisor metrics use crate::supervisor::InteropTxValidatorError; use op_alloy_rpc_types::SuperchainDAError; @@ -70,19 +70,3 @@ impl SupervisorMetrics { } } } - -/// Optimism sequencer metrics -#[derive(Metrics, Clone)] -#[metrics(scope = "optimism_transaction_pool.sequencer")] -pub struct SequencerMetrics { - /// How long it takes to forward a transaction to the sequencer - pub(crate) sequencer_forward_latency: Histogram, -} - -impl SequencerMetrics { - /// Records the duration it took to forward a transaction - #[inline] - pub fn record_forward_latency(&self, duration: Duration) { - self.sequencer_forward_latency.record(duration.as_secs_f64()); - } -} From 4415bc5d7a787fe660708b10d43a947e33b856f1 Mon Sep 17 00:00:00 2001 From: maradini77 <140460067+maradini77@users.noreply.github.com> Date: Mon, 13 Oct 2025 13:51:19 +0200 Subject: [PATCH 023/371] refactor: replace println! 
with structured logging in test_vectors (#18956) --- crates/cli/commands/src/test_vectors/tables.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index 1bbd2604f97..ef34e5b5e84 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -54,7 +54,7 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { match table.as_str() { $( stringify!($table_type) => { - println!("Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type$(::<$($generic),+>)?::NAME); + tracing::info!(target: "reth::cli", "Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type$(::<$($generic),+>)?::NAME); generate_vector!($table_type$(<$($generic),+>)?, $per_table, $table_or_dup); }, From 6c27b35e19953daec3497acec4ab8ee3cdc129db Mon Sep 17 00:00:00 2001 From: maradini77 <140460067+maradini77@users.noreply.github.com> Date: Mon, 13 Oct 2025 14:24:58 +0200 Subject: [PATCH 024/371] refactor(cli): use structured logging (tracing) in p2p command (#18957) --- crates/cli/commands/src/p2p/mod.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index 861fd836e76..792d4533856 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -38,9 +38,9 @@ impl let header = (move || get_single_header(fetch_client.clone(), id)) .retry(backoff) - .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) + .notify(|err, _| tracing::warn!(target: "reth::cli", error = %err, "Error requesting header. Retrying...")) .await?; - println!("Successfully downloaded header: {header:?}"); + tracing::info!(target: "reth::cli", ?header, "Successfully downloaded header"); } Subcommands::Body { args, id } => { @@ -51,13 +51,13 @@ impl let hash = match id { BlockHashOrNumber::Hash(hash) => hash, BlockHashOrNumber::Number(number) => { - println!("Block number provided. Downloading header first..."); + tracing::info!(target: "reth::cli", "Block number provided. Downloading header first..."); let client = fetch_client.clone(); let header = (move || { get_single_header(client.clone(), BlockHashOrNumber::Number(number)) }) .retry(backoff) - .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) + .notify(|err, _| tracing::warn!(target: "reth::cli", error = %err, "Error requesting header. Retrying...")) .await?; header.hash() } @@ -67,7 +67,7 @@ impl client.get_block_bodies(vec![hash]) }) .retry(backoff) - .notify(|err, _| println!("Error requesting block: {err}. Retrying...")) + .notify(|err, _| tracing::warn!(target: "reth::cli", error = %err, "Error requesting block. Retrying...")) .await? 
.split(); if result.len() != 1 { @@ -77,7 +77,7 @@ impl ) } let body = result.into_iter().next().unwrap(); - println!("Successfully downloaded body: {body:?}") + tracing::info!(target: "reth::cli", ?body, "Successfully downloaded body") } Subcommands::Rlpx(command) => { command.execute().await?; From 691b14bfca1444e139f1e98d5d2d2db904456bf7 Mon Sep 17 00:00:00 2001 From: YK Date: Mon, 13 Oct 2025 20:53:12 +0800 Subject: [PATCH 025/371] perf(tree): add elapsed time to parallel state root completion log (#18959) --- crates/engine/tree/src/tree/payload_validator.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 1e63d29bf79..51e669b8883 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -492,13 +492,15 @@ where ctx.state(), ) { Ok(result) => { + let elapsed = root_time.elapsed(); info!( target: "engine::tree", block = ?block_num_hash, regular_state_root = ?result.0, + ?elapsed, "Regular root task finished" ); - maybe_state_root = Some((result.0, result.1, root_time.elapsed())); + maybe_state_root = Some((result.0, result.1, elapsed)); } Err(error) => { debug!(target: "engine::tree", %error, "Parallel state root computation failed"); From edc8261913e610e9681ac7a0e86f0abcad1145a7 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Mon, 13 Oct 2025 15:29:50 +0200 Subject: [PATCH 026/371] fix(trie): Properly upsert into StoragesTrie in repair-trie (#18941) --- crates/cli/commands/src/db/repair_trie.rs | 11 ++++++++++- .../provider/src/providers/database/provider.rs | 2 -- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/crates/cli/commands/src/db/repair_trie.rs b/crates/cli/commands/src/db/repair_trie.rs index e7ee8d7977c..e5b7db0e2f0 100644 --- a/crates/cli/commands/src/db/repair_trie.rs +++ b/crates/cli/commands/src/db/repair_trie.rs @@ -179,8 +179,17 @@ fn verify_and_repair( Output::StorageWrong { account, path, expected: node, .. } | Output::StorageMissing(account, path, node) => { // Wrong/missing storage node value, upsert it + // (We can't just use `upsert` method with a dup cursor, it's not properly + // supported) let nibbles = StoredNibblesSubKey(path); - let entry = StorageTrieEntry { nibbles, node }; + let entry = StorageTrieEntry { nibbles: nibbles.clone(), node }; + if storage_trie_cursor + .seek_by_key_subkey(account, nibbles.clone())? + .filter(|v| v.nibbles == nibbles) + .is_some() + { + storage_trie_cursor.delete_current()?; + } storage_trie_cursor.upsert(account, &entry)?; } Output::Progress(path) => { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 55739bbe915..f534a0ea127 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1981,7 +1981,6 @@ impl StateWriter for (storage_key, (old_storage_value, _new_storage_value)) in storage { let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; // delete previous value - // TODO: This does not use dupsort features if plain_storage_cursor .seek_by_key_subkey(*address, *storage_key)? 
.filter(|s| s.key == *storage_key) @@ -2080,7 +2079,6 @@ impl StateWriter for (storage_key, (old_storage_value, _new_storage_value)) in storage { let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; // delete previous value - // TODO: This does not use dupsort features if plain_storage_cursor .seek_by_key_subkey(*address, *storage_key)? .filter(|s| s.key == *storage_key) From ea65aca0d75a945a54e951226eabe5d6ee7051ad Mon Sep 17 00:00:00 2001 From: sashaodessa <140454972+sashaodessa@users.noreply.github.com> Date: Mon, 13 Oct 2025 15:48:42 +0200 Subject: [PATCH 027/371] fix: misleading error message in db list: show actual table name (#18896) --- crates/cli/commands/src/db/list.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/cli/commands/src/db/list.rs b/crates/cli/commands/src/db/list.rs index 9288a56a86c..2540e77c111 100644 --- a/crates/cli/commands/src/db/list.rs +++ b/crates/cli/commands/src/db/list.rs @@ -97,7 +97,7 @@ impl TableViewer<()> for ListTableViewer<'_, N> { fn view(&self) -> Result<(), Self::Error> { self.tool.provider_factory.db_ref().view(|tx| { let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?; - let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", stringify!($table)))?; + let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", self.args.table.name()))?; let total_entries = stats.entries(); let final_entry_idx = total_entries.saturating_sub(1); if self.args.skip > final_entry_idx { From 211e330eb92fd41d8d2925b1a67c67843da5fec1 Mon Sep 17 00:00:00 2001 From: sashaodessa <140454972+sashaodessa@users.noreply.github.com> Date: Mon, 13 Oct 2025 16:00:40 +0200 Subject: [PATCH 028/371] fix: remove noisy stderr prints in ERA1 cleanup (EraClient::delete_outside_range) (#18895) --- crates/era-downloader/src/client.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/era-downloader/src/client.rs b/crates/era-downloader/src/client.rs index 298248ff3e9..36ed93e1e2f 100644 --- a/crates/era-downloader/src/client.rs +++ b/crates/era-downloader/src/client.rs @@ -128,8 +128,6 @@ impl EraClient { let Some(number) = self.file_name_to_number(name) && (number < index || number >= last) { - eprintln!("Deleting file {}", entry.path().display()); - eprintln!("{number} < {index} || {number} >= {last}"); reth_fs_util::remove_file(entry.path())?; } } From 1dfd0ff772a7f843a3da6f66fde6829ad4ec73b2 Mon Sep 17 00:00:00 2001 From: maradini77 <140460067+maradini77@users.noreply.github.com> Date: Mon, 13 Oct 2025 17:25:01 +0200 Subject: [PATCH 029/371] fix: use max B256 for upper bound in empty-storage check (#18962) --- crates/trie/trie/src/verify.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/trie/trie/src/verify.rs b/crates/trie/trie/src/verify.rs index 5f2260bc7dc..96059211458 100644 --- a/crates/trie/trie/src/verify.rs +++ b/crates/trie/trie/src/verify.rs @@ -400,9 +400,8 @@ impl Verifier { // need to validate that all accounts coming after it have empty storages. let prev_account = *prev_account; - // Calculate the max possible account address. - let mut max_account = B256::ZERO; - max_account.reverse(); + // Calculate the max possible account address (all bits set). 
+ let max_account = B256::from([0xFFu8; 32]); self.verify_empty_storages(prev_account, max_account, false, true)?; } From 0f919a949e77df7b2a9b18e4990f32c1a4891c46 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Mon, 13 Oct 2025 17:51:32 +0100 Subject: [PATCH 030/371] ci: remove reproducible build from release.yml (#18958) --- .github/workflows/release.yml | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4b637889d2a..f871b163a2d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -18,7 +18,6 @@ env: REPO_NAME: ${{ github.repository_owner }}/reth IMAGE_NAME: ${{ github.repository_owner }}/reth OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth - REPRODUCIBLE_IMAGE_NAME: ${{ github.repository_owner }}/reth-reproducible CARGO_TERM_COLOR: always DOCKER_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/reth DOCKER_OP_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/op-reth @@ -74,10 +73,6 @@ jobs: os: ubuntu-24.04 profile: maxperf allow_fail: false - - target: x86_64-unknown-linux-gnu - os: ubuntu-24.04 - profile: reproducible - allow_fail: false - target: aarch64-unknown-linux-gnu os: ubuntu-24.04 profile: maxperf @@ -124,13 +119,7 @@ jobs: echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV - name: Build Reth - if: ${{ !(matrix.build.binary == 'op-reth' && matrix.configs.profile == 'reproducible') }} - run: | - if [[ "${{ matrix.build.binary }}" == "reth" && "${{ matrix.configs.profile }}" == "reproducible" ]]; then - make build-reth-reproducible - else - make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} - fi + run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} - name: Build Reth deb package if: ${{ matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} @@ -141,13 +130,6 @@ jobs: mkdir artifacts [[ "${{ matrix.configs.target }}" == *windows* ]] && ext=".exe" - # Handle reproducible builds which always target x86_64-unknown-linux-gnu - if [[ "${{ matrix.build.binary }}" == "reth" && "${{ matrix.configs.profile }}" == "reproducible" ]]; then - mv "target/x86_64-unknown-linux-gnu/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - else - mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - fi - # Move deb packages if they exist if [[ "${{ matrix.build.binary }}" == "reth" && "${{ env.DEB_SUPPORTED_TARGETS }}" == *"${{ matrix.configs.target }}"* ]]; then mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb" ./artifacts From 55d294dc7f4d8309d1dad7c6b36e71eb7c8e73fb Mon Sep 17 00:00:00 2001 From: Forostovec Date: Mon, 13 Oct 2025 20:22:07 +0300 Subject: [PATCH 031/371] chore(rpc): Remove redundant U256::from in suggested_priority_fee (#18969) --- crates/optimism/rpc/src/eth/mod.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index a2226e0cbf3..e10c5152473 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -290,8 +290,12 @@ 
where
 }

     async fn suggested_priority_fee(&self) -> Result {
-        let min_tip = U256::from(self.inner.min_suggested_priority_fee);
-        self.inner.eth_api.gas_oracle().op_suggest_tip_cap(min_tip).await.map_err(Into::into)
+        self.inner
+            .eth_api
+            .gas_oracle()
+            .op_suggest_tip_cap(self.inner.min_suggested_priority_fee)
+            .await
+            .map_err(Into::into)
     }
 }

From 2041188744de41057b4989a7a82bb9b3e8668ca3 Mon Sep 17 00:00:00 2001
From: Federico Gimenez
Date: Mon, 13 Oct 2025 20:03:43 +0200
Subject: [PATCH 032/371] chore(ci): update eest 7594 issue link in hive expected failures file (#18976)

---
 .github/assets/hive/expected_failures.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml
index 6a580d9a110..f28fd70be03 100644
--- a/.github/assets/hive/expected_failures.yaml
+++ b/.github/assets/hive/expected_failures.yaml
@@ -52,7 +52,7 @@ engine-auth:
 # 7002 related tests - post-fork test, should fix for spec compliance but not
 # realistic on mainnet
 # 7251 related tests - modified contract, not necessarily practical on mainnet,
-# 7594: https://github.com/paradigmxyz/reth/issues/18471
+# 7594: https://github.com/paradigmxyz/reth/issues/18975
 # worth re-visiting when more of these related tests are passing
 eest/consume-engine:
   - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test_engine-zero_nonce]-reth

From 9b0a2c37b4e8d212ec71ae0ed9471c22f2e4d76e Mon Sep 17 00:00:00 2001
From: Alvarez <140459501+prestoalvarez@users.noreply.github.com>
Date: Mon, 13 Oct 2025 20:27:03 +0200
Subject: [PATCH 033/371] perf(tests): remove redundant format! in ef-tests run_only (#18909)

---
 testing/ef-tests/tests/tests.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/testing/ef-tests/tests/tests.rs b/testing/ef-tests/tests/tests.rs
index 0961817e901..2728246901a 100644
--- a/testing/ef-tests/tests/tests.rs
+++ b/testing/ef-tests/tests/tests.rs
@@ -93,7 +93,7 @@ macro_rules!
blockchain_test { .join("ethereum-tests") .join("BlockchainTests"); - BlockchainTests::new(suite_path).run_only(&format!("{}", stringify!($dir))); + BlockchainTests::new(suite_path).run_only(stringify!($dir)); } }; } From 59ace5892559f9f3ad461cf15a1c190bab76d427 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Mon, 13 Oct 2025 22:28:16 +0200 Subject: [PATCH 034/371] feat(cli): enable traces export via `tracing-otlp` cli arg (#18242) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- Cargo.lock | 40 +++++----- Cargo.toml | 10 ++- crates/cli/commands/src/node.rs | 32 ++++---- crates/ethereum/cli/Cargo.toml | 4 +- crates/ethereum/cli/src/app.rs | 9 ++- crates/ethereum/cli/src/interface.rs | 14 +++- crates/node/builder/src/launch/common.rs | 2 +- crates/node/core/Cargo.toml | 2 + crates/node/core/src/args/log.rs | 1 + crates/node/core/src/args/metric.rs | 13 ++++ crates/node/core/src/args/mod.rs | 8 ++ crates/node/core/src/args/trace.rs | 40 ++++++++++ crates/node/core/src/node_config.rs | 17 ++--- crates/optimism/cli/Cargo.toml | 5 ++ crates/optimism/cli/src/app.rs | 9 ++- crates/optimism/cli/src/lib.rs | 10 ++- crates/tracing-otlp/Cargo.toml | 26 +++++-- crates/tracing-otlp/src/lib.rs | 74 ++++++++++++++++--- crates/tracing/Cargo.toml | 15 +++- crates/tracing/src/layers.rs | 28 ++++++- docs/vocs/docs/pages/cli/reth.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/config.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/db.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/db/checksum.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/db/clear.mdx | 8 ++ .../docs/pages/cli/reth/db/clear/mdbx.mdx | 8 ++ .../pages/cli/reth/db/clear/static-file.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/db/diff.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/db/drop.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/db/get.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx | 8 ++ .../pages/cli/reth/db/get/static-file.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/db/list.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/db/path.mdx | 8 ++ .../docs/pages/cli/reth/db/repair-trie.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/db/stats.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/db/version.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/download.mdx | 8 ++ .../vocs/docs/pages/cli/reth/dump-genesis.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/export-era.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/import-era.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/import.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/init-state.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/init.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/node.mdx | 10 ++- docs/vocs/docs/pages/cli/reth/p2p.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/p2p/body.mdx | 8 ++ .../vocs/docs/pages/cli/reth/p2p/bootnode.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/p2p/header.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx | 8 ++ .../docs/pages/cli/reth/p2p/rlpx/ping.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/prune.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/re-execute.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/stage.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 8 ++ .../cli/reth/stage/dump/account-hashing.mdx | 8 ++ .../pages/cli/reth/stage/dump/execution.mdx | 8 ++ .../docs/pages/cli/reth/stage/dump/merkle.mdx | 8 ++ .../cli/reth/stage/dump/storage-hashing.mdx | 8 ++ docs/vocs/docs/pages/cli/reth/stage/run.mdx | 8 ++ .../vocs/docs/pages/cli/reth/stage/unwind.mdx | 8 ++ .../cli/reth/stage/unwind/num-blocks.mdx | 8 ++ 
.../pages/cli/reth/stage/unwind/to-block.mdx | 8 ++ docs/vocs/docs/pages/run/monitoring.mdx | 6 ++ 65 files changed, 645 insertions(+), 74 deletions(-) create mode 100644 crates/node/core/src/args/metric.rs create mode 100644 crates/node/core/src/args/trace.rs diff --git a/Cargo.lock b/Cargo.lock index fde6f2dc3aa..7b666230799 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6225,9 +6225,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "opentelemetry" -version = "0.29.1" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e87237e2775f74896f9ad219d26a2081751187eb7c9f5c58dde20a23b95d16c" +checksum = "aaf416e4cb72756655126f7dd7bb0af49c674f4c1b9903e80c009e0c37e552e6" dependencies = [ "futures-core", "futures-sink", @@ -6239,25 +6239,23 @@ dependencies = [ [[package]] name = "opentelemetry-http" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46d7ab32b827b5b495bd90fa95a6cb65ccc293555dcc3199ae2937d2d237c8ed" +checksum = "50f6639e842a97dbea8886e3439710ae463120091e2e064518ba8e716e6ac36d" dependencies = [ "async-trait", "bytes", "http", "opentelemetry", "reqwest", - "tracing", ] [[package]] name = "opentelemetry-otlp" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d899720fe06916ccba71c01d04ecd77312734e2de3467fd30d9d580c8ce85656" +checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b" dependencies = [ - "futures-core", "http", "opentelemetry", "opentelemetry-http", @@ -6271,9 +6269,9 @@ dependencies = [ [[package]] name = "opentelemetry-proto" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c40da242381435e18570d5b9d50aca2a4f4f4d8e146231adb4e7768023309b3" +checksum = "2e046fd7660710fe5a05e8748e70d9058dc15c94ba914e7c4faa7c728f0e8ddc" dependencies = [ "opentelemetry", "opentelemetry_sdk", @@ -6283,26 +6281,24 @@ dependencies = [ [[package]] name = "opentelemetry-semantic-conventions" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b29a9f89f1a954936d5aa92f19b2feec3c8f3971d3e96206640db7f9706ae3" +checksum = "83d059a296a47436748557a353c5e6c5705b9470ef6c95cfc52c21a8814ddac2" [[package]] name = "opentelemetry_sdk" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afdefb21d1d47394abc1ba6c57363ab141be19e27cc70d0e422b7f303e4d290b" +checksum = "11f644aa9e5e31d11896e024305d7e3c98a88884d9f8919dbf37a9991bc47a4b" dependencies = [ "futures-channel", "futures-executor", "futures-util", - "glob", "opentelemetry", "percent-encoding", "rand 0.9.2", "serde_json", "thiserror 2.0.16", - "tracing", ] [[package]] @@ -10573,18 +10569,21 @@ version = "1.8.2" dependencies = [ "clap", "eyre", + "reth-tracing-otlp", "rolling-file", "tracing", "tracing-appender", "tracing-journald", "tracing-logfmt", "tracing-subscriber 0.3.20", + "url", ] [[package]] name = "reth-tracing-otlp" version = "1.8.2" dependencies = [ + "eyre", "opentelemetry", "opentelemetry-otlp", "opentelemetry-semantic-conventions", @@ -10592,6 +10591,7 @@ dependencies = [ "tracing", "tracing-opentelemetry", "tracing-subscriber 0.3.20", + "url", ] [[package]] @@ -12590,9 +12590,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "tonic" -version = 
"0.12.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9" dependencies = [ "async-trait", "base64 0.22.1", @@ -12763,9 +12763,9 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd8e764bd6f5813fd8bebc3117875190c5b0415be8f7f8059bffb6ecd979c444" +checksum = "ddcf5959f39507d0d04d6413119c04f33b623f4f951ebcbdddddfad2d0623a9c" dependencies = [ "js-sys", "once_cell", diff --git a/Cargo.toml b/Cargo.toml index 888ff2ad9d2..d027b0674ca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -454,7 +454,8 @@ reth-storage-errors = { path = "crates/storage/errors", default-features = false reth-tasks = { path = "crates/tasks" } reth-testing-utils = { path = "testing/testing-utils" } reth-tokio-util = { path = "crates/tokio-util" } -reth-tracing = { path = "crates/tracing" } +reth-tracing = { path = "crates/tracing", default-features = false } +reth-tracing-otlp = { path = "crates/tracing-otlp" } reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie/trie" } reth-trie-common = { path = "crates/trie/common", default-features = false } @@ -649,6 +650,13 @@ c-kzg = "2.1.4" # config toml = "0.8" +# otlp obs +opentelemetry_sdk = "0.30" +opentelemetry = "0.30" +opentelemetry-otlp = "0.30" +opentelemetry-semantic-conventions = "0.30" +tracing-opentelemetry = "0.31" + # misc-testing arbitrary = "1.3" assert_matches = "1.5.0" diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index 7e1ba97fb91..240bb3c2893 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -5,18 +5,17 @@ use clap::{value_parser, Args, Parser}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; -use reth_cli_util::parse_socket_address; use reth_db::init_db; use reth_node_builder::NodeBuilder; use reth_node_core::{ args::{ - DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, NetworkArgs, - PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, + DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, MetricArgs, + NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, node_config::NodeConfig, version, }; -use std::{ffi::OsString, fmt, net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{ffi::OsString, fmt, path::PathBuf, sync::Arc}; /// Start the node #[derive(Debug, Parser)] @@ -39,11 +38,9 @@ pub struct NodeCommand, - /// Enable Prometheus metrics. - /// - /// The metrics will be served at the given interface and port. - #[arg(long, value_name = "SOCKET", value_parser = parse_socket_address, help_heading = "Metrics")] - pub metrics: Option, + /// Prometheus metrics configuration. + #[command(flatten)] + pub metrics: MetricArgs, /// Add a new instance of a node. 
/// @@ -225,7 +222,7 @@ mod tests { use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ethereum_cli::chainspec::{EthereumChainSpecParser, SUPPORTED_CHAINS}; use std::{ - net::{IpAddr, Ipv4Addr}, + net::{IpAddr, Ipv4Addr, SocketAddr}, path::Path, }; @@ -286,15 +283,24 @@ mod tests { fn parse_metrics_port() { let cmd: NodeCommand = NodeCommand::try_parse_args_from(["reth", "--metrics", "9001"]).unwrap(); - assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); + assert_eq!( + cmd.metrics.prometheus, + Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)) + ); let cmd: NodeCommand = NodeCommand::try_parse_args_from(["reth", "--metrics", ":9001"]).unwrap(); - assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); + assert_eq!( + cmd.metrics.prometheus, + Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)) + ); let cmd: NodeCommand = NodeCommand::try_parse_args_from(["reth", "--metrics", "localhost:9001"]).unwrap(); - assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); + assert_eq!( + cmd.metrics.prometheus, + Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)) + ); } #[test] diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index 01a7751e77b..e232ea0cdb1 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -35,7 +35,9 @@ tracing.workspace = true tempfile.workspace = true [features] -default = ["jemalloc"] +default = ["jemalloc", "otlp"] + +otlp = ["reth-tracing/otlp", "reth-node-core/otlp"] dev = ["reth-cli-commands/arbitrary"] diff --git a/crates/ethereum/cli/src/app.rs b/crates/ethereum/cli/src/app.rs index e99dae2ac77..dc299cb83cd 100644 --- a/crates/ethereum/cli/src/app.rs +++ b/crates/ethereum/cli/src/app.rs @@ -111,7 +111,14 @@ where /// If file logging is enabled, this function stores guard to the struct. pub fn init_tracing(&mut self) -> Result<()> { if self.guard.is_none() { - let layers = self.layers.take().unwrap_or_default(); + let mut layers = self.layers.take().unwrap_or_default(); + + #[cfg(feature = "otlp")] + if let Some(output_type) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); + layers.with_span_layer("reth".to_string(), output_type.clone())?; + } + self.guard = self.cli.logs.init_tracing_with_layers(layers)?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); } diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index 8f09b165e83..8d2b4ba62fb 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -18,7 +18,10 @@ use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_api::NodePrimitives; use reth_node_builder::{NodeBuilder, WithLaunchContext}; -use reth_node_core::{args::LogArgs, version::version_metadata}; +use reth_node_core::{ + args::{LogArgs, TraceArgs}, + version::version_metadata, +}; use reth_node_metrics::recorder::install_prometheus_recorder; use reth_rpc_server_types::{DefaultRpcModuleValidator, RpcModuleValidator}; use reth_tracing::FileWorkerGuard; @@ -43,6 +46,10 @@ pub struct Cli< #[command(flatten)] pub logs: LogArgs, + /// The tracing configuration for the CLI. 
+ #[command(flatten)] + pub traces: TraceArgs, + /// Type marker for the RPC module validator #[arg(skip)] pub _phantom: PhantomData, @@ -212,8 +219,11 @@ impl /// /// If file logging is enabled, this function returns a guard that must be kept alive to ensure /// that all logs are flushed to disk. + /// If an OTLP endpoint is specified, it will export metrics to the configured collector. pub fn init_tracing(&self) -> eyre::Result> { - let guard = self.logs.init_tracing()?; + let layers = reth_tracing::Layers::new(); + + let guard = self.logs.init_tracing_with_layers(layers)?; Ok(guard) } } diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 3a35c4183f1..b43dc2a2a6a 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -582,7 +582,7 @@ where // ensure recorder runs upkeep periodically install_prometheus_recorder().spawn_upkeep(); - let listen_addr = self.node_config().metrics; + let listen_addr = self.node_config().metrics.prometheus; if let Some(addr) = listen_addr { info!(target: "reth::cli", "Starting metrics endpoint at {}", addr); let config = MetricServerConfig::new( diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 2240fa98837..bf784b50703 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -77,6 +77,8 @@ tokio.workspace = true # Features for vergen to generate correct env vars jemalloc = ["reth-cli-util/jemalloc"] asm-keccak = ["alloy-primitives/asm-keccak"] +# Feature to enable opentelemetry export +otlp = ["reth-tracing/otlp"] [build-dependencies] vergen = { workspace = true, features = ["build", "cargo", "emit_and_set"] } diff --git a/crates/node/core/src/args/log.rs b/crates/node/core/src/args/log.rs index 1236984fac0..99fefc11445 100644 --- a/crates/node/core/src/args/log.rs +++ b/crates/node/core/src/args/log.rs @@ -70,6 +70,7 @@ pub struct LogArgs { default_value_t = ColorMode::Always )] pub color: ColorMode, + /// The verbosity settings for the tracer. #[command(flatten)] pub verbosity: Verbosity, diff --git a/crates/node/core/src/args/metric.rs b/crates/node/core/src/args/metric.rs new file mode 100644 index 00000000000..d46018b8e77 --- /dev/null +++ b/crates/node/core/src/args/metric.rs @@ -0,0 +1,13 @@ +use clap::Parser; +use reth_cli_util::parse_socket_address; +use std::net::SocketAddr; + +/// Metrics configuration. +#[derive(Debug, Clone, Default, Parser)] +pub struct MetricArgs { + /// Enable Prometheus metrics. + /// + /// The metrics will be served at the given interface and port. + #[arg(long="metrics", alias = "metrics.prometheus", value_name = "PROMETHEUS", value_parser = parse_socket_address, help_heading = "Metrics")] + pub prometheus: Option, +} diff --git a/crates/node/core/src/args/mod.rs b/crates/node/core/src/args/mod.rs index 6799fe418dc..54e77740146 100644 --- a/crates/node/core/src/args/mod.rs +++ b/crates/node/core/src/args/mod.rs @@ -24,6 +24,14 @@ pub use database::DatabaseArgs; mod log; pub use log::{ColorMode, LogArgs, Verbosity}; +/// `TraceArgs` for tracing and spans support +mod trace; +pub use trace::TraceArgs; + +/// `MetricArgs` to configure metrics. 
+mod metric; +pub use metric::MetricArgs; + /// `PayloadBuilderArgs` struct for configuring the payload builder mod payload_builder; pub use payload_builder::PayloadBuilderArgs; diff --git a/crates/node/core/src/args/trace.rs b/crates/node/core/src/args/trace.rs new file mode 100644 index 00000000000..0d4e347a486 --- /dev/null +++ b/crates/node/core/src/args/trace.rs @@ -0,0 +1,40 @@ +//! Opentelemetry tracing configuration through CLI args. + +use clap::Parser; +use eyre::{ensure, WrapErr}; +use url::Url; + +/// CLI arguments for configuring `Opentelemetry` trace and span export. +#[derive(Debug, Clone, Default, Parser)] +pub struct TraceArgs { + /// Enable `Opentelemetry` tracing export to an OTLP endpoint. + /// + /// If no value provided, defaults to `http://localhost:4318/v1/traces`. + /// + /// Example: --tracing-otlp=http://collector:4318/v1/traces + #[arg( + long = "tracing-otlp", + global = true, + value_name = "URL", + num_args = 0..=1, + default_missing_value = "http://localhost:4318/v1/traces", + require_equals = true, + value_parser = parse_otlp_endpoint, + help_heading = "Tracing" + )] + pub otlp: Option, +} + +// Parses and validates an OTLP endpoint url. +fn parse_otlp_endpoint(arg: &str) -> eyre::Result { + let url = Url::parse(arg).wrap_err("Invalid URL for OTLP trace output")?; + + // OTLP url must end with `/v1/traces` per the OTLP specification. + ensure!( + url.path().ends_with("/v1/traces"), + "OTLP trace endpoint must end with /v1/traces, got path: {}", + url.path() + ); + + Ok(url) +} diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index bb5beda1d0c..94dbecb649c 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -27,13 +27,12 @@ use reth_transaction_pool::TransactionPool; use serde::{de::DeserializeOwned, Serialize}; use std::{ fs, - net::SocketAddr, path::{Path, PathBuf}, sync::Arc, }; use tracing::*; -use crate::args::EraArgs; +use crate::args::{EraArgs, MetricArgs}; pub use reth_engine_primitives::{ DEFAULT_MAX_PROOF_TASK_CONCURRENCY, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, @@ -100,10 +99,8 @@ pub struct NodeConfig { /// Possible values are either a built-in chain or the path to a chain specification file. pub chain: Arc, - /// Enable Prometheus metrics. - /// - /// The metrics will be served at the given interface and port. - pub metrics: Option, + /// Enable to configure metrics export to endpoints + pub metrics: MetricArgs, /// Add a new instance of a node. 
/// @@ -168,7 +165,7 @@ impl NodeConfig { Self { config: None, chain, - metrics: None, + metrics: MetricArgs::default(), instance: None, network: NetworkArgs::default(), rpc: RpcServerArgs::default(), @@ -222,8 +219,8 @@ impl NodeConfig { } /// Set the metrics address for the node - pub const fn with_metrics(mut self, metrics: SocketAddr) -> Self { - self.metrics = Some(metrics); + pub const fn with_metrics(mut self, metrics: MetricArgs) -> Self { + self.metrics = metrics; self } @@ -514,7 +511,7 @@ impl Clone for NodeConfig { Self { chain: self.chain.clone(), config: self.config.clone(), - metrics: self.metrics, + metrics: self.metrics.clone(), instance: self.instance, network: self.network.clone(), rpc: self.rpc.clone(), diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 422da3b883e..6ed24ca5823 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -74,6 +74,11 @@ reth-stages = { workspace = true, features = ["test-utils"] } reth-optimism-chainspec = { workspace = true, features = ["std", "superchain-configs"] } [features] +default = ["otlp"] + +# Opentelemtry feature to activate metrics export +otlp = ["reth-tracing/otlp", "reth-node-core/otlp"] + asm-keccak = [ "alloy-primitives/asm-keccak", "reth-node-core/asm-keccak", diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index 1e9f7960ad1..a4f0a92e8f0 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -116,7 +116,14 @@ where /// If file logging is enabled, this function stores guard to the struct. pub fn init_tracing(&mut self) -> Result<()> { if self.guard.is_none() { - let layers = self.layers.take().unwrap_or_default(); + let mut layers = self.layers.take().unwrap_or_default(); + + #[cfg(feature = "otlp")] + if let Some(output_type) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); + layers.with_span_layer("reth".to_string(), output_type.clone())?; + } + self.guard = self.cli.logs.init_tracing_with_layers(layers)?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); } diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index b55bbed3ad4..1655b92d6ef 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -48,7 +48,10 @@ use reth_cli_commands::launcher::FnLauncher; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_builder::{NodeBuilder, WithLaunchContext}; -use reth_node_core::{args::LogArgs, version::version_metadata}; +use reth_node_core::{ + args::{LogArgs, TraceArgs}, + version::version_metadata, +}; use reth_optimism_node::args::RollupArgs; // This allows us to manually enable node metrics features, required for proper jemalloc metric @@ -73,6 +76,10 @@ pub struct Cli< #[command(flatten)] pub logs: LogArgs, + /// The metrics configuration for the CLI. 
+ #[command(flatten)] + pub traces: TraceArgs, + /// Type marker for the RPC module validator #[arg(skip)] _phantom: PhantomData, @@ -193,6 +200,7 @@ mod test { "10000", "--metrics", "9003", + "--tracing-otlp=http://localhost:4318/v1/traces", "--log.file.max-size", "100", ]); diff --git a/crates/tracing-otlp/Cargo.toml b/crates/tracing-otlp/Cargo.toml index 7b8b666116c..60cee0aa229 100644 --- a/crates/tracing-otlp/Cargo.toml +++ b/crates/tracing-otlp/Cargo.toml @@ -9,13 +9,29 @@ repository.workspace = true exclude.workspace = true [dependencies] -opentelemetry_sdk = "0.29.0" -opentelemetry = "0.29.1" -opentelemetry-otlp = "0.29.0" -tracing-opentelemetry = "0.30.0" +# obs +opentelemetry_sdk = { workspace = true, optional = true } +opentelemetry = { workspace = true, optional = true } +opentelemetry-otlp = { workspace = true, optional = true } +opentelemetry-semantic-conventions = { workspace = true, optional = true } +tracing-opentelemetry = { workspace = true, optional = true } tracing-subscriber.workspace = true tracing.workspace = true -opentelemetry-semantic-conventions = "0.29.0" + +# misc +eyre.workspace = true +url.workspace = true [lints] workspace = true + +[features] +default = ["otlp"] + +otlp = [ + "opentelemetry", + "opentelemetry_sdk", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "tracing-opentelemetry", +] diff --git a/crates/tracing-otlp/src/lib.rs b/crates/tracing-otlp/src/lib.rs index 1de112cdb33..07415ac2a65 100644 --- a/crates/tracing-otlp/src/lib.rs +++ b/crates/tracing-otlp/src/lib.rs @@ -1,12 +1,16 @@ +#![cfg(feature = "otlp")] + //! Provides a tracing layer for `OpenTelemetry` that exports spans to an OTLP endpoint. //! //! This module simplifies the integration of `OpenTelemetry` tracing with OTLP export in Rust //! applications. It allows for easily capturing and exporting distributed traces to compatible //! backends like Jaeger, Zipkin, or any other OpenTelemetry-compatible tracing system. -use opentelemetry::{trace::TracerProvider, KeyValue, Value}; -use opentelemetry_otlp::SpanExporter; +use eyre::{ensure, WrapErr}; +use opentelemetry::{global, trace::TracerProvider, KeyValue, Value}; +use opentelemetry_otlp::{SpanExporter, WithExportConfig}; use opentelemetry_sdk::{ + propagation::TraceContextPropagator, trace::{SdkTracer, SdkTracerProvider}, Resource, }; @@ -14,25 +18,73 @@ use opentelemetry_semantic_conventions::{attribute::SERVICE_VERSION, SCHEMA_URL} use tracing::Subscriber; use tracing_opentelemetry::OpenTelemetryLayer; use tracing_subscriber::registry::LookupSpan; +use url::Url; /// Creates a tracing [`OpenTelemetryLayer`] that exports spans to an OTLP endpoint. /// /// This layer can be added to a [`tracing_subscriber::Registry`] to enable `OpenTelemetry` tracing -/// with OTLP export. -pub fn layer(service_name: impl Into) -> OpenTelemetryLayer +/// with OTLP export to an url. 
+pub fn span_layer( + service_name: impl Into, + endpoint: &Url, +) -> eyre::Result> where for<'span> S: Subscriber + LookupSpan<'span>, { - let exporter = SpanExporter::builder().with_http().build().unwrap(); + global::set_text_map_propagator(TraceContextPropagator::new()); + + let resource = build_resource(service_name); + + let span_exporter = + SpanExporter::builder().with_http().with_endpoint(endpoint.to_string()).build()?; + + let tracer_provider = SdkTracerProvider::builder() + .with_resource(resource) + .with_batch_exporter(span_exporter) + .build(); + + global::set_tracer_provider(tracer_provider.clone()); + + let tracer = tracer_provider.tracer("reth-otlp"); + Ok(tracing_opentelemetry::layer().with_tracer(tracer)) +} - let resource = Resource::builder() +// Builds OTLP resource with service information. +fn build_resource(service_name: impl Into) -> Resource { + Resource::builder() .with_service_name(service_name) .with_schema_url([KeyValue::new(SERVICE_VERSION, env!("CARGO_PKG_VERSION"))], SCHEMA_URL) - .build(); + .build() +} + +/// Destination for exported trace spans. +#[derive(Debug, Clone)] +pub enum TraceOutput { + /// Export traces as JSON to stdout. + Stdout, + /// Export traces to an OTLP collector at the specified URL. + Otlp(Url), +} + +impl TraceOutput { + /// Parses the trace output destination from a string. + /// + /// Returns `TraceOutput::Stdout` for "stdout", or `TraceOutput::Otlp` for valid OTLP URLs. + /// OTLP URLs must end with `/v1/traces` per the OTLP specification. + pub fn parse(s: &str) -> eyre::Result { + if s == "stdout" { + return Ok(Self::Stdout); + } + + let url = Url::parse(s).wrap_err("Invalid URL for trace output")?; - let provider = - SdkTracerProvider::builder().with_resource(resource).with_batch_exporter(exporter).build(); + // OTLP specification requires the `/v1/traces` path for trace endpoints + ensure!( + url.path().ends_with("/v1/traces"), + "OTLP trace endpoint must end with /v1/traces, got path: {}", + url.path() + ); - let tracer = provider.tracer("reth-otlp"); - tracing_opentelemetry::layer().with_tracer(tracer) + Ok(Self::Otlp(url)) + } } diff --git a/crates/tracing/Cargo.toml b/crates/tracing/Cargo.toml index a5c09c23a35..b5bcfacd530 100644 --- a/crates/tracing/Cargo.toml +++ b/crates/tracing/Cargo.toml @@ -12,11 +12,22 @@ description = "tracing helpers" workspace = true [dependencies] +# reth +reth-tracing-otlp = { workspace = true, optional = true } + +# obs tracing.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "ansi", "json"] } tracing-appender.workspace = true tracing-journald.workspace = true tracing-logfmt.workspace = true -rolling-file.workspace = true -eyre.workspace = true + +# misc clap = { workspace = true, features = ["derive"] } +eyre.workspace = true +rolling-file.workspace = true +url.workspace = true + +[features] +default = ["otlp"] +otlp = ["reth-tracing-otlp"] diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 5b9c93b5fb6..88a565cc25d 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -1,13 +1,14 @@ +use crate::formatter::LogFormat; +#[cfg(feature = "otlp")] +use reth_tracing_otlp::span_layer; +use rolling_file::{RollingConditionBasic, RollingFileAppender}; use std::{ fmt, path::{Path, PathBuf}, }; - -use rolling_file::{RollingConditionBasic, RollingFileAppender}; use tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{filter::Directive, EnvFilter, Layer, Registry}; - -use 
crate::formatter::LogFormat; +use url::Url; /// A worker guard returned by the file layer. /// @@ -123,6 +124,25 @@ impl Layers { self.add_layer(layer); Ok(guard) } + + /// Add OTLP spans layer to the layer collection + #[cfg(feature = "otlp")] + pub fn with_span_layer( + &mut self, + service_name: String, + endpoint_exporter: Url, + ) -> eyre::Result<()> { + // Create the span provider + + use tracing::{level_filters::LevelFilter, Level}; + let span_layer = span_layer(service_name, &endpoint_exporter) + .map_err(|e| eyre::eyre!("Failed to build OTLP span exporter {}", e))? + .with_filter(LevelFilter::from_level(Level::TRACE)); + + self.add_layer(span_layer); + + Ok(()) + } } /// Holds configuration information for file logging. diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 9a32d647876..0eca25947e2 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -113,4 +113,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index b449f118168..4b820f94bba 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -99,4 +99,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 2553a1480f9..5dd62265088 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -164,4 +164,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index ba12fd1b2f5..cce0e673413 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -116,4 +116,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 79e324021bf..6a63441b34f 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -108,4 +108,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index 843f5253c9a..1680a536fbd 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -107,4 +107,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index 3af272ff362..0d30040a403 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -110,4 +110,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index f440545f129..739ad240efc 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -143,4 +143,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index 64552318a21..10661fe776b 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -106,4 +106,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index c7fc831b764..61f8239a3e8 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -108,4 +108,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 48fd6c889c6..4b28ec2b53b 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -116,4 +116,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index af21819a452..5ad16928b18 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -116,4 +116,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index cff6c7eed5e..be06352ad6d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -149,4 +149,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index 1dd3279a797..1e1ab2fb5fb 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -103,4 +103,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index f5058265196..4e5651c32c8 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -106,4 +106,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index 1f2c50908dc..9689d28d336 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -116,4 +116,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index a683749fcdf..ea34e03bf79 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -103,4 +103,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index 973dce74a22..c92517d578e 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -161,4 +161,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index 6bc27381a24..924184d9546 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -102,4 +102,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 896f7f34d08..47dfe8f15a1 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -167,4 +167,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index a783067d193..7c28e80bcfe 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -162,4 +162,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 0914444e108..13bbba70131 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -163,4 +163,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 8c0cfa6e4d3..c245f82a601 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -186,4 +186,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index b1ac27e8ba7..760664b375a 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -151,4 +151,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 394854f7246..6912a05d7a7 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -39,7 +39,7 @@ Options: Print help (see a summary with '-h') Metrics: - --metrics + --metrics Enable Prometheus metrics. The metrics will be served at the given interface and port. @@ -987,4 +987,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 6b24d9d326b..fe8ee4e917a 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -100,4 +100,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index ecd6ccf8141..c897e041bf3 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -317,4 +317,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index 2a0a5b6a808..b5310f002ee 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -111,4 +111,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index fee957e3385..0a9951bc89c 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -317,4 +317,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index dbd7ca91b34..4aeca9ceea1 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -97,4 +97,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index ac123d47285..0de366fbbdf 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -97,4 +97,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index ce6bc399d8e..c68b3cff783 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -151,4 +151,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index ec5e048b5cd..df45d7a98b1 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -164,4 +164,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index bc693f7e463..efbb5d9fc8b 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -100,4 +100,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index a36545638ce..452c8184705 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -165,4 +165,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 97211934295..c38f8869b30 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -158,4 +158,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index c1459ee5498..90c09646b76 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -115,4 +115,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index 4f39dccac12..6d89c8d0328 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -115,4 +115,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index f5d6a07b09a..356ea65cc1a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -115,4 +115,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index fce03ffa753..26653838da8 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -115,4 +115,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 76ce30a2f79..69f61e3a17e 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -383,4 +383,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index 1a3fd02cae8..5a4c970163d 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -159,4 +159,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index bed98899e19..50914455a30 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -107,4 +107,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index bcfc87cf3e5..f3b9abc65ea 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -107,4 +107,12 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ + Example: --tracing-otlp=http://collector:4318/v1/traces ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/run/monitoring.mdx b/docs/vocs/docs/pages/run/monitoring.mdx index 30ce967bb10..d6c73436098 100644 --- a/docs/vocs/docs/pages/run/monitoring.mdx +++ b/docs/vocs/docs/pages/run/monitoring.mdx @@ -10,6 +10,12 @@ Reth exposes a number of metrics which can be enabled by adding the `--metrics` reth node --metrics 127.0.0.1:9001 ``` +Alternatively, you can export metrics to an OpenTelemetry collector using `--otlp-metrics`: + +```bash +reth node --otlp-metrics 127.0.0.1:4318 +``` + Now, as the node is running, you can `curl` the endpoint you provided to the `--metrics` flag to get a text dump of the metrics at that time: ```bash From ab2b11f40eed3623219c49022061a11a0b5e2c0c Mon Sep 17 00:00:00 2001 From: stevencartavia <112043913+stevencartavia@users.noreply.github.com> Date: Mon, 13 Oct 2025 18:41:22 -0600 Subject: [PATCH 035/371] feat: allow otlp level to be configurable (#18981) --- crates/ethereum/cli/src/app.rs | 6 ++++- crates/node/core/src/args/trace.rs | 23 ++++++++++++++++++- crates/optimism/cli/src/app.rs | 6 ++++- crates/tracing/src/layers.rs | 5 ++-- docs/vocs/docs/pages/cli/reth.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/config.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/db.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/db/checksum.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/db/clear.mdx | 9 ++++++++ .../docs/pages/cli/reth/db/clear/mdbx.mdx | 9 ++++++++ .../pages/cli/reth/db/clear/static-file.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/db/diff.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/db/drop.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/db/get.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx | 9 ++++++++ .../pages/cli/reth/db/get/static-file.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/db/list.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/db/path.mdx | 9 ++++++++ .../docs/pages/cli/reth/db/repair-trie.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/db/stats.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/db/version.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/download.mdx | 9 ++++++++ .../vocs/docs/pages/cli/reth/dump-genesis.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/export-era.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/import-era.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/import.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/init-state.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/init.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/node.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/p2p.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/p2p/body.mdx | 9 ++++++++ .../vocs/docs/pages/cli/reth/p2p/bootnode.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/p2p/header.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx | 9 ++++++++ .../docs/pages/cli/reth/p2p/rlpx/ping.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/prune.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/re-execute.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/stage.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 9 ++++++++ .../cli/reth/stage/dump/account-hashing.mdx | 9 ++++++++ .../pages/cli/reth/stage/dump/execution.mdx | 9 ++++++++ .../docs/pages/cli/reth/stage/dump/merkle.mdx | 9 ++++++++ .../cli/reth/stage/dump/storage-hashing.mdx | 9 ++++++++ docs/vocs/docs/pages/cli/reth/stage/run.mdx | 9 ++++++++ .../vocs/docs/pages/cli/reth/stage/unwind.mdx | 9 ++++++++ 
.../cli/reth/stage/unwind/num-blocks.mdx | 9 ++++++++ .../pages/cli/reth/stage/unwind/to-block.mdx | 9 ++++++++ 48 files changed, 431 insertions(+), 5 deletions(-) diff --git a/crates/ethereum/cli/src/app.rs b/crates/ethereum/cli/src/app.rs index dc299cb83cd..805c9144257 100644 --- a/crates/ethereum/cli/src/app.rs +++ b/crates/ethereum/cli/src/app.rs @@ -116,7 +116,11 @@ where #[cfg(feature = "otlp")] if let Some(output_type) = &self.cli.traces.otlp { info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); - layers.with_span_layer("reth".to_string(), output_type.clone())?; + layers.with_span_layer( + "reth".to_string(), + output_type.clone(), + self.cli.traces.otlp_level, + )?; } self.guard = self.cli.logs.init_tracing_with_layers(layers)?; diff --git a/crates/node/core/src/args/trace.rs b/crates/node/core/src/args/trace.rs index 0d4e347a486..751ab556ac8 100644 --- a/crates/node/core/src/args/trace.rs +++ b/crates/node/core/src/args/trace.rs @@ -2,10 +2,11 @@ use clap::Parser; use eyre::{ensure, WrapErr}; +use tracing::Level; use url::Url; /// CLI arguments for configuring `Opentelemetry` trace and span export. -#[derive(Debug, Clone, Default, Parser)] +#[derive(Debug, Clone, Parser)] pub struct TraceArgs { /// Enable `Opentelemetry` tracing export to an OTLP endpoint. /// @@ -23,6 +24,26 @@ pub struct TraceArgs { help_heading = "Tracing" )] pub otlp: Option, + + /// Set the minimum log level for OTLP traces. + /// + /// Valid values: ERROR, WARN, INFO, DEBUG, TRACE + /// + /// Defaults to TRACE if not specified. + #[arg( + long = "tracing-otlp-level", + global = true, + value_name = "LEVEL", + default_value = "TRACE", + help_heading = "Tracing" + )] + pub otlp_level: Level, +} + +impl Default for TraceArgs { + fn default() -> Self { + Self { otlp: None, otlp_level: Level::TRACE } + } } // Parses and validates an OTLP endpoint url. diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index a4f0a92e8f0..891578cbe24 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -121,7 +121,11 @@ where #[cfg(feature = "otlp")] if let Some(output_type) = &self.cli.traces.otlp { info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); - layers.with_span_layer("reth".to_string(), output_type.clone())?; + layers.with_span_layer( + "reth".to_string(), + output_type.clone(), + self.cli.traces.otlp_level, + )?; } self.guard = self.cli.logs.init_tracing_with_layers(layers)?; diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 88a565cc25d..44b2fff5995 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -6,6 +6,7 @@ use std::{ fmt, path::{Path, PathBuf}, }; +use tracing::level_filters::LevelFilter; use tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{filter::Directive, EnvFilter, Layer, Registry}; use url::Url; @@ -131,13 +132,13 @@ impl Layers { &mut self, service_name: String, endpoint_exporter: Url, + level: tracing::Level, ) -> eyre::Result<()> { // Create the span provider - use tracing::{level_filters::LevelFilter, Level}; let span_layer = span_layer(service_name, &endpoint_exporter) .map_err(|e| eyre::eyre!("Failed to build OTLP span exporter {}", e))? 
- .with_filter(LevelFilter::from_level(Level::TRACE)); + .with_filter(LevelFilter::from_level(level)); self.add_layer(span_layer); diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 0eca25947e2..5f0ccfca01f 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -121,4 +121,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index 4b820f94bba..849f4ec5bab 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -107,4 +107,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 5dd62265088..3b28b43162a 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -172,4 +172,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index cce0e673413..13e2c2bd39d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -124,4 +124,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 6a63441b34f..5c19682e8b6 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -116,4 +116,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index 1680a536fbd..0e5526affe5 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -115,4 +115,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. 
Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index 0d30040a403..72c3108fcf3 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -118,4 +118,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index 739ad240efc..fadd0613ca8 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -151,4 +151,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index 10661fe776b..0f9ddba9ee9 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -114,4 +114,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index 61f8239a3e8..942eda79998 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -116,4 +116,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 4b28ec2b53b..b7ccf9e7d3d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -124,4 +124,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. 
+ + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index 5ad16928b18..28d7c343e94 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -124,4 +124,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index be06352ad6d..3f9ac94c5c5 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -157,4 +157,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index 1e1ab2fb5fb..f6714898b35 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -111,4 +111,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index 4e5651c32c8..3a6bfae1d3c 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -114,4 +114,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index 9689d28d336..a4939c3ef93 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -124,4 +124,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index ea34e03bf79..7b3766b4e8a 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -111,4 +111,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. 
Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index c92517d578e..74296538855 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -169,4 +169,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index 924184d9546..a6dbbcb1b27 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -110,4 +110,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 47dfe8f15a1..ee65abbeb42 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -175,4 +175,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 7c28e80bcfe..ae17ab91e0e 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -170,4 +170,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 13bbba70131..f92b52ec591 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -171,4 +171,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. 
+ + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index c245f82a601..03d1e7b883b 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -194,4 +194,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index 760664b375a..993ae2dcd85 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -159,4 +159,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 6912a05d7a7..086187bc927 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -995,4 +995,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index fe8ee4e917a..9693e20e756 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -108,4 +108,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index c897e041bf3..070079b715f 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -325,4 +325,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index b5310f002ee..d1bf7c69870 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -119,4 +119,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. 
+ + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index 0a9951bc89c..8725c940e49 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -325,4 +325,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index 4aeca9ceea1..75ab654964f 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -105,4 +105,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index 0de366fbbdf..7152b222fb4 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -105,4 +105,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index c68b3cff783..f54f6687805 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -159,4 +159,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index df45d7a98b1..973ac79f29f 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -172,4 +172,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index efbb5d9fc8b..f382eb2081e 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -108,4 +108,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. 
Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 452c8184705..e2ba5751b52 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -173,4 +173,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index c38f8869b30..01b4f61f29f 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -166,4 +166,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index 90c09646b76..18f44ae13ed 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -123,4 +123,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index 6d89c8d0328..de0f693ed57 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -123,4 +123,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index 356ea65cc1a..aaff755796a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -123,4 +123,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. 
+ + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index 26653838da8..2ff7b22b76b 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -123,4 +123,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 69f61e3a17e..e876c83f84a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -391,4 +391,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index 5a4c970163d..977d949a9b7 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -167,4 +167,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index 50914455a30..0b60467c413 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -115,4 +115,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index f3b9abc65ea..07632cf8285 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -115,4 +115,13 @@ Tracing: If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. 
+ + [default: TRACE] ``` \ No newline at end of file From 221d585f084f5923a4e1f823e94e69f27fda9aa0 Mon Sep 17 00:00:00 2001 From: MozirDmitriy Date: Tue, 14 Oct 2025 11:54:55 +0300 Subject: [PATCH 036/371] chore(optimism): remove unnecessary Debug bounds from header generics (#18989) --- crates/optimism/consensus/src/validation/isthmus.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/optimism/consensus/src/validation/isthmus.rs b/crates/optimism/consensus/src/validation/isthmus.rs index 64d45eae5c8..4703e10869e 100644 --- a/crates/optimism/consensus/src/validation/isthmus.rs +++ b/crates/optimism/consensus/src/validation/isthmus.rs @@ -4,7 +4,6 @@ use crate::OpConsensusError; use alloy_consensus::BlockHeader; use alloy_primitives::{address, Address, B256}; use alloy_trie::EMPTY_ROOT_HASH; -use core::fmt::Debug; use reth_storage_api::{errors::ProviderResult, StorageRootProvider}; use reth_trie_common::HashedStorage; use revm::database::BundleState; @@ -72,7 +71,7 @@ pub fn verify_withdrawals_root( ) -> Result<(), OpConsensusError> where DB: StorageRootProvider, - H: BlockHeader + Debug, + H: BlockHeader, { let header_storage_root = header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?; @@ -110,7 +109,7 @@ pub fn verify_withdrawals_root_prehashed( ) -> Result<(), OpConsensusError> where DB: StorageRootProvider, - H: BlockHeader + core::fmt::Debug, + H: BlockHeader, { let header_storage_root = header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?; From 2a441d62612f59b780ba5e25aaac46e79cd7acbc Mon Sep 17 00:00:00 2001 From: stevencartavia <112043913+stevencartavia@users.noreply.github.com> Date: Tue, 14 Oct 2025 03:29:01 -0600 Subject: [PATCH 037/371] refactor: convert satisfy_base_fee_ids to use closure (#18979) --- crates/transaction-pool/src/pool/parked.rs | 45 +++++++++++----------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index 43a652a1476..193442174ca 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -260,35 +260,33 @@ impl ParkedPool> { &self, basefee: u64, ) -> Vec>> { - let ids = self.satisfy_base_fee_ids(basefee as u128); - let mut txs = Vec::with_capacity(ids.len()); - for id in ids { - txs.push(self.get(&id).expect("transaction exists").transaction.clone().into()); - } + let mut txs = Vec::new(); + self.satisfy_base_fee_ids(basefee as u128, |tx| { + txs.push(tx.clone()); + }); txs } /// Returns all transactions that satisfy the given basefee. 
- fn satisfy_base_fee_ids(&self, basefee: u128) -> Vec { - let mut transactions = Vec::new(); - { - let mut iter = self.by_id.iter().peekable(); - - while let Some((id, tx)) = iter.next() { - if tx.transaction.transaction.max_fee_per_gas() < basefee { - // still parked -> skip descendant transactions - 'this: while let Some((peek, _)) = iter.peek() { - if peek.sender != id.sender { - break 'this - } - iter.next(); + fn satisfy_base_fee_ids(&self, basefee: u128, mut tx_handler: F) + where + F: FnMut(&Arc>), + { + let mut iter = self.by_id.iter().peekable(); + + while let Some((id, tx)) = iter.next() { + if tx.transaction.transaction.max_fee_per_gas() < basefee { + // still parked -> skip descendant transactions + 'this: while let Some((peek, _)) = iter.peek() { + if peek.sender != id.sender { + break 'this } - } else { - transactions.push(*id); + iter.next(); } + } else { + tx_handler(&tx.transaction); } } - transactions } /// Removes all transactions from this subpool that can afford the given basefee, @@ -306,7 +304,10 @@ impl ParkedPool> { where F: FnMut(Arc>), { - let to_remove = self.satisfy_base_fee_ids(basefee as u128); + let mut to_remove = Vec::new(); + self.satisfy_base_fee_ids(basefee as u128, |tx| { + to_remove.push(*tx.id()); + }); for id in to_remove { if let Some(tx) = self.remove_transaction(&id) { From 5065890823eb6358845218f5e0f3219c6544e63a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Tue, 14 Oct 2025 11:34:20 +0200 Subject: [PATCH 038/371] chore: bump otlp crates (#18984) --- Cargo.lock | 58 ++++++++++++++++++++++++++++++++---------------------- Cargo.toml | 10 +++++----- 2 files changed, 40 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7b666230799..9929deb1742 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6225,9 +6225,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "opentelemetry" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf416e4cb72756655126f7dd7bb0af49c674f4c1b9903e80c009e0c37e552e6" +checksum = "b84bcd6ae87133e903af7ef497404dda70c60d0ea14895fc8a5e6722754fc2a0" dependencies = [ "futures-core", "futures-sink", @@ -6239,9 +6239,9 @@ dependencies = [ [[package]] name = "opentelemetry-http" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f6639e842a97dbea8886e3439710ae463120091e2e064518ba8e716e6ac36d" +checksum = "d7a6d09a73194e6b66df7c8f1b680f156d916a1a942abf2de06823dd02b7855d" dependencies = [ "async-trait", "bytes", @@ -6252,9 +6252,9 @@ dependencies = [ [[package]] name = "opentelemetry-otlp" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b" +checksum = "7a2366db2dca4d2ad033cad11e6ee42844fd727007af5ad04a1730f4cb8163bf" dependencies = [ "http", "opentelemetry", @@ -6269,27 +6269,28 @@ dependencies = [ [[package]] name = "opentelemetry-proto" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e046fd7660710fe5a05e8748e70d9058dc15c94ba914e7c4faa7c728f0e8ddc" +checksum = "a7175df06de5eaee9909d4805a3d07e28bb752c34cab57fa9cff549da596b30f" dependencies = [ "opentelemetry", "opentelemetry_sdk", "prost", "tonic", + "tonic-prost", ] [[package]] name = "opentelemetry-semantic-conventions" 
-version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83d059a296a47436748557a353c5e6c5705b9470ef6c95cfc52c21a8814ddac2" +checksum = "e62e29dfe041afb8ed2a6c9737ab57db4907285d999ef8ad3a59092a36bdc846" [[package]] name = "opentelemetry_sdk" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11f644aa9e5e31d11896e024305d7e3c98a88884d9f8919dbf37a9991bc47a4b" +checksum = "e14ae4f5991976fd48df6d843de219ca6d31b01daaab2dad5af2badeded372bd" dependencies = [ "futures-channel", "futures-executor", @@ -6297,7 +6298,6 @@ dependencies = [ "opentelemetry", "percent-encoding", "rand 0.9.2", - "serde_json", "thiserror 2.0.16", ] @@ -6785,9 +6785,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" dependencies = [ "bytes", "prost-derive", @@ -6795,9 +6795,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", "itertools 0.14.0", @@ -12590,9 +12590,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "tonic" -version = "0.13.1" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9" +checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" dependencies = [ "async-trait", "base64 0.22.1", @@ -12602,13 +12602,24 @@ dependencies = [ "http-body-util", "percent-encoding", "pin-project", - "prost", + "sync_wrapper", "tokio-stream", "tower-layer", "tower-service", "tracing", ] +[[package]] +name = "tonic-prost" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +dependencies = [ + "bytes", + "prost", + "tonic", +] + [[package]] name = "tower" version = "0.5.2" @@ -12763,15 +12774,16 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.31.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddcf5959f39507d0d04d6413119c04f33b623f4f951ebcbdddddfad2d0623a9c" +checksum = "1e6e5658463dd88089aba75c7791e1d3120633b1bfde22478b28f625a9bb1b8e" dependencies = [ "js-sys", - "once_cell", "opentelemetry", "opentelemetry_sdk", + "rustversion", "smallvec", + "thiserror 2.0.16", "tracing", "tracing-core", "tracing-log", diff --git a/Cargo.toml b/Cargo.toml index d027b0674ca..e8e94930193 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -651,11 +651,11 @@ c-kzg = "2.1.4" toml = "0.8" # otlp obs -opentelemetry_sdk = "0.30" -opentelemetry = "0.30" -opentelemetry-otlp = "0.30" -opentelemetry-semantic-conventions = "0.30" -tracing-opentelemetry = "0.31" +opentelemetry_sdk = "0.31" +opentelemetry = "0.31" +opentelemetry-otlp = "0.31" +opentelemetry-semantic-conventions = "0.31" +tracing-opentelemetry = "0.32" # misc-testing arbitrary = "1.3" From 9fa2779959f8a7d2c4246646a0aac63942585310 Mon Sep 17 00:00:00 2001 From: 
Alvarez <140459501+prestoalvarez@users.noreply.github.com> Date: Tue, 14 Oct 2025 15:33:45 +0200 Subject: [PATCH 039/371] fix(network): prevent metric leak in outgoing message queue on session teardown (#18847) --- crates/net/network/src/session/active.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 32f90899851..0044c1f92e1 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -924,6 +924,16 @@ impl QueuedOutgoingMessages { } } +impl Drop for QueuedOutgoingMessages { + fn drop(&mut self) { + // Ensure gauge is decremented for any remaining items to avoid metric leak on teardown. + let remaining = self.messages.len(); + if remaining > 0 { + self.count.decrement(remaining as f64); + } + } +} + #[cfg(test)] mod tests { use super::*; From cec30cd9f3f64b6ee5e3bf2135f984135632944a Mon Sep 17 00:00:00 2001 From: drhgencer Date: Tue, 14 Oct 2025 19:36:06 +0530 Subject: [PATCH 040/371] chore: remove unused imports in blockchain_provider (#18867) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 - crates/storage/provider/Cargo.toml | 2 - .../src/providers/blockchain_provider.rs | 73 ++++++------------- 3 files changed, 24 insertions(+), 52 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9929deb1742..20dfb2c62db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9765,7 +9765,6 @@ dependencies = [ "reth-errors", "reth-ethereum-engine-primitives", "reth-ethereum-primitives", - "reth-evm", "reth-execution-types", "reth-fs-util", "reth-metrics", diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 82a3726c43e..e8599a89706 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -29,7 +29,6 @@ reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true reth-codecs.workspace = true -reth-evm.workspace = true reth-chain-state.workspace = true reth-node-types.workspace = true reth-static-file-types.workspace = true @@ -90,7 +89,6 @@ test-utils = [ "reth-ethereum-engine-primitives", "reth-ethereum-primitives/test-utils", "reth-chainspec/test-utils", - "reth-evm/test-utils", "reth-primitives-traits/test-utils", "reth-codecs/test-utils", "reth-db-api/test-utils", diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 69e77079c55..7040032eca0 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1,48 +1,34 @@ -#![allow(unused)] use crate::{ providers::{ConsistentProvider, ProviderNodeTypes, StaticFileProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, - ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProvider, - DatabaseProviderFactory, FullProvider, HashedPostStateProvider, HeaderProvider, ProviderError, - ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, - StaticFileProviderFactory, TransactionVariant, TransactionsProvider, + ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, + HashedPostStateProvider, HeaderProvider, 
ProviderError, ProviderFactory, PruneCheckpointReader, + ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, + StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, + TransactionsProvider, }; -use alloy_consensus::{transaction::TransactionMeta, Header}; -use alloy_eips::{ - eip4895::{Withdrawal, Withdrawals}, - BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, -}; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use alloy_consensus::transaction::TransactionMeta; +use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ BlockState, CanonicalInMemoryState, ForkChoiceNotifications, ForkChoiceSubscriptions, MemoryOverlayStateProvider, }; -use reth_chainspec::{ChainInfo, EthereumHardforks}; -use reth_db_api::{ - models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices}, - transaction::DbTx, - Database, -}; -use reth_ethereum_primitives::{Block, EthPrimitives, Receipt, TransactionSigned}; -use reth_evm::{ConfigureEvm, EvmEnv}; +use reth_chainspec::ChainInfo; +use reth_db_api::models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices}; use reth_execution_types::ExecutionOutcome; use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; -use reth_primitives_traits::{ - Account, BlockBody, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, StorageEntry, -}; +use reth_primitives_traits::{Account, RecoveredBlock, SealedHeader, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{ - BlockBodyIndicesProvider, DBProvider, NodePrimitivesProvider, StorageChangeSetReader, -}; +use reth_storage_api::{BlockBodyIndicesProvider, NodePrimitivesProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{HashedPostState, KeccakKeyHasher}; use revm_database::BundleState; use std::{ - ops::{Add, RangeBounds, RangeInclusive, Sub}, + ops::{RangeBounds, RangeInclusive}, sync::Arc, time::Instant, }; @@ -761,8 +747,7 @@ mod tests { create_test_provider_factory, create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, }, - BlockWriter, CanonChainTracker, ProviderFactory, StaticFileProviderFactory, - StaticFileWriter, + BlockWriter, CanonChainTracker, ProviderFactory, }; use alloy_eips::{BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, TxNumber, B256}; @@ -773,22 +758,12 @@ mod tests { CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, NewCanonicalChain, }; - use reth_chainspec::{ - ChainSpec, ChainSpecBuilder, ChainSpecProvider, EthereumHardfork, MAINNET, - }; - use reth_db_api::{ - cursor::DbCursorRO, - models::{AccountBeforeTx, StoredBlockBodyIndices}, - tables, - transaction::DbTx, - }; + use reth_chainspec::{ChainSpec, MAINNET}; + use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_errors::ProviderError; - use reth_ethereum_primitives::{Block, EthPrimitives, Receipt}; + use reth_ethereum_primitives::{Block, Receipt}; use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_primitives_traits::{ - BlockBody, RecoveredBlock, SealedBlock, SignedTransaction, SignerRecoverable, - }; - use reth_static_file_types::StaticFileSegment; + use 
reth_primitives_traits::{RecoveredBlock, SealedBlock, SignerRecoverable}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DBProvider, DatabaseProviderFactory, @@ -801,9 +776,8 @@ mod tests { }; use revm_database::{BundleState, OriginalValuesKnown}; use std::{ - ops::{Bound, Deref, Range, RangeBounds}, + ops::{Bound, Range, RangeBounds}, sync::Arc, - time::Instant, }; const TEST_BLOCKS_COUNT: usize = 5; @@ -2594,14 +2568,15 @@ mod tests { persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[1].number); let to_be_persisted_tx = in_memory_blocks[1].body().transactions[0].clone(); - assert!(matches!( + assert_eq!( correct_transaction_hash_fn( *to_be_persisted_tx.tx_hash(), provider.canonical_in_memory_state(), provider.database - ), - Ok(Some(to_be_persisted_tx)) - )); + ) + .unwrap(), + Some(to_be_persisted_tx) + ); } Ok(()) From 0470ee8735ac1221553464cb745b8a49a6392f9b Mon Sep 17 00:00:00 2001 From: Forostovec Date: Tue, 14 Oct 2025 17:05:47 +0300 Subject: [PATCH 041/371] fix(stateless): enforce BLOCKHASH ancestor header limit (#18920) --- crates/stateless/src/validation.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/crates/stateless/src/validation.rs b/crates/stateless/src/validation.rs index 23308bcfa55..38b96d6bd0f 100644 --- a/crates/stateless/src/validation.rs +++ b/crates/stateless/src/validation.rs @@ -21,6 +21,9 @@ use reth_evm::{execute::Executor, ConfigureEvm}; use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_trie_common::{HashedPostState, KeccakKeyHasher}; +/// BLOCKHASH ancestor lookup window limit per EVM (number of most recent blocks accessible). +const BLOCKHASH_ANCESTOR_LIMIT: usize = 256; + /// Errors that can occur during stateless validation. #[derive(Debug, thiserror::Error)] pub enum StatelessValidationError { @@ -175,6 +178,15 @@ where // ascending order. ancestor_headers.sort_by_key(|header| header.number()); + // Enforce BLOCKHASH ancestor headers limit (256 most recent blocks) + let count = ancestor_headers.len(); + if count > BLOCKHASH_ANCESTOR_LIMIT { + return Err(StatelessValidationError::AncestorHeaderLimitExceeded { + count, + limit: BLOCKHASH_ANCESTOR_LIMIT, + }); + } + // Check that the ancestor headers form a contiguous chain and are not just random headers. let ancestor_hashes = compute_ancestor_hashes(¤t_block, &ancestor_headers)?; From 7aebea2f3758cd2cd4678113d1e079800e18b667 Mon Sep 17 00:00:00 2001 From: Forostovec Date: Tue, 14 Oct 2025 17:18:33 +0300 Subject: [PATCH 042/371] chore(evm): mark ExecuteOutput as unused and slated for removal (#18754) --- crates/evm/evm/src/execute.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index e318b589939..28b972e7c95 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -149,6 +149,11 @@ pub trait Executor: Sized { } /// Helper type for the output of executing a block. +/// +/// Deprecated: this type is unused within reth and will be removed in the next +/// major release. Use `reth_execution_types::BlockExecutionResult` or +/// `reth_execution_types::BlockExecutionOutput`. +#[deprecated(note = "Use reth_execution_types::BlockExecutionResult or BlockExecutionOutput")] #[derive(Debug, Clone)] pub struct ExecuteOutput { /// Receipts obtained after executing a block. 
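For downstream code that still constructs the now-deprecated `ExecuteOutput`, the attribute above only raises a warning, so call sites keep compiling until they migrate to the replacement types named in the note. A minimal migration sketch follows, assuming `ExecuteOutput` carries `receipts` and `gas_used` and that `reth_execution_types::BlockExecutionResult` exposes `receipts`, `requests`, and `gas_used`; the helper name and field layout here are illustrative, not taken from this patch:

    use alloy_eips::eip7685::Requests;
    use reth_ethereum_primitives::Receipt;
    use reth_execution_types::BlockExecutionResult;

    // Hypothetical helper: wrap the same data that used to populate the
    // deprecated `ExecuteOutput` in a `BlockExecutionResult`, using an empty
    // `Requests` default when no EIP-7685 requests were produced.
    fn into_block_execution_result(
        receipts: Vec<Receipt>,
        gas_used: u64,
    ) -> BlockExecutionResult<Receipt> {
        BlockExecutionResult { receipts, requests: Requests::default(), gas_used }
    }

Call sites that cannot migrate immediately can silence the warning locally with `#[allow(deprecated)]` until the type is removed.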
From c661cd2f75189c8c1b396df9fcc2053413975fca Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 14 Oct 2025 20:20:21 +0400 Subject: [PATCH 043/371] refactor: unify `Pipeline` creation codepaths (#18955) --- crates/cli/commands/src/common.rs | 12 +- crates/node/builder/src/launch/common.rs | 144 +----------------- crates/node/builder/src/launch/engine.rs | 19 ++- crates/stages/api/src/pipeline/mod.rs | 87 +++++++++-- crates/stages/stages/src/stages/mod.rs | 44 +++--- .../provider/src/providers/database/mod.rs | 5 + .../src/providers/static_file/manager.rs | 11 +- 7 files changed, 136 insertions(+), 186 deletions(-) diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 1ceba8f57da..25f32f63a2b 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -24,7 +24,7 @@ use reth_provider::{ providers::{BlockchainProvider, NodeTypesForProvider, StaticFileProvider}, ProviderFactory, StaticFileProviderFactory, }; -use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; +use reth_stages::{sets::DefaultStages, Pipeline}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; @@ -126,7 +126,6 @@ impl EnvironmentArgs { where C: ChainSpecParser, { - let has_receipt_pruning = config.prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); let prune_modes = config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(); let factory = ProviderFactory::>>::new( @@ -137,9 +136,8 @@ impl EnvironmentArgs { .with_prune_modes(prune_modes.clone()); // Check for consistency between database and static files. - if let Some(unwind_target) = factory - .static_file_provider() - .check_consistency(&factory.provider()?, has_receipt_pruning)? + if let Some(unwind_target) = + factory.static_file_provider().check_consistency(&factory.provider()?)? { if factory.db_ref().is_read_only()? { warn!(target: "reth::cli", ?unwind_target, "Inconsistent storage. Restart node to heal."); @@ -150,7 +148,7 @@ impl EnvironmentArgs { // instead. assert_ne!( unwind_target, - PipelineTarget::Unwind(0), + 0, "A static file <> database inconsistency was found that would trigger an unwind to block 0" ); @@ -175,7 +173,7 @@ impl EnvironmentArgs { // Move all applicable data from database to static files. 
pipeline.move_to_static_files()?; - pipeline.unwind(unwind_target.unwind_target().expect("should exist"), None)?; + pipeline.unwind(unwind_target, None)?; } Ok(factory) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index b43dc2a2a6a..2d1fb6924d8 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -41,12 +41,10 @@ use eyre::Context; use rayon::ThreadPoolBuilder; use reth_chainspec::{Chain, EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; -use reth_consensus::noop::NoopConsensus; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitStorageError}; -use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_engine_local::MiningMode; -use reth_evm::{noop::NoopEvmConfig, ConfigureEvm}; +use reth_evm::ConfigureEvm; use reth_exex::ExExManagerHandle; use reth_fs_util as fs; use reth_network_p2p::headers::client::HeadersClient; @@ -67,25 +65,19 @@ use reth_node_metrics::{ }; use reth_provider::{ providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider}, - BlockHashReader, BlockNumReader, BlockReaderIdExt, ProviderError, ProviderFactory, - ProviderResult, StageCheckpointReader, StaticFileProviderFactory, + BlockNumReader, BlockReaderIdExt, ProviderError, ProviderFactory, ProviderResult, + StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_builder::config::RethRpcServerConfig; use reth_rpc_layer::JwtSecret; -use reth_stages::{ - sets::DefaultStages, stages::EraImportSource, MetricEvent, PipelineBuilder, PipelineTarget, - StageId, -}; +use reth_stages::{stages::EraImportSource, MetricEvent}; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, error, info, warn}; use reth_transaction_pool::TransactionPool; use std::{sync::Arc, thread::available_parallelism}; -use tokio::sync::{ - mpsc::{unbounded_channel, UnboundedSender}, - oneshot, watch, -}; +use tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; use futures::{future::Either, stream, Stream, StreamExt}; use reth_node_ethstats::EthStatsService; @@ -466,70 +458,13 @@ where N: ProviderNodeTypes, Evm: ConfigureEvm + 'static, { - let factory = ProviderFactory::new( + Ok(ProviderFactory::new( self.right().clone(), self.chain_spec(), StaticFileProvider::read_write(self.data_dir().static_files())?, ) .with_prune_modes(self.prune_modes()) - .with_static_files_metrics(); - - let has_receipt_pruning = - self.toml_config().prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); - - // Check for consistency between database and static files. If it fails, it unwinds to - // the first block that's consistent between database and static files. - if let Some(unwind_target) = factory - .static_file_provider() - .check_consistency(&factory.provider()?, has_receipt_pruning)? - { - // Highly unlikely to happen, and given its destructive nature, it's better to panic - // instead. 
- assert_ne!( - unwind_target, - PipelineTarget::Unwind(0), - "A static file <> database inconsistency was found that would trigger an unwind to block 0" - ); - - info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check."); - - let (_tip_tx, tip_rx) = watch::channel(B256::ZERO); - - // Builds an unwind-only pipeline - let pipeline = PipelineBuilder::default() - .add_stages(DefaultStages::new( - factory.clone(), - tip_rx, - Arc::new(NoopConsensus::default()), - NoopHeaderDownloader::default(), - NoopBodiesDownloader::default(), - NoopEvmConfig::::default(), - self.toml_config().stages.clone(), - self.prune_modes(), - None, - )) - .build( - factory.clone(), - StaticFileProducer::new(factory.clone(), self.prune_modes()), - ); - - // Unwinds to block - let (tx, rx) = oneshot::channel(); - - // Pipeline should be run as blocking and panic if it fails. - self.task_executor().spawn_critical_blocking( - "pipeline task", - Box::pin(async move { - let (_, result) = pipeline.run_as_fut(Some(unwind_target)).await; - let _ = tx.send(result); - }), - ); - rx.await?.inspect_err(|err| { - error!(target: "reth::cli", unwind_target = %unwind_target, %err, "failed to run unwind") - })?; - } - - Ok(factory) + .with_static_files_metrics()) } /// Creates a new [`ProviderFactory`] and attaches it to the launch context. @@ -852,21 +787,6 @@ where &self.node_adapter().provider } - /// Returns the initial backfill to sync to at launch. - /// - /// This returns the configured `debug.tip` if set, otherwise it will check if backfill was - /// previously interrupted and returns the block hash of the last checkpoint, see also - /// [`Self::check_pipeline_consistency`] - pub fn initial_backfill_target(&self) -> ProviderResult> { - let mut initial_target = self.node_config().debug.tip; - - if initial_target.is_none() { - initial_target = self.check_pipeline_consistency()?; - } - - Ok(initial_target) - } - /// Returns true if the node should terminate after the initial backfill run. /// /// This is the case if any of these configs are set: @@ -880,7 +800,7 @@ where /// /// This checks for OP-Mainnet and ensures we have all the necessary data to progress (past /// bedrock height) - fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> { + pub fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> { if self.chain_spec().is_optimism() && !self.is_dev() && self.chain_id() == Chain::optimism_mainnet() @@ -898,54 +818,6 @@ where Ok(()) } - /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less - /// than the checkpoint of the first stage). - /// - /// This will return the pipeline target if: - /// * the pipeline was interrupted during its previous run - /// * a new stage was added - /// * stage data was dropped manually through `reth stage drop ...` - /// - /// # Returns - /// - /// A target block hash if the pipeline is inconsistent, otherwise `None`. - pub fn check_pipeline_consistency(&self) -> ProviderResult> { - // If no target was provided, check if the stages are congruent - check if the - // checkpoint of the last stage matches the checkpoint of the first. - let first_stage_checkpoint = self - .blockchain_db() - .get_stage_checkpoint(*StageId::ALL.first().unwrap())? - .unwrap_or_default() - .block_number; - - // Skip the first stage as we've already retrieved it and comparing all other checkpoints - // against it. 
- for stage_id in StageId::ALL.iter().skip(1) { - let stage_checkpoint = self - .blockchain_db() - .get_stage_checkpoint(*stage_id)? - .unwrap_or_default() - .block_number; - - // If the checkpoint of any stage is less than the checkpoint of the first stage, - // retrieve and return the block hash of the latest header and use it as the target. - if stage_checkpoint < first_stage_checkpoint { - debug!( - target: "consensus::engine", - first_stage_checkpoint, - inconsistent_stage_id = %stage_id, - inconsistent_stage_checkpoint = stage_checkpoint, - "Pipeline sync progress is inconsistent" - ); - return self.blockchain_db().block_hash(first_stage_checkpoint); - } - } - - self.ensure_chain_specific_db_checks()?; - - Ok(None) - } - /// Expire the pre-merge transactions if the node is configured to do so and the chain has a /// merge block. /// diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 5f6c54afc96..02fb505b077 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -117,9 +117,6 @@ impl EngineNodeLauncher { })? .with_components(components_builder, on_component_initialized).await?; - // Try to expire pre-merge transaction history if configured - ctx.expire_pre_merge_transactions()?; - // spawn exexs if any let maybe_exex_manager_handle = ctx.launch_exex(installed_exex).await?; @@ -141,7 +138,7 @@ impl EngineNodeLauncher { let consensus = Arc::new(ctx.components().consensus().clone()); - let pipeline = build_networked_pipeline( + let mut pipeline = build_networked_pipeline( &ctx.toml_config().stages, network_client.clone(), consensus.clone(), @@ -157,7 +154,18 @@ impl EngineNodeLauncher { )?; // The new engine writes directly to static files. This ensures that they're up to the tip. - pipeline.move_to_static_files()?; + pipeline.ensure_static_files_consistency().await?; + + // Try to expire pre-merge transaction history if configured + ctx.expire_pre_merge_transactions()?; + + let initial_target = if let Some(tip) = ctx.node_config().debug.tip { + Some(tip) + } else { + pipeline.initial_backfill_target()? + }; + + ctx.ensure_chain_specific_db_checks()?; let pipeline_events = pipeline.events(); @@ -249,7 +257,6 @@ impl EngineNodeLauncher { add_ons.launch_add_ons(add_ons_ctx).await?; // Run consensus engine to completion - let initial_target = ctx.initial_backfill_target()?; let mut built_payloads = ctx .components() .payload_builder_handle() diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 0a9aaef73de..2446219ea3d 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -9,7 +9,7 @@ use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH; use reth_provider::{ providers::ProviderNodeTypes, BlockHashReader, BlockNumReader, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, DatabaseProviderFactory, ProviderFactory, - PruneCheckpointReader, StageCheckpointReader, StageCheckpointWriter, + PruneCheckpointReader, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, }; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; @@ -31,7 +31,7 @@ use crate::{ }; pub use builder::*; use progress::*; -use reth_errors::RethResult; +use reth_errors::{ProviderResult, RethResult}; pub use set::*; /// A container for a queued stage. 
@@ -101,12 +101,6 @@ impl Pipeline { PipelineBuilder::default() } - /// Return the minimum block number achieved by - /// any stage during the execution of the pipeline. - pub const fn minimum_block_number(&self) -> Option { - self.progress.minimum_block_number - } - /// Set tip for reverse sync. #[track_caller] pub fn set_tip(&self, tip: B256) { @@ -127,9 +121,7 @@ impl Pipeline { ) -> &mut dyn Stage< as DatabaseProviderFactory>::ProviderRW> { &mut self.stages[idx] } -} -impl Pipeline { /// Registers progress metrics for each registered stage pub fn register_metrics(&mut self) -> Result<(), PipelineError> { let Some(metrics_tx) = &mut self.metrics_tx else { return Ok(()) }; @@ -290,6 +282,81 @@ impl Pipeline { Ok(()) } + /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less + /// than the checkpoint of the first stage). + /// + /// This will return the pipeline target if: + /// * the pipeline was interrupted during its previous run + /// * a new stage was added + /// * stage data was dropped manually through `reth stage drop ...` + /// + /// # Returns + /// + /// A target block hash if the pipeline is inconsistent, otherwise `None`. + pub fn initial_backfill_target(&self) -> ProviderResult> { + let provider = self.provider_factory.provider()?; + + // If no target was provided, check if the stages are congruent - check if the + // checkpoint of the last stage matches the checkpoint of the first. + let first_stage_checkpoint = provider + .get_stage_checkpoint(self.stages.first().unwrap().id())? + .unwrap_or_default() + .block_number; + + // Skip the first stage as we've already retrieved it and comparing all other checkpoints + // against it. + for stage in self.stages.iter().skip(1) { + let stage_id = stage.id(); + + let stage_checkpoint = + provider.get_stage_checkpoint(stage_id)?.unwrap_or_default().block_number; + + // If the checkpoint of any stage is less than the checkpoint of the first stage, + // retrieve and return the block hash of the latest header and use it as the target. + if stage_checkpoint < first_stage_checkpoint { + debug!( + target: "consensus::engine", + first_stage_checkpoint, + inconsistent_stage_id = %stage_id, + inconsistent_stage_checkpoint = stage_checkpoint, + "Pipeline sync progress is inconsistent" + ); + return provider.block_hash(first_stage_checkpoint); + } + } + + Ok(None) + } + + /// Checks for consistency between database and static files. If it fails, it unwinds to + /// the first block that's consistent between database and static files. + pub async fn ensure_static_files_consistency(&mut self) -> Result<(), PipelineError> { + let maybe_unwind_target = self + .provider_factory + .static_file_provider() + .check_consistency(&self.provider_factory.provider()?)?; + + self.move_to_static_files()?; + + if let Some(unwind_target) = maybe_unwind_target { + // Highly unlikely to happen, and given its destructive nature, it's better to panic + // instead. + assert_ne!( + unwind_target, + 0, + "A static file <> database inconsistency was found that would trigger an unwind to block 0" + ); + + info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check."); + + self.unwind(unwind_target, None).inspect_err(|err| { + error!(target: "reth::cli", unwind_target = %unwind_target, %err, "failed to run unwind") + })?; + } + + Ok(()) + } + /// Unwind the stages to the target block (exclusive). 
/// /// If the unwind is due to a bad block the number of that block should be specified. diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index f9b2312f5ab..7e57009e808 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -72,9 +72,7 @@ mod tests { StaticFileProviderFactory, StorageReader, }; use reth_prune_types::{PruneMode, PruneModes}; - use reth_stages_api::{ - ExecInput, ExecutionStageThresholds, PipelineTarget, Stage, StageCheckpoint, StageId, - }; + use reth_stages_api::{ExecInput, ExecutionStageThresholds, Stage, StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_testing_utils::generators::{ self, random_block, random_block_range, random_receipt, BlockRangeParams, @@ -301,7 +299,7 @@ mod tests { prune_count: usize, segment: StaticFileSegment, is_full_node: bool, - expected: Option, + expected: Option, ) { // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. @@ -323,11 +321,18 @@ mod tests { // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. + let mut provider = db.factory.database_provider_ro().unwrap(); + if is_full_node { + provider.set_prune_modes(PruneModes { + receipts: Some(PruneMode::Full), + ..Default::default() + }); + } let mut static_file_provider = db.factory.static_file_provider(); static_file_provider = StaticFileProvider::read_write(static_file_provider.path()).unwrap(); assert!(matches!( static_file_provider - .check_consistency(&db.factory.database_provider_ro().unwrap(), is_full_node,), + .check_consistency(&provider), Ok(e) if e == expected )); } @@ -338,7 +343,7 @@ mod tests { db: &TestStageDB, stage_id: StageId, checkpoint_block_number: BlockNumber, - expected: Option, + expected: Option, ) { let provider_rw = db.factory.provider_rw().unwrap(); provider_rw @@ -349,18 +354,15 @@ mod tests { assert!(matches!( db.factory .static_file_provider() - .check_consistency(&db.factory.database_provider_ro().unwrap(), false,), + .check_consistency(&db.factory.database_provider_ro().unwrap()), Ok(e) if e == expected )); } /// Inserts a dummy value at key and compare the check consistency result against the expected /// one. 
- fn update_db_and_check>( - db: &TestStageDB, - key: u64, - expected: Option, - ) where + fn update_db_and_check>(db: &TestStageDB, key: u64, expected: Option) + where ::Value: Default, { update_db_with_and_check::(db, key, expected, &Default::default()); @@ -371,7 +373,7 @@ mod tests { fn update_db_with_and_check>( db: &TestStageDB, key: u64, - expected: Option, + expected: Option, value: &T::Value, ) { let provider_rw = db.factory.provider_rw().unwrap(); @@ -382,7 +384,7 @@ mod tests { assert!(matches!( db.factory .static_file_provider() - .check_consistency(&db.factory.database_provider_ro().unwrap(), false), + .check_consistency(&db.factory.database_provider_ro().unwrap()), Ok(e) if e == expected )); } @@ -393,7 +395,7 @@ mod tests { let db_provider = db.factory.database_provider_ro().unwrap(); assert!(matches!( - db.factory.static_file_provider().check_consistency(&db_provider, false), + db.factory.static_file_provider().check_consistency(&db_provider), Ok(None) )); } @@ -415,7 +417,7 @@ mod tests { 1, StaticFileSegment::Receipts, archive_node, - Some(PipelineTarget::Unwind(88)), + Some(88), ); simulate_behind_checkpoint_corruption( @@ -423,7 +425,7 @@ mod tests { 3, StaticFileSegment::Headers, archive_node, - Some(PipelineTarget::Unwind(86)), + Some(86), ); } @@ -472,7 +474,7 @@ mod tests { ); // When a checkpoint is ahead, we request a pipeline unwind. - save_checkpoint_and_check(&db, StageId::Headers, 91, Some(PipelineTarget::Unwind(block))); + save_checkpoint_and_check(&db, StageId::Headers, 91, Some(block)); } #[test] @@ -485,7 +487,7 @@ mod tests { .unwrap(); // Creates a gap of one header: static_file db - update_db_and_check::(&db, current + 2, Some(PipelineTarget::Unwind(89))); + update_db_and_check::(&db, current + 2, Some(89)); // Fill the gap, and ensure no unwind is necessary. update_db_and_check::(&db, current + 1, None); @@ -504,7 +506,7 @@ mod tests { update_db_with_and_check::( &db, current + 2, - Some(PipelineTarget::Unwind(89)), + Some(89), &TxLegacy::default().into_signed(Signature::test_signature()).into(), ); @@ -527,7 +529,7 @@ mod tests { .unwrap(); // Creates a gap of one receipt: static_file db - update_db_and_check::(&db, current + 2, Some(PipelineTarget::Unwind(89))); + update_db_and_check::(&db, current + 2, Some(89)); // Fill the gap, and ensure no unwind is necessary. update_db_and_check::(&db, current + 1, None); diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 54642a94757..f7b3c4ba603 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -111,6 +111,11 @@ impl ProviderFactory { pub fn into_db(self) -> N::DB { self.db } + + /// Returns reference to the prune modes. 
+ pub const fn prune_modes_ref(&self) -> &PruneModes { + &self.prune_modes + } } impl>> ProviderFactory { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 434d3836fb2..800c761718a 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -37,7 +37,7 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; use reth_node_types::{FullNodePrimitives, NodePrimitives}; use reth_primitives_traits::{RecoveredBlock, SealedHeader, SignedTransaction}; -use reth_stages_types::{PipelineTarget, StageId}; +use reth_stages_types::StageId; use reth_static_file_types::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE, @@ -731,15 +731,14 @@ impl StaticFileProvider { /// * its highest block should match the stage checkpoint block number if it's equal or higher /// than the corresponding database table last entry. /// - /// Returns a [`Option`] of [`PipelineTarget::Unwind`] if any healing is further required. + /// Returns a [`Option`] with block number to unwind to if any healing is further required. /// /// WARNING: No static file writer should be held before calling this function, otherwise it /// will deadlock. pub fn check_consistency( &self, provider: &Provider, - has_receipt_pruning: bool, - ) -> ProviderResult> + ) -> ProviderResult> where Provider: DBProvider + BlockReader + StageCheckpointReader + ChainSpecProvider, N: NodePrimitives, @@ -776,7 +775,7 @@ impl StaticFileProvider { }; for segment in StaticFileSegment::iter() { - if has_receipt_pruning && segment.is_receipts() { + if provider.prune_modes_ref().has_receipts_pruning() && segment.is_receipts() { // Pruned nodes (including full node) do not store receipts as static files. continue } @@ -887,7 +886,7 @@ impl StaticFileProvider { } } - Ok(unwind_target.map(PipelineTarget::Unwind)) + Ok(unwind_target) } /// Checks consistency of the latest static file segment and throws an error if at fault. From 169a1fb97b677731e2281b5e3ca5c372a1dcc616 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 14 Oct 2025 15:12:55 -0400 Subject: [PATCH 044/371] fix(engine): flatten storage cache (#18880) --- crates/engine/tree/src/tree/cached_state.rs | 142 ++++++++------------ 1 file changed, 53 insertions(+), 89 deletions(-) diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index 9f4eb8398df..8553a9fe63c 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -1,5 +1,8 @@ //! Execution cache implementation for block processing. -use alloy_primitives::{Address, StorageKey, StorageValue, B256}; +use alloy_primitives::{ + map::{DefaultHashBuilder, HashSet}, + Address, StorageKey, StorageValue, B256, +}; use metrics::Gauge; use mini_moka::sync::CacheBuilder; use reth_errors::ProviderResult; @@ -14,7 +17,6 @@ use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; -use revm_primitives::map::DefaultHashBuilder; use std::{sync::Arc, time::Duration}; use tracing::trace; @@ -300,65 +302,69 @@ pub(crate) struct ExecutionCache { /// Cache for contract bytecode, keyed by code hash. 
code_cache: Cache>, - /// Per-account storage cache: outer cache keyed by Address, inner cache tracks that account’s - /// storage slots. - storage_cache: Cache, + /// Flattened storage cache: composite key of (`Address`, `StorageKey`) maps directly to + /// values. + storage_cache: Cache<(Address, StorageKey), Option>, /// Cache for basic account information (nonce, balance, code hash). account_cache: Cache>, } impl ExecutionCache { - /// Get storage value from hierarchical cache. + /// Get storage value from flattened cache. /// /// Returns a `SlotStatus` indicating whether: - /// - `NotCached`: The account's storage cache doesn't exist - /// - `Empty`: The slot exists in the account's cache but is empty + /// - `NotCached`: The storage slot is not in the cache + /// - `Empty`: The slot exists in the cache but is empty /// - `Value`: The slot exists and has a specific value pub(crate) fn get_storage(&self, address: &Address, key: &StorageKey) -> SlotStatus { - match self.storage_cache.get(address) { + match self.storage_cache.get(&(*address, *key)) { None => SlotStatus::NotCached, - Some(account_cache) => account_cache.get_storage(key), + Some(None) => SlotStatus::Empty, + Some(Some(value)) => SlotStatus::Value(value), } } - /// Insert storage value into hierarchical cache + /// Insert storage value into flattened cache pub(crate) fn insert_storage( &self, address: Address, key: StorageKey, value: Option, ) { - self.insert_storage_bulk(address, [(key, value)]); + self.storage_cache.insert((address, key), value); } - /// Insert multiple storage values into hierarchical cache for a single account + /// Insert multiple storage values into flattened cache for a single account /// - /// This method is optimized for inserting multiple storage values for the same address - /// by doing the account cache lookup only once instead of for each key-value pair. + /// This method inserts multiple storage values for the same address directly + /// into the flattened cache. pub(crate) fn insert_storage_bulk(&self, address: Address, storage_entries: I) where I: IntoIterator)>, { - let account_cache = self.storage_cache.get(&address).unwrap_or_else(|| { - let account_cache = AccountStorageCache::default(); - self.storage_cache.insert(address, account_cache.clone()); - account_cache - }); - for (key, value) in storage_entries { - account_cache.insert_storage(key, value); + self.storage_cache.insert((address, key), value); } } - /// Invalidate storage for specific account - pub(crate) fn invalidate_account_storage(&self, address: &Address) { - self.storage_cache.invalidate(address); - } - /// Returns the total number of storage slots cached across all accounts pub(crate) fn total_storage_slots(&self) -> usize { - self.storage_cache.iter().map(|addr| addr.len()).sum() + self.storage_cache.entry_count() as usize + } + + /// Invalidates the storage for all addresses in the set + pub(crate) fn invalidate_storages(&self, addresses: HashSet<&Address>) { + // NOTE: this must collect because the invalidate function should not be called while we + // hold an iter for it + let storage_entries = self + .storage_cache + .iter() + .filter_map(|entry| addresses.contains(&entry.key().0).then_some(*entry.key())) + .collect::>(); + for key in storage_entries { + self.storage_cache.invalidate(&key) + } } /// Inserts the post-execution state changes into the cache. 
@@ -385,6 +391,7 @@ impl ExecutionCache { self.code_cache.insert(*code_hash, Some(Bytecode(bytecode.clone()))); } + let mut invalidated_accounts = HashSet::default(); for (addr, account) in &state_updates.state { // If the account was not modified, as in not changed and not destroyed, then we have // nothing to do w.r.t. this particular account and can move on @@ -397,7 +404,7 @@ impl ExecutionCache { // Invalidate the account cache entry if destroyed self.account_cache.invalidate(addr); - self.invalidate_account_storage(addr); + invalidated_accounts.insert(addr); continue } @@ -424,6 +431,9 @@ impl ExecutionCache { self.account_cache.insert(*addr, Some(Account::from(account_info))); } + // invalidate storage for all destroyed accounts + self.invalidate_storages(invalidated_accounts); + Ok(()) } } @@ -452,11 +462,11 @@ impl ExecutionCacheBuilder { const TIME_TO_IDLE: Duration = Duration::from_secs(3600); // 1 hour let storage_cache = CacheBuilder::new(self.storage_cache_entries) - .weigher(|_key: &Address, value: &AccountStorageCache| -> u32 { - // values based on results from measure_storage_cache_overhead test - let base_weight = 39_000; - let slots_weight = value.len() * 218; - (base_weight + slots_weight) as u32 + .weigher(|_key: &(Address, StorageKey), _value: &Option| -> u32 { + // Size of composite key (Address + StorageKey) + Option + // Address: 20 bytes, StorageKey: 32 bytes, Option: 33 bytes + // Plus some overhead for the hash map entry + 120_u32 }) .max_capacity(storage_cache_size) .time_to_live(EXPIRY_TIME) @@ -573,56 +583,6 @@ impl SavedCache { } } -/// Cache for an individual account's storage slots. -/// -/// This represents the second level of the hierarchical storage cache. -/// Each account gets its own `AccountStorageCache` to store accessed storage slots. -#[derive(Debug, Clone)] -pub(crate) struct AccountStorageCache { - /// Map of storage keys to their cached values. - slots: Cache>, -} - -impl AccountStorageCache { - /// Create a new [`AccountStorageCache`] - pub(crate) fn new(max_slots: u64) -> Self { - Self { - slots: CacheBuilder::new(max_slots).build_with_hasher(DefaultHashBuilder::default()), - } - } - - /// Get a storage value from this account's cache. - /// - `NotCached`: The slot is not in the cache - /// - `Empty`: The slot is empty - /// - `Value`: The slot has a specific value - pub(crate) fn get_storage(&self, key: &StorageKey) -> SlotStatus { - match self.slots.get(key) { - None => SlotStatus::NotCached, - Some(None) => SlotStatus::Empty, - Some(Some(value)) => SlotStatus::Value(value), - } - } - - /// Insert a storage value - pub(crate) fn insert_storage(&self, key: StorageKey, value: Option) { - self.slots.insert(key, value); - } - - /// Returns the number of slots in the cache - pub(crate) fn len(&self) -> usize { - self.slots.entry_count() as usize - } -} - -impl Default for AccountStorageCache { - fn default() -> Self { - // With weigher and max_capacity in place, this number represents - // the maximum number of entries that can be stored, not the actual - // memory usage which is controlled by storage cache's max_capacity. 
- Self::new(1_000_000) - } -} - #[cfg(test)] mod tests { use super::*; @@ -697,32 +657,36 @@ mod tests { #[test] fn measure_storage_cache_overhead() { - let (base_overhead, cache) = measure_allocation(|| AccountStorageCache::new(1000)); - println!("Base AccountStorageCache overhead: {base_overhead} bytes"); + let (base_overhead, cache) = + measure_allocation(|| ExecutionCacheBuilder::default().build_caches(1000)); + println!("Base ExecutionCache overhead: {base_overhead} bytes"); let mut rng = rand::rng(); + let address = Address::random(); let key = StorageKey::random(); let value = StorageValue::from(rng.random::()); let (first_slot, _) = measure_allocation(|| { - cache.insert_storage(key, Some(value)); + cache.insert_storage(address, key, Some(value)); }); println!("First slot insertion overhead: {first_slot} bytes"); const TOTAL_SLOTS: usize = 10_000; let (test_slots, _) = measure_allocation(|| { for _ in 0..TOTAL_SLOTS { + let addr = Address::random(); let key = StorageKey::random(); let value = StorageValue::from(rng.random::()); - cache.insert_storage(key, Some(value)); + cache.insert_storage(addr, key, Some(value)); } }); println!("Average overhead over {} slots: {} bytes", TOTAL_SLOTS, test_slots / TOTAL_SLOTS); println!("\nTheoretical sizes:"); + println!("Address size: {} bytes", size_of::
<Address>
()); println!("StorageKey size: {} bytes", size_of::()); println!("StorageValue size: {} bytes", size_of::()); println!("Option size: {} bytes", size_of::>()); - println!("Option size: {} bytes", size_of::>()); + println!("(Address, StorageKey) size: {} bytes", size_of::<(Address, StorageKey)>()); } #[test] From e0b7a86313cd137488fc6d11d629d2139efa40d8 Mon Sep 17 00:00:00 2001 From: YK Date: Wed, 15 Oct 2025 08:26:02 +0800 Subject: [PATCH 045/371] perf(tree): worker pooling for account proofs (#18901) Co-authored-by: Brian Picciano Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- crates/engine/primitives/src/config.rs | 24 + .../tree/src/tree/payload_processor/mod.rs | 10 +- .../src/tree/payload_processor/multiproof.rs | 174 ++-- crates/node/core/src/args/engine.rs | 10 + crates/trie/parallel/src/proof.rs | 276 ++--- crates/trie/parallel/src/proof_task.rs | 962 +++++++++++++----- .../trie/parallel/src/storage_root_targets.rs | 17 + docs/vocs/docs/pages/cli/reth/node.mdx | 3 + 8 files changed, 903 insertions(+), 573 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index b2f8da4d424..70763b6701f 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -21,6 +21,14 @@ fn default_storage_worker_count() -> usize { } } +/// Returns the default number of account worker threads. +/// +/// Account workers coordinate storage proof collection and account trie traversal. +/// They are set to the same count as storage workers for simplicity. +fn default_account_worker_count() -> usize { + default_storage_worker_count() +} + /// The size of proof targets chunk to spawn in one multiproof calculation. pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 10; @@ -123,6 +131,8 @@ pub struct TreeConfig { allow_unwind_canonical_header: bool, /// Number of storage proof worker threads. storage_worker_count: usize, + /// Number of account proof worker threads. + account_worker_count: usize, } impl Default for TreeConfig { @@ -150,6 +160,7 @@ impl Default for TreeConfig { prewarm_max_concurrency: DEFAULT_PREWARM_MAX_CONCURRENCY, allow_unwind_canonical_header: false, storage_worker_count: default_storage_worker_count(), + account_worker_count: default_account_worker_count(), } } } @@ -180,6 +191,7 @@ impl TreeConfig { prewarm_max_concurrency: usize, allow_unwind_canonical_header: bool, storage_worker_count: usize, + account_worker_count: usize, ) -> Self { assert!(max_proof_task_concurrency > 0, "max_proof_task_concurrency must be at least 1"); Self { @@ -205,6 +217,7 @@ impl TreeConfig { prewarm_max_concurrency, allow_unwind_canonical_header, storage_worker_count, + account_worker_count, } } @@ -482,4 +495,15 @@ impl TreeConfig { self.storage_worker_count = storage_worker_count; self } + + /// Return the number of account proof worker threads. + pub const fn account_worker_count(&self) -> usize { + self.account_worker_count + } + + /// Setter for the number of account proof worker threads. 
+ pub const fn with_account_worker_count(mut self, account_worker_count: usize) -> Self { + self.account_worker_count = account_worker_count; + self + } } diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index d449031606e..c24b0d1fe16 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -192,8 +192,7 @@ where { let (to_sparse_trie, sparse_trie_rx) = channel(); // spawn multiproof task, save the trie input - let (trie_input, state_root_config) = - MultiProofConfig::new_from_input(consistent_view, trie_input); + let (trie_input, state_root_config) = MultiProofConfig::from_input(trie_input); self.trie_input = Some(trie_input); // Create and spawn the storage proof task @@ -202,14 +201,15 @@ where state_root_config.state_sorted.clone(), state_root_config.prefix_sets.clone(), ); - let max_proof_task_concurrency = config.max_proof_task_concurrency() as usize; let storage_worker_count = config.storage_worker_count(); + let account_worker_count = config.account_worker_count(); + let max_proof_task_concurrency = config.max_proof_task_concurrency() as usize; let proof_task = match ProofTaskManager::new( self.executor.handle().clone(), - state_root_config.consistent_view.clone(), + consistent_view, task_ctx, - max_proof_task_concurrency, storage_worker_count, + account_worker_count, ) { Ok(task) => task, Err(error) => { diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 18d394477fb..f865312b83d 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -12,14 +12,17 @@ use derive_more::derive::Deref; use metrics::Histogram; use reth_errors::ProviderError; use reth_metrics::Metrics; -use reth_provider::{providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, FactoryTx}; use reth_revm::state::EvmState; use reth_trie::{ added_removed_keys::MultiAddedRemovedKeys, prefix_set::TriePrefixSetsMut, updates::TrieUpdatesSorted, DecodedMultiProof, HashedPostState, HashedPostStateSorted, HashedStorage, MultiProofTargets, TrieInput, }; -use reth_trie_parallel::{proof::ParallelProof, proof_task::ProofTaskManagerHandle}; +use reth_trie_parallel::{ + proof::ParallelProof, + proof_task::{AccountMultiproofInput, ProofTaskKind, ProofTaskManagerHandle}, + root::ParallelStateRootError, +}; use std::{ collections::{BTreeMap, VecDeque}, ops::DerefMut, @@ -62,9 +65,7 @@ impl SparseTrieUpdate { /// Common configuration for multi proof tasks #[derive(Debug, Clone)] -pub(super) struct MultiProofConfig { - /// View over the state in the database. - pub consistent_view: ConsistentDbView, +pub(super) struct MultiProofConfig { /// The sorted collection of cached in-memory intermediate trie nodes that /// can be reused for computation. pub nodes_sorted: Arc, @@ -76,17 +77,13 @@ pub(super) struct MultiProofConfig { pub prefix_sets: Arc, } -impl MultiProofConfig { - /// Creates a new state root config from the consistent view and the trie input. +impl MultiProofConfig { + /// Creates a new state root config from the trie input. /// /// This returns a cleared [`TrieInput`] so that we can reuse any allocated space in the /// [`TrieInput`]. 
- pub(super) fn new_from_input( - consistent_view: ConsistentDbView, - mut input: TrieInput, - ) -> (TrieInput, Self) { + pub(super) fn from_input(mut input: TrieInput) -> (TrieInput, Self) { let config = Self { - consistent_view, nodes_sorted: Arc::new(input.nodes.drain_into_sorted()), state_sorted: Arc::new(input.state.drain_into_sorted()), prefix_sets: Arc::new(input.prefix_sets.clone()), @@ -245,14 +242,14 @@ pub(crate) fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostStat /// A pending multiproof task, either [`StorageMultiproofInput`] or [`MultiproofInput`]. #[derive(Debug)] -enum PendingMultiproofTask { +enum PendingMultiproofTask { /// A storage multiproof task input. - Storage(StorageMultiproofInput), + Storage(StorageMultiproofInput), /// A regular multiproof task input. - Regular(MultiproofInput), + Regular(MultiproofInput), } -impl PendingMultiproofTask { +impl PendingMultiproofTask { /// Returns the proof sequence number of the task. const fn proof_sequence_number(&self) -> u64 { match self { @@ -278,22 +275,22 @@ impl PendingMultiproofTask { } } -impl From> for PendingMultiproofTask { - fn from(input: StorageMultiproofInput) -> Self { +impl From for PendingMultiproofTask { + fn from(input: StorageMultiproofInput) -> Self { Self::Storage(input) } } -impl From> for PendingMultiproofTask { - fn from(input: MultiproofInput) -> Self { +impl From for PendingMultiproofTask { + fn from(input: MultiproofInput) -> Self { Self::Regular(input) } } /// Input parameters for spawning a dedicated storage multiproof calculation. #[derive(Debug)] -struct StorageMultiproofInput { - config: MultiProofConfig, +struct StorageMultiproofInput { + config: MultiProofConfig, source: Option, hashed_state_update: HashedPostState, hashed_address: B256, @@ -303,7 +300,7 @@ struct StorageMultiproofInput { multi_added_removed_keys: Arc, } -impl StorageMultiproofInput { +impl StorageMultiproofInput { /// Destroys the input and sends a [`MultiProofMessage::EmptyProof`] message to the sender. fn send_empty_proof(self) { let _ = self.state_root_message_sender.send(MultiProofMessage::EmptyProof { @@ -315,8 +312,8 @@ impl StorageMultiproofInput { /// Input parameters for spawning a multiproof calculation. #[derive(Debug)] -struct MultiproofInput { - config: MultiProofConfig, +struct MultiproofInput { + config: MultiProofConfig, source: Option, hashed_state_update: HashedPostState, proof_targets: MultiProofTargets, @@ -325,7 +322,7 @@ struct MultiproofInput { multi_added_removed_keys: Option>, } -impl MultiproofInput { +impl MultiproofInput { /// Destroys the input and sends a [`MultiProofMessage::EmptyProof`] message to the sender. fn send_empty_proof(self) { let _ = self.state_root_message_sender.send(MultiProofMessage::EmptyProof { @@ -340,17 +337,20 @@ impl MultiproofInput { /// concurrency, further calculation requests are queued and spawn later, after /// availability has been signaled. #[derive(Debug)] -pub struct MultiproofManager { +pub struct MultiproofManager { /// Maximum number of concurrent calculations. max_concurrent: usize, /// Currently running calculations. inflight: usize, /// Queued calculations. - pending: VecDeque>, + pending: VecDeque, /// Executor for tasks executor: WorkloadExecutor, - /// Sender to the storage proof task. - storage_proof_task_handle: ProofTaskManagerHandle>, + /// Handle to the proof task manager used for creating `ParallelProof` instances for storage + /// proofs. 
+ storage_proof_task_handle: ProofTaskManagerHandle, + /// Handle to the proof task manager used for account multiproofs. + account_proof_task_handle: ProofTaskManagerHandle, /// Cached storage proof roots for missed leaves; this maps /// hashed (missed) addresses to their storage proof roots. /// @@ -367,15 +367,13 @@ pub struct MultiproofManager { metrics: MultiProofTaskMetrics, } -impl MultiproofManager -where - Factory: DatabaseProviderFactory + Clone + 'static, -{ +impl MultiproofManager { /// Creates a new [`MultiproofManager`]. fn new( executor: WorkloadExecutor, metrics: MultiProofTaskMetrics, - storage_proof_task_handle: ProofTaskManagerHandle>, + storage_proof_task_handle: ProofTaskManagerHandle, + account_proof_task_handle: ProofTaskManagerHandle, max_concurrent: usize, ) -> Self { Self { @@ -385,6 +383,7 @@ where inflight: 0, metrics, storage_proof_task_handle, + account_proof_task_handle, missed_leaves_storage_roots: Default::default(), } } @@ -395,7 +394,7 @@ where /// Spawns a new multiproof calculation or enqueues it for later if /// `max_concurrent` are already inflight. - fn spawn_or_queue(&mut self, input: PendingMultiproofTask) { + fn spawn_or_queue(&mut self, input: PendingMultiproofTask) { // If there are no proof targets, we can just send an empty multiproof back immediately if input.proof_targets_is_empty() { debug!( @@ -429,7 +428,7 @@ where /// Spawns a multiproof task, dispatching to `spawn_storage_proof` if the input is a storage /// multiproof, and dispatching to `spawn_multiproof` otherwise. - fn spawn_multiproof_task(&mut self, input: PendingMultiproofTask) { + fn spawn_multiproof_task(&mut self, input: PendingMultiproofTask) { match input { PendingMultiproofTask::Storage(storage_input) => { self.spawn_storage_proof(storage_input); @@ -441,7 +440,7 @@ where } /// Spawns a single storage proof calculation task. - fn spawn_storage_proof(&mut self, storage_multiproof_input: StorageMultiproofInput) { + fn spawn_storage_proof(&mut self, storage_multiproof_input: StorageMultiproofInput) { let StorageMultiproofInput { config, source, @@ -468,12 +467,11 @@ where ); let start = Instant::now(); let proof_result = ParallelProof::new( - config.consistent_view, config.nodes_sorted, config.state_sorted, config.prefix_sets, missed_leaves_storage_roots, - storage_proof_task_handle.clone(), + storage_proof_task_handle, ) .with_branch_node_masks(true) .with_multi_added_removed_keys(Some(multi_added_removed_keys)) @@ -516,7 +514,7 @@ where } /// Spawns a single multiproof calculation task. 
- fn spawn_multiproof(&mut self, multiproof_input: MultiproofInput) { + fn spawn_multiproof(&mut self, multiproof_input: MultiproofInput) { let MultiproofInput { config, source, @@ -526,7 +524,7 @@ where state_root_message_sender, multi_added_removed_keys, } = multiproof_input; - let storage_proof_task_handle = self.storage_proof_task_handle.clone(); + let account_proof_task_handle = self.account_proof_task_handle.clone(); let missed_leaves_storage_roots = self.missed_leaves_storage_roots.clone(); self.executor.spawn_blocking(move || { @@ -544,17 +542,37 @@ where ); let start = Instant::now(); - let proof_result = ParallelProof::new( - config.consistent_view, - config.nodes_sorted, - config.state_sorted, - config.prefix_sets, + + // Extend prefix sets with targets + let frozen_prefix_sets = + ParallelProof::extend_prefix_sets_with_targets(&config.prefix_sets, &proof_targets); + + // Queue account multiproof to worker pool + let input = AccountMultiproofInput { + targets: proof_targets, + prefix_sets: frozen_prefix_sets, + collect_branch_node_masks: true, + multi_added_removed_keys, missed_leaves_storage_roots, - storage_proof_task_handle.clone(), - ) - .with_branch_node_masks(true) - .with_multi_added_removed_keys(multi_added_removed_keys) - .decoded_multiproof(proof_targets); + }; + + let (sender, receiver) = channel(); + let proof_result: Result = (|| { + account_proof_task_handle + .queue_task(ProofTaskKind::AccountMultiproof(input, sender)) + .map_err(|_| { + ParallelStateRootError::Other( + "Failed to queue account multiproof to worker pool".into(), + ) + })?; + + receiver + .recv() + .map_err(|_| { + ParallelStateRootError::Other("Account multiproof channel closed".into()) + })? + .map(|(proof, _stats)| proof) + })(); let elapsed = start.elapsed(); trace!( target: "engine::root", @@ -645,13 +663,13 @@ pub(crate) struct MultiProofTaskMetrics { /// Then it updates relevant leaves according to the result of the transaction. /// This feeds updates to the sparse trie task. #[derive(Debug)] -pub(super) struct MultiProofTask { +pub(super) struct MultiProofTask { /// The size of proof targets chunk to spawn in one calculation. /// /// If [`None`], then chunking is disabled. chunk_size: Option, /// Task configuration. - config: MultiProofConfig, + config: MultiProofConfig, /// Receiver for state root related messages. rx: Receiver, /// Sender for state root related messages. @@ -665,20 +683,17 @@ pub(super) struct MultiProofTask { /// Proof sequencing handler. proof_sequencer: ProofSequencer, /// Manages calculation of multiproofs. 
- multiproof_manager: MultiproofManager, + multiproof_manager: MultiproofManager, /// multi proof task metrics metrics: MultiProofTaskMetrics, } -impl MultiProofTask -where - Factory: DatabaseProviderFactory + Clone + 'static, -{ +impl MultiProofTask { /// Creates a new multi proof task with the unified message channel pub(super) fn new( - config: MultiProofConfig, + config: MultiProofConfig, executor: WorkloadExecutor, - proof_task_handle: ProofTaskManagerHandle>, + proof_task_handle: ProofTaskManagerHandle, to_sparse_trie: Sender, max_concurrency: usize, chunk_size: Option, @@ -698,7 +713,8 @@ where multiproof_manager: MultiproofManager::new( executor, metrics.clone(), - proof_task_handle, + proof_task_handle.clone(), // handle for storage proof workers + proof_task_handle, // handle for account proof workers max_concurrency, ), metrics, @@ -1202,43 +1218,29 @@ fn get_proof_targets( mod tests { use super::*; use alloy_primitives::map::B256Set; - use reth_provider::{providers::ConsistentDbView, test_utils::create_test_provider_factory}; + use reth_provider::{ + providers::ConsistentDbView, test_utils::create_test_provider_factory, BlockReader, + DatabaseProviderFactory, + }; use reth_trie::{MultiProof, TrieInput}; use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofTaskManager}; use revm_primitives::{B256, U256}; - use std::sync::Arc; - - fn create_state_root_config(factory: F, input: TrieInput) -> MultiProofConfig - where - F: DatabaseProviderFactory + Clone + 'static, - { - let consistent_view = ConsistentDbView::new(factory, None); - let nodes_sorted = Arc::new(input.nodes.clone().into_sorted()); - let state_sorted = Arc::new(input.state.clone().into_sorted()); - let prefix_sets = Arc::new(input.prefix_sets); - MultiProofConfig { consistent_view, nodes_sorted, state_sorted, prefix_sets } - } - - fn create_test_state_root_task(factory: F) -> MultiProofTask + fn create_test_state_root_task(factory: F) -> MultiProofTask where F: DatabaseProviderFactory + Clone + 'static, { let executor = WorkloadExecutor::default(); - let config = create_state_root_config(factory, TrieInput::default()); + let (_trie_input, config) = MultiProofConfig::from_input(TrieInput::default()); let task_ctx = ProofTaskCtx::new( config.nodes_sorted.clone(), config.state_sorted.clone(), config.prefix_sets.clone(), ); - let proof_task = ProofTaskManager::new( - executor.handle().clone(), - config.consistent_view.clone(), - task_ctx, - 1, - 1, - ) - .expect("Failed to create ProofTaskManager"); + let consistent_view = ConsistentDbView::new(factory, None); + let proof_task = + ProofTaskManager::new(executor.handle().clone(), consistent_view, task_ctx, 1, 1) + .expect("Failed to create ProofTaskManager"); let channel = channel(); MultiProofTask::new(config, executor, proof_task.handle(), channel.0, 1, None) diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index 2298b28f9ce..6b678b5789b 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -113,6 +113,11 @@ pub struct EngineArgs { /// If not specified, defaults to 2x available parallelism, clamped between 2 and 64. #[arg(long = "engine.storage-worker-count")] pub storage_worker_count: Option, + + /// Configure the number of account proof workers in the Tokio blocking pool. + /// If not specified, defaults to the same count as storage workers. 
+ #[arg(long = "engine.account-worker-count")] + pub account_worker_count: Option<usize>, } #[allow(deprecated)] @@ -140,6 +145,7 @@ impl Default for EngineArgs { always_process_payload_attributes_on_canonical_head: false, allow_unwind_canonical_header: false, storage_worker_count: None, + account_worker_count: None, } } } @@ -171,6 +177,10 @@ impl EngineArgs { config = config.with_storage_worker_count(count); } + if let Some(count) = self.account_worker_count { + config = config.with_account_worker_count(count); + } + config } } diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 4a2738fd38e..7fc1f022a7e 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -1,40 +1,25 @@ use crate::{ metrics::ParallelTrieMetrics, - proof_task::{ProofTaskKind, ProofTaskManagerHandle, StorageProofInput}, + proof_task::{ + AccountMultiproofInput, ProofTaskKind, ProofTaskManagerHandle, StorageProofInput, + }, root::ParallelStateRootError, - stats::ParallelTrieTracker, StorageRootTargets, }; -use alloy_primitives::{ - map::{B256Map, B256Set, HashMap}, - B256, -}; -use alloy_rlp::{BufMut, Encodable}; +use alloy_primitives::{map::B256Set, B256}; use dashmap::DashMap; -use itertools::Itertools; use reth_execution_errors::StorageRootError; -use reth_provider::{ - providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, FactoryTx, - ProviderError, -}; use reth_storage_errors::db::DatabaseError; use reth_trie::{ - hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, - node_iter::{TrieElement, TrieNodeIter}, - prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSetsMut}, - proof::StorageProof, - trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, + prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets, TriePrefixSetsMut}, updates::TrieUpdatesSorted, - walker::TrieWalker, - DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostStateSorted, - MultiProofTargets, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, + DecodedMultiProof, DecodedStorageMultiProof, HashedPostStateSorted, MultiProofTargets, Nibbles, }; -use reth_trie_common::{ - added_removed_keys::MultiAddedRemovedKeys, - proof::{DecodedProofNodes, ProofRetainer}, +use reth_trie_common::added_removed_keys::MultiAddedRemovedKeys; +use std::sync::{ + mpsc::{channel, Receiver}, + Arc, }; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; -use std::sync::{mpsc::Receiver, Arc}; use tracing::trace; /// Parallel proof calculator. /// /// This can collect proof for many targets in parallel, spawning a task for each hashed address /// that has proof targets. #[derive(Debug)] -pub struct ParallelProof<Factory> { - /// Consistent view of the database. - view: ConsistentDbView<Factory>, +pub struct ParallelProof { /// The sorted collection of cached in-memory intermediate trie nodes that /// can be reused for computation. pub nodes_sorted: Arc<TrieUpdatesSorted>, /// The sorted in-memory overlay hashed state. pub state_sorted: Arc<HashedPostStateSorted>, /// The collection of prefix sets for the computation. Since the prefix sets _always_ /// invalidate the in-memory nodes, not all keys from `state_sorted` might be present here, /// if we have cached nodes for them. pub prefix_sets: Arc<TriePrefixSetsMut>, /// Flag indicating whether to include branch node masks in the proof. collect_branch_node_masks: bool, /// Provided by the user to give the necessary context to retain extra proofs. multi_added_removed_keys: Option<Arc<MultiAddedRemovedKeys>>, - /// Handle to the storage proof task. - storage_proof_task_handle: ProofTaskManagerHandle<FactoryTx<Factory>>, + /// Handle to the proof task manager. + proof_task_handle: ProofTaskManagerHandle, /// Cached storage proof roots for missed leaves; this maps /// hashed (missed) addresses to their storage proof roots.
missed_leaves_storage_roots: Arc>, @@ -67,25 +50,23 @@ pub struct ParallelProof { metrics: ParallelTrieMetrics, } -impl ParallelProof { +impl ParallelProof { /// Create new state proof generator. pub fn new( - view: ConsistentDbView, nodes_sorted: Arc, state_sorted: Arc, prefix_sets: Arc, missed_leaves_storage_roots: Arc>, - storage_proof_task_handle: ProofTaskManagerHandle>, + proof_task_handle: ProofTaskManagerHandle, ) -> Self { Self { - view, nodes_sorted, state_sorted, prefix_sets, missed_leaves_storage_roots, collect_branch_node_masks: false, multi_added_removed_keys: None, - storage_proof_task_handle, + proof_task_handle, #[cfg(feature = "metrics")] metrics: ParallelTrieMetrics::new_with_labels(&[("type", "proof")]), } @@ -106,12 +87,6 @@ impl ParallelProof { self.multi_added_removed_keys = multi_added_removed_keys; self } -} - -impl ParallelProof -where - Factory: DatabaseProviderFactory + Clone + 'static, -{ /// Queues a storage proof task and returns a receiver for the result. fn queue_storage_proof( &self, @@ -128,8 +103,7 @@ where ); let (sender, receiver) = std::sync::mpsc::channel(); - let _ = - self.storage_proof_task_handle.queue_task(ProofTaskKind::StorageProof(input, sender)); + let _ = self.proof_task_handle.queue_task(ProofTaskKind::StorageProof(input, sender)); receiver } @@ -167,16 +141,16 @@ where proof_result } - /// Generate a state multiproof according to specified targets. - pub fn decoded_multiproof( - self, - targets: MultiProofTargets, - ) -> Result { - let mut tracker = ParallelTrieTracker::default(); - - // Extend prefix sets with targets - let mut prefix_sets = (*self.prefix_sets).clone(); - prefix_sets.extend(TriePrefixSetsMut { + /// Extends prefix sets with the given multiproof targets and returns the frozen result. + /// + /// This is a helper function used to prepare prefix sets before computing multiproofs. + /// Returns frozen (immutable) prefix sets ready for use in proof computation. + pub fn extend_prefix_sets_with_targets( + base_prefix_sets: &TriePrefixSetsMut, + targets: &MultiProofTargets, + ) -> TriePrefixSets { + let mut extended = base_prefix_sets.clone(); + extended.extend(TriePrefixSetsMut { account_prefix_set: PrefixSetMut::from(targets.keys().copied().map(Nibbles::unpack)), storage_prefix_sets: targets .iter() @@ -187,13 +161,21 @@ where .collect(), destroyed_accounts: Default::default(), }); - let prefix_sets = prefix_sets.freeze(); + extended.freeze() + } + + /// Generate a state multiproof according to specified targets. + pub fn decoded_multiproof( + self, + targets: MultiProofTargets, + ) -> Result { + // Extend prefix sets with targets + let prefix_sets = Self::extend_prefix_sets_with_targets(&self.prefix_sets, &targets); - let storage_root_targets = StorageRootTargets::new( - prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())), - prefix_sets.storage_prefix_sets.clone(), + let storage_root_targets_len = StorageRootTargets::count( + &prefix_sets.account_prefix_set, + &prefix_sets.storage_prefix_sets, ); - let storage_root_targets_len = storage_root_targets.len(); trace!( target: "trie::parallel_proof", @@ -201,150 +183,36 @@ where "Starting parallel proof generation" ); - // Pre-calculate storage roots for accounts which were changed. 
- tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); - - // stores the receiver for the storage proof outcome for the hashed addresses - // this way we can lazily await the outcome when we iterate over the map - let mut storage_proof_receivers = - B256Map::with_capacity_and_hasher(storage_root_targets.len(), Default::default()); - - for (hashed_address, prefix_set) in - storage_root_targets.into_iter().sorted_unstable_by_key(|(address, _)| *address) - { - let target_slots = targets.get(&hashed_address).cloned().unwrap_or_default(); - let receiver = self.queue_storage_proof(hashed_address, prefix_set, target_slots); - - // store the receiver for that result with the hashed address so we can await this in - // place when we iterate over the trie - storage_proof_receivers.insert(hashed_address, receiver); - } + // Queue account multiproof request to account worker pool - let provider_ro = self.view.provider_ro()?; - let trie_cursor_factory = InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), - &self.nodes_sorted, - ); - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - &self.state_sorted, - ); + let input = AccountMultiproofInput { + targets, + prefix_sets, + collect_branch_node_masks: self.collect_branch_node_masks, + multi_added_removed_keys: self.multi_added_removed_keys.clone(), + missed_leaves_storage_roots: self.missed_leaves_storage_roots.clone(), + }; - let accounts_added_removed_keys = - self.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); + let (sender, receiver) = channel(); + self.proof_task_handle + .queue_task(ProofTaskKind::AccountMultiproof(input, sender)) + .map_err(|_| { + ParallelStateRootError::Other( + "Failed to queue account multiproof: account worker pool unavailable" + .to_string(), + ) + })?; - // Create the walker. - let walker = TrieWalker::<_>::state_trie( - trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, - prefix_sets.account_prefix_set, - ) - .with_added_removed_keys(accounts_added_removed_keys) - .with_deletions_retained(true); - - // Create a hash builder to rebuild the root node since it is not available in the database. - let retainer = targets - .keys() - .map(Nibbles::unpack) - .collect::() - .with_added_removed_keys(accounts_added_removed_keys); - let mut hash_builder = HashBuilder::default() - .with_proof_retainer(retainer) - .with_updates(self.collect_branch_node_masks); - - // Initialize all storage multiproofs as empty. - // Storage multiproofs for non empty tries will be overwritten if necessary. - let mut collected_decoded_storages: B256Map = - targets.keys().map(|key| (*key, DecodedStorageMultiProof::empty())).collect(); - let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); - let mut account_node_iter = TrieNodeIter::state_trie( - walker, - hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, - ); - while let Some(account_node) = - account_node_iter.try_next().map_err(ProviderError::Database)? 
- { - match account_node { - TrieElement::Branch(node) => { - hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); - } - TrieElement::Leaf(hashed_address, account) => { - let root = match storage_proof_receivers.remove(&hashed_address) { - Some(rx) => { - let decoded_storage_multiproof = rx.recv().map_err(|e| { - ParallelStateRootError::StorageRoot(StorageRootError::Database( - DatabaseError::Other(format!( - "channel closed for {hashed_address}: {e}" - )), - )) - })??; - let root = decoded_storage_multiproof.root; - collected_decoded_storages - .insert(hashed_address, decoded_storage_multiproof); - root - } - // Since we do not store all intermediate nodes in the database, there might - // be a possibility of re-adding a non-modified leaf to the hash builder. - None => { - tracker.inc_missed_leaves(); - - match self.missed_leaves_storage_roots.entry(hashed_address) { - dashmap::Entry::Occupied(occ) => *occ.get(), - dashmap::Entry::Vacant(vac) => { - let root = StorageProof::new_hashed( - trie_cursor_factory.clone(), - hashed_cursor_factory.clone(), - hashed_address, - ) - .with_prefix_set_mut(Default::default()) - .storage_multiproof( - targets.get(&hashed_address).cloned().unwrap_or_default(), - ) - .map_err(|e| { - ParallelStateRootError::StorageRoot( - StorageRootError::Database(DatabaseError::Other( - e.to_string(), - )), - ) - })? - .root; - vac.insert(root); - root - } - } - } - }; - - // Encode account - account_rlp.clear(); - let account = account.into_trie_account(root); - account.encode(&mut account_rlp as &mut dyn BufMut); - - hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - } - } - } - let _ = hash_builder.root(); + // Wait for account multiproof result from worker + let (multiproof, stats) = receiver.recv().map_err(|_| { + ParallelStateRootError::Other( + "Account multiproof channel dropped: worker died or pool shutdown".to_string(), + ) + })??; - let stats = tracker.finish(); #[cfg(feature = "metrics")] self.metrics.record(stats); - let account_subtree_raw_nodes = hash_builder.take_proof_nodes(); - let decoded_account_subtree = DecodedProofNodes::try_from(account_subtree_raw_nodes)?; - - let (branch_node_hash_masks, branch_node_tree_masks) = if self.collect_branch_node_masks { - let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); - ( - updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), - updated_branch_nodes - .into_iter() - .map(|(path, node)| (path, node.tree_mask)) - .collect(), - ) - } else { - (HashMap::default(), HashMap::default()) - }; - trace!( target: "trie::parallel_proof", total_targets = storage_root_targets_len, @@ -356,12 +224,7 @@ where "Calculated decoded proof" ); - Ok(DecodedMultiProof { - account_subtree: decoded_account_subtree, - branch_node_hash_masks, - branch_node_tree_masks, - storages: collected_decoded_storages, - }) + Ok(multiproof) } } @@ -371,13 +234,16 @@ mod tests { use crate::proof_task::{ProofTaskCtx, ProofTaskManager}; use alloy_primitives::{ keccak256, - map::{B256Set, DefaultHashBuilder}, + map::{B256Set, DefaultHashBuilder, HashMap}, Address, U256, }; use rand::Rng; use reth_primitives_traits::{Account, StorageEntry}; - use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; + use reth_provider::{ + providers::ConsistentDbView, test_utils::create_test_provider_factory, HashingWriter, + }; use reth_trie::proof::Proof; + use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use 
tokio::runtime::Runtime; #[test] @@ -448,8 +314,7 @@ mod tests { let task_ctx = ProofTaskCtx::new(Default::default(), Default::default(), Default::default()); let proof_task = - ProofTaskManager::new(rt.handle().clone(), consistent_view.clone(), task_ctx, 1, 1) - .unwrap(); + ProofTaskManager::new(rt.handle().clone(), consistent_view, task_ctx, 1, 1).unwrap(); let proof_task_handle = proof_task.handle(); // keep the join handle around to make sure it does not return any errors @@ -457,7 +322,6 @@ mod tests { let join_handle = rt.spawn_blocking(move || proof_task.run()); let parallel_result = ParallelProof::new( - consistent_view, Default::default(), Default::default(), Default::default(), diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 0c513c55763..780839c238a 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -8,34 +8,47 @@ //! Individual [`ProofTaskTx`] instances manage a dedicated [`InMemoryTrieCursorFactory`] and //! [`HashedPostStateCursorFactory`], which are each backed by a database transaction. -use crate::root::ParallelStateRootError; -use alloy_primitives::{map::B256Set, B256}; +use crate::{ + root::ParallelStateRootError, + stats::{ParallelTrieStats, ParallelTrieTracker}, + StorageRootTargets, +}; +use alloy_primitives::{ + map::{B256Map, B256Set}, + B256, +}; +use alloy_rlp::{BufMut, Encodable}; use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; +use dashmap::DashMap; use reth_db_api::transaction::DbTx; -use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; +use reth_execution_errors::SparseTrieError; use reth_provider::{ - providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, FactoryTx, + providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, ProviderResult, }; +use reth_storage_errors::db::DatabaseError; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, - prefix_set::TriePrefixSetsMut, + node_iter::{TrieElement, TrieNodeIter}, + prefix_set::{TriePrefixSets, TriePrefixSetsMut}, proof::{ProofTrieNodeProviderFactory, StorageProof}, trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdatesSorted, - DecodedStorageMultiProof, HashedPostStateSorted, Nibbles, + walker::TrieWalker, + DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostStateSorted, + MultiProofTargets, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_common::{ added_removed_keys::MultiAddedRemovedKeys, prefix_set::{PrefixSet, PrefixSetMut}, + proof::{DecodedProofNodes, ProofRetainer}, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}; use std::{ - collections::VecDeque, sync::{ atomic::{AtomicUsize, Ordering}, - mpsc::{channel, Receiver, SendError, Sender}, + mpsc::{channel, Receiver, Sender}, Arc, }, time::Instant, @@ -48,6 +61,8 @@ use crate::proof_task_metrics::ProofTaskMetrics; type StorageProofResult = Result; type TrieNodeProviderResult = Result, SparseTrieError>; +type AccountMultiproofResult = + Result<(DecodedMultiProof, ParallelTrieStats), ParallelStateRootError>; /// Internal message for storage workers. /// @@ -73,42 +88,20 @@ enum StorageWorkerJob { }, } -impl StorageWorkerJob { - /// Sends an error back to the caller when worker pool is unavailable. 
- /// - /// Returns `Ok(())` if the error was sent successfully, or `Err(())` if the receiver was - /// dropped. - fn send_worker_unavailable_error(&self) -> Result<(), ()> { - let error = - ParallelStateRootError::Other("Storage proof worker pool unavailable".to_string()); - - match self { - Self::StorageProof { result_sender, .. } => { - result_sender.send(Err(error)).map_err(|_| ()) - } - Self::BlindedStorageNode { result_sender, .. } => result_sender - .send(Err(SparseTrieError::from(SparseTrieErrorKind::Other(Box::new(error))))) - .map_err(|_| ()), - } - } -} - /// Manager for coordinating proof request execution across different task types. /// /// # Architecture /// -/// This manager handles two distinct execution paths: +/// This manager operates two distinct worker pools for parallel trie operations: /// -/// 1. **Storage Worker Pool** (for storage trie operations): +/// **Worker Pools**: /// - Pre-spawned workers with dedicated long-lived transactions -/// - Handles `StorageProof` and `BlindedStorageNode` requests -/// - Tasks queued via crossbeam unbounded channel +/// - **Storage pool**: Handles `StorageProof` and `BlindedStorageNode` requests +/// - **Account pool**: Handles `AccountMultiproof` and `BlindedAccountNode` requests, delegates +/// storage proof computation to storage pool +/// - Tasks queued via crossbeam unbounded channels /// - Workers continuously process without transaction overhead -/// - Unbounded queue ensures all storage proofs benefit from transaction reuse -/// -/// 2. **On-Demand Execution** (for account trie operations): -/// - Lazy transaction creation for `BlindedAccountNode` requests -/// - Transactions returned to pool after use for reuse +/// - Returns error if worker pool is unavailable (all workers panicked) /// /// # Public Interface /// @@ -117,7 +110,7 @@ impl StorageWorkerJob { /// - Use standard `std::mpsc` message passing /// - Receive consistent return types and error handling #[derive(Debug)] -pub struct ProofTaskManager { +pub struct ProofTaskManager { /// Sender for storage worker jobs to worker pool. storage_work_tx: CrossbeamSender, @@ -126,33 +119,17 @@ pub struct ProofTaskManager { /// May be less than requested if concurrency limits reduce the worker budget. storage_worker_count: usize, - /// Max number of database transactions to create for on-demand account trie operations. - max_concurrency: usize, - - /// Number of database transactions created for on-demand operations. - total_transactions: usize, - - /// Proof tasks pending execution (account trie operations only). - pending_tasks: VecDeque, - - /// The proof task transactions, containing owned cursor factories that are reused for proof - /// calculation (account trie operations only). - proof_task_txs: Vec>>, + /// Sender for account worker jobs to worker pool. + account_work_tx: CrossbeamSender, - /// Consistent view provider used for creating transactions on-demand. - view: ConsistentDbView, - - /// Proof task context shared across all proof tasks. - task_ctx: ProofTaskCtx, - - /// The underlying handle from which to spawn proof tasks. - executor: Handle, + /// Number of account workers successfully spawned. + account_worker_count: usize, /// Receives proof task requests from [`ProofTaskManagerHandle`]. - proof_task_rx: Receiver>>, + proof_task_rx: CrossbeamReceiver, - /// Internal channel for on-demand tasks to return transactions after use. - tx_sender: Sender>>, + /// Sender for creating handles that can queue tasks. 
+ proof_task_tx: CrossbeamSender, /// The number of active handles. /// @@ -307,47 +284,490 @@ fn storage_worker_loop( ); } -impl ProofTaskManager +// TODO: Refactor this with storage_worker_loop. ProofTaskManager should be removed in the following +// pr and `MultiproofManager` should be used instead to dispatch jobs directly. +/// Worker loop for account trie operations. +/// +/// # Lifecycle +/// +/// Each worker: +/// 1. Receives `AccountWorkerJob` from crossbeam unbounded channel +/// 2. Computes result using its dedicated long-lived transaction +/// 3. Sends result directly to original caller via `std::mpsc` +/// 4. Repeats until channel closes (graceful shutdown) +/// +/// # Transaction Reuse +/// +/// Reuses the same transaction and cursor factories across multiple operations +/// to avoid transaction creation and cursor factory setup overhead. +/// +/// # Panic Safety +/// +/// If this function panics, the worker thread terminates but other workers +/// continue operating and the system degrades gracefully. +/// +/// # Shutdown +/// +/// Worker shuts down when the crossbeam channel closes (all senders dropped). +fn account_worker_loop( + proof_tx: ProofTaskTx, + work_rx: CrossbeamReceiver, + storage_work_tx: CrossbeamSender, + worker_id: usize, +) where + Tx: DbTx, +{ + tracing::debug!( + target: "trie::proof_task", + worker_id, + "Account worker started" + ); + + // Create factories once at worker startup to avoid recreation overhead. + let (trie_cursor_factory, hashed_cursor_factory) = proof_tx.create_factories(); + + // Create blinded provider factory once for all blinded node requests + let blinded_provider_factory = ProofTrieNodeProviderFactory::new( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + proof_tx.task_ctx.prefix_sets.clone(), + ); + + let mut account_proofs_processed = 0u64; + let mut account_nodes_processed = 0u64; + + while let Ok(job) = work_rx.recv() { + match job { + AccountWorkerJob::AccountMultiproof { mut input, result_sender } => { + trace!( + target: "trie::proof_task", + worker_id, + targets = input.targets.len(), + "Processing account multiproof" + ); + + let proof_start = Instant::now(); + let mut tracker = ParallelTrieTracker::default(); + + let mut storage_prefix_sets = + std::mem::take(&mut input.prefix_sets.storage_prefix_sets); + + let storage_root_targets_len = StorageRootTargets::count( + &input.prefix_sets.account_prefix_set, + &storage_prefix_sets, + ); + tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); + + let storage_proof_receivers = match queue_storage_proofs( + &storage_work_tx, + &input.targets, + &mut storage_prefix_sets, + input.collect_branch_node_masks, + input.multi_added_removed_keys.as_ref(), + ) { + Ok(receivers) => receivers, + Err(error) => { + let _ = result_sender.send(Err(error)); + continue; + } + }; + + // Use the missed leaves cache passed from the multiproof manager + let missed_leaves_storage_roots = &input.missed_leaves_storage_roots; + + let account_prefix_set = std::mem::take(&mut input.prefix_sets.account_prefix_set); + + let ctx = AccountMultiproofParams { + targets: &input.targets, + prefix_set: account_prefix_set, + collect_branch_node_masks: input.collect_branch_node_masks, + multi_added_removed_keys: input.multi_added_removed_keys.as_ref(), + storage_proof_receivers, + missed_leaves_storage_roots, + }; + + let result = build_account_multiproof_with_storage_roots( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + ctx, + &mut tracker, + ); + + let 
proof_elapsed = proof_start.elapsed(); + let stats = tracker.finish(); + let result = result.map(|proof| (proof, stats)); + account_proofs_processed += 1; + + if result_sender.send(result).is_err() { + tracing::debug!( + target: "trie::proof_task", + worker_id, + account_proofs_processed, + "Account multiproof receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + proof_time_us = proof_elapsed.as_micros(), + total_processed = account_proofs_processed, + "Account multiproof completed" + ); + } + + AccountWorkerJob::BlindedAccountNode { path, result_sender } => { + trace!( + target: "trie::proof_task", + worker_id, + ?path, + "Processing blinded account node" + ); + + let start = Instant::now(); + let result = blinded_provider_factory.account_node_provider().trie_node(&path); + let elapsed = start.elapsed(); + + account_nodes_processed += 1; + + if result_sender.send(result).is_err() { + tracing::debug!( + target: "trie::proof_task", + worker_id, + ?path, + account_nodes_processed, + "Blinded account node receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + ?path, + node_time_us = elapsed.as_micros(), + total_processed = account_nodes_processed, + "Blinded account node completed" + ); + } + } + } + + tracing::debug!( + target: "trie::proof_task", + worker_id, + account_proofs_processed, + account_nodes_processed, + "Account worker shutting down" + ); +} + +/// Builds an account multiproof by consuming storage proof receivers lazily during trie walk. +/// +/// This is a helper function used by account workers to build the account subtree proof +/// while storage proofs are still being computed. Receivers are consumed only when needed, +/// enabling interleaved parallelism between account trie traversal and storage proof computation. +/// +/// Returns a `DecodedMultiProof` containing the account subtree and storage proofs. +fn build_account_multiproof_with_storage_roots( + trie_cursor_factory: C, + hashed_cursor_factory: H, + ctx: AccountMultiproofParams<'_>, + tracker: &mut ParallelTrieTracker, +) -> Result where - Factory: DatabaseProviderFactory, + C: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, { - /// Creates a new [`ProofTaskManager`] with pre-spawned storage proof workers. + let accounts_added_removed_keys = + ctx.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); + + // Create the walker. + let walker = TrieWalker::<_>::state_trie( + trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, + ctx.prefix_set, + ) + .with_added_removed_keys(accounts_added_removed_keys) + .with_deletions_retained(true); + + // Create a hash builder to rebuild the root node since it is not available in the database. + let retainer = ctx + .targets + .keys() + .map(Nibbles::unpack) + .collect::() + .with_added_removed_keys(accounts_added_removed_keys); + let mut hash_builder = HashBuilder::default() + .with_proof_retainer(retainer) + .with_updates(ctx.collect_branch_node_masks); + + // Initialize storage multiproofs map with pre-allocated capacity. + // Proofs will be inserted as they're consumed from receivers during trie walk. 
+ let mut collected_decoded_storages: B256Map = + B256Map::with_capacity_and_hasher(ctx.targets.len(), Default::default()); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); + let mut account_node_iter = TrieNodeIter::state_trie( + walker, + hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, + ); + + let mut storage_proof_receivers = ctx.storage_proof_receivers; + + while let Some(account_node) = account_node_iter.try_next().map_err(ProviderError::Database)? { + match account_node { + TrieElement::Branch(node) => { + hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); + } + TrieElement::Leaf(hashed_address, account) => { + let root = match storage_proof_receivers.remove(&hashed_address) { + Some(receiver) => { + // Block on this specific storage proof receiver - enables interleaved + // parallelism + let proof = receiver.recv().map_err(|_| { + ParallelStateRootError::StorageRoot( + reth_execution_errors::StorageRootError::Database( + DatabaseError::Other(format!( + "Storage proof channel closed for {hashed_address}" + )), + ), + ) + })??; + let root = proof.root; + collected_decoded_storages.insert(hashed_address, proof); + root + } + // Since we do not store all intermediate nodes in the database, there might + // be a possibility of re-adding a non-modified leaf to the hash builder. + None => { + tracker.inc_missed_leaves(); + + match ctx.missed_leaves_storage_roots.entry(hashed_address) { + dashmap::Entry::Occupied(occ) => *occ.get(), + dashmap::Entry::Vacant(vac) => { + let root = StorageProof::new_hashed( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + hashed_address, + ) + .with_prefix_set_mut(Default::default()) + .storage_multiproof( + ctx.targets.get(&hashed_address).cloned().unwrap_or_default(), + ) + .map_err(|e| { + ParallelStateRootError::StorageRoot( + reth_execution_errors::StorageRootError::Database( + DatabaseError::Other(e.to_string()), + ), + ) + })? + .root; + + vac.insert(root); + root + } + } + } + }; + + // Encode account + account_rlp.clear(); + let account = account.into_trie_account(root); + account.encode(&mut account_rlp as &mut dyn BufMut); + + hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); + } + } + } + + // Consume remaining storage proof receivers for accounts not encountered during trie walk. + for (hashed_address, receiver) in storage_proof_receivers { + if let Ok(Ok(proof)) = receiver.recv() { + collected_decoded_storages.insert(hashed_address, proof); + } + } + + let _ = hash_builder.root(); + + let account_subtree_raw_nodes = hash_builder.take_proof_nodes(); + let decoded_account_subtree = DecodedProofNodes::try_from(account_subtree_raw_nodes)?; + + let (branch_node_hash_masks, branch_node_tree_masks) = if ctx.collect_branch_node_masks { + let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); + ( + updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), + updated_branch_nodes.into_iter().map(|(path, node)| (path, node.tree_mask)).collect(), + ) + } else { + (Default::default(), Default::default()) + }; + + Ok(DecodedMultiProof { + account_subtree: decoded_account_subtree, + branch_node_hash_masks, + branch_node_tree_masks, + storages: collected_decoded_storages, + }) +} + +/// Queues storage proofs for all accounts in the targets and returns receivers. 
+/// +/// This function queues all storage proof tasks to the worker pool but returns immediately +/// with receivers, allowing the account trie walk to proceed in parallel with storage proof +/// computation. This enables interleaved parallelism for better performance. +/// +/// Propagates errors up if queuing fails. Receivers must be consumed by the caller. +fn queue_storage_proofs( + storage_work_tx: &CrossbeamSender, + targets: &MultiProofTargets, + storage_prefix_sets: &mut B256Map, + with_branch_node_masks: bool, + multi_added_removed_keys: Option<&Arc>, +) -> Result>, ParallelStateRootError> { + let mut storage_proof_receivers = + B256Map::with_capacity_and_hasher(targets.len(), Default::default()); + + // Queue all storage proofs to worker pool + for (hashed_address, target_slots) in targets.iter() { + let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); + + // Always queue a storage proof so we obtain the storage root even when no slots are + // requested. + let input = StorageProofInput::new( + *hashed_address, + prefix_set, + target_slots.clone(), + with_branch_node_masks, + multi_added_removed_keys.cloned(), + ); + + let (sender, receiver) = channel(); + + // If queuing fails, propagate error up (no fallback) + storage_work_tx + .send(StorageWorkerJob::StorageProof { input, result_sender: sender }) + .map_err(|_| { + ParallelStateRootError::Other(format!( + "Failed to queue storage proof for {}: storage worker pool unavailable", + hashed_address + )) + })?; + + storage_proof_receivers.insert(*hashed_address, receiver); + } + + Ok(storage_proof_receivers) +} + +impl ProofTaskManager { + /// Creates a new [`ProofTaskManager`] with pre-spawned storage and account proof workers. + /// + /// This manager coordinates both storage and account worker pools: + /// - Storage workers handle `StorageProof` and `BlindedStorageNode` requests + /// - Account workers handle `AccountMultiproof` and `BlindedAccountNode` requests /// /// The `storage_worker_count` determines how many storage workers to spawn, and - /// `max_concurrency` determines the limit for on-demand operations (blinded account nodes). - /// These are now independent - storage workers are spawned as requested, and on-demand - /// operations use a separate concurrency pool for blinded account nodes. + /// `account_worker_count` determines how many account workers to spawn. /// Returns an error if the underlying provider fails to create the transactions required for /// spawning workers. - pub fn new( + pub fn new( executor: Handle, view: ConsistentDbView, task_ctx: ProofTaskCtx, - max_concurrency: usize, storage_worker_count: usize, - ) -> ProviderResult { - let (tx_sender, proof_task_rx) = channel(); + account_worker_count: usize, + ) -> ProviderResult + where + Factory: DatabaseProviderFactory, + { + // Use unbounded channel for the router to prevent account workers from blocking + // when queuing storage proofs. Account workers queue many storage proofs through + // this channel, and blocking on a bounded channel wastes parallel worker capacity. + let (proof_task_tx, proof_task_rx) = unbounded(); // Use unbounded channel to ensure all storage operations are queued to workers. // This maintains transaction reuse benefits and avoids fallback to on-demand execution. 
let (storage_work_tx, storage_work_rx) = unbounded::(); + let (account_work_tx, account_work_rx) = unbounded::(); tracing::info!( target: "trie::proof_task", storage_worker_count, - max_concurrency, - "Initializing storage worker pool with unbounded queue" + account_worker_count, + "Initializing storage and account worker pools with unbounded queues" ); + // Spawn storage workers + let spawned_storage_workers = Self::spawn_storage_workers( + &executor, + &view, + &task_ctx, + storage_worker_count, + storage_work_rx, + )?; + + // Spawn account workers with direct access to the storage worker queue + let spawned_account_workers = Self::spawn_account_workers( + &executor, + &view, + &task_ctx, + account_worker_count, + account_work_rx, + storage_work_tx.clone(), + )?; + + Ok(Self { + storage_work_tx, + storage_worker_count: spawned_storage_workers, + account_work_tx, + account_worker_count: spawned_account_workers, + proof_task_rx, + proof_task_tx, + active_handles: Arc::new(AtomicUsize::new(0)), + + #[cfg(feature = "metrics")] + metrics: ProofTaskMetrics::default(), + }) + } + + /// Returns a handle for sending new proof tasks to the [`ProofTaskManager`]. + pub fn handle(&self) -> ProofTaskManagerHandle { + ProofTaskManagerHandle::new(self.proof_task_tx.clone(), self.active_handles.clone()) + } + + /// Spawns a pool of storage workers with dedicated database transactions. + /// + /// Each worker receives `StorageWorkerJob` from the channel and processes storage proofs + /// and blinded storage node requests using a dedicated long-lived transaction. + /// + /// # Parameters + /// - `executor`: Tokio runtime handle for spawning blocking tasks + /// - `view`: Consistent database view for creating transactions + /// - `task_ctx`: Shared context with trie updates and prefix sets + /// - `worker_count`: Number of storage workers to spawn + /// - `work_rx`: Receiver for storage worker jobs + /// + /// # Returns + /// The number of storage workers successfully spawned + fn spawn_storage_workers( + executor: &Handle, + view: &ConsistentDbView, + task_ctx: &ProofTaskCtx, + worker_count: usize, + work_rx: CrossbeamReceiver, + ) -> ProviderResult + where + Factory: DatabaseProviderFactory, + { let mut spawned_workers = 0; - for worker_id in 0..storage_worker_count { - let provider_ro = view.provider_ro()?; + for worker_id in 0..worker_count { + let provider_ro = view.provider_ro()?; let tx = provider_ro.into_tx(); let proof_task_tx = ProofTaskTx::new(tx, task_ctx.clone(), worker_id); - let work_rx = storage_work_rx.clone(); + let work_rx_clone = work_rx.clone(); - executor.spawn_blocking(move || storage_worker_loop(proof_task_tx, work_rx, worker_id)); + executor.spawn_blocking(move || { + storage_worker_loop(proof_task_tx, work_rx_clone, worker_id) + }); spawned_workers += 1; @@ -359,99 +779,77 @@ where ); } - Ok(Self { - storage_work_tx, - storage_worker_count: spawned_workers, - max_concurrency, - total_transactions: 0, - pending_tasks: VecDeque::new(), - proof_task_txs: Vec::with_capacity(max_concurrency), - view, - task_ctx, - executor, - proof_task_rx, - tx_sender, - active_handles: Arc::new(AtomicUsize::new(0)), - - #[cfg(feature = "metrics")] - metrics: ProofTaskMetrics::default(), - }) + Ok(spawned_workers) } - /// Returns a handle for sending new proof tasks to the [`ProofTaskManager`]. 
- pub fn handle(&self) -> ProofTaskManagerHandle> { - ProofTaskManagerHandle::new(self.tx_sender.clone(), self.active_handles.clone()) - } -} - -impl ProofTaskManager -where - Factory: DatabaseProviderFactory + 'static, -{ - /// Inserts the task into the pending tasks queue. - pub fn queue_proof_task(&mut self, task: ProofTaskKind) { - self.pending_tasks.push_back(task); - } - - /// Gets either the next available transaction, or creates a new one if all are in use and the - /// total number of transactions created is less than the max concurrency. - pub fn get_or_create_tx(&mut self) -> ProviderResult>>> { - if let Some(proof_task_tx) = self.proof_task_txs.pop() { - return Ok(Some(proof_task_tx)); - } + /// Spawns a pool of account workers with dedicated database transactions. + /// + /// Each worker receives `AccountWorkerJob` from the channel and processes account multiproofs + /// and blinded account node requests using a dedicated long-lived transaction. Account workers + /// can delegate storage proof computation to the storage worker pool. + /// + /// # Parameters + /// - `executor`: Tokio runtime handle for spawning blocking tasks + /// - `view`: Consistent database view for creating transactions + /// - `task_ctx`: Shared context with trie updates and prefix sets + /// - `worker_count`: Number of account workers to spawn + /// - `work_rx`: Receiver for account worker jobs + /// - `storage_work_tx`: Sender to delegate storage proofs to storage worker pool + /// + /// # Returns + /// The number of account workers successfully spawned + fn spawn_account_workers( + executor: &Handle, + view: &ConsistentDbView, + task_ctx: &ProofTaskCtx, + worker_count: usize, + work_rx: CrossbeamReceiver, + storage_work_tx: CrossbeamSender, + ) -> ProviderResult + where + Factory: DatabaseProviderFactory, + { + let mut spawned_workers = 0; - // if we can create a new tx within our concurrency limits, create one on-demand - if self.total_transactions < self.max_concurrency { - let provider_ro = self.view.provider_ro()?; + for worker_id in 0..worker_count { + let provider_ro = view.provider_ro()?; let tx = provider_ro.into_tx(); - self.total_transactions += 1; - return Ok(Some(ProofTaskTx::new(tx, self.task_ctx.clone(), self.total_transactions))); - } + let proof_task_tx = ProofTaskTx::new(tx, task_ctx.clone(), worker_id); + let work_rx_clone = work_rx.clone(); + let storage_work_tx_clone = storage_work_tx.clone(); - Ok(None) - } + executor.spawn_blocking(move || { + account_worker_loop(proof_task_tx, work_rx_clone, storage_work_tx_clone, worker_id) + }); - /// Spawns the next queued proof task on the executor with the given input, if there are any - /// transactions available. - /// - /// This will return an error if a transaction must be created on-demand and the consistent view - /// provider fails. - pub fn try_spawn_next(&mut self) -> ProviderResult<()> { - let Some(task) = self.pending_tasks.pop_front() else { return Ok(()) }; - - let Some(proof_task_tx) = self.get_or_create_tx()? 
else { - // if there are no txs available, requeue the proof task - self.pending_tasks.push_front(task); - return Ok(()) - }; - - let tx_sender = self.tx_sender.clone(); - self.executor.spawn_blocking(move || match task { - ProofTaskKind::BlindedAccountNode(path, sender) => { - proof_task_tx.blinded_account_node(path, sender, tx_sender); - } - // Storage trie operations should never reach here as they're routed to worker pool - ProofTaskKind::BlindedStorageNode(_, _, _) | ProofTaskKind::StorageProof(_, _) => { - unreachable!("Storage trie operations should be routed to worker pool") - } - }); + spawned_workers += 1; - Ok(()) + tracing::debug!( + target: "trie::proof_task", + worker_id, + spawned_workers, + "Account worker spawned successfully" + ); + } + + Ok(spawned_workers) } - /// Loops, managing the proof tasks, and sending new tasks to the executor. + /// Loops, managing the proof tasks, routing them to the appropriate worker pools. /// /// # Task Routing /// /// - **Storage Trie Operations** (`StorageProof` and `BlindedStorageNode`): Routed to - /// pre-spawned worker pool via unbounded channel. - /// - **Account Trie Operations** (`BlindedAccountNode`): Queued for on-demand execution via - /// `pending_tasks`. + /// pre-spawned storage worker pool via unbounded channel. Returns error if workers are + /// disconnected (e.g., all workers panicked). + /// - **Account Trie Operations** (`AccountMultiproof` and `BlindedAccountNode`): Routed to + /// pre-spawned account worker pool via unbounded channel. Returns error if workers are + /// disconnected. /// /// # Shutdown /// - /// On termination, `storage_work_tx` is dropped, closing the channel and - /// signaling all workers to shut down gracefully. + /// On termination, `storage_work_tx` and `account_work_tx` are dropped, closing the channels + /// and signaling all workers to shut down gracefully. 
pub fn run(mut self) -> ProviderResult<()> { loop { match self.proof_task_rx.recv() { @@ -459,27 +857,17 @@ where match message { ProofTaskMessage::QueueTask(task) => match task { ProofTaskKind::StorageProof(input, sender) => { - match self.storage_work_tx.send(StorageWorkerJob::StorageProof { - input, - result_sender: sender, - }) { - Ok(_) => { - tracing::trace!( - target: "trie::proof_task", - "Storage proof dispatched to worker pool" - ); - } - Err(crossbeam_channel::SendError(job)) => { - tracing::error!( - target: "trie::proof_task", - storage_worker_count = self.storage_worker_count, - "Worker pool disconnected, cannot process storage proof" - ); - - // Send error back to caller - let _ = job.send_worker_unavailable_error(); - } - } + self.storage_work_tx + .send(StorageWorkerJob::StorageProof { + input, + result_sender: sender, + }) + .expect("failed to dispatch storage proof: storage worker pool unavailable (all workers panicked or pool shut down)"); + + tracing::trace!( + target: "trie::proof_task", + "Storage proof dispatched to worker pool" + ); } ProofTaskKind::BlindedStorageNode(account, path, sender) => { @@ -488,56 +876,65 @@ where self.metrics.storage_nodes += 1; } - match self.storage_work_tx.send( - StorageWorkerJob::BlindedStorageNode { + self.storage_work_tx + .send(StorageWorkerJob::BlindedStorageNode { account, path, result_sender: sender, - }, - ) { - Ok(_) => { - tracing::trace!( - target: "trie::proof_task", - ?account, - ?path, - "Blinded storage node dispatched to worker pool" - ); - } - Err(crossbeam_channel::SendError(job)) => { - tracing::warn!( - target: "trie::proof_task", - storage_worker_count = self.storage_worker_count, - ?account, - ?path, - "Worker pool disconnected, cannot process blinded storage node" - ); - - // Send error back to caller - let _ = job.send_worker_unavailable_error(); - } - } + }) + .expect("failed to dispatch blinded storage node: storage worker pool unavailable (all workers panicked or pool shut down)"); + + tracing::trace!( + target: "trie::proof_task", + ?account, + ?path, + "Blinded storage node dispatched to worker pool" + ); } - ProofTaskKind::BlindedAccountNode(_, _) => { - // Route account trie operations to pending_tasks + ProofTaskKind::BlindedAccountNode(path, sender) => { #[cfg(feature = "metrics")] { self.metrics.account_nodes += 1; } - self.queue_proof_task(task); + + self.account_work_tx + .send(AccountWorkerJob::BlindedAccountNode { + path, + result_sender: sender, + }) + .expect("failed to dispatch blinded account node: account worker pool unavailable (all workers panicked or pool shut down)"); + + tracing::trace!( + target: "trie::proof_task", + ?path, + "Blinded account node dispatched to worker pool" + ); + } + + ProofTaskKind::AccountMultiproof(input, sender) => { + self.account_work_tx + .send(AccountWorkerJob::AccountMultiproof { + input, + result_sender: sender, + }) + .expect("failed to dispatch account multiproof: account worker pool unavailable (all workers panicked or pool shut down)"); + + tracing::trace!( + target: "trie::proof_task", + "Account multiproof dispatched to worker pool" + ); } }, - ProofTaskMessage::Transaction(tx) => { - // Return transaction to pending_tasks pool - self.proof_task_txs.push(tx); - } ProofTaskMessage::Terminate => { - // Drop storage_work_tx to signal workers to shut down + // Drop worker channels to signal workers to shut down drop(self.storage_work_tx); + drop(self.account_work_tx); tracing::debug!( target: "trie::proof_task", storage_worker_count = 
self.storage_worker_count, + account_worker_count = self.account_worker_count, "Shutting down proof task manager, signaling workers to terminate" ); @@ -553,9 +950,6 @@ where // However this should never happen, as this struct stores a sender Err(_) => return Ok(()), }; - - // Try spawning pending account trie tasks - self.try_spawn_next()?; } } } @@ -672,49 +1066,6 @@ where decoded_result } - - /// Retrieves blinded account node by path. - fn blinded_account_node( - self, - path: Nibbles, - result_sender: Sender, - tx_sender: Sender>, - ) { - trace!( - target: "trie::proof_task", - ?path, - "Starting blinded account node retrieval" - ); - - let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - - let blinded_provider_factory = ProofTrieNodeProviderFactory::new( - trie_cursor_factory, - hashed_cursor_factory, - self.task_ctx.prefix_sets.clone(), - ); - - let start = Instant::now(); - let result = blinded_provider_factory.account_node_provider().trie_node(&path); - trace!( - target: "trie::proof_task", - ?path, - elapsed = ?start.elapsed(), - "Completed blinded account node retrieval" - ); - - if let Err(error) = result_sender.send(result) { - tracing::error!( - target: "trie::proof_task", - ?path, - ?error, - "Failed to send blinded account node result" - ); - } - - // send the tx back - let _ = tx_sender.send(ProofTaskMessage::Transaction(self)); - } } /// This represents an input for a storage proof. @@ -752,6 +1103,59 @@ impl StorageProofInput { } } +/// Input parameters for account multiproof computation. +#[derive(Debug, Clone)] +pub struct AccountMultiproofInput { + /// The targets for which to compute the multiproof. + pub targets: MultiProofTargets, + /// The prefix sets for the proof calculation. + pub prefix_sets: TriePrefixSets, + /// Whether or not to collect branch node masks. + pub collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + pub multi_added_removed_keys: Option>, + /// Cached storage proof roots for missed leaves encountered during account trie walk. + pub missed_leaves_storage_roots: Arc>, +} + +/// Parameters for building an account multiproof with pre-computed storage roots. +struct AccountMultiproofParams<'a> { + /// The targets for which to compute the multiproof. + targets: &'a MultiProofTargets, + /// The prefix set for the account trie walk. + prefix_set: PrefixSet, + /// Whether or not to collect branch node masks. + collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + multi_added_removed_keys: Option<&'a Arc>, + /// Receivers for storage proofs being computed in parallel. + storage_proof_receivers: B256Map>, + /// Cached storage proof roots for missed leaves encountered during account trie walk. + missed_leaves_storage_roots: &'a DashMap, +} + +/// Internal message for account workers. +/// +/// This is NOT exposed publicly. External callers use `ProofTaskKind::AccountMultiproof` or +/// `ProofTaskKind::BlindedAccountNode` which are routed through the manager's `std::mpsc` channel. 
+#[derive(Debug)] +enum AccountWorkerJob { + /// Account multiproof computation request + AccountMultiproof { + /// Account multiproof input parameters + input: AccountMultiproofInput, + /// Channel to send result back to original caller + result_sender: Sender, + }, + /// Blinded account node retrieval request + BlindedAccountNode { + /// Path to the account node + path: Nibbles, + /// Channel to send result back to original caller + result_sender: Sender, + }, +} + /// Data used for initializing cursor factories that is shared across all storage proof instances. #[derive(Debug, Clone)] pub struct ProofTaskCtx { @@ -779,11 +1183,9 @@ impl ProofTaskCtx { /// Message used to communicate with [`ProofTaskManager`]. #[derive(Debug)] -pub enum ProofTaskMessage { +pub enum ProofTaskMessage { /// A request to queue a proof task. QueueTask(ProofTaskKind), - /// A returned database transaction. - Transaction(ProofTaskTx), /// A request to terminate the proof task manager. Terminate, } @@ -800,27 +1202,35 @@ pub enum ProofTaskKind { BlindedAccountNode(Nibbles, Sender), /// A blinded storage node request. BlindedStorageNode(B256, Nibbles, Sender), + /// An account multiproof request. + AccountMultiproof(AccountMultiproofInput, Sender), } /// A handle that wraps a single proof task sender that sends a terminate message on `Drop` if the /// number of active handles went to zero. #[derive(Debug)] -pub struct ProofTaskManagerHandle { +pub struct ProofTaskManagerHandle { /// The sender for the proof task manager. - sender: Sender>, + sender: CrossbeamSender, /// The number of active handles. active_handles: Arc, } -impl ProofTaskManagerHandle { +impl ProofTaskManagerHandle { /// Creates a new [`ProofTaskManagerHandle`] with the given sender. - pub fn new(sender: Sender>, active_handles: Arc) -> Self { + pub fn new( + sender: CrossbeamSender, + active_handles: Arc, + ) -> Self { active_handles.fetch_add(1, Ordering::SeqCst); Self { sender, active_handles } } /// Queues a task to the proof task manager. - pub fn queue_task(&self, task: ProofTaskKind) -> Result<(), SendError>> { + pub fn queue_task( + &self, + task: ProofTaskKind, + ) -> Result<(), crossbeam_channel::SendError> { self.sender.send(ProofTaskMessage::QueueTask(task)) } @@ -830,13 +1240,13 @@ impl ProofTaskManagerHandle { } } -impl Clone for ProofTaskManagerHandle { +impl Clone for ProofTaskManagerHandle { fn clone(&self) -> Self { Self::new(self.sender.clone(), self.active_handles.clone()) } } -impl Drop for ProofTaskManagerHandle { +impl Drop for ProofTaskManagerHandle { fn drop(&mut self) { // Decrement the number of active handles and terminate the manager if it was the last // handle. @@ -846,9 +1256,9 @@ impl Drop for ProofTaskManagerHandle { } } -impl TrieNodeProviderFactory for ProofTaskManagerHandle { - type AccountNodeProvider = ProofTaskTrieNodeProvider; - type StorageNodeProvider = ProofTaskTrieNodeProvider; +impl TrieNodeProviderFactory for ProofTaskManagerHandle { + type AccountNodeProvider = ProofTaskTrieNodeProvider; + type StorageNodeProvider = ProofTaskTrieNodeProvider; fn account_node_provider(&self) -> Self::AccountNodeProvider { ProofTaskTrieNodeProvider::AccountNode { sender: self.sender.clone() } @@ -861,22 +1271,22 @@ impl TrieNodeProviderFactory for ProofTaskManagerHandle { /// Trie node provider for retrieving trie nodes by path. #[derive(Debug)] -pub enum ProofTaskTrieNodeProvider { +pub enum ProofTaskTrieNodeProvider { /// Blinded account trie node provider. AccountNode { /// Sender to the proof task. 
- sender: Sender>, + sender: CrossbeamSender, }, /// Blinded storage trie node provider. StorageNode { /// Target account. account: B256, /// Sender to the proof task. - sender: Sender>, + sender: CrossbeamSender, }, } -impl TrieNodeProvider for ProofTaskTrieNodeProvider { +impl TrieNodeProvider for ProofTaskTrieNodeProvider { fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { let (tx, rx) = channel(); match self { @@ -919,7 +1329,7 @@ mod tests { ) } - /// Ensures `max_concurrency` is independent of storage workers. + /// Ensures `max_concurrency` is independent of storage and account workers. #[test] fn proof_task_manager_independent_pools() { let runtime = Builder::new_multi_thread().worker_threads(1).enable_all().build().unwrap(); @@ -929,11 +1339,11 @@ mod tests { let view = ConsistentDbView::new(factory, None); let ctx = test_ctx(); - let manager = ProofTaskManager::new(handle.clone(), view, ctx, 1, 5).unwrap(); - // With storage_worker_count=5, we get exactly 5 workers + let manager = ProofTaskManager::new(handle.clone(), view, ctx, 5, 3).unwrap(); + // With storage_worker_count=5, we get exactly 5 storage workers assert_eq!(manager.storage_worker_count, 5); - // max_concurrency=1 is for on-demand operations only - assert_eq!(manager.max_concurrency, 1); + // With account_worker_count=3, we get exactly 3 account workers + assert_eq!(manager.account_worker_count, 3); drop(manager); task::yield_now().await; diff --git a/crates/trie/parallel/src/storage_root_targets.rs b/crates/trie/parallel/src/storage_root_targets.rs index f844b70fca5..0c6d9f43498 100644 --- a/crates/trie/parallel/src/storage_root_targets.rs +++ b/crates/trie/parallel/src/storage_root_targets.rs @@ -24,6 +24,23 @@ impl StorageRootTargets { .collect(), ) } + + /// Returns the total number of unique storage root targets without allocating new maps. + pub fn count( + account_prefix_set: &PrefixSet, + storage_prefix_sets: &B256Map, + ) -> usize { + let mut count = storage_prefix_sets.len(); + + for nibbles in account_prefix_set { + let hashed_address = B256::from_slice(&nibbles.pack()); + if !storage_prefix_sets.contains_key(&hashed_address) { + count += 1; + } + } + + count + } } impl IntoIterator for StorageRootTargets { diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 086187bc927..edb982caf88 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -867,6 +867,9 @@ Engine: --engine.storage-worker-count Configure the number of storage proof workers in the Tokio blocking pool. If not specified, defaults to 2x available parallelism, clamped between 2 and 64 + --engine.account-worker-count + Configure the number of account proof workers in the Tokio blocking pool. 
If not specified, defaults to the same count as storage workers + ERA: --era.enable Enable import from ERA1 files From 082b5dad3782418339c85b05ffa8d295891684a6 Mon Sep 17 00:00:00 2001 From: anim001k <140460766+anim001k@users.noreply.github.com> Date: Wed, 15 Oct 2025 03:11:01 +0200 Subject: [PATCH 046/371] refactor(storage): fix ChainStateKey enum variant name (#18992) --- crates/storage/db-api/src/tables/mod.rs | 6 +++--- crates/storage/provider/src/providers/database/provider.rs | 6 ++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/crates/storage/db-api/src/tables/mod.rs b/crates/storage/db-api/src/tables/mod.rs index a5cb5ff477d..259b2d39b15 100644 --- a/crates/storage/db-api/src/tables/mod.rs +++ b/crates/storage/db-api/src/tables/mod.rs @@ -531,7 +531,7 @@ pub enum ChainStateKey { /// Last finalized block key LastFinalizedBlock, /// Last safe block key - LastSafeBlockBlock, + LastSafeBlock, } impl Encode for ChainStateKey { @@ -540,7 +540,7 @@ impl Encode for ChainStateKey { fn encode(self) -> Self::Encoded { match self { Self::LastFinalizedBlock => [0], - Self::LastSafeBlockBlock => [1], + Self::LastSafeBlock => [1], } } } @@ -549,7 +549,7 @@ impl Decode for ChainStateKey { fn decode(value: &[u8]) -> Result { match value { [0] => Ok(Self::LastFinalizedBlock), - [1] => Ok(Self::LastSafeBlockBlock), + [1] => Ok(Self::LastSafeBlock), _ => Err(crate::DatabaseError::Decode), } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index f534a0ea127..6fdc37c4f53 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -2866,7 +2866,7 @@ impl ChainStateBlockReader for DatabaseProvide let mut finalized_blocks = self .tx .cursor_read::()? - .walk(Some(tables::ChainStateKey::LastSafeBlockBlock))? + .walk(Some(tables::ChainStateKey::LastSafeBlock))? .take(1) .collect::, _>>()?; @@ -2883,9 +2883,7 @@ impl ChainStateBlockWriter for DatabaseProvider ProviderResult<()> { - Ok(self - .tx - .put::(tables::ChainStateKey::LastSafeBlockBlock, block_number)?) + Ok(self.tx.put::(tables::ChainStateKey::LastSafeBlock, block_number)?) } } From 11c9949add5008237c735d2d22b3b57e6a32b99f Mon Sep 17 00:00:00 2001 From: YK Date: Wed, 15 Oct 2025 09:49:39 +0800 Subject: [PATCH 047/371] refactor(trie): remove proof task manager (#18934) Co-authored-by: Brian Picciano Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- crates/engine/primitives/src/config.rs | 13 +- .../tree/src/tree/payload_processor/mod.rs | 29 +- .../src/tree/payload_processor/multiproof.rs | 47 +- .../engine/tree/src/tree/payload_validator.rs | 5 +- crates/trie/parallel/src/proof.rs | 62 +- crates/trie/parallel/src/proof_task.rs | 658 ++++++------------ .../trie/parallel/src/proof_task_metrics.rs | 19 - 7 files changed, 265 insertions(+), 568 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 70763b6701f..9e2c8210f08 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -9,11 +9,14 @@ pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0; /// Default maximum concurrency for on-demand proof tasks (blinded nodes) pub const DEFAULT_MAX_PROOF_TASK_CONCURRENCY: u64 = 256; +/// Minimum number of workers we allow configuring explicitly. 
+pub const MIN_WORKER_COUNT: usize = 32; + /// Returns the default number of storage worker threads based on available parallelism. fn default_storage_worker_count() -> usize { #[cfg(feature = "std")] { - std::thread::available_parallelism().map(|n| (n.get() * 2).clamp(2, 64)).unwrap_or(8) + std::thread::available_parallelism().map_or(8, |n| n.get() * 2).min(MIN_WORKER_COUNT) } #[cfg(not(feature = "std"))] { @@ -491,8 +494,8 @@ impl TreeConfig { } /// Setter for the number of storage proof worker threads. - pub const fn with_storage_worker_count(mut self, storage_worker_count: usize) -> Self { - self.storage_worker_count = storage_worker_count; + pub fn with_storage_worker_count(mut self, storage_worker_count: usize) -> Self { + self.storage_worker_count = storage_worker_count.max(MIN_WORKER_COUNT); self } @@ -502,8 +505,8 @@ impl TreeConfig { } /// Setter for the number of account proof worker threads. - pub const fn with_account_worker_count(mut self, account_worker_count: usize) -> Self { - self.account_worker_count = account_worker_count; + pub fn with_account_worker_count(mut self, account_worker_count: usize) -> Self { + self.account_worker_count = account_worker_count.max(MIN_WORKER_COUNT); self } } diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index c24b0d1fe16..f3ecdfa86d5 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -32,7 +32,7 @@ use reth_provider::{ use reth_revm::{db::BundleState, state::EvmState}; use reth_trie::TrieInput; use reth_trie_parallel::{ - proof_task::{ProofTaskCtx, ProofTaskManager}, + proof_task::{ProofTaskCtx, ProofWorkerHandle}, root::ParallelStateRootError, }; use reth_trie_sparse::{ @@ -167,8 +167,7 @@ where /// This returns a handle to await the final state root and to interact with the tasks (e.g. /// canceling) /// - /// Returns an error with the original transactions iterator if the proof task manager fails to - /// initialize. + /// Returns an error with the original transactions iterator if proof worker spawning fails. #[allow(clippy::type_complexity)] pub fn spawn>( &mut self, @@ -204,14 +203,14 @@ where let storage_worker_count = config.storage_worker_count(); let account_worker_count = config.account_worker_count(); let max_proof_task_concurrency = config.max_proof_task_concurrency() as usize; - let proof_task = match ProofTaskManager::new( + let proof_handle = match ProofWorkerHandle::new( self.executor.handle().clone(), consistent_view, task_ctx, storage_worker_count, account_worker_count, ) { - Ok(task) => task, + Ok(handle) => handle, Err(error) => { return Err((error, transactions, env, provider_builder)); } @@ -223,7 +222,7 @@ where let multi_proof_task = MultiProofTask::new( state_root_config, self.executor.clone(), - proof_task.handle(), + proof_handle.clone(), to_sparse_trie, max_multi_proof_task_concurrency, config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()), @@ -252,19 +251,7 @@ where let (state_root_tx, state_root_rx) = channel(); // Spawn the sparse trie task using any stored trie and parallel trie configuration. 
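Aside (not part of the patch): a small illustration of how the defaulting and flooring introduced in the config.rs hunk above combine, assuming MIN_WORKER_COUNT = 32 as in this diff. The helpers below only mirror the formula; they do not call into reth.

const MIN_WORKER_COUNT: usize = 32;

// Mirrors `default_storage_worker_count`: 2x parallelism, falling back to 8,
// then limited by MIN_WORKER_COUNT.
fn default_worker_count_for(parallelism: Option<usize>) -> usize {
    parallelism.map_or(8, |n| n * 2).min(MIN_WORKER_COUNT)
}

// Mirrors `with_storage_worker_count` / `with_account_worker_count`:
// explicit values are raised to the MIN_WORKER_COUNT floor.
fn explicit_worker_count(requested: usize) -> usize {
    requested.max(MIN_WORKER_COUNT)
}

fn main() {
    assert_eq!(default_worker_count_for(Some(8)), 16);  // 2x parallelism, under the limit
    assert_eq!(default_worker_count_for(Some(24)), 32); // limited by MIN_WORKER_COUNT
    assert_eq!(default_worker_count_for(None), 8);      // parallelism unknown, fallback
    assert_eq!(explicit_worker_count(4), 32);           // explicit value raised to the floor
    assert_eq!(explicit_worker_count(64), 64);          // values above the floor pass through
}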
- self.spawn_sparse_trie_task(sparse_trie_rx, proof_task.handle(), state_root_tx); - - // spawn the proof task - self.executor.spawn_blocking(move || { - if let Err(err) = proof_task.run() { - // At least log if there is an error at any point - tracing::error!( - target: "engine::root", - ?err, - "Storage proof task returned an error" - ); - } - }); + self.spawn_sparse_trie_task(sparse_trie_rx, proof_handle, state_root_tx); Ok(PayloadHandle { to_multi_proof, @@ -406,7 +393,7 @@ where fn spawn_sparse_trie_task( &self, sparse_trie_rx: mpsc::Receiver, - proof_task_handle: BPF, + proof_worker_handle: BPF, state_root_tx: mpsc::Sender>, ) where BPF: TrieNodeProviderFactory + Clone + Send + Sync + 'static, @@ -436,7 +423,7 @@ where let task = SparseTrieTask::<_, ConfiguredSparseTrie, ConfiguredSparseTrie>::new_with_cleared_trie( sparse_trie_rx, - proof_task_handle, + proof_worker_handle, self.trie_metrics.clone(), sparse_state_trie, ); diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index f865312b83d..4a71bf620f7 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -20,7 +20,7 @@ use reth_trie::{ }; use reth_trie_parallel::{ proof::ParallelProof, - proof_task::{AccountMultiproofInput, ProofTaskKind, ProofTaskManagerHandle}, + proof_task::{AccountMultiproofInput, ProofWorkerHandle}, root::ParallelStateRootError, }; use std::{ @@ -346,11 +346,8 @@ pub struct MultiproofManager { pending: VecDeque, /// Executor for tasks executor: WorkloadExecutor, - /// Handle to the proof task manager used for creating `ParallelProof` instances for storage - /// proofs. - storage_proof_task_handle: ProofTaskManagerHandle, - /// Handle to the proof task manager used for account multiproofs. - account_proof_task_handle: ProofTaskManagerHandle, + /// Handle to the proof worker pools (storage and account). + proof_worker_handle: ProofWorkerHandle, /// Cached storage proof roots for missed leaves; this maps /// hashed (missed) addresses to their storage proof roots. 
/// @@ -372,8 +369,7 @@ impl MultiproofManager { fn new( executor: WorkloadExecutor, metrics: MultiProofTaskMetrics, - storage_proof_task_handle: ProofTaskManagerHandle, - account_proof_task_handle: ProofTaskManagerHandle, + proof_worker_handle: ProofWorkerHandle, max_concurrent: usize, ) -> Self { Self { @@ -382,8 +378,7 @@ impl MultiproofManager { executor, inflight: 0, metrics, - storage_proof_task_handle, - account_proof_task_handle, + proof_worker_handle, missed_leaves_storage_roots: Default::default(), } } @@ -452,7 +447,7 @@ impl MultiproofManager { multi_added_removed_keys, } = storage_multiproof_input; - let storage_proof_task_handle = self.storage_proof_task_handle.clone(); + let storage_proof_worker_handle = self.proof_worker_handle.clone(); let missed_leaves_storage_roots = self.missed_leaves_storage_roots.clone(); self.executor.spawn_blocking(move || { @@ -471,7 +466,7 @@ impl MultiproofManager { config.state_sorted, config.prefix_sets, missed_leaves_storage_roots, - storage_proof_task_handle, + storage_proof_worker_handle, ) .with_branch_node_masks(true) .with_multi_added_removed_keys(Some(multi_added_removed_keys)) @@ -524,7 +519,7 @@ impl MultiproofManager { state_root_message_sender, multi_added_removed_keys, } = multiproof_input; - let account_proof_task_handle = self.account_proof_task_handle.clone(); + let account_proof_worker_handle = self.proof_worker_handle.clone(); let missed_leaves_storage_roots = self.missed_leaves_storage_roots.clone(); self.executor.spawn_blocking(move || { @@ -556,15 +551,10 @@ impl MultiproofManager { missed_leaves_storage_roots, }; - let (sender, receiver) = channel(); let proof_result: Result = (|| { - account_proof_task_handle - .queue_task(ProofTaskKind::AccountMultiproof(input, sender)) - .map_err(|_| { - ParallelStateRootError::Other( - "Failed to queue account multiproof to worker pool".into(), - ) - })?; + let receiver = account_proof_worker_handle + .queue_account_multiproof(input) + .map_err(|e| ParallelStateRootError::Other(e.to_string()))?; receiver .recv() @@ -693,7 +683,7 @@ impl MultiProofTask { pub(super) fn new( config: MultiProofConfig, executor: WorkloadExecutor, - proof_task_handle: ProofTaskManagerHandle, + proof_worker_handle: ProofWorkerHandle, to_sparse_trie: Sender, max_concurrency: usize, chunk_size: Option, @@ -713,8 +703,7 @@ impl MultiProofTask { multiproof_manager: MultiproofManager::new( executor, metrics.clone(), - proof_task_handle.clone(), // handle for storage proof workers - proof_task_handle, // handle for account proof workers + proof_worker_handle, max_concurrency, ), metrics, @@ -1223,7 +1212,7 @@ mod tests { DatabaseProviderFactory, }; use reth_trie::{MultiProof, TrieInput}; - use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofTaskManager}; + use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofWorkerHandle}; use revm_primitives::{B256, U256}; fn create_test_state_root_task(factory: F) -> MultiProofTask @@ -1238,12 +1227,12 @@ mod tests { config.prefix_sets.clone(), ); let consistent_view = ConsistentDbView::new(factory, None); - let proof_task = - ProofTaskManager::new(executor.handle().clone(), consistent_view, task_ctx, 1, 1) - .expect("Failed to create ProofTaskManager"); + let proof_handle = + ProofWorkerHandle::new(executor.handle().clone(), consistent_view, task_ctx, 1, 1) + .expect("Failed to spawn proof workers"); let channel = channel(); - MultiProofTask::new(config, executor, proof_task.handle(), channel.0, 1, None) + MultiProofTask::new(config, executor, proof_handle, channel.0, 1, 
None) } #[test] diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 51e669b8883..17dc511a445 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -892,13 +892,12 @@ where (handle, StateRootStrategy::StateRootTask) } Err((error, txs, env, provider_builder)) => { - // Failed to initialize proof task manager, fallback to parallel state - // root + // Failed to spawn proof workers, fallback to parallel state root error!( target: "engine::tree", block=?block_num_hash, ?error, - "Failed to initialize proof task manager, falling back to parallel state root" + "Failed to spawn proof workers, falling back to parallel state root" ); ( self.payload_processor.spawn_cache_exclusive( diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 7fc1f022a7e..0f29502f8c7 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -1,8 +1,6 @@ use crate::{ metrics::ParallelTrieMetrics, - proof_task::{ - AccountMultiproofInput, ProofTaskKind, ProofTaskManagerHandle, StorageProofInput, - }, + proof_task::{AccountMultiproofInput, ProofWorkerHandle, StorageProofInput}, root::ParallelStateRootError, StorageRootTargets, }; @@ -16,10 +14,7 @@ use reth_trie::{ DecodedMultiProof, DecodedStorageMultiProof, HashedPostStateSorted, MultiProofTargets, Nibbles, }; use reth_trie_common::added_removed_keys::MultiAddedRemovedKeys; -use std::sync::{ - mpsc::{channel, Receiver}, - Arc, -}; +use std::sync::{mpsc::Receiver, Arc}; use tracing::trace; /// Parallel proof calculator. @@ -41,8 +36,8 @@ pub struct ParallelProof { collect_branch_node_masks: bool, /// Provided by the user to give the necessary context to retain extra proofs. multi_added_removed_keys: Option>, - /// Handle to the proof task manager. - proof_task_handle: ProofTaskManagerHandle, + /// Handle to the proof worker pools. + proof_worker_handle: ProofWorkerHandle, /// Cached storage proof roots for missed leaves; this maps /// hashed (missed) addresses to their storage proof roots. missed_leaves_storage_roots: Arc>, @@ -57,7 +52,7 @@ impl ParallelProof { state_sorted: Arc, prefix_sets: Arc, missed_leaves_storage_roots: Arc>, - proof_task_handle: ProofTaskManagerHandle, + proof_worker_handle: ProofWorkerHandle, ) -> Self { Self { nodes_sorted, @@ -66,7 +61,7 @@ impl ParallelProof { missed_leaves_storage_roots, collect_branch_node_masks: false, multi_added_removed_keys: None, - proof_task_handle, + proof_worker_handle, #[cfg(feature = "metrics")] metrics: ParallelTrieMetrics::new_with_labels(&[("type", "proof")]), } @@ -93,7 +88,10 @@ impl ParallelProof { hashed_address: B256, prefix_set: PrefixSet, target_slots: B256Set, - ) -> Receiver> { + ) -> Result< + Receiver>, + ParallelStateRootError, + > { let input = StorageProofInput::new( hashed_address, prefix_set, @@ -102,9 +100,9 @@ impl ParallelProof { self.multi_added_removed_keys.clone(), ); - let (sender, receiver) = std::sync::mpsc::channel(); - let _ = self.proof_task_handle.queue_task(ProofTaskKind::StorageProof(input, sender)); - receiver + self.proof_worker_handle + .queue_storage_proof(input) + .map_err(|e| ParallelStateRootError::Other(e.to_string())) } /// Generate a storage multiproof according to the specified targets and hashed address. 
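Aside (not part of the patch; hypothetical names, not reth APIs): `queue_storage_proof` above now follows a queue-and-receive shape, where the caller gets back a per-request receiver and a failed enqueue is surfaced as an error rather than a panic. A generic sketch of that shape:

use crossbeam_channel::{unbounded, Sender};
use std::sync::mpsc;
use std::thread;

struct Job {
    input: u64,
    result_sender: mpsc::Sender<u64>,
}

// Enqueue a job and hand the caller a receiver for the eventual result.
fn queue_job(work_tx: &Sender<Job>, input: u64) -> Result<mpsc::Receiver<u64>, String> {
    let (tx, rx) = mpsc::channel();
    work_tx
        .send(Job { input, result_sender: tx })
        .map_err(|_| "worker pool unavailable".to_string())?;
    Ok(rx)
}

fn main() {
    let (work_tx, work_rx) = unbounded::<Job>();
    thread::spawn(move || {
        while let Ok(job) = work_rx.recv() {
            // Stand-in for the proof computation.
            let _ = job.result_sender.send(job.input * 2);
        }
    });

    let receiver = queue_job(&work_tx, 21).unwrap();
    assert_eq!(receiver.recv().unwrap(), 42);
}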
@@ -124,7 +122,7 @@ impl ParallelProof { "Starting storage proof generation" ); - let receiver = self.queue_storage_proof(hashed_address, prefix_set, target_slots); + let receiver = self.queue_storage_proof(hashed_address, prefix_set, target_slots)?; let proof_result = receiver.recv().map_err(|_| { ParallelStateRootError::StorageRoot(StorageRootError::Database(DatabaseError::Other( format!("channel closed for {hashed_address}"), @@ -193,15 +191,10 @@ impl ParallelProof { missed_leaves_storage_roots: self.missed_leaves_storage_roots.clone(), }; - let (sender, receiver) = channel(); - self.proof_task_handle - .queue_task(ProofTaskKind::AccountMultiproof(input, sender)) - .map_err(|_| { - ParallelStateRootError::Other( - "Failed to queue account multiproof: account worker pool unavailable" - .to_string(), - ) - })?; + let receiver = self + .proof_worker_handle + .queue_account_multiproof(input) + .map_err(|e| ParallelStateRootError::Other(e.to_string()))?; // Wait for account multiproof result from worker let (multiproof, stats) = receiver.recv().map_err(|_| { @@ -231,7 +224,7 @@ impl ParallelProof { #[cfg(test)] mod tests { use super::*; - use crate::proof_task::{ProofTaskCtx, ProofTaskManager}; + use crate::proof_task::{ProofTaskCtx, ProofWorkerHandle}; use alloy_primitives::{ keccak256, map::{B256Set, DefaultHashBuilder, HashMap}, @@ -313,20 +306,15 @@ mod tests { let task_ctx = ProofTaskCtx::new(Default::default(), Default::default(), Default::default()); - let proof_task = - ProofTaskManager::new(rt.handle().clone(), consistent_view, task_ctx, 1, 1).unwrap(); - let proof_task_handle = proof_task.handle(); - - // keep the join handle around to make sure it does not return any errors - // after we compute the state root - let join_handle = rt.spawn_blocking(move || proof_task.run()); + let proof_worker_handle = + ProofWorkerHandle::new(rt.handle().clone(), consistent_view, task_ctx, 1, 1).unwrap(); let parallel_result = ParallelProof::new( Default::default(), Default::default(), Default::default(), Default::default(), - proof_task_handle.clone(), + proof_worker_handle.clone(), ) .decoded_multiproof(targets.clone()) .unwrap(); @@ -354,9 +342,7 @@ mod tests { // then compare the entire thing for any mask differences assert_eq!(parallel_result, sequential_result_decoded); - // drop the handle to terminate the task and then block on the proof task handle to make - // sure it does not return any errors - drop(proof_task_handle); - rt.block_on(join_handle).unwrap().expect("The proof task should not return an error"); + // Workers shut down automatically when handle is dropped + drop(proof_worker_handle); } } diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 780839c238a..2d0f7e933c8 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -1,9 +1,14 @@ -//! A Task that manages sending proof requests to a number of tasks that have longer-running -//! database transactions. +//! Parallel proof computation using worker pools with dedicated database transactions. //! -//! The [`ProofTaskManager`] ensures that there are a max number of currently executing proof tasks, -//! and is responsible for managing the fixed number of database transactions created at the start -//! of the task. +//! +//! # Architecture +//! +//! - **Worker Pools**: Pre-spawned workers with dedicated database transactions +//! - Storage pool: Handles storage proofs and blinded storage node requests +//! 
- Account pool: Handles account multiproofs and blinded account node requests +//! - **Direct Channel Access**: [`ProofWorkerHandle`] provides type-safe queue methods with direct +//! access to worker channels, eliminating routing overhead +//! - **Automatic Shutdown**: Workers terminate gracefully when all handles are dropped //! //! Individual [`ProofTaskTx`] instances manage a dedicated [`InMemoryTrieCursorFactory`] and //! [`HashedPostStateCursorFactory`], which are each backed by a database transaction. @@ -21,7 +26,7 @@ use alloy_rlp::{BufMut, Encodable}; use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; use dashmap::DashMap; use reth_db_api::transaction::DbTx; -use reth_execution_errors::SparseTrieError; +use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, ProviderResult, @@ -47,7 +52,6 @@ use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}; use std::{ sync::{ - atomic::{AtomicUsize, Ordering}, mpsc::{channel, Receiver, Sender}, Arc, }, @@ -57,7 +61,7 @@ use tokio::runtime::Handle; use tracing::trace; #[cfg(feature = "metrics")] -use crate::proof_task_metrics::ProofTaskMetrics; +use crate::proof_task_metrics::ProofTaskTrieMetrics; type StorageProofResult = Result; type TrieNodeProviderResult = Result, SparseTrieError>; @@ -65,9 +69,6 @@ type AccountMultiproofResult = Result<(DecodedMultiProof, ParallelTrieStats), ParallelStateRootError>; /// Internal message for storage workers. -/// -/// This is NOT exposed publicly. External callers use `ProofTaskKind::StorageProof` or -/// `ProofTaskKind::BlindedStorageNode` which are routed through the manager's `std::mpsc` channel. #[derive(Debug)] enum StorageWorkerJob { /// Storage proof computation request @@ -88,60 +89,6 @@ enum StorageWorkerJob { }, } -/// Manager for coordinating proof request execution across different task types. -/// -/// # Architecture -/// -/// This manager operates two distinct worker pools for parallel trie operations: -/// -/// **Worker Pools**: -/// - Pre-spawned workers with dedicated long-lived transactions -/// - **Storage pool**: Handles `StorageProof` and `BlindedStorageNode` requests -/// - **Account pool**: Handles `AccountMultiproof` and `BlindedAccountNode` requests, delegates -/// storage proof computation to storage pool -/// - Tasks queued via crossbeam unbounded channels -/// - Workers continuously process without transaction overhead -/// - Returns error if worker pool is unavailable (all workers panicked) -/// -/// # Public Interface -/// -/// The public interface through `ProofTaskManagerHandle` allows external callers to: -/// - Submit tasks via `queue_task(ProofTaskKind)` -/// - Use standard `std::mpsc` message passing -/// - Receive consistent return types and error handling -#[derive(Debug)] -pub struct ProofTaskManager { - /// Sender for storage worker jobs to worker pool. - storage_work_tx: CrossbeamSender, - - /// Number of storage workers successfully spawned. - /// - /// May be less than requested if concurrency limits reduce the worker budget. - storage_worker_count: usize, - - /// Sender for account worker jobs to worker pool. - account_work_tx: CrossbeamSender, - - /// Number of account workers successfully spawned. 
- account_worker_count: usize, - - /// Receives proof task requests from [`ProofTaskManagerHandle`]. - proof_task_rx: CrossbeamReceiver, - - /// Sender for creating handles that can queue tasks. - proof_task_tx: CrossbeamSender, - - /// The number of active handles. - /// - /// Incremented in [`ProofTaskManagerHandle::new`] and decremented in - /// [`ProofTaskManagerHandle::drop`]. - active_handles: Arc, - - /// Metrics tracking proof task operations. - #[cfg(feature = "metrics")] - metrics: ProofTaskMetrics, -} - /// Worker loop for storage trie operations. /// /// # Lifecycle @@ -169,6 +116,7 @@ fn storage_worker_loop( proof_tx: ProofTaskTx, work_rx: CrossbeamReceiver, worker_id: usize, + #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, ) where Tx: DbTx, { @@ -282,10 +230,11 @@ fn storage_worker_loop( storage_nodes_processed, "Storage worker shutting down" ); + + #[cfg(feature = "metrics")] + metrics.record_storage_nodes(storage_nodes_processed as usize); } -// TODO: Refactor this with storage_worker_loop. ProofTaskManager should be removed in the following -// pr and `MultiproofManager` should be used instead to dispatch jobs directly. /// Worker loop for account trie operations. /// /// # Lifecycle @@ -314,6 +263,7 @@ fn account_worker_loop( work_rx: CrossbeamReceiver, storage_work_tx: CrossbeamSender, worker_id: usize, + #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, ) where Tx: DbTx, { @@ -459,6 +409,9 @@ fn account_worker_loop( account_nodes_processed, "Account worker shutting down" ); + + #[cfg(feature = "metrics")] + metrics.record_account_nodes(account_nodes_processed as usize); } /// Builds an account multiproof by consuming storage proof receivers lazily during trie walk. @@ -657,303 +610,6 @@ fn queue_storage_proofs( Ok(storage_proof_receivers) } -impl ProofTaskManager { - /// Creates a new [`ProofTaskManager`] with pre-spawned storage and account proof workers. - /// - /// This manager coordinates both storage and account worker pools: - /// - Storage workers handle `StorageProof` and `BlindedStorageNode` requests - /// - Account workers handle `AccountMultiproof` and `BlindedAccountNode` requests - /// - /// The `storage_worker_count` determines how many storage workers to spawn, and - /// `account_worker_count` determines how many account workers to spawn. - /// Returns an error if the underlying provider fails to create the transactions required for - /// spawning workers. - pub fn new( - executor: Handle, - view: ConsistentDbView, - task_ctx: ProofTaskCtx, - storage_worker_count: usize, - account_worker_count: usize, - ) -> ProviderResult - where - Factory: DatabaseProviderFactory, - { - // Use unbounded channel for the router to prevent account workers from blocking - // when queuing storage proofs. Account workers queue many storage proofs through - // this channel, and blocking on a bounded channel wastes parallel worker capacity. - let (proof_task_tx, proof_task_rx) = unbounded(); - - // Use unbounded channel to ensure all storage operations are queued to workers. - // This maintains transaction reuse benefits and avoids fallback to on-demand execution. 
- let (storage_work_tx, storage_work_rx) = unbounded::(); - let (account_work_tx, account_work_rx) = unbounded::(); - - tracing::info!( - target: "trie::proof_task", - storage_worker_count, - account_worker_count, - "Initializing storage and account worker pools with unbounded queues" - ); - - // Spawn storage workers - let spawned_storage_workers = Self::spawn_storage_workers( - &executor, - &view, - &task_ctx, - storage_worker_count, - storage_work_rx, - )?; - - // Spawn account workers with direct access to the storage worker queue - let spawned_account_workers = Self::spawn_account_workers( - &executor, - &view, - &task_ctx, - account_worker_count, - account_work_rx, - storage_work_tx.clone(), - )?; - - Ok(Self { - storage_work_tx, - storage_worker_count: spawned_storage_workers, - account_work_tx, - account_worker_count: spawned_account_workers, - proof_task_rx, - proof_task_tx, - active_handles: Arc::new(AtomicUsize::new(0)), - - #[cfg(feature = "metrics")] - metrics: ProofTaskMetrics::default(), - }) - } - - /// Returns a handle for sending new proof tasks to the [`ProofTaskManager`]. - pub fn handle(&self) -> ProofTaskManagerHandle { - ProofTaskManagerHandle::new(self.proof_task_tx.clone(), self.active_handles.clone()) - } - - /// Spawns a pool of storage workers with dedicated database transactions. - /// - /// Each worker receives `StorageWorkerJob` from the channel and processes storage proofs - /// and blinded storage node requests using a dedicated long-lived transaction. - /// - /// # Parameters - /// - `executor`: Tokio runtime handle for spawning blocking tasks - /// - `view`: Consistent database view for creating transactions - /// - `task_ctx`: Shared context with trie updates and prefix sets - /// - `worker_count`: Number of storage workers to spawn - /// - `work_rx`: Receiver for storage worker jobs - /// - /// # Returns - /// The number of storage workers successfully spawned - fn spawn_storage_workers( - executor: &Handle, - view: &ConsistentDbView, - task_ctx: &ProofTaskCtx, - worker_count: usize, - work_rx: CrossbeamReceiver, - ) -> ProviderResult - where - Factory: DatabaseProviderFactory, - { - let mut spawned_workers = 0; - - for worker_id in 0..worker_count { - let provider_ro = view.provider_ro()?; - let tx = provider_ro.into_tx(); - let proof_task_tx = ProofTaskTx::new(tx, task_ctx.clone(), worker_id); - let work_rx_clone = work_rx.clone(); - - executor.spawn_blocking(move || { - storage_worker_loop(proof_task_tx, work_rx_clone, worker_id) - }); - - spawned_workers += 1; - - tracing::debug!( - target: "trie::proof_task", - worker_id, - spawned_workers, - "Storage worker spawned successfully" - ); - } - - Ok(spawned_workers) - } - - /// Spawns a pool of account workers with dedicated database transactions. - /// - /// Each worker receives `AccountWorkerJob` from the channel and processes account multiproofs - /// and blinded account node requests using a dedicated long-lived transaction. Account workers - /// can delegate storage proof computation to the storage worker pool. 
- /// - /// # Parameters - /// - `executor`: Tokio runtime handle for spawning blocking tasks - /// - `view`: Consistent database view for creating transactions - /// - `task_ctx`: Shared context with trie updates and prefix sets - /// - `worker_count`: Number of account workers to spawn - /// - `work_rx`: Receiver for account worker jobs - /// - `storage_work_tx`: Sender to delegate storage proofs to storage worker pool - /// - /// # Returns - /// The number of account workers successfully spawned - fn spawn_account_workers( - executor: &Handle, - view: &ConsistentDbView, - task_ctx: &ProofTaskCtx, - worker_count: usize, - work_rx: CrossbeamReceiver, - storage_work_tx: CrossbeamSender, - ) -> ProviderResult - where - Factory: DatabaseProviderFactory, - { - let mut spawned_workers = 0; - - for worker_id in 0..worker_count { - let provider_ro = view.provider_ro()?; - let tx = provider_ro.into_tx(); - let proof_task_tx = ProofTaskTx::new(tx, task_ctx.clone(), worker_id); - let work_rx_clone = work_rx.clone(); - let storage_work_tx_clone = storage_work_tx.clone(); - - executor.spawn_blocking(move || { - account_worker_loop(proof_task_tx, work_rx_clone, storage_work_tx_clone, worker_id) - }); - - spawned_workers += 1; - - tracing::debug!( - target: "trie::proof_task", - worker_id, - spawned_workers, - "Account worker spawned successfully" - ); - } - - Ok(spawned_workers) - } - - /// Loops, managing the proof tasks, routing them to the appropriate worker pools. - /// - /// # Task Routing - /// - /// - **Storage Trie Operations** (`StorageProof` and `BlindedStorageNode`): Routed to - /// pre-spawned storage worker pool via unbounded channel. Returns error if workers are - /// disconnected (e.g., all workers panicked). - /// - **Account Trie Operations** (`AccountMultiproof` and `BlindedAccountNode`): Routed to - /// pre-spawned account worker pool via unbounded channel. Returns error if workers are - /// disconnected. - /// - /// # Shutdown - /// - /// On termination, `storage_work_tx` and `account_work_tx` are dropped, closing the channels - /// and signaling all workers to shut down gracefully. 
- pub fn run(mut self) -> ProviderResult<()> { - loop { - match self.proof_task_rx.recv() { - Ok(message) => { - match message { - ProofTaskMessage::QueueTask(task) => match task { - ProofTaskKind::StorageProof(input, sender) => { - self.storage_work_tx - .send(StorageWorkerJob::StorageProof { - input, - result_sender: sender, - }) - .expect("failed to dispatch storage proof: storage worker pool unavailable (all workers panicked or pool shut down)"); - - tracing::trace!( - target: "trie::proof_task", - "Storage proof dispatched to worker pool" - ); - } - - ProofTaskKind::BlindedStorageNode(account, path, sender) => { - #[cfg(feature = "metrics")] - { - self.metrics.storage_nodes += 1; - } - - self.storage_work_tx - .send(StorageWorkerJob::BlindedStorageNode { - account, - path, - result_sender: sender, - }) - .expect("failed to dispatch blinded storage node: storage worker pool unavailable (all workers panicked or pool shut down)"); - - tracing::trace!( - target: "trie::proof_task", - ?account, - ?path, - "Blinded storage node dispatched to worker pool" - ); - } - - ProofTaskKind::BlindedAccountNode(path, sender) => { - #[cfg(feature = "metrics")] - { - self.metrics.account_nodes += 1; - } - - self.account_work_tx - .send(AccountWorkerJob::BlindedAccountNode { - path, - result_sender: sender, - }) - .expect("failed to dispatch blinded account node: account worker pool unavailable (all workers panicked or pool shut down)"); - - tracing::trace!( - target: "trie::proof_task", - ?path, - "Blinded account node dispatched to worker pool" - ); - } - - ProofTaskKind::AccountMultiproof(input, sender) => { - self.account_work_tx - .send(AccountWorkerJob::AccountMultiproof { - input, - result_sender: sender, - }) - .expect("failed to dispatch account multiproof: account worker pool unavailable (all workers panicked or pool shut down)"); - - tracing::trace!( - target: "trie::proof_task", - "Account multiproof dispatched to worker pool" - ); - } - }, - ProofTaskMessage::Terminate => { - // Drop worker channels to signal workers to shut down - drop(self.storage_work_tx); - drop(self.account_work_tx); - - tracing::debug!( - target: "trie::proof_task", - storage_worker_count = self.storage_worker_count, - account_worker_count = self.account_worker_count, - "Shutting down proof task manager, signaling workers to terminate" - ); - - // Record metrics before terminating - #[cfg(feature = "metrics")] - self.metrics.record(); - - return Ok(()) - } - } - } - // All senders are disconnected, so we can terminate - // However this should never happen, as this struct stores a sender - Err(_) => return Ok(()), - }; - } - } -} - /// Type alias for the factory tuple returned by `create_factories` type ProofFactories<'a, Tx> = ( InMemoryTrieCursorFactory, &'a TrieUpdatesSorted>, @@ -969,8 +625,7 @@ pub struct ProofTaskTx { /// Trie updates, prefix sets, and state updates task_ctx: ProofTaskCtx, - /// Identifier for the tx within the context of a single [`ProofTaskManager`], used only for - /// tracing. + /// Identifier for the worker within the worker pool, used only for tracing. id: usize, } @@ -1135,9 +790,6 @@ struct AccountMultiproofParams<'a> { } /// Internal message for account workers. -/// -/// This is NOT exposed publicly. External callers use `ProofTaskKind::AccountMultiproof` or -/// `ProofTaskKind::BlindedAccountNode` which are routed through the manager's `std::mpsc` channel. 
#[derive(Debug)] enum AccountWorkerJob { /// Account multiproof computation request @@ -1181,91 +833,192 @@ impl ProofTaskCtx { } } -/// Message used to communicate with [`ProofTaskManager`]. -#[derive(Debug)] -pub enum ProofTaskMessage { - /// A request to queue a proof task. - QueueTask(ProofTaskKind), - /// A request to terminate the proof task manager. - Terminate, -} - -/// Proof task kind. +/// A handle that provides type-safe access to proof worker pools. /// -/// When queueing a task using [`ProofTaskMessage::QueueTask`], this enum -/// specifies the type of proof task to be executed. -#[derive(Debug)] -pub enum ProofTaskKind { - /// A storage proof request. - StorageProof(StorageProofInput, Sender), - /// A blinded account node request. - BlindedAccountNode(Nibbles, Sender), - /// A blinded storage node request. - BlindedStorageNode(B256, Nibbles, Sender), - /// An account multiproof request. - AccountMultiproof(AccountMultiproofInput, Sender), +/// The handle stores direct senders to both storage and account worker pools, +/// eliminating the need for a routing thread. All handles share reference-counted +/// channels, and workers shut down gracefully when all handles are dropped. +#[derive(Debug, Clone)] +pub struct ProofWorkerHandle { + /// Direct sender to storage worker pool + storage_work_tx: CrossbeamSender, + /// Direct sender to account worker pool + account_work_tx: CrossbeamSender, } -/// A handle that wraps a single proof task sender that sends a terminate message on `Drop` if the -/// number of active handles went to zero. -#[derive(Debug)] -pub struct ProofTaskManagerHandle { - /// The sender for the proof task manager. - sender: CrossbeamSender, - /// The number of active handles. - active_handles: Arc, -} +impl ProofWorkerHandle { + /// Spawns storage and account worker pools with dedicated database transactions. + /// + /// Returns a handle for submitting proof tasks to the worker pools. + /// Workers run until the last handle is dropped. + /// + /// # Parameters + /// - `executor`: Tokio runtime handle for spawning blocking tasks + /// - `view`: Consistent database view for creating transactions + /// - `task_ctx`: Shared context with trie updates and prefix sets + /// - `storage_worker_count`: Number of storage workers to spawn + /// - `account_worker_count`: Number of account workers to spawn + pub fn new( + executor: Handle, + view: ConsistentDbView, + task_ctx: ProofTaskCtx, + storage_worker_count: usize, + account_worker_count: usize, + ) -> ProviderResult + where + Factory: DatabaseProviderFactory, + { + let (storage_work_tx, storage_work_rx) = unbounded::(); + let (account_work_tx, account_work_rx) = unbounded::(); + + tracing::debug!( + target: "trie::proof_task", + storage_worker_count, + account_worker_count, + "Spawning proof worker pools" + ); + + // Spawn storage workers + for worker_id in 0..storage_worker_count { + let provider_ro = view.provider_ro()?; + let tx = provider_ro.into_tx(); + let proof_task_tx = ProofTaskTx::new(tx, task_ctx.clone(), worker_id); + let work_rx_clone = storage_work_rx.clone(); + + executor.spawn_blocking(move || { + #[cfg(feature = "metrics")] + let metrics = ProofTaskTrieMetrics::default(); + + storage_worker_loop( + proof_task_tx, + work_rx_clone, + worker_id, + #[cfg(feature = "metrics")] + metrics, + ) + }); -impl ProofTaskManagerHandle { - /// Creates a new [`ProofTaskManagerHandle`] with the given sender. 
- pub fn new( - sender: CrossbeamSender, - active_handles: Arc, + tracing::debug!( + target: "trie::proof_task", + worker_id, + "Storage worker spawned successfully" + ); + } + + // Spawn account workers + for worker_id in 0..account_worker_count { + let provider_ro = view.provider_ro()?; + let tx = provider_ro.into_tx(); + let proof_task_tx = ProofTaskTx::new(tx, task_ctx.clone(), worker_id); + let work_rx_clone = account_work_rx.clone(); + let storage_work_tx_clone = storage_work_tx.clone(); + + executor.spawn_blocking(move || { + #[cfg(feature = "metrics")] + let metrics = ProofTaskTrieMetrics::default(); + + account_worker_loop( + proof_task_tx, + work_rx_clone, + storage_work_tx_clone, + worker_id, + #[cfg(feature = "metrics")] + metrics, + ) + }); + + tracing::debug!( + target: "trie::proof_task", + worker_id, + "Account worker spawned successfully" + ); + } + + Ok(Self::new_handle(storage_work_tx, account_work_tx)) + } + + /// Creates a new [`ProofWorkerHandle`] with direct access to worker pools. + /// + /// This is an internal constructor used for creating handles. + const fn new_handle( + storage_work_tx: CrossbeamSender, + account_work_tx: CrossbeamSender, ) -> Self { - active_handles.fetch_add(1, Ordering::SeqCst); - Self { sender, active_handles } + Self { storage_work_tx, account_work_tx } } - /// Queues a task to the proof task manager. - pub fn queue_task( + /// Queue a storage proof computation + pub fn queue_storage_proof( &self, - task: ProofTaskKind, - ) -> Result<(), crossbeam_channel::SendError> { - self.sender.send(ProofTaskMessage::QueueTask(task)) + input: StorageProofInput, + ) -> Result, ProviderError> { + let (tx, rx) = channel(); + self.storage_work_tx + .send(StorageWorkerJob::StorageProof { input, result_sender: tx }) + .map_err(|_| { + ProviderError::other(std::io::Error::other("storage workers unavailable")) + })?; + + Ok(rx) } - /// Terminates the proof task manager. - pub fn terminate(&self) { - let _ = self.sender.send(ProofTaskMessage::Terminate); + /// Queue an account multiproof computation + pub fn queue_account_multiproof( + &self, + input: AccountMultiproofInput, + ) -> Result, ProviderError> { + let (tx, rx) = channel(); + self.account_work_tx + .send(AccountWorkerJob::AccountMultiproof { input, result_sender: tx }) + .map_err(|_| { + ProviderError::other(std::io::Error::other("account workers unavailable")) + })?; + + Ok(rx) } -} -impl Clone for ProofTaskManagerHandle { - fn clone(&self) -> Self { - Self::new(self.sender.clone(), self.active_handles.clone()) + /// Internal: Queue blinded storage node request + fn queue_blinded_storage_node( + &self, + account: B256, + path: Nibbles, + ) -> Result, ProviderError> { + let (tx, rx) = channel(); + self.storage_work_tx + .send(StorageWorkerJob::BlindedStorageNode { account, path, result_sender: tx }) + .map_err(|_| { + ProviderError::other(std::io::Error::other("storage workers unavailable")) + })?; + + Ok(rx) } -} -impl Drop for ProofTaskManagerHandle { - fn drop(&mut self) { - // Decrement the number of active handles and terminate the manager if it was the last - // handle. 
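Aside (not part of the patch; hypothetical types, not the reth handle): the queue methods above rely on a clone-and-drop lifecycle, where any clone of the handle can enqueue work and the workers exit only after the last clone is dropped and the channel disconnects. A self-contained sketch of that lifecycle:

use crossbeam_channel::unbounded;
use std::thread;

fn main() {
    let (handle, jobs) = unbounded::<u32>();

    // Workers share one receiver, like the storage and account pools share theirs.
    let workers: Vec<_> = (0..2)
        .map(|_| {
            let jobs = jobs.clone();
            thread::spawn(move || while let Ok(_job) = jobs.recv() { /* compute proof */ })
        })
        .collect();
    drop(jobs);

    let cloned = handle.clone();
    handle.send(1).unwrap();
    cloned.send(2).unwrap(); // any clone can enqueue

    // Dropping every sender disconnects the channel: recv() errors and the loops end.
    drop(handle);
    drop(cloned);
    for worker in workers {
        worker.join().unwrap();
    }
}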
- if self.active_handles.fetch_sub(1, Ordering::SeqCst) == 1 { - self.terminate(); - } + /// Internal: Queue blinded account node request + fn queue_blinded_account_node( + &self, + path: Nibbles, + ) -> Result, ProviderError> { + let (tx, rx) = channel(); + self.account_work_tx + .send(AccountWorkerJob::BlindedAccountNode { path, result_sender: tx }) + .map_err(|_| { + ProviderError::other(std::io::Error::other("account workers unavailable")) + })?; + + Ok(rx) } } -impl TrieNodeProviderFactory for ProofTaskManagerHandle { +impl TrieNodeProviderFactory for ProofWorkerHandle { type AccountNodeProvider = ProofTaskTrieNodeProvider; type StorageNodeProvider = ProofTaskTrieNodeProvider; fn account_node_provider(&self) -> Self::AccountNodeProvider { - ProofTaskTrieNodeProvider::AccountNode { sender: self.sender.clone() } + ProofTaskTrieNodeProvider::AccountNode { handle: self.clone() } } fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { - ProofTaskTrieNodeProvider::StorageNode { account, sender: self.sender.clone() } + ProofTaskTrieNodeProvider::StorageNode { account, handle: self.clone() } } } @@ -1274,35 +1027,34 @@ impl TrieNodeProviderFactory for ProofTaskManagerHandle { pub enum ProofTaskTrieNodeProvider { /// Blinded account trie node provider. AccountNode { - /// Sender to the proof task. - sender: CrossbeamSender, + /// Handle to the proof worker pools. + handle: ProofWorkerHandle, }, /// Blinded storage trie node provider. StorageNode { /// Target account. account: B256, - /// Sender to the proof task. - sender: CrossbeamSender, + /// Handle to the proof worker pools. + handle: ProofWorkerHandle, }, } impl TrieNodeProvider for ProofTaskTrieNodeProvider { fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { - let (tx, rx) = channel(); match self { - Self::AccountNode { sender } => { - let _ = sender.send(ProofTaskMessage::QueueTask( - ProofTaskKind::BlindedAccountNode(*path, tx), - )); + Self::AccountNode { handle } => { + let rx = handle + .queue_blinded_account_node(*path) + .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; + rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? } - Self::StorageNode { sender, account } => { - let _ = sender.send(ProofTaskMessage::QueueTask( - ProofTaskKind::BlindedStorageNode(*account, *path, tx), - )); + Self::StorageNode { handle, account } => { + let rx = handle + .queue_blinded_storage_node(*account, *path) + .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; + rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? } } - - rx.recv().unwrap() } } @@ -1329,9 +1081,9 @@ mod tests { ) } - /// Ensures `max_concurrency` is independent of storage and account workers. + /// Ensures `ProofWorkerHandle::new` spawns workers correctly. 
#[test] - fn proof_task_manager_independent_pools() { + fn spawn_proof_workers_creates_handle() { let runtime = Builder::new_multi_thread().worker_threads(1).enable_all().build().unwrap(); runtime.block_on(async { let handle = tokio::runtime::Handle::current(); @@ -1339,13 +1091,13 @@ mod tests { let view = ConsistentDbView::new(factory, None); let ctx = test_ctx(); - let manager = ProofTaskManager::new(handle.clone(), view, ctx, 5, 3).unwrap(); - // With storage_worker_count=5, we get exactly 5 storage workers - assert_eq!(manager.storage_worker_count, 5); - // With account_worker_count=3, we get exactly 3 account workers - assert_eq!(manager.account_worker_count, 3); + let proof_handle = ProofWorkerHandle::new(handle.clone(), view, ctx, 5, 3).unwrap(); + + // Verify handle can be cloned + let _cloned_handle = proof_handle.clone(); - drop(manager); + // Workers shut down automatically when handle is dropped + drop(proof_handle); task::yield_now().await; }); } diff --git a/crates/trie/parallel/src/proof_task_metrics.rs b/crates/trie/parallel/src/proof_task_metrics.rs index cdb59d078d8..6492e28d12d 100644 --- a/crates/trie/parallel/src/proof_task_metrics.rs +++ b/crates/trie/parallel/src/proof_task_metrics.rs @@ -1,24 +1,5 @@ use reth_metrics::{metrics::Histogram, Metrics}; -/// Metrics for blinded node fetching for the duration of the proof task manager. -#[derive(Clone, Debug, Default)] -pub struct ProofTaskMetrics { - /// The actual metrics for blinded nodes. - pub task_metrics: ProofTaskTrieMetrics, - /// Count of blinded account node requests. - pub account_nodes: usize, - /// Count of blinded storage node requests. - pub storage_nodes: usize, -} - -impl ProofTaskMetrics { - /// Record the blinded node counts into the histograms. - pub fn record(&self) { - self.task_metrics.record_account_nodes(self.account_nodes); - self.task_metrics.record_storage_nodes(self.storage_nodes); - } -} - /// Metrics for the proof task. 
#[derive(Clone, Metrics)] #[metrics(scope = "trie.proof_task")] From 092599bd2c3a3b1c2bf808eaeac323e10c9fb733 Mon Sep 17 00:00:00 2001 From: Julian Meyer Date: Tue, 14 Oct 2025 20:38:21 -0700 Subject: [PATCH 048/371] fix: required optimism primitives features in db-api (#19005) --- crates/storage/db-api/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 3f7e5c7b1a7..bd77b9d63d7 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -28,7 +28,7 @@ alloy-genesis.workspace = true alloy-consensus.workspace = true # optimism -reth-optimism-primitives = { workspace = true, optional = true } +reth-optimism-primitives = { workspace = true, optional = true, features = ["serde", "reth-codec"] } # codecs modular-bitfield.workspace = true From 856ad087766476f9fd20624c136951f654d5fdaa Mon Sep 17 00:00:00 2001 From: Karl Yu <43113774+0xKarl98@users.noreply.github.com> Date: Wed, 15 Oct 2025 11:50:41 +0800 Subject: [PATCH 049/371] refactor(engine): simplify InvalidBlockWitnessHook::on_invalid_block for better testability (#18696) --- Cargo.lock | 7 + crates/engine/invalid-block-hooks/Cargo.toml | 11 + .../engine/invalid-block-hooks/src/witness.rs | 1036 ++++++++++++++--- crates/primitives/Cargo.toml | 2 +- 4 files changed, 884 insertions(+), 172 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20dfb2c62db..7dc6113270d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8619,6 +8619,7 @@ name = "reth-invalid-block-hooks" version = "1.8.2" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", @@ -8626,18 +8627,24 @@ dependencies = [ "futures", "jsonrpsee", "pretty_assertions", + "reth-chainspec", "reth-engine-primitives", + "reth-ethereum-primitives", "reth-evm", + "reth-evm-ethereum", "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-api", + "reth-testing-utils", "reth-tracing", "reth-trie", + "revm", "revm-bytecode", "revm-database", "serde", "serde_json", + "tempfile", ] [[package]] diff --git a/crates/engine/invalid-block-hooks/Cargo.toml b/crates/engine/invalid-block-hooks/Cargo.toml index 8d4a469ee16..5b3563c7ac3 100644 --- a/crates/engine/invalid-block-hooks/Cargo.toml +++ b/crates/engine/invalid-block-hooks/Cargo.toml @@ -12,6 +12,7 @@ workspace = true [dependencies] # reth +revm.workspace = true revm-bytecode.workspace = true revm-database.workspace = true reth-engine-primitives.workspace = true @@ -38,3 +39,13 @@ jsonrpsee.workspace = true pretty_assertions.workspace = true serde.workspace = true serde_json.workspace = true + +[dev-dependencies] +alloy-eips.workspace = true +reth-chainspec.workspace = true +reth-ethereum-primitives.workspace = true +reth-evm-ethereum.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +reth-revm = { workspace = true, features = ["test-utils"] } +reth-testing-utils.workspace = true +tempfile.workspace = true diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index f979958a198..1df76d9255c 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -1,31 +1,50 @@ use alloy_consensus::BlockHeader; -use alloy_primitives::{keccak256, Address, B256, U256}; +use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use pretty_assertions::Comparison; use 
reth_engine_primitives::InvalidBlockHook; use reth_evm::{execute::Executor, ConfigureEvm}; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedHeader}; -use reth_provider::{BlockExecutionOutput, StateProviderFactory}; -use reth_revm::{database::StateProviderDatabase, db::BundleState, state::AccountInfo}; +use reth_provider::{BlockExecutionOutput, StateProvider, StateProviderFactory}; +use reth_revm::{ + database::StateProviderDatabase, + db::{BundleState, State}, +}; use reth_rpc_api::DebugApiClient; use reth_tracing::tracing::warn; use reth_trie::{updates::TrieUpdates, HashedStorage}; +use revm::state::AccountInfo; use revm_bytecode::Bytecode; -use revm_database::states::{ - reverts::{AccountInfoRevert, RevertToSlot}, - AccountStatus, StorageSlot, +use revm_database::{ + states::{reverts::AccountInfoRevert, StorageSlot}, + AccountStatus, RevertToSlot, }; use serde::Serialize; use std::{collections::BTreeMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; +type CollectionResult = + (BTreeMap, BTreeMap, reth_trie::HashedPostState, BundleState); + +/// Serializable version of `BundleState` for deterministic comparison #[derive(Debug, PartialEq, Eq)] -struct AccountRevertSorted { - pub account: AccountInfoRevert, - pub storage: BTreeMap, - pub previous_status: AccountStatus, - pub wipe_storage: bool, +struct BundleStateSorted { + /// Account state + pub state: BTreeMap, + /// All created contracts in this block. + pub contracts: BTreeMap, + /// Changes to revert + /// + /// **Note**: Inside vector is *not* sorted by address. + /// + /// But it is unique by address. + pub reverts: Vec>, + /// The size of the plain state in the bundle state + pub state_size: usize, + /// The size of reverts in the bundle state + pub reverts_size: usize, } +/// Serializable version of `BundleAccount` #[derive(Debug, PartialEq, Eq)] struct BundleAccountSorted { pub info: Option, @@ -40,74 +59,120 @@ struct BundleAccountSorted { pub status: AccountStatus, } +/// Serializable version of `AccountRevert` #[derive(Debug, PartialEq, Eq)] -struct BundleStateSorted { - /// Account state - pub state: BTreeMap, - /// All created contracts in this block. - pub contracts: BTreeMap, - /// Changes to revert - /// - /// **Note**: Inside vector is *not* sorted by address. - /// - /// But it is unique by address. 
- pub reverts: Vec>, - /// The size of the plain state in the bundle state - pub state_size: usize, - /// The size of reverts in the bundle state - pub reverts_size: usize, +struct AccountRevertSorted { + pub account: AccountInfoRevert, + pub storage: BTreeMap, + pub previous_status: AccountStatus, + pub wipe_storage: bool, } -impl BundleStateSorted { - fn from_bundle_state(bundle_state: &BundleState) -> Self { - let state = bundle_state +/// Converts bundle state to sorted format for deterministic comparison +fn sort_bundle_state_for_comparison(bundle_state: &BundleState) -> BundleStateSorted { + BundleStateSorted { + state: bundle_state .state - .clone() - .into_iter() - .map(|(address, account)| { + .iter() + .map(|(addr, acc)| { ( - address, + *addr, BundleAccountSorted { - info: account.info, - original_info: account.original_info, - status: account.status, - storage: BTreeMap::from_iter(account.storage), + info: acc.info.clone(), + original_info: acc.original_info.clone(), + storage: BTreeMap::from_iter(acc.storage.clone()), + status: acc.status, }, ) }) - .collect(); - - let contracts = BTreeMap::from_iter(bundle_state.contracts.clone()); - - let reverts = bundle_state + .collect(), + contracts: BTreeMap::from_iter(bundle_state.contracts.clone()), + reverts: bundle_state .reverts .iter() .map(|block| { block .iter() - .map(|(address, account_revert)| { + .map(|(addr, rev)| { ( - *address, + *addr, AccountRevertSorted { - account: account_revert.account.clone(), - previous_status: account_revert.previous_status, - wipe_storage: account_revert.wipe_storage, - storage: BTreeMap::from_iter(account_revert.storage.clone()), + account: rev.account.clone(), + storage: BTreeMap::from_iter(rev.storage.clone()), + previous_status: rev.previous_status, + wipe_storage: rev.wipe_storage, }, ) }) .collect() }) - .collect(); + .collect(), + state_size: bundle_state.state_size, + reverts_size: bundle_state.reverts_size, + } +} + +/// Extracts execution data including codes, preimages, and hashed state from database +fn collect_execution_data( + mut db: State>>, +) -> eyre::Result { + let bundle_state = db.take_bundle(); + let mut codes = BTreeMap::new(); + let mut preimages = BTreeMap::new(); + let mut hashed_state = db.database.hashed_post_state(&bundle_state); + + // Collect codes + db.cache.contracts.values().chain(bundle_state.contracts.values()).for_each(|code| { + let code_bytes = code.original_bytes(); + codes.insert(keccak256(&code_bytes), code_bytes); + }); - let state_size = bundle_state.state_size; - let reverts_size = bundle_state.reverts_size; + // Collect preimages + for (address, account) in db.cache.accounts { + let hashed_address = keccak256(address); + hashed_state + .accounts + .insert(hashed_address, account.account.as_ref().map(|a| a.info.clone().into())); - Self { state, contracts, reverts, state_size, reverts_size } + if let Some(account_data) = account.account { + preimages.insert(hashed_address, alloy_rlp::encode(address).into()); + let storage = hashed_state + .storages + .entry(hashed_address) + .or_insert_with(|| HashedStorage::new(account.status.was_destroyed())); + + for (slot, value) in account_data.storage { + let slot_bytes = B256::from(slot); + let hashed_slot = keccak256(slot_bytes); + storage.storage.insert(hashed_slot, value); + preimages.insert(hashed_slot, alloy_rlp::encode(slot_bytes).into()); + } + } } + + Ok((codes, preimages, hashed_state, bundle_state)) } -/// Generates a witness for the given block and saves it to a file. 
+/// Generates execution witness from collected codes, preimages, and hashed state +fn generate( + codes: BTreeMap, + preimages: BTreeMap, + hashed_state: reth_trie::HashedPostState, + state_provider: Box, +) -> eyre::Result { + let state = state_provider.witness(Default::default(), hashed_state)?; + Ok(ExecutionWitness { + state, + codes: codes.into_values().collect(), + keys: preimages.into_values().collect(), + ..Default::default() + }) +} + +/// Hook for generating execution witnesses when invalid blocks are detected. +/// +/// This hook captures the execution state and generates witness data that can be used +/// for debugging and analysis of invalid block execution. #[derive(Debug)] pub struct InvalidBlockWitnessHook { /// The provider to read the historical state and do the EVM execution. @@ -139,103 +204,51 @@ where E: ConfigureEvm + 'static, N: NodePrimitives, { - fn on_invalid_block( + /// Re-executes the block and collects execution data + fn re_execute_block( &self, parent_header: &SealedHeader, block: &RecoveredBlock, - output: &BlockExecutionOutput, - trie_updates: Option<(&TrieUpdates, B256)>, - ) -> eyre::Result<()> { - // TODO(alexey): unify with `DebugApi::debug_execution_witness` - + ) -> eyre::Result<(ExecutionWitness, BundleState)> { let mut executor = self.evm_config.batch_executor(StateProviderDatabase::new( self.provider.state_by_block_hash(parent_header.hash())?, )); executor.execute_one(block)?; + let db = executor.into_state(); + let (codes, preimages, hashed_state, bundle_state) = collect_execution_data(db)?; - // Take the bundle state - let mut db = executor.into_state(); - let bundle_state = db.take_bundle(); - - // Initialize a map of preimages. - let mut state_preimages = Vec::default(); - - // Get codes - let codes = db - .cache - .contracts - .values() - .map(|code| code.original_bytes()) - .chain( - // cache state does not have all the contracts, especially when - // a contract is created within the block - // the contract only exists in bundle state, therefore we need - // to include them as well - bundle_state.contracts.values().map(|code| code.original_bytes()), - ) - .collect(); - - // Grab all account proofs for the data accessed during block execution. - // - // Note: We grab *all* accounts in the cache here, as the `BundleState` prunes - // referenced accounts + storage slots. 
- let mut hashed_state = db.database.hashed_post_state(&bundle_state); - for (address, account) in db.cache.accounts { - let hashed_address = keccak256(address); - hashed_state - .accounts - .insert(hashed_address, account.account.as_ref().map(|a| a.info.clone().into())); + let state_provider = self.provider.state_by_block_hash(parent_header.hash())?; + let witness = generate(codes, preimages, hashed_state, state_provider)?; - let storage = hashed_state - .storages - .entry(hashed_address) - .or_insert_with(|| HashedStorage::new(account.status.was_destroyed())); - - if let Some(account) = account.account { - state_preimages.push(alloy_rlp::encode(address).into()); - - for (slot, value) in account.storage { - let slot = B256::from(slot); - let hashed_slot = keccak256(slot); - storage.storage.insert(hashed_slot, value); + Ok((witness, bundle_state)) + } - state_preimages.push(alloy_rlp::encode(slot).into()); - } - } - } + /// Handles witness generation, saving, and comparison with healthy node + fn handle_witness_operations( + &self, + witness: &ExecutionWitness, + block_prefix: &str, + block_number: u64, + ) -> eyre::Result<()> { + let filename = format!("{}.witness.re_executed.json", block_prefix); + let re_executed_witness_path = self.save_file(filename, witness)?; - // Generate an execution witness for the aggregated state of accessed accounts. - // Destruct the cache database to retrieve the state provider. - let state_provider = db.database.into_inner(); - let state = state_provider.witness(Default::default(), hashed_state.clone())?; - - // Write the witness to the output directory. - let response = - ExecutionWitness { state, codes, keys: state_preimages, ..Default::default() }; - let re_executed_witness_path = self.save_file( - format!("{}_{}.witness.re_executed.json", block.number(), block.hash()), - &response, - )?; if let Some(healthy_node_client) = &self.healthy_node_client { - // Compare the witness against the healthy node. let healthy_node_witness = futures::executor::block_on(async move { DebugApiClient::<()>::debug_execution_witness( healthy_node_client, - block.number().into(), + block_number.into(), ) .await })?; - let healthy_path = self.save_file( - format!("{}_{}.witness.healthy.json", block.number(), block.hash()), - &healthy_node_witness, - )?; + let filename = format!("{}.witness.healthy.json", block_prefix); + let healthy_path = self.save_file(filename, &healthy_node_witness)?; - // If the witnesses are different, write the diff to the output directory. - if response != healthy_node_witness { - let filename = format!("{}_{}.witness.diff", block.number(), block.hash()); - let diff_path = self.save_diff(filename, &response, &healthy_node_witness)?; + if witness != &healthy_node_witness { + let filename = format!("{}.witness.diff", block_prefix); + let diff_path = self.save_diff(filename, witness, &healthy_node_witness)?; warn!( target: "engine::invalid_block_hooks::witness", diff_path = %diff_path.display(), @@ -245,29 +258,26 @@ where ); } } + Ok(()) + } - // The bundle state after re-execution should match the original one. - // - // Reverts now supports order-independent equality, so we can compare directly without - // sorting the reverts vectors. 
- // - // See: https://github.com/bluealloy/revm/pull/1827 - if bundle_state != output.state { - let original_path = self.save_file( - format!("{}_{}.bundle_state.original.json", block.number(), block.hash()), - &output.state, - )?; - let re_executed_path = self.save_file( - format!("{}_{}.bundle_state.re_executed.json", block.number(), block.hash()), - &bundle_state, - )?; - - let filename = format!("{}_{}.bundle_state.diff", block.number(), block.hash()); - // Convert bundle state to sorted struct which has BTreeMap instead of HashMap to - // have deterministic ordering - let bundle_state_sorted = BundleStateSorted::from_bundle_state(&bundle_state); - let output_state_sorted = BundleStateSorted::from_bundle_state(&output.state); + /// Validates that the bundle state after re-execution matches the original + fn validate_bundle_state( + &self, + re_executed_state: &BundleState, + original_state: &BundleState, + block_prefix: &str, + ) -> eyre::Result<()> { + if re_executed_state != original_state { + let original_filename = format!("{}.bundle_state.original.json", block_prefix); + let original_path = self.save_file(original_filename, original_state)?; + let re_executed_filename = format!("{}.bundle_state.re_executed.json", block_prefix); + let re_executed_path = self.save_file(re_executed_filename, re_executed_state)?; + // Convert bundle state to sorted format for deterministic comparison + let bundle_state_sorted = sort_bundle_state_for_comparison(re_executed_state); + let output_state_sorted = sort_bundle_state_for_comparison(original_state); + let filename = format!("{}.bundle_state.diff", block_prefix); let diff_path = self.save_diff(filename, &bundle_state_sorted, &output_state_sorted)?; warn!( @@ -278,37 +288,44 @@ where "Bundle state mismatch after re-execution" ); } + Ok(()) + } - // Calculate the state root and trie updates after re-execution. They should match - // the original ones. + /// Validates state root and trie updates after re-execution + fn validate_state_root_and_trie( + &self, + parent_header: &SealedHeader, + block: &RecoveredBlock, + bundle_state: &BundleState, + trie_updates: Option<(&TrieUpdates, B256)>, + block_prefix: &str, + ) -> eyre::Result<()> { + let state_provider = self.provider.state_by_block_hash(parent_header.hash())?; + let hashed_state = state_provider.hashed_post_state(bundle_state); let (re_executed_root, trie_output) = state_provider.state_root_with_updates(hashed_state)?; + if let Some((original_updates, original_root)) = trie_updates { if re_executed_root != original_root { - let filename = format!("{}_{}.state_root.diff", block.number(), block.hash()); + let filename = format!("{}.state_root.diff", block_prefix); let diff_path = self.save_diff(filename, &re_executed_root, &original_root)?; warn!(target: "engine::invalid_block_hooks::witness", ?original_root, ?re_executed_root, diff_path = %diff_path.display(), "State root mismatch after re-execution"); } - // If the re-executed state root does not match the _header_ state root, also log that. 
if re_executed_root != block.state_root() { - let filename = - format!("{}_{}.header_state_root.diff", block.number(), block.hash()); + let filename = format!("{}.header_state_root.diff", block_prefix); let diff_path = self.save_diff(filename, &re_executed_root, &block.state_root())?; warn!(target: "engine::invalid_block_hooks::witness", header_state_root=?block.state_root(), ?re_executed_root, diff_path = %diff_path.display(), "Re-executed state root does not match block state root"); } if &trie_output != original_updates { - // Trie updates are too big to diff, so we just save the original and re-executed - let trie_output_sorted = &trie_output.into_sorted_ref(); - let original_updates_sorted = &original_updates.into_sorted_ref(); let original_path = self.save_file( - format!("{}_{}.trie_updates.original.json", block.number(), block.hash()), - original_updates_sorted, + format!("{}.trie_updates.original.json", block_prefix), + &original_updates.into_sorted_ref(), )?; let re_executed_path = self.save_file( - format!("{}_{}.trie_updates.re_executed.json", block.number(), block.hash()), - trie_output_sorted, + format!("{}.trie_updates.re_executed.json", block_prefix), + &trie_output.into_sorted_ref(), )?; warn!( target: "engine::invalid_block_hooks::witness", @@ -318,11 +335,44 @@ where ); } } + Ok(()) + } + + fn on_invalid_block( + &self, + parent_header: &SealedHeader, + block: &RecoveredBlock, + output: &BlockExecutionOutput, + trie_updates: Option<(&TrieUpdates, B256)>, + ) -> eyre::Result<()> { + // TODO(alexey): unify with `DebugApi::debug_execution_witness` + let (witness, bundle_state) = self.re_execute_block(parent_header, block)?; + + let block_prefix = format!("{}_{}", block.number(), block.hash()); + self.handle_witness_operations(&witness, &block_prefix, block.number())?; + + self.validate_bundle_state(&bundle_state, &output.state, &block_prefix)?; + + self.validate_state_root_and_trie( + parent_header, + block, + &bundle_state, + trie_updates, + &block_prefix, + )?; Ok(()) } - /// Saves the diff of two values into a file with the given name in the output directory. 
+ /// Serializes and saves a value to a JSON file in the output directory + fn save_file(&self, filename: String, value: &T) -> eyre::Result { + let path = self.output_directory.join(filename); + File::create(&path)?.write_all(serde_json::to_string(value)?.as_bytes())?; + + Ok(path) + } + + /// Compares two values and saves their diff to a file in the output directory fn save_diff( &self, filename: String, @@ -335,13 +385,6 @@ where Ok(path) } - - fn save_file(&self, filename: String, value: &T) -> eyre::Result { - let path = self.output_directory.join(filename); - File::create(&path)?.write_all(serde_json::to_string(value)?.as_bytes())?; - - Ok(path) - } } impl InvalidBlockHook for InvalidBlockWitnessHook @@ -361,3 +404,654 @@ where } } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eips::eip7685::Requests; + use alloy_primitives::{map::HashMap, Address, Bytes, B256, U256}; + use reth_chainspec::ChainSpec; + use reth_ethereum_primitives::EthPrimitives; + use reth_evm_ethereum::EthEvmConfig; + use reth_provider::test_utils::MockEthProvider; + use reth_revm::db::{BundleAccount, BundleState}; + use revm_database::states::reverts::AccountRevert; + use tempfile::TempDir; + + use reth_revm::test_utils::StateProviderTest; + use reth_testing_utils::generators::{self, random_block, random_eoa_accounts, BlockParams}; + use revm_bytecode::Bytecode; + + /// Creates a test `BundleState` with realistic accounts, contracts, and reverts + fn create_bundle_state() -> BundleState { + let mut rng = generators::rng(); + let mut bundle_state = BundleState::default(); + + // Generate realistic EOA accounts using generators + let accounts = random_eoa_accounts(&mut rng, 3); + + for (i, (addr, account)) in accounts.into_iter().enumerate() { + // Create storage entries for each account + let mut storage = HashMap::default(); + let storage_key = U256::from(i + 1); + storage.insert( + storage_key, + StorageSlot { + present_value: U256::from((i + 1) * 10), + previous_or_original_value: U256::from((i + 1) * 15), + }, + ); + + let bundle_account = BundleAccount { + info: Some(AccountInfo { + balance: account.balance, + nonce: account.nonce, + code_hash: account.bytecode_hash.unwrap_or_default(), + code: None, + }), + original_info: (i == 0).then(|| AccountInfo { + balance: account.balance.checked_div(U256::from(2)).unwrap_or(U256::ZERO), + nonce: 0, + code_hash: account.bytecode_hash.unwrap_or_default(), + code: None, + }), + storage, + status: AccountStatus::default(), + }; + + bundle_state.state.insert(addr, bundle_account); + } + + // Generate realistic contract bytecode using generators + let contract_hashes: Vec = (0..3).map(|_| B256::random()).collect(); + for (i, hash) in contract_hashes.iter().enumerate() { + let bytecode = match i { + 0 => Bytes::from(vec![0x60, 0x80, 0x60, 0x40, 0x52]), // Simple contract + 1 => Bytes::from(vec![0x61, 0x81, 0x60, 0x00, 0x39]), // Another contract + _ => Bytes::from(vec![0x60, 0x00, 0x60, 0x00, 0xfd]), // REVERT contract + }; + bundle_state.contracts.insert(*hash, Bytecode::new_raw(bytecode)); + } + + // Add reverts for multiple blocks using different accounts + let addresses: Vec
= bundle_state.state.keys().copied().collect(); + for (i, addr) in addresses.iter().take(2).enumerate() { + let revert = AccountRevert { + wipe_storage: i == 0, // First account has storage wiped + ..AccountRevert::default() + }; + bundle_state.reverts.push(vec![(*addr, revert)]); + } + + // Set realistic sizes + bundle_state.state_size = bundle_state.state.len(); + bundle_state.reverts_size = bundle_state.reverts.len(); + + bundle_state + } + #[test] + fn test_sort_bundle_state_for_comparison() { + // Use the fixture function to create test data + let bundle_state = create_bundle_state(); + + // Call the function under test + let sorted = sort_bundle_state_for_comparison(&bundle_state); + + // Verify state_size and reverts_size values match the fixture + assert_eq!(sorted.state_size, 3); + assert_eq!(sorted.reverts_size, 2); + + // Verify state contains our mock accounts + assert_eq!(sorted.state.len(), 3); // We added 3 accounts + + // Verify contracts contains our mock contracts + assert_eq!(sorted.contracts.len(), 3); // We added 3 contracts + + // Verify reverts is an array with multiple blocks of reverts + let reverts = &sorted.reverts; + assert_eq!(reverts.len(), 2); // Fixture has two blocks of reverts + + // Verify that the state accounts have the expected structure + for account_data in sorted.state.values() { + // BundleAccountSorted has info, original_info, storage, and status fields + // Just verify the structure exists by accessing the fields + let _info = &account_data.info; + let _original_info = &account_data.original_info; + let _storage = &account_data.storage; + let _status = &account_data.status; + } + } + + #[test] + fn test_data_collector_collect() { + // Create test data using the fixture function + let bundle_state = create_bundle_state(); + + // Create a State with StateProviderTest + let state_provider = StateProviderTest::default(); + let mut state = State::builder() + .with_database(StateProviderDatabase::new( + Box::new(state_provider) as Box + )) + .with_bundle_update() + .build(); + + // Insert contracts from the fixture into the state cache + for (code_hash, bytecode) in &bundle_state.contracts { + state.cache.contracts.insert(*code_hash, bytecode.clone()); + } + + // Manually set the bundle state in the state object + state.bundle_state = bundle_state; + + // Call the collect function + let result = collect_execution_data(state); + // Verify the function returns successfully + assert!(result.is_ok()); + + let (codes, _preimages, _hashed_state, returned_bundle_state) = result.unwrap(); + + // Verify that the returned data contains expected values + // Since we used the fixture data, we should have some codes and state + assert!(!codes.is_empty(), "Expected some bytecode entries"); + assert!(!returned_bundle_state.state.is_empty(), "Expected some state entries"); + + // Verify the bundle state structure matches our fixture + assert_eq!(returned_bundle_state.state.len(), 3, "Expected 3 accounts from fixture"); + assert_eq!(returned_bundle_state.contracts.len(), 3, "Expected 3 contracts from fixture"); + } + + #[test] + fn test_re_execute_block() { + // Create hook instance + let (hook, _output_directory, _temp_dir) = create_test_hook(); + + // Setup to call re_execute_block + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + + // Create a random block that inherits from the parent header + let recovered_block = random_block( + &mut rng, + 2, // block number + BlockParams { + parent: 
Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let result = hook.re_execute_block(&parent_header, &recovered_block); + + // Verify the function behavior with mock data + assert!(result.is_ok(), "re_execute_block should return Ok"); + } + + /// Creates test `InvalidBlockWitnessHook` with temporary directory + fn create_test_hook() -> ( + InvalidBlockWitnessHook, EthEvmConfig>, + PathBuf, + TempDir, + ) { + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let output_directory = temp_dir.path().to_path_buf(); + + let provider = MockEthProvider::::default(); + let evm_config = EthEvmConfig::mainnet(); + + let hook = + InvalidBlockWitnessHook::new(provider, evm_config, output_directory.clone(), None); + + (hook, output_directory, temp_dir) + } + + #[test] + fn test_handle_witness_operations_with_healthy_client_mock() { + // Create hook instance with mock healthy client + let (hook, output_directory, _temp_dir) = create_test_hook(); + + // Create sample ExecutionWitness with correct types + let witness = ExecutionWitness { + state: vec![Bytes::from("state_data")], + codes: vec![Bytes::from("code_data")], + keys: vec![Bytes::from("key_data")], + ..Default::default() + }; + + // Call handle_witness_operations + let result = hook.handle_witness_operations(&witness, "test_block_healthy", 67890); + + // Should succeed + assert!(result.is_ok()); + + // Check that witness file was created + let witness_file = output_directory.join("test_block_healthy.witness.re_executed.json"); + assert!(witness_file.exists()); + } + + #[test] + fn test_handle_witness_operations_file_creation() { + // Test file creation and content validation + let (hook, output_directory, _temp_dir) = create_test_hook(); + + let witness = ExecutionWitness { + state: vec![Bytes::from("test_state")], + codes: vec![Bytes::from("test_code")], + keys: vec![Bytes::from("test_key")], + ..Default::default() + }; + + let block_prefix = "file_test_block"; + let block_number = 11111; + + // Call handle_witness_operations + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + + // Verify file was created with correct name + let expected_file = + output_directory.join(format!("{}.witness.re_executed.json", block_prefix)); + assert!(expected_file.exists()); + + // Read and verify file content is valid JSON and contains witness structure + let file_content = std::fs::read_to_string(&expected_file).expect("Failed to read file"); + let parsed_witness: serde_json::Value = + serde_json::from_str(&file_content).expect("File should contain valid JSON"); + + // Verify the JSON structure contains expected fields + assert!(parsed_witness.get("state").is_some(), "JSON should contain 'state' field"); + assert!(parsed_witness.get("codes").is_some(), "JSON should contain 'codes' field"); + assert!(parsed_witness.get("keys").is_some(), "JSON should contain 'keys' field"); + } + + #[test] + fn test_proof_generator_generate() { + // Use existing MockEthProvider + let mock_provider = MockEthProvider::default(); + let state_provider: Box = Box::new(mock_provider); + + // Mock Data + let mut codes = BTreeMap::new(); + codes.insert(B256::from([1u8; 32]), Bytes::from("contract_code_1")); + codes.insert(B256::from([2u8; 32]), Bytes::from("contract_code_2")); + + let mut preimages = BTreeMap::new(); + preimages.insert(B256::from([3u8; 32]), Bytes::from("preimage_1")); + preimages.insert(B256::from([4u8; 32]), 
Bytes::from("preimage_2")); + + let hashed_state = reth_trie::HashedPostState::default(); + + // Call generate function + let result = generate(codes.clone(), preimages.clone(), hashed_state, state_provider); + + // Verify result + assert!(result.is_ok(), "generate function should succeed"); + let execution_witness = result.unwrap(); + + assert!(execution_witness.state.is_empty(), "State should be empty from MockEthProvider"); + + let expected_codes: Vec = codes.into_values().collect(); + assert_eq!( + execution_witness.codes.len(), + expected_codes.len(), + "Codes length should match" + ); + for code in &expected_codes { + assert!( + execution_witness.codes.contains(code), + "Codes should contain expected bytecode" + ); + } + + let expected_keys: Vec = preimages.into_values().collect(); + assert_eq!(execution_witness.keys.len(), expected_keys.len(), "Keys length should match"); + for key in &expected_keys { + assert!(execution_witness.keys.contains(key), "Keys should contain expected preimage"); + } + } + + #[test] + fn test_validate_bundle_state_matching() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + let block_prefix = "test_block_123"; + + // Test with identical states - should not produce any warnings or files + let result = hook.validate_bundle_state(&bundle_state, &bundle_state, block_prefix); + assert!(result.is_ok()); + } + + #[test] + fn test_validate_bundle_state_mismatch() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let original_state = create_bundle_state(); + let mut modified_state = create_bundle_state(); + + // Modify the state to create a mismatch + let addr = Address::from([1u8; 20]); + if let Some(account) = modified_state.state.get_mut(&addr) && + let Some(ref mut info) = account.info + { + info.balance = U256::from(999); + } + + let block_prefix = "test_block_mismatch"; + + // Test with different states - should save files and log warning + let result = hook.validate_bundle_state(&modified_state, &original_state, block_prefix); + assert!(result.is_ok()); + + // Verify that files were created + let original_file = output_dir.join(format!("{}.bundle_state.original.json", block_prefix)); + let re_executed_file = + output_dir.join(format!("{}.bundle_state.re_executed.json", block_prefix)); + let diff_file = output_dir.join(format!("{}.bundle_state.diff", block_prefix)); + + assert!(original_file.exists(), "Original bundle state file should be created"); + assert!(re_executed_file.exists(), "Re-executed bundle state file should be created"); + assert!(diff_file.exists(), "Diff file should be created"); + } + + /// Creates test `TrieUpdates` with account nodes and removed nodes + fn create_test_trie_updates() -> TrieUpdates { + use alloy_primitives::map::HashMap; + use reth_trie::{updates::TrieUpdates, BranchNodeCompact, Nibbles}; + use std::collections::HashSet; + + let mut account_nodes = HashMap::default(); + let nibbles = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3]); + let branch_node = BranchNodeCompact::new( + 0b1010, // state_mask + 0b1010, // tree_mask - must be subset of state_mask + 0b1000, // hash_mask + vec![B256::from([1u8; 32])], // hashes + None, // root_hash + ); + account_nodes.insert(nibbles, branch_node); + + let mut removed_nodes = HashSet::default(); + removed_nodes.insert(Nibbles::from_nibbles_unchecked([0x4, 0x5, 0x6])); + + TrieUpdates { account_nodes, removed_nodes, storage_tries: HashMap::default() } + } + + #[test] + fn 
test_validate_state_root_and_trie_with_trie_updates() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + + // Generate test data + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + let recovered_block = random_block( + &mut rng, + 2, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let trie_updates = create_test_trie_updates(); + let original_root = B256::from([2u8; 32]); // Different from what will be computed + let block_prefix = "test_state_root_with_trie"; + + // Test with trie updates - this will likely produce warnings due to mock data + let result = hook.validate_state_root_and_trie( + &parent_header, + &recovered_block, + &bundle_state, + Some((&trie_updates, original_root)), + block_prefix, + ); + assert!(result.is_ok()); + } + + #[test] + fn test_on_invalid_block_calls_all_validation_methods() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + + // Generate test data + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + let recovered_block = random_block( + &mut rng, + 2, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + // Create mock BlockExecutionOutput + let output = BlockExecutionOutput { + state: bundle_state, + result: reth_provider::BlockExecutionResult { + receipts: vec![], + requests: Requests::default(), + gas_used: 0, + }, + }; + + // Create test trie updates + let trie_updates = create_test_trie_updates(); + let state_root = B256::random(); + + // Test that on_invalid_block attempts to call all its internal methods + // by checking that it doesn't panic and tries to create files + let files_before = output_dir.read_dir().unwrap().count(); + + let _result = hook.on_invalid_block( + &parent_header, + &recovered_block, + &output, + Some((&trie_updates, state_root)), + ); + + // Verify that the function attempted to process the block: + // Either it succeeded, or it created some output files during processing + let files_after = output_dir.read_dir().unwrap().count(); + + // The function should attempt to execute its workflow + assert!( + files_after >= files_before, + "on_invalid_block should attempt to create output files during processing" + ); + } + + #[test] + fn test_handle_witness_operations_with_empty_witness() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let witness = ExecutionWitness::default(); + let block_prefix = "empty_witness_test"; + let block_number = 12345; + + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + } + + #[test] + fn test_handle_witness_operations_with_zero_block_number() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let witness = ExecutionWitness { + state: vec![Bytes::from("test_state")], + codes: vec![Bytes::from("test_code")], + keys: vec![Bytes::from("test_key")], + ..Default::default() + }; + let block_prefix = "zero_block_test"; + let block_number = 0; + + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + } + + #[test] + fn test_handle_witness_operations_with_large_witness_data() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let large_data = vec![0u8; 10000]; // 10KB of 
data + let witness = ExecutionWitness { + state: vec![Bytes::from(large_data.clone())], + codes: vec![Bytes::from(large_data.clone())], + keys: vec![Bytes::from(large_data)], + ..Default::default() + }; + let block_prefix = "large_witness_test"; + let block_number = 999999; + + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + } + + #[test] + fn test_validate_bundle_state_with_empty_states() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let empty_state = BundleState::default(); + let block_prefix = "empty_states_test"; + + let result = hook.validate_bundle_state(&empty_state, &empty_state, block_prefix); + assert!(result.is_ok()); + } + + #[test] + fn test_validate_bundle_state_with_different_contract_counts() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let state1 = create_bundle_state(); + let mut state2 = create_bundle_state(); + + // Add extra contract to state2 + let extra_contract_hash = B256::random(); + state2.contracts.insert( + extra_contract_hash, + Bytecode::new_raw(Bytes::from(vec![0x60, 0x00, 0x60, 0x00, 0xfd])), // REVERT opcode + ); + + let block_prefix = "different_contracts_test"; + let result = hook.validate_bundle_state(&state1, &state2, block_prefix); + assert!(result.is_ok()); + + // Verify diff files were created + let diff_file = output_dir.join(format!("{}.bundle_state.diff", block_prefix)); + assert!(diff_file.exists()); + } + + #[test] + fn test_save_diff_with_identical_values() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let value1 = "identical_value"; + let value2 = "identical_value"; + let filename = "identical_diff_test".to_string(); + + let result = hook.save_diff(filename.clone(), &value1, &value2); + assert!(result.is_ok()); + + let diff_file = output_dir.join(filename); + assert!(diff_file.exists()); + } + + #[test] + fn test_validate_state_root_and_trie_without_trie_updates() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + let recovered_block = random_block( + &mut rng, + 2, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let block_prefix = "no_trie_updates_test"; + + // Test without trie updates (None case) + let result = hook.validate_state_root_and_trie( + &parent_header, + &recovered_block, + &bundle_state, + None, + block_prefix, + ); + assert!(result.is_ok()); + } + + #[test] + fn test_complete_invalid_block_workflow() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let mut rng = generators::rng(); + + // Create a realistic block scenario + let parent_header = generators::random_header(&mut rng, 100, None); + let invalid_block = random_block( + &mut rng, + 101, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(3), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let bundle_state = create_bundle_state(); + let trie_updates = create_test_trie_updates(); + + // Test validation methods + let validation_result = + hook.validate_bundle_state(&bundle_state, &bundle_state, "integration_test"); + assert!(validation_result.is_ok(), "Bundle state validation should succeed"); + + let state_root_result = hook.validate_state_root_and_trie( + &parent_header, + &invalid_block, + &bundle_state, + Some((&trie_updates, B256::random())), + 
"integration_test", + ); + assert!(state_root_result.is_ok(), "State root validation should succeed"); + } + + #[test] + fn test_integration_workflow_components() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let mut rng = generators::rng(); + + // Create test data + let parent_header = generators::random_header(&mut rng, 50, None); + let _invalid_block = random_block( + &mut rng, + 51, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(2), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let bundle_state = create_bundle_state(); + let _trie_updates = create_test_trie_updates(); + + // Test individual components that would be part of the complete flow + let validation_result = + hook.validate_bundle_state(&bundle_state, &bundle_state, "integration_component_test"); + assert!(validation_result.is_ok(), "Component validation should succeed"); + } +} diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 665dcab9a88..1717cc6ec3f 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-ethereum-primitives = { workspace = true, features = ["serde"] } -reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-primitives-traits.workspace = true reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true From 06b33fd64b70a256fa5a8c89a80933c58758cbd3 Mon Sep 17 00:00:00 2001 From: stevencartavia <112043913+stevencartavia@users.noreply.github.com> Date: Wed, 15 Oct 2025 02:12:01 -0600 Subject: [PATCH 050/371] chore: replace poll_next_unpin loop with poll_recv_many (#18978) --- crates/net/network/src/budget.rs | 7 ------ crates/net/network/src/transactions/mod.rs | 27 +++++++++++----------- 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/crates/net/network/src/budget.rs b/crates/net/network/src/budget.rs index 824148387b4..f1d9ca87469 100644 --- a/crates/net/network/src/budget.rs +++ b/crates/net/network/src/budget.rs @@ -35,13 +35,6 @@ pub const DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS: u32 = DEFAULT_BUD // Default is 40 pending pool imports. pub const DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS: u32 = 4 * DEFAULT_BUDGET_TRY_DRAIN_STREAM; -/// Default budget to try and stream hashes of successfully imported transactions from the pool. -/// -/// Default is naturally same as the number of transactions to attempt importing, -/// [`DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS`], so 40 pool imports. -pub const DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS: u32 = - DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS; - /// Polls the given stream. Breaks with `true` if there maybe is more work. #[macro_export] macro_rules! 
poll_nested_stream_with_budget { diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 9eb07e7b1a0..f4ef42523d5 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -28,8 +28,7 @@ use self::constants::{tx_manager::*, DEFAULT_SOFT_LIMIT_BYTE_SIZE_TRANSACTIONS_B use crate::{ budget::{ DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS, - DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, - DEFAULT_BUDGET_TRY_DRAIN_STREAM, + DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_STREAM, }, cache::LruCache, duration_metered_exec, metered_poll_nested_stream_with_budget, @@ -77,7 +76,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError}; -use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream}; +use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, trace}; /// The future for importing transactions into the pool. @@ -339,7 +338,7 @@ pub struct TransactionsManager< /// - no nonce gaps /// - all dynamic fee requirements are (currently) met /// - account has enough balance to cover the transaction's gas - pending_transactions: ReceiverStream, + pending_transactions: mpsc::Receiver, /// Incoming events from the [`NetworkManager`](crate::NetworkManager). transaction_events: UnboundedMeteredReceiver>, /// How the `TransactionsManager` is configured. @@ -422,7 +421,7 @@ impl peers: Default::default(), command_tx, command_rx: UnboundedReceiverStream::new(command_rx), - pending_transactions: ReceiverStream::new(pending), + pending_transactions: pending, transaction_events: UnboundedMeteredReceiver::new( from_network, NETWORK_POOL_TRANSACTIONS_SCOPE, @@ -1529,14 +1528,16 @@ where // We don't expect this buffer to be large, since only pending transactions are // emitted here. let mut new_txs = Vec::new(); - let maybe_more_pending_txns = metered_poll_nested_stream_with_budget!( - poll_durations.acc_imported_txns, - "net::tx", - "Pending transactions stream", - DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, - this.pending_transactions.poll_next_unpin(cx), - |hash| new_txs.push(hash) - ); + let maybe_more_pending_txns = match this.pending_transactions.poll_recv_many( + cx, + &mut new_txs, + SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE, + ) { + Poll::Ready(count) => { + count == SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE + } + Poll::Pending => false, + }; if !new_txs.is_empty() { this.on_new_pending_transactions(new_txs); } From 00f173307cc165bc9f217d8498c7a0476d88d0dc Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Wed, 15 Oct 2025 10:15:34 +0200 Subject: [PATCH 051/371] fix: Set Era pipeline stage to last checkpoint when there is no target (#19000) --- crates/stages/stages/src/stages/era.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/stages/stages/src/stages/era.rs b/crates/stages/stages/src/stages/era.rs index 436ee769659..971bc11f897 100644 --- a/crates/stages/stages/src/stages/era.rs +++ b/crates/stages/stages/src/stages/era.rs @@ -211,10 +211,16 @@ where height } else { - input.target() + // It's possible for a pipeline sync to be executed with a None target, e.g. after a + // stage was manually dropped, and `reth node` is then called without a `--debug.tip`. 
+ // + // In this case we don't want to simply default to zero, as that would overwrite the + // previously stored checkpoint block number. Instead we default to that previous + // checkpoint. + input.target.unwrap_or_else(|| input.checkpoint().block_number) }; - Ok(ExecOutput { checkpoint: StageCheckpoint::new(height), done: height == input.target() }) + Ok(ExecOutput { checkpoint: StageCheckpoint::new(height), done: height >= input.target() }) } fn unwind( From b6f7fae19adc9f45493ef53d6b164b406fdabc11 Mon Sep 17 00:00:00 2001 From: Jennifer Date: Wed, 15 Oct 2025 09:49:51 +0100 Subject: [PATCH 052/371] ci: Add tests for Paris scenario in hive.yml (#19013) --- .github/workflows/hive.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 13a952e6875..4b1b36027f2 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -153,6 +153,8 @@ jobs: limit: .*tests/homestead.* - sim: ethereum/eest/consume-engine limit: .*tests/frontier.* + - sim: ethereum/eest/consume-engine + limit: .*tests/paris.* # consume-rlp - sim: ethereum/eest/consume-rlp @@ -171,6 +173,8 @@ jobs: limit: .*tests/homestead.* - sim: ethereum/eest/consume-rlp limit: .*tests/frontier.* + - sim: ethereum/eest/consume-rlp + limit: .*tests/paris.* needs: - prepare-reth - prepare-hive From 2f82b7c77115019041d15bf707e2a4df5d7c3068 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Oct 2025 11:06:34 +0200 Subject: [PATCH 053/371] chore: bump book timeout (#19016) --- .github/workflows/book.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 389bd34c700..9e4cf965eda 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -13,7 +13,7 @@ on: jobs: build: runs-on: ubuntu-latest - timeout-minutes: 60 + timeout-minutes: 90 steps: - name: Checkout uses: actions/checkout@v5 From ee6cac72de520c985c1eb9176a8964bc5733895f Mon Sep 17 00:00:00 2001 From: Ivan Wang <314130948@qq.com> Date: Wed, 15 Oct 2025 17:07:42 +0800 Subject: [PATCH 054/371] feat: add metrics for safe and finalized block heights (#18987) --- crates/engine/tree/src/tree/metrics.rs | 4 ++++ crates/engine/tree/src/tree/mod.rs | 8 ++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 4d3310543d1..844db1e63b9 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -122,6 +122,10 @@ pub(crate) struct TreeMetrics { pub reorgs: Counter, /// The latest reorg depth pub latest_reorg_depth: Gauge, + /// The current safe block height (this is required by optimism) + pub safe_block_height: Gauge, + /// The current finalized block height (this is required by optimism) + pub finalized_block_height: Gauge, } /// Metrics for the `EngineApi`. 
diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 2ea4b552e88..7f1183f5efc 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -2805,7 +2805,9 @@ where // we're also persisting the finalized block on disk so we can reload it on // restart this is required by optimism which queries the finalized block: let _ = self.persistence.save_finalized_block_number(finalized.number()); - self.canonical_in_memory_state.set_finalized(finalized); + self.canonical_in_memory_state.set_finalized(finalized.clone()); + // Update finalized block height metric + self.metrics.tree.finalized_block_height.set(finalized.number() as f64); } } Err(err) => { @@ -2833,7 +2835,9 @@ where // we're also persisting the safe block on disk so we can reload it on // restart this is required by optimism which queries the safe block: let _ = self.persistence.save_safe_block_number(safe.number()); - self.canonical_in_memory_state.set_safe(safe); + self.canonical_in_memory_state.set_safe(safe.clone()); + // Update safe block height metric + self.metrics.tree.safe_block_height.set(safe.number() as f64); } } Err(err) => { From a1aed9d9f072123a07b56c9e2ffe528fd76371d6 Mon Sep 17 00:00:00 2001 From: GarmashAlex Date: Wed, 15 Oct 2025 12:41:02 +0300 Subject: [PATCH 055/371] chore(privitives-traits): remove unused serde derives and camelCase attribute (#19014) --- .../src/transaction/access_list.rs | 28 ++----------------- 1 file changed, 2 insertions(+), 26 deletions(-) diff --git a/crates/primitives-traits/src/transaction/access_list.rs b/crates/primitives-traits/src/transaction/access_list.rs index 06c033e36b0..e4d5638f562 100644 --- a/crates/primitives-traits/src/transaction/access_list.rs +++ b/crates/primitives-traits/src/transaction/access_list.rs @@ -8,22 +8,11 @@ mod tests { use proptest::proptest; use proptest_arbitrary_interop::arb; use reth_codecs::{add_arbitrary_tests, Compact}; - use serde::{Deserialize, Serialize}; /// This type is kept for compatibility tests after the codec support was added to alloy-eips /// `AccessList` type natively #[derive( - Clone, - Debug, - PartialEq, - Eq, - Hash, - Default, - RlpDecodableWrapper, - RlpEncodableWrapper, - Serialize, - Deserialize, - Compact, + Clone, Debug, PartialEq, Eq, Default, RlpDecodableWrapper, RlpEncodableWrapper, Compact, )] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact, rlp)] @@ -36,22 +25,9 @@ mod tests { } // This - #[derive( - Clone, - Debug, - PartialEq, - Eq, - Hash, - Default, - RlpDecodable, - RlpEncodable, - Serialize, - Deserialize, - Compact, - )] + #[derive(Clone, Debug, PartialEq, Eq, Default, RlpDecodable, RlpEncodable, Compact)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact, rlp)] - #[serde(rename_all = "camelCase")] struct RethAccessListItem { /// Account address that would be loaded at the start of execution address: Address, From 731e107ee6c813dd6e93692e2b03477fc58c430a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?jos=C3=A9=20v?= <52646071+Peponks9@users.noreply.github.com> Date: Wed, 15 Oct 2025 03:42:11 -0600 Subject: [PATCH 056/371] chore: refactor loop in `add_new_transactions` (#19006) --- crates/transaction-pool/src/pool/best.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index a5aa664e764..90cd042df69 100644 --- 
a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -16,6 +16,8 @@ use std::{ use tokio::sync::broadcast::{error::TryRecvError, Receiver}; use tracing::debug; +const MAX_NEW_TRANSACTIONS_PER_BATCH: usize = 16; + /// An iterator that returns transactions that can be executed on the current state (*best* /// transactions). /// @@ -165,13 +167,17 @@ impl BestTransactions { /// Checks for new transactions that have come into the `PendingPool` after this iterator was /// created and inserts them fn add_new_transactions(&mut self) { - while let Some(pending_tx) = self.try_recv() { - // same logic as PendingPool::add_transaction/PendingPool::best_with_unlocked - let tx_id = *pending_tx.transaction.id(); - if self.ancestor(&tx_id).is_none() { - self.independent.insert(pending_tx.clone()); + for _ in 0..MAX_NEW_TRANSACTIONS_PER_BATCH { + if let Some(pending_tx) = self.try_recv() { + // same logic as PendingPool::add_transaction/PendingPool::best_with_unlocked + let tx_id = *pending_tx.transaction.id(); + if self.ancestor(&tx_id).is_none() { + self.independent.insert(pending_tx.clone()); + } + self.all.insert(tx_id, pending_tx); + } else { + break; } - self.all.insert(tx_id, pending_tx); } } } From 7fc3980904445f077264c578539b0671afb4c698 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 15 Oct 2025 12:45:36 +0200 Subject: [PATCH 057/371] chore(ci): bump hive eest to v5.3.0 (#19021) --- .github/assets/hive/build_simulators.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/assets/hive/build_simulators.sh b/.github/assets/hive/build_simulators.sh index dab77772f8e..709ecc51e01 100755 --- a/.github/assets/hive/build_simulators.sh +++ b/.github/assets/hive/build_simulators.sh @@ -11,7 +11,7 @@ go build . # Run each hive command in the background for each simulator and wait echo "Building images" -./hive -client reth --sim "ethereum/eest" --sim.buildarg fixtures=https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0/fixtures_develop.tar.gz --sim.buildarg branch=v5.1.0 -sim.timelimit 1s || true & +./hive -client reth --sim "ethereum/eest" --sim.buildarg fixtures=https://github.com/ethereum/execution-spec-tests/releases/download/v5.3.0/fixtures_develop.tar.gz --sim.buildarg branch=v5.3.0 -sim.timelimit 1s || true & ./hive -client reth --sim "ethereum/engine" -sim.timelimit 1s || true & ./hive -client reth --sim "devp2p" -sim.timelimit 1s || true & ./hive -client reth --sim "ethereum/rpc-compat" -sim.timelimit 1s || true & From 6b08d30e14411b804e6a831cafd5c69ba29c2cdb Mon Sep 17 00:00:00 2001 From: Udoagwa Franklin <54338168+frankudoags@users.noreply.github.com> Date: Wed, 15 Oct 2025 12:15:42 +0100 Subject: [PATCH 058/371] feat(devp2p): make eth p2p networkId configurable (#19020) Co-authored-by: frankudoags --- crates/net/network/src/config.rs | 16 +++++++++++++++- crates/node/core/src/args/network.rs | 6 ++++++ docs/vocs/docs/pages/cli/reth/node.mdx | 3 +++ docs/vocs/docs/pages/cli/reth/p2p/body.mdx | 3 +++ docs/vocs/docs/pages/cli/reth/p2p/header.mdx | 3 +++ docs/vocs/docs/pages/cli/reth/stage/run.mdx | 3 +++ 6 files changed, 33 insertions(+), 1 deletion(-) diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 8e8d11fe69d..c403bdcb557 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -226,6 +226,8 @@ pub struct NetworkConfigBuilder { handshake: Arc, /// List of block hashes to check for required blocks. 
required_block_hashes: Vec, + /// Optional network id + network_id: Option, } impl NetworkConfigBuilder { @@ -267,6 +269,7 @@ impl NetworkConfigBuilder { nat: None, handshake: Arc::new(EthHandshake::default()), required_block_hashes: Vec::new(), + network_id: None, } } @@ -587,6 +590,12 @@ impl NetworkConfigBuilder { self } + /// Set the optional network id. + pub const fn network_id(mut self, network_id: Option) -> Self { + self.network_id = network_id; + self + } + /// Consumes the type and creates the actual [`NetworkConfig`] /// for the given client type that can interact with the chain. /// @@ -620,6 +629,7 @@ impl NetworkConfigBuilder { nat, handshake, required_block_hashes, + network_id, } = self; let head = head.unwrap_or_else(|| Head { @@ -646,7 +656,11 @@ impl NetworkConfigBuilder { hello_message.port = listener_addr.port(); // set the status - let status = UnifiedStatus::spec_builder(&chain_spec, &head); + let mut status = UnifiedStatus::spec_builder(&chain_spec, &head); + + if let Some(id) = network_id { + status.chain = id.into(); + } // set a fork filter based on the chain spec and head let fork_filter = chain_spec.fork_filter(head); diff --git a/crates/node/core/src/args/network.rs b/crates/node/core/src/args/network.rs index a32f14edd41..52ff52b1cee 100644 --- a/crates/node/core/src/args/network.rs +++ b/crates/node/core/src/args/network.rs @@ -184,6 +184,10 @@ pub struct NetworkArgs { /// Peers that don't have these blocks will be filtered out. #[arg(long = "required-block-hashes", value_delimiter = ',')] pub required_block_hashes: Vec, + + /// Optional network ID to override the chain specification's network ID for P2P connections + #[arg(long)] + pub network_id: Option, } impl NetworkArgs { @@ -297,6 +301,7 @@ impl NetworkArgs { )) .disable_tx_gossip(self.disable_tx_gossip) .required_block_hashes(self.required_block_hashes.clone()) + .network_id(self.network_id) } /// If `no_persist_peers` is false then this returns the path to the persistent peers file path. @@ -371,6 +376,7 @@ impl Default for NetworkArgs { disable_tx_gossip: false, propagation_mode: TransactionPropagationMode::Sqrt, required_block_hashes: vec![], + network_id: None, } } } diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index edb982caf88..a172256058b 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -248,6 +248,9 @@ Networking: --required-block-hashes Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + RPC: --http Enable the HTTP-RPC server diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index 070079b715f..ae0f3d293d1 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -206,6 +206,9 @@ Networking: --required-block-hashes Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + Datadir: --datadir The path to the data dir for all reth files and subdirectories. 
diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index 8725c940e49..9e542916d4c 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -206,6 +206,9 @@ Networking: --required-block-hashes Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + Datadir: --datadir The path to the data dir for all reth files and subdirectories. diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index e876c83f84a..2af69a053d6 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -302,6 +302,9 @@ Networking: --required-block-hashes Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + Logging: --log.stdout.format The format to use for logs written to stdout From 45194fc5df0d07beb09aeaa5586fea8837583c32 Mon Sep 17 00:00:00 2001 From: Skylar Ray <137945430+sky-coderay@users.noreply.github.com> Date: Wed, 15 Oct 2025 14:22:21 +0300 Subject: [PATCH 059/371] chore: remove unused Args struct from exex-subscription example (#19019) --- examples/exex-subscription/src/main.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/examples/exex-subscription/src/main.rs b/examples/exex-subscription/src/main.rs index eb7ffaaf754..2f0c38f3852 100644 --- a/examples/exex-subscription/src/main.rs +++ b/examples/exex-subscription/src/main.rs @@ -4,7 +4,6 @@ //! requested address. 
#[allow(dead_code)] use alloy_primitives::{Address, U256}; -use clap::Parser; use futures::TryStreamExt; use jsonrpsee::{ core::SubscriptionResult, proc_macros::rpc, tracing, PendingSubscriptionSink, @@ -166,14 +165,8 @@ async fn my_exex( Ok(()) } -#[derive(Parser, Debug)] -struct Args { - #[arg(long)] - enable_ext: bool, -} - fn main() -> eyre::Result<()> { - reth_ethereum::cli::Cli::parse_args().run(|builder, _args| async move { + reth_ethereum::cli::Cli::parse_args().run(|builder, _| async move { let (subscriptions_tx, subscriptions_rx) = mpsc::unbounded_channel::(); let rpc = StorageWatcherRpc::new(subscriptions_tx.clone()); From fc03347cdd201b9514cc2c96395860cbb91d31c7 Mon Sep 17 00:00:00 2001 From: Luca Provini Date: Wed, 15 Oct 2025 14:02:26 +0200 Subject: [PATCH 060/371] feat: add pending sequence as pub (#19022) --- crates/optimism/flashblocks/src/lib.rs | 2 +- crates/optimism/flashblocks/src/sequence.rs | 34 +++++++++++++-------- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/crates/optimism/flashblocks/src/lib.rs b/crates/optimism/flashblocks/src/lib.rs index 582cbca633f..11647039930 100644 --- a/crates/optimism/flashblocks/src/lib.rs +++ b/crates/optimism/flashblocks/src/lib.rs @@ -12,7 +12,7 @@ pub use consensus::FlashBlockConsensusClient; mod payload; pub use payload::PendingFlashBlock; mod sequence; -pub use sequence::FlashBlockCompleteSequence; +pub use sequence::{FlashBlockCompleteSequence, FlashBlockPendingSequence}; mod service; mod worker; diff --git a/crates/optimism/flashblocks/src/sequence.rs b/crates/optimism/flashblocks/src/sequence.rs index 087f97db7be..59d4cfecbcd 100644 --- a/crates/optimism/flashblocks/src/sequence.rs +++ b/crates/optimism/flashblocks/src/sequence.rs @@ -13,7 +13,7 @@ const FLASHBLOCK_SEQUENCE_CHANNEL_SIZE: usize = 128; /// An ordered B-tree keeping the track of a sequence of [`FlashBlock`]s by their indices. #[derive(Debug)] -pub(crate) struct FlashBlockPendingSequence { +pub struct FlashBlockPendingSequence { /// tracks the individual flashblocks in order /// /// With a blocktime of 2s and flashblock tick-rate of 200ms plus one extra flashblock per new @@ -29,7 +29,8 @@ impl FlashBlockPendingSequence where T: SignedTransaction, { - pub(crate) fn new() -> Self { + /// Create a new pending sequence. + pub fn new() -> Self { // Note: if the channel is full, send will not block but rather overwrite the oldest // messages. Order is preserved. let (tx, _) = broadcast::channel(FLASHBLOCK_SEQUENCE_CHANNEL_SIZE); @@ -37,7 +38,7 @@ where } /// Gets a subscriber to the flashblock sequences produced. - pub(crate) fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { + pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { self.block_broadcaster.subscribe() } @@ -70,7 +71,7 @@ where /// Inserts a new block into the sequence. /// /// A [`FlashBlock`] with index 0 resets the set. - pub(crate) fn insert(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { + pub fn insert(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { if flashblock.index == 0 { trace!(number=%flashblock.block_number(), "Tracking new flashblock sequence"); @@ -93,7 +94,7 @@ where } /// Set state root - pub(crate) const fn set_state_root(&mut self, state_root: Option) { + pub const fn set_state_root(&mut self, state_root: Option) { self.state_root = state_root; } @@ -103,9 +104,7 @@ where /// the sequence /// /// Note: flashblocks start at `index 0`. 
- pub(crate) fn ready_transactions( - &self, - ) -> impl Iterator>> + '_ { + pub fn ready_transactions(&self) -> impl Iterator>> + '_ { self.inner .values() .enumerate() @@ -117,31 +116,40 @@ where } /// Returns the first block number - pub(crate) fn block_number(&self) -> Option { + pub fn block_number(&self) -> Option { Some(self.inner.values().next()?.block().metadata.block_number) } /// Returns the payload base of the first tracked flashblock. - pub(crate) fn payload_base(&self) -> Option { + pub fn payload_base(&self) -> Option { self.inner.values().next()?.block().base.clone() } /// Returns the number of tracked flashblocks. - pub(crate) fn count(&self) -> usize { + pub fn count(&self) -> usize { self.inner.len() } /// Returns the reference to the last flashblock. - pub(crate) fn last_flashblock(&self) -> Option<&FlashBlock> { + pub fn last_flashblock(&self) -> Option<&FlashBlock> { self.inner.last_key_value().map(|(_, b)| &b.block) } /// Returns the current/latest flashblock index in the sequence - pub(crate) fn index(&self) -> Option { + pub fn index(&self) -> Option { Some(self.inner.values().last()?.block().index) } } +impl Default for FlashBlockPendingSequence +where + T: SignedTransaction, +{ + fn default() -> Self { + Self::new() + } +} + /// A complete sequence of flashblocks, often corresponding to a full block. /// Ensure invariants of a complete flashblocks sequence. #[derive(Debug, Clone)] From 39ef9dd528b34a58dbcbb9e04f02eb7c850b06b5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Oct 2025 14:39:06 +0200 Subject: [PATCH 061/371] chore: bump alloy-core (#19026) --- Cargo.lock | 36 ++++++++++++++++++------------------ Cargo.toml | 8 ++++---- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7dc6113270d..e095cc6fcf6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -176,9 +176,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6c2905bafc2df7ccd32ca3af13f0b0d82f2e2ff9dfbeb12196c0d978d5c0deb" +checksum = "3fdff496dd4e98a81f4861e66f7eaf5f2488971848bb42d9c892f871730245c8" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -313,9 +313,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2acb6637a9c0e1cdf8971e0ced8f3fa34c04c5e9dccf6bb184f6a64fe0e37d8" +checksum = "5513d5e6bd1cba6bdcf5373470f559f320c05c8c59493b6e98912fbe6733943f" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -409,9 +409,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b77f7d5e60ad8ae6bd2200b8097919712a07a6db622a4b201e7ead6166f02e5" +checksum = "355bf68a433e0fd7f7d33d5a9fc2583fde70bf5c530f63b80845f8da5505cf28" dependencies = [ "alloy-rlp", "arbitrary", @@ -763,9 +763,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78c84c3637bee9b5c4a4d2b93360ee16553d299c3b932712353caf1cea76d0e6" +checksum = "f3ce480400051b5217f19d6e9a82d9010cdde20f1ae9c00d53591e4a1afbb312" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -777,9 +777,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "1.4.0" +version = "1.4.1" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a882aa4e1790063362434b9b40d358942b188477ac1c44cfb8a52816ffc0cc17" +checksum = "6d792e205ed3b72f795a8044c52877d2e6b6e9b1d13f431478121d8d4eaa9028" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -795,9 +795,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e5772107f9bb265d8d8c86e0733937bb20d0857ea5425b1b6ddf51a9804042" +checksum = "0bd1247a8f90b465ef3f1207627547ec16940c35597875cdc09c49d58b19693c" dependencies = [ "const-hex", "dunce", @@ -811,9 +811,9 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e188b939aa4793edfaaa099cb1be4e620036a775b4bdf24fdc56f1cd6fd45890" +checksum = "954d1b2533b9b2c7959652df3076954ecb1122a28cc740aa84e7b0a49f6ac0a9" dependencies = [ "serde", "winnow", @@ -821,9 +821,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c8a9a909872097caffc05df134e5ef2253a1cdb56d3a9cf0052a042ac763f9" +checksum = "70319350969a3af119da6fb3e9bddb1bce66c9ea933600cb297c8b1850ad2a3c" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -12080,9 +12080,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2375c17f6067adc651d8c2c51658019cef32edfff4a982adaf1d7fd1c039f08b" +checksum = "ff790eb176cc81bb8936aed0f7b9f14fc4670069a2d371b3e3b0ecce908b2cb3" dependencies = [ "paste", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index e8e94930193..6a1c89d023f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -483,13 +483,13 @@ revm-inspectors = "0.30.0" # eth alloy-chains = { version = "0.2.5", default-features = false } -alloy-dyn-abi = "1.3.1" +alloy-dyn-abi = "1.4.1" alloy-eip2124 = { version = "0.2.0", default-features = false } alloy-evm = { version = "0.21.2", default-features = false } -alloy-primitives = { version = "1.3.1", default-features = false, features = ["map-foldhash"] } +alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } -alloy-sol-macro = "1.3.1" -alloy-sol-types = { version = "1.3.1", default-features = false } +alloy-sol-macro = "1.4.1" +alloy-sol-types = { version = "1.4.1", default-features = false } alloy-trie = { version = "0.9.1", default-features = false } alloy-hardforks = "0.3.5" From 1b952def2696342d9f51a1eb4f6606d7374bca54 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Oct 2025 14:39:54 +0200 Subject: [PATCH 062/371] fix: unused warnings for tracing (#19025) --- crates/tracing/Cargo.toml | 4 ++-- crates/tracing/src/layers.rs | 6 ++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/crates/tracing/Cargo.toml b/crates/tracing/Cargo.toml index b5bcfacd530..8cf83e138ca 100644 --- a/crates/tracing/Cargo.toml +++ b/crates/tracing/Cargo.toml @@ -26,8 +26,8 @@ tracing-logfmt.workspace = true clap = { workspace = true, features = ["derive"] } eyre.workspace = true rolling-file.workspace = true -url.workspace = true +url = { workspace = true, optional = true } [features] default = ["otlp"] -otlp = ["reth-tracing-otlp"] +otlp = 
["reth-tracing-otlp", "dep:url"] diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 44b2fff5995..385c4fac51d 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -6,10 +6,8 @@ use std::{ fmt, path::{Path, PathBuf}, }; -use tracing::level_filters::LevelFilter; use tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{filter::Directive, EnvFilter, Layer, Registry}; -use url::Url; /// A worker guard returned by the file layer. /// @@ -131,14 +129,14 @@ impl Layers { pub fn with_span_layer( &mut self, service_name: String, - endpoint_exporter: Url, + endpoint_exporter: url::Url, level: tracing::Level, ) -> eyre::Result<()> { // Create the span provider let span_layer = span_layer(service_name, &endpoint_exporter) .map_err(|e| eyre::eyre!("Failed to build OTLP span exporter {}", e))? - .with_filter(LevelFilter::from_level(level)); + .with_filter(tracing::level_filters::LevelFilter::from_level(level)); self.add_layer(span_layer); From 63ec808973c159b5cea4e95b8eb7eca399531608 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Oct 2025 14:52:43 +0200 Subject: [PATCH 063/371] fix: respect cli blob size setting (#19024) --- crates/node/builder/src/components/pool.rs | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index ddc137031b7..9be184bc9c0 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -1,5 +1,6 @@ //! Pool component for the node builder. +use crate::{BuilderContext, FullNodeTypes}; use alloy_primitives::Address; use reth_chain_state::CanonStateSubscriptions; use reth_node_api::TxTy; @@ -9,8 +10,6 @@ use reth_transaction_pool::{ }; use std::{collections::HashSet, future::Future}; -use crate::{BuilderContext, FullNodeTypes}; - /// A type that knows how to build the transaction pool. pub trait PoolBuilder: Send { /// The transaction pool to build. @@ -166,14 +165,12 @@ where pub fn create_blob_store( ctx: &BuilderContext, ) -> eyre::Result { - let data_dir = ctx.config().datadir(); - Ok(reth_transaction_pool::blobstore::DiskFileBlobStore::open( - data_dir.blobstore(), - Default::default(), - )?) + let cache_size = Some(ctx.config().txpool.max_cached_entries); + create_blob_store_with_cache(ctx, cache_size) } -/// Create blob store with custom cache size configuration. +/// Create blob store with custom cache size configuration for how many blobs should be cached in +/// memory. 
pub fn create_blob_store_with_cache( ctx: &BuilderContext, cache_size: Option, From 0cbd514e4b730f4ffd8ccd5adcdaf75ae63512a5 Mon Sep 17 00:00:00 2001 From: Galoretka Date: Wed, 15 Oct 2025 16:03:49 +0300 Subject: [PATCH 064/371] feat(engine): deprecate TestPipelineBuilder::with_executor_results (#19017) --- crates/engine/tree/src/test_utils.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/crates/engine/tree/src/test_utils.rs b/crates/engine/tree/src/test_utils.rs index 2ec00f9b918..e011a54b73c 100644 --- a/crates/engine/tree/src/test_utils.rs +++ b/crates/engine/tree/src/test_utils.rs @@ -3,9 +3,8 @@ use reth_chainspec::ChainSpec; use reth_ethereum_primitives::BlockBody; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives_traits::SealedHeader; -use reth_provider::{ - test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, - ExecutionOutcome, +use reth_provider::test_utils::{ + create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, }; use reth_prune_types::PruneModes; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; @@ -18,13 +17,12 @@ use tokio::sync::watch; #[derive(Default, Debug)] pub struct TestPipelineBuilder { pipeline_exec_outputs: VecDeque>, - executor_results: Vec, } impl TestPipelineBuilder { /// Create a new [`TestPipelineBuilder`]. pub const fn new() -> Self { - Self { pipeline_exec_outputs: VecDeque::new(), executor_results: Vec::new() } + Self { pipeline_exec_outputs: VecDeque::new() } } /// Set the pipeline execution outputs to use for the test consensus engine. @@ -37,8 +35,14 @@ impl TestPipelineBuilder { } /// Set the executor results to use for the test consensus engine. - pub fn with_executor_results(mut self, executor_results: Vec) -> Self { - self.executor_results = executor_results; + #[deprecated( + note = "no-op: executor results are not used and will be removed in a future release" + )] + pub fn with_executor_results( + self, + executor_results: Vec, + ) -> Self { + let _ = executor_results; self } From eb9b08c696810ef0ecffcfd3173784eb77828d36 Mon Sep 17 00:00:00 2001 From: YK Date: Wed, 15 Oct 2025 21:16:54 +0800 Subject: [PATCH 065/371] perf: background init of workers (#19012) --- .../tree/src/tree/payload_processor/mod.rs | 13 +---- .../src/tree/payload_processor/multiproof.rs | 5 +- crates/trie/parallel/src/proof.rs | 4 +- crates/trie/parallel/src/proof_task.rs | 55 +++++++++++-------- 4 files changed, 40 insertions(+), 37 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index f3ecdfa86d5..e3090d60756 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -166,8 +166,6 @@ where /// /// This returns a handle to await the final state root and to interact with the tasks (e.g. /// canceling) - /// - /// Returns an error with the original transactions iterator if proof worker spawning fails. 
#[allow(clippy::type_complexity)] pub fn spawn>( &mut self, @@ -179,7 +177,7 @@ where config: &TreeConfig, ) -> Result< PayloadHandle, I::Tx>, I::Error>, - (reth_provider::ProviderError, I, ExecutionEnv, StateProviderBuilder), + (ParallelStateRootError, I, ExecutionEnv, StateProviderBuilder), > where P: DatabaseProviderFactory @@ -203,18 +201,13 @@ where let storage_worker_count = config.storage_worker_count(); let account_worker_count = config.account_worker_count(); let max_proof_task_concurrency = config.max_proof_task_concurrency() as usize; - let proof_handle = match ProofWorkerHandle::new( + let proof_handle = ProofWorkerHandle::new( self.executor.handle().clone(), consistent_view, task_ctx, storage_worker_count, account_worker_count, - ) { - Ok(handle) => handle, - Err(error) => { - return Err((error, transactions, env, provider_builder)); - } - }; + ); // We set it to half of the proof task concurrency, because often for each multiproof we // spawn one Tokio task for the account proof, and one Tokio task for the storage proof. diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 4a71bf620f7..a528b759570 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -553,7 +553,7 @@ impl MultiproofManager { let proof_result: Result = (|| { let receiver = account_proof_worker_handle - .queue_account_multiproof(input) + .dispatch_account_multiproof(input) .map_err(|e| ParallelStateRootError::Other(e.to_string()))?; receiver @@ -1228,8 +1228,7 @@ mod tests { ); let consistent_view = ConsistentDbView::new(factory, None); let proof_handle = - ProofWorkerHandle::new(executor.handle().clone(), consistent_view, task_ctx, 1, 1) - .expect("Failed to spawn proof workers"); + ProofWorkerHandle::new(executor.handle().clone(), consistent_view, task_ctx, 1, 1); let channel = channel(); MultiProofTask::new(config, executor, proof_handle, channel.0, 1, None) diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 0f29502f8c7..ffa7aa4dc31 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -193,7 +193,7 @@ impl ParallelProof { let receiver = self .proof_worker_handle - .queue_account_multiproof(input) + .dispatch_account_multiproof(input) .map_err(|e| ParallelStateRootError::Other(e.to_string()))?; // Wait for account multiproof result from worker @@ -307,7 +307,7 @@ mod tests { let task_ctx = ProofTaskCtx::new(Default::default(), Default::default(), Default::default()); let proof_worker_handle = - ProofWorkerHandle::new(rt.handle().clone(), consistent_view, task_ctx, 1, 1).unwrap(); + ProofWorkerHandle::new(rt.handle().clone(), consistent_view, task_ctx, 1, 1); let parallel_result = ParallelProof::new( Default::default(), diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 2d0f7e933c8..5c26f6d99c3 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -29,7 +29,6 @@ use reth_db_api::transaction::DbTx; use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, - ProviderResult, }; use reth_storage_errors::db::DatabaseError; use reth_trie::{ @@ -112,14 +111,20 @@ enum StorageWorkerJob { /// # Shutdown /// /// Worker shuts down when the crossbeam channel 
closes (all senders dropped). -fn storage_worker_loop( - proof_tx: ProofTaskTx, +fn storage_worker_loop( + view: ConsistentDbView, + task_ctx: ProofTaskCtx, work_rx: CrossbeamReceiver, worker_id: usize, #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, ) where - Tx: DbTx, + Factory: DatabaseProviderFactory, { + // Create db transaction before entering work loop + let provider = + view.provider_ro().expect("Storage worker failed to initialize: database unavailable"); + let proof_tx = ProofTaskTx::new(provider.into_tx(), task_ctx, worker_id); + tracing::debug!( target: "trie::proof_task", worker_id, @@ -258,15 +263,21 @@ fn storage_worker_loop( /// # Shutdown /// /// Worker shuts down when the crossbeam channel closes (all senders dropped). -fn account_worker_loop( - proof_tx: ProofTaskTx, +fn account_worker_loop( + view: ConsistentDbView, + task_ctx: ProofTaskCtx, work_rx: CrossbeamReceiver, storage_work_tx: CrossbeamSender, worker_id: usize, #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, ) where - Tx: DbTx, + Factory: DatabaseProviderFactory, { + // Create db transaction before entering work loop + let provider = + view.provider_ro().expect("Account worker failed to initialize: database unavailable"); + let proof_tx = ProofTaskTx::new(provider.into_tx(), task_ctx, worker_id); + tracing::debug!( target: "trie::proof_task", worker_id, @@ -308,7 +319,7 @@ fn account_worker_loop( ); tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); - let storage_proof_receivers = match queue_storage_proofs( + let storage_proof_receivers = match dispatch_storage_proofs( &storage_work_tx, &input.targets, &mut storage_prefix_sets, @@ -568,7 +579,7 @@ where /// computation. This enables interleaved parallelism for better performance. /// /// Propagates errors up if queuing fails. Receivers must be consumed by the caller. 
-fn queue_storage_proofs( +fn dispatch_storage_proofs( storage_work_tx: &CrossbeamSender, targets: &MultiProofTargets, storage_prefix_sets: &mut B256Map, @@ -864,9 +875,9 @@ impl ProofWorkerHandle { task_ctx: ProofTaskCtx, storage_worker_count: usize, account_worker_count: usize, - ) -> ProviderResult + ) -> Self where - Factory: DatabaseProviderFactory, + Factory: DatabaseProviderFactory + Clone + 'static, { let (storage_work_tx, storage_work_rx) = unbounded::(); let (account_work_tx, account_work_rx) = unbounded::(); @@ -880,9 +891,8 @@ impl ProofWorkerHandle { // Spawn storage workers for worker_id in 0..storage_worker_count { - let provider_ro = view.provider_ro()?; - let tx = provider_ro.into_tx(); - let proof_task_tx = ProofTaskTx::new(tx, task_ctx.clone(), worker_id); + let view_clone = view.clone(); + let task_ctx_clone = task_ctx.clone(); let work_rx_clone = storage_work_rx.clone(); executor.spawn_blocking(move || { @@ -890,7 +900,8 @@ impl ProofWorkerHandle { let metrics = ProofTaskTrieMetrics::default(); storage_worker_loop( - proof_task_tx, + view_clone, + task_ctx_clone, work_rx_clone, worker_id, #[cfg(feature = "metrics")] @@ -907,9 +918,8 @@ impl ProofWorkerHandle { // Spawn account workers for worker_id in 0..account_worker_count { - let provider_ro = view.provider_ro()?; - let tx = provider_ro.into_tx(); - let proof_task_tx = ProofTaskTx::new(tx, task_ctx.clone(), worker_id); + let view_clone = view.clone(); + let task_ctx_clone = task_ctx.clone(); let work_rx_clone = account_work_rx.clone(); let storage_work_tx_clone = storage_work_tx.clone(); @@ -918,7 +928,8 @@ impl ProofWorkerHandle { let metrics = ProofTaskTrieMetrics::default(); account_worker_loop( - proof_task_tx, + view_clone, + task_ctx_clone, work_rx_clone, storage_work_tx_clone, worker_id, @@ -934,7 +945,7 @@ impl ProofWorkerHandle { ); } - Ok(Self::new_handle(storage_work_tx, account_work_tx)) + Self::new_handle(storage_work_tx, account_work_tx) } /// Creates a new [`ProofWorkerHandle`] with direct access to worker pools. 
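The shape of the worker-initialization change in PATCH 065 above, sketched outside the diff: `ProofWorkerHandle::new` no longer opens a read-only provider per worker on the spawning thread (and so no longer returns a `ProviderResult`); each worker instead clones the cheap view handle and opens its own transaction as the first step of its blocking loop, shutting down when the crossbeam channel closes. This is an illustration only, not reth's API: `View`, `RoTx`, `Job`, and `open_ro` are hypothetical stand-ins, `std::thread` replaces the Tokio blocking executor, and the `crossbeam-channel` crate is assumed for the cloneable receiver.

    use crossbeam_channel::{unbounded, Receiver};
    use std::thread::JoinHandle;

    // Hypothetical stand-ins for the real view, transaction, and job types.
    #[derive(Clone)]
    struct View;
    struct RoTx;
    enum Job {
        Work(u64),
    }

    impl View {
        // Assumed helper: opening a read-only transaction is relatively expensive
        // and can fail if the database is unavailable.
        fn open_ro(&self) -> Result<RoTx, String> {
            Ok(RoTx)
        }
    }

    // Spawning is infallible: each worker clones the cheap view handle and opens
    // its own transaction inside the worker thread, not on the spawning thread.
    fn spawn_workers(view: View, rx: Receiver<Job>, count: usize) -> Vec<JoinHandle<()>> {
        (0..count)
            .map(|worker_id| {
                let view = view.clone();
                let rx = rx.clone();
                std::thread::spawn(move || {
                    // Deferred (background) initialization happens here, off the spawner.
                    let _tx = view.open_ro().expect("worker failed to open read-only tx");
                    // Work loop: ends once every sender has been dropped.
                    while let Ok(Job::Work(n)) = rx.recv() {
                        let _ = (worker_id, n); // ... compute a proof using `_tx` ...
                    }
                })
            })
            .collect()
    }

    fn main() {
        let (tx, rx) = unbounded();
        let handles = spawn_workers(View, rx, 2);
        tx.send(Job::Work(1)).unwrap();
        drop(tx); // closing the channel shuts the workers down
        for handle in handles {
            handle.join().unwrap();
        }
    }

The trade-off matches the diff: spawning becomes cheap and infallible, while an unavailable database now surfaces as a worker panic during initialization rather than as an error returned to the caller.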
@@ -963,7 +974,7 @@ impl ProofWorkerHandle { } /// Queue an account multiproof computation - pub fn queue_account_multiproof( + pub fn dispatch_account_multiproof( &self, input: AccountMultiproofInput, ) -> Result, ProviderError> { @@ -1091,7 +1102,7 @@ mod tests { let view = ConsistentDbView::new(factory, None); let ctx = test_ctx(); - let proof_handle = ProofWorkerHandle::new(handle.clone(), view, ctx, 5, 3).unwrap(); + let proof_handle = ProofWorkerHandle::new(handle.clone(), view, ctx, 5, 3); // Verify handle can be cloned let _cloned_handle = proof_handle.clone(); From daa91939f82a6b007e270b5d977d8debac0a1373 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 15 Oct 2025 15:43:12 +0200 Subject: [PATCH 066/371] chore(ci): update expected failures (#19034) --- .github/assets/hive/expected_failures.yaml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index f28fd70be03..ae3817cfc3d 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -41,11 +41,7 @@ engine-cancun: sync: [] -# https://github.com/ethereum/hive/issues/1277 -engine-auth: - - "JWT Authentication: No time drift, correct secret (Paris) (reth)" - - "JWT Authentication: Negative time drift, within limit, correct secret (Paris) (reth)" - - "JWT Authentication: Positive time drift, within limit, correct secret (Paris) (reth)" +engine-auth: [] # 7702 test - no fix: it’s too expensive to check whether the storage is empty on each creation # 6110 related tests - may start passing when fixtures improve From 8880119e176c672670694066f38c73e4ed03d19f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Oct 2025 16:27:44 +0200 Subject: [PATCH 067/371] fix: use header type generic for mask (#19037) --- crates/cli/commands/src/db/get.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 6214df0ec98..9d06a35dcaa 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -1,4 +1,3 @@ -use alloy_consensus::Header; use alloy_primitives::{hex, BlockHash}; use clap::Parser; use reth_db::{ @@ -66,9 +65,10 @@ impl Command { } Subcommand::StaticFile { segment, key, raw } => { let (key, mask): (u64, _) = match segment { - StaticFileSegment::Headers => { - (table_key::(&key)?, >::MASK) - } + StaticFileSegment::Headers => ( + table_key::(&key)?, + >>::MASK, + ), StaticFileSegment::Transactions => { (table_key::(&key)?, >>::MASK) } From 20b14d59c7f1dabfda3e232688303fef4290837c Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 15 Oct 2025 20:08:26 +0400 Subject: [PATCH 068/371] fix: correct `Compact` impl for `Option` (#19042) --- crates/storage/codecs/src/lib.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 67e5f32b07c..1ac37966c2e 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -312,10 +312,9 @@ where return (None, buf) } - let (len, mut buf) = decode_varuint(buf); + let (len, buf) = decode_varuint(buf); - let (element, _) = T::from_compact(&buf[..len], len); - buf.advance(len); + let (element, buf) = T::from_compact(buf, len); (Some(element), buf) } From 6bb0d1b9297f8699698a1f5bed3eac3e83b3806a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Oct 2025 19:52:03 +0200 Subject: [PATCH 069/371] 
chore: increase versioned hash index cache (#19038) --- crates/transaction-pool/src/blobstore/disk.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 5ccafe15000..b883345aac6 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -4,6 +4,8 @@ use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStor use alloy_eips::{ eip4844::{BlobAndProofV1, BlobAndProofV2}, eip7594::BlobTransactionSidecarVariant, + eip7840::BlobParams, + merge::EPOCH_SLOTS, }; use alloy_primitives::{TxHash, B256}; use parking_lot::{Mutex, RwLock}; @@ -14,6 +16,13 @@ use tracing::{debug, trace}; /// How many [`BlobTransactionSidecarVariant`] to cache in memory. pub const DEFAULT_MAX_CACHED_BLOBS: u32 = 100; +/// A cache size heuristic based on the highest blob params +/// +/// This uses the max blobs per tx and max blobs per block over 16 epochs: `21 * 6 * 512 = 64512` +/// This should be ~4MB +const VERSIONED_HASH_TO_TX_HASH_CACHE_SIZE: u64 = + BlobParams::bpo2().max_blobs_per_tx * BlobParams::bpo2().max_blob_count * EPOCH_SLOTS * 16; + /// A blob store that stores blob data on disk. /// /// The type uses deferred deletion, meaning that blobs are not immediately deleted from disk, but @@ -288,7 +297,9 @@ impl DiskFileBlobStoreInner { size_tracker: Default::default(), file_lock: Default::default(), txs_to_delete: Default::default(), - versioned_hashes_to_txhash: Mutex::new(LruMap::new(ByLength::new(max_length * 6))), + versioned_hashes_to_txhash: Mutex::new(LruMap::new(ByLength::new( + VERSIONED_HASH_TO_TX_HASH_CACHE_SIZE as u32, + ))), } } From fd4597e9bd7737c47a908bc7f58e3c11a112edaa Mon Sep 17 00:00:00 2001 From: sashass1315 Date: Wed, 15 Oct 2025 20:53:47 +0300 Subject: [PATCH 070/371] chore(primitives-traits): relax SignerRecoverable bounds for Extended (#19045) --- crates/primitives-traits/src/extended.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/primitives-traits/src/extended.rs b/crates/primitives-traits/src/extended.rs index 4cba4b7d52d..da2bbc533aa 100644 --- a/crates/primitives-traits/src/extended.rs +++ b/crates/primitives-traits/src/extended.rs @@ -142,8 +142,8 @@ where impl SignerRecoverable for Extended where - B: SignedTransaction + IsTyped2718, - T: SignedTransaction, + B: SignerRecoverable, + T: SignerRecoverable, { fn recover_signer(&self) -> Result { delegate!(self => tx.recover_signer()) From 7779ed8c73eadb2c6e7bcc9a8a5f5dae40e0d9f8 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 15 Oct 2025 21:55:35 +0400 Subject: [PATCH 071/371] feat: bump revm (#18999) --- Cargo.lock | 265 ++++++++---------- Cargo.toml | 32 +-- crates/chainspec/src/spec.rs | 60 ++-- .../cli/commands/src/test_vectors/compact.rs | 2 +- .../engine/invalid-block-hooks/src/witness.rs | 1 + crates/engine/tree/src/tree/metrics.rs | 1 + .../engine/tree/src/tree/payload_validator.rs | 3 +- crates/ethereum/evm/src/build.rs | 26 +- crates/ethereum/evm/src/lib.rs | 4 +- crates/ethereum/evm/src/test_utils.rs | 1 + crates/ethereum/node/src/node.rs | 4 +- crates/ethereum/payload/src/lib.rs | 4 +- crates/evm/evm/src/aliases.rs | 5 +- crates/evm/evm/src/engine.rs | 2 +- crates/evm/evm/src/execute.rs | 9 +- crates/optimism/evm/src/build.rs | 15 +- crates/optimism/evm/src/l1.rs | 38 +-- crates/optimism/evm/src/lib.rs | 6 +- crates/optimism/node/tests/it/builder.rs | 3 +- 
crates/optimism/payload/src/builder.rs | 8 +- crates/optimism/rpc/src/eth/call.rs | 22 +- crates/optimism/rpc/src/eth/receipt.rs | 18 +- crates/rpc/rpc-convert/src/transaction.rs | 69 ++--- crates/rpc/rpc-eth-api/src/helpers/call.rs | 39 +-- .../rpc/rpc-eth-api/src/helpers/estimate.rs | 13 +- .../rpc-eth-api/src/helpers/pending_block.rs | 24 +- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 10 +- crates/rpc/rpc-eth-types/src/error/mod.rs | 5 +- crates/rpc/rpc-eth-types/src/pending_block.rs | 12 +- crates/rpc/rpc-eth-types/src/simulate.rs | 3 +- crates/rpc/rpc/src/aliases.rs | 5 +- crates/rpc/rpc/src/debug.rs | 15 +- crates/rpc/rpc/src/eth/bundle.rs | 32 ++- crates/rpc/rpc/src/eth/helpers/call.rs | 22 +- crates/rpc/rpc/src/eth/sim_bundle.rs | 12 +- .../custom-beacon-withdrawals/src/main.rs | 4 +- examples/custom-evm/src/main.rs | 3 +- examples/custom-node/src/evm/alloy.rs | 2 + examples/custom-node/src/evm/env.rs | 10 + examples/precompile-cache/src/main.rs | 3 +- 40 files changed, 380 insertions(+), 432 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e095cc6fcf6..f9d8401ec7b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59094911f05dbff1cf5b29046a00ef26452eccc8d47136d50a47c0cf22f00c85" +checksum = "6a0dd3ed764953a6b20458b2b7abbfdc93d20d14b38babe1a70fe631a443a9f1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -139,9 +139,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903cb8f728107ca27c816546f15be38c688df3c381d7bd1a4a9f215effc1ddb4" +checksum = "9556182afa73cddffa91e64a5aa9508d5e8c912b3a15f26998d2388a824d2c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -237,9 +237,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac7f1c9a1ccc7f3e03c36976455751a6166a4f0d2d2c530c3f87dfe7d0cdc836" +checksum = "305fa99b538ca7006b0c03cfed24ec6d82beda67aac857ef4714be24231d15e6" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -256,15 +256,15 @@ dependencies = [ "ethereum_ssz_derive", "serde", "serde_with", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", ] [[package]] name = "alloy-evm" -version = "0.21.2" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06a5f67ee74999aa4fe576a83be1996bdf74a30fce3d248bf2007d6fc7dae8aa" +checksum = "24a48fa6a4a5a69ae8e46c0ae60851602c5016baa3379d076c76e4c2f3b889f7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -299,9 +299,9 @@ dependencies = [ [[package]] name = "alloy-hardforks" -version = "0.3.5" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "889eb3949b58368a09d4f16931c660275ef5fb08e5fbd4a96573b19c7085c41f" +checksum = "4b16ee6b2c7d39da592d30a5f9607a83f50ee5ec2a2c301746cc81e91891f4ca" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -366,9 +366,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46e9374c667c95c41177602ebe6f6a2edd455193844f011d973d374b65501b38" +checksum = "223612259a080160ce839a4e5df0125ca403a1d5e7206cc911cea54af5d769aa" dependencies = [ "alloy-consensus", 
"alloy-eips", @@ -379,9 +379,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.21.2" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17aaeb600740c181bf29c9f138f9b228d115ea74fa6d0f0343e1952f1a766968" +checksum = "d1e0abe910a26d1b3686f4f6ad58287ce8c7fb85b08603d8c832869f02eb3d79" dependencies = [ "alloy-consensus", "alloy-eips", @@ -392,13 +392,14 @@ dependencies = [ "op-alloy-consensus", "op-revm", "revm", + "thiserror 2.0.16", ] [[package]] name = "alloy-op-hardforks" -version = "0.3.5" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "599c1d7dfbccb66603cb93fde00980d12848d32fe5e814f50562104a92df6487" +checksum = "af8bb236fc008fd3b83b2792e30ae79617a99ffc4c3f584f0c9b4ce0a2da52de" dependencies = [ "alloy-chains", "alloy-hardforks", @@ -654,9 +655,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db46b0901ee16bbb68d986003c66dcb74a12f9d9b3c44f8e85d51974f2458f0f" +checksum = "6d7d47bca1a2a1541e4404aa38b7e262bb4dffd9ac23b4f178729a4ddc5a5caa" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -691,9 +692,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f10620724bd45f80c79668a8cdbacb6974f860686998abce28f6196ae79444" +checksum = "c331c8e48665607682e8a9549a2347c13674d4fbcbdc342e7032834eba2424f4" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -717,9 +718,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5413814be7a22fbc81e0f04a2401fcc3eb25e56fd53b04683e8acecc6e1fe01b" +checksum = "6a8468f1a7f9ee3bae73c24eead0239abea720dbf7779384b9c7e20d51bfb6b0" dependencies = [ "alloy-primitives", "arbitrary", @@ -930,9 +931,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64c09ec565a90ed8390d82aa08cd3b22e492321b96cb4a3d4f58414683c9e2f" +checksum = "7bf39928a5e70c9755d6811a2928131b53ba785ad37c8bf85c90175b5d43b818" dependencies = [ "alloy-primitives", "darling 0.21.3", @@ -1671,15 +1672,6 @@ dependencies = [ "wyz", ] -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - [[package]] name = "block-buffer" version = "0.10.4" @@ -1794,7 +1786,7 @@ dependencies = [ "hashbrown 0.15.5", "indexmap 2.11.4", "once_cell", - "phf", + "phf 0.11.3", "rustc-hash 2.1.1", "static_assertions", ] @@ -1885,7 +1877,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ - "sha2 0.10.9", + "sha2", "tinyvec", ] @@ -2251,7 +2243,7 @@ dependencies = [ "hmac", "k256", "serde", - "sha2 0.10.9", + "sha2", "thiserror 1.0.69", ] @@ -2267,7 +2259,7 @@ dependencies = [ "once_cell", "pbkdf2", "rand 0.8.5", - "sha2 0.10.9", + "sha2", "thiserror 1.0.69", ] @@ -2285,7 +2277,7 @@ dependencies = [ "generic-array", "ripemd", "serde", - "sha2 0.10.9", + "sha2", "sha3", "thiserror 1.0.69", ] @@ 
-2939,7 +2931,7 @@ version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", + "block-buffer", "const-oid", "crypto-common", "subtle", @@ -3093,7 +3085,7 @@ dependencies = [ "ed25519", "rand_core 0.6.4", "serde", - "sha2 0.10.9", + "sha2", "subtle", "zeroize", ] @@ -3271,7 +3263,7 @@ checksum = "c853bd72c9e5787f8aafc3df2907c2ed03cff3150c3acd94e2e53a98ab70a8ab" dependencies = [ "cpufeatures", "ring", - "sha2 0.10.9", + "sha2", ] [[package]] @@ -5274,7 +5266,7 @@ dependencies = [ "elliptic-curve", "once_cell", "serdect", - "sha2 0.10.9", + "sha2", "signature", ] @@ -5370,7 +5362,7 @@ dependencies = [ "k256", "multihash", "quick-protobuf", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", "tracing", "zeroize", @@ -5398,52 +5390,6 @@ dependencies = [ "redox_syscall", ] -[[package]] -name = "libsecp256k1" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79019718125edc905a079a70cfa5f3820bc76139fc91d6f9abc27ea2a887139" -dependencies = [ - "arrayref", - "base64 0.22.1", - "digest 0.9.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.8.5", - "serde", - "sha2 0.9.9", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle", -] - -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" -dependencies = [ - "libsecp256k1-core", -] - [[package]] name = "libz-sys" version = "1.1.22" @@ -6202,9 +6148,9 @@ dependencies = [ [[package]] name = "op-revm" -version = "10.1.0" +version = "11.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9ba4f4693811e73449193c8bd656d3978f265871916882e6a51a487e4f96217" +checksum = "23a2811256cd65560453ea6f7174b1b6caa7909cb5652cf05dc7d8144c5e4b38" dependencies = [ "auto_impl", "revm", @@ -6325,7 +6271,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "primeorder", - "sha2 0.10.9", + "sha2", ] [[package]] @@ -6456,8 +6402,18 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ - "phf_macros", - "phf_shared", + "phf_macros 0.11.3", + "phf_shared 0.11.3", +] + +[[package]] +name = "phf" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" +dependencies = [ + "phf_macros 0.13.1", + "phf_shared 0.13.1", "serde", ] @@ -6467,18 +6423,41 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ - "phf_shared", + "phf_shared 0.11.3", "rand 0.8.5", ] +[[package]] +name = "phf_generator" +version = "0.13.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737" +dependencies = [ + "fastrand 2.3.0", + "phf_shared 0.13.1", +] + [[package]] name = "phf_macros" version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" dependencies = [ - "phf_generator", - "phf_shared", + "phf_generator 0.11.3", + "phf_shared 0.11.3", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "phf_macros" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812f032b54b1e759ccd5f8b6677695d5268c588701effba24601f6932f8269ef" +dependencies = [ + "phf_generator 0.13.1", + "phf_shared 0.13.1", "proc-macro2", "quote", "syn 2.0.106", @@ -6493,6 +6472,15 @@ dependencies = [ "siphasher", ] +[[package]] +name = "phf_shared" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.1.10" @@ -7936,7 +7924,7 @@ dependencies = [ "rand 0.8.5", "reth-network-peers", "secp256k1 0.30.0", - "sha2 0.10.9", + "sha2", "sha3", "thiserror 2.0.16", "tokio", @@ -8154,7 +8142,7 @@ dependencies = [ "futures-util", "reqwest", "reth-fs-util", - "sha2 0.10.9", + "sha2", "tempfile", "test-case", "tokio", @@ -8351,7 +8339,7 @@ dependencies = [ "reth-primitives-traits", "serde", "serde_json", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", ] @@ -9476,7 +9464,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", "tracing", ] @@ -9985,7 +9973,7 @@ dependencies = [ "revm-primitives", "serde", "serde_json", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", "tokio", "tokio-stream", @@ -10840,9 +10828,9 @@ dependencies = [ [[package]] name = "revm" -version = "29.0.1" +version = "30.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718d90dce5f07e115d0e66450b1b8aa29694c1cf3f89ebddaddccc2ccbd2f13e" +checksum = "8ca37fd2db4a76e4fb805b583ca3500ad9f6789b8d069473c70d8182ed5547d6" dependencies = [ "revm-bytecode", "revm-context", @@ -10859,21 +10847,21 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "6.2.2" +version = "7.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c52031b73cae95d84cd1b07725808b5fd1500da3e5e24574a3b2dc13d9f16d" +checksum = "451748b17ac78bd2b0748ec472a5392cd78fc0f7d19d528be44770fda28fd6f7" dependencies = [ "bitvec", - "phf", + "phf 0.13.1", "revm-primitives", "serde", ] [[package]] name = "revm-context" -version = "9.1.0" +version = "10.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a20c98e7008591a6f012550c2a00aa36cba8c14cc88eb88dec32eb9102554b4" +checksum = "94dffb17f4ac19cc3e7ace5b9bb69406b53a2d2e74a0a0c6b56591762aa7c30a" dependencies = [ "bitvec", "cfg-if", @@ -10888,9 +10876,9 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "10.2.0" +version = "11.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b50d241ed1ce647b94caf174fcd0239b7651318b2c4c06b825b59b973dfb8495" +checksum = "2fc1793e0092475f28d9cc4e663ff45846bc06d034c5ca33d89b6556143e2930" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -10904,9 +10892,9 @@ dependencies = [ [[package]] name = "revm-database" -version = 
"7.0.5" +version = "9.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39a276ed142b4718dcf64bc9624f474373ed82ef20611025045c3fb23edbef9c" +checksum = "637ceeefe76c93a69a1453e98272150ad10691d801b51033a68d5d03a6268f6a" dependencies = [ "alloy-eips", "revm-bytecode", @@ -10918,9 +10906,9 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "7.0.5" +version = "8.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c523c77e74eeedbac5d6f7c092e3851dbe9c7fec6f418b85992bd79229db361" +checksum = "f318a603e1179e57c72ceca6e37f8d44c7b9ab7caec1feffc1202b42f25f4ac4" dependencies = [ "auto_impl", "either", @@ -10931,9 +10919,9 @@ dependencies = [ [[package]] name = "revm-handler" -version = "10.0.1" +version = "11.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "550331ea85c1d257686e672081576172fe3d5a10526248b663bbf54f1bef226a" +checksum = "085ec3b976336478c29d96ec222445c964badefe0fd408a61da7079cb168b9c7" dependencies = [ "auto_impl", "derive-where", @@ -10950,9 +10938,9 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "10.0.1" +version = "11.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0a6e9ccc2ae006f5bed8bd80cd6f8d3832cd55c5e861b9402fdd556098512f" +checksum = "b8a9b5f2375e5a90f289669e7403f96b0fff21052116f3ed1e7cc7759327127e" dependencies = [ "auto_impl", "either", @@ -10968,9 +10956,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9b329afcc0f9fd5adfa2c6349a7435a8558e82bcae203142103a9a95e2a63b6" +checksum = "0ce1228a7989cc3d9af84c0de2abe39680a252c265877e67d2f0fb4f392cb690" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -10988,21 +10976,22 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "25.0.3" +version = "27.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06575dc51b1d8f5091daa12a435733a90b4a132dca7ccee0666c7db3851bc30c" +checksum = "7a8301ef34c8c242ecc040a5b0880fb04df3caaf844d81920a48c0073fd7d5d1" dependencies = [ "revm-bytecode", "revm-context-interface", "revm-primitives", + "revm-state", "serde", ] [[package]] name = "revm-precompile" -version = "27.0.0" +version = "28.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b57d4bd9e6b5fe469da5452a8a137bc2d030a3cd47c46908efc615bbc699da" +checksum = "e57aadd7a2087705f653b5aaacc8ad4f8e851f5d330661e3f4c43b5475bbceae" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11015,20 +11004,19 @@ dependencies = [ "c-kzg", "cfg-if", "k256", - "libsecp256k1", "p256", "revm-primitives", "ripemd", "rug", "secp256k1 0.31.1", - "sha2 0.10.9", + "sha2", ] [[package]] name = "revm-primitives" -version = "20.2.1" +version = "21.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa29d9da06fe03b249b6419b33968ecdf92ad6428e2f012dc57bcd619b5d94e" +checksum = "536f30e24c3c2bf0d3d7d20fa9cf99b93040ed0f021fd9301c78cddb0dacda13" dependencies = [ "alloy-primitives", "num_enum", @@ -11038,9 +11026,9 @@ dependencies = [ [[package]] name = "revm-state" -version = "7.0.5" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f64fbacb86008394aaebd3454f9643b7d5a782bd251135e17c5b33da592d84d" +checksum = "ef7e3342f602a1a7a38d15e140ec08d1dc4f4d703c4196aadfd1744b2008e915" dependencies = [ "bitflags 2.9.4", 
"revm-bytecode", @@ -11728,19 +11716,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "sha2" version = "0.10.9" diff --git a/Cargo.toml b/Cargo.toml index 6a1c89d023f..68dc13584fc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -468,31 +468,31 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { version = "29.0.1", default-features = false } -revm-bytecode = { version = "6.2.2", default-features = false } -revm-database = { version = "7.0.5", default-features = false } -revm-state = { version = "7.0.5", default-features = false } -revm-primitives = { version = "20.2.1", default-features = false } -revm-interpreter = { version = "25.0.3", default-features = false } -revm-inspector = { version = "10.0.1", default-features = false } -revm-context = { version = "9.1.0", default-features = false } -revm-context-interface = { version = "10.2.0", default-features = false } -revm-database-interface = { version = "7.0.5", default-features = false } -op-revm = { version = "10.1.0", default-features = false } -revm-inspectors = "0.30.0" +revm = { version = "30.1.1", default-features = false } +revm-bytecode = { version = "7.0.2", default-features = false } +revm-database = { version = "9.0.0", default-features = false } +revm-state = { version = "8.0.0", default-features = false } +revm-primitives = { version = "21.0.0", default-features = false } +revm-interpreter = { version = "27.0.0", default-features = false } +revm-inspector = { version = "11.1.0", default-features = false } +revm-context = { version = "10.1.0", default-features = false } +revm-context-interface = { version = "11.1.0", default-features = false } +revm-database-interface = { version = "8.0.1", default-features = false } +op-revm = { version = "11.1.0", default-features = false } +revm-inspectors = "0.31.0" # eth alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.4.1" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.21.2", default-features = false } +alloy-evm = { version = "0.22.0", default-features = false } alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.4.1" alloy-sol-types = { version = "1.4.1", default-features = false } alloy-trie = { version = "0.9.1", default-features = false } -alloy-hardforks = "0.3.5" +alloy-hardforks = "0.4.0" alloy-consensus = { version = "1.0.37", default-features = false } alloy-contract = { version = "1.0.37", default-features = false } @@ -523,8 +523,8 @@ alloy-transport-ipc = { version = "1.0.37", default-features = false } alloy-transport-ws = { version = "1.0.37", default-features = false } # op -alloy-op-evm = { version = "0.21.2", default-features = false } -alloy-op-hardforks = "0.3.5" +alloy-op-evm = { version = "0.22.0", default-features = false } +alloy-op-hardforks = "0.4.0" op-alloy-rpc-types = { version = "0.20.0", default-features = false } op-alloy-rpc-types-engine = { version = "0.20.0", default-features = false } op-alloy-network = { version = "0.20.0", default-features = 
false } diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 88e5a370d6d..a0cccfcc449 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -3,7 +3,7 @@ use alloy_evm::eth::spec::EthExecutorSpec; use crate::{ constants::{MAINNET_DEPOSIT_CONTRACT, MAINNET_PRUNE_DELETE_LIMIT}, - holesky, hoodi, mainnet, sepolia, EthChainSpec, + holesky, hoodi, sepolia, EthChainSpec, }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; @@ -108,10 +108,7 @@ pub static MAINNET: LazyLock> = LazyLock::new(|| { deposit_contract: Some(MAINNET_DEPOSIT_CONTRACT), base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), prune_delete_limit: MAINNET_PRUNE_DELETE_LIMIT, - blob_params: BlobScheduleBlobParams::default().with_scheduled([ - (mainnet::MAINNET_BPO1_TIMESTAMP, BlobParams::bpo1()), - (mainnet::MAINNET_BPO2_TIMESTAMP, BlobParams::bpo2()), - ]), + blob_params: BlobScheduleBlobParams::default(), }; spec.genesis.config.dao_fork_support = true; spec.into() @@ -1129,10 +1126,7 @@ Merge hard forks: Post-merge hard forks (timestamp based): - Shanghai @1681338455 - Cancun @1710338135 -- Prague @1746612311 -- Osaka @1764798551 -- Bpo1 @1765978199 -- Bpo2 @1767747671" +- Prague @1746612311" ); } @@ -1376,10 +1370,7 @@ Post-merge hard forks (timestamp based): ), ( EthereumHardfork::Prague, - ForkId { - hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), - next: mainnet::MAINNET_OSAKA_TIMESTAMP, - }, + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, ), ], ); @@ -1523,22 +1514,12 @@ Post-merge hard forks (timestamp based): // First Prague block ( Head { number: 20000002, timestamp: 1746612311, ..Default::default() }, - ForkId { - hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), - next: mainnet::MAINNET_OSAKA_TIMESTAMP, - }, + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, ), - // Osaka block + // Future Prague block ( - Head { - number: 20000002, - timestamp: mainnet::MAINNET_OSAKA_TIMESTAMP, - ..Default::default() - }, - ForkId { - hash: ForkHash(hex!("0x5167e2a6")), - next: mainnet::MAINNET_BPO1_TIMESTAMP, - }, + Head { number: 20000002, timestamp: 2000000000, ..Default::default() }, + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, ), ], ); @@ -1847,22 +1828,11 @@ Post-merge hard forks (timestamp based): ), // First Prague block ( Head { number: 20000004, timestamp: 1746612311, ..Default::default() }, - ForkId { - hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), - next: mainnet::MAINNET_OSAKA_TIMESTAMP, - }, - ), - // Osaka block + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + ), // Future Prague block ( - Head { - number: 20000004, - timestamp: mainnet::MAINNET_OSAKA_TIMESTAMP, - ..Default::default() - }, - ForkId { - hash: ForkHash(hex!("0x5167e2a6")), - next: mainnet::MAINNET_BPO1_TIMESTAMP, - }, + Head { number: 20000004, timestamp: 2000000000, ..Default::default() }, + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, ), ], ); @@ -2519,8 +2489,10 @@ Post-merge hard forks (timestamp based): #[test] fn latest_eth_mainnet_fork_id() { - // BPO2 - assert_eq!(ForkId { hash: ForkHash(hex!("0xfd414558")), next: 0 }, MAINNET.latest_fork_id()) + assert_eq!( + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + MAINNET.latest_fork_id() + ) } #[test] diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index ca88c131ff6..f4636f5f83b 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ 
b/crates/cli/commands/src/test_vectors/compact.rs @@ -283,7 +283,7 @@ pub fn type_name() -> String { // With alloy type transition the types are renamed, we map them here to the original name so that test vector files remain consistent let name = std::any::type_name::(); match name { - "alloy_consensus::transaction::typed::EthereumTypedTransaction" => "Transaction".to_string(), + "alloy_consensus::transaction::envelope::EthereumTypedTransaction" => "Transaction".to_string(), "alloy_consensus::transaction::envelope::EthereumTxEnvelope" => "TransactionSigned".to_string(), name => { name.split("::").last().unwrap_or(std::any::type_name::()).to_string() diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 1df76d9255c..d00f3b8287b 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -839,6 +839,7 @@ mod tests { receipts: vec![], requests: Requests::default(), gas_used: 0, + blob_gas_used: 0, }, }; diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 844db1e63b9..c014d8ba15e 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -314,6 +314,7 @@ mod tests { receipts: vec![], requests: Requests::default(), gas_used: 1000, + blob_gas_used: 0, }, )) } diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 17dc511a445..a565757284e 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -43,6 +43,7 @@ use reth_revm::db::State; use reth_trie::{updates::TrieUpdates, HashedPostState, KeccakKeyHasher, TrieInput}; use reth_trie_db::DatabaseHashedPostState; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; +use revm::context::Block; use std::{collections::HashMap, sync::Arc, time::Instant}; use tracing::{debug, debug_span, error, info, trace, warn}; @@ -642,7 +643,7 @@ where T: PayloadTypes>, Evm: ConfigureEngineEvm, { - let num_hash = NumHash::new(env.evm_env.block_env.number.to(), env.hash); + let num_hash = NumHash::new(env.evm_env.block_env.number().to(), env.hash); let span = debug_span!(target: "engine::tree", "execute_block", num = ?num_hash.number, hash = ?num_hash.hash); let _enter = span.enter(); diff --git a/crates/ethereum/evm/src/build.rs b/crates/ethereum/evm/src/build.rs index 5f5e014d297..85d4cae311b 100644 --- a/crates/ethereum/evm/src/build.rs +++ b/crates/ethereum/evm/src/build.rs @@ -1,7 +1,7 @@ use alloc::{sync::Arc, vec::Vec}; use alloy_consensus::{ proofs::{self, calculate_receipt_root}, - Block, BlockBody, BlockHeader, Header, Transaction, TxReceipt, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, BlockHeader, Header, TxReceipt, EMPTY_OMMER_ROOT_HASH, }; use alloy_eips::merge::BEACON_NONCE; use alloy_evm::{block::BlockExecutorFactory, eth::EthBlockExecutionCtx}; @@ -10,6 +10,7 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::execute::{BlockAssembler, BlockAssemblerInput, BlockExecutionError}; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{logs_bloom, Receipt, SignedTransaction}; +use revm::context::Block as _; /// Block builder for Ethereum. 
#[derive(Debug, Clone)] @@ -47,12 +48,12 @@ where execution_ctx: ctx, parent, transactions, - output: BlockExecutionResult { receipts, requests, gas_used }, + output: BlockExecutionResult { receipts, requests, gas_used, blob_gas_used }, state_root, .. } = input; - let timestamp = evm_env.block_env.timestamp.saturating_to(); + let timestamp = evm_env.block_env.timestamp().saturating_to(); let transactions_root = proofs::calculate_transaction_root(&transactions); let receipts_root = calculate_receipt_root( @@ -73,12 +74,11 @@ where .then(|| requests.requests_hash()); let mut excess_blob_gas = None; - let mut blob_gas_used = None; + let mut block_blob_gas_used = None; // only determine cancun fields when active if self.chain_spec.is_cancun_active_at_timestamp(timestamp) { - blob_gas_used = - Some(transactions.iter().map(|tx| tx.blob_gas_used().unwrap_or_default()).sum()); + block_blob_gas_used = Some(*blob_gas_used); excess_blob_gas = if self.chain_spec.is_cancun_active_at_timestamp(parent.timestamp) { parent.maybe_next_block_excess_blob_gas( self.chain_spec.blob_params_at_timestamp(timestamp), @@ -96,23 +96,23 @@ where let header = Header { parent_hash: ctx.parent_hash, ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: evm_env.block_env.beneficiary, + beneficiary: evm_env.block_env.beneficiary(), state_root, transactions_root, receipts_root, withdrawals_root, logs_bloom, timestamp, - mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(), + mix_hash: evm_env.block_env.prevrandao().unwrap_or_default(), nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(evm_env.block_env.basefee), - number: evm_env.block_env.number.saturating_to(), - gas_limit: evm_env.block_env.gas_limit, - difficulty: evm_env.block_env.difficulty, + base_fee_per_gas: Some(evm_env.block_env.basefee()), + number: evm_env.block_env.number().saturating_to(), + gas_limit: evm_env.block_env.gas_limit(), + difficulty: evm_env.block_env.difficulty(), gas_used: *gas_used, extra_data: self.extra_data.clone(), parent_beacon_block_root: ctx.parent_beacon_block_root, - blob_gas_used, + blob_gas_used: block_blob_gas_used, excess_blob_gas, requests_hash, }; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index eaf91f0c7be..c0f8adc9c54 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -132,6 +132,7 @@ where + FromRecoveredTx + FromTxWithEncoded, Spec = SpecId, + BlockEnv = BlockEnv, Precompiles = PrecompilesMap, > + Clone + Debug @@ -154,7 +155,7 @@ where &self.block_assembler } - fn evm_env(&self, header: &Header) -> Result { + fn evm_env(&self, header: &Header) -> Result, Self::Error> { Ok(EvmEnv::for_eth_block( header, self.chain_spec(), @@ -217,6 +218,7 @@ where + FromRecoveredTx + FromTxWithEncoded, Spec = SpecId, + BlockEnv = BlockEnv, Precompiles = PrecompilesMap, > + Clone + Debug diff --git a/crates/ethereum/evm/src/test_utils.rs b/crates/ethereum/evm/src/test_utils.rs index 87875dbc848..fe791b9f5fd 100644 --- a/crates/ethereum/evm/src/test_utils.rs +++ b/crates/ethereum/evm/src/test_utils.rs @@ -125,6 +125,7 @@ impl<'a, DB: Database, I: Inspector>>> BlockExec reqs }), gas_used: 0, + blob_gas_used: 0, }; evm.db_mut().bundle_state = bundle; diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 089353f6b73..74740643a41 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -15,7 +15,6 @@ use reth_ethereum_engine_primitives::{ use reth_ethereum_primitives::{EthPrimitives, TransactionSigned}; use 
reth_evm::{ eth::spec::EthExecutorSpec, ConfigureEvm, EvmFactory, EvmFactoryFor, NextBlockEnvAttributes, - SpecFor, TxEnvFor, }; use reth_network::{primitives::BasicNetworkPrimitives, NetworkHandle, PeersInfo}; use reth_node_api::{ @@ -159,10 +158,9 @@ where NetworkT: RpcTypes>>, EthRpcConverterFor: RpcConvert< Primitives = PrimitivesTy, - TxEnv = TxEnvFor, Error = EthApiError, Network = NetworkT, - Spec = SpecFor, + Evm = N::Evm, >, EthApiError: FromEvmError, { diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 8c969c9d44c..7f40e983bc8 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -176,8 +176,8 @@ where debug!(target: "payload_builder", id=%attributes.id, parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); let mut cumulative_gas_used = 0; - let block_gas_limit: u64 = builder.evm_mut().block().gas_limit; - let base_fee = builder.evm_mut().block().basefee; + let block_gas_limit: u64 = builder.evm_mut().block().gas_limit(); + let base_fee = builder.evm_mut().block().basefee(); let mut best_txs = best_txs(BestTransactionsAttributes::new( base_fee, diff --git a/crates/evm/evm/src/aliases.rs b/crates/evm/evm/src/aliases.rs index 6bb1ab1c35a..7758f0aea17 100644 --- a/crates/evm/evm/src/aliases.rs +++ b/crates/evm/evm/src/aliases.rs @@ -11,6 +11,9 @@ pub type EvmFactoryFor = /// Helper to access [`EvmFactory::Spec`] for a given [`ConfigureEvm`]. pub type SpecFor = as EvmFactory>::Spec; +/// Helper to access [`EvmFactory::BlockEnv`] for a given [`ConfigureEvm`]. +pub type BlockEnvFor = as EvmFactory>::BlockEnv; + /// Helper to access [`EvmFactory::Evm`] for a given [`ConfigureEvm`]. pub type EvmFor = as EvmFactory>::Evm; @@ -31,7 +34,7 @@ pub type ExecutionCtxFor<'a, Evm> = <::BlockExecutorFactory as BlockExecutorFactory>::ExecutionCtx<'a>; /// Type alias for [`EvmEnv`] for a given [`ConfigureEvm`]. -pub type EvmEnvFor = EvmEnv>; +pub type EvmEnvFor = EvmEnv, BlockEnvFor>; /// Helper trait to bound [`Inspector`] for a [`ConfigureEvm`]. pub trait InspectorFor: Inspector> {} diff --git a/crates/evm/evm/src/engine.rs b/crates/evm/evm/src/engine.rs index 5c721d811bc..5b46a086170 100644 --- a/crates/evm/evm/src/engine.rs +++ b/crates/evm/evm/src/engine.rs @@ -2,7 +2,7 @@ use crate::{execute::ExecutableTxFor, ConfigureEvm, EvmEnvFor, ExecutionCtxFor}; /// [`ConfigureEvm`] extension providing methods for executing payloads. pub trait ConfigureEngineEvm: ConfigureEvm { - /// Returns an [`EvmEnvFor`] for the given payload. + /// Returns an [`crate::EvmEnv`] for the given payload. fn evm_env_for_payload(&self, payload: &ExecutionData) -> Result, Self::Error>; /// Returns an [`ExecutionCtxFor`] for the given payload. diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 28b972e7c95..76a9b078394 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -203,7 +203,8 @@ pub struct BlockAssemblerInput<'a, 'b, F: BlockExecutorFactory, H = Header> { /// Configuration of EVM used when executing the block. /// /// Contains context relevant to EVM such as [`revm::context::BlockEnv`]. - pub evm_env: EvmEnv<::Spec>, + pub evm_env: + EvmEnv<::Spec, ::BlockEnv>, /// [`BlockExecutorFactory::ExecutionCtx`] used to execute the block. pub execution_ctx: F::ExecutionCtx<'a>, /// Parent block header. @@ -225,7 +226,10 @@ impl<'a, 'b, F: BlockExecutorFactory, H> BlockAssemblerInput<'a, 'b, F, H> { /// Creates a new [`BlockAssemblerInput`]. 
#[expect(clippy::too_many_arguments)] pub fn new( - evm_env: EvmEnv<::Spec>, + evm_env: EvmEnv< + ::Spec, + ::BlockEnv, + >, execution_ctx: F::ExecutionCtx<'a>, parent: &'a SealedHeader, transactions: Vec, @@ -465,6 +469,7 @@ where Evm: Evm< Spec = ::Spec, HaltReason = ::HaltReason, + BlockEnv = ::BlockEnv, DB = &'a mut State, >, Transaction = N::SignedTx, diff --git a/crates/optimism/evm/src/build.rs b/crates/optimism/evm/src/build.rs index 087b7f10046..edc877a9a5d 100644 --- a/crates/optimism/evm/src/build.rs +++ b/crates/optimism/evm/src/build.rs @@ -14,6 +14,7 @@ use reth_optimism_consensus::{calculate_receipt_root_no_memo_optimism, isthmus}; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::DepositReceipt; use reth_primitives_traits::{Receipt, SignedTransaction}; +use revm::context::Block as _; /// Block builder for Optimism. #[derive(Debug)] @@ -53,7 +54,7 @@ impl OpBlockAssembler { } = input; let ctx = ctx.into(); - let timestamp = evm_env.block_env.timestamp.saturating_to(); + let timestamp = evm_env.block_env.timestamp().saturating_to(); let transactions_root = proofs::calculate_transaction_root(&transactions); let receipts_root = @@ -88,19 +89,19 @@ impl OpBlockAssembler { let header = Header { parent_hash: ctx.parent_hash, ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: evm_env.block_env.beneficiary, + beneficiary: evm_env.block_env.beneficiary(), state_root, transactions_root, receipts_root, withdrawals_root, logs_bloom, timestamp, - mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(), + mix_hash: evm_env.block_env.prevrandao().unwrap_or_default(), nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(evm_env.block_env.basefee), - number: evm_env.block_env.number.saturating_to(), - gas_limit: evm_env.block_env.gas_limit, - difficulty: evm_env.block_env.difficulty, + base_fee_per_gas: Some(evm_env.block_env.basefee()), + number: evm_env.block_env.number().saturating_to(), + gas_limit: evm_env.block_env.gas_limit(), + difficulty: evm_env.block_env.difficulty(), gas_used: *gas_used, extra_data: ctx.extra_data, parent_beacon_block_root: ctx.parent_beacon_block_root, diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index a538c8d8690..4165221c987 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -88,10 +88,12 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result Result + FromTxWithEncoded - + TransactionEnv, + + TransactionEnv + + OpTxEnv, Precompiles = PrecompilesMap, Spec = OpSpecId, + BlockEnv = BlockEnv, > + Debug, Self: Send + Sync + Unpin + Clone + 'static, { diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index e0437a5f655..b495fdb47ce 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -19,7 +19,7 @@ use reth_optimism_node::{args::RollupArgs, OpEvmConfig, OpExecutorBuilder, OpNod use reth_optimism_primitives::OpPrimitives; use reth_provider::providers::BlockchainProvider; use revm::{ - context::{Cfg, ContextTr, TxEnv}, + context::{BlockEnv, Cfg, ContextTr, TxEnv}, context_interface::result::EVMError, inspector::NoOpInspector, interpreter::interpreter::EthInterpreter, @@ -94,6 +94,7 @@ fn test_setup_custom_precompiles() { EVMError; type HaltReason = OpHaltReason; type Spec = OpSpecId; + type BlockEnv = BlockEnv; type Precompiles = PrecompilesMap; fn create_evm( diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 1d73464e178..ecc7a400349 
100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -567,9 +567,9 @@ where } /// Returns the current fee settings for transactions from the mempool - pub fn best_transaction_attributes(&self, block_env: &BlockEnv) -> BestTransactionsAttributes { + pub fn best_transaction_attributes(&self, block_env: impl Block) -> BestTransactionsAttributes { BestTransactionsAttributes::new( - block_env.basefee, + block_env.basefee(), block_env.blob_gasprice().map(|p| p as u64), ) } @@ -659,10 +659,10 @@ where Transaction: PoolTransaction> + OpPooledTx, >, ) -> Result, PayloadBuilderError> { - let block_gas_limit = builder.evm_mut().block().gas_limit; + let block_gas_limit = builder.evm_mut().block().gas_limit(); let block_da_limit = self.da_config.max_da_block_size(); let tx_da_limit = self.da_config.max_da_tx_size(); - let base_fee = builder.evm_mut().block().basefee; + let base_fee = builder.evm_mut().block().basefee(); while let Some(tx) = best_txs.next(()) { let interop = tx.interop_deadline(); diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index b7ce75c51b2..4e853984ac9 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,5 +1,4 @@ use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; -use reth_evm::{SpecFor, TxEnvFor}; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall}, FromEvmError, RpcConvert, @@ -9,12 +8,7 @@ impl EthCall for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = OpEthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } @@ -22,12 +16,7 @@ impl EstimateCall for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = OpEthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } @@ -35,12 +24,7 @@ impl Call for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = OpEthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { #[inline] fn call_gas_limit(&self) -> u64 { diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 97fe3a0b5b7..775e79d5aff 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -458,10 +458,11 @@ mod test { OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) .unwrap(); - let mut l1_block_info = op_revm::L1BlockInfo::default(); - - l1_block_info.operator_fee_scalar = Some(U256::ZERO); - l1_block_info.operator_fee_constant = Some(U256::from(2)); + let mut l1_block_info = op_revm::L1BlockInfo { + operator_fee_scalar: Some(U256::ZERO), + operator_fee_constant: Some(U256::from(2)), + ..Default::default() + }; let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info) @@ -481,10 +482,11 @@ mod test { OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) .unwrap(); - let mut l1_block_info = op_revm::L1BlockInfo::default(); - - l1_block_info.operator_fee_scalar = Some(U256::ZERO); - l1_block_info.operator_fee_constant = Some(U256::ZERO); + let mut l1_block_info = op_revm::L1BlockInfo { + operator_fee_scalar: Some(U256::ZERO), + operator_fee_constant: Some(U256::ZERO), + ..Default::default() + }; let receipt_meta = 
OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info) diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index b8fb25c66c4..a89104bcbaf 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -17,7 +17,7 @@ use core::error; use dyn_clone::DynClone; use reth_evm::{ revm::context_interface::{either::Either, Block}, - ConfigureEvm, SpecFor, TxEnvFor, + BlockEnvFor, ConfigureEvm, EvmEnvFor, TxEnvFor, }; use reth_primitives_traits::{ BlockTy, HeaderTy, NodePrimitives, SealedBlock, SealedHeader, SealedHeaderFor, TransactionMeta, @@ -123,19 +123,16 @@ pub trait RpcConvert: Send + Sync + Unpin + Debug + DynClone + 'static { /// Associated lower layer consensus types to convert from and into types of [`Self::Network`]. type Primitives: NodePrimitives; + /// The EVM configuration. + type Evm: ConfigureEvm; + /// Associated upper layer JSON-RPC API network requests and responses to convert from and into /// types of [`Self::Primitives`]. type Network: RpcTypes + Send + Sync + Unpin + Clone + Debug; - /// A set of variables for executing a transaction. - type TxEnv; - /// An associated RPC conversion error. type Error: error::Error + Into>; - /// The EVM specification identifier. - type Spec; - /// Wrapper for `fill()` with default `TransactionInfo` /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. @@ -169,9 +166,8 @@ pub trait RpcConvert: Send + Sync + Unpin + Debug + DynClone + 'static { fn tx_env( &self, request: RpcTxReq, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result; + evm_env: &EvmEnvFor, + ) -> Result, Self::Error>; /// Converts a set of primitive receipts to RPC representations. It is guaranteed that all /// receipts are from the same block. @@ -199,8 +195,8 @@ pub trait RpcConvert: Send + Sync + Unpin + Debug + DynClone + 'static { } dyn_clone::clone_trait_object!( - - RpcConvert + + RpcConvert ); /// Converts `self` into `T`. The opposite of [`FromConsensusTx`]. @@ -439,7 +435,7 @@ impl TryIntoSimTx> for TransactionRequest { /// implementation for free, thanks to the blanket implementation, unless the conversion requires /// more context. For example, some configuration parameters or access handles to database, network, /// etc. -pub trait TxEnvConverter: +pub trait TxEnvConverter: Debug + Send + Sync + Unpin + Clone + 'static { /// An associated error that can occur during conversion. @@ -451,31 +447,30 @@ pub trait TxEnvConverter: fn convert_tx_env( &self, tx_req: TxReq, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result; + evm_env: &EvmEnvFor, + ) -> Result, Self::Error>; } -impl TxEnvConverter for () +impl TxEnvConverter for () where - TxReq: TryIntoTxEnv, + TxReq: TryIntoTxEnv, BlockEnvFor>, + Evm: ConfigureEvm, { type Error = TxReq::Err; fn convert_tx_env( &self, tx_req: TxReq, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result { - tx_req.try_into_tx_env(cfg_env, block_env) + evm_env: &EvmEnvFor, + ) -> Result, Self::Error> { + tx_req.try_into_tx_env(&evm_env.cfg_env, &evm_env.block_env) } } /// Converts rpc transaction requests into transaction environment using a closure. 
-impl TxEnvConverter for F +impl TxEnvConverter for F where - F: Fn(TxReq, &CfgEnv, &BlockEnv) -> Result + F: Fn(TxReq, &EvmEnvFor) -> Result, E> + Debug + Send + Sync @@ -483,6 +478,7 @@ where + Clone + 'static, TxReq: Clone, + Evm: ConfigureEvm, E: error::Error + Send + Sync + 'static, { type Error = E; @@ -490,17 +486,16 @@ where fn convert_tx_env( &self, tx_req: TxReq, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result { - self(tx_req, cfg_env, block_env) + evm_env: &EvmEnvFor, + ) -> Result, Self::Error> { + self(tx_req, evm_env) } } /// Converts `self` into `T`. /// /// Should create an executable transaction environment using [`TransactionRequest`]. -pub trait TryIntoTxEnv { +pub trait TryIntoTxEnv { /// An associated error that can occur during the conversion. type Err; @@ -836,7 +831,6 @@ impl } /// Converts `self` into a boxed converter. - #[expect(clippy::type_complexity)] pub fn erased( self, ) -> Box< @@ -844,8 +838,7 @@ impl Primitives = ::Primitives, Network = ::Network, Error = ::Error, - TxEnv = ::TxEnv, - Spec = ::Spec, + Evm = ::Evm, >, > where @@ -933,13 +926,12 @@ where SimTx: SimTxConverter, TxTy>, RpcTx: RpcTxConverter, Network::TransactionResponse, >>::Out>, - TxEnv: TxEnvConverter, TxEnvFor, SpecFor>, + TxEnv: TxEnvConverter, Evm>, { type Primitives = N; + type Evm = Evm; type Network = Network; - type TxEnv = TxEnvFor; type Error = Receipt::Error; - type Spec = SpecFor; fn fill( &self, @@ -965,10 +957,9 @@ where fn tx_env( &self, request: RpcTxReq, - cfg_env: &CfgEnv>, - block_env: &BlockEnv, - ) -> Result { - self.tx_env_converter.convert_tx_env(request, cfg_env, block_env).map_err(Into::into) + evm_env: &EvmEnvFor, + ) -> Result, Self::Error> { + self.tx_env_converter.convert_tx_env(request, evm_env).map_err(Into::into) } fn convert_receipts( diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index b96dab882a0..8f325e757f1 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -20,8 +20,8 @@ use alloy_rpc_types_eth::{ use futures::Future; use reth_errors::{ProviderError, RethError}; use reth_evm::{ - ConfigureEvm, Evm, EvmEnv, EvmEnvFor, HaltReasonFor, InspectorFor, SpecFor, TransactionEnv, - TxEnvFor, + env::BlockEnvironment, ConfigureEvm, Evm, EvmEnvFor, HaltReasonFor, InspectorFor, + TransactionEnv, TxEnvFor, }; use reth_node_api::BlockBody; use reth_primitives_traits::Recovered; @@ -38,6 +38,7 @@ use reth_rpc_eth_types::{ }; use reth_storage_api::{BlockIdReader, ProviderTx}; use revm::{ + context::Block, context_interface::{ result::{ExecutionResult, ResultAndState}, Transaction, @@ -115,7 +116,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA evm_env.cfg_env.disable_nonce_check = true; evm_env.cfg_env.disable_base_fee = true; evm_env.cfg_env.tx_gas_limit_cap = Some(u64::MAX); - evm_env.block_env.basefee = 0; + evm_env.block_env.inner_mut().basefee = 0; } let SimBlock { block_overrides, state_overrides, calls } = block; @@ -123,19 +124,23 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA if let Some(block_overrides) = block_overrides { // ensure we don't allow uncapped gas limit per block if let Some(gas_limit_override) = block_overrides.gas_limit && - gas_limit_override > evm_env.block_env.gas_limit && + gas_limit_override > evm_env.block_env.gas_limit() && gas_limit_override > this.call_gas_limit() { return Err(EthApiError::other(EthSimulateError::GasLimitReached).into()) } - 
apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env); + apply_block_overrides( + block_overrides, + &mut db, + evm_env.block_env.inner_mut(), + ); } if let Some(state_overrides) = state_overrides { apply_state_overrides(state_overrides, &mut db) .map_err(Self::Error::from_eth_err)?; } - let block_gas_limit = evm_env.block_env.gas_limit; + let block_gas_limit = evm_env.block_env.gas_limit(); let chain_id = evm_env.cfg_env.chain_id; let default_gas_limit = { @@ -404,7 +409,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let cap = this.caller_gas_allowance(&mut db, &evm_env, &tx_env)?; // no gas limit was provided in the request, so we need to cap the request's gas // limit - tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit)); + tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit())); } // can consume the list since we're not using the request anymore @@ -461,7 +466,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// Executes code on state. pub trait Call: LoadState< - RpcConvert: RpcConvert, Spec = SpecFor>, + RpcConvert: RpcConvert, Error: FromEvmError + From<::Error> + From, @@ -520,7 +525,7 @@ pub trait Call: Ok(res) } - /// Executes the [`EvmEnv`] against the given [Database] without committing state + /// Executes the [`reth_evm::EvmEnv`] against the given [Database] without committing state /// changes. fn transact_with_inspector( &self, @@ -574,7 +579,7 @@ pub trait Call: /// Prepares the state and env for the given [`RpcTxReq`] at the given [`BlockId`] and /// executes the closure on a new task returning the result of the closure. /// - /// This returns the configured [`EvmEnv`] for the given [`RpcTxReq`] at + /// This returns the configured [`reth_evm::EvmEnv`] for the given [`RpcTxReq`] at /// the given [`BlockId`] and with configured call settings: `prepare_call_env`. /// /// This is primarily used by `eth_call`. @@ -712,10 +717,10 @@ pub trait Call: /// /// All `TxEnv` fields are derived from the given [`RpcTxReq`], if fields are - /// `None`, they fall back to the [`EvmEnv`]'s settings. + /// `None`, they fall back to the [`reth_evm::EvmEnv`]'s settings. fn create_txn_env( &self, - evm_env: &EvmEnv>, + evm_env: &EvmEnvFor, mut request: RpcTxReq<::Network>, mut db: impl Database>, ) -> Result, Self::Error> { @@ -728,10 +733,10 @@ pub trait Call: request.as_mut().set_nonce(nonce); } - Ok(self.tx_resp_builder().tx_env(request, &evm_env.cfg_env, &evm_env.block_env)?) + Ok(self.tx_resp_builder().tx_env(request, evm_env)?) } - /// Prepares the [`EvmEnv`] for execution of calls. + /// Prepares the [`reth_evm::EvmEnv`] for execution of calls. /// /// Does not commit any changes to the underlying database. 
/// @@ -790,7 +795,7 @@ pub trait Call: request.as_mut().take_nonce(); if let Some(block_overrides) = overrides.block { - apply_block_overrides(*block_overrides, db, &mut evm_env.block_env); + apply_block_overrides(*block_overrides, db, evm_env.block_env.inner_mut()); } if let Some(state_overrides) = overrides.state { apply_state_overrides(state_overrides, db) @@ -801,7 +806,7 @@ pub trait Call: // lower the basefee to 0 to avoid breaking EVM invariants (basefee < gasprice): if tx_env.gas_price() == 0 { - evm_env.block_env.basefee = 0; + evm_env.block_env.inner_mut().basefee = 0; } if !request_has_gas_limit { @@ -811,7 +816,7 @@ pub trait Call: trace!(target: "rpc::eth::call", ?tx_env, "Applying gas limit cap with caller allowance"); let cap = self.caller_gas_allowance(db, &evm_env, &tx_env)?; // ensure we cap gas_limit to the block's - tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit)); + tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit())); } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index cca674e9739..cd2518345ce 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -18,7 +18,10 @@ use reth_rpc_eth_types::{ }; use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO}; use reth_storage_api::StateProvider; -use revm::context_interface::{result::ExecutionResult, Transaction}; +use revm::{ + context::Block, + context_interface::{result::ExecutionResult, Transaction}, +}; use tracing::trace; /// Gas execution estimates @@ -60,10 +63,10 @@ pub trait EstimateCall: Call { let tx_request_gas_limit = request.as_ref().gas_limit(); let tx_request_gas_price = request.as_ref().gas_price(); // the gas limit of the corresponding block - let max_gas_limit = evm_env - .cfg_env - .tx_gas_limit_cap - .map_or(evm_env.block_env.gas_limit, |cap| cap.min(evm_env.block_env.gas_limit)); + let max_gas_limit = evm_env.cfg_env.tx_gas_limit_cap.map_or_else( + || evm_env.block_env.gas_limit(), + |cap| cap.min(evm_env.block_env.gas_limit()), + ); // Determine the highest possible gas limit, considering both the request's specified limit // and the block's limit. 
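The `RpcConvert`/`TxEnvConverter` rework above replaces the separate `CfgEnv`/`BlockEnv` arguments with a single `&EvmEnvFor<Evm>`. A minimal sketch, not part of the patch series, of what a converter looks like in the new shape, using `EthEvmConfig` as the concrete `ConfigureEvm` (for which the transaction env is revm's `TxEnv`); the function name and the field fallbacks are illustrative assumptions, not taken from the patch:

use std::convert::Infallible;

use alloy_rpc_types_eth::TransactionRequest;
use reth_evm::EvmEnvFor;
use reth_evm_ethereum::EthEvmConfig;
use revm::context::{Block, TxEnv};

// Builds a TxEnv from an RPC request, falling back to the block environment for
// missing fields. This is the `Fn(TxReq, &EvmEnvFor<Evm>) -> Result<TxEnvFor<Evm>, E>`
// shape that the closure-based `TxEnvConverter` impl above accepts.
fn tx_env_from_request(
    request: TransactionRequest,
    evm_env: &EvmEnvFor<EthEvmConfig>,
) -> Result<TxEnv, Infallible> {
    Ok(TxEnv {
        caller: request.from.unwrap_or_default(),
        // Reads from the block env go through the `Block` trait accessors.
        gas_limit: request.gas.unwrap_or_else(|| evm_env.block_env.gas_limit()),
        gas_price: request.max_fee_per_gas.unwrap_or(evm_env.block_env.basefee() as u128),
        chain_id: Some(evm_env.cfg_env.chain_id),
        ..Default::default()
    })
}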
diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 94dc214b6c8..6c3e076fb1e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -13,7 +13,7 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError, RethError}; use reth_evm::{ execute::{BlockBuilder, BlockBuilderOutcome, ExecutionOutcome}, - ConfigureEvm, Evm, NextBlockEnvAttributes, SpecFor, + ConfigureEvm, Evm, NextBlockEnvAttributes, }; use reth_primitives_traits::{transaction::error::InvalidTransactionError, HeaderTy, SealedHeader}; use reth_revm::{database::StateProviderDatabase, db::State}; @@ -23,8 +23,8 @@ use reth_rpc_eth_types::{ PendingBlockEnv, PendingBlockEnvOrigin, }; use reth_storage_api::{ - noop::NoopProvider, BlockReader, BlockReaderIdExt, ProviderBlock, ProviderHeader, - ProviderReceipt, ProviderTx, ReceiptProvider, StateProviderBox, StateProviderFactory, + noop::NoopProvider, BlockReader, BlockReaderIdExt, ProviderHeader, ProviderTx, ReceiptProvider, + StateProviderBox, StateProviderFactory, }; use reth_transaction_pool::{ error::InvalidPoolTransactionError, BestTransactions, BestTransactionsAttributes, @@ -61,17 +61,7 @@ pub trait LoadPendingBlock: /// Configures the [`PendingBlockEnv`] for the pending block /// /// If no pending block is available, this will derive it from the `latest` block - #[expect(clippy::type_complexity)] - fn pending_block_env_and_cfg( - &self, - ) -> Result< - PendingBlockEnv< - ProviderBlock, - ProviderReceipt, - SpecFor, - >, - Self::Error, - > { + fn pending_block_env_and_cfg(&self) -> Result, Self::Error> { if let Some(block) = self.provider().pending_block().map_err(Self::Error::from_eth_err)? && let Some(receipts) = self .provider() @@ -166,7 +156,7 @@ pub trait LoadPendingBlock: // Is the pending block cached? if let Some(pending_block) = lock.as_ref() { // Is the cached block not expired and latest is its parent? 
- if pending.evm_env.block_env.number == U256::from(pending_block.block().number()) && + if pending.evm_env.block_env.number() == U256::from(pending_block.block().number()) && parent.hash() == pending_block.block().parent_hash() && now <= pending_block.expires_at { @@ -265,14 +255,14 @@ pub trait LoadPendingBlock: .unwrap_or_else(BlobParams::cancun); let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; - let block_gas_limit: u64 = block_env.gas_limit; + let block_gas_limit: u64 = block_env.gas_limit(); // Only include transactions if not configured as Empty if !self.pending_block_kind().is_empty() { let mut best_txs = self .pool() .best_transactions_with_attributes(BestTransactionsAttributes::new( - block_env.basefee, + block_env.basefee(), block_env.blob_gasprice().map(|gasprice| gasprice as u64), )) // freeze to get a block as fast as possible diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index a3c79416cfe..86039e38082 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -19,14 +19,14 @@ use reth_rpc_eth_types::{ EthApiError, }; use reth_storage_api::{ProviderBlock, ProviderTx}; -use revm::{context_interface::result::ResultAndState, DatabaseCommit}; +use revm::{context::Block, context_interface::result::ResultAndState, DatabaseCommit}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use std::sync::Arc; /// Executes CPU heavy tasks. pub trait Trace: LoadState> { - /// Executes the [`TxEnvFor`] with [`EvmEnvFor`] against the given [Database] without committing - /// state changes. + /// Executes the [`TxEnvFor`] with [`reth_evm::EvmEnv`] against the given [Database] without + /// committing state changes. fn inspect( &self, db: DB, @@ -301,8 +301,8 @@ pub trait Trace: LoadState> { let state_at = block.parent_hash(); let block_hash = block.hash(); - let block_number = evm_env.block_env.number.saturating_to(); - let base_fee = evm_env.block_env.basefee; + let block_number = evm_env.block_env.number().saturating_to(); + let base_fee = evm_env.block_env.basefee(); // now get the state let state = this.state_at_block_id(state_at.into()).await?; diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index 1f3ee7dd6dd..196461d18ce 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -681,7 +681,7 @@ impl RpcInvalidTransactionError { /// Converts the halt error /// /// Takes the configured gas limit of the transaction which is attached to the error - pub const fn halt(reason: HaltReason, gas_limit: u64) -> Self { + pub fn halt(reason: HaltReason, gas_limit: u64) -> Self { match reason { HaltReason::OutOfGas(err) => Self::out_of_gas(err, gas_limit), HaltReason::NonceOverflow => Self::NonceMaxValue, @@ -762,7 +762,7 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::BlobVersionedHashesNotSupported => { Self::BlobVersionedHashesNotSupported } - InvalidTransaction::BlobGasPriceGreaterThanMax => Self::BlobFeeCapTooLow, + InvalidTransaction::BlobGasPriceGreaterThanMax { .. } => Self::BlobFeeCapTooLow, InvalidTransaction::EmptyBlobs => Self::BlobTransactionMissingBlobHashes, InvalidTransaction::BlobVersionNotSupported => Self::BlobHashVersionMismatch, InvalidTransaction::TooManyBlobs { have, .. 
} => Self::TooManyBlobs { have }, @@ -780,6 +780,7 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::Eip7873MissingTarget => { Self::other(internal_rpc_err(err.to_string())) } + InvalidTransaction::Str(_) => Self::other(internal_rpc_err(err.to_string())), } } } diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 05ad6fb4e27..d0b5c65c1ed 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -13,18 +13,18 @@ use reth_chain_state::{ BlockState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, }; use reth_ethereum_primitives::Receipt; -use reth_evm::EvmEnv; +use reth_evm::{ConfigureEvm, EvmEnvFor}; use reth_primitives_traits::{ Block, BlockTy, NodePrimitives, ReceiptTy, RecoveredBlock, SealedHeader, }; -/// Configured [`EvmEnv`] for a pending block. +/// Configured [`reth_evm::EvmEnv`] for a pending block. #[derive(Debug, Clone, Constructor)] -pub struct PendingBlockEnv { - /// Configured [`EvmEnv`] for the pending block. - pub evm_env: EvmEnv, +pub struct PendingBlockEnv { + /// Configured [`reth_evm::EvmEnv`] for the pending block. + pub evm_env: EvmEnvFor, /// Origin block for the config - pub origin: PendingBlockEnvOrigin, + pub origin: PendingBlockEnvOrigin, ReceiptTy>, } /// The origin for a configured [`PendingBlockEnv`] diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 5492e127b77..ec63443da3d 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -24,6 +24,7 @@ use reth_rpc_convert::{RpcBlock, RpcConvert, RpcTxReq}; use reth_rpc_server_types::result::rpc_err; use reth_storage_api::noop::NoopProvider; use revm::{ + context::Block, context_interface::result::ExecutionResult, primitives::{Address, Bytes, TxKind}, Database, @@ -88,7 +89,7 @@ where let tx = resolve_transaction( call, default_gas_limit, - builder.evm().block().basefee, + builder.evm().block().basefee(), chain_id, builder.evm_mut().db_mut(), tx_resp_builder, diff --git a/crates/rpc/rpc/src/aliases.rs b/crates/rpc/rpc/src/aliases.rs index 4e317305ca4..8854f1b607d 100644 --- a/crates/rpc/rpc/src/aliases.rs +++ b/crates/rpc/rpc/src/aliases.rs @@ -1,4 +1,4 @@ -use reth_evm::{ConfigureEvm, SpecFor, TxEnvFor}; +use reth_evm::ConfigureEvm; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_types::EthApiError; @@ -8,7 +8,6 @@ pub type DynRpcConverter = Box< Primitives = ::Primitives, Network = Network, Error = Error, - TxEnv = TxEnvFor, - Spec = SpecFor, + Evm = Evm, >, >; diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index b3715c0e8e0..00a89c10831 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -3,6 +3,7 @@ use alloy_consensus::{ BlockHeader, }; use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; +use alloy_evm::env::BlockEnvironment; use alloy_genesis::ChainConfig; use alloy_primitives::{uint, Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable}; @@ -40,7 +41,7 @@ use reth_storage_api::{ }; use reth_tasks::pool::BlockingTaskGuard; use reth_trie_common::{updates::TrieUpdates, HashedPostState}; -use revm::{context_interface::Transaction, state::EvmState, DatabaseCommit}; +use revm::{context::Block, context_interface::Transaction, state::EvmState, DatabaseCommit}; use revm_inspectors::tracing::{ FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, TransactionContext, }; @@ -372,8 
+373,8 @@ where let db = db.0; let tx_info = TransactionInfo { - block_number: Some(evm_env.block_env.number.saturating_to()), - base_fee: Some(evm_env.block_env.basefee), + block_number: Some(evm_env.block_env.number().saturating_to()), + base_fee: Some(evm_env.block_env.basefee()), hash: None, block_hash: None, index: None, @@ -589,8 +590,8 @@ where results.push(trace); } // Increment block_env number and timestamp for the next bundle - evm_env.block_env.number += uint!(1_U256); - evm_env.block_env.timestamp += uint!(12_U256); + evm_env.block_env.inner_mut().number += uint!(1_U256); + evm_env.block_env.inner_mut().timestamp += uint!(12_U256); all_bundles.push(results); } @@ -741,8 +742,8 @@ where .map(|c| c.tx_index.map(|i| i as u64)) .unwrap_or_default(), block_hash: transaction_context.as_ref().map(|c| c.block_hash).unwrap_or_default(), - block_number: Some(evm_env.block_env.number.saturating_to()), - base_fee: Some(evm_env.block_env.basefee), + block_number: Some(evm_env.block_env.number().saturating_to()), + base_fee: Some(evm_env.block_env.basefee()), }; if let Some(tracer) = tracer { diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 48e3219daa3..0797c2f1f8c 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -2,12 +2,12 @@ use alloy_consensus::{transaction::TxHashRef, EnvKzgSettings, Transaction as _}; use alloy_eips::eip7840::BlobParams; +use alloy_evm::env::BlockEnvironment; use alloy_primitives::{uint, Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ConfigureEvm, Evm}; - use reth_revm::{database::StateProviderDatabase, db::CacheDB}; use reth_rpc_eth_api::{ helpers::{Call, EthTransactions, LoadPendingBlock}, @@ -18,7 +18,9 @@ use reth_tasks::pool::BlockingTaskGuard; use reth_transaction_pool::{ EthBlobTransactionSidecar, EthPoolTransaction, PoolPooledTx, PoolTransaction, TransactionPool, }; -use revm::{context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef}; +use revm::{ + context::Block, context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef, +}; use std::sync::Arc; /// `Eth` bundle implementation. 
@@ -88,18 +90,18 @@ where let (mut evm_env, at) = self.eth_api().evm_env_at(block_id).await?; if let Some(coinbase) = coinbase { - evm_env.block_env.beneficiary = coinbase; + evm_env.block_env.inner_mut().beneficiary = coinbase; } // need to adjust the timestamp for the next block if let Some(timestamp) = timestamp { - evm_env.block_env.timestamp = U256::from(timestamp); + evm_env.block_env.inner_mut().timestamp = U256::from(timestamp); } else { - evm_env.block_env.timestamp += uint!(12_U256); + evm_env.block_env.inner_mut().timestamp += uint!(12_U256); } if let Some(difficulty) = difficulty { - evm_env.block_env.difficulty = U256::from(difficulty); + evm_env.block_env.inner_mut().difficulty = U256::from(difficulty); } // Validate that the bundle does not contain more than MAX_BLOB_NUMBER_PER_BLOCK blob @@ -110,7 +112,7 @@ where .eth_api() .provider() .chain_spec() - .blob_params_at_timestamp(evm_env.block_env.timestamp.saturating_to()) + .blob_params_at_timestamp(evm_env.block_env.timestamp().saturating_to()) .unwrap_or_else(BlobParams::cancun); if transactions.iter().filter_map(|tx| tx.blob_gas_used()).sum::() > blob_params.max_blob_gas_per_block() @@ -124,30 +126,30 @@ where } // default to call gas limit unless user requests a smaller limit - evm_env.block_env.gas_limit = self.inner.eth_api.call_gas_limit(); + evm_env.block_env.inner_mut().gas_limit = self.inner.eth_api.call_gas_limit(); if let Some(gas_limit) = gas_limit { - if gas_limit > evm_env.block_env.gas_limit { + if gas_limit > evm_env.block_env.gas_limit() { return Err( EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh).into() ) } - evm_env.block_env.gas_limit = gas_limit; + evm_env.block_env.inner_mut().gas_limit = gas_limit; } if let Some(base_fee) = base_fee { - evm_env.block_env.basefee = base_fee.try_into().unwrap_or(u64::MAX); + evm_env.block_env.inner_mut().basefee = base_fee.try_into().unwrap_or(u64::MAX); } - let state_block_number = evm_env.block_env.number; + let state_block_number = evm_env.block_env.number(); // use the block number of the request - evm_env.block_env.number = U256::from(block_number); + evm_env.block_env.inner_mut().number = U256::from(block_number); let eth_api = self.eth_api().clone(); self.eth_api() .spawn_with_state_at_block(at, move |state| { - let coinbase = evm_env.block_env.beneficiary; - let basefee = evm_env.block_env.basefee; + let coinbase = evm_env.block_env.beneficiary(); + let basefee = evm_env.block_env.basefee(); let db = CacheDB::new(StateProviderDatabase::new(state)); let initial_coinbase = db diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index a76e146042d..abe06cb55ec 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -1,7 +1,6 @@ //! Contains RPC handler implementations specific to endpoints that call/execute within evm. 
use crate::EthApi; -use reth_evm::{SpecFor, TxEnvFor}; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall}, @@ -13,12 +12,7 @@ impl EthCall for EthApi where N: RpcNodeCore, EthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = EthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } @@ -26,12 +20,7 @@ impl Call for EthApi where N: RpcNodeCore, EthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = EthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -48,11 +37,6 @@ impl EstimateCall for EthApi where N: RpcNodeCore, EthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = EthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index f7043821754..c738a64c2d5 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -2,7 +2,7 @@ use alloy_consensus::{transaction::TxHashRef, BlockHeader}; use alloy_eips::BlockNumberOrTag; -use alloy_evm::overrides::apply_block_overrides; +use alloy_evm::{env::BlockEnvironment, overrides::apply_block_overrides}; use alloy_primitives::U256; use alloy_rpc_types_eth::BlockId; use alloy_rpc_types_mev::{ @@ -22,7 +22,9 @@ use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_storage_api::ProviderTx; use reth_tasks::pool::BlockingTaskGuard; use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool}; -use revm::{context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef}; +use revm::{ + context::Block, context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef, +}; use std::{sync::Arc, time::Duration}; use tracing::trace; @@ -242,12 +244,12 @@ where .spawn_with_state_at_block(current_block_id, move |state| { // Setup environment let current_block_number = current_block.number(); - let coinbase = evm_env.block_env.beneficiary; - let basefee = evm_env.block_env.basefee; + let coinbase = evm_env.block_env.beneficiary(); + let basefee = evm_env.block_env.basefee(); let mut db = CacheDB::new(StateProviderDatabase::new(state)); // apply overrides - apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env); + apply_block_overrides(block_overrides, &mut db, evm_env.block_env.inner_mut()); let initial_coinbase_balance = DatabaseRef::basic_ref(&db, coinbase) .map_err(EthApiError::from_eth_err)? 
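The call sites above all follow the same split once `BlockEnv` becomes an associated type: reads go through the `revm::context::Block` trait accessors, while targeted overrides reach the underlying revm block env through `BlockEnvironment::inner_mut()`. A minimal sketch of that pattern, not part of the patch series, written against `EthEvmConfig` (whose block env is the plain revm `BlockEnv`) and assuming the identity `BlockEnvironment` impl that the generic call sites rely on; the helper name and the zero-base-fee rule are illustrative, mirroring the `prepare_call_env` hunk rather than adding behaviour:

use reth_evm::{env::BlockEnvironment, EvmEnvFor};
use reth_evm_ethereum::EthEvmConfig;
use revm::context::Block;

// Caps the requested gas at the block limit (read via the `Block` trait) and, for
// zero-priced calls, lowers the base fee through `inner_mut()` so the EVM invariant
// `basefee <= gas_price` continues to hold.
fn prepare_call_block_env(
    evm_env: &mut EvmEnvFor<EthEvmConfig>,
    gas_price: u128,
    requested_gas: u64,
) -> u64 {
    let capped_gas = requested_gas.min(evm_env.block_env.gas_limit());
    if gas_price == 0 {
        evm_env.block_env.inner_mut().basefee = 0;
    }
    capped_gas
}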
diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index a72b2c44487..1d93226dd6a 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -8,7 +8,7 @@ use alloy_evm::{ block::{BlockExecutorFactory, BlockExecutorFor, ExecutableTx}, eth::{EthBlockExecutionCtx, EthBlockExecutor}, precompiles::PrecompilesMap, - revm::context::result::ResultAndState, + revm::context::{result::ResultAndState, Block as _}, EthEvm, EthEvmFactory, }; use alloy_sol_macro::sol; @@ -271,7 +271,7 @@ pub fn apply_withdrawals_contract_call( // Clean-up post system tx context state.remove(&SYSTEM_ADDRESS); - state.remove(&evm.block().beneficiary); + state.remove(&evm.block().beneficiary()); evm.db_mut().commit(state); diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index b5e69670ec7..e32f0be6bd5 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -18,7 +18,7 @@ use reth_ethereum::{ evm::{ primitives::{Database, EvmEnv}, revm::{ - context::{Context, TxEnv}, + context::{BlockEnv, Context, TxEnv}, context_interface::result::{EVMError, HaltReason}, inspector::{Inspector, NoOpInspector}, interpreter::interpreter::EthInterpreter, @@ -54,6 +54,7 @@ impl EvmFactory for MyEvmFactory { type HaltReason = HaltReason; type Context = EthEvmContext; type Spec = SpecId; + type BlockEnv = BlockEnv; type Precompiles = PrecompilesMap; fn create_evm(&self, db: DB, input: EvmEnv) -> Self::Evm { diff --git a/examples/custom-node/src/evm/alloy.rs b/examples/custom-node/src/evm/alloy.rs index 6071a2c6dd8..d8df842cfc5 100644 --- a/examples/custom-node/src/evm/alloy.rs +++ b/examples/custom-node/src/evm/alloy.rs @@ -40,6 +40,7 @@ where type Error = EVMError; type HaltReason = OpHaltReason; type Spec = OpSpecId; + type BlockEnv = BlockEnv; type Precompiles = P; type Inspector = I; @@ -103,6 +104,7 @@ impl EvmFactory for CustomEvmFactory { type Error = EVMError; type HaltReason = OpHaltReason; type Spec = OpSpecId; + type BlockEnv = BlockEnv; type Precompiles = PrecompilesMap; fn create_evm( diff --git a/examples/custom-node/src/evm/env.rs b/examples/custom-node/src/evm/env.rs index 5508ec4e6d0..53a2b4e3f15 100644 --- a/examples/custom-node/src/evm/env.rs +++ b/examples/custom-node/src/evm/env.rs @@ -1,6 +1,7 @@ use crate::primitives::{CustomTransaction, TxPayment}; use alloy_eips::{eip2930::AccessList, Typed2718}; use alloy_evm::{FromRecoveredTx, FromTxWithEncoded, IntoTxEnv}; +use alloy_op_evm::block::OpTxEnv; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use op_alloy_consensus::OpTxEnvelope; use op_revm::OpTransaction; @@ -328,3 +329,12 @@ impl IntoTxEnv for CustomTxEnv { self } } + +impl OpTxEnv for CustomTxEnv { + fn encoded_bytes(&self) -> Option<&Bytes> { + match self { + Self::Op(tx) => tx.encoded_bytes(), + Self::Payment(_) => None, + } + } +} diff --git a/examples/precompile-cache/src/main.rs b/examples/precompile-cache/src/main.rs index 69aaf7b4035..fe748db4636 100644 --- a/examples/precompile-cache/src/main.rs +++ b/examples/precompile-cache/src/main.rs @@ -16,7 +16,7 @@ use reth_ethereum::{ evm::{ primitives::{Database, EvmEnv}, revm::{ - context::{Context, TxEnv}, + context::{BlockEnv, Context, TxEnv}, context_interface::result::{EVMError, HaltReason}, inspector::{Inspector, NoOpInspector}, interpreter::interpreter::EthInterpreter, @@ -69,6 +69,7 @@ impl EvmFactory for MyEvmFactory { type HaltReason = HaltReason; type Context = EthEvmContext; 
type Spec = SpecId; + type BlockEnv = BlockEnv; type Precompiles = PrecompilesMap; fn create_evm(&self, db: DB, input: EvmEnv) -> Self::Evm { From 080d508ebff845d4c05c8db17f6bdcefa3f12242 Mon Sep 17 00:00:00 2001 From: radik878 Date: Wed, 15 Oct 2025 23:14:42 +0300 Subject: [PATCH 072/371] fix(session): remove Clone derive from SessionCounter (#19051) --- crates/net/network/src/session/counter.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/network/src/session/counter.rs b/crates/net/network/src/session/counter.rs index db9bd16cda9..a3318ea05c5 100644 --- a/crates/net/network/src/session/counter.rs +++ b/crates/net/network/src/session/counter.rs @@ -3,7 +3,7 @@ use reth_network_api::Direction; use reth_network_types::SessionLimits; /// Keeps track of all sessions. -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct SessionCounter { /// Limits to enforce. limits: SessionLimits, From f6a583ffc40084f9724d11889d8be109afccb9f2 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 16 Oct 2025 00:15:47 +0400 Subject: [PATCH 073/371] feat: stricter bound (#19049) --- crates/evm/evm/src/lib.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/evm/evm/src/lib.rs b/crates/evm/evm/src/lib.rs index a2a30b9e0ab..e2101fd915b 100644 --- a/crates/evm/evm/src/lib.rs +++ b/crates/evm/evm/src/lib.rs @@ -404,7 +404,13 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { db: &'a mut State, parent: &'a SealedHeader<::BlockHeader>, attributes: Self::NextBlockEnvCtx, - ) -> Result, Self::Error> { + ) -> Result< + impl BlockBuilder< + Primitives = Self::Primitives, + Executor: BlockExecutorFor<'a, Self::BlockExecutorFactory, DB>, + >, + Self::Error, + > { let evm_env = self.next_evm_env(parent, &attributes)?; let evm = self.evm_with_env(db, evm_env); let ctx = self.context_for_next_block(parent, attributes)?; From 5c19ce75805d8477b6e839cc0afd259c8f99da77 Mon Sep 17 00:00:00 2001 From: drhgencer Date: Thu, 16 Oct 2025 04:19:03 +0800 Subject: [PATCH 074/371] refactor(txpool): reuse cached gas_limit value (#19052) --- crates/transaction-pool/src/validate/eth.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 6d1a0147f0b..9eab8767d6d 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -396,7 +396,7 @@ where // max possible tx fee is (gas_price * gas_limit) // (if EIP1559) max possible tx fee is (max_fee_per_gas * gas_limit) let gas_price = transaction.max_fee_per_gas(); - let max_tx_fee_wei = gas_price.saturating_mul(transaction.gas_limit() as u128); + let max_tx_fee_wei = gas_price.saturating_mul(transaction_gas_limit as u128); if max_tx_fee_wei > tx_fee_cap_wei { return Err(TransactionValidationOutcome::Invalid( transaction, From 65a7f35a56cf3f9b1c0b86498e51b2ac92ca0ba7 Mon Sep 17 00:00:00 2001 From: James Prestwich Date: Wed, 15 Oct 2025 17:10:24 -0400 Subject: [PATCH 075/371] feat: use env filter for otlp, respect otel env var (#19050) --- crates/ethereum/cli/src/app.rs | 2 +- crates/node/core/Cargo.toml | 2 +- crates/node/core/src/args/trace.rs | 26 +++++++++++++------ crates/optimism/cli/src/app.rs | 2 +- crates/tracing/src/layers.rs | 4 +-- docs/vocs/docs/pages/cli/reth.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/config.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/db.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/db/checksum.mdx | 10 ++++--- 
docs/vocs/docs/pages/cli/reth/db/clear.mdx | 10 ++++--- .../docs/pages/cli/reth/db/clear/mdbx.mdx | 10 ++++--- .../pages/cli/reth/db/clear/static-file.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/db/diff.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/db/drop.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/db/get.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx | 10 ++++--- .../pages/cli/reth/db/get/static-file.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/db/list.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/db/path.mdx | 10 ++++--- .../docs/pages/cli/reth/db/repair-trie.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/db/stats.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/db/version.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/download.mdx | 10 ++++--- .../vocs/docs/pages/cli/reth/dump-genesis.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/export-era.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/import-era.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/import.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/init-state.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/init.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/node.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/p2p.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/p2p/body.mdx | 10 ++++--- .../vocs/docs/pages/cli/reth/p2p/bootnode.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/p2p/header.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx | 10 ++++--- .../docs/pages/cli/reth/p2p/rlpx/ping.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/prune.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/re-execute.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/stage.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 10 ++++--- .../cli/reth/stage/dump/account-hashing.mdx | 10 ++++--- .../pages/cli/reth/stage/dump/execution.mdx | 10 ++++--- .../docs/pages/cli/reth/stage/dump/merkle.mdx | 10 ++++--- .../cli/reth/stage/dump/storage-hashing.mdx | 10 ++++--- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 10 ++++--- .../vocs/docs/pages/cli/reth/stage/unwind.mdx | 10 ++++--- .../cli/reth/stage/unwind/num-blocks.mdx | 10 ++++--- .../pages/cli/reth/stage/unwind/to-block.mdx | 10 ++++--- 49 files changed, 287 insertions(+), 189 deletions(-) diff --git a/crates/ethereum/cli/src/app.rs b/crates/ethereum/cli/src/app.rs index 805c9144257..ffbda96f981 100644 --- a/crates/ethereum/cli/src/app.rs +++ b/crates/ethereum/cli/src/app.rs @@ -119,7 +119,7 @@ where layers.with_span_layer( "reth".to_string(), output_type.clone(), - self.cli.traces.otlp_level, + self.cli.traces.otlp_level.clone(), )?; } diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index bf784b50703..4d4fd475ac4 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -45,7 +45,7 @@ alloy-eips.workspace = true # misc eyre.workspace = true -clap = { workspace = true, features = ["derive"] } +clap = { workspace = true, features = ["derive", "env"] } humantime.workspace = true rand.workspace = true derive_more.workspace = true diff --git a/crates/node/core/src/args/trace.rs b/crates/node/core/src/args/trace.rs index 751ab556ac8..b8c9bb18488 100644 --- a/crates/node/core/src/args/trace.rs +++ b/crates/node/core/src/args/trace.rs @@ -2,19 +2,22 @@ use clap::Parser; use eyre::{ensure, WrapErr}; -use tracing::Level; +use reth_tracing::tracing_subscriber::EnvFilter; use url::Url; /// CLI arguments for configuring `Opentelemetry` trace and span export. 
#[derive(Debug, Clone, Parser)] pub struct TraceArgs { - /// Enable `Opentelemetry` tracing export to an OTLP endpoint. + /// Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently + /// only http exporting is supported. /// /// If no value provided, defaults to `http://localhost:4318/v1/traces`. /// /// Example: --tracing-otlp=http://collector:4318/v1/traces #[arg( long = "tracing-otlp", + // Per specification. + env = "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", global = true, value_name = "URL", num_args = 0..=1, @@ -25,30 +28,37 @@ pub struct TraceArgs { )] pub otlp: Option, - /// Set the minimum log level for OTLP traces. + /// Set a filter directive for the OTLP tracer. This controls the verbosity + /// of spans and events sent to the OTLP endpoint. It follows the same + /// syntax as the `RUST_LOG` environment variable. /// - /// Valid values: ERROR, WARN, INFO, DEBUG, TRACE + /// Example: --tracing-otlp-level=info,reth=debug,hyper_util=off /// /// Defaults to TRACE if not specified. #[arg( long = "tracing-otlp-level", global = true, - value_name = "LEVEL", + value_name = "FILTER", default_value = "TRACE", help_heading = "Tracing" )] - pub otlp_level: Level, + pub otlp_level: EnvFilter, } impl Default for TraceArgs { fn default() -> Self { - Self { otlp: None, otlp_level: Level::TRACE } + Self { otlp: None, otlp_level: EnvFilter::from_default_env() } } } // Parses and validates an OTLP endpoint url. fn parse_otlp_endpoint(arg: &str) -> eyre::Result { - let url = Url::parse(arg).wrap_err("Invalid URL for OTLP trace output")?; + let mut url = Url::parse(arg).wrap_err("Invalid URL for OTLP trace output")?; + + // If the path is empty, we set the path. + if url.path() == "/" { + url.set_path("/v1/traces") + } // OTLP url must end with `/v1/traces` per the OTLP specification. ensure!( diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index 891578cbe24..7b6c2a0d004 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -124,7 +124,7 @@ where layers.with_span_layer( "reth".to_string(), output_type.clone(), - self.cli.traces.otlp_level, + self.cli.traces.otlp_level.clone(), )?; } diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 385c4fac51d..d27bbc96b6e 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -130,13 +130,13 @@ impl Layers { &mut self, service_name: String, endpoint_exporter: url::Url, - level: tracing::Level, + filter: EnvFilter, ) -> eyre::Result<()> { // Create the span provider let span_layer = span_layer(service_name, &endpoint_exporter) .map_err(|e| eyre::eyre!("Failed to build OTLP span exporter {}", e))? - .with_filter(tracing::level_filters::LevelFilter::from_level(level)); + .with_filter(filter); self.add_layer(span_layer); diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 5f0ccfca01f..f57862d464b 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -116,16 +116,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. 
+ [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index 849f4ec5bab..8f343b5f795 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -102,16 +102,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 3b28b43162a..4fd6d05eb42 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -167,16 +167,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index 13e2c2bd39d..7c79615d1f6 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -119,16 +119,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
+ + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 5c19682e8b6..a2637e7b3ce 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -111,16 +111,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index 0e5526affe5..eae01b35309 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -110,16 +110,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index 72c3108fcf3..464155fb2a3 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -113,16 +113,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index fadd0613ca8..e5082ccd406 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -146,16 +146,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index 0f9ddba9ee9..57eb5979d6f 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -109,16 +109,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index 942eda79998..5512c5ec826 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -111,16 +111,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index b7ccf9e7d3d..0fe1bea66dc 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -119,16 +119,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index 28d7c343e94..3ae7fb0af29 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -119,16 +119,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index 3f9ac94c5c5..305aa6c0b85 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -152,16 +152,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index f6714898b35..a86e52aee92 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -106,16 +106,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. 
+ [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index 3a6bfae1d3c..4547d9a7f5e 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -109,16 +109,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index a4939c3ef93..cc07cacd4f3 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -119,16 +119,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index 7b3766b4e8a..076c6b02a27 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -106,16 +106,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
+ + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index 74296538855..adc1cca8895 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -164,16 +164,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index a6dbbcb1b27..cb7e8c91658 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -105,16 +105,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index ee65abbeb42..79a3adc3155 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -170,16 +170,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index ae17ab91e0e..887dfafba80 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -165,16 +165,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index f92b52ec591..560a2b95041 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -166,16 +166,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 03d1e7b883b..2827380087c 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -189,16 +189,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index 993ae2dcd85..fea3a6c2cf0 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -154,16 +154,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index a172256058b..9d6a1627984 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -996,16 +996,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 9693e20e756..1ea79131a1f 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -103,16 +103,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index ae0f3d293d1..564dc463fcd 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -323,16 +323,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. 
+ [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index d1bf7c69870..608a42181bc 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -114,16 +114,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index 9e542916d4c..05b34b4385b 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -323,16 +323,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index 75ab654964f..6af29692abe 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -100,16 +100,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
+ + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index 7152b222fb4..9dadabc42eb 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -100,16 +100,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index f54f6687805..72fcc82be51 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -154,16 +154,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index 973ac79f29f..6f5c281b958 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -167,16 +167,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index f382eb2081e..c5cb65599f7 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -103,16 +103,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index e2ba5751b52..8ada3ae1cca 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -168,16 +168,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 01b4f61f29f..83af1939c22 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -161,16 +161,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index 18f44ae13ed..869f9292817 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -118,16 +118,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index de0f693ed57..2774f1a684b 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -118,16 +118,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index aaff755796a..009e7cd3ab3 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -118,16 +118,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index 2ff7b22b76b..869990b351c 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -118,16 +118,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 2af69a053d6..a3fc1c1696f 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -389,16 +389,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index 977d949a9b7..ca107a4f837 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -162,16 +162,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index 0b60467c413..2ef35c6b47c 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -110,16 +110,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index 07632cf8285..1ac3b5d654b 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -110,16 +110,18 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. + Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. If no value provided, defaults to `http://localhost:4318/v1/traces`. Example: --tracing-otlp=http://collector:4318/v1/traces - --tracing-otlp-level - Set the minimum log level for OTLP traces. + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - Valid values: ERROR, WARN, INFO, DEBUG, TRACE + --tracing-otlp-level + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp-level=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. 
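The endpoint normalization introduced by the tracing patch above can be sketched in isolation. The following standalone example is illustrative only and not part of the patch: it assumes the `url` and `eyre` crates, uses a hypothetical `normalize_otlp_endpoint` helper mirroring the patched `parse_otlp_endpoint`, and, since the body of the `ensure!` check is elided in the hunk above, the exact condition shown is an assumption based on the surrounding comment.

use eyre::{ensure, WrapErr};
use url::Url;

/// Hypothetical standalone version of the endpoint normalization above.
fn normalize_otlp_endpoint(arg: &str) -> eyre::Result<Url> {
    let mut url = Url::parse(arg).wrap_err("Invalid URL for OTLP trace output")?;

    // A bare authority such as `http://collector:4318` parses with the root
    // path "/"; default it to the OTLP traces path.
    if url.path() == "/" {
        url.set_path("/v1/traces");
    }

    // Assumed check: the OTLP/HTTP spec expects the traces endpoint to end
    // with `/v1/traces`, per the comment in the patch.
    ensure!(
        url.path().ends_with("/v1/traces"),
        "OTLP trace endpoint must end with /v1/traces"
    );
    Ok(url)
}

fn main() -> eyre::Result<()> {
    let defaulted = normalize_otlp_endpoint("http://localhost:4318")?;
    assert_eq!(defaulted.as_str(), "http://localhost:4318/v1/traces");

    let explicit = normalize_otlp_endpoint("http://collector:4318/v1/traces")?;
    assert_eq!(explicit.as_str(), "http://collector:4318/v1/traces");

    assert!(normalize_otlp_endpoint("http://collector:4318/metrics").is_err());
    Ok(())
}

With this behavior, `--tracing-otlp=http://collector:4318` and `--tracing-otlp=http://collector:4318/v1/traces` resolve to the same endpoint, while any other path is rejected. The `--tracing-otlp-level` value is in turn parsed as a `tracing_subscriber::EnvFilter`, so directives such as `info,reth=debug,hyper_util=off` follow the same syntax as `RUST_LOG`.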
From a09670986532ea84ee455f03af0ea805c6e727d9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Oct 2025 23:28:29 +0200 Subject: [PATCH 076/371] chore: defense for new SubscriptionKind item (#19054) --- crates/rpc/rpc/src/eth/pubsub.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 1c7982f80fd..985cdf3129e 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -101,6 +101,7 @@ where kind: SubscriptionKind, params: Option, ) -> Result<(), ErrorObject<'static>> { + #[allow(unreachable_patterns)] match kind { SubscriptionKind::NewHeads => { pipe_from_stream(accepted_sink, self.new_headers_stream()).await @@ -199,6 +200,10 @@ where Ok(()) } + _ => { + // TODO: implement once https://github.com/alloy-rs/alloy/pull/2974 is released + Err(invalid_params_rpc_err("Unsupported subscription kind")) + } } } } From 926b1a43fef5aba3adf46c7ac671f753b13c51f1 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Wed, 15 Oct 2025 23:52:27 +0000 Subject: [PATCH 077/371] refactor: Remove max_proof_task_concurrency as configurable variable (#19009) Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: yongkangc <46377366+yongkangc@users.noreply.github.com> Co-authored-by: Yong Kang --- crates/engine/primitives/src/config.rs | 24 ------------------- .../tree/src/tree/payload_processor/mod.rs | 9 ++++--- crates/node/core/src/args/engine.rs | 10 ++------ crates/node/core/src/node_config.rs | 3 +-- docs/cli/help.rs | 5 ---- docs/vocs/docs/pages/cli/reth/node.mdx | 5 ---- 6 files changed, 7 insertions(+), 49 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 9e2c8210f08..fbe79920d2b 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -6,9 +6,6 @@ pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; /// How close to the canonical head we persist blocks. pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0; -/// Default maximum concurrency for on-demand proof tasks (blinded nodes) -pub const DEFAULT_MAX_PROOF_TASK_CONCURRENCY: u64 = 256; - /// Minimum number of workers we allow configuring explicitly. pub const MIN_WORKER_COUNT: usize = 32; @@ -102,8 +99,6 @@ pub struct TreeConfig { cross_block_cache_size: u64, /// Whether the host has enough parallelism to run state root task. has_enough_parallelism: bool, - /// Maximum number of concurrent proof tasks - max_proof_task_concurrency: u64, /// Whether multiproof task should chunk proof targets. multiproof_chunking_enabled: bool, /// Multiproof task chunk size for proof targets. 
@@ -153,7 +148,6 @@ impl Default for TreeConfig { state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE, has_enough_parallelism: has_enough_parallelism(), - max_proof_task_concurrency: DEFAULT_MAX_PROOF_TASK_CONCURRENCY, multiproof_chunking_enabled: true, multiproof_chunk_size: DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE, reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES, @@ -184,7 +178,6 @@ impl TreeConfig { state_provider_metrics: bool, cross_block_cache_size: u64, has_enough_parallelism: bool, - max_proof_task_concurrency: u64, multiproof_chunking_enabled: bool, multiproof_chunk_size: usize, reserved_cpu_cores: usize, @@ -196,7 +189,6 @@ impl TreeConfig { storage_worker_count: usize, account_worker_count: usize, ) -> Self { - assert!(max_proof_task_concurrency > 0, "max_proof_task_concurrency must be at least 1"); Self { persistence_threshold, memory_block_buffer_target, @@ -210,7 +202,6 @@ impl TreeConfig { state_provider_metrics, cross_block_cache_size, has_enough_parallelism, - max_proof_task_concurrency, multiproof_chunking_enabled, multiproof_chunk_size, reserved_cpu_cores, @@ -249,11 +240,6 @@ impl TreeConfig { self.max_execute_block_batch_size } - /// Return the maximum proof task concurrency. - pub const fn max_proof_task_concurrency(&self) -> u64 { - self.max_proof_task_concurrency - } - /// Return whether the multiproof task chunking is enabled. pub const fn multiproof_chunking_enabled(&self) -> bool { self.multiproof_chunking_enabled @@ -423,16 +409,6 @@ impl TreeConfig { self } - /// Setter for maximum number of concurrent proof tasks. - pub const fn with_max_proof_task_concurrency( - mut self, - max_proof_task_concurrency: u64, - ) -> Self { - assert!(max_proof_task_concurrency > 0, "max_proof_task_concurrency must be at least 1"); - self.max_proof_task_concurrency = max_proof_task_concurrency; - self - } - /// Setter for whether multiproof task should chunk proof targets. pub const fn with_multiproof_chunking_enabled( mut self, diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index e3090d60756..74ef660402a 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -200,7 +200,6 @@ where ); let storage_worker_count = config.storage_worker_count(); let account_worker_count = config.account_worker_count(); - let max_proof_task_concurrency = config.max_proof_task_concurrency() as usize; let proof_handle = ProofWorkerHandle::new( self.executor.handle().clone(), consistent_view, @@ -209,15 +208,15 @@ where account_worker_count, ); - // We set it to half of the proof task concurrency, because often for each multiproof we - // spawn one Tokio task for the account proof, and one Tokio task for the storage proof. - let max_multi_proof_task_concurrency = max_proof_task_concurrency / 2; + // Limit concurrent multiproof tasks to match the account worker pool size. + // Each multiproof task spawns a tokio task that queues to one account worker, + // which then fans out to storage workers as needed. 
let multi_proof_task = MultiProofTask::new( state_root_config, self.executor.clone(), proof_handle.clone(), to_sparse_trie, - max_multi_proof_task_concurrency, + account_worker_count, config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()), ); diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index 6b678b5789b..8a77eaa780f 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -4,8 +4,8 @@ use clap::Args; use reth_engine_primitives::{TreeConfig, DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE}; use crate::node_config::{ - DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, DEFAULT_MAX_PROOF_TASK_CONCURRENCY, - DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, + DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, + DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, }; /// Parameters for configuring the engine driver. @@ -63,10 +63,6 @@ pub struct EngineArgs { #[arg(long = "engine.accept-execution-requests-hash")] pub accept_execution_requests_hash: bool, - /// Configure the maximum number of concurrent proof tasks - #[arg(long = "engine.max-proof-task-concurrency", default_value_t = DEFAULT_MAX_PROOF_TASK_CONCURRENCY)] - pub max_proof_task_concurrency: u64, - /// Whether multiproof task should chunk proof targets. #[arg(long = "engine.multiproof-chunking", default_value = "true")] pub multiproof_chunking_enabled: bool, @@ -135,7 +131,6 @@ impl Default for EngineArgs { state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, accept_execution_requests_hash: false, - max_proof_task_concurrency: DEFAULT_MAX_PROOF_TASK_CONCURRENCY, multiproof_chunking_enabled: true, multiproof_chunk_size: DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE, reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES, @@ -162,7 +157,6 @@ impl EngineArgs { .with_state_provider_metrics(self.state_provider_metrics) .with_always_compare_trie_updates(self.state_root_task_compare_updates) .with_cross_block_cache_size(self.cross_block_cache_size * 1024 * 1024) - .with_max_proof_task_concurrency(self.max_proof_task_concurrency) .with_multiproof_chunking_enabled(self.multiproof_chunking_enabled) .with_multiproof_chunk_size(self.multiproof_chunk_size) .with_reserved_cpu_cores(self.reserved_cpu_cores) diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 94dbecb649c..19b51bce03f 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -34,8 +34,7 @@ use tracing::*; use crate::args::{EraArgs, MetricArgs}; pub use reth_engine_primitives::{ - DEFAULT_MAX_PROOF_TASK_CONCURRENCY, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, - DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, + DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, }; /// Default size of cross-block cache in megabytes. 
diff --git a/docs/cli/help.rs b/docs/cli/help.rs index 05e61eef740..0474d00e723 100755 --- a/docs/cli/help.rs +++ b/docs/cli/help.rs @@ -269,11 +269,6 @@ fn preprocess_help(s: &str) -> Cow<'_, str> { r"(rpc.max-tracing-requests \n.*\n.*\n.*\n.*\n.*)\[default: \d+\]", r"$1[default: ]", ), - // Handle engine.max-proof-task-concurrency dynamic default - ( - r"(engine\.max-proof-task-concurrency.*)\[default: \d+\]", - r"$1[default: ]", - ), // Handle engine.reserved-cpu-cores dynamic default ( r"(engine\.reserved-cpu-cores.*)\[default: \d+\]", diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 9d6a1627984..ef3274001d8 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -835,11 +835,6 @@ Engine: --engine.accept-execution-requests-hash Enables accepting requests hash instead of an array of requests in `engine_newPayloadV4` - --engine.max-proof-task-concurrency - Configure the maximum number of concurrent proof tasks - - [default: 256] - --engine.multiproof-chunking Whether multiproof task should chunk proof targets From a84bef0832af08477465707eea597d3d60389479 Mon Sep 17 00:00:00 2001 From: YK Date: Thu, 16 Oct 2025 15:28:04 +0800 Subject: [PATCH 078/371] refactor: revert Remove max_proof_task_concurrency as configurable variable" (#19062) --- crates/engine/primitives/src/config.rs | 24 +++++++++++++++++++ .../tree/src/tree/payload_processor/mod.rs | 9 +++---- crates/node/core/src/args/engine.rs | 10 ++++++-- crates/node/core/src/node_config.rs | 3 ++- docs/cli/help.rs | 5 ++++ docs/vocs/docs/pages/cli/reth/node.mdx | 5 ++++ 6 files changed, 49 insertions(+), 7 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index fbe79920d2b..9e2c8210f08 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -6,6 +6,9 @@ pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; /// How close to the canonical head we persist blocks. pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0; +/// Default maximum concurrency for on-demand proof tasks (blinded nodes) +pub const DEFAULT_MAX_PROOF_TASK_CONCURRENCY: u64 = 256; + /// Minimum number of workers we allow configuring explicitly. pub const MIN_WORKER_COUNT: usize = 32; @@ -99,6 +102,8 @@ pub struct TreeConfig { cross_block_cache_size: u64, /// Whether the host has enough parallelism to run state root task. has_enough_parallelism: bool, + /// Maximum number of concurrent proof tasks + max_proof_task_concurrency: u64, /// Whether multiproof task should chunk proof targets. multiproof_chunking_enabled: bool, /// Multiproof task chunk size for proof targets. 
@@ -148,6 +153,7 @@ impl Default for TreeConfig { state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE, has_enough_parallelism: has_enough_parallelism(), + max_proof_task_concurrency: DEFAULT_MAX_PROOF_TASK_CONCURRENCY, multiproof_chunking_enabled: true, multiproof_chunk_size: DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE, reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES, @@ -178,6 +184,7 @@ impl TreeConfig { state_provider_metrics: bool, cross_block_cache_size: u64, has_enough_parallelism: bool, + max_proof_task_concurrency: u64, multiproof_chunking_enabled: bool, multiproof_chunk_size: usize, reserved_cpu_cores: usize, @@ -189,6 +196,7 @@ impl TreeConfig { storage_worker_count: usize, account_worker_count: usize, ) -> Self { + assert!(max_proof_task_concurrency > 0, "max_proof_task_concurrency must be at least 1"); Self { persistence_threshold, memory_block_buffer_target, @@ -202,6 +210,7 @@ impl TreeConfig { state_provider_metrics, cross_block_cache_size, has_enough_parallelism, + max_proof_task_concurrency, multiproof_chunking_enabled, multiproof_chunk_size, reserved_cpu_cores, @@ -240,6 +249,11 @@ impl TreeConfig { self.max_execute_block_batch_size } + /// Return the maximum proof task concurrency. + pub const fn max_proof_task_concurrency(&self) -> u64 { + self.max_proof_task_concurrency + } + /// Return whether the multiproof task chunking is enabled. pub const fn multiproof_chunking_enabled(&self) -> bool { self.multiproof_chunking_enabled @@ -409,6 +423,16 @@ impl TreeConfig { self } + /// Setter for maximum number of concurrent proof tasks. + pub const fn with_max_proof_task_concurrency( + mut self, + max_proof_task_concurrency: u64, + ) -> Self { + assert!(max_proof_task_concurrency > 0, "max_proof_task_concurrency must be at least 1"); + self.max_proof_task_concurrency = max_proof_task_concurrency; + self + } + /// Setter for whether multiproof task should chunk proof targets. pub const fn with_multiproof_chunking_enabled( mut self, diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 74ef660402a..e3090d60756 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -200,6 +200,7 @@ where ); let storage_worker_count = config.storage_worker_count(); let account_worker_count = config.account_worker_count(); + let max_proof_task_concurrency = config.max_proof_task_concurrency() as usize; let proof_handle = ProofWorkerHandle::new( self.executor.handle().clone(), consistent_view, @@ -208,15 +209,15 @@ where account_worker_count, ); - // Limit concurrent multiproof tasks to match the account worker pool size. - // Each multiproof task spawns a tokio task that queues to one account worker, - // which then fans out to storage workers as needed. + // We set it to half of the proof task concurrency, because often for each multiproof we + // spawn one Tokio task for the account proof, and one Tokio task for the storage proof. 
+ let max_multi_proof_task_concurrency = max_proof_task_concurrency / 2; let multi_proof_task = MultiProofTask::new( state_root_config, self.executor.clone(), proof_handle.clone(), to_sparse_trie, - account_worker_count, + max_multi_proof_task_concurrency, config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()), ); diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index 8a77eaa780f..6b678b5789b 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -4,8 +4,8 @@ use clap::Args; use reth_engine_primitives::{TreeConfig, DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE}; use crate::node_config::{ - DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, - DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, + DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, DEFAULT_MAX_PROOF_TASK_CONCURRENCY, + DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, }; /// Parameters for configuring the engine driver. @@ -63,6 +63,10 @@ pub struct EngineArgs { #[arg(long = "engine.accept-execution-requests-hash")] pub accept_execution_requests_hash: bool, + /// Configure the maximum number of concurrent proof tasks + #[arg(long = "engine.max-proof-task-concurrency", default_value_t = DEFAULT_MAX_PROOF_TASK_CONCURRENCY)] + pub max_proof_task_concurrency: u64, + /// Whether multiproof task should chunk proof targets. #[arg(long = "engine.multiproof-chunking", default_value = "true")] pub multiproof_chunking_enabled: bool, @@ -131,6 +135,7 @@ impl Default for EngineArgs { state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, accept_execution_requests_hash: false, + max_proof_task_concurrency: DEFAULT_MAX_PROOF_TASK_CONCURRENCY, multiproof_chunking_enabled: true, multiproof_chunk_size: DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE, reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES, @@ -157,6 +162,7 @@ impl EngineArgs { .with_state_provider_metrics(self.state_provider_metrics) .with_always_compare_trie_updates(self.state_root_task_compare_updates) .with_cross_block_cache_size(self.cross_block_cache_size * 1024 * 1024) + .with_max_proof_task_concurrency(self.max_proof_task_concurrency) .with_multiproof_chunking_enabled(self.multiproof_chunking_enabled) .with_multiproof_chunk_size(self.multiproof_chunk_size) .with_reserved_cpu_cores(self.reserved_cpu_cores) diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 19b51bce03f..94dbecb649c 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -34,7 +34,8 @@ use tracing::*; use crate::args::{EraArgs, MetricArgs}; pub use reth_engine_primitives::{ - DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, + DEFAULT_MAX_PROOF_TASK_CONCURRENCY, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, + DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, }; /// Default size of cross-block cache in megabytes. 
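The Rust changes above restore the knob end to end: the CLI flag feeds `TreeConfig`, and the payload processor derives its multiproof concurrency from that value. A minimal sketch of that flow, using only names introduced in this patch (the `main` wrapper and the literal default of 256 are illustrative, not part of the change):

```rust
// Sketch only (not part of the patch): how the re-added knob is expected to
// flow from the CLI into the payload processor.
use reth_engine_primitives::{TreeConfig, DEFAULT_MAX_PROOF_TASK_CONCURRENCY};

fn main() {
    // Equivalent of passing `--engine.max-proof-task-concurrency 256`;
    // `with_max_proof_task_concurrency` asserts the value is at least 1.
    let config = TreeConfig::default()
        .with_max_proof_task_concurrency(DEFAULT_MAX_PROOF_TASK_CONCURRENCY);

    // The payload processor halves this value because each multiproof typically
    // spawns one account-proof task and one storage-proof task.
    let max_multi_proof_task_concurrency =
        (config.max_proof_task_concurrency() as usize) / 2;
    assert_eq!(max_multi_proof_task_concurrency, 128);
}
```

Halving leaves room for each in-flight multiproof to hold one account-proof task and one storage-proof task without oversubscribing the proof worker pool, which is the rationale stated in the code comment above.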
diff --git a/docs/cli/help.rs b/docs/cli/help.rs index 0474d00e723..05e61eef740 100755 --- a/docs/cli/help.rs +++ b/docs/cli/help.rs @@ -269,6 +269,11 @@ fn preprocess_help(s: &str) -> Cow<'_, str> { r"(rpc.max-tracing-requests \n.*\n.*\n.*\n.*\n.*)\[default: \d+\]", r"$1[default: ]", ), + // Handle engine.max-proof-task-concurrency dynamic default + ( + r"(engine\.max-proof-task-concurrency.*)\[default: \d+\]", + r"$1[default: ]", + ), // Handle engine.reserved-cpu-cores dynamic default ( r"(engine\.reserved-cpu-cores.*)\[default: \d+\]", diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index ef3274001d8..9d6a1627984 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -835,6 +835,11 @@ Engine: --engine.accept-execution-requests-hash Enables accepting requests hash instead of an array of requests in `engine_newPayloadV4` + --engine.max-proof-task-concurrency + Configure the maximum number of concurrent proof tasks + + [default: 256] + --engine.multiproof-chunking Whether multiproof task should chunk proof targets From 84aa51481b4864e93b899a37b99eed07b2279870 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 16 Oct 2025 09:42:25 +0100 Subject: [PATCH 079/371] chore: rename CLI argument `--tracing-otlp-level` to `--tracing-otlp.filter` (#19061) Co-authored-by: Claude --- crates/ethereum/cli/src/app.rs | 2 +- crates/node/core/src/args/trace.rs | 8 ++++---- crates/optimism/cli/src/app.rs | 2 +- docs/vocs/docs/pages/cli/reth.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/config.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/checksum.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/clear.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/diff.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/drop.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/get.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/list.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/path.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/stats.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/db/version.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/download.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/dump-genesis.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/export-era.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/import-era.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/import.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/init-state.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/init.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/node.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/p2p.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/p2p/body.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/p2p/header.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/prune.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/re-execute.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/stage.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 4 ++-- .../docs/pages/cli/reth/stage/dump/account-hashing.mdx | 4 ++-- 
docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx | 4 ++-- .../docs/pages/cli/reth/stage/dump/storage-hashing.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/stage/unwind.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx | 4 ++-- 47 files changed, 94 insertions(+), 94 deletions(-) diff --git a/crates/ethereum/cli/src/app.rs b/crates/ethereum/cli/src/app.rs index ffbda96f981..c0e2e4662ca 100644 --- a/crates/ethereum/cli/src/app.rs +++ b/crates/ethereum/cli/src/app.rs @@ -119,7 +119,7 @@ where layers.with_span_layer( "reth".to_string(), output_type.clone(), - self.cli.traces.otlp_level.clone(), + self.cli.traces.otlp_filter.clone(), )?; } diff --git a/crates/node/core/src/args/trace.rs b/crates/node/core/src/args/trace.rs index b8c9bb18488..2e37feb6739 100644 --- a/crates/node/core/src/args/trace.rs +++ b/crates/node/core/src/args/trace.rs @@ -32,22 +32,22 @@ pub struct TraceArgs { /// of spans and events sent to the OTLP endpoint. It follows the same /// syntax as the `RUST_LOG` environment variable. /// - /// Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + /// Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off /// /// Defaults to TRACE if not specified. #[arg( - long = "tracing-otlp-level", + long = "tracing-otlp.filter", global = true, value_name = "FILTER", default_value = "TRACE", help_heading = "Tracing" )] - pub otlp_level: EnvFilter, + pub otlp_filter: EnvFilter, } impl Default for TraceArgs { fn default() -> Self { - Self { otlp: None, otlp_level: EnvFilter::from_default_env() } + Self { otlp: None, otlp_filter: EnvFilter::from_default_env() } } } diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index 7b6c2a0d004..621d16c7e13 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -124,7 +124,7 @@ where layers.with_span_layer( "reth".to_string(), output_type.clone(), - self.cli.traces.otlp_level.clone(), + self.cli.traces.otlp_filter.clone(), )?; } diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index f57862d464b..feb4e8bf50d 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -124,10 +124,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index 8f343b5f795..6c7cf532995 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -110,10 +110,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 4fd6d05eb42..04b779c0f13 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -175,10 +175,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index 7c79615d1f6..d4a32382302 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -127,10 +127,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index a2637e7b3ce..5f1f9935b0f 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -119,10 +119,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index eae01b35309..324e6f15ca2 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -118,10 +118,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index 464155fb2a3..375692f315f 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -121,10 +121,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
- Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index e5082ccd406..24c2493d6c8 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -154,10 +154,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index 57eb5979d6f..58f4e3771b9 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -117,10 +117,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index 5512c5ec826..93d12e2130e 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -119,10 +119,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 0fe1bea66dc..7f1a6e2a121 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -127,10 +127,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index 3ae7fb0af29..7ec416f4a4d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -127,10 +127,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. 
It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index 305aa6c0b85..7a9ee35145e 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -160,10 +160,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index a86e52aee92..113fbb21509 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -114,10 +114,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index 4547d9a7f5e..e4fd2eeb118 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -117,10 +117,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index cc07cacd4f3..cb100a63e4f 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -127,10 +127,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index 076c6b02a27..88616890e51 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -114,10 +114,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. 
This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index adc1cca8895..f6b75e785d2 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -172,10 +172,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index cb7e8c91658..48ccb4855a6 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -113,10 +113,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 79a3adc3155..0f769e77599 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -178,10 +178,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 887dfafba80..71742b25b33 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -173,10 +173,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 560a2b95041..80621a4deac 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -174,10 +174,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 2827380087c..86132c163d4 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -197,10 +197,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index fea3a6c2cf0..81be59d6789 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -162,10 +162,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 9d6a1627984..ea2d259f9ec 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -1004,10 +1004,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 1ea79131a1f..2fc4aa30849 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -111,10 +111,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index 564dc463fcd..10efb9b85d7 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -331,10 +331,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index 608a42181bc..7541ba55651 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -122,10 +122,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index 05b34b4385b..f854ab9000b 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -331,10 +331,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index 6af29692abe..1d287c7cf09 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -108,10 +108,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index 9dadabc42eb..d4f07885fea 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -108,10 +108,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
- Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 72fcc82be51..202a14b2e19 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -162,10 +162,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index 6f5c281b958..2bb23f77d23 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -175,10 +175,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index c5cb65599f7..eed32a608be 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -111,10 +111,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 8ada3ae1cca..b97fffa00d0 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -176,10 +176,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 83af1939c22..6dbee5df10c 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -169,10 +169,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
- Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index 869f9292817..13819423bfd 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -126,10 +126,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index 2774f1a684b..73b24e9ba46 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -126,10 +126,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index 009e7cd3ab3..a5b3c0f4ff6 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -126,10 +126,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index 869990b351c..e6deadb2581 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -126,10 +126,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index a3fc1c1696f..2e9873034ff 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -397,10 +397,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index ca107a4f837..fa62d0546d6 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -170,10 +170,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index 2ef35c6b47c..2799b752fef 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -118,10 +118,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index 1ac3b5d654b..d2056f7e349 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -118,10 +118,10 @@ Tracing: [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] - --tracing-otlp-level + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. - Example: --tracing-otlp-level=info,reth=debug,hyper_util=off + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off Defaults to TRACE if not specified. 
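Functionally the flag is unchanged by the rename; it still parses a `RUST_LOG`-style directive. A minimal sketch of what the new `--tracing-otlp.filter` value resolves to, assuming the `EnvFilter` behind `TraceArgs` is `tracing_subscriber::EnvFilter` (the directive string is the one from the help text):

```rust
// Sketch only: shows the directive syntax accepted by the renamed flag.
// Assumes the `EnvFilter` used by `TraceArgs` is `tracing_subscriber::EnvFilter`.
use tracing_subscriber::EnvFilter;

fn main() {
    // Equivalent of `--tracing-otlp.filter=info,reth=debug,hyper_util=off`:
    // INFO globally, DEBUG for `reth`, and nothing from `hyper_util`.
    let filter = EnvFilter::new("info,reth=debug,hyper_util=off");

    // Without the flag the CLI falls back to `TRACE`, i.e. everything is exported.
    let fallback = EnvFilter::new("TRACE");

    println!("otlp filter: {filter}; fallback: {fallback}");
}
```

The `TRACE` fallback in the sketch mirrors the clap `default_value` shown in the patch above.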
From 3de82cf2bd7a71e7313f3db7606521126718bd42 Mon Sep 17 00:00:00 2001 From: GarmashAlex Date: Thu, 16 Oct 2025 11:58:05 +0300 Subject: [PATCH 080/371] fix(net): Increment out_of_order_requests in BodiesDownloader on range reset (#19063) --- crates/net/downloaders/src/bodies/bodies.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 09eb22854d4..153f269fe41 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -347,6 +347,12 @@ where // written by external services (e.g. BlockchainTree). tracing::trace!(target: "downloaders::bodies", ?range, prev_range = ?self.download_range, "Download range reset"); info!(target: "downloaders::bodies", count, ?range, "Downloading bodies"); + // Increment out-of-order requests metric if the new start is below the last returned block + if let Some(last_returned) = self.latest_queued_block_number && + *range.start() < last_returned + { + self.metrics.out_of_order_requests.increment(1); + } self.clear(); self.download_range = range; Ok(()) From be94d0d393b44ad6cd34758408d660b7423ccdf8 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Thu, 16 Oct 2025 11:52:35 +0200 Subject: [PATCH 081/371] feat(trie): Merge trie changesets changes into main (#19068) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Co-authored-by: Roman Hodulák Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- .../benches/canonical_hashes_range.rs | 7 +- crates/chain-state/src/in_memory.rs | 141 +- crates/chain-state/src/memory_overlay.rs | 11 +- crates/chain-state/src/test_utils.rs | 32 +- crates/cli/commands/src/stage/drop.rs | 6 +- crates/cli/commands/src/stage/unwind.rs | 60 +- crates/config/src/config.rs | 8 +- crates/engine/primitives/src/event.rs | 6 +- crates/engine/tree/src/engine.rs | 4 +- crates/engine/tree/src/persistence.rs | 8 +- crates/engine/tree/src/tree/error.rs | 15 - crates/engine/tree/src/tree/mod.rs | 267 +-- .../engine/tree/src/tree/payload_validator.rs | 160 +- crates/engine/tree/src/tree/state.rs | 75 +- crates/engine/tree/src/tree/tests.rs | 33 +- crates/exex/exex/src/backfill/factory.rs | 2 +- crates/node/core/src/args/pruning.rs | 1 + crates/node/core/src/args/stage.rs | 5 + crates/optimism/flashblocks/src/worker.rs | 1 + crates/optimism/payload/src/builder.rs | 14 +- crates/optimism/payload/src/payload.rs | 8 +- crates/payload/primitives/src/traits.rs | 4 +- crates/prune/prune/src/builder.rs | 8 +- crates/prune/prune/src/segments/mod.rs | 4 +- crates/prune/prune/src/segments/set.rs | 14 +- .../src/segments/user/merkle_change_sets.rs | 116 ++ crates/prune/prune/src/segments/user/mod.rs | 2 + crates/prune/types/src/segment.rs | 13 +- crates/prune/types/src/target.rs | 84 +- crates/ress/provider/src/lib.rs | 20 +- crates/ress/provider/src/pending_state.rs | 10 +- .../rpc-eth-api/src/helpers/pending_block.rs | 3 +- crates/rpc/rpc-eth-types/src/pending_block.rs | 11 +- crates/stages/api/src/pipeline/mod.rs | 18 +- crates/stages/stages/benches/setup/mod.rs | 2 +- crates/stages/stages/src/sets.rs | 30 +- crates/stages/stages/src/stages/execution.rs | 4 +- crates/stages/stages/src/stages/merkle.rs | 8 +- .../stages/src/stages/merkle_changesets.rs | 380 ++++ crates/stages/stages/src/stages/mod.rs | 5 +- crates/stages/stages/src/stages/prune.rs | 6 +- 
crates/stages/types/src/checkpoints.rs | 29 +- crates/stages/types/src/id.rs | 5 +- crates/stages/types/src/lib.rs | 4 +- crates/storage/db-api/src/cursor.rs | 2 +- crates/storage/db-api/src/models/accounts.rs | 31 +- crates/storage/db-api/src/models/mod.rs | 5 +- crates/storage/db-api/src/tables/mod.rs | 22 +- crates/storage/db-common/src/init.rs | 4 +- crates/storage/errors/src/provider.rs | 8 + .../storage/provider/src/bundle_state/mod.rs | 5 - .../provider/src/changesets_utils/mod.rs | 7 + .../state_reverts.rs | 0 .../provider/src/changesets_utils/trie.rs | 147 ++ crates/storage/provider/src/lib.rs | 2 +- .../src/providers/blockchain_provider.rs | 146 +- .../provider/src/providers/consistent.rs | 70 +- .../provider/src/providers/database/mod.rs | 6 +- .../src/providers/database/provider.rs | 1752 +++++++++++++++-- crates/storage/provider/src/providers/mod.rs | 2 +- .../provider/src/providers/state/overlay.rs | 136 +- .../storage/provider/src/test_utils/mock.rs | 20 +- crates/storage/provider/src/test_utils/mod.rs | 2 +- crates/storage/provider/src/traits/full.rs | 6 +- crates/storage/provider/src/writer/mod.rs | 7 +- crates/storage/storage-api/src/noop.rs | 24 +- crates/storage/storage-api/src/trie.rs | 82 +- crates/trie/common/src/hashed_state.rs | 150 ++ crates/trie/common/src/input.rs | 10 +- crates/trie/common/src/lib.rs | 5 +- crates/trie/common/src/storage.rs | 174 +- crates/trie/common/src/updates.rs | 214 +- crates/trie/common/src/utils.rs | 53 + crates/trie/db/src/trie_cursor.rs | 24 +- crates/trie/db/tests/trie.rs | 10 +- crates/trie/parallel/benches/root.rs | 2 +- crates/trie/sparse-parallel/src/trie.rs | 14 +- crates/trie/sparse/src/trie.rs | 14 +- crates/trie/trie/src/trie_cursor/mod.rs | 45 + docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 23 +- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 23 +- examples/custom-node/src/engine.rs | 4 +- 82 files changed, 3777 insertions(+), 1118 deletions(-) create mode 100644 crates/prune/prune/src/segments/user/merkle_change_sets.rs create mode 100644 crates/stages/stages/src/stages/merkle_changesets.rs delete mode 100644 crates/storage/provider/src/bundle_state/mod.rs create mode 100644 crates/storage/provider/src/changesets_utils/mod.rs rename crates/storage/provider/src/{bundle_state => changesets_utils}/state_reverts.rs (100%) create mode 100644 crates/storage/provider/src/changesets_utils/trie.rs create mode 100644 crates/trie/common/src/utils.rs diff --git a/crates/chain-state/benches/canonical_hashes_range.rs b/crates/chain-state/benches/canonical_hashes_range.rs index 58fdd73bf99..c19ce25ec4f 100644 --- a/crates/chain-state/benches/canonical_hashes_range.rs +++ b/crates/chain-state/benches/canonical_hashes_range.rs @@ -2,7 +2,7 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion}; use reth_chain_state::{ - test_utils::TestBlockBuilder, ExecutedBlockWithTrieUpdates, MemoryOverlayStateProviderRef, + test_utils::TestBlockBuilder, ExecutedBlock, MemoryOverlayStateProviderRef, }; use reth_ethereum_primitives::EthPrimitives; use reth_storage_api::{noop::NoopProvider, BlockHashReader}; @@ -84,10 +84,7 @@ fn bench_canonical_hashes_range(c: &mut Criterion) { fn setup_provider_with_blocks( num_blocks: usize, -) -> ( - MemoryOverlayStateProviderRef<'static, EthPrimitives>, - Vec>, -) { +) -> (MemoryOverlayStateProviderRef<'static, EthPrimitives>, Vec>) { let mut builder = TestBlockBuilder::::default(); let blocks: Vec<_> = builder.get_executed_blocks(1000..1000 + num_blocks as u64).collect(); diff --git 
a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index dd78b6cf5fe..5b2f666657b 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -242,7 +242,7 @@ impl CanonicalInMemoryState { /// Updates the pending block with the given block. /// /// Note: This assumes that the parent block of the pending block is canonical. - pub fn set_pending_block(&self, pending: ExecutedBlockWithTrieUpdates) { + pub fn set_pending_block(&self, pending: ExecutedBlock) { // fetch the state of the pending block's parent block let parent = self.state_by_hash(pending.recovered_block().parent_hash()); let pending = BlockState::with_parent(pending, parent); @@ -258,7 +258,7 @@ impl CanonicalInMemoryState { /// them to their parent blocks. fn update_blocks(&self, new_blocks: I, reorged: R) where - I: IntoIterator>, + I: IntoIterator>, R: IntoIterator>, { { @@ -568,22 +568,19 @@ impl CanonicalInMemoryState { #[derive(Debug, PartialEq, Eq, Clone)] pub struct BlockState { /// The executed block that determines the state after this block has been executed. - block: ExecutedBlockWithTrieUpdates, + block: ExecutedBlock, /// The block's parent block if it exists. parent: Option>>, } impl BlockState { /// [`BlockState`] constructor. - pub const fn new(block: ExecutedBlockWithTrieUpdates) -> Self { + pub const fn new(block: ExecutedBlock) -> Self { Self { block, parent: None } } /// [`BlockState`] constructor with parent. - pub const fn with_parent( - block: ExecutedBlockWithTrieUpdates, - parent: Option>, - ) -> Self { + pub const fn with_parent(block: ExecutedBlock, parent: Option>) -> Self { Self { block, parent } } @@ -597,12 +594,12 @@ impl BlockState { } /// Returns the executed block that determines the state. - pub fn block(&self) -> ExecutedBlockWithTrieUpdates { + pub fn block(&self) -> ExecutedBlock { self.block.clone() } /// Returns a reference to the executed block that determines the state. - pub const fn block_ref(&self) -> &ExecutedBlockWithTrieUpdates { + pub const fn block_ref(&self) -> &ExecutedBlock { &self.block } @@ -730,6 +727,8 @@ pub struct ExecutedBlock { pub execution_output: Arc>, /// Block's hashed state. pub hashed_state: Arc, + /// Trie updates that result from calculating the state root for the block. + pub trie_updates: Arc, } impl Default for ExecutedBlock { @@ -738,6 +737,7 @@ impl Default for ExecutedBlock { recovered_block: Default::default(), execution_output: Default::default(), hashed_state: Default::default(), + trie_updates: Default::default(), } } } @@ -767,113 +767,16 @@ impl ExecutedBlock { &self.hashed_state } - /// Returns a [`BlockNumber`] of the block. + /// Returns a reference to the trie updates resulting from the execution outcome #[inline] - pub fn block_number(&self) -> BlockNumber { - self.recovered_block.header().number() + pub fn trie_updates(&self) -> &TrieUpdates { + &self.trie_updates } -} -/// Trie updates that result from calculating the state root for the block. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum ExecutedTrieUpdates { - /// Trie updates present. State root was calculated, and the trie updates can be applied to the - /// database. - Present(Arc), - /// Trie updates missing. State root was calculated, but the trie updates cannot be applied to - /// the current database state. To apply the updates, the state root must be recalculated, and - /// new trie updates must be generated. 
- /// - /// This can happen when processing fork chain blocks that are building on top of the - /// historical database state. Since we don't store the historical trie state, we cannot - /// generate the trie updates for it. - Missing, -} - -impl ExecutedTrieUpdates { - /// Creates a [`ExecutedTrieUpdates`] with present but empty trie updates. - pub fn empty() -> Self { - Self::Present(Arc::default()) - } - - /// Sets the trie updates to the provided value as present. - pub fn set_present(&mut self, updates: Arc) { - *self = Self::Present(updates); - } - - /// Takes the present trie updates, leaving the state as missing. - pub fn take_present(&mut self) -> Option> { - match self { - Self::Present(updates) => { - let updates = core::mem::take(updates); - *self = Self::Missing; - Some(updates) - } - Self::Missing => None, - } - } - - /// Returns a reference to the trie updates if present. - #[allow(clippy::missing_const_for_fn)] // false positive - pub fn as_ref(&self) -> Option<&TrieUpdates> { - match self { - Self::Present(updates) => Some(updates), - Self::Missing => None, - } - } - - /// Returns `true` if the trie updates are present. - pub const fn is_present(&self) -> bool { - matches!(self, Self::Present(_)) - } - - /// Returns `true` if the trie updates are missing. - pub const fn is_missing(&self) -> bool { - matches!(self, Self::Missing) - } -} - -/// An [`ExecutedBlock`] with its [`TrieUpdates`]. -/// -/// We store it as separate type because [`TrieUpdates`] are only available for blocks stored in -/// memory and can't be obtained for canonical persisted blocks. -#[derive( - Clone, Debug, PartialEq, Eq, derive_more::Deref, derive_more::DerefMut, derive_more::Into, -)] -pub struct ExecutedBlockWithTrieUpdates { - /// Inner [`ExecutedBlock`]. - #[deref] - #[deref_mut] - #[into] - pub block: ExecutedBlock, - /// Trie updates that result from calculating the state root for the block. - /// - /// If [`ExecutedTrieUpdates::Missing`], the trie updates should be computed when persisting - /// the block **on top of the canonical parent**. - pub trie: ExecutedTrieUpdates, -} - -impl ExecutedBlockWithTrieUpdates { - /// [`ExecutedBlock`] constructor. - pub const fn new( - recovered_block: Arc>, - execution_output: Arc>, - hashed_state: Arc, - trie: ExecutedTrieUpdates, - ) -> Self { - Self { block: ExecutedBlock { recovered_block, execution_output, hashed_state }, trie } - } - - /// Returns a reference to the trie updates for the block, if present. + /// Returns a [`BlockNumber`] of the block. #[inline] - pub fn trie_updates(&self) -> Option<&TrieUpdates> { - self.trie.as_ref() - } - - /// Converts the value into [`SealedBlock`]. - pub fn into_sealed_block(self) -> SealedBlock { - let block = Arc::unwrap_or_clone(self.block.recovered_block); - block.into_sealed_block() + pub fn block_number(&self) -> BlockNumber { + self.recovered_block.header().number() } } @@ -883,18 +786,14 @@ pub enum NewCanonicalChain { /// A simple append to the current canonical head Commit { /// all blocks that lead back to the canonical head - new: Vec>, + new: Vec>, }, /// A reorged chain consists of two chains that trace back to a shared ancestor block at which /// point they diverge. Reorg { /// All blocks of the _new_ chain - new: Vec>, + new: Vec>, /// All blocks of the _old_ chain - /// - /// These are not [`ExecutedBlockWithTrieUpdates`] because we don't always have the trie - /// updates for the old canonical chain. 
For example, in case of node being restarted right - /// before the reorg [`TrieUpdates`] can't be fetched from database. old: Vec>, }, } @@ -1257,7 +1156,7 @@ mod tests { block1.recovered_block().hash() ); - let chain = NewCanonicalChain::Reorg { new: vec![block2.clone()], old: vec![block1.block] }; + let chain = NewCanonicalChain::Reorg { new: vec![block2.clone()], old: vec![block1] }; state.update_chain(chain); assert_eq!( state.head_state().unwrap().block_ref().recovered_block().hash(), @@ -1539,7 +1438,7 @@ mod tests { // Test reorg notification let chain_reorg = NewCanonicalChain::Reorg { new: vec![block1a.clone(), block2a.clone()], - old: vec![block1.block.clone(), block2.block.clone()], + old: vec![block1.clone(), block2.clone()], }; assert_eq!( diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index a035d833a46..2e1efd1ed1b 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -1,4 +1,4 @@ -use super::ExecutedBlockWithTrieUpdates; +use super::ExecutedBlock; use alloy_consensus::BlockHeader; use alloy_primitives::{keccak256, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; use reth_errors::ProviderResult; @@ -24,7 +24,7 @@ pub struct MemoryOverlayStateProviderRef< /// Historical state provider for state lookups that are not found in memory blocks. pub(crate) historical: Box, /// The collection of executed parent blocks. Expected order is newest to oldest. - pub(crate) in_memory: Vec>, + pub(crate) in_memory: Vec>, /// Lazy-loaded in-memory trie data. pub(crate) trie_input: OnceLock, } @@ -41,10 +41,7 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> { /// - `in_memory` - the collection of executed ancestor blocks in reverse. /// - `historical` - a historical state provider for the latest ancestor block stored in the /// database. - pub fn new( - historical: Box, - in_memory: Vec>, - ) -> Self { + pub fn new(historical: Box, in_memory: Vec>) -> Self { Self { historical, in_memory, trie_input: OnceLock::new() } } @@ -60,7 +57,7 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> { self.in_memory .iter() .rev() - .map(|block| (block.hashed_state.as_ref(), block.trie.as_ref())), + .map(|block| (block.hashed_state.as_ref(), block.trie_updates.as_ref())), ) }) } diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index ace30b9cb35..5d318aca56c 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -1,6 +1,6 @@ use crate::{ - in_memory::ExecutedBlockWithTrieUpdates, CanonStateNotification, CanonStateNotifications, - CanonStateSubscriptions, ExecutedTrieUpdates, + in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, + CanonStateSubscriptions, }; use alloy_consensus::{Header, SignableTransaction, TxEip1559, TxReceipt, EMPTY_ROOT_HASH}; use alloy_eips::{ @@ -23,7 +23,7 @@ use reth_primitives_traits::{ SignedTransaction, }; use reth_storage_api::NodePrimitivesProvider; -use reth_trie::{root::state_root_unhashed, HashedPostState}; +use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm_database::BundleState; use revm_state::AccountInfo; use std::{ @@ -198,45 +198,45 @@ impl TestBlockBuilder { fork } - /// Gets an [`ExecutedBlockWithTrieUpdates`] with [`BlockNumber`], receipts and parent hash. + /// Gets an [`ExecutedBlock`] with [`BlockNumber`], receipts and parent hash. 
fn get_executed_block( &mut self, block_number: BlockNumber, receipts: Vec>, parent_hash: B256, - ) -> ExecutedBlockWithTrieUpdates { + ) -> ExecutedBlock { let block_with_senders = self.generate_random_block(block_number, parent_hash); let (block, senders) = block_with_senders.split_sealed(); - ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed(block, senders)), - Arc::new(ExecutionOutcome::new( + ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed(block, senders)), + execution_output: Arc::new(ExecutionOutcome::new( BundleState::default(), receipts, block_number, vec![Requests::default()], )), - Arc::new(HashedPostState::default()), - ExecutedTrieUpdates::empty(), - ) + hashed_state: Arc::new(HashedPostState::default()), + trie_updates: Arc::new(TrieUpdates::default()), + } } - /// Generates an [`ExecutedBlockWithTrieUpdates`] that includes the given receipts. + /// Generates an [`ExecutedBlock`] that includes the given receipts. pub fn get_executed_block_with_receipts( &mut self, receipts: Vec>, parent_hash: B256, - ) -> ExecutedBlockWithTrieUpdates { + ) -> ExecutedBlock { let number = rand::rng().random::(); self.get_executed_block(number, receipts, parent_hash) } - /// Generates an [`ExecutedBlockWithTrieUpdates`] with the given [`BlockNumber`]. + /// Generates an [`ExecutedBlock`] with the given [`BlockNumber`]. pub fn get_executed_block_with_number( &mut self, block_number: BlockNumber, parent_hash: B256, - ) -> ExecutedBlockWithTrieUpdates { + ) -> ExecutedBlock { self.get_executed_block(block_number, vec![vec![]], parent_hash) } @@ -244,7 +244,7 @@ impl TestBlockBuilder { pub fn get_executed_blocks( &mut self, range: Range, - ) -> impl Iterator + '_ { + ) -> impl Iterator + '_ { let mut parent_hash = B256::default(); range.map(move |number| { let current_parent_hash = parent_hash; diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 66227e10271..5a01ad1fed6 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -15,7 +15,7 @@ use reth_db_common::{ }; use reth_node_api::{HeaderTy, ReceiptTy, TxTy}; use reth_node_core::args::StageEnum; -use reth_provider::{DBProvider, DatabaseProviderFactory, StaticFileProviderFactory}; +use reth_provider::{DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, TrieWriter}; use reth_prune::PruneSegment; use reth_stages::StageId; use reth_static_file_types::StaticFileSegment; @@ -138,6 +138,10 @@ impl Command { None, )?; } + StageEnum::MerkleChangeSets => { + provider_rw.clear_trie_changesets()?; + reset_stage_checkpoint(tx, StageId::MerkleChangeSets)?; + } StageEnum::AccountHistory | StageEnum::StorageHistory => { tx.clear::()?; tx.clear::()?; diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index 9ef2085a065..ba9a00b11e2 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -15,10 +15,7 @@ use reth_db::DatabaseEnv; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_evm::ConfigureEvm; use reth_exex::ExExManagerHandle; -use reth_provider::{ - providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, ChainStateBlockReader, - ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, -}; +use reth_provider::{providers::ProviderNodeTypes, BlockNumReader, ProviderFactory}; use reth_stages::{ sets::{DefaultStages, OfflineStages}, stages::ExecutionStage, @@ 
-60,54 +57,21 @@ impl> Command let components = components(provider_factory.chain_spec()); - let highest_static_file_block = provider_factory - .static_file_provider() - .get_highest_static_files() - .max_block_num() - .filter(|highest_static_file_block| *highest_static_file_block > target); - - // Execute a pipeline unwind if the start of the range overlaps the existing static - // files. If that's the case, then copy all available data from MDBX to static files, and - // only then, proceed with the unwind. - // - // We also execute a pipeline unwind if `offline` is specified, because we need to only - // unwind the data associated with offline stages. - if highest_static_file_block.is_some() || self.offline { - if self.offline { - info!(target: "reth::cli", "Performing an unwind for offline-only data!"); - } - - if let Some(highest_static_file_block) = highest_static_file_block { - info!(target: "reth::cli", ?target, ?highest_static_file_block, "Executing a pipeline unwind."); - } else { - info!(target: "reth::cli", ?target, "Executing a pipeline unwind."); - } - info!(target: "reth::cli", prune_config=?config.prune, "Using prune settings"); - - // This will build an offline-only pipeline if the `offline` flag is enabled - let mut pipeline = - self.build_pipeline(config, provider_factory, components.evm_config().clone())?; - - // Move all applicable data from database to static files. - pipeline.move_to_static_files()?; + if self.offline { + info!(target: "reth::cli", "Performing an unwind for offline-only data!"); + } - pipeline.unwind(target, None)?; - } else { - info!(target: "reth::cli", ?target, "Executing a database unwind."); - let provider = provider_factory.provider_rw()?; + let highest_static_file_block = provider_factory.provider()?.last_block_number()?; + info!(target: "reth::cli", ?target, ?highest_static_file_block, prune_config=?config.prune, "Executing a pipeline unwind."); - provider - .remove_block_and_execution_above(target) - .map_err(|err| eyre::eyre!("Transaction error on unwind: {err}"))?; + // This will build an offline-only pipeline if the `offline` flag is enabled + let mut pipeline = + self.build_pipeline(config, provider_factory, components.evm_config().clone())?; - // update finalized block if needed - let last_saved_finalized_block_number = provider.last_finalized_block_number()?; - if last_saved_finalized_block_number.is_none_or(|f| f > target) { - provider.save_finalized_block_number(target)?; - } + // Move all applicable data from database to static files. 
+ pipeline.move_to_static_files()?; - provider.commit()?; - } + pipeline.unwind(target, None)?; info!(target: "reth::cli", ?target, "Unwound blocks"); diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index c1c5ef96075..7ea5569834c 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -440,7 +440,7 @@ pub struct PruneConfig { impl Default for PruneConfig { fn default() -> Self { - Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::none() } + Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::default() } } } @@ -464,6 +464,7 @@ impl PruneConfig { account_history, storage_history, bodies_history, + merkle_changesets, receipts_log_filter, }, } = other; @@ -480,6 +481,8 @@ impl PruneConfig { self.segments.account_history = self.segments.account_history.or(account_history); self.segments.storage_history = self.segments.storage_history.or(storage_history); self.segments.bodies_history = self.segments.bodies_history.or(bodies_history); + // Merkle changesets is not optional, so we just replace it if provided + self.segments.merkle_changesets = merkle_changesets; if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() { self.segments.receipts_log_filter = receipts_log_filter; @@ -1001,6 +1004,7 @@ receipts = 'full' account_history: None, storage_history: Some(PruneMode::Before(5000)), bodies_history: None, + merkle_changesets: PruneMode::Before(0), receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([( Address::random(), PruneMode::Full, @@ -1017,6 +1021,7 @@ receipts = 'full' account_history: Some(PruneMode::Distance(2000)), storage_history: Some(PruneMode::Distance(3000)), bodies_history: None, + merkle_changesets: PruneMode::Distance(10000), receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([ (Address::random(), PruneMode::Distance(1000)), (Address::random(), PruneMode::Before(2000)), @@ -1035,6 +1040,7 @@ receipts = 'full' assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000))); assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000))); assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000))); + assert_eq!(config1.segments.merkle_changesets, PruneMode::Distance(10000)); assert_eq!(config1.segments.receipts_log_filter, original_filter); } diff --git a/crates/engine/primitives/src/event.rs b/crates/engine/primitives/src/event.rs index 1c74282cba5..8cced031524 100644 --- a/crates/engine/primitives/src/event.rs +++ b/crates/engine/primitives/src/event.rs @@ -10,7 +10,7 @@ use core::{ fmt::{Display, Formatter, Result}, time::Duration, }; -use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_chain_state::ExecutedBlock; use reth_ethereum_primitives::EthPrimitives; use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader}; @@ -24,11 +24,11 @@ pub enum ConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status ForkchoiceUpdated(ForkchoiceState, ForkchoiceStatus), /// A block was added to the fork chain. 
- ForkBlockAdded(ExecutedBlockWithTrieUpdates, Duration), + ForkBlockAdded(ExecutedBlock, Duration), /// A new block was received from the consensus engine BlockReceived(BlockNumHash), /// A block was added to the canonical chain, and the elapsed time validating the block - CanonicalBlockAdded(ExecutedBlockWithTrieUpdates, Duration), + CanonicalBlockAdded(ExecutedBlock, Duration), /// A canonical chain was committed, and the elapsed time committing the data CanonicalChainCommitted(Box>, Duration), /// The consensus engine processed an invalid block. diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index bee52a46438..f08195b205e 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -7,7 +7,7 @@ use crate::{ }; use alloy_primitives::B256; use futures::{Stream, StreamExt}; -use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_chain_state::ExecutedBlock; use reth_engine_primitives::{BeaconEngineMessage, ConsensusEngineEvent}; use reth_ethereum_primitives::EthPrimitives; use reth_payload_primitives::PayloadTypes; @@ -246,7 +246,7 @@ pub enum EngineApiRequest { /// A request received from the consensus engine. Beacon(BeaconEngineMessage), /// Request to insert an already executed block, e.g. via payload building. - InsertExecutedBlock(ExecutedBlockWithTrieUpdates), + InsertExecutedBlock(ExecutedBlock), } impl Display for EngineApiRequest { diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index de5b10c331c..751356fc399 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -1,7 +1,7 @@ use crate::metrics::PersistenceMetrics; use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; -use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; use reth_ethereum_primitives::EthPrimitives; use reth_primitives_traits::NodePrimitives; @@ -140,7 +140,7 @@ where fn on_save_blocks( &self, - blocks: Vec>, + blocks: Vec>, ) -> Result, PersistenceError> { debug!(target: "engine::persistence", first=?blocks.first().map(|b| b.recovered_block.num_hash()), last=?blocks.last().map(|b| b.recovered_block.num_hash()), "Saving range of blocks"); let start_time = Instant::now(); @@ -180,7 +180,7 @@ pub enum PersistenceAction { /// /// First, header, transaction, and receipt-related data should be written to static files. /// Then the execution history-related data will be written to the database. - SaveBlocks(Vec>, oneshot::Sender>), + SaveBlocks(Vec>, oneshot::Sender>), /// Removes block data above the given block number from the database. /// @@ -257,7 +257,7 @@ impl PersistenceHandle { /// If there are no blocks to persist, then `None` is sent in the sender. pub fn save_blocks( &self, - blocks: Vec>, + blocks: Vec>, tx: oneshot::Sender>, ) -> Result<(), SendError>> { self.send_action(PersistenceAction::SaveBlocks(blocks, tx)) diff --git a/crates/engine/tree/src/tree/error.rs b/crates/engine/tree/src/tree/error.rs index f7b1111df06..8589bc59d3d 100644 --- a/crates/engine/tree/src/tree/error.rs +++ b/crates/engine/tree/src/tree/error.rs @@ -1,7 +1,6 @@ //! Internal errors for the tree module. 
use alloy_consensus::BlockHeader; -use alloy_primitives::B256; use reth_consensus::ConsensusError; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError}; use reth_evm::execute::InternalBlockExecutionError; @@ -19,20 +18,6 @@ pub enum AdvancePersistenceError { /// A provider error #[error(transparent)] Provider(#[from] ProviderError), - /// Missing ancestor. - /// - /// This error occurs when we need to compute the state root for a block with missing trie - /// updates, but the ancestor block is not available. State root computation requires the state - /// from the parent block as a starting point. - /// - /// A block may be missing the trie updates when it's a fork chain block building on top of the - /// historical database state. Since we don't store the historical trie state, we cannot - /// generate the trie updates for it until the moment when database is unwound to the canonical - /// chain. - /// - /// Also see [`reth_chain_state::ExecutedTrieUpdates::Missing`]. - #[error("Missing ancestor with hash {0}")] - MissingAncestor(B256), } #[derive(thiserror::Error)] diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 7f1183f5efc..e66b2a8892e 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -13,10 +13,8 @@ use alloy_rpc_types_engine::{ ForkchoiceState, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; use error::{InsertBlockError, InsertBlockFatalError}; -use persistence_state::CurrentPersistenceAction; use reth_chain_state::{ - CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, - MemoryOverlayStateProvider, NewCanonicalChain, + CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, }; use reth_consensus::{Consensus, FullConsensus}; use reth_engine_primitives::{ @@ -31,14 +29,12 @@ use reth_payload_primitives::{ }; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; use reth_provider::{ - providers::ConsistentDbView, BlockNumReader, BlockReader, DBProvider, DatabaseProviderFactory, - HashedPostStateProvider, ProviderError, StateProviderBox, StateProviderFactory, StateReader, - StateRootProvider, TransactionVariant, + providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, HashedPostStateProvider, + ProviderError, StateProviderBox, StateProviderFactory, StateReader, TransactionVariant, + TrieReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; -use reth_trie::{HashedPostState, TrieInput}; -use reth_trie_db::DatabaseHashedPostState; use revm::state::EvmState; use state::TreeState; use std::{ @@ -78,7 +74,6 @@ pub use payload_processor::*; pub use payload_validator::{BasicEngineValidator, EngineValidator}; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::TreeConfig; -use reth_trie::KeccakKeyHasher; pub mod state; @@ -101,7 +96,7 @@ pub struct StateProviderBuilder { /// The historical block hash to fetch state from. historical: B256, /// The blocks that form the chain from historical to target and are in memory. 
- overlay: Option>>, + overlay: Option>>, } impl StateProviderBuilder { @@ -110,7 +105,7 @@ impl StateProviderBuilder { pub const fn new( provider_factory: P, historical: B256, - overlay: Option>>, + overlay: Option>>, ) -> Self { Self { provider_factory, historical, overlay } } @@ -318,6 +313,7 @@ where + StateProviderFactory + StateReader + HashedPostStateProvider + + TrieReader + Clone + 'static,

::Provider: @@ -823,7 +819,7 @@ where for block_num in (new_head_number + 1)..=current_head_number { if let Some(block_state) = self.canonical_in_memory_state.state_by_number(block_num) { - let executed_block = block_state.block_ref().block.clone(); + let executed_block = block_state.block_ref().clone(); old_blocks.push(executed_block); debug!( target: "engine::tree", @@ -855,14 +851,9 @@ where // Try to load the canonical ancestor's block match self.canonical_block_by_hash(new_head_hash)? { Some(executed_block) => { - let block_with_trie = ExecutedBlockWithTrieUpdates { - block: executed_block, - trie: ExecutedTrieUpdates::Missing, - }; - // Perform the reorg to properly handle the unwind self.canonical_in_memory_state.update_chain(NewCanonicalChain::Reorg { - new: vec![block_with_trie], + new: vec![executed_block], old: old_blocks, }); @@ -915,13 +906,8 @@ where // Try to load the block from storage if let Some(executed_block) = self.canonical_block_by_hash(block_hash)? { - let block_with_trie = ExecutedBlockWithTrieUpdates { - block: executed_block, - trie: ExecutedTrieUpdates::Missing, - }; - self.canonical_in_memory_state - .update_chain(NewCanonicalChain::Commit { new: vec![block_with_trie] }); + .update_chain(NewCanonicalChain::Commit { new: vec![executed_block] }); debug!( target: "engine::tree", @@ -976,29 +962,6 @@ where Ok(true) } - /// Returns the persisting kind for the input block. - fn persisting_kind_for(&self, block: BlockWithParent) -> PersistingKind { - // Check that we're currently persisting. - let Some(action) = self.persistence_state.current_action() else { - return PersistingKind::NotPersisting - }; - // Check that the persistince action is saving blocks, not removing them. - let CurrentPersistenceAction::SavingBlocks { highest } = action else { - return PersistingKind::PersistingNotDescendant - }; - - // The block being validated can only be a descendant if its number is higher than - // the highest block persisting. Otherwise, it's likely a fork of a lower block. - if block.block.number > highest.number && - self.state.tree_state.is_descendant(*highest, block) - { - return PersistingKind::PersistingDescendant - } - - // In all other cases, the block is not a descendant. - PersistingKind::PersistingNotDescendant - } - /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid /// chain. @@ -1305,7 +1268,7 @@ where /// Helper method to save blocks and set the persistence state. This ensures we keep track of /// the current persistence action while we're saving blocks. - fn persist_blocks(&mut self, blocks_to_persist: Vec>) { + fn persist_blocks(&mut self, blocks_to_persist: Vec>) { if blocks_to_persist.is_empty() { debug!(target: "engine::tree", "Returned empty set of blocks to persist"); return @@ -1696,17 +1659,9 @@ where /// Returns a batch of consecutive canonical blocks to persist in the range /// `(last_persisted_number .. canonical_head - threshold]`. The expected /// order is oldest -> newest. - /// - /// If any blocks are missing trie updates, all blocks are persisted, not taking `threshold` - /// into account. - /// - /// For those blocks that didn't have the trie updates calculated, runs the state root - /// calculation, and saves the trie updates. - /// - /// Returns an error if the state root calculation fails. 
fn get_canonical_blocks_to_persist( - &mut self, - ) -> Result>, AdvancePersistenceError> { + &self, + ) -> Result>, AdvancePersistenceError> { // We will calculate the state root using the database, so we need to be sure there are no // changes debug_assert!(!self.persistence_state.in_progress()); @@ -1715,27 +1670,16 @@ where let mut current_hash = self.state.tree_state.canonical_block_hash(); let last_persisted_number = self.persistence_state.last_persisted_block.number; let canonical_head_number = self.state.tree_state.canonical_block_number(); - let all_blocks_have_trie_updates = self - .state - .tree_state - .blocks_by_hash - .values() - .all(|block| block.trie_updates().is_some()); - - let target_number = if all_blocks_have_trie_updates { - // Persist only up to block buffer target if all blocks have trie updates - canonical_head_number.saturating_sub(self.config.memory_block_buffer_target()) - } else { - // Persist all blocks if any block is missing trie updates - canonical_head_number - }; + + // Persist only up to block buffer target + let target_number = + canonical_head_number.saturating_sub(self.config.memory_block_buffer_target()); debug!( target: "engine::tree", ?current_hash, ?last_persisted_number, ?canonical_head_number, - ?all_blocks_have_trie_updates, ?target_number, "Returning canonical blocks to persist" ); @@ -1754,48 +1698,6 @@ where // Reverse the order so that the oldest block comes first blocks_to_persist.reverse(); - // Calculate missing trie updates - for block in &mut blocks_to_persist { - if block.trie.is_present() { - continue - } - - debug!( - target: "engine::tree", - block = ?block.recovered_block().num_hash(), - "Calculating trie updates before persisting" - ); - - let provider = self - .state_provider_builder(block.recovered_block().parent_hash())? - .ok_or(AdvancePersistenceError::MissingAncestor( - block.recovered_block().parent_hash(), - ))? - .build()?; - - let mut trie_input = self.compute_trie_input( - self.persisting_kind_for(block.recovered_block.block_with_parent()), - self.provider.database_provider_ro()?, - block.recovered_block().parent_hash(), - None, - )?; - // Extend with block we are generating trie updates for. - trie_input.append_ref(block.hashed_state()); - let (_root, updates) = provider.state_root_from_nodes_with_updates(trie_input)?; - debug_assert_eq!(_root, block.recovered_block().state_root()); - - // Update trie updates in both tree state and blocks to persist that we return - let trie_updates = Arc::new(updates); - let tree_state_block = self - .state - .tree_state - .blocks_by_hash - .get_mut(&block.recovered_block().hash()) - .expect("blocks to persist are constructed from tree state blocks"); - tree_state_block.trie.set_present(trie_updates.clone()); - block.trie.set_present(trie_updates); - } - Ok(blocks_to_persist) } @@ -1834,7 +1736,7 @@ where trace!(target: "engine::tree", ?hash, "Fetching executed block by hash"); // check memory first if let Some(block) = self.state.tree_state.executed_block_by_hash(hash) { - return Ok(Some(block.block.clone())) + return Ok(Some(block.clone())) } let (block, senders) = self @@ -1847,11 +1749,13 @@ where .get_state(block.header().number())? 
.ok_or_else(|| ProviderError::StateForNumberNotFound(block.header().number()))?; let hashed_state = self.provider.hashed_post_state(execution_output.state()); + let trie_updates = self.provider.get_block_trie_updates(block.number())?; Ok(Some(ExecutedBlock { recovered_block: Arc::new(RecoveredBlock::new_sealed(block, senders)), execution_output: Arc::new(execution_output), hashed_state: Arc::new(hashed_state), + trie_updates: Arc::new(trie_updates.into()), })) } @@ -2289,25 +2193,7 @@ where self.update_reorg_metrics(old.len()); self.reinsert_reorged_blocks(new.clone()); - // Try reinserting the reorged canonical chain. This is only possible if we have - // `persisted_trie_updates` for those blocks. - let old = old - .iter() - .filter_map(|block| { - let trie = self - .state - .tree_state - .persisted_trie_updates - .get(&block.recovered_block.hash())? - .1 - .clone(); - Some(ExecutedBlockWithTrieUpdates { - block: block.clone(), - trie: ExecutedTrieUpdates::Present(trie), - }) - }) - .collect::>(); - self.reinsert_reorged_blocks(old); + self.reinsert_reorged_blocks(old.clone()); } // update the tracked in-memory state with the new chain @@ -2334,7 +2220,7 @@ where } /// This reinserts any blocks in the new chain that do not already exist in the tree - fn reinsert_reorged_blocks(&mut self, new_chain: Vec>) { + fn reinsert_reorged_blocks(&mut self, new_chain: Vec>) { for block in new_chain { if self .state @@ -2505,11 +2391,7 @@ where &mut self, block_id: BlockWithParent, input: Input, - execute: impl FnOnce( - &mut V, - Input, - TreeCtx<'_, N>, - ) -> Result, Err>, + execute: impl FnOnce(&mut V, Input, TreeCtx<'_, N>) -> Result, Err>, convert_to_block: impl FnOnce(&mut Self, Input) -> Result, Err>, ) -> Result where @@ -2604,109 +2486,6 @@ where Ok(InsertPayloadOk::Inserted(BlockStatus::Valid)) } - /// Computes the trie input at the provided parent hash. - /// - /// The goal of this function is to take in-memory blocks and generate a [`TrieInput`] that - /// serves as an overlay to the database blocks. - /// - /// It works as follows: - /// 1. Collect in-memory blocks that are descendants of the provided parent hash using - /// [`TreeState::blocks_by_hash`]. - /// 2. If the persistence is in progress, and the block that we're computing the trie input for - /// is a descendant of the currently persisting blocks, we need to be sure that in-memory - /// blocks are not overlapping with the database blocks that may have been already persisted. - /// To do that, we're filtering out in-memory blocks that are lower than the highest database - /// block. - /// 3. Once in-memory blocks are collected and optionally filtered, we compute the - /// [`HashedPostState`] from them. - fn compute_trie_input( - &self, - persisting_kind: PersistingKind, - provider: TP, - parent_hash: B256, - allocated_trie_input: Option, - ) -> ProviderResult { - // get allocated trie input or use a default trie input - let mut input = allocated_trie_input.unwrap_or_default(); - - let best_block_number = provider.best_block_number()?; - - let (mut historical, mut blocks) = self - .state - .tree_state - .blocks_by_hash(parent_hash) - .map_or_else(|| (parent_hash.into(), vec![]), |(hash, blocks)| (hash.into(), blocks)); - - // If the current block is a descendant of the currently persisting blocks, then we need to - // filter in-memory blocks, so that none of them are already persisted in the database. - if persisting_kind.is_descendant() { - // Iterate over the blocks from oldest to newest. 
- while let Some(block) = blocks.last() { - let recovered_block = block.recovered_block(); - if recovered_block.number() <= best_block_number { - // Remove those blocks that lower than or equal to the highest database - // block. - blocks.pop(); - } else { - // If the block is higher than the best block number, stop filtering, as it's - // the first block that's not in the database. - break - } - } - - historical = if let Some(block) = blocks.last() { - // If there are any in-memory blocks left after filtering, set the anchor to the - // parent of the oldest block. - (block.recovered_block().number() - 1).into() - } else { - // Otherwise, set the anchor to the original provided parent hash. - parent_hash.into() - }; - } - - if blocks.is_empty() { - debug!(target: "engine::tree", %parent_hash, "Parent found on disk"); - } else { - debug!(target: "engine::tree", %parent_hash, %historical, blocks = blocks.len(), "Parent found in memory"); - } - - // Convert the historical block to the block number. - let block_number = provider - .convert_hash_or_number(historical)? - .ok_or_else(|| ProviderError::BlockHashNotFound(historical.as_hash().unwrap()))?; - - // Retrieve revert state for historical block. - let revert_state = if block_number == best_block_number { - // We do not check against the `last_block_number` here because - // `HashedPostState::from_reverts` only uses the database tables, and not static files. - debug!(target: "engine::tree", block_number, best_block_number, "Empty revert state"); - HashedPostState::default() - } else { - let revert_state = HashedPostState::from_reverts::( - provider.tx_ref(), - block_number + 1.., - ) - .map_err(ProviderError::from)?; - debug!( - target: "engine::tree", - block_number, - best_block_number, - accounts = revert_state.accounts.len(), - storages = revert_state.storages.len(), - "Non-empty revert state" - ); - revert_state - }; - input.append(revert_state); - - // Extend with contents of parent in-memory blocks. - input.extend_with_blocks( - blocks.iter().rev().map(|block| (block.hashed_state(), block.trie_updates())), - ); - - Ok(input) - } - /// Handles an error that occurred while inserting a block. /// /// If this is a validation error this will mark the block as invalid. 
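The hunks above replace the `ExecutedBlockWithTrieUpdates` wrapper with a plain `ExecutedBlock` that carries its trie updates directly. Below is a minimal sketch (not part of the patch) of how such a block is assembled in the new layout, assuming the four `Arc`-wrapped fields shown in these diffs (`recovered_block`, `execution_output`, `hashed_state`, `trie_updates`) and the imports used elsewhere in this series; real callers pass the post-state and trie updates computed during execution rather than defaults:

    use std::sync::Arc;

    use reth_chain_state::ExecutedBlock;
    use reth_primitives_traits::{NodePrimitives, RecoveredBlock};
    use reth_provider::ExecutionOutcome;
    use reth_trie::{updates::TrieUpdates, HashedPostState};

    /// Sketch: bundle an executed block together with its outputs in the new layout.
    fn into_executed_block<N: NodePrimitives>(
        block: RecoveredBlock<N::Block>,
        outcome: ExecutionOutcome<N::Receipt>,
        hashed_state: HashedPostState,
        trie_updates: TrieUpdates,
    ) -> ExecutedBlock<N> {
        ExecutedBlock {
            recovered_block: Arc::new(block),
            execution_output: Arc::new(outcome),
            hashed_state: Arc::new(hashed_state),
            // Trie updates now live on the block itself instead of a separate
            // `ExecutedBlockWithTrieUpdates` wrapper.
            trie_updates: Arc::new(trie_updates),
        }
    }
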
diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index a565757284e..4a3d45af8fd 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -16,9 +16,7 @@ use alloy_consensus::transaction::Either; use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_evm::Evm; use alloy_primitives::B256; -use reth_chain_state::{ - CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, -}; +use reth_chain_state::{CanonicalInMemoryState, ExecutedBlock}; use reth_consensus::{ConsensusError, FullConsensus}; use reth_engine_primitives::{ ConfigureEngineEvm, ExecutableTxIterator, ExecutionPayload, InvalidBlockHook, PayloadValidator, @@ -35,12 +33,15 @@ use reth_primitives_traits::{ AlloyBlockHeader, BlockTy, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, }; use reth_provider::{ - BlockExecutionOutput, BlockHashReader, BlockNumReader, BlockReader, DBProvider, - DatabaseProviderFactory, ExecutionOutcome, HashedPostStateProvider, HeaderProvider, - ProviderError, StateProvider, StateProviderFactory, StateReader, StateRootProvider, + BlockExecutionOutput, BlockNumReader, BlockReader, DBProvider, DatabaseProviderFactory, + ExecutionOutcome, HashedPostStateProvider, ProviderError, StateProvider, StateProviderFactory, + StateReader, StateRootProvider, TrieReader, }; use reth_revm::db::State; -use reth_trie::{updates::TrieUpdates, HashedPostState, KeccakKeyHasher, TrieInput}; +use reth_trie::{ + updates::{TrieUpdates, TrieUpdatesSorted}, + HashedPostState, KeccakKeyHasher, TrieInput, +}; use reth_trie_db::DatabaseHashedPostState; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; use revm::context::Block; @@ -167,7 +168,7 @@ where impl BasicEngineValidator where N: NodePrimitives, - P: DatabaseProviderFactory + P: DatabaseProviderFactory + BlockReader

+ StateProviderFactory + StateReader @@ -283,7 +284,7 @@ where input: BlockOrPayload, execution_err: InsertBlockErrorKind, parent_block: &SealedHeader, - ) -> Result, InsertPayloadError> + ) -> Result, InsertPayloadError> where V: PayloadValidator, { @@ -396,15 +397,12 @@ where // Plan the strategy used for state root computation. let state_root_plan = self.plan_state_root_computation(&input, &ctx); let persisting_kind = state_root_plan.persisting_kind; - let has_ancestors_with_missing_trie_updates = - state_root_plan.has_ancestors_with_missing_trie_updates; let strategy = state_root_plan.strategy; debug!( target: "engine::tree", block=?block_num_hash, ?strategy, - ?has_ancestors_with_missing_trie_updates, "Deciding which state root algorithm to run" ); @@ -561,38 +559,11 @@ where // terminate prewarming task with good state output handle.terminate_caching(Some(&output.state)); - // If the block doesn't connect to the database tip, we don't save its trie updates, because - // they may be incorrect as they were calculated on top of the forked block. - // - // We also only save trie updates if all ancestors have trie updates, because otherwise the - // trie updates may be incorrect. - // - // Instead, they will be recomputed on persistence. - let connects_to_last_persisted = - ensure_ok_post_block!(self.block_connects_to_last_persisted(ctx, &block), block); - let should_discard_trie_updates = - !connects_to_last_persisted || has_ancestors_with_missing_trie_updates; - debug!( - target: "engine::tree", - block = ?block_num_hash, - connects_to_last_persisted, - has_ancestors_with_missing_trie_updates, - should_discard_trie_updates, - "Checking if should discard trie updates" - ); - let trie_updates = if should_discard_trie_updates { - ExecutedTrieUpdates::Missing - } else { - ExecutedTrieUpdates::Present(Arc::new(trie_output)) - }; - - Ok(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(block), - execution_output: Arc::new(ExecutionOutcome::from((output, block_num_hash.number))), - hashed_state: Arc::new(hashed_state), - }, - trie: trie_updates, + Ok(ExecutedBlock { + recovered_block: Arc::new(block), + execution_output: Arc::new(ExecutionOutcome::from((output, block_num_hash.number))), + hashed_state: Arc::new(hashed_state), + trie_updates: Arc::new(trie_output), }) } @@ -720,51 +691,6 @@ where ParallelStateRoot::new(consistent_view, input).incremental_root_with_updates() } - /// Checks if the given block connects to the last persisted block, i.e. if the last persisted - /// block is the ancestor of the given block. - /// - /// This checks the database for the actual last persisted block, not [`PersistenceState`]. - fn block_connects_to_last_persisted( - &self, - ctx: TreeCtx<'_, N>, - block: &RecoveredBlock, - ) -> ProviderResult { - let provider = self.provider.database_provider_ro()?; - let last_persisted_block = provider.best_block_number()?; - let last_persisted_hash = provider - .block_hash(last_persisted_block)? 
- .ok_or(ProviderError::HeaderNotFound(last_persisted_block.into()))?; - let last_persisted = NumHash::new(last_persisted_block, last_persisted_hash); - - let parent_num_hash = |hash: B256| -> ProviderResult { - let parent_num_hash = - if let Some(header) = ctx.state().tree_state.sealed_header_by_hash(&hash) { - Some(header.parent_num_hash()) - } else { - provider.sealed_header_by_hash(hash)?.map(|header| header.parent_num_hash()) - }; - - parent_num_hash.ok_or(ProviderError::BlockHashNotFound(hash)) - }; - - let mut parent_block = block.parent_num_hash(); - while parent_block.number > last_persisted.number { - parent_block = parent_num_hash(parent_block.hash)?; - } - - let connects = parent_block == last_persisted; - - debug!( - target: "engine::tree", - num_hash = ?block.num_hash(), - ?last_persisted, - ?parent_block, - "Checking if block connects to last persisted block" - ); - - Ok(connects) - } - /// Validates the block after execution. /// /// This performs: @@ -948,27 +874,6 @@ where } } - /// Check if the given block has any ancestors with missing trie updates. - fn has_ancestors_with_missing_trie_updates( - &self, - target_header: BlockWithParent, - state: &EngineApiTreeState, - ) -> bool { - // Walk back through the chain starting from the parent of the target block - let mut current_hash = target_header.parent; - while let Some(block) = state.tree_state.blocks_by_hash.get(¤t_hash) { - // Check if this block is missing trie updates - if block.trie.is_missing() { - return true; - } - - // Move to the parent block - current_hash = block.recovered_block().parent_hash(); - } - - false - } - /// Creates a `StateProviderBuilder` for the given parent hash. /// /// This method checks if the parent is in the tree state (in-memory) or persisted to disk, @@ -1019,20 +924,12 @@ where let can_run_parallel = persisting_kind.can_run_parallel_state_root() && !self.config.state_root_fallback(); - // Check for ancestors with missing trie updates - let has_ancestors_with_missing_trie_updates = - self.has_ancestors_with_missing_trie_updates(input.block_with_parent(), ctx.state()); - // Decide on the strategy. // Use state root task only if: // 1. No persistence is in progress // 2. Config allows it - // 3. No ancestors with missing trie updates. If any exist, it will mean that every state - // root task proof calculation will include a lot of unrelated paths in the prefix sets. - // It's cheaper to run a parallel state root that does one walk over trie tables while - // accounting for the prefix sets. let strategy = if can_run_parallel { - if self.config.use_state_root_task() && !has_ancestors_with_missing_trie_updates { + if self.config.use_state_root_task() { StateRootStrategy::StateRootTask } else { StateRootStrategy::Parallel @@ -1045,11 +942,10 @@ where target: "engine::tree", block=?input.num_hash(), ?strategy, - has_ancestors_with_missing_trie_updates, "Planned state root computation strategy" ); - StateRootPlan { strategy, has_ancestors_with_missing_trie_updates, persisting_kind } + StateRootPlan { strategy, persisting_kind } } /// Called when an invalid block is encountered during validation. @@ -1083,7 +979,7 @@ where /// block. /// 3. Once in-memory blocks are collected and optionally filtered, we compute the /// [`HashedPostState`] from them. 
- fn compute_trie_input( + fn compute_trie_input( &self, persisting_kind: PersistingKind, provider: TP, @@ -1140,17 +1036,19 @@ where .ok_or_else(|| ProviderError::BlockHashNotFound(historical.as_hash().unwrap()))?; // Retrieve revert state for historical block. - let revert_state = if block_number == best_block_number { + let (revert_state, revert_trie) = if block_number == best_block_number { // We do not check against the `last_block_number` here because - // `HashedPostState::from_reverts` only uses the database tables, and not static files. + // `HashedPostState::from_reverts` / `trie_reverts` only use the database tables, and + // not static files. debug!(target: "engine::tree", block_number, best_block_number, "Empty revert state"); - HashedPostState::default() + (HashedPostState::default(), TrieUpdatesSorted::default()) } else { let revert_state = HashedPostState::from_reverts::( provider.tx_ref(), block_number + 1.., ) .map_err(ProviderError::from)?; + let revert_trie = provider.trie_reverts(block_number + 1)?; debug!( target: "engine::tree", block_number, @@ -1159,9 +1057,10 @@ where storages = revert_state.storages.len(), "Non-empty revert state" ); - revert_state + (revert_state, revert_trie) }; - input.append(revert_state); + + input.append_cached(revert_trie.into(), revert_state); // Extend with contents of parent in-memory blocks. input.extend_with_blocks( @@ -1173,8 +1072,7 @@ where } /// Output of block or payload validation. -pub type ValidationOutcome>> = - Result, E>; +pub type ValidationOutcome>> = Result, E>; /// Strategy describing how to compute the state root. #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -1191,8 +1089,6 @@ enum StateRootStrategy { struct StateRootPlan { /// Strategy that should be attempted for computing the state root. strategy: StateRootStrategy, - /// Whether ancestors have missing trie updates. - has_ancestors_with_missing_trie_updates: bool, /// The persisting kind for this block. persisting_kind: PersistingKind, } @@ -1250,7 +1146,7 @@ pub trait EngineValidator< impl EngineValidator for BasicEngineValidator where - P: DatabaseProviderFactory + P: DatabaseProviderFactory + BlockReader
+ StateProviderFactory + StateReader diff --git a/crates/engine/tree/src/tree/state.rs b/crates/engine/tree/src/tree/state.rs index cab7d35fb22..f38faf6524c 100644 --- a/crates/engine/tree/src/tree/state.rs +++ b/crates/engine/tree/src/tree/state.rs @@ -1,29 +1,19 @@ //! Functionality related to tree state. use crate::engine::EngineApiKind; -use alloy_eips::{eip1898::BlockWithParent, merge::EPOCH_SLOTS, BlockNumHash}; +use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; use alloy_primitives::{ map::{HashMap, HashSet}, BlockNumber, B256, }; -use reth_chain_state::{EthPrimitives, ExecutedBlockWithTrieUpdates}; +use reth_chain_state::{EthPrimitives, ExecutedBlock}; use reth_primitives_traits::{AlloyBlockHeader, NodePrimitives, SealedHeader}; -use reth_trie::updates::TrieUpdates; use std::{ collections::{btree_map, hash_map, BTreeMap, VecDeque}, ops::Bound, - sync::Arc, }; use tracing::debug; -/// Default number of blocks to retain persisted trie updates -const DEFAULT_PERSISTED_TRIE_UPDATES_RETENTION: u64 = EPOCH_SLOTS * 2; - -/// Number of blocks to retain persisted trie updates for OP Stack chains -/// OP Stack chains only need `EPOCH_SLOTS` as reorgs are relevant only when -/// op-node reorgs to the same chain twice -const OPSTACK_PERSISTED_TRIE_UPDATES_RETENTION: u64 = EPOCH_SLOTS; - /// Keeps track of the state of the tree. /// /// ## Invariants @@ -35,19 +25,15 @@ pub struct TreeState { /// __All__ unique executed blocks by block hash that are connected to the canonical chain. /// /// This includes blocks of all forks. - pub(crate) blocks_by_hash: HashMap>, + pub(crate) blocks_by_hash: HashMap>, /// Executed blocks grouped by their respective block number. /// /// This maps unique block number to all known blocks for that height. /// /// Note: there can be multiple blocks at the same height due to forks. - pub(crate) blocks_by_number: BTreeMap>>, + pub(crate) blocks_by_number: BTreeMap>>, /// Map of any parent block hash to its children. pub(crate) parent_to_child: HashMap>, - /// Map of hash to trie updates for canonical blocks that are persisted but not finalized. - /// - /// Contains the block number for easy removal. - pub(crate) persisted_trie_updates: HashMap)>, /// Currently tracked canonical head of the chain. pub(crate) current_canonical_head: BlockNumHash, /// The engine API variant of this handler @@ -62,7 +48,6 @@ impl TreeState { blocks_by_number: BTreeMap::new(), current_canonical_head, parent_to_child: HashMap::default(), - persisted_trie_updates: HashMap::default(), engine_kind, } } @@ -77,11 +62,8 @@ impl TreeState { self.blocks_by_hash.len() } - /// Returns the [`ExecutedBlockWithTrieUpdates`] by hash. - pub(crate) fn executed_block_by_hash( - &self, - hash: B256, - ) -> Option<&ExecutedBlockWithTrieUpdates> { + /// Returns the [`ExecutedBlock`] by hash. + pub(crate) fn executed_block_by_hash(&self, hash: B256) -> Option<&ExecutedBlock> { self.blocks_by_hash.get(&hash) } @@ -97,10 +79,7 @@ impl TreeState { /// newest to oldest. And the parent hash of the oldest block that is missing from the buffer. /// /// Returns `None` if the block for the given hash is not found. - pub(crate) fn blocks_by_hash( - &self, - hash: B256, - ) -> Option<(B256, Vec>)> { + pub(crate) fn blocks_by_hash(&self, hash: B256) -> Option<(B256, Vec>)> { let block = self.blocks_by_hash.get(&hash).cloned()?; let mut parent_hash = block.recovered_block().parent_hash(); let mut blocks = vec![block]; @@ -113,7 +92,7 @@ impl TreeState { } /// Insert executed block into the state. 
- pub(crate) fn insert_executed(&mut self, executed: ExecutedBlockWithTrieUpdates) { + pub(crate) fn insert_executed(&mut self, executed: ExecutedBlock) { let hash = executed.recovered_block().hash(); let parent_hash = executed.recovered_block().parent_hash(); let block_number = executed.recovered_block().number(); @@ -138,10 +117,7 @@ impl TreeState { /// ## Returns /// /// The removed block and the block hashes of its children. - fn remove_by_hash( - &mut self, - hash: B256, - ) -> Option<(ExecutedBlockWithTrieUpdates, HashSet)> { + fn remove_by_hash(&mut self, hash: B256) -> Option<(ExecutedBlock, HashSet)> { let executed = self.blocks_by_hash.remove(&hash)?; // Remove this block from collection of children of its parent block. @@ -215,41 +191,12 @@ impl TreeState { if executed.recovered_block().number() <= upper_bound { let num_hash = executed.recovered_block().num_hash(); debug!(target: "engine::tree", ?num_hash, "Attempting to remove block walking back from the head"); - if let Some((mut removed, _)) = - self.remove_by_hash(executed.recovered_block().hash()) - { - debug!(target: "engine::tree", ?num_hash, "Removed block walking back from the head"); - // finally, move the trie updates - let Some(trie_updates) = removed.trie.take_present() else { - debug!(target: "engine::tree", ?num_hash, "No trie updates found for persisted block"); - continue; - }; - self.persisted_trie_updates.insert( - removed.recovered_block().hash(), - (removed.recovered_block().number(), trie_updates), - ); - } + self.remove_by_hash(executed.recovered_block().hash()); } } debug!(target: "engine::tree", ?upper_bound, ?last_persisted_hash, "Removed canonical blocks from the tree"); } - /// Prunes old persisted trie updates based on the current block number - /// and chain type (OP Stack or regular) - pub(crate) fn prune_persisted_trie_updates(&mut self) { - let retention_blocks = if self.engine_kind.is_opstack() { - OPSTACK_PERSISTED_TRIE_UPDATES_RETENTION - } else { - DEFAULT_PERSISTED_TRIE_UPDATES_RETENTION - }; - - let earliest_block_to_retain = - self.current_canonical_head.number.saturating_sub(retention_blocks); - - self.persisted_trie_updates - .retain(|_, (block_number, _)| *block_number > earliest_block_to_retain); - } - /// Removes all blocks that are below the finalized block, as well as removing non-canonical /// sidechains that fork from below the finalized block. pub(crate) fn prune_finalized_sidechains(&mut self, finalized_num_hash: BlockNumHash) { @@ -274,8 +221,6 @@ impl TreeState { } } - self.prune_persisted_trie_updates(); - // The only block that should remain at the `finalized` number now, is the finalized // block, if it exists. 
// diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index 17b5950e077..49ce5ab9cf1 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -3,6 +3,7 @@ use crate::{ persistence::PersistenceAction, tree::{ payload_validator::{BasicEngineValidator, TreeCtx, ValidationOutcome}, + persistence_state::CurrentPersistenceAction, TreeConfig, }, }; @@ -26,7 +27,7 @@ use reth_ethereum_primitives::{Block, EthPrimitives}; use reth_evm_ethereum::MockEvmConfig; use reth_primitives_traits::Block as _; use reth_provider::{test_utils::MockEthProvider, ExecutionOutcome}; -use reth_trie::HashedPostState; +use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::{ collections::BTreeMap, str::FromStr, @@ -148,7 +149,7 @@ struct TestHarness { >, to_tree_tx: Sender, Block>>, from_tree_rx: UnboundedReceiver, - blocks: Vec, + blocks: Vec, action_rx: Receiver, block_builder: TestBlockBuilder, provider: MockEthProvider, @@ -228,7 +229,7 @@ impl TestHarness { } } - fn with_blocks(mut self, blocks: Vec) -> Self { + fn with_blocks(mut self, blocks: Vec) -> Self { let mut blocks_by_hash = HashMap::default(); let mut blocks_by_number = BTreeMap::new(); let mut state_by_hash = HashMap::default(); @@ -253,7 +254,6 @@ impl TestHarness { blocks_by_number, current_canonical_head: blocks.last().unwrap().recovered_block().num_hash(), parent_to_child, - persisted_trie_updates: HashMap::default(), engine_kind: EngineApiKind::Ethereum, }; @@ -405,7 +405,6 @@ impl ValidatorTestHarness { /// Configure `PersistenceState` for specific `PersistingKind` scenarios fn start_persistence_operation(&mut self, action: CurrentPersistenceAction) { - use crate::tree::persistence_state::CurrentPersistenceAction; use tokio::sync::oneshot; // Create a dummy receiver for testing - it will never receive a value @@ -828,25 +827,21 @@ fn test_tree_state_on_new_head_deep_fork() { let chain_b = test_block_builder.create_fork(&last_block, 10); for block in &chain_a { - test_harness.tree.state.tree_state.insert_executed(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(block.clone()), - execution_output: Arc::new(ExecutionOutcome::default()), - hashed_state: Arc::new(HashedPostState::default()), - }, - trie: ExecutedTrieUpdates::empty(), + test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { + recovered_block: Arc::new(block.clone()), + execution_output: Arc::new(ExecutionOutcome::default()), + hashed_state: Arc::new(HashedPostState::default()), + trie_updates: Arc::new(TrieUpdates::default()), }); } test_harness.tree.state.tree_state.set_canonical_head(chain_a.last().unwrap().num_hash()); for block in &chain_b { - test_harness.tree.state.tree_state.insert_executed(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(block.clone()), - execution_output: Arc::new(ExecutionOutcome::default()), - hashed_state: Arc::new(HashedPostState::default()), - }, - trie: ExecutedTrieUpdates::empty(), + test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { + recovered_block: Arc::new(block.clone()), + execution_output: Arc::new(ExecutionOutcome::default()), + hashed_state: Arc::new(HashedPostState::default()), + trie_updates: Arc::new(TrieUpdates::default()), }); } diff --git a/crates/exex/exex/src/backfill/factory.rs b/crates/exex/exex/src/backfill/factory.rs index 789d63f84e2..d9a51bc47a7 100644 --- a/crates/exex/exex/src/backfill/factory.rs +++ b/crates/exex/exex/src/backfill/factory.rs @@ 
-24,7 +24,7 @@ impl BackfillJobFactory { Self { evm_config, provider, - prune_modes: PruneModes::none(), + prune_modes: PruneModes::default(), thresholds: ExecutionStageThresholds { // Default duration for a database transaction to be considered long-lived is // 60 seconds, so we limit the backfill job to the half of it to be sure we finish diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index e96245350fd..846e4e6b203 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -126,6 +126,7 @@ impl PruningArgs { storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), // TODO: set default to pre-merge block if available bodies_history: None, + merkle_changesets: PruneMode::Distance(MINIMUM_PRUNING_DISTANCE), receipts_log_filter: Default::default(), }, } diff --git a/crates/node/core/src/args/stage.rs b/crates/node/core/src/args/stage.rs index 337f5a4a60b..7718fb85605 100644 --- a/crates/node/core/src/args/stage.rs +++ b/crates/node/core/src/args/stage.rs @@ -38,6 +38,11 @@ pub enum StageEnum { /// /// Handles Merkle tree-related computations and data processing. Merkle, + /// The merkle changesets stage within the pipeline. + /// + /// Handles Merkle trie changesets for storage and accounts. + #[value(name = "merkle-changesets")] + MerkleChangeSets, /// The transaction lookup stage within the pipeline. /// /// Deals with the retrieval and processing of transactions. diff --git a/crates/optimism/flashblocks/src/worker.rs b/crates/optimism/flashblocks/src/worker.rs index 68071851f43..8cf7777f6a6 100644 --- a/crates/optimism/flashblocks/src/worker.rs +++ b/crates/optimism/flashblocks/src/worker.rs @@ -124,6 +124,7 @@ where recovered_block: block.into(), execution_output: Arc::new(execution_outcome), hashed_state: Arc::new(hashed_state), + trie_updates: Arc::default(), }, ); let pending_flashblock = PendingFlashBlock::new( diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index ecc7a400349..67b8faf5608 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -11,7 +11,7 @@ use alloy_primitives::{B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; use reth_basic_payload_builder::*; -use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates}; +use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ execute::{ @@ -379,13 +379,11 @@ impl OpBuilder<'_, Txs> { ); // create the executed block data - let executed: ExecutedBlockWithTrieUpdates = ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(block), - execution_output: Arc::new(execution_outcome), - hashed_state: Arc::new(hashed_state), - }, - trie: ExecutedTrieUpdates::Present(Arc::new(trie_updates)), + let executed: ExecutedBlock = ExecutedBlock { + recovered_block: Arc::new(block), + execution_output: Arc::new(execution_outcome), + hashed_state: Arc::new(hashed_state), + trie_updates: Arc::new(trie_updates), }; let no_tx_pool = ctx.attributes().no_tx_pool(); diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index de1705faa8f..6f530acd853 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -16,7 +16,7 @@ use op_alloy_consensus::{encode_holocene_extra_data, encode_jovian_extra_data, E use op_alloy_rpc_types_engine::{ 
OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpExecutionPayloadV4, }; -use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_chain_state::ExecutedBlock; use reth_chainspec::EthChainSpec; use reth_optimism_evm::OpNextBlockEnvAttributes; use reth_optimism_forks::OpHardforks; @@ -176,7 +176,7 @@ pub struct OpBuiltPayload { /// Sealed block pub(crate) block: Arc>, /// Block execution data for the payload, if any. - pub(crate) executed_block: Option>, + pub(crate) executed_block: Option>, /// The fees of the block pub(crate) fees: U256, } @@ -189,7 +189,7 @@ impl OpBuiltPayload { id: PayloadId, block: Arc>, fees: U256, - executed_block: Option>, + executed_block: Option>, ) -> Self { Self { id, block, fees, executed_block } } @@ -226,7 +226,7 @@ impl BuiltPayload for OpBuiltPayload { self.fees } - fn executed_block(&self) -> Option> { + fn executed_block(&self) -> Option> { self.executed_block.clone() } diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 39bd14cc63b..160956afa27 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -9,7 +9,7 @@ use alloy_eips::{ use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}; use core::fmt; -use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_chain_state::ExecutedBlock; use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader}; /// Represents a successfully built execution payload (block). @@ -30,7 +30,7 @@ pub trait BuiltPayload: Send + Sync + fmt::Debug { /// Returns the complete execution result including state updates. /// /// Returns `None` if execution data is not available or not tracked. - fn executed_block(&self) -> Option> { + fn executed_block(&self) -> Option> { None } diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index 1987c500da7..f21319bb458 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -6,8 +6,8 @@ use reth_db_api::{table::Value, transaction::DbTxMut}; use reth_exex_types::FinishedExExHeight; use reth_primitives_traits::NodePrimitives; use reth_provider::{ - providers::StaticFileProvider, BlockReader, DBProvider, DatabaseProviderFactory, - NodePrimitivesProvider, PruneCheckpointReader, PruneCheckpointWriter, + providers::StaticFileProvider, BlockReader, ChainStateBlockReader, DBProvider, + DatabaseProviderFactory, NodePrimitivesProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; @@ -83,6 +83,7 @@ impl PrunerBuilder { ProviderRW: PruneCheckpointWriter + PruneCheckpointReader + BlockReader + + ChainStateBlockReader + StaticFileProviderFactory< Primitives: NodePrimitives, >, @@ -113,6 +114,7 @@ impl PrunerBuilder { Primitives: NodePrimitives, > + DBProvider + BlockReader + + ChainStateBlockReader + PruneCheckpointWriter + PruneCheckpointReader, { @@ -132,7 +134,7 @@ impl Default for PrunerBuilder { fn default() -> Self { Self { block_interval: 5, - segments: PruneModes::none(), + segments: PruneModes::default(), delete_limit: MAINNET_PRUNE_DELETE_LIMIT, timeout: None, finished_exex_height: watch::channel(FinishedExExHeight::NoExExs).1, diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index 1daade01358..dc175254453 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -15,8 +15,8 @@ pub use 
static_file::{ use std::{fmt::Debug, ops::RangeInclusive}; use tracing::error; pub use user::{ - AccountHistory, Receipts as UserReceipts, ReceiptsByLogs, SenderRecovery, StorageHistory, - TransactionLookup, + AccountHistory, MerkleChangeSets, Receipts as UserReceipts, ReceiptsByLogs, SenderRecovery, + StorageHistory, TransactionLookup, }; /// A segment represents a pruning of some portion of the data. diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 08e41bcdf75..72847219b09 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -1,13 +1,13 @@ use crate::segments::{ - AccountHistory, ReceiptsByLogs, Segment, SenderRecovery, StorageHistory, TransactionLookup, - UserReceipts, + AccountHistory, MerkleChangeSets, ReceiptsByLogs, Segment, SenderRecovery, StorageHistory, + TransactionLookup, UserReceipts, }; use alloy_eips::eip2718::Encodable2718; use reth_db_api::{table::Value, transaction::DbTxMut}; use reth_primitives_traits::NodePrimitives; use reth_provider::{ - providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointReader, - PruneCheckpointWriter, StaticFileProviderFactory, + providers::StaticFileProvider, BlockReader, ChainStateBlockReader, DBProvider, + PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; @@ -52,7 +52,8 @@ where > + DBProvider + PruneCheckpointWriter + PruneCheckpointReader - + BlockReader, + + BlockReader + + ChainStateBlockReader, { /// Creates a [`SegmentSet`] from an existing components, such as [`StaticFileProvider`] and /// [`PruneModes`]. @@ -67,6 +68,7 @@ where account_history, storage_history, bodies_history: _, + merkle_changesets, receipts_log_filter, } = prune_modes; @@ -77,6 +79,8 @@ where .segment(StaticFileTransactions::new(static_file_provider.clone())) // Static file receipts .segment(StaticFileReceipts::new(static_file_provider)) + // Merkle changesets + .segment(MerkleChangeSets::new(merkle_changesets)) // Account history .segment_opt(account_history.map(AccountHistory::new)) // Storage history diff --git a/crates/prune/prune/src/segments/user/merkle_change_sets.rs b/crates/prune/prune/src/segments/user/merkle_change_sets.rs new file mode 100644 index 00000000000..89cc4567b7d --- /dev/null +++ b/crates/prune/prune/src/segments/user/merkle_change_sets.rs @@ -0,0 +1,116 @@ +use crate::{ + db_ext::DbTxPruneExt, + segments::{PruneInput, Segment}, + PrunerError, +}; +use alloy_primitives::B256; +use reth_db_api::{models::BlockNumberHashedAddress, table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + errors::provider::ProviderResult, BlockReader, ChainStateBlockReader, DBProvider, + NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider, +}; +use reth_prune_types::{ + PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, +}; +use tracing::{instrument, trace}; + +#[derive(Debug)] +pub struct MerkleChangeSets { + mode: PruneMode, +} + +impl MerkleChangeSets { + pub const fn new(mode: PruneMode) -> Self { + Self { mode } + } +} + +impl Segment for MerkleChangeSets +where + Provider: DBProvider + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader + + ChainStateBlockReader + + NodePrimitivesProvider>, +{ + fn segment(&self) -> PruneSegment { + PruneSegment::MerkleChangeSets + } + + fn mode(&self) -> Option { + Some(self.mode) + } + + fn purpose(&self) -> PrunePurpose { + 
PrunePurpose::User + } + + #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { + let Some(block_range) = input.get_next_block_range() else { + trace!(target: "pruner", "No change sets to prune"); + return Ok(SegmentOutput::done()) + }; + + let block_range_end = *block_range.end(); + let mut limiter = input.limiter; + + // Create range for StoragesTrieChangeSets which uses BlockNumberHashedAddress as key + let storage_range_start: BlockNumberHashedAddress = + (*block_range.start(), B256::ZERO).into(); + let storage_range_end: BlockNumberHashedAddress = + (*block_range.end() + 1, B256::ZERO).into(); + let storage_range = storage_range_start..storage_range_end; + + let mut last_storages_pruned_block = None; + let (storages_pruned, done) = + provider.tx_ref().prune_table_with_range::( + storage_range, + &mut limiter, + |_| false, + |(BlockNumberHashedAddress((block_number, _)), _)| { + last_storages_pruned_block = Some(block_number); + }, + )?; + + trace!(target: "pruner", %storages_pruned, %done, "Pruned storages change sets"); + + let mut last_accounts_pruned_block = block_range_end; + let last_storages_pruned_block = last_storages_pruned_block + // If there's more storage changesets to prune, set the checkpoint block number to + // previous, so we could finish pruning its storage changesets on the next run. + .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) + .unwrap_or(block_range_end); + + let (accounts_pruned, done) = + provider.tx_ref().prune_table_with_range::( + block_range, + &mut limiter, + |_| false, + |row| last_accounts_pruned_block = row.0, + )?; + + trace!(target: "pruner", %accounts_pruned, %done, "Pruned accounts change sets"); + + let progress = limiter.progress(done); + + Ok(SegmentOutput { + progress, + pruned: accounts_pruned + storages_pruned, + checkpoint: Some(SegmentOutputCheckpoint { + block_number: Some(last_storages_pruned_block.min(last_accounts_pruned_block)), + tx_number: None, + }), + }) + } + + fn save_checkpoint( + &self, + provider: &Provider, + checkpoint: PruneCheckpoint, + ) -> ProviderResult<()> { + provider.save_prune_checkpoint(PruneSegment::MerkleChangeSets, checkpoint) + } +} diff --git a/crates/prune/prune/src/segments/user/mod.rs b/crates/prune/prune/src/segments/user/mod.rs index 0b787d14dae..c25bc6bc764 100644 --- a/crates/prune/prune/src/segments/user/mod.rs +++ b/crates/prune/prune/src/segments/user/mod.rs @@ -1,5 +1,6 @@ mod account_history; mod history; +mod merkle_change_sets; mod receipts; mod receipts_by_logs; mod sender_recovery; @@ -7,6 +8,7 @@ mod storage_history; mod transaction_lookup; pub use account_history::AccountHistory; +pub use merkle_change_sets::MerkleChangeSets; pub use receipts::Receipts; pub use receipts_by_logs::ReceiptsByLogs; pub use sender_recovery::SenderRecovery; diff --git a/crates/prune/types/src/segment.rs b/crates/prune/types/src/segment.rs index e131f353fe3..0d60d900137 100644 --- a/crates/prune/types/src/segment.rs +++ b/crates/prune/types/src/segment.rs @@ -3,6 +3,9 @@ use derive_more::Display; use thiserror::Error; /// Segment of the data that can be pruned. +/// +/// NOTE new variants must be added to the end of this enum. The variant index is encoded directly +/// when writing to the `PruneCheckpoint` table, so changing the order here will corrupt the table. 
#[derive(Debug, Display, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] #[cfg_attr(test, derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] @@ -26,6 +29,9 @@ pub enum PruneSegment { Headers, /// Prune segment responsible for the `Transactions` table. Transactions, + /// Prune segment responsible for all rows in `AccountsTrieChangeSets` and + /// `StoragesTrieChangeSets` table. + MerkleChangeSets, } #[cfg(test)] @@ -44,9 +50,10 @@ impl PruneSegment { 0 } Self::Receipts if purpose.is_static_file() => 0, - Self::ContractLogs | Self::AccountHistory | Self::StorageHistory => { - MINIMUM_PRUNING_DISTANCE - } + Self::ContractLogs | + Self::AccountHistory | + Self::StorageHistory | + Self::MerkleChangeSets | Self::Receipts => MINIMUM_PRUNING_DISTANCE, } } diff --git a/crates/prune/types/src/target.rs b/crates/prune/types/src/target.rs index 574a0e2e555..657cf6a37c5 100644 --- a/crates/prune/types/src/target.rs +++ b/crates/prune/types/src/target.rs @@ -36,8 +36,13 @@ pub enum HistoryType { StorageHistory, } +/// Default pruning mode for merkle changesets +const fn default_merkle_changesets_mode() -> PruneMode { + PruneMode::Distance(MINIMUM_PRUNING_DISTANCE) +} + /// Pruning configuration for every segment of the data that can be pruned. -#[derive(Debug, Clone, Default, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq)] #[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "serde"), serde(default))] pub struct PruneModes { @@ -84,6 +89,16 @@ pub struct PruneModes { ) )] pub bodies_history: Option, + /// Merkle Changesets pruning configuration for `AccountsTrieChangeSets` and + /// `StoragesTrieChangeSets`. + #[cfg_attr( + any(test, feature = "serde"), + serde( + default = "default_merkle_changesets_mode", + deserialize_with = "deserialize_prune_mode_with_min_blocks::" + ) + )] + pub merkle_changesets: PruneMode, /// Receipts pruning configuration by retaining only those receipts that contain logs emitted /// by the specified addresses, discarding others. This setting is overridden by `receipts`. /// @@ -92,12 +107,22 @@ pub struct PruneModes { pub receipts_log_filter: ReceiptsLogPruneConfig, } -impl PruneModes { - /// Sets pruning to no target. - pub fn none() -> Self { - Self::default() +impl Default for PruneModes { + fn default() -> Self { + Self { + sender_recovery: None, + transaction_lookup: None, + receipts: None, + account_history: None, + storage_history: None, + bodies_history: None, + merkle_changesets: default_merkle_changesets_mode(), + receipts_log_filter: ReceiptsLogPruneConfig::default(), + } } +} +impl PruneModes { /// Sets pruning to all targets. pub fn all() -> Self { Self { @@ -107,6 +132,7 @@ impl PruneModes { account_history: Some(PruneMode::Full), storage_history: Some(PruneMode::Full), bodies_history: Some(PruneMode::Full), + merkle_changesets: PruneMode::Full, receipts_log_filter: Default::default(), } } @@ -116,11 +142,6 @@ impl PruneModes { self.receipts.is_some() || !self.receipts_log_filter.is_empty() } - /// Returns true if all prune modes are set to [`None`]. - pub fn is_empty(&self) -> bool { - self == &Self::none() - } - /// Returns an error if we can't unwind to the targeted block because the target block is /// outside the range. /// @@ -170,6 +191,28 @@ impl PruneModes { } } +/// Deserializes [`PruneMode`] and validates that the value is not less than the const +/// generic parameter `MIN_BLOCKS`. 
This parameter represents the number of blocks that needs to be +/// left in database after the pruning. +/// +/// 1. For [`PruneMode::Full`], it fails if `MIN_BLOCKS > 0`. +/// 2. For [`PruneMode::Distance`], it fails if `distance < MIN_BLOCKS + 1`. `+ 1` is needed because +/// `PruneMode::Distance(0)` means that we leave zero blocks from the latest, meaning we have one +/// block in the database. +#[cfg(any(test, feature = "serde"))] +fn deserialize_prune_mode_with_min_blocks< + 'de, + const MIN_BLOCKS: u64, + D: serde::Deserializer<'de>, +>( + deserializer: D, +) -> Result { + use serde::Deserialize; + let prune_mode = PruneMode::deserialize(deserializer)?; + serde_deserialize_validate::(&prune_mode)?; + Ok(prune_mode) +} + /// Deserializes [`Option`] and validates that the value is not less than the const /// generic parameter `MIN_BLOCKS`. This parameter represents the number of blocks that needs to be /// left in database after the pruning. @@ -186,12 +229,21 @@ fn deserialize_opt_prune_mode_with_min_blocks< >( deserializer: D, ) -> Result, D::Error> { - use alloc::format; use serde::Deserialize; let prune_mode = Option::::deserialize(deserializer)?; + if let Some(prune_mode) = prune_mode.as_ref() { + serde_deserialize_validate::(prune_mode)?; + } + Ok(prune_mode) +} +#[cfg(any(test, feature = "serde"))] +fn serde_deserialize_validate<'a, 'de, const MIN_BLOCKS: u64, D: serde::Deserializer<'de>>( + prune_mode: &'a PruneMode, +) -> Result<(), D::Error> { + use alloc::format; match prune_mode { - Some(PruneMode::Full) if MIN_BLOCKS > 0 => { + PruneMode::Full if MIN_BLOCKS > 0 => { Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str("full"), // This message should have "expected" wording @@ -199,15 +251,15 @@ fn deserialize_opt_prune_mode_with_min_blocks< .as_str(), )) } - Some(PruneMode::Distance(distance)) if distance < MIN_BLOCKS => { + PruneMode::Distance(distance) if *distance < MIN_BLOCKS => { Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Unsigned(distance), + serde::de::Unexpected::Unsigned(*distance), // This message should have "expected" wording &format!("prune mode that leaves at least {MIN_BLOCKS} blocks in the database") .as_str(), )) } - _ => Ok(prune_mode), + _ => Ok(()), } } @@ -240,7 +292,7 @@ mod tests { #[test] fn test_unwind_target_unpruned() { // Test case 1: No pruning configured - should always succeed - let prune_modes = PruneModes::none(); + let prune_modes = PruneModes::default(); assert!(prune_modes.ensure_unwind_target_unpruned(1000, 500, &[]).is_ok()); assert!(prune_modes.ensure_unwind_target_unpruned(1000, 0, &[]).is_ok()); diff --git a/crates/ress/provider/src/lib.rs b/crates/ress/provider/src/lib.rs index 599b37962f0..d986eb9e953 100644 --- a/crates/ress/provider/src/lib.rs +++ b/crates/ress/provider/src/lib.rs @@ -11,9 +11,7 @@ use alloy_consensus::BlockHeader as _; use alloy_primitives::{Bytes, B256}; use parking_lot::Mutex; -use reth_chain_state::{ - ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, MemoryOverlayStateProvider, -}; +use reth_chain_state::{ExecutedBlock, MemoryOverlayStateProvider}; use reth_errors::{ProviderError, ProviderResult}; use reth_ethereum_primitives::{Block, BlockBody, EthPrimitives}; use reth_evm::{execute::Executor, ConfigureEvm}; @@ -125,10 +123,8 @@ where self.pending_state.invalid_recovered_block(&ancestor_hash) { trace!(target: "reth::ress_provider", %block_hash, %ancestor_hash, "Using invalid ancestor block for witness construction"); - executed = 
Some(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { recovered_block: invalid, ..Default::default() }, - trie: ExecutedTrieUpdates::empty(), - }); + executed = + Some(ExecutedBlock { recovered_block: invalid, ..Default::default() }); } let Some(executed) = executed else { @@ -162,14 +158,8 @@ where let witness_state_provider = self.provider.state_by_block_hash(ancestor_hash)?; let mut trie_input = TrieInput::default(); for block in executed_ancestors.into_iter().rev() { - if let Some(trie_updates) = block.trie.as_ref() { - trie_input.append_cached_ref(trie_updates, &block.hashed_state); - } else { - trace!(target: "reth::ress_provider", ancestor = ?block.recovered_block().num_hash(), "Missing trie updates for ancestor block"); - return Err(ProviderError::TrieWitnessError( - "missing trie updates for ancestor".to_owned(), - )); - } + let trie_updates = block.trie_updates.as_ref(); + trie_input.append_cached_ref(trie_updates, &block.hashed_state); } let mut hashed_state = db.into_state(); hashed_state.extend(record.hashed_state); diff --git a/crates/ress/provider/src/pending_state.rs b/crates/ress/provider/src/pending_state.rs index e1a84661fc2..f536acdb60a 100644 --- a/crates/ress/provider/src/pending_state.rs +++ b/crates/ress/provider/src/pending_state.rs @@ -5,7 +5,7 @@ use alloy_primitives::{ }; use futures::StreamExt; use parking_lot::RwLock; -use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_chain_state::ExecutedBlock; use reth_ethereum_primitives::EthPrimitives; use reth_node_api::{ConsensusEngineEvent, NodePrimitives}; use reth_primitives_traits::{Bytecode, RecoveredBlock}; @@ -20,14 +20,14 @@ pub struct PendingState(Arc>>); #[derive(Default, Debug)] struct PendingStateInner { - blocks_by_hash: B256Map>, + blocks_by_hash: B256Map>, invalid_blocks_by_hash: B256Map>>, block_hashes_by_number: BTreeMap, } impl PendingState { /// Insert executed block with trie updates. - pub fn insert_block(&self, block: ExecutedBlockWithTrieUpdates) { + pub fn insert_block(&self, block: ExecutedBlock) { let mut this = self.0.write(); let block_hash = block.recovered_block.hash(); this.block_hashes_by_number @@ -46,13 +46,13 @@ impl PendingState { } /// Returns only valid executed blocks by hash. - pub fn executed_block(&self, hash: &B256) -> Option> { + pub fn executed_block(&self, hash: &B256) -> Option> { self.0.read().blocks_by_hash.get(hash).cloned() } /// Returns valid recovered block. pub fn recovered_block(&self, hash: &B256) -> Option>> { - self.executed_block(hash).map(|b| b.recovered_block.clone()) + self.executed_block(hash).map(|b| b.recovered_block) } /// Returns invalid recovered block. diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 6c3e076fb1e..1dda44d090e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -359,7 +359,7 @@ pub trait LoadPendingBlock: } } - let BlockBuilderOutcome { execution_result, block, hashed_state, .. 
} = + let BlockBuilderOutcome { execution_result, block, hashed_state, trie_updates } = builder.finish(NoopProvider::default()).map_err(Self::Error::from_eth_err)?; let execution_outcome = ExecutionOutcome::new( @@ -373,6 +373,7 @@ pub trait LoadPendingBlock: recovered_block: block.into(), execution_output: Arc::new(execution_outcome), hashed_state: Arc::new(hashed_state), + trie_updates: Arc::new(trie_updates), }) } } diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index d0b5c65c1ed..45f50ea82c5 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -9,9 +9,7 @@ use alloy_consensus::BlockHeader; use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{BlockHash, B256}; use derive_more::Constructor; -use reth_chain_state::{ - BlockState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, -}; +use reth_chain_state::{BlockState, ExecutedBlock}; use reth_ethereum_primitives::Receipt; use reth_evm::{ConfigureEvm, EvmEnvFor}; use reth_primitives_traits::{ @@ -135,11 +133,6 @@ impl PendingBlock { impl From> for BlockState { fn from(pending_block: PendingBlock) -> Self { - Self::new(ExecutedBlockWithTrieUpdates::::new( - pending_block.executed_block.recovered_block, - pending_block.executed_block.execution_output, - pending_block.executed_block.hashed_state, - ExecutedTrieUpdates::Missing, - )) + Self::new(pending_block.executed_block) } } diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 2446219ea3d..ac35a489031 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -639,14 +639,18 @@ impl Pipeline { // FIXME: When handling errors, we do not commit the database transaction. This // leads to the Merkle stage not clearing its checkpoint, and restarting from an // invalid place. - let provider_rw = self.provider_factory.database_provider_rw()?; - provider_rw.save_stage_checkpoint_progress(StageId::MerkleExecute, vec![])?; - provider_rw.save_stage_checkpoint( - StageId::MerkleExecute, - prev_checkpoint.unwrap_or_default(), - )?; + // Only reset MerkleExecute checkpoint if MerkleExecute itself failed + if stage_id == StageId::MerkleExecute { + let provider_rw = self.provider_factory.database_provider_rw()?; + provider_rw + .save_stage_checkpoint_progress(StageId::MerkleExecute, vec![])?; + provider_rw.save_stage_checkpoint( + StageId::MerkleExecute, + prev_checkpoint.unwrap_or_default(), + )?; - provider_rw.commit()?; + provider_rw.commit()?; + } // We unwind because of a validation error. 
If the unwind itself // fails, we bail entirely, diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index bd1fb59ebe9..01d7571e0da 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -165,7 +165,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { db.insert_changesets(transitions, None).unwrap(); let provider_rw = db.factory.provider_rw().unwrap(); - provider_rw.write_trie_updates(&updates).unwrap(); + provider_rw.write_trie_updates(updates).unwrap(); provider_rw.commit().unwrap(); let (transitions, final_state) = random_changeset_range( diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index 97c3a3116aa..015be507336 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -39,9 +39,9 @@ use crate::{ stages::{ AccountHashingStage, BodyStage, EraImportSource, EraStage, ExecutionStage, FinishStage, - HeaderStage, IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, - PruneSenderRecoveryStage, PruneStage, SenderRecoveryStage, StorageHashingStage, - TransactionLookupStage, + HeaderStage, IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleChangeSets, + MerkleStage, PruneSenderRecoveryStage, PruneStage, SenderRecoveryStage, + StorageHashingStage, TransactionLookupStage, }, StageSet, StageSetBuilder, }; @@ -54,7 +54,7 @@ use reth_primitives_traits::{Block, NodePrimitives}; use reth_provider::HeaderSyncGapProvider; use reth_prune_types::PruneModes; use reth_stages_api::Stage; -use std::{ops::Not, sync::Arc}; +use std::sync::Arc; use tokio::sync::watch; /// A set containing all stages to run a fully syncing instance of reth. @@ -75,6 +75,7 @@ use tokio::sync::watch; /// - [`AccountHashingStage`] /// - [`StorageHashingStage`] /// - [`MerkleStage`] (execute) +/// - [`MerkleChangeSets`] /// - [`TransactionLookupStage`] /// - [`IndexStorageHistoryStage`] /// - [`IndexAccountHistoryStage`] @@ -336,12 +337,12 @@ where stages_config: self.stages_config.clone(), prune_modes: self.prune_modes.clone(), }) - // If any prune modes are set, add the prune stage. - .add_stage_opt(self.prune_modes.is_empty().not().then(|| { - // Prune stage should be added after all hashing stages, because otherwise it will - // delete - PruneStage::new(self.prune_modes.clone(), self.stages_config.prune.commit_threshold) - })) + // Prune stage should be added after all hashing stages, because otherwise it will + // delete + .add_stage(PruneStage::new( + self.prune_modes.clone(), + self.stages_config.prune.commit_threshold, + )) } } @@ -387,6 +388,13 @@ where } /// A set containing all stages that hash account state. 
+/// +/// This includes: +/// - [`MerkleStage`] (unwind) +/// - [`AccountHashingStage`] +/// - [`StorageHashingStage`] +/// - [`MerkleStage`] (execute) +/// - [`MerkleChangeSets`] #[derive(Debug, Default)] #[non_exhaustive] pub struct HashingStages { @@ -399,6 +407,7 @@ where MerkleStage: Stage, AccountHashingStage: Stage, StorageHashingStage: Stage, + MerkleChangeSets: Stage, { fn builder(self) -> StageSetBuilder { StageSetBuilder::default() @@ -415,6 +424,7 @@ where self.stages_config.merkle.rebuild_threshold, self.stages_config.merkle.incremental_threshold, )) + .add_stage(MerkleChangeSets::new()) } } diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 3736fa523cb..ed50572d58b 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -896,7 +896,7 @@ mod tests { // If there is a pruning configuration, then it's forced to use the database. // This way we test both cases. - let modes = [None, Some(PruneModes::none())]; + let modes = [None, Some(PruneModes::default())]; let random_filter = ReceiptsLogPruneConfig(BTreeMap::from([( Address::random(), PruneMode::Distance(100000), @@ -1033,7 +1033,7 @@ mod tests { // If there is a pruning configuration, then it's forced to use the database. // This way we test both cases. - let modes = [None, Some(PruneModes::none())]; + let modes = [None, Some(PruneModes::default())]; let random_filter = ReceiptsLogPruneConfig(BTreeMap::from([( Address::random(), PruneMode::Before(100000), diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index 6cbed3ab20e..b4f24db7c58 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -247,7 +247,7 @@ where })?; match progress { StateRootProgress::Progress(state, hashed_entries_walked, updates) => { - provider.write_trie_updates(&updates)?; + provider.write_trie_updates(updates)?; let mut checkpoint = MerkleCheckpoint::new( to_block, @@ -290,7 +290,7 @@ where }) } StateRootProgress::Complete(root, hashed_entries_walked, updates) => { - provider.write_trie_updates(&updates)?; + provider.write_trie_updates(updates)?; entities_checkpoint.processed += hashed_entries_walked as u64; @@ -317,7 +317,7 @@ where error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "Incremental state root failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); StageError::Fatal(Box::new(e)) })?; - provider.write_trie_updates(&updates)?; + provider.write_trie_updates(updates)?; final_root = Some(root); } @@ -400,7 +400,7 @@ where validate_state_root(block_root, SealedHeader::seal_slow(target), input.unwind_to)?; // Validation passed, apply unwind changes to the database. 
-        provider.write_trie_updates(&updates)?;
+        provider.write_trie_updates(updates)?;

         // Update entities checkpoint to reflect the unwind operation
         // Since we're unwinding, we need to recalculate the total entities at the target block
diff --git a/crates/stages/stages/src/stages/merkle_changesets.rs b/crates/stages/stages/src/stages/merkle_changesets.rs
new file mode 100644
index 00000000000..7bf756c3dd3
--- /dev/null
+++ b/crates/stages/stages/src/stages/merkle_changesets.rs
@@ -0,0 +1,380 @@
+use crate::stages::merkle::INVALID_STATE_ROOT_ERROR_MESSAGE;
+use alloy_consensus::BlockHeader;
+use alloy_primitives::BlockNumber;
+use reth_consensus::ConsensusError;
+use reth_primitives_traits::{GotExpected, SealedHeader};
+use reth_provider::{
+    ChainStateBlockReader, DBProvider, HeaderProvider, ProviderError, StageCheckpointReader,
+    TrieWriter,
+};
+use reth_stages_api::{
+    BlockErrorKind, CheckpointBlockRange, ExecInput, ExecOutput, MerkleChangeSetsCheckpoint, Stage,
+    StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput,
+};
+use reth_trie::{updates::TrieUpdates, HashedPostState, KeccakKeyHasher, StateRoot, TrieInput};
+use reth_trie_db::{DatabaseHashedPostState, DatabaseStateRoot};
+use std::ops::Range;
+use tracing::{debug, error};
+
+/// The `MerkleChangeSets` stage.
+///
+/// This stage processes and maintains trie changesets from the finalized block to the latest block.
+#[derive(Debug, Clone)]
+pub struct MerkleChangeSets {
+    /// The number of blocks to retain changesets for, used as a fallback when the finalized block
+    /// is not found. Defaults to 64 (2 epochs on the beacon chain).
+    retention_blocks: u64,
+}
+
+impl MerkleChangeSets {
+    /// Creates a new `MerkleChangeSets` stage with the default retention of 64 blocks.
+    pub const fn new() -> Self {
+        Self { retention_blocks: 64 }
+    }
+
+    /// Creates a new `MerkleChangeSets` stage with a custom number of retention blocks.
+    pub const fn with_retention_blocks(retention_blocks: u64) -> Self {
+        Self { retention_blocks }
+    }
+
+    /// Returns the range of blocks which are already computed. Will return an empty range if none
+    /// have been computed.
+    fn computed_range(checkpoint: Option<StageCheckpoint>) -> Range<BlockNumber> {
+        let to = checkpoint.map(|chk| chk.block_number).unwrap_or_default();
+        let from = checkpoint
+            .map(|chk| chk.merkle_changesets_stage_checkpoint().unwrap_or_default())
+            .unwrap_or_default()
+            .block_range
+            .to;
+        from..to + 1
+    }
+
+    /// Determines the target range for changeset computation based on the checkpoint and provider
+    /// state.
+    ///
+    /// Returns the target range (exclusive end) to compute changesets for.
+    fn determine_target_range<Provider>(
+        &self,
+        provider: &Provider,
+    ) -> Result<Range<BlockNumber>, StageError>
+    where
+        Provider: StageCheckpointReader + ChainStateBlockReader,
+    {
+        // Get merkle checkpoint which represents our target end block
+        let merkle_checkpoint = provider
+            .get_stage_checkpoint(StageId::MerkleExecute)?
+            .map(|checkpoint| checkpoint.block_number)
+            .unwrap_or(0);
+
+        let target_end = merkle_checkpoint + 1; // exclusive
+
+        // Calculate the target range based on the finalized block and the target block.
+        // We maintain changesets from the finalized block to the latest block.
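+        //
+        // (Illustrative, hypothetical numbers for the computation below: if `MerkleExecute` is
+        // at block 1000 and `retention_blocks` is 64, the retention-based start is 936; a
+        // finalized block of 990 moves the start up to 991, while a finalized block of 900
+        // leaves it at 936.)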
+ let finalized_block = provider.last_finalized_block_number()?; + + // Calculate the fallback start position based on retention blocks + let retention_based_start = merkle_checkpoint.saturating_sub(self.retention_blocks); + + // If the finalized block was way in the past then we don't want to generate changesets for + // all of those past blocks; we only care about the recent history. + // + // Use maximum of finalized_block and retention_based_start if finalized_block exists, + // otherwise just use retention_based_start. + let mut target_start = finalized_block + .map(|finalized| finalized.saturating_add(1).max(retention_based_start)) + .unwrap_or(retention_based_start); + + // We cannot revert the genesis block; target_start must be >0 + target_start = target_start.max(1); + + Ok(target_start..target_end) + } + + /// Calculates the trie updates given a [`TrieInput`], asserting that the resulting state root + /// matches the expected one for the block. + fn calculate_block_trie_updates( + provider: &Provider, + block_number: BlockNumber, + input: TrieInput, + ) -> Result { + let (root, trie_updates) = + StateRoot::overlay_root_from_nodes_with_updates(provider.tx_ref(), input).map_err( + |e| { + error!( + target: "sync::stages::merkle_changesets", + %e, + ?block_number, + "Incremental state root failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); + StageError::Fatal(Box::new(e)) + }, + )?; + + let block = provider + .header_by_number(block_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; + + let (got, expected) = (root, block.state_root()); + if got != expected { + // Only seal the header when we need it for the error + let header = SealedHeader::seal_slow(block); + error!( + target: "sync::stages::merkle_changesets", + ?block_number, + ?got, + ?expected, + "Failed to verify block state root! {INVALID_STATE_ROOT_ERROR_MESSAGE}", + ); + return Err(StageError::Block { + error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff( + GotExpected { got, expected }.into(), + )), + block: Box::new(header.block_with_parent()), + }) + } + + Ok(trie_updates) + } + + fn populate_range( + provider: &Provider, + target_range: Range, + ) -> Result<(), StageError> + where + Provider: StageCheckpointReader + + TrieWriter + + DBProvider + + HeaderProvider + + ChainStateBlockReader, + { + let target_start = target_range.start; + let target_end = target_range.end; + debug!( + target: "sync::stages::merkle_changesets", + ?target_range, + "Starting trie changeset computation", + ); + + // We need to distinguish a cumulative revert and a per-block revert. A cumulative revert + // reverts changes starting at db tip all the way to a block. A per-block revert only + // reverts a block's changes. + // + // We need to calculate the cumulative HashedPostState reverts for every block in the + // target range. The cumulative HashedPostState revert for block N can be calculated as: + // + // + // ``` + // // where `extend` overwrites any shared keys + // cumulative_state_revert(N) = cumulative_state_revert(N + 1).extend(get_block_state_revert(N)) + // ``` + // + // We need per-block reverts to calculate the prefix set for each individual block. By + // using the per-block reverts to calculate cumulative reverts on-the-fly we can save a + // bunch of memory. 
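+        //
+        // As an illustrative sketch (hypothetical numbers): with a target range of 10..13, the
+        // cumulative revert for block 11 is assembled newest-first from the per-block reverts of
+        // blocks 12 and 11, i.e. conceptually:
+        //
+        // ```
+        // let mut cumulative = HashedPostState::default();
+        // cumulative.extend_ref(get_block_state_revert(12));
+        // cumulative.extend_ref(get_block_state_revert(11));
+        // ```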
+ debug!( + target: "sync::stages::merkle_changesets", + ?target_range, + "Computing per-block state reverts", + ); + let mut per_block_state_reverts = Vec::new(); + for block_number in target_range.clone() { + per_block_state_reverts.push(HashedPostState::from_reverts::( + provider.tx_ref(), + block_number..=block_number, + )?); + } + + // Helper to retrieve state revert data for a specific block from the pre-computed array + let get_block_state_revert = |block_number: BlockNumber| -> &HashedPostState { + let index = (block_number - target_start) as usize; + &per_block_state_reverts[index] + }; + + // Helper to accumulate state reverts from a given block to the target end + let compute_cumulative_state_revert = |block_number: BlockNumber| -> HashedPostState { + let mut cumulative_revert = HashedPostState::default(); + for n in (block_number..target_end).rev() { + cumulative_revert.extend_ref(get_block_state_revert(n)) + } + cumulative_revert + }; + + // To calculate the changeset for a block, we first need the TrieUpdates which are + // generated as a result of processing the block. To get these we need: + // 1) The TrieUpdates which revert the db's trie to _prior_ to the block + // 2) The HashedPostState to revert the db's state to _after_ the block + // + // To get (1) for `target_start` we need to do a big state root calculation which takes + // into account all changes between that block and db tip. For each block after the + // `target_start` we can update (1) using the TrieUpdates which were output by the previous + // block, only targeting the state changes of that block. + debug!( + target: "sync::stages::merkle_changesets", + ?target_start, + "Computing trie state at starting block", + ); + let mut input = TrieInput::default(); + input.state = compute_cumulative_state_revert(target_start); + input.prefix_sets = input.state.construct_prefix_sets(); + // target_start will be >= 1, see `determine_target_range`. + input.nodes = + Self::calculate_block_trie_updates(provider, target_start - 1, input.clone())?; + + for block_number in target_range { + debug!( + target: "sync::stages::merkle_changesets", + ?block_number, + "Computing trie updates for block", + ); + // Revert the state so that this block has been just processed, meaning we take the + // cumulative revert of the subsequent block. + input.state = compute_cumulative_state_revert(block_number + 1); + + // Construct prefix sets from only this block's `HashedPostState`, because we only care + // about trie updates which occurred as a result of this block being processed. + input.prefix_sets = get_block_state_revert(block_number).construct_prefix_sets(); + + // Calculate the trie updates for this block, then apply those updates to the reverts. + // We calculate the overlay which will be passed into the next step using the trie + // reverts prior to them being updated. + let this_trie_updates = + Self::calculate_block_trie_updates(provider, block_number, input.clone())?; + + let trie_overlay = input.nodes.clone().into_sorted(); + input.nodes.extend_ref(&this_trie_updates); + let this_trie_updates = this_trie_updates.into_sorted(); + + // Write the changesets to the DB using the trie updates produced by the block, and the + // trie reverts as the overlay. 
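+        //
+        // (Note: the overlay is captured from `input.nodes` *before* this block's trie updates
+        // are merged in, so the recorded changeset reflects each touched node's value as it was
+        // prior to this block being executed.)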
+ debug!( + target: "sync::stages::merkle_changesets", + ?block_number, + "Writing trie changesets for block", + ); + provider.write_trie_changesets( + block_number, + &this_trie_updates, + Some(&trie_overlay), + )?; + } + + Ok(()) + } +} + +impl Default for MerkleChangeSets { + fn default() -> Self { + Self::new() + } +} + +impl Stage for MerkleChangeSets +where + Provider: + StageCheckpointReader + TrieWriter + DBProvider + HeaderProvider + ChainStateBlockReader, +{ + fn id(&self) -> StageId { + StageId::MerkleChangeSets + } + + fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { + // Get merkle checkpoint and assert that the target is the same. + let merkle_checkpoint = provider + .get_stage_checkpoint(StageId::MerkleExecute)? + .map(|checkpoint| checkpoint.block_number) + .unwrap_or(0); + + if input.target.is_none_or(|target| merkle_checkpoint != target) { + return Err(StageError::Fatal(eyre::eyre!("Cannot sync stage to block {:?} when MerkleExecute is at block {merkle_checkpoint:?}", input.target).into())) + } + + let mut target_range = self.determine_target_range(provider)?; + + // Get the previously computed range. This will be updated to reflect the populating of the + // target range. + let mut computed_range = Self::computed_range(input.checkpoint); + + // We want the target range to not include any data already computed previously, if + // possible, so we start the target range from the end of the computed range if that is + // greater. + // + // ------------------------------> Block # + // |------computed-----| + // |-----target-----| + // |--actual--| + // + // However, if the target start is less than the previously computed start, we don't want to + // do this, as it would leave a gap of data at `target_range.start..=computed_range.start`. + // + // ------------------------------> Block # + // |---computed---| + // |-------target-------| + // |-------actual-------| + // + if target_range.start >= computed_range.start { + target_range.start = target_range.start.max(computed_range.end); + } + + // If target range is empty (target_start >= target_end), stage is already successfully + // executed + if target_range.start >= target_range.end { + return Ok(ExecOutput::done(input.checkpoint.unwrap_or_default())); + } + + // If our target range is a continuation of the already computed range then we can keep the + // already computed data. + if target_range.start == computed_range.end { + // Clear from target_start onwards to ensure no stale data exists + provider.clear_trie_changesets_from(target_range.start)?; + computed_range.end = target_range.end; + } else { + // If our target range is not a continuation of the already computed range then we + // simply clear the computed data, to make sure there's no gaps or conflicts. + provider.clear_trie_changesets()?; + computed_range = target_range.clone(); + } + + // Populate the target range with changesets + Self::populate_range(provider, target_range)?; + + let checkpoint_block_range = CheckpointBlockRange { + from: computed_range.start, + // CheckpointBlockRange is inclusive + to: computed_range.end.saturating_sub(1), + }; + + let checkpoint = StageCheckpoint::new(checkpoint_block_range.to) + .with_merkle_changesets_stage_checkpoint(MerkleChangeSetsCheckpoint { + block_range: checkpoint_block_range, + }); + + Ok(ExecOutput::done(checkpoint)) + } + + fn unwind( + &mut self, + provider: &Provider, + input: UnwindInput, + ) -> Result { + // Unwinding is trivial; just clear everything after the target block. 
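+        // (Illustrative: unwinding to block 100 clears the stored changesets for blocks 101 and
+        // above, and the recorded computed range shrinks so that its inclusive end becomes 100.)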
+ provider.clear_trie_changesets_from(input.unwind_to + 1)?; + + let mut computed_range = Self::computed_range(Some(input.checkpoint)); + computed_range.end = input.unwind_to + 1; + if computed_range.start > computed_range.end { + computed_range.start = computed_range.end; + } + + let checkpoint_block_range = CheckpointBlockRange { + from: computed_range.start, + // computed_range.end is exclusive + to: computed_range.end.saturating_sub(1), + }; + + let checkpoint = StageCheckpoint::new(input.unwind_to) + .with_merkle_changesets_stage_checkpoint(MerkleChangeSetsCheckpoint { + block_range: checkpoint_block_range, + }); + + Ok(UnwindOutput { checkpoint }) + } +} diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 7e57009e808..40c4cb91368 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -16,6 +16,8 @@ mod index_account_history; mod index_storage_history; /// Stage for computing state root. mod merkle; +/// Stage for computing merkle changesets. +mod merkle_changesets; mod prune; /// The sender recovery stage. mod sender_recovery; @@ -32,6 +34,7 @@ pub use headers::*; pub use index_account_history::*; pub use index_storage_history::*; pub use merkle::*; +pub use merkle_changesets::*; pub use prune::*; pub use sender_recovery::*; pub use tx_lookup::*; @@ -223,7 +226,7 @@ mod tests { // In an unpruned configuration there is 1 receipt, 3 changed accounts and 1 changed // storage. - let mut prune = PruneModes::none(); + let mut prune = PruneModes::default(); check_pruning(test_db.factory.clone(), prune.clone(), 1, 3, 1).await; prune.receipts = Some(PruneMode::Full); diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index f62259dcfdd..3161d4b1412 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -1,7 +1,7 @@ use reth_db_api::{table::Value, transaction::DbTxMut}; use reth_primitives_traits::NodePrimitives; use reth_provider::{ - BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, + BlockReader, ChainStateBlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, }; use reth_prune::{ @@ -42,6 +42,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader + + ChainStateBlockReader + StaticFileProviderFactory< Primitives: NodePrimitives, >, @@ -121,7 +122,7 @@ impl PruneSenderRecoveryStage { /// Create new prune sender recovery stage with the given prune mode and commit threshold. pub fn new(prune_mode: PruneMode, commit_threshold: usize) -> Self { Self(PruneStage::new( - PruneModes { sender_recovery: Some(prune_mode), ..PruneModes::none() }, + PruneModes { sender_recovery: Some(prune_mode), ..PruneModes::default() }, commit_threshold, )) } @@ -133,6 +134,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader + + ChainStateBlockReader + StaticFileProviderFactory< Primitives: NodePrimitives, >, diff --git a/crates/stages/types/src/checkpoints.rs b/crates/stages/types/src/checkpoints.rs index 61c399d9ac3..16bee1387f6 100644 --- a/crates/stages/types/src/checkpoints.rs +++ b/crates/stages/types/src/checkpoints.rs @@ -287,6 +287,17 @@ pub struct IndexHistoryCheckpoint { pub progress: EntitiesCheckpoint, } +/// Saves the progress of `MerkleChangeSets` stage. 
+#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct MerkleChangeSetsCheckpoint { + /// Block range which this checkpoint is valid for. + pub block_range: CheckpointBlockRange, +} + /// Saves the progress of abstract stage iterating over or downloading entities. #[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] @@ -386,6 +397,9 @@ impl StageCheckpoint { StageId::IndexStorageHistory | StageId::IndexAccountHistory => { StageUnitCheckpoint::IndexHistory(IndexHistoryCheckpoint::default()) } + StageId::MerkleChangeSets => { + StageUnitCheckpoint::MerkleChangeSets(MerkleChangeSetsCheckpoint::default()) + } _ => return self, }); _ = self.stage_checkpoint.map(|mut checkpoint| checkpoint.set_block_range(from, to)); @@ -411,6 +425,7 @@ impl StageCheckpoint { progress: entities, .. }) => Some(entities), + StageUnitCheckpoint::MerkleChangeSets(_) => None, } } } @@ -436,6 +451,8 @@ pub enum StageUnitCheckpoint { Headers(HeadersCheckpoint), /// Saves the progress of Index History stage. IndexHistory(IndexHistoryCheckpoint), + /// Saves the progress of `MerkleChangeSets` stage. + MerkleChangeSets(MerkleChangeSetsCheckpoint), } impl StageUnitCheckpoint { @@ -446,7 +463,8 @@ impl StageUnitCheckpoint { Self::Account(AccountHashingCheckpoint { block_range, .. }) | Self::Storage(StorageHashingCheckpoint { block_range, .. }) | Self::Execution(ExecutionCheckpoint { block_range, .. }) | - Self::IndexHistory(IndexHistoryCheckpoint { block_range, .. }) => { + Self::IndexHistory(IndexHistoryCheckpoint { block_range, .. }) | + Self::MerkleChangeSets(MerkleChangeSetsCheckpoint { block_range, .. }) => { let old_range = *block_range; *block_range = CheckpointBlockRange { from, to }; @@ -544,6 +562,15 @@ stage_unit_checkpoints!( index_history_stage_checkpoint, /// Sets the stage checkpoint to index history. with_index_history_stage_checkpoint + ), + ( + 6, + MerkleChangeSets, + MerkleChangeSetsCheckpoint, + /// Returns the merkle changesets stage checkpoint, if any. + merkle_changesets_stage_checkpoint, + /// Sets the stage checkpoint to merkle changesets. + with_merkle_changesets_stage_checkpoint ) ); diff --git a/crates/stages/types/src/id.rs b/crates/stages/types/src/id.rs index 78d7e0ec1b6..8c0a91c8731 100644 --- a/crates/stages/types/src/id.rs +++ b/crates/stages/types/src/id.rs @@ -25,6 +25,7 @@ pub enum StageId { TransactionLookup, IndexStorageHistory, IndexAccountHistory, + MerkleChangeSets, Prune, Finish, /// Other custom stage with a provided string identifier. 
@@ -39,7 +40,7 @@ static ENCODED_STAGE_IDS: OnceLock>> = OnceLock::new(); impl StageId { /// All supported Stages - pub const ALL: [Self; 15] = [ + pub const ALL: [Self; 16] = [ Self::Era, Self::Headers, Self::Bodies, @@ -53,6 +54,7 @@ impl StageId { Self::TransactionLookup, Self::IndexStorageHistory, Self::IndexAccountHistory, + Self::MerkleChangeSets, Self::Prune, Self::Finish, ]; @@ -88,6 +90,7 @@ impl StageId { Self::TransactionLookup => "TransactionLookup", Self::IndexAccountHistory => "IndexAccountHistory", Self::IndexStorageHistory => "IndexStorageHistory", + Self::MerkleChangeSets => "MerkleChangeSets", Self::Prune => "Prune", Self::Finish => "Finish", Self::Other(s) => s, diff --git a/crates/stages/types/src/lib.rs b/crates/stages/types/src/lib.rs index 4e30ce27cd7..83585fee7ce 100644 --- a/crates/stages/types/src/lib.rs +++ b/crates/stages/types/src/lib.rs @@ -18,8 +18,8 @@ pub use id::StageId; mod checkpoints; pub use checkpoints::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, - HeadersCheckpoint, IndexHistoryCheckpoint, MerkleCheckpoint, StageCheckpoint, - StageUnitCheckpoint, StorageHashingCheckpoint, StorageRootMerkleCheckpoint, + HeadersCheckpoint, IndexHistoryCheckpoint, MerkleChangeSetsCheckpoint, MerkleCheckpoint, + StageCheckpoint, StageUnitCheckpoint, StorageHashingCheckpoint, StorageRootMerkleCheckpoint, }; mod execution; diff --git a/crates/storage/db-api/src/cursor.rs b/crates/storage/db-api/src/cursor.rs index 3aeee949ea1..068b64a3c97 100644 --- a/crates/storage/db-api/src/cursor.rs +++ b/crates/storage/db-api/src/cursor.rs @@ -87,7 +87,7 @@ pub trait DbDupCursorRO { /// | `key` | `subkey` | **Equivalent starting position** | /// |--------|----------|-----------------------------------------| /// | `None` | `None` | [`DbCursorRO::first()`] | - /// | `Some` | `None` | [`DbCursorRO::seek()`] | + /// | `Some` | `None` | [`DbCursorRO::seek_exact()`] | /// | `None` | `Some` | [`DbDupCursorRO::seek_by_key_subkey()`] | /// | `Some` | `Some` | [`DbDupCursorRO::seek_by_key_subkey()`] | fn walk_dup( diff --git a/crates/storage/db-api/src/models/accounts.rs b/crates/storage/db-api/src/models/accounts.rs index 263e362cc6a..41a11e1c7e5 100644 --- a/crates/storage/db-api/src/models/accounts.rs +++ b/crates/storage/db-api/src/models/accounts.rs @@ -176,7 +176,11 @@ impl Decode for AddressStorageKey { } } -impl_fixed_arbitrary!((BlockNumberAddress, 28), (AddressStorageKey, 52)); +impl_fixed_arbitrary!( + (BlockNumberAddress, 28), + (BlockNumberHashedAddress, 40), + (AddressStorageKey, 52) +); #[cfg(test)] mod tests { @@ -209,6 +213,31 @@ mod tests { assert_eq!(bytes, Encode::encode(key)); } + #[test] + fn test_block_number_hashed_address() { + let num = 1u64; + let hash = B256::from_slice(&[0xba; 32]); + let key = BlockNumberHashedAddress((num, hash)); + + let mut bytes = [0u8; 40]; + bytes[..8].copy_from_slice(&num.to_be_bytes()); + bytes[8..].copy_from_slice(hash.as_slice()); + + let encoded = Encode::encode(key); + assert_eq!(encoded, bytes); + + let decoded: BlockNumberHashedAddress = Decode::decode(&encoded).unwrap(); + assert_eq!(decoded, key); + } + + #[test] + fn test_block_number_hashed_address_rand() { + let mut bytes = [0u8; 40]; + rng().fill(bytes.as_mut_slice()); + let key = BlockNumberHashedAddress::arbitrary(&mut Unstructured::new(&bytes)).unwrap(); + assert_eq!(bytes, Encode::encode(key)); + } + #[test] fn test_address_storage_key() { let storage_key = StorageKey::random(); diff --git 
a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 24951789f5d..31d9b301f8c 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -12,7 +12,9 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned, TxType}; use reth_primitives_traits::{Account, Bytecode, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; -use reth_trie_common::{StoredNibbles, StoredNibblesSubKey, *}; +use reth_trie_common::{ + StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, TrieChangeSetsEntry, *, +}; use serde::{Deserialize, Serialize}; pub mod accounts; @@ -219,6 +221,7 @@ impl_compression_for_compact!( TxType, StorageEntry, BranchNodeCompact, + TrieChangeSetsEntry, StoredNibbles, StoredNibblesSubKey, StorageTrieEntry, diff --git a/crates/storage/db-api/src/tables/mod.rs b/crates/storage/db-api/src/tables/mod.rs index 259b2d39b15..cd678260128 100644 --- a/crates/storage/db-api/src/tables/mod.rs +++ b/crates/storage/db-api/src/tables/mod.rs @@ -21,8 +21,8 @@ use crate::{ accounts::BlockNumberAddress, blocks::{HeaderHash, StoredBlockOmmers}, storage_sharded_key::StorageShardedKey, - AccountBeforeTx, ClientVersion, CompactU256, IntegerList, ShardedKey, - StoredBlockBodyIndices, StoredBlockWithdrawals, + AccountBeforeTx, BlockNumberHashedAddress, ClientVersion, CompactU256, IntegerList, + ShardedKey, StoredBlockBodyIndices, StoredBlockWithdrawals, }, table::{Decode, DupSort, Encode, Table, TableInfo}, }; @@ -32,7 +32,9 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_primitives_traits::{Account, Bytecode, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; -use reth_trie_common::{BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey}; +use reth_trie_common::{ + BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, TrieChangeSetsEntry, +}; use serde::{Deserialize, Serialize}; use std::fmt; @@ -486,6 +488,20 @@ tables! { type SubKey = StoredNibblesSubKey; } + /// Stores the state of a node in the accounts trie prior to a particular block being executed. + table AccountsTrieChangeSets { + type Key = BlockNumber; + type Value = TrieChangeSetsEntry; + type SubKey = StoredNibblesSubKey; + } + + /// Stores the state of a node in a storage trie prior to a particular block being executed. + table StoragesTrieChangeSets { + type Key = BlockNumberHashedAddress; + type Value = TrieChangeSetsEntry; + type SubKey = StoredNibblesSubKey; + } + /// Stores the transaction sender for each canonical transaction. /// It is needed to speed up execution stage and allows fetching signer without doing /// transaction signed recovery diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 48442aab381..87f009356a0 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -602,7 +602,7 @@ where match state_root.root_with_progress()? 
{ StateRootProgress::Progress(state, _, updates) => { - let updated_len = provider.write_trie_updates(&updates)?; + let updated_len = provider.write_trie_updates(updates)?; total_flushed_updates += updated_len; trace!(target: "reth::cli", @@ -622,7 +622,7 @@ where } } StateRootProgress::Complete(root, _, updates) => { - let updated_len = provider.write_trie_updates(&updates)?; + let updated_len = provider.write_trie_updates(updates)?; total_flushed_updates += updated_len; trace!(target: "reth::cli", diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index c27587690ba..47cc630bcb6 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -137,6 +137,14 @@ pub enum ProviderError { /// Missing trie updates. #[error("missing trie updates for block {0}")] MissingTrieUpdates(B256), + /// Insufficient changesets to revert to the requested block. + #[error("insufficient changesets to revert to block #{requested}. Available changeset range: {available:?}")] + InsufficientChangesets { + /// The block number requested for reversion + requested: BlockNumber, + /// The available range of blocks with changesets + available: core::ops::RangeInclusive, + }, /// Any other error type wrapped into a cloneable [`AnyError`]. #[error(transparent)] Other(#[from] AnyError), diff --git a/crates/storage/provider/src/bundle_state/mod.rs b/crates/storage/provider/src/bundle_state/mod.rs deleted file mode 100644 index 58b76f1eacf..00000000000 --- a/crates/storage/provider/src/bundle_state/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! Bundle state module. -//! This module contains all the logic related to bundle state. - -mod state_reverts; -pub use state_reverts::StorageRevertsIter; diff --git a/crates/storage/provider/src/changesets_utils/mod.rs b/crates/storage/provider/src/changesets_utils/mod.rs new file mode 100644 index 00000000000..3b65825264b --- /dev/null +++ b/crates/storage/provider/src/changesets_utils/mod.rs @@ -0,0 +1,7 @@ +//! This module contains helpful utilities related to populating changesets tables. + +mod state_reverts; +pub use state_reverts::StorageRevertsIter; + +mod trie; +pub use trie::*; diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/changesets_utils/state_reverts.rs similarity index 100% rename from crates/storage/provider/src/bundle_state/state_reverts.rs rename to crates/storage/provider/src/changesets_utils/state_reverts.rs diff --git a/crates/storage/provider/src/changesets_utils/trie.rs b/crates/storage/provider/src/changesets_utils/trie.rs new file mode 100644 index 00000000000..f4365aab103 --- /dev/null +++ b/crates/storage/provider/src/changesets_utils/trie.rs @@ -0,0 +1,147 @@ +use itertools::{merge_join_by, EitherOrBoth}; +use reth_db_api::DatabaseError; +use reth_trie::{trie_cursor::TrieCursor, BranchNodeCompact, Nibbles}; +use std::cmp::{Ord, Ordering}; + +/// Combines a sorted iterator of trie node paths and a storage trie cursor into a new +/// iterator which produces the current values of all given paths in the same order. +#[derive(Debug)] +pub struct StorageTrieCurrentValuesIter<'cursor, P, C> { + /// Sorted iterator of node paths which we want the values of. + paths: P, + /// Storage trie cursor. + cursor: &'cursor mut C, + /// Current value at the cursor, allows us to treat the cursor as a peekable iterator. 
+    cursor_current: Option<(Nibbles, BranchNodeCompact)>,
+}
+
+impl<'cursor, P, C> StorageTrieCurrentValuesIter<'cursor, P, C>
+where
+    P: Iterator<Item = Nibbles>,
+    C: TrieCursor,
+{
+    /// Instantiate a [`StorageTrieCurrentValuesIter`] from a sorted paths iterator and a cursor.
+    pub fn new(paths: P, cursor: &'cursor mut C) -> Result<Self, DatabaseError> {
+        let mut new_self = Self { paths, cursor, cursor_current: None };
+        new_self.seek_cursor(Nibbles::default())?;
+        Ok(new_self)
+    }
+
+    fn seek_cursor(&mut self, path: Nibbles) -> Result<(), DatabaseError> {
+        self.cursor_current = self.cursor.seek(path)?;
+        Ok(())
+    }
+}
+
+impl<'cursor, P, C> Iterator for StorageTrieCurrentValuesIter<'cursor, P, C>
+where
+    P: Iterator<Item = Nibbles>,
+    C: TrieCursor,
+{
+    type Item = Result<(Nibbles, Option<BranchNodeCompact>), DatabaseError>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let Some(curr_path) = self.paths.next() else {
+            // If there are no more paths then there is no further possible output.
+            return None
+        };
+
+        // If the path is ahead of the cursor then seek the cursor forward to catch up. The cursor
+        // will seek either to `curr_path` or beyond it.
+        if self.cursor_current.as_ref().is_some_and(|(cursor_path, _)| curr_path > *cursor_path) &&
+            let Err(err) = self.seek_cursor(curr_path)
+        {
+            return Some(Err(err))
+        }
+
+        // If there is a path but the cursor is empty then that path has no node.
+        if self.cursor_current.is_none() {
+            return Some(Ok((curr_path, None)))
+        }
+
+        let (cursor_path, cursor_node) =
+            self.cursor_current.as_mut().expect("already checked for None");
+
+        // There is both a path and a cursor value, compare their paths.
+        match curr_path.cmp(cursor_path) {
+            Ordering::Less => {
+                // If the path is behind the cursor then there is no value for that
+                // path, produce None.
+                Some(Ok((curr_path, None)))
+            }
+            Ordering::Equal => {
+                // If the target path and cursor's path match then there is a value for that path,
+                // return the value. We don't seek the cursor here, that will be handled on the
+                // next call to `next` after checking that `paths.next()` isn't None.
+                let cursor_node = core::mem::take(cursor_node);
+                Some(Ok((*cursor_path, Some(cursor_node))))
+            }
+            Ordering::Greater => {
+                panic!("cursor was seeked to {curr_path:?}, but produced a node at a lower path {cursor_path:?}")
+            }
+        }
+    }
+}
+
+/// Returns an iterator which produces the values to be inserted into the `StoragesTrieChangeSets`
+/// table for an account whose storage was wiped during a block. It is expected that this is called
+/// prior to inserting the block's trie updates.
+///
+/// ## Arguments
+///
+/// - `curr_values_of_changed` is an iterator over the current values of all trie nodes modified by
+///   the block, ordered by path.
+/// - `all_nodes` is an iterator over all existing trie nodes for the account, ordered by path.
+///
+/// ## Returns
+///
+/// An iterator of trie node paths and a `Some(node)` (indicating the node was wiped) or a `None`
+/// (indicating the node was modified in the block but didn't previously exist). The iterator's
+/// results will be ordered by path.
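+///
+/// ## Example (illustrative)
+///
+/// With hypothetical paths `a < b < c`, if `curr_values_of_changed` yields `(a, Some(node_a))`
+/// and `(b, None)` while `all_nodes` yields `(a, node_a)` and `(c, node_c)`, the resulting
+/// iterator produces `(a, Some(node_a))`, `(b, None)` and `(c, Some(node_c))`, in path order.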
+pub fn storage_trie_wiped_changeset_iter( + curr_values_of_changed: impl Iterator< + Item = Result<(Nibbles, Option), DatabaseError>, + >, + all_nodes: impl Iterator>, +) -> Result< + impl Iterator), DatabaseError>>, + DatabaseError, +> { + let all_nodes = all_nodes.map(|e| e.map(|(nibbles, node)| (nibbles, Some(node)))); + + let merged = merge_join_by(curr_values_of_changed, all_nodes, |a, b| match (a, b) { + (Err(_), _) => Ordering::Less, + (_, Err(_)) => Ordering::Greater, + (Ok(a), Ok(b)) => a.0.cmp(&b.0), + }); + + Ok(merged.map(|either_or| match either_or { + EitherOrBoth::Left(changed) => { + // A path of a changed node (given in `paths`) which was not found in the database (or + // there's an error). The current value of this path must be None, otherwise it would + // have also been returned by the `all_nodes` iter. + debug_assert!( + changed.as_ref().is_err() || changed.as_ref().is_ok_and(|(_, node)| node.is_none()), + "changed node is Some but wasn't returned by `all_nodes` iterator: {changed:?}", + ); + changed + } + EitherOrBoth::Right(wiped) => { + // A node was found in the db (indicating it was wiped) but was not given in `paths`. + // Return it as-is. + wiped + } + EitherOrBoth::Both(changed, _wiped) => { + // A path of a changed node (given in `paths`) was found with a previous value in the + // database. The changed node must have a value which is equal to the one found by the + // `all_nodes` iterator. If the changed node had no previous value (None) it wouldn't + // be returned by `all_nodes` and so would be in the Left branch. + // + // Due to the ordering closure passed to `merge_join_by` it's not possible for either + // value to be an error here. + debug_assert!(changed.is_ok(), "unreachable error condition: {changed:?}"); + debug_assert_eq!(changed, _wiped); + changed + } + })) +} diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index c281f117908..70822c604bb 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -35,7 +35,7 @@ pub use static_file::StaticFileSegment; pub use reth_execution_types::*; -pub mod bundle_state; +pub mod changesets_utils; /// Re-export `OriginalValuesKnown` pub use revm_database::states::OriginalValuesKnown; diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 7040032eca0..512b8569de2 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -6,7 +6,7 @@ use crate::{ HashedPostStateProvider, HeaderProvider, ProviderError, ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, - TransactionsProvider, + TransactionsProvider, TrieReader, }; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; @@ -25,7 +25,7 @@ use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{BlockBodyIndicesProvider, NodePrimitivesProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderResult; -use reth_trie::{HashedPostState, KeccakKeyHasher}; +use reth_trie::{updates::TrieUpdatesSorted, HashedPostState, KeccakKeyHasher}; use revm_database::BundleState; use std::{ ops::{RangeBounds, RangeInclusive}, @@ -739,6 
+739,19 @@ impl StateReader for BlockchainProvider { } } +impl TrieReader for BlockchainProvider { + fn trie_reverts(&self, from: BlockNumber) -> ProviderResult { + self.consistent_provider()?.trie_reverts(from) + } + + fn get_block_trie_updates( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + self.consistent_provider()?.get_block_trie_updates(block_number) + } +} + #[cfg(test)] mod tests { use crate::{ @@ -755,8 +768,7 @@ mod tests { use rand::Rng; use reth_chain_state::{ test_utils::TestBlockBuilder, CanonStateNotification, CanonStateSubscriptions, - CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, - NewCanonicalChain, + CanonicalInMemoryState, ExecutedBlock, NewCanonicalChain, }; use reth_chainspec::{ChainSpec, MAINNET}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; @@ -882,12 +894,14 @@ mod tests { let execution_outcome = ExecutionOutcome { receipts: vec![block_receipts], ..Default::default() }; - ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed(block.clone(), senders)), - execution_outcome.into(), - Default::default(), - ExecutedTrieUpdates::empty(), - ) + ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + block.clone(), + senders, + )), + execution_output: execution_outcome.into(), + ..Default::default() + } }) .collect(), }; @@ -1009,15 +1023,13 @@ mod tests { let in_memory_block_senders = first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { - new: vec![ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed( + new: vec![ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( first_in_mem_block.clone(), in_memory_block_senders, )), - Default::default(), - Default::default(), - ExecutedTrieUpdates::empty(), - )], + ..Default::default() + }], }; provider.canonical_in_memory_state.update_chain(chain); @@ -1045,16 +1057,12 @@ mod tests { assert_eq!(provider.find_block_by_hash(first_db_block.hash(), BlockSource::Pending)?, None); // Insert the last block into the pending state - provider.canonical_in_memory_state.set_pending_block(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(RecoveredBlock::new_sealed( - last_in_mem_block.clone(), - Default::default(), - )), - execution_output: Default::default(), - hashed_state: Default::default(), - }, - trie: ExecutedTrieUpdates::empty(), + provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + last_in_mem_block.clone(), + Default::default(), + )), + ..Default::default() }); // Now the last block should be found in memory @@ -1105,15 +1113,13 @@ mod tests { let in_memory_block_senders = first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { - new: vec![ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed( + new: vec![ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( first_in_mem_block.clone(), in_memory_block_senders, )), - Default::default(), - Default::default(), - ExecutedTrieUpdates::empty(), - )], + ..Default::default() + }], }; provider.canonical_in_memory_state.update_chain(chain); @@ -1159,16 +1165,12 @@ mod tests { ); // Set the block as pending - provider.canonical_in_memory_state.set_pending_block(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(RecoveredBlock::new_sealed( - block.clone(), - 
block.senders().unwrap(), - )), - execution_output: Default::default(), - hashed_state: Default::default(), - }, - trie: ExecutedTrieUpdates::empty(), + provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + block.clone(), + block.senders().unwrap(), + )), + ..Default::default() }); // Assertions related to the pending block @@ -1206,15 +1208,13 @@ mod tests { let in_memory_block_senders = first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { - new: vec![ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed( + new: vec![ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( first_in_mem_block.clone(), in_memory_block_senders, )), - Default::default(), - Default::default(), - ExecutedTrieUpdates::empty(), - )], + ..Default::default() + }], }; provider.canonical_in_memory_state.update_chain(chain); @@ -1686,9 +1686,12 @@ mod tests { .first() .map(|block| { let senders = block.senders().expect("failed to recover senders"); - ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed(block.clone(), senders)), - Arc::new(ExecutionOutcome { + ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + block.clone(), + senders, + )), + execution_output: Arc::new(ExecutionOutcome { bundle: BundleState::new( in_memory_state.into_iter().map(|(address, (account, _))| { (address, None, Some(account.into()), Default::default()) @@ -1701,9 +1704,8 @@ mod tests { first_block: first_in_memory_block, ..Default::default() }), - Default::default(), - ExecutedTrieUpdates::empty(), - ) + ..Default::default() + } }) .unwrap()], }; @@ -1821,19 +1823,13 @@ mod tests { // adding a pending block to state can test pending() and pending_state_by_hash() function let pending_block = database_blocks[database_blocks.len() - 1].clone(); - only_database_provider.canonical_in_memory_state.set_pending_block( - ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(RecoveredBlock::new_sealed( - pending_block.clone(), - Default::default(), - )), - execution_output: Default::default(), - hashed_state: Default::default(), - }, - trie: ExecutedTrieUpdates::empty(), - }, - ); + only_database_provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + pending_block.clone(), + Default::default(), + )), + ..Default::default() + }); assert_eq!( pending_block.hash(), @@ -1919,16 +1915,12 @@ mod tests { // Set the pending block in memory let pending_block = in_memory_blocks.last().unwrap(); - provider.canonical_in_memory_state.set_pending_block(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(RecoveredBlock::new_sealed( - pending_block.clone(), - Default::default(), - )), - execution_output: Default::default(), - hashed_state: Default::default(), - }, - trie: ExecutedTrieUpdates::empty(), + provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + pending_block.clone(), + Default::default(), + )), + ..Default::default() }); // Set the safe block in memory diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 93415e8e347..66a35e5e9b1 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -4,7 +4,7 @@ use crate::{ 
BlockReader, BlockReaderIdExt, BlockSource, ChainSpecProvider, ChangeSetReader, HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, - TransactionsProvider, + TransactionsProvider, TrieReader, }; use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; use alloy_eips::{ @@ -28,6 +28,7 @@ use reth_storage_api::{ StorageChangeSetReader, TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::ProviderResult; +use reth_trie::updates::TrieUpdatesSorted; use revm_database::states::PlainStorageRevert; use std::{ ops::{Add, Bound, RangeBounds, RangeInclusive, Sub}, @@ -1504,6 +1505,19 @@ impl StateReader for ConsistentProvider { } } +impl TrieReader for ConsistentProvider { + fn trie_reverts(&self, from: BlockNumber) -> ProviderResult { + self.storage_provider.trie_reverts(from) + } + + fn get_block_trie_updates( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + self.storage_provider.get_block_trie_updates(block_number) + } +} + #[cfg(test)] mod tests { use crate::{ @@ -1514,9 +1528,7 @@ mod tests { use alloy_primitives::B256; use itertools::Itertools; use rand::Rng; - use reth_chain_state::{ - ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, NewCanonicalChain, - }; + use reth_chain_state::{ExecutedBlock, NewCanonicalChain}; use reth_db_api::models::AccountBeforeTx; use reth_ethereum_primitives::Block; use reth_execution_types::ExecutionOutcome; @@ -1619,15 +1631,13 @@ mod tests { let in_memory_block_senders = first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { - new: vec![ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed( + new: vec![ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( first_in_mem_block.clone(), in_memory_block_senders, )), - Default::default(), - Default::default(), - ExecutedTrieUpdates::empty(), - )], + ..Default::default() + }], }; consistent_provider.canonical_in_memory_state.update_chain(chain); let consistent_provider = provider.consistent_provider()?; @@ -1661,16 +1671,12 @@ mod tests { ); // Insert the last block into the pending state - provider.canonical_in_memory_state.set_pending_block(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(RecoveredBlock::new_sealed( - last_in_mem_block.clone(), - Default::default(), - )), - execution_output: Default::default(), - hashed_state: Default::default(), - }, - trie: ExecutedTrieUpdates::empty(), + provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + last_in_mem_block.clone(), + Default::default(), + )), + ..Default::default() }); // Now the last block should be found in memory @@ -1729,15 +1735,13 @@ mod tests { let in_memory_block_senders = first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { - new: vec![ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed( + new: vec![ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( first_in_mem_block.clone(), in_memory_block_senders, )), - Default::default(), - Default::default(), - ExecutedTrieUpdates::empty(), - )], + ..Default::default() + }], }; consistent_provider.canonical_in_memory_state.update_chain(chain); @@ -1834,9 +1838,12 @@ mod tests { .first() .map(|block| { let senders = block.senders().expect("failed to 
recover senders"); - ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed(block.clone(), senders)), - Arc::new(ExecutionOutcome { + ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + block.clone(), + senders, + )), + execution_output: Arc::new(ExecutionOutcome { bundle: BundleState::new( in_memory_state.into_iter().map(|(address, (account, _))| { (address, None, Some(account.into()), Default::default()) @@ -1849,9 +1856,8 @@ mod tests { first_block: first_in_memory_block, ..Default::default() }), - Default::default(), - ExecutedTrieUpdates::empty(), - ) + ..Default::default() + } }) .unwrap()], }; diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index f7b3c4ba603..bd6b1e0f472 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -84,7 +84,7 @@ impl ProviderFactory { db, chain_spec, static_file_provider, - prune_modes: PruneModes::none(), + prune_modes: PruneModes::default(), storage: Default::default(), } } @@ -131,7 +131,7 @@ impl>> ProviderFactory { db: Arc::new(init_db(path, args).map_err(RethError::msg)?), chain_spec, static_file_provider, - prune_modes: PruneModes::none(), + prune_modes: PruneModes::default(), storage: Default::default(), }) } @@ -670,7 +670,7 @@ mod tests { let prune_modes = PruneModes { sender_recovery: Some(PruneMode::Full), transaction_lookup: Some(PruneMode::Full), - ..PruneModes::none() + ..PruneModes::default() }; let factory = create_test_provider_factory(); let provider = factory.with_prune_modes(prune_modes).provider_rw().unwrap(); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 6fdc37c4f53..235bf57a4a4 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1,5 +1,7 @@ use crate::{ - bundle_state::StorageRevertsIter, + changesets_utils::{ + storage_trie_wiped_changeset_iter, StorageRevertsIter, StorageTrieCurrentValuesIter, + }, providers::{ database::{chain::ChainStorage, metrics}, static_file::StaticFileWriter, @@ -16,7 +18,7 @@ use crate::{ OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, TrieWriter, + TransactionsProviderExt, TrieReader, TrieWriter, }; use alloy_consensus::{ transaction::{SignerRecoverable, TransactionMeta, TxHashRef}, @@ -30,14 +32,14 @@ use alloy_primitives::{ }; use itertools::Itertools; use rayon::slice::ParallelSliceMut; -use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates}; +use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, - ShardedKey, StoredBlockBodyIndices, + BlockNumberHashedAddress, ShardedKey, StoredBlockBodyIndices, }, table::Table, tables, @@ -47,8 +49,7 @@ use reth_db_api::{ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives_traits::{ - 
Account, Block as _, BlockBody as _, Bytecode, GotExpected, RecoveredBlock, SealedHeader, - StorageEntry, + Account, Block as _, BlockBody as _, Bytecode, RecoveredBlock, SealedHeader, StorageEntry, }; use reth_prune_types::{ PruneCheckpoint, PruneMode, PruneModes, PruneSegment, MINIMUM_PRUNING_DISTANCE, @@ -59,13 +60,19 @@ use reth_storage_api::{ BlockBodyIndicesProvider, BlockBodyReader, NodePrimitivesProvider, StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider, }; -use reth_storage_errors::provider::{ProviderResult, RootMismatch}; +use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, - updates::{StorageTrieUpdates, TrieUpdates}, - HashedPostStateSorted, Nibbles, StateRoot, StoredNibbles, + trie_cursor::{ + InMemoryTrieCursor, InMemoryTrieCursorFactory, TrieCursor, TrieCursorFactory, + TrieCursorIter, + }, + updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, + BranchNodeCompact, HashedPostStateSorted, Nibbles, StoredNibbles, StoredNibblesSubKey, + TrieChangeSetsEntry, +}; +use reth_trie_db::{ + DatabaseAccountTrieCursor, DatabaseStorageTrieCursor, DatabaseTrieCursorFactory, }; -use reth_trie_db::{DatabaseStateRoot, DatabaseStorageTrieCursor}; use revm_database::states::{ PlainStateReverts, PlainStorageChangeset, PlainStorageRevert, StateChangeset, }; @@ -73,7 +80,7 @@ use std::{ cmp::Ordering, collections::{BTreeMap, BTreeSet}, fmt::Debug, - ops::{Deref, DerefMut, Not, Range, RangeBounds, RangeInclusive}, + ops::{Deref, DerefMut, Not, Range, RangeBounds, RangeFrom, RangeInclusive}, sync::Arc, }; use tracing::{debug, trace}; @@ -254,10 +261,7 @@ impl AsRef for DatabaseProvider { impl DatabaseProvider { /// Writes executed blocks and state to storage. - pub fn save_blocks( - &self, - blocks: Vec>, - ) -> ProviderResult<()> { + pub fn save_blocks(&self, blocks: Vec>) -> ProviderResult<()> { if blocks.is_empty() { debug!(target: "providers::db", "Attempted to write empty block range"); return Ok(()) @@ -281,12 +285,10 @@ impl DatabaseProvider DatabaseProvider DatabaseProvider, - ) -> ProviderResult<()> { + pub fn unwind_trie_state_from(&self, from: BlockNumber) -> ProviderResult<()> { let changed_accounts = self .tx .cursor_read::()? - .walk_range(range.clone())? + .walk_range(from..)? .collect::, _>>()?; - // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; - let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, account) in hashed_addresses { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - if account.is_none() { - destroyed_accounts.insert(hashed_address); - } - } + // Unwind account hashes. + self.unwind_account_hashing(changed_accounts.iter())?; // Unwind account history indices. self.unwind_account_history_indices(changed_accounts.iter())?; - let storage_range = BlockNumberAddress::range(range.clone()); + let storage_start = BlockNumberAddress((from, Address::ZERO)); let changed_storages = self .tx .cursor_read::()? - .walk_range(storage_range)? + .walk_range(storage_start..)? .collect::, _>>()?; - // Unwind storage hashes. Add changed account and storage keys to corresponding prefix - // sets. 
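Just below, the rewritten `unwind_trie_state_from` drops the prefix-set bookkeeping and state-root recomputation entirely: it replays the pre-images recorded in the new trie changeset tables (`trie_reverts` followed by `write_trie_updates_sorted`) and then clears those changesets with `clear_trie_changesets_from`. A minimal standalone sketch of that record-then-revert idea, using plain `BTreeMap`s and hypothetical names rather than the real tables and cursors:

use std::collections::{BTreeMap, BTreeSet};

// Hypothetical stand-ins: `state` plays the role of the live trie table, while
// `changesets` records, per block, the previous value of every key that the
// block touched (`None` means the key did not exist before the block).
type Key = u8;
type Value = u32;
type ChangeSets = BTreeMap<u64, Vec<(Key, Option<Value>)>>;

fn apply_block(
    block: u64,
    writes: &[(Key, Option<Value>)],
    state: &mut BTreeMap<Key, Value>,
    changesets: &mut ChangeSets,
) {
    for (key, new_value) in writes {
        // Record the pre-image *before* overwriting, analogous to calling
        // write_trie_changesets before write_trie_updates_sorted.
        changesets.entry(block).or_default().push((*key, state.get(key).copied()));
        match new_value {
            Some(value) => state.insert(*key, *value),
            None => state.remove(key),
        };
    }
}

fn unwind_from(from: u64, state: &mut BTreeMap<Key, Value>, changesets: &mut ChangeSets) {
    // The oldest pre-image per key wins, so walk blocks in ascending order and
    // only apply the first revert seen for each key.
    let mut seen = BTreeSet::new();
    for entries in changesets.range(from..).map(|(_, entries)| entries) {
        for (key, old_value) in entries {
            if seen.insert(*key) {
                match old_value {
                    Some(value) => state.insert(*key, *value),
                    None => state.remove(key),
                };
            }
        }
    }
    // Drop the changesets that were just unwound, like clear_trie_changesets_from.
    let _ = changesets.split_off(&from);
}

fn main() {
    let mut state = BTreeMap::from([(1u8, 10u32)]);
    let mut changesets = ChangeSets::new();
    apply_block(5, &[(1, Some(11)), (2, Some(20))], &mut state, &mut changesets);
    apply_block(6, &[(1, None)], &mut state, &mut changesets);
    unwind_from(5, &mut state, &mut changesets);
    assert_eq!(state, BTreeMap::from([(1u8, 10u32)]));
}

The sketch mirrors the invariant relied on below by `trie_reverts`: when several blocks are unwound at once, the oldest recorded pre-image of each path is the value to restore.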
- let mut storage_prefix_sets = B256Map::::default(); - let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; - for (hashed_address, hashed_slots) in storage_entries { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); - for slot in hashed_slots { - storage_prefix_set.insert(Nibbles::unpack(slot)); - } - storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); - } + // Unwind storage hashes. + self.unwind_storage_hashing(changed_storages.iter().copied())?; // Unwind storage history indices. self.unwind_storage_history_indices(changed_storages.iter().copied())?; - // Calculate the reverted merkle root. - // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets - // are pre-loaded. - let prefix_sets = TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - storage_prefix_sets, - destroyed_accounts, - }; - let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) - .with_prefix_sets(prefix_sets) - .root_with_updates() - .map_err(reth_db_api::DatabaseError::from)?; - - let parent_number = range.start().saturating_sub(1); - let parent_state_root = self - .header_by_number(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? - .state_root(); - - // state root should be always correct as we are reverting state. - // but for sake of double verification we will check it again. - if new_state_root != parent_state_root { - let parent_hash = self - .block_hash(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; - return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { - root: GotExpected { got: new_state_root, expected: parent_state_root }, - block_number: parent_number, - block_hash: parent_hash, - }))) - } - self.write_trie_updates(&trie_updates)?; + // Unwind accounts/storages trie tables using the revert. + let trie_revert = self.trie_reverts(from)?; + self.write_trie_updates_sorted(&trie_revert)?; + + // Clear trie changesets which have been unwound. + self.clear_trie_changesets_from(from)?; Ok(()) } @@ -1773,6 +1730,10 @@ impl StateWriter // If we are writing the primary storage wipe transition, the pre-existing plain // storage state has to be taken from the database and written to storage history. // See [StorageWipe::Primary] for more details. + // + // TODO(mediocregopher): This could be rewritten in a way which doesn't require + // collecting wiped entries into a Vec like this, see + // `write_storage_trie_changesets`. let mut wiped_storage = Vec::new(); if wiped { tracing::trace!(?address, "Wiping storage"); @@ -2143,8 +2104,10 @@ impl StateWriter } impl TrieWriter for DatabaseProvider { - /// Writes trie updates. Returns the number of entries modified. - fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult { + /// Writes trie updates to the database with already sorted updates. + /// + /// Returns the number of entries modified. + fn write_trie_updates_sorted(&self, trie_updates: &TrieUpdatesSorted) -> ProviderResult { if trie_updates.is_empty() { return Ok(0) } @@ -2152,23 +2115,11 @@ impl TrieWriter for DatabaseProvider // Track the number of inserted entries. let mut num_entries = 0; - // Merge updated and removed nodes. Updated nodes must take precedence. 
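The write path removed above merged separately tracked updated and removed nodes at write time; the sorted variants below receive updates already ordered, and the `storage_trie_wiped_changeset_iter` helper added earlier in this patch joins two key-sorted streams in the same spirit using `itertools::merge_join_by`. A minimal standalone sketch of that join pattern, assuming only the itertools API and toy key/value pairs:

use itertools::{merge_join_by, EitherOrBoth};

fn main() {
    // Two streams sorted by key; entries from `updated` take precedence when
    // both sides contain the same key.
    let updated = vec![(1u8, "updated-1"), (3, "updated-3")];
    let existing = vec![(1u8, "existing-1"), (2, "existing-2"), (3, "existing-3")];

    let merged: Vec<(u8, &str)> = merge_join_by(updated, existing, |a, b| a.0.cmp(&b.0))
        .map(|either| match either {
            // Key only present in the updated stream.
            EitherOrBoth::Left(u) => u,
            // Key only present in the existing stream.
            EitherOrBoth::Right(e) => e,
            // Key present in both: the updated value wins.
            EitherOrBoth::Both(u, _e) => u,
        })
        .collect();

    assert_eq!(merged, vec![(1, "updated-1"), (2, "existing-2"), (3, "updated-3")]);
}

With both inputs sorted, the join is a single pass and never needs to buffer or re-sort either side.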
- let mut account_updates = trie_updates - .removed_nodes_ref() - .iter() - .filter_map(|n| { - (!trie_updates.account_nodes_ref().contains_key(n)).then_some((n, None)) - }) - .collect::>(); - account_updates.extend( - trie_updates.account_nodes_ref().iter().map(|(nibbles, node)| (nibbles, Some(node))), - ); - // Sort trie node updates. - account_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); - let tx = self.tx_ref(); let mut account_trie_cursor = tx.cursor_write::()?; - for (key, updated_node) in account_updates { + + // Process sorted account nodes + for (key, updated_node) in &trie_updates.account_nodes { let nibbles = StoredNibbles(*key); match updated_node { Some(node) => { @@ -2186,18 +2137,226 @@ impl TrieWriter for DatabaseProvider } } - num_entries += self.write_storage_trie_updates(trie_updates.storage_tries_ref().iter())?; + num_entries += + self.write_storage_trie_updates_sorted(trie_updates.storage_tries_ref().iter())?; + + Ok(num_entries) + } + + /// Records the current values of all trie nodes which will be updated using the `TrieUpdates` + /// into the trie changesets tables. + /// + /// The intended usage of this method is to call it _prior_ to calling `write_trie_updates` with + /// the same `TrieUpdates`. + /// + /// Returns the number of keys written. + fn write_trie_changesets( + &self, + block_number: BlockNumber, + trie_updates: &TrieUpdatesSorted, + updates_overlay: Option<&TrieUpdatesSorted>, + ) -> ProviderResult { + let mut num_entries = 0; + + let mut changeset_cursor = + self.tx_ref().cursor_dup_write::()?; + let curr_values_cursor = self.tx_ref().cursor_read::()?; + + // Wrap the cursor in DatabaseAccountTrieCursor + let mut db_account_cursor = DatabaseAccountTrieCursor::new(curr_values_cursor); + + // Static empty array for when updates_overlay is None + static EMPTY_ACCOUNT_UPDATES: Vec<(Nibbles, Option)> = Vec::new(); + + // Get the overlay updates for account trie, or use an empty array + let account_overlay_updates = updates_overlay + .map(|overlay| overlay.account_nodes_ref()) + .unwrap_or(&EMPTY_ACCOUNT_UPDATES); + + // Wrap the cursor in InMemoryTrieCursor with the overlay + let mut in_memory_account_cursor = + InMemoryTrieCursor::new(Some(&mut db_account_cursor), account_overlay_updates); + + for (path, _) in trie_updates.account_nodes_ref() { + num_entries += 1; + let node = in_memory_account_cursor.seek_exact(*path)?.map(|(_, node)| node); + changeset_cursor.append_dup( + block_number, + TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(*path), node }, + )?; + } + + let mut storage_updates = trie_updates.storage_tries.iter().collect::>(); + storage_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); + + num_entries += self.write_storage_trie_changesets( + block_number, + storage_updates.into_iter(), + updates_overlay, + )?; Ok(num_entries) } + + fn clear_trie_changesets(&self) -> ProviderResult<()> { + let tx = self.tx_ref(); + tx.clear::()?; + tx.clear::()?; + Ok(()) + } + + fn clear_trie_changesets_from(&self, from: BlockNumber) -> ProviderResult<()> { + let tx = self.tx_ref(); + { + let range = from..; + let mut cursor = tx.cursor_dup_write::()?; + let mut walker = cursor.walk_range(range)?; + + while walker.next().transpose()?.is_some() { + walker.delete_current()?; + } + } + + { + let range: RangeFrom = (from, B256::ZERO).into()..; + let mut cursor = tx.cursor_dup_write::()?; + let mut walker = cursor.walk_range(range)?; + + while walker.next().transpose()?.is_some() { + walker.delete_current()?; + } + } + + Ok(()) + } +} + +impl TrieReader for 
DatabaseProvider { + fn trie_reverts(&self, from: BlockNumber) -> ProviderResult { + let tx = self.tx_ref(); + + // Read account trie changes directly into a Vec - data is already sorted by nibbles + // within each block, and we want the oldest (first) version of each node + let mut account_nodes = Vec::new(); + let mut seen_account_keys = HashSet::new(); + let mut accounts_cursor = tx.cursor_dup_read::()?; + + for entry in accounts_cursor.walk_range(from..)? { + let (_, TrieChangeSetsEntry { nibbles, node }) = entry?; + // Only keep the first (oldest) version of each node + if seen_account_keys.insert(nibbles.0) { + account_nodes.push((nibbles.0, node)); + } + } + + // Read storage trie changes - data is sorted by (block, hashed_address, nibbles) + // Keep track of seen (address, nibbles) pairs to only keep the oldest version + let mut storage_tries = B256Map::>::default(); + let mut seen_storage_keys = HashSet::new(); + let mut storages_cursor = tx.cursor_dup_read::()?; + + // Create storage range starting from `from` block + let storage_range_start = BlockNumberHashedAddress((from, B256::ZERO)); + + for entry in storages_cursor.walk_range(storage_range_start..)? { + let ( + BlockNumberHashedAddress((_, hashed_address)), + TrieChangeSetsEntry { nibbles, node }, + ) = entry?; + + // Only keep the first (oldest) version of each node for this address + if seen_storage_keys.insert((hashed_address, nibbles.0)) { + storage_tries.entry(hashed_address).or_default().push((nibbles.0, node)); + } + } + + // Convert to StorageTrieUpdatesSorted + let storage_tries = storage_tries + .into_iter() + .map(|(address, nodes)| { + (address, StorageTrieUpdatesSorted { storage_nodes: nodes, is_deleted: false }) + }) + .collect(); + + Ok(TrieUpdatesSorted { account_nodes, storage_tries }) + } + + fn get_block_trie_updates( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + let tx = self.tx_ref(); + + // Step 1: Get the trie reverts for the state after the target block + let reverts = self.trie_reverts(block_number + 1)?; + + // Step 2: Create an InMemoryTrieCursorFactory with the reverts + // This gives us the trie state as it was after the target block was processed + let db_cursor_factory = DatabaseTrieCursorFactory::new(tx); + let cursor_factory = InMemoryTrieCursorFactory::new(db_cursor_factory, &reverts); + + // Step 3: Collect all account trie nodes that changed in the target block + let mut trie_updates = TrieUpdatesSorted::default(); + + // Walk through all account trie changes for this block + let mut accounts_trie_cursor = tx.cursor_dup_read::()?; + let mut account_cursor = cursor_factory.account_trie_cursor()?; + + for entry in accounts_trie_cursor.walk_dup(Some(block_number), None)? { + let (_, TrieChangeSetsEntry { nibbles, .. }) = entry?; + // Look up the current value of this trie node using the overlay cursor + let node_value = account_cursor.seek_exact(nibbles.0)?.map(|(_, node)| node); + trie_updates.account_nodes.push((nibbles.0, node_value)); + } + + // Step 4: Collect all storage trie nodes that changed in the target block + let mut storages_trie_cursor = tx.cursor_dup_read::()?; + let storage_range_start = BlockNumberHashedAddress((block_number, B256::ZERO)); + let storage_range_end = BlockNumberHashedAddress((block_number + 1, B256::ZERO)); + + let mut current_hashed_address = None; + let mut storage_cursor = None; + + for entry in storages_trie_cursor.walk_range(storage_range_start..storage_range_end)? 
{ + let ( + BlockNumberHashedAddress((_, hashed_address)), + TrieChangeSetsEntry { nibbles, .. }, + ) = entry?; + + // Check if we need to create a new storage cursor for a different account + if current_hashed_address != Some(hashed_address) { + storage_cursor = Some(cursor_factory.storage_trie_cursor(hashed_address)?); + current_hashed_address = Some(hashed_address); + } + + // Look up the current value of this storage trie node + let cursor = + storage_cursor.as_mut().expect("storage_cursor was just initialized above"); + let node_value = cursor.seek_exact(nibbles.0)?.map(|(_, node)| node); + trie_updates + .storage_tries + .entry(hashed_address) + .or_insert_with(|| StorageTrieUpdatesSorted { + storage_nodes: Vec::new(), + is_deleted: false, + }) + .storage_nodes + .push((nibbles.0, node_value)); + } + + Ok(trie_updates) + } } impl StorageTrieWriter for DatabaseProvider { - /// Writes storage trie updates from the given storage trie map. First sorts the storage trie - /// updates by the hashed address, writing in sorted order. - fn write_storage_trie_updates<'a>( + /// Writes storage trie updates from the given storage trie map with already sorted updates. + /// + /// Expects the storage trie updates to already be sorted by the hashed address key. + /// + /// Returns the number of entries modified. + fn write_storage_trie_updates_sorted<'a>( &self, - storage_tries: impl Iterator, + storage_tries: impl Iterator, ) -> ProviderResult { let mut num_entries = 0; let mut storage_tries = storage_tries.collect::>(); @@ -2207,12 +2366,110 @@ impl StorageTrieWriter for DatabaseP let mut db_storage_trie_cursor = DatabaseStorageTrieCursor::new(cursor, *hashed_address); num_entries += - db_storage_trie_cursor.write_storage_trie_updates(storage_trie_updates)?; + db_storage_trie_cursor.write_storage_trie_updates_sorted(storage_trie_updates)?; cursor = db_storage_trie_cursor.cursor; } Ok(num_entries) } + + /// Records the current values of all trie nodes which will be updated using the + /// `StorageTrieUpdates` into the storage trie changesets table. + /// + /// The intended usage of this method is to call it _prior_ to calling + /// `write_storage_trie_updates` with the same set of `StorageTrieUpdates`. + /// + /// Returns the number of keys written. + fn write_storage_trie_changesets<'a>( + &self, + block_number: BlockNumber, + storage_tries: impl Iterator, + updates_overlay: Option<&TrieUpdatesSorted>, + ) -> ProviderResult { + let mut num_written = 0; + + let mut changeset_cursor = + self.tx_ref().cursor_dup_write::()?; + + // We hold two cursors to the same table because we use them simultaneously when an + // account's storage is wiped. We keep them outside the for-loop so they can be re-used + // between accounts. + let changed_curr_values_cursor = self.tx_ref().cursor_dup_read::()?; + let wiped_nodes_cursor = self.tx_ref().cursor_dup_read::()?; + + // DatabaseStorageTrieCursor requires ownership of the cursor. The easiest way to deal with + // this is to create this outer variable with an initial dummy account, and overwrite it on + // every loop for every real account. 
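In the loop below, each database storage-trie cursor is rebuilt per account and layered under an `InMemoryTrieCursor` built from `updates_overlay`, so pre-images can also come from updates that have not been persisted yet. A minimal sketch of that overlay lookup order, using plain maps and hypothetical names in place of the real cursor types:

use std::collections::BTreeMap;

// Hypothetical stand-ins: `db` is the persisted trie table, `overlay` holds
// sorted in-memory updates that have not been written yet. An overlay entry
// of `None` marks a node as deleted in memory.
fn current_value<'a>(
    key: &u8,
    overlay: &'a BTreeMap<u8, Option<String>>,
    db: &'a BTreeMap<u8, String>,
) -> Option<&'a String> {
    match overlay.get(key) {
        Some(Some(node)) => Some(node), // overridden by an unpersisted update
        Some(None) => None,             // deleted by an unpersisted update
        None => db.get(key),            // fall through to the database value
    }
}

fn main() {
    let db = BTreeMap::from([
        (1u8, "db-1".to_string()),
        (2, "db-2".to_string()),
        (3, "db-3".to_string()),
    ]);
    let overlay = BTreeMap::from([(1u8, Some("mem-1".to_string())), (2, None)]);

    assert_eq!(current_value(&1, &overlay, &db).map(String::as_str), Some("mem-1"));
    assert_eq!(current_value(&2, &overlay, &db), None);
    assert_eq!(current_value(&3, &overlay, &db).map(String::as_str), Some("db-3"));
}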
+ let mut changed_curr_values_cursor = DatabaseStorageTrieCursor::new( + changed_curr_values_cursor, + B256::default(), // Will be set per iteration + ); + let mut wiped_nodes_cursor = DatabaseStorageTrieCursor::new( + wiped_nodes_cursor, + B256::default(), // Will be set per iteration + ); + + // Static empty array for when updates_overlay is None + static EMPTY_UPDATES: Vec<(Nibbles, Option)> = Vec::new(); + + for (hashed_address, storage_trie_updates) in storage_tries { + let changeset_key = BlockNumberHashedAddress((block_number, *hashed_address)); + + // Update the hashed address for the cursors + changed_curr_values_cursor = + DatabaseStorageTrieCursor::new(changed_curr_values_cursor.cursor, *hashed_address); + + // Get the overlay updates for this storage trie, or use an empty array + let overlay_updates = updates_overlay + .and_then(|overlay| overlay.storage_tries.get(hashed_address)) + .map(|updates| updates.storage_nodes_ref()) + .unwrap_or(&EMPTY_UPDATES); + + // Wrap the cursor in InMemoryTrieCursor with the overlay + let mut in_memory_changed_cursor = + InMemoryTrieCursor::new(Some(&mut changed_curr_values_cursor), overlay_updates); + + // Create an iterator which produces the current values of all updated paths, or None if + // they are currently unset. + let curr_values_of_changed = StorageTrieCurrentValuesIter::new( + storage_trie_updates.storage_nodes.iter().map(|e| e.0), + &mut in_memory_changed_cursor, + )?; + + if storage_trie_updates.is_deleted() { + // Create an iterator that starts from the beginning of the storage trie for this + // account + wiped_nodes_cursor = + DatabaseStorageTrieCursor::new(wiped_nodes_cursor.cursor, *hashed_address); + + // Wrap the wiped nodes cursor in InMemoryTrieCursor with the overlay + let mut in_memory_wiped_cursor = + InMemoryTrieCursor::new(Some(&mut wiped_nodes_cursor), overlay_updates); + + let all_nodes = TrieCursorIter::new(&mut in_memory_wiped_cursor); + + for wiped in storage_trie_wiped_changeset_iter(curr_values_of_changed, all_nodes)? 
{ + let (path, node) = wiped?; + num_written += 1; + changeset_cursor.append_dup( + changeset_key, + TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(path), node }, + )?; + } + } else { + for curr_value in curr_values_of_changed { + let (path, node) = curr_value?; + num_written += 1; + changeset_cursor.append_dup( + changeset_key, + TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(path), node }, + )?; + } + } + } + + Ok(num_written) + } } impl HashingWriter for DatabaseProvider { @@ -2507,7 +2764,7 @@ impl BlockExecu ) -> ProviderResult> { let range = block + 1..=self.last_block_number()?; - self.unwind_trie_state_range(range.clone())?; + self.unwind_trie_state_from(block + 1)?; // get execution res let execution_state = self.take_state_above(block)?; @@ -2525,9 +2782,7 @@ impl BlockExecu } fn remove_block_and_execution_above(&self, block: BlockNumber) -> ProviderResult<()> { - let range = block + 1..=self.last_block_number()?; - - self.unwind_trie_state_range(range)?; + self.unwind_trie_state_from(block + 1)?; // remove execution res self.remove_state_above(block)?; @@ -3139,4 +3394,1275 @@ mod tests { assert_eq!(range_result, individual_results); } + + #[test] + fn test_write_trie_changesets() { + use reth_db_api::models::BlockNumberHashedAddress; + use reth_trie::{BranchNodeCompact, StorageTrieEntry}; + + let factory = create_test_provider_factory(); + let provider_rw = factory.provider_rw().unwrap(); + + let block_number = 1u64; + + // Create some test nibbles and nodes + let account_nibbles1 = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); + let account_nibbles2 = Nibbles::from_nibbles([0x5, 0x6, 0x7, 0x8]); + + let node1 = BranchNodeCompact::new( + 0b1111_1111_1111_1111, // state_mask + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask + vec![], // hashes + None, // root hash + ); + + // Pre-populate AccountsTrie with a node that will be updated (for account_nibbles1) + { + let mut cursor = provider_rw.tx_ref().cursor_write::().unwrap(); + cursor.insert(StoredNibbles(account_nibbles1), &node1).unwrap(); + } + + // Create account trie updates: one Some (update) and one None (removal) + let account_nodes = vec![ + (account_nibbles1, Some(node1.clone())), // This will update existing node + (account_nibbles2, None), // This will be a removal (no existing node) + ]; + + // Create storage trie updates + let storage_address1 = B256::from([1u8; 32]); // Normal storage trie + let storage_address2 = B256::from([2u8; 32]); // Wiped storage trie + + let storage_nibbles1 = Nibbles::from_nibbles([0xa, 0xb]); + let storage_nibbles2 = Nibbles::from_nibbles([0xc, 0xd]); + let storage_nibbles3 = Nibbles::from_nibbles([0xe, 0xf]); + + let storage_node1 = BranchNodeCompact::new( + 0b1111_0000_0000_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + let storage_node2 = BranchNodeCompact::new( + 0b0000_1111_0000_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Create an old version of storage_node1 to prepopulate + let storage_node1_old = BranchNodeCompact::new( + 0b1010_0000_0000_0000, // Different mask to show it's an old value + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Pre-populate StoragesTrie for normal storage (storage_address1) + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + // Add node that will be updated (storage_nibbles1) with old value + let entry = StorageTrieEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: 
storage_node1_old.clone(), + }; + cursor.upsert(storage_address1, &entry).unwrap(); + } + + // Pre-populate StoragesTrie for wiped storage (storage_address2) + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + // Add node that will be updated (storage_nibbles1) + let entry1 = StorageTrieEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: storage_node1.clone(), + }; + cursor.upsert(storage_address2, &entry1).unwrap(); + // Add node that won't be updated but exists (storage_nibbles3) + let entry3 = StorageTrieEntry { + nibbles: StoredNibblesSubKey(storage_nibbles3), + node: storage_node2.clone(), + }; + cursor.upsert(storage_address2, &entry3).unwrap(); + } + + // Normal storage trie: one Some (update) and one None (new) + let storage_trie1 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1.clone())), // This will update existing node + (storage_nibbles2, None), // This is a new node + ], + }; + + // Wiped storage trie + let storage_trie2 = StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1.clone())), // Updated node already in db + (storage_nibbles2, Some(storage_node2.clone())), /* Updated node not in db + * storage_nibbles3 is in db + * but not updated */ + ], + }; + + let mut storage_tries = B256Map::default(); + storage_tries.insert(storage_address1, storage_trie1); + storage_tries.insert(storage_address2, storage_trie2); + + let trie_updates = TrieUpdatesSorted { account_nodes, storage_tries }; + + // Write the changesets + let num_written = + provider_rw.write_trie_changesets(block_number, &trie_updates, None).unwrap(); + + // Verify number of entries written + // Account changesets: 2 (one update, one removal) + // Storage changesets: + // - Normal storage: 2 (one update, one removal) + // - Wiped storage: 3 (two updated, one existing not updated) + // Total: 2 + 2 + 3 = 7 + assert_eq!(num_written, 7); + + // Verify account changesets were written correctly + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_read::().unwrap(); + + // Get all entries for this block to see what was written + let all_entries = cursor + .walk_dup(Some(block_number), None) + .unwrap() + .collect::, _>>() + .unwrap(); + + // Assert the full value of all_entries in a single assert_eq + assert_eq!( + all_entries, + vec![ + ( + block_number, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles1), + node: Some(node1), + } + ), + ( + block_number, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles2), + node: None, + } + ), + ] + ); + } + + // Verify storage changesets were written correctly + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_read::().unwrap(); + + // Check normal storage trie changesets + let key1 = BlockNumberHashedAddress((block_number, storage_address1)); + let entries1 = + cursor.walk_dup(Some(key1), None).unwrap().collect::, _>>().unwrap(); + + assert_eq!( + entries1, + vec![ + ( + key1, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: Some(storage_node1_old), // Old value that was prepopulated + } + ), + ( + key1, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: None, // New node, no previous value + } + ), + ] + ); + + // Check wiped storage trie changesets + let key2 = BlockNumberHashedAddress((block_number, storage_address2)); + let entries2 = + cursor.walk_dup(Some(key2), None).unwrap().collect::, _>>().unwrap(); 
+ + assert_eq!( + entries2, + vec![ + ( + key2, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: Some(storage_node1), // Was in db, so has old value + } + ), + ( + key2, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: None, // Was not in db + } + ), + ( + key2, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles3), + node: Some(storage_node2), // Existing node in wiped storage + } + ), + ] + ); + } + + provider_rw.commit().unwrap(); + } + + #[test] + fn test_write_trie_changesets_with_overlay() { + use reth_db_api::models::BlockNumberHashedAddress; + use reth_trie::BranchNodeCompact; + + let factory = create_test_provider_factory(); + let provider_rw = factory.provider_rw().unwrap(); + + let block_number = 1u64; + + // Create some test nibbles and nodes + let account_nibbles1 = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); + let account_nibbles2 = Nibbles::from_nibbles([0x5, 0x6, 0x7, 0x8]); + + let node1 = BranchNodeCompact::new( + 0b1111_1111_1111_1111, // state_mask + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask + vec![], // hashes + None, // root hash + ); + + // NOTE: Unlike the previous test, we're NOT pre-populating the database + // All node values will come from the overlay + + // Create the overlay with existing values that would normally be in the DB + let node1_old = BranchNodeCompact::new( + 0b1010_1010_1010_1010, // Different mask to show it's the overlay "existing" value + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Create overlay account nodes + let overlay_account_nodes = vec![ + (account_nibbles1, Some(node1_old.clone())), // This simulates existing node in overlay + ]; + + // Create account trie updates: one Some (update) and one None (removal) + let account_nodes = vec![ + (account_nibbles1, Some(node1)), // This will update overlay node + (account_nibbles2, None), // This will be a removal (no existing node) + ]; + + // Create storage trie updates + let storage_address1 = B256::from([1u8; 32]); // Normal storage trie + let storage_address2 = B256::from([2u8; 32]); // Wiped storage trie + + let storage_nibbles1 = Nibbles::from_nibbles([0xa, 0xb]); + let storage_nibbles2 = Nibbles::from_nibbles([0xc, 0xd]); + let storage_nibbles3 = Nibbles::from_nibbles([0xe, 0xf]); + + let storage_node1 = BranchNodeCompact::new( + 0b1111_0000_0000_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + let storage_node2 = BranchNodeCompact::new( + 0b0000_1111_0000_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Create old versions for overlay + let storage_node1_old = BranchNodeCompact::new( + 0b1010_0000_0000_0000, // Different mask to show it's an old value + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Create overlay storage nodes + let mut overlay_storage_tries = B256Map::default(); + + // Overlay for normal storage (storage_address1) + let overlay_storage_trie1 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1_old.clone())), /* Simulates existing in + * overlay */ + ], + }; + + // Overlay for wiped storage (storage_address2) + let overlay_storage_trie2 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1.clone())), // Existing in overlay + (storage_nibbles3, Some(storage_node2.clone())), // Also existing in overlay + ], 
+ }; + + overlay_storage_tries.insert(storage_address1, overlay_storage_trie1); + overlay_storage_tries.insert(storage_address2, overlay_storage_trie2); + + let overlay = TrieUpdatesSorted { + account_nodes: overlay_account_nodes, + storage_tries: overlay_storage_tries, + }; + + // Normal storage trie: one Some (update) and one None (new) + let storage_trie1 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1.clone())), // This will update overlay node + (storage_nibbles2, None), // This is a new node + ], + }; + + // Wiped storage trie + let storage_trie2 = StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1.clone())), // Updated node from overlay + (storage_nibbles2, Some(storage_node2.clone())), /* Updated node not in overlay + * storage_nibbles3 is in + * overlay + * but not updated */ + ], + }; + + let mut storage_tries = B256Map::default(); + storage_tries.insert(storage_address1, storage_trie1); + storage_tries.insert(storage_address2, storage_trie2); + + let trie_updates = TrieUpdatesSorted { account_nodes, storage_tries }; + + // Write the changesets WITH OVERLAY + let num_written = + provider_rw.write_trie_changesets(block_number, &trie_updates, Some(&overlay)).unwrap(); + + // Verify number of entries written + // Account changesets: 2 (one update from overlay, one removal) + // Storage changesets: + // - Normal storage: 2 (one update from overlay, one new) + // - Wiped storage: 3 (two updated, one existing from overlay not updated) + // Total: 2 + 2 + 3 = 7 + assert_eq!(num_written, 7); + + // Verify account changesets were written correctly + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_read::().unwrap(); + + // Get all entries for this block to see what was written + let all_entries = cursor + .walk_dup(Some(block_number), None) + .unwrap() + .collect::, _>>() + .unwrap(); + + // Assert the full value of all_entries in a single assert_eq + assert_eq!( + all_entries, + vec![ + ( + block_number, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles1), + node: Some(node1_old), // Value from overlay, not DB + } + ), + ( + block_number, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles2), + node: None, + } + ), + ] + ); + } + + // Verify storage changesets were written correctly + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_read::().unwrap(); + + // Check normal storage trie changesets + let key1 = BlockNumberHashedAddress((block_number, storage_address1)); + let entries1 = + cursor.walk_dup(Some(key1), None).unwrap().collect::, _>>().unwrap(); + + assert_eq!( + entries1, + vec![ + ( + key1, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: Some(storage_node1_old), // Old value from overlay + } + ), + ( + key1, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: None, // New node, no previous value + } + ), + ] + ); + + // Check wiped storage trie changesets + let key2 = BlockNumberHashedAddress((block_number, storage_address2)); + let entries2 = + cursor.walk_dup(Some(key2), None).unwrap().collect::, _>>().unwrap(); + + assert_eq!( + entries2, + vec![ + ( + key2, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: Some(storage_node1), // Value from overlay + } + ), + ( + key2, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: None, // Was not in overlay + } + ), + ( + key2, + 
TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles3), + node: Some(storage_node2), /* Existing node from overlay in wiped + * storage */ + } + ), + ] + ); + } + + provider_rw.commit().unwrap(); + } + + #[test] + fn test_clear_trie_changesets_from() { + use alloy_primitives::hex_literal::hex; + use reth_db_api::models::BlockNumberHashedAddress; + use reth_trie::{BranchNodeCompact, StoredNibblesSubKey, TrieChangeSetsEntry}; + + let factory = create_test_provider_factory(); + + // Create some test data for different block numbers + let block1 = 100u64; + let block2 = 101u64; + let block3 = 102u64; + let block4 = 103u64; + let block5 = 104u64; + + // Create test addresses for storage changesets + let storage_address1 = + B256::from(hex!("1111111111111111111111111111111111111111111111111111111111111111")); + let storage_address2 = + B256::from(hex!("2222222222222222222222222222222222222222222222222222222222222222")); + + // Create test nibbles + let nibbles1 = StoredNibblesSubKey(Nibbles::from_nibbles([0x1, 0x2, 0x3])); + let nibbles2 = StoredNibblesSubKey(Nibbles::from_nibbles([0x4, 0x5, 0x6])); + let nibbles3 = StoredNibblesSubKey(Nibbles::from_nibbles([0x7, 0x8, 0x9])); + + // Create test nodes + let node1 = BranchNodeCompact::new( + 0b1111_1111_1111_1111, + 0b1111_1111_1111_1111, + 0b0000_0000_0000_0001, + vec![B256::from(hex!( + "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" + ))], + None, + ); + let node2 = BranchNodeCompact::new( + 0b1111_1111_1111_1110, + 0b1111_1111_1111_1110, + 0b0000_0000_0000_0010, + vec![B256::from(hex!( + "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890" + ))], + Some(B256::from(hex!( + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" + ))), + ); + + // Populate AccountsTrieChangeSets with data across multiple blocks + { + let provider_rw = factory.provider_rw().unwrap(); + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + + // Block 100: 2 entries (will be kept - before start block) + cursor + .upsert( + block1, + &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node1.clone()) }, + ) + .unwrap(); + cursor + .upsert(block1, &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: None }) + .unwrap(); + + // Block 101: 3 entries with duplicates (will be deleted - from this block onwards) + cursor + .upsert( + block2, + &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node2.clone()) }, + ) + .unwrap(); + cursor + .upsert( + block2, + &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node1.clone()) }, + ) + .unwrap(); // duplicate key + cursor + .upsert(block2, &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: None }) + .unwrap(); + + // Block 102: 2 entries (will be deleted - after start block) + cursor + .upsert( + block3, + &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node1.clone()) }, + ) + .unwrap(); + cursor + .upsert( + block3, + &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: Some(node2.clone()) }, + ) + .unwrap(); + + // Block 103: 1 entry (will be deleted - after start block) + cursor + .upsert(block4, &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: None }) + .unwrap(); + + // Block 104: 2 entries (will be deleted - after start block) + cursor + .upsert( + block5, + &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node2.clone()) }, + ) + .unwrap(); + cursor + .upsert(block5, &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: None }) + .unwrap(); + + 
provider_rw.commit().unwrap(); + } + + // Populate StoragesTrieChangeSets with data across multiple blocks + { + let provider_rw = factory.provider_rw().unwrap(); + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + + // Block 100, address1: 2 entries (will be kept - before start block) + let key1_block1 = BlockNumberHashedAddress((block1, storage_address1)); + cursor + .upsert( + key1_block1, + &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node1.clone()) }, + ) + .unwrap(); + cursor + .upsert(key1_block1, &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: None }) + .unwrap(); + + // Block 101, address1: 3 entries with duplicates (will be deleted - from this block + // onwards) + let key1_block2 = BlockNumberHashedAddress((block2, storage_address1)); + cursor + .upsert( + key1_block2, + &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node2.clone()) }, + ) + .unwrap(); + cursor + .upsert(key1_block2, &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: None }) + .unwrap(); // duplicate key + cursor + .upsert( + key1_block2, + &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node1.clone()) }, + ) + .unwrap(); + + // Block 102, address2: 2 entries (will be deleted - after start block) + let key2_block3 = BlockNumberHashedAddress((block3, storage_address2)); + cursor + .upsert( + key2_block3, + &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node2.clone()) }, + ) + .unwrap(); + cursor + .upsert(key2_block3, &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: None }) + .unwrap(); + + // Block 103, address1: 2 entries with duplicate (will be deleted - after start block) + let key1_block4 = BlockNumberHashedAddress((block4, storage_address1)); + cursor + .upsert( + key1_block4, + &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: Some(node1) }, + ) + .unwrap(); + cursor + .upsert( + key1_block4, + &TrieChangeSetsEntry { nibbles: nibbles3, node: Some(node2.clone()) }, + ) + .unwrap(); // duplicate key + + // Block 104, address2: 2 entries (will be deleted - after start block) + let key2_block5 = BlockNumberHashedAddress((block5, storage_address2)); + cursor + .upsert(key2_block5, &TrieChangeSetsEntry { nibbles: nibbles1, node: None }) + .unwrap(); + cursor + .upsert(key2_block5, &TrieChangeSetsEntry { nibbles: nibbles2, node: Some(node2) }) + .unwrap(); + + provider_rw.commit().unwrap(); + } + + // Clear all changesets from block 101 onwards + { + let provider_rw = factory.provider_rw().unwrap(); + provider_rw.clear_trie_changesets_from(block2).unwrap(); + provider_rw.commit().unwrap(); + } + + // Verify AccountsTrieChangeSets after clearing + { + let provider = factory.provider().unwrap(); + let mut cursor = + provider.tx_ref().cursor_dup_read::().unwrap(); + + // Block 100 should still exist (before range) + let block1_entries = cursor + .walk_dup(Some(block1), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(block1_entries.len(), 2, "Block 100 entries should be preserved"); + assert_eq!(block1_entries[0].0, block1); + assert_eq!(block1_entries[1].0, block1); + + // Blocks 101-104 should be deleted + let block2_entries = cursor + .walk_dup(Some(block2), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block2_entries.is_empty(), "Block 101 entries should be deleted"); + + let block3_entries = cursor + .walk_dup(Some(block3), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block3_entries.is_empty(), "Block 102 entries should be deleted"); + + let block4_entries = 
cursor + .walk_dup(Some(block4), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block4_entries.is_empty(), "Block 103 entries should be deleted"); + + // Block 104 should also be deleted + let block5_entries = cursor + .walk_dup(Some(block5), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block5_entries.is_empty(), "Block 104 entries should be deleted"); + } + + // Verify StoragesTrieChangeSets after clearing + { + let provider = factory.provider().unwrap(); + let mut cursor = + provider.tx_ref().cursor_dup_read::().unwrap(); + + // Block 100 entries should still exist (before range) + let key1_block1 = BlockNumberHashedAddress((block1, storage_address1)); + let block1_entries = cursor + .walk_dup(Some(key1_block1), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(block1_entries.len(), 2, "Block 100 storage entries should be preserved"); + + // Blocks 101-104 entries should be deleted + let key1_block2 = BlockNumberHashedAddress((block2, storage_address1)); + let block2_entries = cursor + .walk_dup(Some(key1_block2), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block2_entries.is_empty(), "Block 101 storage entries should be deleted"); + + let key2_block3 = BlockNumberHashedAddress((block3, storage_address2)); + let block3_entries = cursor + .walk_dup(Some(key2_block3), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block3_entries.is_empty(), "Block 102 storage entries should be deleted"); + + let key1_block4 = BlockNumberHashedAddress((block4, storage_address1)); + let block4_entries = cursor + .walk_dup(Some(key1_block4), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block4_entries.is_empty(), "Block 103 storage entries should be deleted"); + + // Block 104 entries should also be deleted + let key2_block5 = BlockNumberHashedAddress((block5, storage_address2)); + let block5_entries = cursor + .walk_dup(Some(key2_block5), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block5_entries.is_empty(), "Block 104 storage entries should be deleted"); + } + } + + #[test] + fn test_write_trie_updates_sorted() { + use reth_trie::{ + updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, + BranchNodeCompact, StorageTrieEntry, + }; + + let factory = create_test_provider_factory(); + let provider_rw = factory.provider_rw().unwrap(); + + // Pre-populate account trie with data that will be deleted + { + let tx = provider_rw.tx_ref(); + let mut cursor = tx.cursor_write::().unwrap(); + + // Add account node that will be deleted + let to_delete = StoredNibbles(Nibbles::from_nibbles([0x3, 0x4])); + cursor + .upsert( + to_delete, + &BranchNodeCompact::new( + 0b1010_1010_1010_1010, // state_mask + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask + vec![], + None, + ), + ) + .unwrap(); + + // Add account node that will be updated + let to_update = StoredNibbles(Nibbles::from_nibbles([0x1, 0x2])); + cursor + .upsert( + to_update, + &BranchNodeCompact::new( + 0b0101_0101_0101_0101, // old state_mask (will be updated) + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask + vec![], + None, + ), + ) + .unwrap(); + } + + // Pre-populate storage tries with data + let storage_address1 = B256::from([1u8; 32]); + let storage_address2 = B256::from([2u8; 32]); + { + let tx = provider_rw.tx_ref(); + let mut storage_cursor = tx.cursor_dup_write::().unwrap(); + + // Add storage nodes for address1 (one will be deleted) + storage_cursor + .upsert( + storage_address1, + 
&StorageTrieEntry { + nibbles: StoredNibblesSubKey(Nibbles::from_nibbles([0x2, 0x0])), + node: BranchNodeCompact::new( + 0b0011_0011_0011_0011, // will be deleted + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ), + }, + ) + .unwrap(); + + // Add storage nodes for address2 (will be wiped) + storage_cursor + .upsert( + storage_address2, + &StorageTrieEntry { + nibbles: StoredNibblesSubKey(Nibbles::from_nibbles([0xa, 0xb])), + node: BranchNodeCompact::new( + 0b1100_1100_1100_1100, // will be wiped + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ), + }, + ) + .unwrap(); + storage_cursor + .upsert( + storage_address2, + &StorageTrieEntry { + nibbles: StoredNibblesSubKey(Nibbles::from_nibbles([0xc, 0xd])), + node: BranchNodeCompact::new( + 0b0011_1100_0011_1100, // will be wiped + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ), + }, + ) + .unwrap(); + } + + // Create sorted account trie updates + let account_nodes = vec![ + ( + Nibbles::from_nibbles([0x1, 0x2]), + Some(BranchNodeCompact::new( + 0b1111_1111_1111_1111, // state_mask (updated) + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask (no hashes) + vec![], + None, + )), + ), + (Nibbles::from_nibbles([0x3, 0x4]), None), // Deletion + ( + Nibbles::from_nibbles([0x5, 0x6]), + Some(BranchNodeCompact::new( + 0b1111_1111_1111_1111, // state_mask + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask (no hashes) + vec![], + None, + )), + ), + ]; + + // Create sorted storage trie updates + let storage_trie1 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + ( + Nibbles::from_nibbles([0x1, 0x0]), + Some(BranchNodeCompact::new( + 0b1111_0000_0000_0000, // state_mask + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask (no hashes) + vec![], + None, + )), + ), + (Nibbles::from_nibbles([0x2, 0x0]), None), // Deletion of existing node + ], + }; + + let storage_trie2 = StorageTrieUpdatesSorted { + is_deleted: true, // Wipe all storage for this address + storage_nodes: vec![], + }; + + let mut storage_tries = B256Map::default(); + storage_tries.insert(storage_address1, storage_trie1); + storage_tries.insert(storage_address2, storage_trie2); + + let trie_updates = TrieUpdatesSorted { account_nodes, storage_tries }; + + // Write the sorted trie updates + let num_entries = provider_rw.write_trie_updates_sorted(&trie_updates).unwrap(); + + // We should have 2 account insertions + 1 account deletion + 1 storage insertion + 1 + // storage deletion = 5 + assert_eq!(num_entries, 5); + + // Verify account trie updates were written correctly + let tx = provider_rw.tx_ref(); + let mut cursor = tx.cursor_read::().unwrap(); + + // Check first account node was updated + let nibbles1 = StoredNibbles(Nibbles::from_nibbles([0x1, 0x2])); + let entry1 = cursor.seek_exact(nibbles1).unwrap(); + assert!(entry1.is_some(), "Updated account node should exist"); + let expected_mask = reth_trie::TrieMask::new(0b1111_1111_1111_1111); + assert_eq!( + entry1.unwrap().1.state_mask, + expected_mask, + "Account node should have updated state_mask" + ); + + // Check deleted account node no longer exists + let nibbles2 = StoredNibbles(Nibbles::from_nibbles([0x3, 0x4])); + let entry2 = cursor.seek_exact(nibbles2).unwrap(); + assert!(entry2.is_none(), "Deleted account node should not exist"); + + // Check new account node exists + let nibbles3 = StoredNibbles(Nibbles::from_nibbles([0x5, 0x6])); + let entry3 = 
cursor.seek_exact(nibbles3).unwrap(); + assert!(entry3.is_some(), "New account node should exist"); + + // Verify storage trie updates were written correctly + let mut storage_cursor = tx.cursor_dup_read::().unwrap(); + + // Check storage for address1 + let storage_entries1: Vec<_> = storage_cursor + .walk_dup(Some(storage_address1), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!( + storage_entries1.len(), + 1, + "Storage address1 should have 1 entry after deletion" + ); + assert_eq!( + storage_entries1[0].1.nibbles.0, + Nibbles::from_nibbles([0x1, 0x0]), + "Remaining entry should be [0x1, 0x0]" + ); + + // Check storage for address2 was wiped + let storage_entries2: Vec<_> = storage_cursor + .walk_dup(Some(storage_address2), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(storage_entries2.len(), 0, "Storage address2 should be empty after wipe"); + + provider_rw.commit().unwrap(); + } + + #[test] + fn test_get_block_trie_updates() { + use reth_db_api::models::BlockNumberHashedAddress; + use reth_trie::{BranchNodeCompact, StorageTrieEntry}; + + let factory = create_test_provider_factory(); + let provider_rw = factory.provider_rw().unwrap(); + + let target_block = 2u64; + let next_block = 3u64; + + // Create test nibbles and nodes for accounts + let account_nibbles1 = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); + let account_nibbles2 = Nibbles::from_nibbles([0x5, 0x6, 0x7, 0x8]); + let account_nibbles3 = Nibbles::from_nibbles([0x9, 0xa, 0xb, 0xc]); + + let node1 = BranchNodeCompact::new( + 0b1111_1111_0000_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + let node2 = BranchNodeCompact::new( + 0b0000_0000_1111_1111, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + let node3 = BranchNodeCompact::new( + 0b1010_1010_1010_1010, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Pre-populate AccountsTrie with nodes that will be the final state + { + let mut cursor = provider_rw.tx_ref().cursor_write::().unwrap(); + cursor.insert(StoredNibbles(account_nibbles1), &node1).unwrap(); + cursor.insert(StoredNibbles(account_nibbles2), &node2).unwrap(); + // account_nibbles3 will be deleted (not in final state) + } + + // Insert trie changesets for target_block + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + // nibbles1 was updated in target_block (old value stored) + cursor + .append_dup( + target_block, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles1), + node: Some(BranchNodeCompact::new( + 0b1111_0000_0000_0000, // old value + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + )), + }, + ) + .unwrap(); + // nibbles2 was created in target_block (no old value) + cursor + .append_dup( + target_block, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles2), + node: None, + }, + ) + .unwrap(); + } + + // Insert trie changesets for next_block (to test overlay) + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + // nibbles3 was deleted in next_block (old value stored) + cursor + .append_dup( + next_block, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles3), + node: Some(node3), + }, + ) + .unwrap(); + } + + // Storage trie updates + let storage_address1 = B256::from([1u8; 32]); + let storage_nibbles1 = Nibbles::from_nibbles([0xa, 0xb]); + let storage_nibbles2 = Nibbles::from_nibbles([0xc, 0xd]); + + let storage_node1 = BranchNodeCompact::new( + 
0b1111_1111_1111_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + let storage_node2 = BranchNodeCompact::new( + 0b0101_0101_0101_0101, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Pre-populate StoragesTrie with final state + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + cursor + .upsert( + storage_address1, + &StorageTrieEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: storage_node1.clone(), + }, + ) + .unwrap(); + // storage_nibbles2 was deleted in next_block, so it's not in final state + } + + // Insert storage trie changesets for target_block + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + let key = BlockNumberHashedAddress((target_block, storage_address1)); + + // storage_nibbles1 was updated + cursor + .append_dup( + key, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: Some(BranchNodeCompact::new( + 0b0000_0000_1111_1111, // old value + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + )), + }, + ) + .unwrap(); + + // storage_nibbles2 was created + cursor + .append_dup( + key, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: None, + }, + ) + .unwrap(); + } + + // Insert storage trie changesets for next_block (to test overlay) + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + let key = BlockNumberHashedAddress((next_block, storage_address1)); + + // storage_nibbles2 was deleted in next_block + cursor + .append_dup( + key, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: Some(BranchNodeCompact::new( + 0b0101_0101_0101_0101, // value that was deleted + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + )), + }, + ) + .unwrap(); + } + + provider_rw.commit().unwrap(); + + // Now test get_block_trie_updates + let provider = factory.provider().unwrap(); + let result = provider.get_block_trie_updates(target_block).unwrap(); + + // Verify account trie updates + assert_eq!(result.account_nodes.len(), 2, "Should have 2 account trie updates"); + + // Check nibbles1 - should have the current value (node1) + let nibbles1_update = result + .account_nodes + .iter() + .find(|(n, _)| n == &account_nibbles1) + .expect("Should find nibbles1"); + assert!(nibbles1_update.1.is_some(), "nibbles1 should have a value"); + assert_eq!( + nibbles1_update.1.as_ref().unwrap().state_mask, + node1.state_mask, + "nibbles1 should have current value" + ); + + // Check nibbles2 - should have the current value (node2) + let nibbles2_update = result + .account_nodes + .iter() + .find(|(n, _)| n == &account_nibbles2) + .expect("Should find nibbles2"); + assert!(nibbles2_update.1.is_some(), "nibbles2 should have a value"); + assert_eq!( + nibbles2_update.1.as_ref().unwrap().state_mask, + node2.state_mask, + "nibbles2 should have current value" + ); + + // nibbles3 should NOT be in the result (it was changed in next_block, not target_block) + assert!( + !result.account_nodes.iter().any(|(n, _)| n == &account_nibbles3), + "nibbles3 should not be in target_block updates" + ); + + // Verify storage trie updates + assert_eq!(result.storage_tries.len(), 1, "Should have 1 storage trie"); + let storage_updates = result + .storage_tries + .get(&storage_address1) + .expect("Should have storage updates for address1"); + + assert_eq!(storage_updates.storage_nodes.len(), 2, "Should have 2 storage node updates"); + + // Check 
storage_nibbles1 - should have current value + let storage1_update = storage_updates + .storage_nodes + .iter() + .find(|(n, _)| n == &storage_nibbles1) + .expect("Should find storage_nibbles1"); + assert!(storage1_update.1.is_some(), "storage_nibbles1 should have a value"); + assert_eq!( + storage1_update.1.as_ref().unwrap().state_mask, + storage_node1.state_mask, + "storage_nibbles1 should have current value" + ); + + // Check storage_nibbles2 - was created in target_block, will be deleted in next_block + // So it should have a value (the value that will be deleted) + let storage2_update = storage_updates + .storage_nodes + .iter() + .find(|(n, _)| n == &storage_nibbles2) + .expect("Should find storage_nibbles2"); + assert!( + storage2_update.1.is_some(), + "storage_nibbles2 should have a value (the node that will be deleted in next block)" + ); + assert_eq!( + storage2_update.1.as_ref().unwrap().state_mask, + storage_node2.state_mask, + "storage_nibbles2 should have the value that was created and will be deleted" + ); + } } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index ab54fe01e56..5a950bbd7d2 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -17,7 +17,7 @@ mod state; pub use state::{ historical::{HistoricalStateProvider, HistoricalStateProviderRef, LowestAvailableBlocks}, latest::{LatestStateProvider, LatestStateProviderRef}, - overlay::OverlayStateProvider, + overlay::{OverlayStateProvider, OverlayStateProviderFactory}, }; mod consistent_view; diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 7e6a40efef2..71c1a693193 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -1,15 +1,143 @@ -use alloy_primitives::B256; +use alloy_primitives::{BlockNumber, B256}; use reth_db_api::DatabaseError; -use reth_storage_api::DBProvider; +use reth_errors::ProviderError; +use reth_stages_types::StageId; +use reth_storage_api::{DBProvider, DatabaseProviderFactory, StageCheckpointReader, TrieReader}; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdatesSorted, - HashedPostStateSorted, + HashedPostState, HashedPostStateSorted, KeccakKeyHasher, +}; +use reth_trie_db::{ + DatabaseHashedCursorFactory, DatabaseHashedPostState, DatabaseTrieCursorFactory, }; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use std::sync::Arc; +/// Factory for creating overlay state providers with optional reverts and overlays. +/// +/// This factory allows building an `OverlayStateProvider` whose DB state has been reverted to a +/// particular block, and/or with additional overlay information added on top. 
+#[derive(Debug, Clone)] +pub struct OverlayStateProviderFactory { + /// The underlying database provider factory + factory: F, + /// Optional block number for collecting reverts + block_number: Option, + /// Optional trie overlay + trie_overlay: Option>, + /// Optional hashed state overlay + hashed_state_overlay: Option>, +} + +impl OverlayStateProviderFactory +where + F: DatabaseProviderFactory, + F::Provider: Clone + TrieReader + StageCheckpointReader, +{ + /// Create a new overlay state provider factory + pub const fn new(factory: F) -> Self { + Self { factory, block_number: None, trie_overlay: None, hashed_state_overlay: None } + } + + /// Set the block number for collecting reverts + pub const fn with_block_number(mut self, block_number: Option) -> Self { + self.block_number = block_number; + self + } + + /// Set the trie overlay + pub fn with_trie_overlay(mut self, trie_overlay: Option>) -> Self { + self.trie_overlay = trie_overlay; + self + } + + /// Set the hashed state overlay + pub fn with_hashed_state_overlay( + mut self, + hashed_state_overlay: Option>, + ) -> Self { + self.hashed_state_overlay = hashed_state_overlay; + self + } + + /// Validates that there are sufficient changesets to revert to the requested block number. + /// + /// Returns an error if the `MerkleChangeSets` checkpoint doesn't cover the requested block. + fn validate_changesets_availability( + &self, + provider: &F::Provider, + requested_block: BlockNumber, + ) -> Result<(), ProviderError> { + // Get the MerkleChangeSets stage checkpoint - let errors propagate as-is + let checkpoint = provider.get_stage_checkpoint(StageId::MerkleChangeSets)?; + + // If there's no checkpoint at all or block range details are missing, we can't revert + let available_range = checkpoint + .and_then(|chk| { + chk.merkle_changesets_stage_checkpoint() + .map(|stage_chk| stage_chk.block_range.from..=chk.block_number) + }) + .ok_or_else(|| ProviderError::InsufficientChangesets { + requested: requested_block, + available: 0..=0, + })?; + + // Check if the requested block is within the available range + if !available_range.contains(&requested_block) { + return Err(ProviderError::InsufficientChangesets { + requested: requested_block, + available: available_range, + }); + } + + Ok(()) + } + + /// Create a read-only [`OverlayStateProvider`]. 
+ pub fn provider_ro(&self) -> Result, ProviderError> { + // Get a read-only provider + let provider = self.factory.database_provider_ro()?; + + // If block_number is provided, collect reverts + let (trie_updates, hashed_state) = if let Some(from_block) = self.block_number { + // Validate that we have sufficient changesets for the requested block + self.validate_changesets_availability(&provider, from_block)?; + + // Collect trie reverts + let mut trie_updates_mut = provider.trie_reverts(from_block)?; + + // Collect state reverts using HashedPostState::from_reverts + let reverted_state = + HashedPostState::from_reverts::(provider.tx_ref(), from_block..)?; + let mut hashed_state_mut = reverted_state.into_sorted(); + + // Extend with overlays if provided + if let Some(trie_overlay) = &self.trie_overlay { + trie_updates_mut.extend_ref(trie_overlay); + } + + if let Some(hashed_state_overlay) = &self.hashed_state_overlay { + hashed_state_mut.extend_ref(hashed_state_overlay); + } + + (Arc::new(trie_updates_mut), Arc::new(hashed_state_mut)) + } else { + // If no block_number, use overlays directly or defaults + let trie_updates = + self.trie_overlay.clone().unwrap_or_else(|| Arc::new(TrieUpdatesSorted::default())); + let hashed_state = self + .hashed_state_overlay + .clone() + .unwrap_or_else(|| Arc::new(HashedPostStateSorted::default())); + + (trie_updates, hashed_state) + }; + + Ok(OverlayStateProvider::new(provider, trie_updates, hashed_state)) + } +} + /// State provider with in-memory overlay from trie updates and hashed post state. /// /// This provider uses in-memory trie updates and hashed post state as an overlay diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 1024312ead9..3e33e2b0509 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -34,12 +34,13 @@ use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, BytecodeReader, DBProvider, DatabaseProviderFactory, HashedPostStateProvider, NodePrimitivesProvider, StageCheckpointReader, StateProofProvider, - StorageRootProvider, + StorageRootProvider, TrieReader, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, - MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, + updates::{TrieUpdates, TrieUpdatesSorted}, + AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, + StorageProof, TrieInput, }; use std::{ collections::BTreeMap, @@ -1005,6 +1006,19 @@ impl StateReader for MockEthProvider< } } +impl TrieReader for MockEthProvider { + fn trie_reverts(&self, _from: BlockNumber) -> ProviderResult { + Ok(TrieUpdatesSorted::default()) + } + + fn get_block_trie_updates( + &self, + _block_number: BlockNumber, + ) -> ProviderResult { + Ok(TrieUpdatesSorted::default()) + } +} + impl CanonStateSubscriptions for MockEthProvider { diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index d65655de8bf..ccda2d60e85 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -89,7 +89,7 @@ pub fn insert_genesis>( let (root, updates) = StateRoot::from_tx(provider.tx_ref()) .root_with_updates() .map_err(reth_db::DatabaseError::from)?; - provider.write_trie_updates(&updates).unwrap(); + 
provider.write_trie_updates(updates).unwrap(); provider.commit()?; diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 374a35f473c..710ca9400ed 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -3,7 +3,7 @@ use crate::{ AccountReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, HashedPostStateProvider, StageCheckpointReader, StateProviderFactory, - StateReader, StaticFileProviderFactory, + StateReader, StaticFileProviderFactory, TrieReader, }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; @@ -12,7 +12,7 @@ use std::fmt::Debug; /// Helper trait to unify all provider traits for simplicity. pub trait FullProvider: - DatabaseProviderFactory + DatabaseProviderFactory + NodePrimitivesProvider + StaticFileProviderFactory + BlockReaderIdExt< @@ -37,7 +37,7 @@ pub trait FullProvider: } impl FullProvider for T where - T: DatabaseProviderFactory + T: DatabaseProviderFactory + NodePrimitivesProvider + StaticFileProviderFactory + BlockReaderIdExt< diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 1151990f97b..6d990e17a49 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -909,7 +909,7 @@ mod tests { } let (_, updates) = StateRoot::from_tx(tx).root_with_updates().unwrap(); - provider_rw.write_trie_updates(&updates).unwrap(); + provider_rw.write_trie_updates(updates).unwrap(); let mut state = State::builder().with_bundle_update().build(); @@ -1127,7 +1127,10 @@ mod tests { assert_eq!(storage_root, storage_root_prehashed(init_storage.storage)); assert!(!storage_updates.is_empty()); provider_rw - .write_storage_trie_updates(core::iter::once((&hashed_address, &storage_updates))) + .write_storage_trie_updates_sorted(core::iter::once(( + &hashed_address, + &storage_updates.into_sorted(), + ))) .unwrap(); // destroy the storage and re-create with new slots diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index e0c57d5226b..6b70a5260a6 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -6,7 +6,7 @@ use crate::{ HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProofProvider, StateProvider, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, - StorageRootProvider, TransactionVariant, TransactionsProvider, + StorageRootProvider, TransactionVariant, TransactionsProvider, TrieReader, }; #[cfg(feature = "db-api")] @@ -35,8 +35,9 @@ use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie_common::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, - MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, + updates::{TrieUpdates, TrieUpdatesSorted}, + AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, + StorageProof, TrieInput, }; /// Supports various api interfaces for testing purposes. 
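For orientation, the `OverlayStateProviderFactory` introduced in overlay.rs above is a plain builder: setting a block number makes `provider_ro` collect trie and hashed-state reverts down to that block (and return `InsufficientChangesets` if the `MerkleChangeSets` checkpoint does not cover it), with any overlays layered on top afterwards. A minimal sketch of how a caller might drive it, assuming the `reth_provider::providers` re-export path and a `provider_factory` value that satisfies the factory's bounds; this is an illustration, not part of the patch:

    use std::sync::Arc;

    use reth_errors::ProviderError;
    use reth_provider::providers::OverlayStateProviderFactory; // re-export path assumed
    use reth_storage_api::{DatabaseProviderFactory, StageCheckpointReader, TrieReader};
    use reth_trie::updates::TrieUpdatesSorted;

    /// Opens a read-only overlay view reverted to the state before `block` was processed.
    fn open_reverted_view<F>(provider_factory: F, block: u64) -> Result<(), ProviderError>
    where
        F: DatabaseProviderFactory,
        F::Provider: Clone + TrieReader + StageCheckpointReader,
    {
        // An empty overlay stands in for updates produced by some in-memory computation.
        let overlay = Arc::new(TrieUpdatesSorted::default());

        // Because a block number is set, `provider_ro` collects reverts first and fails
        // with `InsufficientChangesets` if the checkpoint does not cover `block`.
        let _provider = OverlayStateProviderFactory::new(provider_factory)
            .with_block_number(Some(block))
            .with_trie_overlay(Some(overlay))
            .provider_ro()?;

        Ok(())
    }
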
@@ -59,7 +60,7 @@ impl NoopProvider { #[cfg(feature = "db-api")] tx: TxMock::default(), #[cfg(feature = "db-api")] - prune_modes: PruneModes::none(), + prune_modes: PruneModes::default(), _phantom: Default::default(), } } @@ -73,7 +74,7 @@ impl NoopProvider { #[cfg(feature = "db-api")] tx: TxMock::default(), #[cfg(feature = "db-api")] - prune_modes: PruneModes::none(), + prune_modes: PruneModes::default(), _phantom: Default::default(), } } @@ -646,6 +647,19 @@ impl DBProvider for NoopProvider TrieReader for NoopProvider { + fn trie_reverts(&self, _from: BlockNumber) -> ProviderResult { + Ok(TrieUpdatesSorted::default()) + } + + fn get_block_trie_updates( + &self, + _block_number: BlockNumber, + ) -> ProviderResult { + Ok(TrieUpdatesSorted::default()) + } +} + #[cfg(feature = "db-api")] impl DatabaseProviderFactory for NoopProvider diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index 3f39cf3838d..9ff02c106e5 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -1,8 +1,8 @@ use alloc::vec::Vec; -use alloy_primitives::{Address, Bytes, B256}; +use alloy_primitives::{Address, BlockNumber, Bytes, B256}; use reth_storage_errors::provider::ProviderResult; use reth_trie_common::{ - updates::{StorageTrieUpdates, TrieUpdates}, + updates::{StorageTrieUpdatesSorted, TrieUpdates, TrieUpdatesSorted}, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; @@ -89,25 +89,93 @@ pub trait StateProofProvider: Send + Sync { fn witness(&self, input: TrieInput, target: HashedPostState) -> ProviderResult>; } +/// Trie Reader +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait TrieReader: Send + Sync { + /// Returns the [`TrieUpdatesSorted`] for reverting the trie database to its state prior to the + /// given block and onwards having been processed. + fn trie_reverts(&self, from: BlockNumber) -> ProviderResult; + + /// Returns the trie updates that were applied by the specified block. + fn get_block_trie_updates( + &self, + block_number: BlockNumber, + ) -> ProviderResult; +} + /// Trie Writer #[auto_impl::auto_impl(&, Arc, Box)] pub trait TrieWriter: Send + Sync { /// Writes trie updates to the database. /// /// Returns the number of entries modified. - fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult; + fn write_trie_updates(&self, trie_updates: TrieUpdates) -> ProviderResult { + self.write_trie_updates_sorted(&trie_updates.into_sorted()) + } + + /// Writes trie updates to the database with already sorted updates. + /// + /// Returns the number of entries modified. + fn write_trie_updates_sorted(&self, trie_updates: &TrieUpdatesSorted) -> ProviderResult; + + /// Records the current values of all trie nodes which will be updated using the [`TrieUpdates`] + /// into the trie changesets tables. + /// + /// The intended usage of this method is to call it _prior_ to calling `write_trie_updates` with + /// the same [`TrieUpdates`]. + /// + /// The `updates_overlay` parameter allows providing additional in-memory trie updates that + /// should be considered when looking up current node values. When provided, these overlay + /// updates are applied on top of the database state, allowing the method to see a view that + /// includes both committed database values and pending in-memory changes. This is useful + /// when writing changesets for updates that depend on previous uncommitted trie changes. 
+ /// + /// Returns the number of keys written. + fn write_trie_changesets( + &self, + block_number: BlockNumber, + trie_updates: &TrieUpdatesSorted, + updates_overlay: Option<&TrieUpdatesSorted>, + ) -> ProviderResult; + + /// Clears contents of trie changesets completely + fn clear_trie_changesets(&self) -> ProviderResult<()>; + + /// Clears contents of trie changesets starting from the given block number (inclusive) onwards. + fn clear_trie_changesets_from(&self, from: BlockNumber) -> ProviderResult<()>; } /// Storage Trie Writer #[auto_impl::auto_impl(&, Arc, Box)] pub trait StorageTrieWriter: Send + Sync { - /// Writes storage trie updates from the given storage trie map. + /// Writes storage trie updates from the given storage trie map with already sorted updates. /// - /// First sorts the storage trie updates by the hashed address key, writing in sorted order. + /// Expects the storage trie updates to already be sorted by the hashed address key. /// /// Returns the number of entries modified. - fn write_storage_trie_updates<'a>( + fn write_storage_trie_updates_sorted<'a>( + &self, + storage_tries: impl Iterator, + ) -> ProviderResult; + + /// Records the current values of all trie nodes which will be updated using the + /// [`StorageTrieUpdatesSorted`] into the storage trie changesets table. + /// + /// The intended usage of this method is to call it _prior_ to calling + /// `write_storage_trie_updates` with the same set of [`StorageTrieUpdatesSorted`]. + /// + /// The `updates_overlay` parameter allows providing additional in-memory trie updates that + /// should be considered when looking up current node values. When provided, these overlay + /// updates are applied on top of the database state for each storage trie, allowing the + /// method to see a view that includes both committed database values and pending in-memory + /// changes. This is useful when writing changesets for storage updates that depend on + /// previous uncommitted trie changes. + /// + /// Returns the number of keys written. + fn write_storage_trie_changesets<'a>( &self, - storage_tries: impl Iterator, + block_number: BlockNumber, + storage_tries: impl Iterator, + updates_overlay: Option<&TrieUpdatesSorted>, ) -> ProviderResult; } diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index 50d9f20af0b..27c2807ad2a 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -3,6 +3,7 @@ use core::ops::Not; use crate::{ added_removed_keys::MultiAddedRemovedKeys, prefix_set::{PrefixSetMut, TriePrefixSetsMut}, + utils::extend_sorted_vec, KeyHasher, MultiProofTargets, Nibbles, }; use alloc::{borrow::Cow, vec::Vec}; @@ -484,6 +485,21 @@ impl HashedPostStateSorted { pub const fn account_storages(&self) -> &B256Map { &self.storages } + + /// Extends this state with contents of another sorted state. + /// Entries in `other` take precedence for duplicate keys. 
+ pub fn extend_ref(&mut self, other: &Self) { + // Extend accounts + self.accounts.extend_ref(&other.accounts); + + // Extend storages + for (hashed_address, other_storage) in &other.storages { + self.storages + .entry(*hashed_address) + .and_modify(|existing| existing.extend_ref(other_storage)) + .or_insert_with(|| other_storage.clone()); + } + } } impl AsRef for HashedPostStateSorted { @@ -510,6 +526,20 @@ impl HashedAccountsSorted { .chain(self.destroyed_accounts.iter().map(|address| (*address, None))) .sorted_by_key(|entry| *entry.0) } + + /// Extends this collection with contents of another sorted collection. + /// Entries in `other` take precedence for duplicate keys. + pub fn extend_ref(&mut self, other: &Self) { + // Updates take precedence over removals, so we want removals from `other` to only apply to + // the previous accounts. + self.accounts.retain(|(addr, _)| !other.destroyed_accounts.contains(addr)); + + // Extend the sorted accounts vector + extend_sorted_vec(&mut self.accounts, &other.accounts); + + // Merge destroyed accounts sets + self.destroyed_accounts.extend(&other.destroyed_accounts); + } } /// Sorted hashed storage optimized for iterating during state trie calculation. @@ -537,6 +567,28 @@ impl HashedStorageSorted { .chain(self.zero_valued_slots.iter().map(|hashed_slot| (*hashed_slot, U256::ZERO))) .sorted_by_key(|entry| *entry.0) } + + /// Extends this storage with contents of another sorted storage. + /// Entries in `other` take precedence for duplicate keys. + pub fn extend_ref(&mut self, other: &Self) { + if other.wiped { + // If other is wiped, clear everything and copy from other + self.wiped = true; + self.non_zero_valued_slots.clear(); + self.zero_valued_slots.clear(); + self.non_zero_valued_slots.extend_from_slice(&other.non_zero_valued_slots); + self.zero_valued_slots.extend(&other.zero_valued_slots); + return; + } + + self.non_zero_valued_slots.retain(|(slot, _)| !other.zero_valued_slots.contains(slot)); + + // Extend the sorted non-zero valued slots + extend_sorted_vec(&mut self.non_zero_valued_slots, &other.non_zero_valued_slots); + + // Merge zero valued slots sets + self.zero_valued_slots.extend(&other.zero_valued_slots); + } } /// An iterator that yields chunks of the state updates of at most `size` account and storage @@ -1072,4 +1124,102 @@ mod tests { ); assert_eq!(chunks.next(), None); } + + #[test] + fn test_hashed_post_state_sorted_extend_ref() { + // Test extending accounts + let mut state1 = HashedPostStateSorted { + accounts: HashedAccountsSorted { + accounts: vec![ + (B256::from([1; 32]), Account::default()), + (B256::from([3; 32]), Account::default()), + ], + destroyed_accounts: B256Set::from_iter([B256::from([5; 32])]), + }, + storages: B256Map::default(), + }; + + let state2 = HashedPostStateSorted { + accounts: HashedAccountsSorted { + accounts: vec![ + (B256::from([2; 32]), Account::default()), + (B256::from([3; 32]), Account { nonce: 1, ..Default::default() }), // Override + (B256::from([4; 32]), Account::default()), + ], + destroyed_accounts: B256Set::from_iter([B256::from([6; 32])]), + }, + storages: B256Map::default(), + }; + + state1.extend_ref(&state2); + + // Check accounts are merged and sorted + assert_eq!(state1.accounts.accounts.len(), 4); + assert_eq!(state1.accounts.accounts[0].0, B256::from([1; 32])); + assert_eq!(state1.accounts.accounts[1].0, B256::from([2; 32])); + assert_eq!(state1.accounts.accounts[2].0, B256::from([3; 32])); + assert_eq!(state1.accounts.accounts[2].1.nonce, 1); // Should have state2's value + 
assert_eq!(state1.accounts.accounts[3].0, B256::from([4; 32])); + + // Check destroyed accounts are merged + assert!(state1.accounts.destroyed_accounts.contains(&B256::from([5; 32]))); + assert!(state1.accounts.destroyed_accounts.contains(&B256::from([6; 32]))); + } + + #[test] + fn test_hashed_storage_sorted_extend_ref() { + // Test normal extension + let mut storage1 = HashedStorageSorted { + non_zero_valued_slots: vec![ + (B256::from([1; 32]), U256::from(10)), + (B256::from([3; 32]), U256::from(30)), + ], + zero_valued_slots: B256Set::from_iter([B256::from([5; 32])]), + wiped: false, + }; + + let storage2 = HashedStorageSorted { + non_zero_valued_slots: vec![ + (B256::from([2; 32]), U256::from(20)), + (B256::from([3; 32]), U256::from(300)), // Override + (B256::from([4; 32]), U256::from(40)), + ], + zero_valued_slots: B256Set::from_iter([B256::from([6; 32])]), + wiped: false, + }; + + storage1.extend_ref(&storage2); + + assert_eq!(storage1.non_zero_valued_slots.len(), 4); + assert_eq!(storage1.non_zero_valued_slots[0].0, B256::from([1; 32])); + assert_eq!(storage1.non_zero_valued_slots[1].0, B256::from([2; 32])); + assert_eq!(storage1.non_zero_valued_slots[2].0, B256::from([3; 32])); + assert_eq!(storage1.non_zero_valued_slots[2].1, U256::from(300)); // Should have storage2's value + assert_eq!(storage1.non_zero_valued_slots[3].0, B256::from([4; 32])); + assert!(storage1.zero_valued_slots.contains(&B256::from([5; 32]))); + assert!(storage1.zero_valued_slots.contains(&B256::from([6; 32]))); + assert!(!storage1.wiped); + + // Test wiped storage + let mut storage3 = HashedStorageSorted { + non_zero_valued_slots: vec![(B256::from([1; 32]), U256::from(10))], + zero_valued_slots: B256Set::from_iter([B256::from([2; 32])]), + wiped: false, + }; + + let storage4 = HashedStorageSorted { + non_zero_valued_slots: vec![(B256::from([3; 32]), U256::from(30))], + zero_valued_slots: B256Set::from_iter([B256::from([4; 32])]), + wiped: true, + }; + + storage3.extend_ref(&storage4); + + assert!(storage3.wiped); + // When wiped, should only have storage4's values + assert_eq!(storage3.non_zero_valued_slots.len(), 1); + assert_eq!(storage3.non_zero_valued_slots[0].0, B256::from([3; 32])); + assert_eq!(storage3.zero_valued_slots.len(), 1); + assert!(storage3.zero_valued_slots.contains(&B256::from([4; 32]))); + } } diff --git a/crates/trie/common/src/input.rs b/crates/trie/common/src/input.rs index fff50fbb7b0..522cfa9ed41 100644 --- a/crates/trie/common/src/input.rs +++ b/crates/trie/common/src/input.rs @@ -34,7 +34,7 @@ impl TrieInput { /// Create new trie input from the provided blocks, from oldest to newest. See the documentation /// for [`Self::extend_with_blocks`] for details. pub fn from_blocks<'a>( - blocks: impl IntoIterator)>, + blocks: impl IntoIterator, ) -> Self { let mut input = Self::default(); input.extend_with_blocks(blocks); @@ -47,14 +47,10 @@ impl TrieInput { /// constructed from the state of this block and the state itself, **without** trie updates. 
pub fn extend_with_blocks<'a>( &mut self, - blocks: impl IntoIterator)>, + blocks: impl IntoIterator, ) { for (hashed_state, trie_updates) in blocks { - if let Some(nodes) = trie_updates.as_ref() { - self.append_cached_ref(nodes, hashed_state); - } else { - self.append_ref(hashed_state); - } + self.append_cached_ref(trie_updates, hashed_state); } } diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index 70616ba5eb8..e4292a52016 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -36,7 +36,7 @@ mod nibbles; pub use nibbles::{Nibbles, StoredNibbles, StoredNibblesSubKey}; mod storage; -pub use storage::StorageTrieEntry; +pub use storage::{StorageTrieEntry, TrieChangeSetsEntry}; mod subnode; pub use subnode::StoredSubNode; @@ -57,6 +57,9 @@ pub mod updates; pub mod added_removed_keys; +/// Utilities used by other modules in this crate. +mod utils; + /// Bincode-compatible serde implementations for trie types. /// /// `bincode` crate allows for more efficient serialization of trie types, because it allows diff --git a/crates/trie/common/src/storage.rs b/crates/trie/common/src/storage.rs index 187a097bfd4..557b9e4a606 100644 --- a/crates/trie/common/src/storage.rs +++ b/crates/trie/common/src/storage.rs @@ -1,6 +1,8 @@ -use super::{BranchNodeCompact, StoredNibblesSubKey}; +use super::{BranchNodeCompact, Nibbles, StoredNibblesSubKey}; /// Account storage trie node. +/// +/// `nibbles` is the subkey when used as a value in the `StorageTrie` table. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] #[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct StorageTrieEntry { @@ -31,3 +33,173 @@ impl reth_codecs::Compact for StorageTrieEntry { (this, buf) } } + +/// Trie changeset entry representing the state of a trie node before a block. +/// +/// `nibbles` is the subkey when used as a value in the changeset tables. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] +pub struct TrieChangeSetsEntry { + /// The nibbles of the intermediate node + pub nibbles: StoredNibblesSubKey, + /// Node value prior to the block being processed, None indicating it didn't exist. 
+ pub node: Option, +} + +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for TrieChangeSetsEntry { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let nibbles_len = self.nibbles.to_compact(buf); + let node_len = self.node.as_ref().map(|node| node.to_compact(buf)).unwrap_or(0); + nibbles_len + node_len + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + if len == 0 { + // Return an empty entry without trying to parse anything + return ( + Self { nibbles: StoredNibblesSubKey::from(Nibbles::default()), node: None }, + buf, + ) + } + + let (nibbles, buf) = StoredNibblesSubKey::from_compact(buf, 65); + + if len <= 65 { + return (Self { nibbles, node: None }, buf) + } + + let (node, buf) = BranchNodeCompact::from_compact(buf, len - 65); + (Self { nibbles, node: Some(node) }, buf) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bytes::BytesMut; + use reth_codecs::Compact; + + #[test] + fn test_trie_changesets_entry_full_empty() { + // Test a fully empty entry (empty nibbles, None node) + let entry = TrieChangeSetsEntry { nibbles: StoredNibblesSubKey::from(vec![]), node: None }; + + let mut buf = BytesMut::new(); + let len = entry.to_compact(&mut buf); + + // Empty nibbles takes 65 bytes (64 for padding + 1 for length) + // None node adds 0 bytes + assert_eq!(len, 65); + assert_eq!(buf.len(), 65); + + // Deserialize and verify + let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len); + assert_eq!(decoded.nibbles.0.to_vec(), Vec::::new()); + assert_eq!(decoded.node, None); + assert_eq!(remaining.len(), 0); + } + + #[test] + fn test_trie_changesets_entry_none_node() { + // Test non-empty nibbles with None node + let nibbles_data = vec![0x01, 0x02, 0x03, 0x04]; + let entry = TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey::from(nibbles_data.clone()), + node: None, + }; + + let mut buf = BytesMut::new(); + let len = entry.to_compact(&mut buf); + + // Nibbles takes 65 bytes regardless of content + assert_eq!(len, 65); + + // Deserialize and verify + let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len); + assert_eq!(decoded.nibbles.0.to_vec(), nibbles_data); + assert_eq!(decoded.node, None); + assert_eq!(remaining.len(), 0); + } + + #[test] + fn test_trie_changesets_entry_empty_path_with_node() { + // Test empty path with Some node + // Using the same signature as in the codebase: (state_mask, hash_mask, tree_mask, hashes, + // value) + let test_node = BranchNodeCompact::new( + 0b1111_1111_1111_1111, // state_mask: all children present + 0b1111_1111_1111_1111, // hash_mask: all have hashes + 0b0000_0000_0000_0000, // tree_mask: no embedded trees + vec![], // hashes + None, // value + ); + + let entry = TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey::from(vec![]), + node: Some(test_node.clone()), + }; + + let mut buf = BytesMut::new(); + let len = entry.to_compact(&mut buf); + + // Calculate expected length + let mut temp_buf = BytesMut::new(); + let node_len = test_node.to_compact(&mut temp_buf); + assert_eq!(len, 65 + node_len); + + // Deserialize and verify + let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len); + assert_eq!(decoded.nibbles.0.to_vec(), Vec::::new()); + assert_eq!(decoded.node, Some(test_node)); + assert_eq!(remaining.len(), 0); + } + + #[test] + fn test_trie_changesets_entry_normal() { + // Test normal case: non-empty path with Some node + let nibbles_data = vec![0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f]; + // Using the same 
signature as in the codebase + let test_node = BranchNodeCompact::new( + 0b0000_0000_1111_0000, // state_mask: some children present + 0b0000_0000_0011_0000, // hash_mask: some have hashes + 0b0000_0000_0000_0000, // tree_mask: no embedded trees + vec![], // hashes (empty for this test) + None, // value + ); + + let entry = TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey::from(nibbles_data.clone()), + node: Some(test_node.clone()), + }; + + let mut buf = BytesMut::new(); + let len = entry.to_compact(&mut buf); + + // Verify serialization length + let mut temp_buf = BytesMut::new(); + let node_len = test_node.to_compact(&mut temp_buf); + assert_eq!(len, 65 + node_len); + + // Deserialize and verify + let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len); + assert_eq!(decoded.nibbles.0.to_vec(), nibbles_data); + assert_eq!(decoded.node, Some(test_node)); + assert_eq!(remaining.len(), 0); + } + + #[test] + fn test_trie_changesets_entry_from_compact_zero_len() { + // Test from_compact with zero length + let buf = vec![0x01, 0x02, 0x03]; + let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, 0); + + // Should return empty nibbles and None node + assert_eq!(decoded.nibbles.0.to_vec(), Vec::::new()); + assert_eq!(decoded.node, None); + assert_eq!(remaining, &buf[..]); // Buffer should be unchanged + } +} diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index 441e407db16..00a160c4f9f 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -1,4 +1,4 @@ -use crate::{BranchNodeCompact, HashBuilder, Nibbles}; +use crate::{utils::extend_sorted_vec, BranchNodeCompact, HashBuilder, Nibbles}; use alloc::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, vec::Vec, @@ -438,6 +438,11 @@ pub struct TrieUpdatesSorted { } impl TrieUpdatesSorted { + /// Returns `true` if the updates are empty. + pub fn is_empty(&self) -> bool { + self.account_nodes.is_empty() && self.storage_tries.is_empty() + } + /// Returns reference to updated account nodes. pub fn account_nodes_ref(&self) -> &[(Nibbles, Option)] { &self.account_nodes @@ -447,6 +452,24 @@ impl TrieUpdatesSorted { pub const fn storage_tries_ref(&self) -> &B256Map { &self.storage_tries } + + /// Extends the trie updates with another set of sorted updates. + /// + /// This merges the account nodes and storage tries from `other` into `self`. + /// Account nodes are merged and re-sorted, with `other`'s values taking precedence + /// for duplicate keys. 
+ pub fn extend_ref(&mut self, other: &Self) { + // Extend account nodes + extend_sorted_vec(&mut self.account_nodes, &other.account_nodes); + + // Merge storage tries + for (hashed_address, storage_trie) in &other.storage_tries { + self.storage_tries + .entry(*hashed_address) + .and_modify(|existing| existing.extend_ref(storage_trie)) + .or_insert_with(|| storage_trie.clone()); + } + } } impl AsRef for TrieUpdatesSorted { @@ -455,6 +478,29 @@ impl AsRef for TrieUpdatesSorted { } } +impl From for TrieUpdates { + fn from(sorted: TrieUpdatesSorted) -> Self { + let mut account_nodes = HashMap::default(); + let mut removed_nodes = HashSet::default(); + + for (nibbles, node) in sorted.account_nodes { + if let Some(node) = node { + account_nodes.insert(nibbles, node); + } else { + removed_nodes.insert(nibbles); + } + } + + let storage_tries = sorted + .storage_tries + .into_iter() + .map(|(address, storage)| (address, storage.into())) + .collect(); + + Self { account_nodes, removed_nodes, storage_tries } + } +} + /// Sorted storage trie updates reference used for serializing to file. #[derive(PartialEq, Eq, Clone, Default, Debug)] #[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize))] @@ -488,6 +534,23 @@ impl StorageTrieUpdatesSorted { pub fn storage_nodes_ref(&self) -> &[(Nibbles, Option)] { &self.storage_nodes } + + /// Extends the storage trie updates with another set of sorted updates. + /// + /// If `other` is marked as deleted, this will be marked as deleted and all nodes cleared. + /// Otherwise, nodes are merged with `other`'s values taking precedence for duplicates. + pub fn extend_ref(&mut self, other: &Self) { + if other.is_deleted { + self.is_deleted = true; + self.storage_nodes.clear(); + self.storage_nodes.extend(other.storage_nodes.iter().cloned()); + return; + } + + // Extend storage nodes + extend_sorted_vec(&mut self.storage_nodes, &other.storage_nodes); + self.is_deleted = self.is_deleted || other.is_deleted; + } } /// Excludes empty nibbles from the given iterator. 
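All of the `extend_ref` methods added above follow the same rule: entries from `other` take precedence on duplicate keys, and a storage trie marked `is_deleted` wipes whatever it is merged into. A minimal sketch of the account-node precedence, relying only on the public fields and constructors already used in the tests above (import paths are assumptions); not part of the patch:

    use reth_trie_common::{updates::TrieUpdatesSorted, BranchNodeCompact, Nibbles};

    fn main() {
        let node = BranchNodeCompact::default();

        let mut base = TrieUpdatesSorted::default();
        base.account_nodes = vec![
            (Nibbles::from_nibbles_unchecked([0x1]), Some(node.clone())),
            (Nibbles::from_nibbles_unchecked([0x2]), None), // removal recorded earlier
        ];

        let mut newer = TrieUpdatesSorted::default();
        // Same key 0x2: the live node from `newer` overrides the removal in `base`.
        newer.account_nodes = vec![(Nibbles::from_nibbles_unchecked([0x2]), Some(node))];

        base.extend_ref(&newer);

        assert_eq!(base.account_nodes.len(), 2);
        assert!(base.account_nodes[1].1.is_some(), "value from `newer` wins for key 0x2");
    }
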
@@ -502,6 +565,153 @@ fn exclude_empty_from_pair( iter.into_iter().filter(|(n, _)| !n.is_empty()) } +impl From for StorageTrieUpdates { + fn from(sorted: StorageTrieUpdatesSorted) -> Self { + let mut storage_nodes = HashMap::default(); + let mut removed_nodes = HashSet::default(); + + for (nibbles, node) in sorted.storage_nodes { + if let Some(node) = node { + storage_nodes.insert(nibbles, node); + } else { + removed_nodes.insert(nibbles); + } + } + + Self { is_deleted: sorted.is_deleted, storage_nodes, removed_nodes } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + + #[test] + fn test_trie_updates_sorted_extend_ref() { + // Test extending with empty updates + let mut updates1 = TrieUpdatesSorted::default(); + let updates2 = TrieUpdatesSorted::default(); + updates1.extend_ref(&updates2); + assert_eq!(updates1.account_nodes.len(), 0); + assert_eq!(updates1.storage_tries.len(), 0); + + // Test extending account nodes + let mut updates1 = TrieUpdatesSorted { + account_nodes: vec![ + (Nibbles::from_nibbles_unchecked([0x01]), Some(BranchNodeCompact::default())), + (Nibbles::from_nibbles_unchecked([0x03]), None), + ], + storage_tries: B256Map::default(), + }; + let updates2 = TrieUpdatesSorted { + account_nodes: vec![ + (Nibbles::from_nibbles_unchecked([0x02]), Some(BranchNodeCompact::default())), + (Nibbles::from_nibbles_unchecked([0x03]), Some(BranchNodeCompact::default())), /* Override */ + ], + storage_tries: B256Map::default(), + }; + updates1.extend_ref(&updates2); + assert_eq!(updates1.account_nodes.len(), 3); + // Should be sorted: 0x01, 0x02, 0x03 + assert_eq!(updates1.account_nodes[0].0, Nibbles::from_nibbles_unchecked([0x01])); + assert_eq!(updates1.account_nodes[1].0, Nibbles::from_nibbles_unchecked([0x02])); + assert_eq!(updates1.account_nodes[2].0, Nibbles::from_nibbles_unchecked([0x03])); + // 0x03 should have Some value from updates2 (override) + assert!(updates1.account_nodes[2].1.is_some()); + + // Test extending storage tries + let storage_trie1 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![( + Nibbles::from_nibbles_unchecked([0x0a]), + Some(BranchNodeCompact::default()), + )], + }; + let storage_trie2 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![(Nibbles::from_nibbles_unchecked([0x0b]), None)], + }; + + let hashed_address1 = B256::from([1; 32]); + let hashed_address2 = B256::from([2; 32]); + + let mut updates1 = TrieUpdatesSorted { + account_nodes: vec![], + storage_tries: B256Map::from_iter([(hashed_address1, storage_trie1.clone())]), + }; + let updates2 = TrieUpdatesSorted { + account_nodes: vec![], + storage_tries: B256Map::from_iter([ + (hashed_address1, storage_trie2), + (hashed_address2, storage_trie1), + ]), + }; + updates1.extend_ref(&updates2); + assert_eq!(updates1.storage_tries.len(), 2); + assert!(updates1.storage_tries.contains_key(&hashed_address1)); + assert!(updates1.storage_tries.contains_key(&hashed_address2)); + // Check that storage trie for hashed_address1 was extended + let merged_storage = &updates1.storage_tries[&hashed_address1]; + assert_eq!(merged_storage.storage_nodes.len(), 2); + } + + #[test] + fn test_storage_trie_updates_sorted_extend_ref_deleted() { + // Test case 1: Extending with a deleted storage trie that has nodes + let mut storage1 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + (Nibbles::from_nibbles_unchecked([0x01]), Some(BranchNodeCompact::default())), + (Nibbles::from_nibbles_unchecked([0x02]), None), + ], + }; + + let 
storage2 = StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![ + (Nibbles::from_nibbles_unchecked([0x03]), Some(BranchNodeCompact::default())), + (Nibbles::from_nibbles_unchecked([0x04]), None), + ], + }; + + storage1.extend_ref(&storage2); + + // Should be marked as deleted + assert!(storage1.is_deleted); + // Original nodes should be cleared, but other's nodes should be added + assert_eq!(storage1.storage_nodes.len(), 2); + assert_eq!(storage1.storage_nodes[0].0, Nibbles::from_nibbles_unchecked([0x03])); + assert_eq!(storage1.storage_nodes[1].0, Nibbles::from_nibbles_unchecked([0x04])); + + // Test case 2: Extending a deleted storage trie with more nodes + let mut storage3 = StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![( + Nibbles::from_nibbles_unchecked([0x05]), + Some(BranchNodeCompact::default()), + )], + }; + + let storage4 = StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![ + (Nibbles::from_nibbles_unchecked([0x06]), Some(BranchNodeCompact::default())), + (Nibbles::from_nibbles_unchecked([0x07]), None), + ], + }; + + storage3.extend_ref(&storage4); + + // Should remain deleted + assert!(storage3.is_deleted); + // Should have nodes from other (original cleared then extended) + assert_eq!(storage3.storage_nodes.len(), 2); + assert_eq!(storage3.storage_nodes[0].0, Nibbles::from_nibbles_unchecked([0x06])); + assert_eq!(storage3.storage_nodes[1].0, Nibbles::from_nibbles_unchecked([0x07])); + } +} + /// Bincode-compatible trie updates type serde implementations. #[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { @@ -717,7 +927,7 @@ pub mod serde_bincode_compat { } #[cfg(all(test, feature = "serde"))] -mod tests { +mod serde_tests { use super::*; #[test] diff --git a/crates/trie/common/src/utils.rs b/crates/trie/common/src/utils.rs new file mode 100644 index 00000000000..e5d16d3ef51 --- /dev/null +++ b/crates/trie/common/src/utils.rs @@ -0,0 +1,53 @@ +use alloc::vec::Vec; + +/// Helper function to extend a sorted vector with another sorted vector. +/// Values from `other` take precedence for duplicate keys. +/// +/// This function efficiently merges two sorted vectors by: +/// 1. Iterating through the target vector with mutable references +/// 2. Using a peekable iterator for the other vector +/// 3. For each target item, processing other items that come before or equal to it +/// 4. Collecting items from other that need to be inserted +/// 5. 
Appending and re-sorting only if new items were added +pub(crate) fn extend_sorted_vec(target: &mut Vec<(K, V)>, other: &[(K, V)]) +where + K: Clone + Ord + core::hash::Hash + Eq, + V: Clone, +{ + if other.is_empty() { + return; + } + + let mut other_iter = other.iter().peekable(); + let mut to_insert = Vec::new(); + + // Iterate through target and update/collect items from other + for target_item in target.iter_mut() { + while let Some(other_item) = other_iter.peek() { + use core::cmp::Ordering; + match other_item.0.cmp(&target_item.0) { + Ordering::Less => { + // Other item comes before current target item, collect it + to_insert.push(other_iter.next().unwrap().clone()); + } + Ordering::Equal => { + // Same key, update target with other's value + target_item.1 = other_iter.next().unwrap().1.clone(); + break; + } + Ordering::Greater => { + // Other item comes after current target item, keep target unchanged + break; + } + } + } + } + + // Append collected new items, as well as any remaining from `other` which are necessarily also + // new, and sort if needed + if !to_insert.is_empty() || other_iter.peek().is_some() { + target.extend(to_insert); + target.extend(other_iter.cloned()); + target.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + } +} diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs index 62d376d1b54..b1e9032fc0f 100644 --- a/crates/trie/db/src/trie_cursor.rs +++ b/crates/trie/db/src/trie_cursor.rs @@ -7,7 +7,7 @@ use reth_db_api::{ }; use reth_trie::{ trie_cursor::{TrieCursor, TrieCursorFactory}, - updates::StorageTrieUpdates, + updates::StorageTrieUpdatesSorted, BranchNodeCompact, Nibbles, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, }; @@ -110,31 +110,19 @@ where + DbDupCursorRO + DbDupCursorRW, { - /// Writes storage updates - pub fn write_storage_trie_updates( + /// Writes storage updates that are already sorted + pub fn write_storage_trie_updates_sorted( &mut self, - updates: &StorageTrieUpdates, + updates: &StorageTrieUpdatesSorted, ) -> Result { // The storage trie for this account has to be deleted. if updates.is_deleted() && self.cursor.seek_exact(self.hashed_address)?.is_some() { self.cursor.delete_current_duplicates()?; } - // Merge updated and removed nodes. Updated nodes must take precedence. - let mut storage_updates = updates - .removed_nodes_ref() - .iter() - .filter_map(|n| (!updates.storage_nodes_ref().contains_key(n)).then_some((n, None))) - .collect::>(); - storage_updates.extend( - updates.storage_nodes_ref().iter().map(|(nibbles, node)| (nibbles, Some(node))), - ); - - // Sort trie node updates. - storage_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); - let mut num_entries = 0; - for (nibbles, maybe_updated) in storage_updates.into_iter().filter(|(n, _)| !n.is_empty()) { + for (nibbles, maybe_updated) in updates.storage_nodes.iter().filter(|(n, _)| !n.is_empty()) + { num_entries += 1; let nibbles = StoredNibblesSubKey(*nibbles); // Delete the old entry if it exists. 
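`extend_sorted_vec` above is `pub(crate)`, so it cannot be exercised from outside `reth-trie-common`; the following standalone sketch only re-states its contract, with both inputs sorted by key, values from `other` winning on duplicates, and the result kept sorted, using a simpler binary-search merge instead of the peekable-iterator pass in the patch:

    /// Same contract as `extend_sorted_vec`: both inputs sorted by key, `other` wins
    /// on duplicate keys, and the target stays sorted afterwards.
    fn merge_sorted<K: Ord + Clone, V: Clone>(target: &mut Vec<(K, V)>, other: &[(K, V)]) {
        for (key, value) in other {
            match target.binary_search_by(|(k, _)| k.cmp(key)) {
                Ok(idx) => target[idx].1 = value.clone(), // duplicate key: overwrite in place
                Err(idx) => target.insert(idx, (key.clone(), value.clone())), // new key: keep order
            }
        }
    }

    fn main() {
        let mut target = vec![(1u8, "a"), (3, "c")];
        let other = [(2u8, "b"), (3, "C")];

        merge_sorted(&mut target, &other);

        assert_eq!(target, vec![(1, "a"), (2, "b"), (3, "C")]);
    }
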
diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index e9fcb5a1c48..8f543a711d8 100644 --- a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -81,7 +81,11 @@ fn incremental_vs_full_root(inputs: &[&str], modified: &str) { let modified_root = loader.root().unwrap(); // Update the intermediate roots table so that we can run the incremental verification - tx.write_storage_trie_updates(core::iter::once((&hashed_address, &trie_updates))).unwrap(); + tx.write_storage_trie_updates_sorted(core::iter::once(( + &hashed_address, + &trie_updates.into_sorted(), + ))) + .unwrap(); // 3. Calculate the incremental root let mut storage_changes = PrefixSetMut::default(); @@ -620,7 +624,7 @@ fn account_trie_around_extension_node_with_dbtrie() { let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); assert_eq!(expected, got); - tx.write_trie_updates(&updates).unwrap(); + tx.write_trie_updates(updates).unwrap(); // read the account updates from the db let mut accounts_trie = tx.tx_ref().cursor_read::().unwrap(); @@ -667,7 +671,7 @@ proptest! { state.iter().map(|(&key, &balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty()))) ); assert_eq!(expected_root, state_root); - tx.write_trie_updates(&trie_updates).unwrap(); + tx.write_trie_updates(trie_updates).unwrap(); } } } diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index fe1953b9055..48657cc8a70 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -33,7 +33,7 @@ pub fn calculate_state_root(c: &mut Criterion) { provider_rw.write_hashed_state(&db_state.into_sorted()).unwrap(); let (_, updates) = StateRoot::from_tx(provider_rw.tx_ref()).root_with_updates().unwrap(); - provider_rw.write_trie_updates(&updates).unwrap(); + provider_rw.write_trie_updates(updates).unwrap(); provider_rw.commit().unwrap(); } diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 50c9a79bd05..472624f99d7 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -4998,9 +4998,12 @@ mod tests { state.keys().copied(), ); + // Extract account nodes before moving hash_builder_updates + let hash_builder_account_nodes = hash_builder_updates.account_nodes.clone(); + // Write trie updates to the database let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.write_trie_updates(&hash_builder_updates).unwrap(); + provider_rw.write_trie_updates(hash_builder_updates).unwrap(); provider_rw.commit().unwrap(); // Assert that the sparse trie root matches the hash builder root @@ -5008,7 +5011,7 @@ mod tests { // Assert that the sparse trie updates match the hash builder updates pretty_assertions::assert_eq!( BTreeMap::from_iter(sparse_updates.updated_nodes), - BTreeMap::from_iter(hash_builder_updates.account_nodes) + BTreeMap::from_iter(hash_builder_account_nodes) ); // Assert that the sparse trie nodes match the hash builder proof nodes assert_eq_parallel_sparse_trie_proof_nodes( @@ -5043,9 +5046,12 @@ mod tests { state.keys().copied(), ); + // Extract account nodes before moving hash_builder_updates + let hash_builder_account_nodes = hash_builder_updates.account_nodes.clone(); + // Write trie updates to the database let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.write_trie_updates(&hash_builder_updates).unwrap(); + provider_rw.write_trie_updates(hash_builder_updates).unwrap(); 
provider_rw.commit().unwrap(); // Assert that the sparse trie root matches the hash builder root @@ -5053,7 +5059,7 @@ mod tests { // Assert that the sparse trie updates match the hash builder updates pretty_assertions::assert_eq!( BTreeMap::from_iter(sparse_updates.updated_nodes), - BTreeMap::from_iter(hash_builder_updates.account_nodes) + BTreeMap::from_iter(hash_builder_account_nodes) ); // Assert that the sparse trie nodes match the hash builder proof nodes assert_eq_parallel_sparse_trie_proof_nodes( diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 89a23851e28..cbffe5e7563 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -3034,9 +3034,12 @@ mod tests { state.keys().copied(), ); + // Extract account nodes before moving hash_builder_updates + let hash_builder_account_nodes = hash_builder_updates.account_nodes.clone(); + // Write trie updates to the database let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.write_trie_updates(&hash_builder_updates).unwrap(); + provider_rw.write_trie_updates(hash_builder_updates).unwrap(); provider_rw.commit().unwrap(); // Assert that the sparse trie root matches the hash builder root @@ -3044,7 +3047,7 @@ mod tests { // Assert that the sparse trie updates match the hash builder updates pretty_assertions::assert_eq!( BTreeMap::from_iter(sparse_updates.updated_nodes), - BTreeMap::from_iter(hash_builder_updates.account_nodes) + BTreeMap::from_iter(hash_builder_account_nodes) ); // Assert that the sparse trie nodes match the hash builder proof nodes assert_eq_sparse_trie_proof_nodes(&updated_sparse, hash_builder_proof_nodes); @@ -3076,9 +3079,12 @@ mod tests { state.keys().copied(), ); + // Extract account nodes before moving hash_builder_updates + let hash_builder_account_nodes = hash_builder_updates.account_nodes.clone(); + // Write trie updates to the database let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.write_trie_updates(&hash_builder_updates).unwrap(); + provider_rw.write_trie_updates(hash_builder_updates).unwrap(); provider_rw.commit().unwrap(); // Assert that the sparse trie root matches the hash builder root @@ -3086,7 +3092,7 @@ mod tests { // Assert that the sparse trie updates match the hash builder updates pretty_assertions::assert_eq!( BTreeMap::from_iter(sparse_updates.updated_nodes), - BTreeMap::from_iter(hash_builder_updates.account_nodes) + BTreeMap::from_iter(hash_builder_account_nodes) ); // Assert that the sparse trie nodes match the hash builder proof nodes assert_eq_sparse_trie_proof_nodes(&updated_sparse, hash_builder_proof_nodes); diff --git a/crates/trie/trie/src/trie_cursor/mod.rs b/crates/trie/trie/src/trie_cursor/mod.rs index 01eea4c40e6..269611150d6 100644 --- a/crates/trie/trie/src/trie_cursor/mod.rs +++ b/crates/trie/trie/src/trie_cursor/mod.rs @@ -58,3 +58,48 @@ pub trait TrieCursor: Send + Sync { /// Get the current entry. fn current(&mut self) -> Result, DatabaseError>; } + +/// Iterator wrapper for `TrieCursor` types +#[derive(Debug)] +pub struct TrieCursorIter<'a, C> { + cursor: &'a mut C, + /// The initial value from seek, if any + initial: Option>, +} + +impl<'a, C> TrieCursorIter<'a, C> { + /// Create a new iterator from a mutable reference to a cursor. The Iterator will start from the + /// empty path. 
+ pub fn new(cursor: &'a mut C) -> Self + where + C: TrieCursor, + { + let initial = cursor.seek(Nibbles::default()).transpose(); + Self { cursor, initial } + } +} + +impl<'a, C> From<&'a mut C> for TrieCursorIter<'a, C> +where + C: TrieCursor, +{ + fn from(cursor: &'a mut C) -> Self { + Self::new(cursor) + } +} + +impl<'a, C> Iterator for TrieCursorIter<'a, C> +where + C: TrieCursor, +{ + type Item = Result<(Nibbles, BranchNodeCompact), DatabaseError>; + + fn next(&mut self) -> Option { + // If we have an initial value from seek, return it first + if let Some(initial) = self.initial.take() { + return Some(initial); + } + + self.cursor.next().transpose() + } +} diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index b97fffa00d0..02385552032 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -72,17 +72,18 @@ Database: Possible values: - - headers: The headers stage within the pipeline - - bodies: The bodies stage within the pipeline - - senders: The senders stage within the pipeline - - execution: The execution stage within the pipeline - - account-hashing: The account hashing stage within the pipeline - - storage-hashing: The storage hashing stage within the pipeline - - hashing: The account and storage hashing stages within the pipeline - - merkle: The merkle stage within the pipeline - - tx-lookup: The transaction lookup stage within the pipeline - - account-history: The account history stage within the pipeline - - storage-history: The storage history stage within the pipeline + - headers: The headers stage within the pipeline + - bodies: The bodies stage within the pipeline + - senders: The senders stage within the pipeline + - execution: The execution stage within the pipeline + - account-hashing: The account hashing stage within the pipeline + - storage-hashing: The storage hashing stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline + - merkle-changesets: The merkle changesets stage within the pipeline + - tx-lookup: The transaction lookup stage within the pipeline + - account-history: The account history stage within the pipeline + - storage-history: The storage history stage within the pipeline Logging: --log.stdout.format diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 2e9873034ff..d561eb3ce79 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -101,17 +101,18 @@ Database: The name of the stage to run Possible values: - - headers: The headers stage within the pipeline - - bodies: The bodies stage within the pipeline - - senders: The senders stage within the pipeline - - execution: The execution stage within the pipeline - - account-hashing: The account hashing stage within the pipeline - - storage-hashing: The storage hashing stage within the pipeline - - hashing: The account and storage hashing stages within the pipeline - - merkle: The merkle stage within the pipeline - - tx-lookup: The transaction lookup stage within the pipeline - - account-history: The account history stage within the pipeline - - storage-history: The storage history stage within the pipeline + - headers: The headers stage within the pipeline + - bodies: The bodies stage within the pipeline + - senders: The senders stage within the pipeline + - execution: The execution stage within the 
pipeline + - account-hashing: The account hashing stage within the pipeline + - storage-hashing: The storage hashing stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline + - merkle-changesets: The merkle changesets stage within the pipeline + - tx-lookup: The transaction lookup stage within the pipeline + - account-history: The account history stage within the pipeline + - storage-history: The storage history stage within the pipeline Networking: -d, --disable-discovery diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index 357290e14d7..0c80e52a661 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -6,7 +6,7 @@ use crate::{ }; use alloy_eips::eip2718::WithEncoded; use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload}; -use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_chain_state::ExecutedBlock; use reth_engine_primitives::EngineApiValidator; use reth_ethereum::{ node::api::{ @@ -167,7 +167,7 @@ impl BuiltPayload for CustomBuiltPayload { self.0.fees() } - fn executed_block(&self) -> Option> { + fn executed_block(&self) -> Option> { self.0.executed_block() } From 386eaa3ff68b5e05bfa64d76837a676baf3582cc Mon Sep 17 00:00:00 2001 From: Mablr <59505383+mablr@users.noreply.github.com> Date: Thu, 16 Oct 2025 11:56:27 +0200 Subject: [PATCH 082/371] fix(discv5): get `fork_id` from `Enr` for all network stacks (#18988) Co-authored-by: emhane Co-authored-by: Emilia Hane --- crates/net/discv5/src/error.rs | 2 +- crates/net/discv5/src/lib.rs | 75 +++++++++++++++++++++++++++++++--- 2 files changed, 71 insertions(+), 6 deletions(-) diff --git a/crates/net/discv5/src/error.rs b/crates/net/discv5/src/error.rs index c373a17194c..64b2cd73af8 100644 --- a/crates/net/discv5/src/error.rs +++ b/crates/net/discv5/src/error.rs @@ -13,7 +13,7 @@ pub enum Error { #[error("network stack identifier is not configured")] NetworkStackIdNotConfigured, /// Missing key used to identify rlpx network. - #[error("fork missing on enr, key missing")] + #[error("fork missing on enr, key {0:?} and key 'eth' missing")] ForkMissing(&'static [u8]), /// Failed to decode [`ForkId`](reth_ethereum_forks::ForkId) rlp value. #[error("failed to decode fork id, 'eth': {0:?}")] diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index be7b781fe74..ef2c69caedb 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -320,10 +320,7 @@ impl Discv5 { return None } - // todo: extend for all network stacks in reth-network rlpx logic - let fork_id = (self.fork_key == Some(NetworkStackId::ETH)) - .then(|| self.get_fork_id(enr).ok()) - .flatten(); + let fork_id = self.get_fork_id(enr).ok(); trace!(target: "net::discv5", ?fork_id, @@ -387,7 +384,22 @@ impl Discv5 { let Some(key) = self.fork_key else { return Err(Error::NetworkStackIdNotConfigured) }; let fork_id = enr .get_decodable::(key) - .ok_or(Error::ForkMissing(key))? + .or_else(|| { + (key != NetworkStackId::ETH) + .then(|| { + // Fallback: trying to get fork id from Enr with 'eth' as network stack id + trace!(target: "net::discv5", + key = %String::from_utf8_lossy(key), + "Fork id not found for key, trying 'eth'..." + ); + enr.get_decodable::(NetworkStackId::ETH) + }) + .flatten() + }) + .ok_or({ + trace!(target: "net::discv5", "Fork id not found for 'eth' network stack id"); + Error::ForkMissing(key) + })? 
.map(Into::into)?; Ok(fork_id) @@ -669,6 +681,8 @@ mod test { use ::enr::{CombinedKey, EnrKey}; use rand_08::thread_rng; use reth_chainspec::MAINNET; + use reth_tracing::init_test_tracing; + use std::env; use tracing::trace; fn discv5_noop() -> Discv5 { @@ -901,4 +915,55 @@ mod test { assert_eq!(fork_id, decoded_fork_id); assert_eq!(TCP_PORT, enr.tcp4().unwrap()); // listen config is defaulting to ip mode ipv4 } + + #[test] + fn get_fork_id_with_different_network_stack_ids() { + unsafe { + env::set_var("RUST_LOG", "net::discv5=trace"); + } + init_test_tracing(); + + let fork_id = MAINNET.latest_fork_id(); + let sk = SecretKey::new(&mut thread_rng()); + + // Test 1: ENR with OPEL fork ID, Discv5 configured for OPEL + let enr_with_opel = Enr::builder() + .add_value_rlp( + NetworkStackId::OPEL, + alloy_rlp::encode(EnrForkIdEntry::from(fork_id)).into(), + ) + .build(&sk) + .unwrap(); + + let mut discv5 = discv5_noop(); + discv5.fork_key = Some(NetworkStackId::OPEL); + assert_eq!(discv5.get_fork_id(&enr_with_opel).unwrap(), fork_id); + + // Test 2: ENR with ETH fork ID, Discv5 configured for OPEL (fallback to ETH) + let enr_with_eth = Enr::builder() + .add_value_rlp( + NetworkStackId::ETH, + alloy_rlp::encode(EnrForkIdEntry::from(fork_id)).into(), + ) + .build(&sk) + .unwrap(); + + discv5.fork_key = Some(NetworkStackId::OPEL); + assert_eq!(discv5.get_fork_id(&enr_with_eth).unwrap(), fork_id); + + // Test 3: ENR with neither OPEL nor ETH fork ID (should fail) + let enr_without_network_stack_id = Enr::empty(&sk).unwrap(); + discv5.fork_key = Some(NetworkStackId::OPEL); + assert!(matches!( + discv5.get_fork_id(&enr_without_network_stack_id), + Err(Error::ForkMissing(NetworkStackId::OPEL)) + )); + + // Test 4: discv5 without network stack id configured (should fail) + let discv5 = discv5_noop(); + assert!(matches!( + discv5.get_fork_id(&enr_without_network_stack_id), + Err(Error::NetworkStackIdNotConfigured) + )); + } } From 5beeaedfaeead415e9bf24e0c00213beef23d5c1 Mon Sep 17 00:00:00 2001 From: Skylar Ray <137945430+sky-coderay@users.noreply.github.com> Date: Thu, 16 Oct 2025 13:10:11 +0300 Subject: [PATCH 083/371] chore(fs-util): remove redundant tmp_path clone (#19003) --- crates/fs-util/src/lib.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index d3195ad27fe..08817aecfa3 100644 --- a/crates/fs-util/src/lib.rs +++ b/crates/fs-util/src/lib.rs @@ -332,10 +332,7 @@ where Err(err) => { // Clean up the temporary file before returning the error let _ = fs::remove_file(&tmp_path); - return Err(FsPathError::Write { - source: Error::other(err.into()), - path: tmp_path.clone(), - }); + return Err(FsPathError::Write { source: Error::other(err.into()), path: tmp_path }); } } From be648d950c55da653715aa2caa11382f6e1996b3 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Thu, 16 Oct 2025 11:21:15 +0100 Subject: [PATCH 084/371] feat: Stateless validation function receives public keys corresponding to each transaction (#17841) Co-authored-by: Wolfgang Welz --- Cargo.lock | 2 + Cargo.toml | 2 +- crates/stateless/Cargo.toml | 8 ++ crates/stateless/src/lib.rs | 3 + crates/stateless/src/recover_block.rs | 130 ++++++++++++++++++ crates/stateless/src/validation.rs | 18 ++- testing/ef-tests/Cargo.toml | 2 +- testing/ef-tests/src/cases/blockchain_test.rs | 38 ++++- 8 files changed, 193 insertions(+), 10 deletions(-) create mode 100644 crates/stateless/src/recover_block.rs diff --git a/Cargo.lock b/Cargo.lock index f9d8401ec7b..336442241a6 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10400,6 +10400,7 @@ dependencies = [ "alloy-rpc-types-debug", "alloy-trie", "itertools 0.14.0", + "k256", "reth-chainspec", "reth-consensus", "reth-errors", @@ -10410,6 +10411,7 @@ dependencies = [ "reth-revm", "reth-trie-common", "reth-trie-sparse", + "secp256k1 0.30.0", "serde", "serde_with", "thiserror 2.0.16", diff --git a/Cargo.toml b/Cargo.toml index 68dc13584fc..a781e3b6047 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -446,7 +446,7 @@ reth-rpc-convert = { path = "crates/rpc/rpc-convert" } reth-stages = { path = "crates/stages/stages" } reth-stages-api = { path = "crates/stages/api" } reth-stages-types = { path = "crates/stages/types", default-features = false } -reth-stateless = { path = "crates/stateless" } +reth-stateless = { path = "crates/stateless", default-features = false } reth-static-file = { path = "crates/static-file/static-file" } reth-static-file-types = { path = "crates/static-file/types", default-features = false } reth-storage-api = { path = "crates/storage/storage-api", default-features = false } diff --git a/crates/stateless/Cargo.toml b/crates/stateless/Cargo.toml index 36a891ac3d2..8adbae28ae3 100644 --- a/crates/stateless/Cargo.toml +++ b/crates/stateless/Cargo.toml @@ -36,3 +36,11 @@ thiserror.workspace = true itertools.workspace = true serde.workspace = true serde_with.workspace = true + +k256 = { workspace = true, optional = true } +secp256k1 = { workspace = true, optional = true } + +[features] +default = ["k256"] +k256 = ["dep:k256"] +secp256k1 = ["dep:secp256k1"] diff --git a/crates/stateless/src/lib.rs b/crates/stateless/src/lib.rs index 1e858b9f9fb..6813638485e 100644 --- a/crates/stateless/src/lib.rs +++ b/crates/stateless/src/lib.rs @@ -35,9 +35,12 @@ extern crate alloc; +mod recover_block; /// Sparse trie implementation for stateless validation pub mod trie; +#[doc(inline)] +pub use recover_block::UncompressedPublicKey; #[doc(inline)] pub use trie::StatelessTrie; #[doc(inline)] diff --git a/crates/stateless/src/recover_block.rs b/crates/stateless/src/recover_block.rs new file mode 100644 index 00000000000..b402cb3724f --- /dev/null +++ b/crates/stateless/src/recover_block.rs @@ -0,0 +1,130 @@ +use crate::validation::StatelessValidationError; +use alloc::vec::Vec; +use alloy_consensus::BlockHeader; +use alloy_primitives::{Address, Signature, B256}; +use reth_chainspec::EthereumHardforks; +use reth_ethereum_primitives::{Block, TransactionSigned}; +use reth_primitives_traits::{Block as _, RecoveredBlock}; + +#[cfg(all(feature = "k256", feature = "secp256k1"))] +use k256 as _; + +/// Serialized uncompressed public key +pub type UncompressedPublicKey = [u8; 65]; + +/// Verifies all transactions in a block against a list of public keys and signatures. 
+/// +/// Returns a `RecoveredBlock` +pub(crate) fn recover_block_with_public_keys( + block: Block, + public_keys: Vec, + chain_spec: &ChainSpec, +) -> Result, StatelessValidationError> +where + ChainSpec: EthereumHardforks, +{ + if block.body().transactions.len() != public_keys.len() { + return Err(StatelessValidationError::Custom( + "Number of public keys must match number of transactions", + )); + } + + // Determine if we're in the Homestead fork for signature validation + let is_homestead = chain_spec.is_homestead_active_at_block(block.header().number()); + + // Verify each transaction signature against its corresponding public key + let senders = public_keys + .iter() + .zip(block.body().transactions()) + .map(|(vk, tx)| verify_and_compute_sender(vk, tx, is_homestead)) + .collect::, _>>()?; + + // Create RecoveredBlock with verified senders + let block_hash = block.hash_slow(); + Ok(RecoveredBlock::new(block, senders, block_hash)) +} + +/// Verifies a transaction using its signature and the given public key. +/// +/// Note: If the signature or the public key is incorrect, then this method +/// will return an error. +/// +/// Returns the address derived from the public key. +fn verify_and_compute_sender( + vk: &UncompressedPublicKey, + tx: &TransactionSigned, + is_homestead: bool, +) -> Result { + let sig = tx.signature(); + + // non-normalized signatures are only valid pre-homestead + let sig_is_normalized = sig.normalize_s().is_none(); + if is_homestead && !sig_is_normalized { + return Err(StatelessValidationError::HomesteadSignatureNotNormalized); + } + let sig_hash = tx.signature_hash(); + #[cfg(all(feature = "k256", feature = "secp256k1"))] + { + let _ = verify_and_compute_sender_unchecked_k256; + } + #[cfg(feature = "secp256k1")] + { + verify_and_compute_sender_unchecked_secp256k1(vk, sig, sig_hash) + } + #[cfg(all(feature = "k256", not(feature = "secp256k1")))] + { + verify_and_compute_sender_unchecked_k256(vk, sig, sig_hash) + } + #[cfg(not(any(feature = "secp256k1", feature = "k256")))] + { + let _ = vk; + let _ = tx; + let _: B256 = sig_hash; + let _: &Signature = sig; + + unimplemented!("Must choose either k256 or secp256k1 feature") + } +} +#[cfg(feature = "k256")] +fn verify_and_compute_sender_unchecked_k256( + vk: &UncompressedPublicKey, + sig: &Signature, + sig_hash: B256, +) -> Result { + use k256::ecdsa::{signature::hazmat::PrehashVerifier, VerifyingKey}; + + let vk = + VerifyingKey::from_sec1_bytes(vk).map_err(|_| StatelessValidationError::SignerRecovery)?; + + sig.to_k256() + .and_then(|sig| vk.verify_prehash(sig_hash.as_slice(), &sig)) + .map_err(|_| StatelessValidationError::SignerRecovery)?; + + Ok(Address::from_public_key(&vk)) +} + +#[cfg(feature = "secp256k1")] +fn verify_and_compute_sender_unchecked_secp256k1( + vk: &UncompressedPublicKey, + sig: &Signature, + sig_hash: B256, +) -> Result { + use secp256k1::{ecdsa::Signature as SecpSignature, Message, PublicKey, SECP256K1}; + + let public_key = + PublicKey::from_slice(vk).map_err(|_| StatelessValidationError::SignerRecovery)?; + + let mut sig_bytes = [0u8; 64]; + sig_bytes[0..32].copy_from_slice(&sig.r().to_be_bytes::<32>()); + sig_bytes[32..64].copy_from_slice(&sig.s().to_be_bytes::<32>()); + + let signature = SecpSignature::from_compact(&sig_bytes) + .map_err(|_| StatelessValidationError::SignerRecovery)?; + + let message = Message::from_digest(sig_hash.0); + SECP256K1 + .verify_ecdsa(&message, &signature, &public_key) + .map_err(|_| StatelessValidationError::SignerRecovery)?; + + 
Ok(Address::from_raw_public_key(&vk[1..])) +} diff --git a/crates/stateless/src/validation.rs b/crates/stateless/src/validation.rs index 38b96d6bd0f..a0475b09939 100644 --- a/crates/stateless/src/validation.rs +++ b/crates/stateless/src/validation.rs @@ -1,4 +1,5 @@ use crate::{ + recover_block::{recover_block_with_public_keys, UncompressedPublicKey}, trie::{StatelessSparseTrie, StatelessTrie}, witness_db::WitnessDatabase, ExecutionWitness, @@ -89,6 +90,14 @@ pub enum StatelessValidationError { expected: B256, }, + /// Error during signer recovery. + #[error("signer recovery failed")] + SignerRecovery, + + /// Error when signature has non-normalized s value in homestead block. + #[error("signature s value not normalized for homestead block")] + HomesteadSignatureNotNormalized, + /// Custom error. #[error("{0}")] Custom(&'static str), @@ -130,7 +139,8 @@ pub enum StatelessValidationError { /// If all steps succeed the function returns `Some` containing the hash of the validated /// `current_block`. pub fn stateless_validation( - current_block: RecoveredBlock, + current_block: Block, + public_keys: Vec, witness: ExecutionWitness, chain_spec: Arc, evm_config: E, @@ -141,6 +151,7 @@ where { stateless_validation_with_trie::( current_block, + public_keys, witness, chain_spec, evm_config, @@ -154,7 +165,8 @@ where /// /// See `stateless_validation` for detailed documentation of the validation process. pub fn stateless_validation_with_trie( - current_block: RecoveredBlock, + current_block: Block, + public_keys: Vec, witness: ExecutionWitness, chain_spec: Arc, evm_config: E, @@ -164,6 +176,8 @@ where ChainSpec: Send + Sync + EthChainSpec
+ EthereumHardforks + Debug, E: ConfigureEvm + Clone + 'static, { + let current_block = recover_block_with_public_keys(current_block, public_keys, &*chain_spec)?; + let mut ancestor_headers: Vec<_> = witness .headers .iter() diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index 6b11e29c707..745172cd82c 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -28,7 +28,7 @@ reth-evm.workspace = true reth-evm-ethereum.workspace = true reth-ethereum-consensus.workspace = true reth-revm = { workspace = true, features = ["std", "witness"] } -reth-stateless = { workspace = true } +reth-stateless = { workspace = true, features = ["secp256k1"] } reth-tracing.workspace = true reth-trie.workspace = true reth-trie-db.workspace = true diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 0526efaa6ef..5519846458c 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -10,17 +10,20 @@ use reth_chainspec::ChainSpec; use reth_consensus::{Consensus, HeaderValidator}; use reth_db_common::init::{insert_genesis_hashes, insert_genesis_history, insert_genesis_state}; use reth_ethereum_consensus::{validate_block_post_execution, EthBeaconConsensus}; -use reth_ethereum_primitives::Block; +use reth_ethereum_primitives::{Block, TransactionSigned}; use reth_evm::{execute::Executor, ConfigureEvm}; use reth_evm_ethereum::EthEvmConfig; -use reth_primitives_traits::{RecoveredBlock, SealedBlock}; +use reth_primitives_traits::{Block as BlockTrait, RecoveredBlock, SealedBlock}; use reth_provider::{ test_utils::create_test_provider_factory_with_chain_spec, BlockWriter, DatabaseProviderFactory, ExecutionOutcome, HeaderProvider, HistoryWriter, OriginalValuesKnown, StateProofProvider, StateWriter, StaticFileProviderFactory, StaticFileSegment, StaticFileWriter, }; use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord, State}; -use reth_stateless::{validation::stateless_validation, ExecutionWitness}; +use reth_stateless::{ + trie::StatelessSparseTrie, validation::stateless_validation_with_trie, ExecutionWitness, + UncompressedPublicKey, +}; use reth_trie::{HashedPostState, KeccakKeyHasher, StateRoot}; use reth_trie_db::DatabaseStateRoot; use std::{ @@ -356,9 +359,16 @@ fn run_case( } // Now validate using the stateless client if everything else passes - for (block, execution_witness) in &program_inputs { - stateless_validation( - block.clone(), + for (recovered_block, execution_witness) in &program_inputs { + let block = recovered_block.clone().into_block(); + + // Recover the actual public keys from the transaction signatures + let public_keys = recover_signers(block.body().transactions()) + .expect("Failed to recover public keys from transaction signatures"); + + stateless_validation_with_trie::( + block, + public_keys, execution_witness.clone(), chain_spec.clone(), EthEvmConfig::new(chain_spec.clone()), @@ -413,6 +423,22 @@ fn pre_execution_checks( Ok(()) } +/// Recover public keys from transaction signatures. 
+fn recover_signers<'a, I>(txs: I) -> Result, Box> +where + I: IntoIterator, +{ + txs.into_iter() + .enumerate() + .map(|(i, tx)| { + tx.signature() + .recover_from_prehash(&tx.signature_hash()) + .map(|keys| keys.to_encoded_point(false).as_bytes().try_into().unwrap()) + .map_err(|e| format!("failed to recover signature for tx #{i}: {e}").into()) + }) + .collect::, _>>() +} + /// Returns whether the test at the given path should be skipped. /// /// Some tests are edge cases that cannot happen on mainnet, while others are skipped for From e969262c7ec48f073b8ea7920a75161e7130a98e Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 16 Oct 2025 11:58:42 +0100 Subject: [PATCH 085/371] refactor: rename disable_caching_and_prewarming to disable_prewarming (#19072) --- crates/engine/primitives/src/config.rs | 25 ++++++++----------- .../tree/src/tree/payload_processor/mod.rs | 2 +- crates/node/core/src/args/engine.rs | 10 ++++---- docs/vocs/docs/pages/cli/reth/node.mdx | 4 +-- 4 files changed, 19 insertions(+), 22 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 9e2c8210f08..6f759036eb2 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -92,8 +92,8 @@ pub struct TreeConfig { /// Whether to always compare trie updates from the state root task to the trie updates from /// the regular state root calculation. always_compare_trie_updates: bool, - /// Whether to disable cross-block caching and parallel prewarming. - disable_caching_and_prewarming: bool, + /// Whether to disable parallel prewarming. + disable_prewarming: bool, /// Whether to disable the parallel sparse trie state root algorithm. disable_parallel_sparse_trie: bool, /// Whether to enable state provider metrics. @@ -148,7 +148,7 @@ impl Default for TreeConfig { max_execute_block_batch_size: DEFAULT_MAX_EXECUTE_BLOCK_BATCH_SIZE, legacy_state_root: false, always_compare_trie_updates: false, - disable_caching_and_prewarming: false, + disable_prewarming: false, disable_parallel_sparse_trie: false, state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE, @@ -179,7 +179,7 @@ impl TreeConfig { max_execute_block_batch_size: usize, legacy_state_root: bool, always_compare_trie_updates: bool, - disable_caching_and_prewarming: bool, + disable_prewarming: bool, disable_parallel_sparse_trie: bool, state_provider_metrics: bool, cross_block_cache_size: u64, @@ -205,7 +205,7 @@ impl TreeConfig { max_execute_block_batch_size, legacy_state_root, always_compare_trie_updates, - disable_caching_and_prewarming, + disable_prewarming, disable_parallel_sparse_trie, state_provider_metrics, cross_block_cache_size, @@ -285,9 +285,9 @@ impl TreeConfig { self.disable_parallel_sparse_trie } - /// Returns whether or not cross-block caching and parallel prewarming should be used. - pub const fn disable_caching_and_prewarming(&self) -> bool { - self.disable_caching_and_prewarming + /// Returns whether or not parallel prewarming should be used. + pub const fn disable_prewarming(&self) -> bool { + self.disable_prewarming } /// Returns whether to always compare trie updates from the state root task to the trie updates @@ -377,12 +377,9 @@ impl TreeConfig { self } - /// Setter for whether to disable cross-block caching and parallel prewarming. 
- pub const fn without_caching_and_prewarming( - mut self, - disable_caching_and_prewarming: bool, - ) -> Self { - self.disable_caching_and_prewarming = disable_caching_and_prewarming; + /// Setter for whether to disable parallel prewarming. + pub const fn without_prewarming(mut self, disable_prewarming: bool) -> Self { + self.disable_prewarming = disable_prewarming; self } diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index e3090d60756..d2e48a49899 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -117,7 +117,7 @@ where execution_cache: Default::default(), trie_metrics: Default::default(), cross_block_cache_size: config.cross_block_cache_size(), - disable_transaction_prewarming: config.disable_caching_and_prewarming(), + disable_transaction_prewarming: config.disable_prewarming(), evm_config, precompile_cache_disabled: config.precompile_cache_disabled(), precompile_cache_map, diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index 6b678b5789b..c82b1b03a15 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -30,9 +30,9 @@ pub struct EngineArgs { #[deprecated] pub caching_and_prewarming_enabled: bool, - /// Disable cross-block caching and parallel prewarming - #[arg(long = "engine.disable-caching-and-prewarming")] - pub caching_and_prewarming_disabled: bool, + /// Disable parallel prewarming + #[arg(long = "engine.disable-prewarming", alias = "engine.disable-caching-and-prewarming")] + pub prewarming_disabled: bool, /// CAUTION: This CLI flag has no effect anymore, use --engine.disable-parallel-sparse-trie /// if you want to disable usage of the `ParallelSparseTrie`. 
@@ -129,7 +129,7 @@ impl Default for EngineArgs { legacy_state_root_task_enabled: false, state_root_task_compare_updates: false, caching_and_prewarming_enabled: true, - caching_and_prewarming_disabled: false, + prewarming_disabled: false, parallel_sparse_trie_enabled: true, parallel_sparse_trie_disabled: false, state_provider_metrics: false, @@ -157,7 +157,7 @@ impl EngineArgs { .with_persistence_threshold(self.persistence_threshold) .with_memory_block_buffer_target(self.memory_block_buffer_target) .with_legacy_state_root(self.legacy_state_root_task_enabled) - .without_caching_and_prewarming(self.caching_and_prewarming_disabled) + .without_prewarming(self.prewarming_disabled) .with_disable_parallel_sparse_trie(self.parallel_sparse_trie_disabled) .with_state_provider_metrics(self.state_provider_metrics) .with_always_compare_trie_updates(self.state_root_task_compare_updates) diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index ea2d259f9ec..9b46593a3de 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -815,8 +815,8 @@ Engine: --engine.legacy-state-root Enable legacy state root - --engine.disable-caching-and-prewarming - Disable cross-block caching and parallel prewarming + --engine.disable-prewarming + Disable parallel prewarming --engine.disable-parallel-sparse-trie Disable the parallel sparse trie in the engine From 7e006d68452b9b2223c80c080c86eadaf156d948 Mon Sep 17 00:00:00 2001 From: Karl Yu <43113774+0xKarl98@users.noreply.github.com> Date: Thu, 16 Oct 2025 20:06:05 +0800 Subject: [PATCH 086/371] chore: remove unused rayon pool from WorkloadExecutor (#19065) Co-authored-by: sashass1315 Co-authored-by: Matthias Seitz --- .../src/tree/payload_processor/executor.rs | 34 ++++--------------- 1 file changed, 6 insertions(+), 28 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/executor.rs b/crates/engine/tree/src/tree/payload_processor/executor.rs index 3013c5e1c72..28165d5e8f2 100644 --- a/crates/engine/tree/src/tree/payload_processor/executor.rs +++ b/crates/engine/tree/src/tree/payload_processor/executor.rs @@ -1,10 +1,6 @@ //! Executor for mixed I/O and CPU workloads. -use rayon::ThreadPool as RayonPool; -use std::{ - sync::{Arc, OnceLock}, - time::Duration, -}; +use std::{sync::OnceLock, time::Duration}; use tokio::{ runtime::{Builder, Handle, Runtime}, task::JoinHandle, @@ -12,9 +8,8 @@ use tokio::{ /// An executor for mixed I/O and CPU workloads. /// -/// This type has access to its own rayon pool and uses tokio to spawn blocking tasks. -/// -/// It will reuse an existing tokio runtime if available or create its own. +/// This type uses tokio to spawn blocking tasks and will reuse an existing tokio +/// runtime if available or create its own. #[derive(Debug, Clone)] pub struct WorkloadExecutor { inner: WorkloadExecutorInner, @@ -22,21 +17,11 @@ pub struct WorkloadExecutor { impl Default for WorkloadExecutor { fn default() -> Self { - Self { inner: WorkloadExecutorInner::new(rayon::ThreadPoolBuilder::new().build().unwrap()) } + Self { inner: WorkloadExecutorInner::new() } } } impl WorkloadExecutor { - /// Creates a new executor with the given number of threads for cpu bound work (rayon). 
- #[expect(unused)] - pub(super) fn with_num_cpu_threads(cpu_threads: usize) -> Self { - Self { - inner: WorkloadExecutorInner::new( - rayon::ThreadPoolBuilder::new().num_threads(cpu_threads).build().unwrap(), - ), - } - } - /// Returns the handle to the tokio runtime pub(super) const fn handle(&self) -> &Handle { &self.inner.handle @@ -51,22 +36,15 @@ impl WorkloadExecutor { { self.inner.handle.spawn_blocking(func) } - - /// Returns access to the rayon pool - #[expect(unused)] - pub(super) const fn rayon_pool(&self) -> &Arc { - &self.inner.rayon_pool - } } #[derive(Debug, Clone)] struct WorkloadExecutorInner { handle: Handle, - rayon_pool: Arc, } impl WorkloadExecutorInner { - fn new(rayon_pool: rayon::ThreadPool) -> Self { + fn new() -> Self { fn get_runtime_handle() -> Handle { Handle::try_current().unwrap_or_else(|_| { // Create a new runtime if no runtime is available @@ -90,6 +68,6 @@ impl WorkloadExecutorInner { }) } - Self { handle: get_runtime_handle(), rayon_pool: Arc::new(rayon_pool) } + Self { handle: get_runtime_handle() } } } From 8788782f2543c5bb4f6d8edba04a0e28ef3bf709 Mon Sep 17 00:00:00 2001 From: MozirDmitriy Date: Thu, 16 Oct 2025 15:40:12 +0300 Subject: [PATCH 087/371] fix(net): remove redundant remove of evicted hash in fetcher (#19083) --- crates/net/network/src/transactions/fetcher.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 1cb725e4efb..df088bfbf46 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -413,7 +413,6 @@ impl TransactionFetcher { if let (_, Some(evicted_hash)) = self.hashes_pending_fetch.insert_and_get_evicted(hash) { self.hashes_fetch_inflight_and_pending_fetch.remove(&evicted_hash); - self.hashes_pending_fetch.remove(&evicted_hash); } } } From ff2236e5b462b726aeea7aea93dcfef14622f5f4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 16 Oct 2025 16:25:56 +0200 Subject: [PATCH 088/371] fix: support rlp hex in read_header_from_file (#19089) --- Cargo.lock | 1 + crates/cli/commands/Cargo.toml | 1 + crates/cli/commands/src/init_state/mod.rs | 2 +- .../commands/src/init_state/without_evm.rs | 63 +++++++++++++++++-- 4 files changed, 61 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 336442241a6..58d9146a654 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7481,6 +7481,7 @@ dependencies = [ "serde", "serde_json", "tar", + "tempfile", "tokio", "tokio-stream", "toml", diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 242cc6d5d9d..3ae7ae15fb9 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -99,6 +99,7 @@ proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] reth-ethereum-cli.workspace = true +tempfile.workspace = true [features] default = [] diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index 68618361e7f..2b79bb4c092 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -88,7 +88,7 @@ impl> InitStateC let header = self.header.ok_or_else(|| eyre::eyre!("Header file must be provided"))?; let header = without_evm::read_header_from_file::< ::BlockHeader, - >(header)?; + >(&header)?; let header_hash = self.header_hash.ok_or_else(|| eyre::eyre!("Header hash must be provided"))?; diff --git a/crates/cli/commands/src/init_state/without_evm.rs 
b/crates/cli/commands/src/init_state/without_evm.rs index 09711d45880..29b1848b122 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -10,16 +10,22 @@ use reth_provider::{ }; use reth_stages::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; -use std::{fs::File, io::Read, path::PathBuf}; +use std::path::Path; use tracing::info; + /// Reads the header RLP from a file and returns the Header. -pub(crate) fn read_header_from_file(path: PathBuf) -> Result +/// +/// This supports both raw rlp bytes and rlp hex string. +pub(crate) fn read_header_from_file(path: &Path) -> Result where H: Decodable, { - let mut file = File::open(path)?; - let mut buf = Vec::new(); - file.read_to_end(&mut buf)?; + let buf = if let Ok(content) = reth_fs_util::read_to_string(path) { + alloy_primitives::hex::decode(content.trim())? + } else { + // If UTF-8 decoding fails, read as raw bytes + reth_fs_util::read(path)? + }; let header = H::decode(&mut &buf[..])?; Ok(header) @@ -167,3 +173,50 @@ where Ok(()) } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::Header; + use alloy_primitives::{address, b256}; + use std::io::Write; + use tempfile::NamedTempFile; + + #[test] + fn test_read_header_from_file_hex_string() { + let header_rlp = "0xf90212a00d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dda01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479471562b71999873db5b286df957af199ec94617f7a0f496f3d199c51a1aaee67dac95f24d92ac13c60d25181e1eecd6eca5ddf32ac0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808206a4840365908a808468e975f09ad983011003846765746888676f312e32352e308664617277696ea06f485a167165ec12e0ab3e6ab59a7b88560b90306ac98a26eb294abf95a8c59b88000000000000000007"; + + let mut temp_file = NamedTempFile::new().unwrap(); + temp_file.write_all(header_rlp.as_bytes()).unwrap(); + temp_file.flush().unwrap(); + + let header: Header = read_header_from_file(temp_file.path()).unwrap(); + + assert_eq!(header.number, 1700); + assert_eq!( + header.parent_hash, + b256!("0d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dd") + ); + assert_eq!(header.beneficiary, address!("71562b71999873db5b286df957af199ec94617f7")); + } + + #[test] + fn test_read_header_from_file_raw_bytes() { + let header_rlp = 
"0xf90212a00d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dda01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479471562b71999873db5b286df957af199ec94617f7a0f496f3d199c51a1aaee67dac95f24d92ac13c60d25181e1eecd6eca5ddf32ac0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808206a4840365908a808468e975f09ad983011003846765746888676f312e32352e308664617277696ea06f485a167165ec12e0ab3e6ab59a7b88560b90306ac98a26eb294abf95a8c59b88000000000000000007"; + let header_bytes = + alloy_primitives::hex::decode(header_rlp.trim_start_matches("0x")).unwrap(); + + let mut temp_file = NamedTempFile::new().unwrap(); + temp_file.write_all(&header_bytes).unwrap(); + temp_file.flush().unwrap(); + + let header: Header = read_header_from_file(temp_file.path()).unwrap(); + + assert_eq!(header.number, 1700); + assert_eq!( + header.parent_hash, + b256!("0d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dd") + ); + assert_eq!(header.beneficiary, address!("71562b71999873db5b286df957af199ec94617f7")); + } +} From 5887a1596690efff06f7d7b8a812aec120a4a06d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 16 Oct 2025 16:30:36 +0200 Subject: [PATCH 089/371] revert: "fix: Revert "chore: disable fee charge in env"" (#19073) --- crates/rpc/rpc-eth-api/Cargo.toml | 2 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 44637d1931c..88a7f059323 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee"] } +revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee", "optional_fee_charge"] } reth-chain-state.workspace = true revm-inspectors.workspace = true reth-primitives-traits = { workspace = true, features = ["rpc-compat"] } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 8f325e757f1..c6203da3f4e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -791,6 +791,11 @@ pub trait Call: // Disable EIP-7825 transaction gas limit to support larger transactions evm_env.cfg_env.tx_gas_limit_cap = Some(u64::MAX); + // Disable additional fee charges, e.g. 
opstack operator fee charge + // See: + // + evm_env.cfg_env.disable_fee_charge = true; + // set nonce to None so that the correct nonce is chosen by the EVM request.as_mut().take_nonce(); From cc490b668a1e26994f8be1349c7e50ca5ea41411 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 16 Oct 2025 17:44:08 +0200 Subject: [PATCH 090/371] fix: accurately track account and code weighs (#19091) --- crates/engine/tree/src/tree/cached_state.rs | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index 8553a9fe63c..ffd7f49c6fc 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -474,9 +474,9 @@ impl ExecutionCacheBuilder { .build_with_hasher(DefaultHashBuilder::default()); let account_cache = CacheBuilder::new(self.account_cache_entries) - .weigher(|_key: &Address, _value: &Option| -> u32 { + .weigher(|_key: &Address, value: &Option| -> u32 { // Account has a fixed size (none, balance,code_hash) - size_of::>() as u32 + 20 + size_of_val(value) as u32 }) .max_capacity(account_cache_size) .time_to_live(EXPIRY_TIME) @@ -485,13 +485,19 @@ impl ExecutionCacheBuilder { let code_cache = CacheBuilder::new(self.code_cache_entries) .weigher(|_key: &B256, value: &Option| -> u32 { - match value { + let code_size = match value { Some(bytecode) => { - // base weight + actual bytecode size - (40 + bytecode.len()) as u32 + // base weight + actual (padded) bytecode size + size of the jump table + (size_of_val(value) + + bytecode.bytecode().len() + + bytecode + .legacy_jump_table() + .map(|table| table.as_slice().len()) + .unwrap_or_default()) as u32 } - None => 8, // size of None variant - } + None => size_of_val(value) as u32, + }; + 32 + code_size }) .max_capacity(code_cache_size) .time_to_live(EXPIRY_TIME) From 25e8d6bb77993a8e6f2d61cf43f0da639eae0e8a Mon Sep 17 00:00:00 2001 From: AJStonewee Date: Thu, 16 Oct 2025 16:06:08 -0400 Subject: [PATCH 091/371] chore: clarify the wrong Length description (#19094) --- crates/trie/common/src/prefix_set.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/trie/common/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs index 6714893f16d..35c4bc67839 100644 --- a/crates/trie/common/src/prefix_set.rs +++ b/crates/trie/common/src/prefix_set.rs @@ -280,8 +280,8 @@ mod tests { prefix_set_mut.insert(Nibbles::from_nibbles([4, 5, 6])); prefix_set_mut.insert(Nibbles::from_nibbles([1, 2, 3])); // Duplicate - assert_eq!(prefix_set_mut.keys.len(), 4); // Length should be 3 (including duplicate) - assert_eq!(prefix_set_mut.keys.capacity(), 4); // Capacity should be 4 (including duplicate) + assert_eq!(prefix_set_mut.keys.len(), 4); // Length is 4 (before deduplication) + assert_eq!(prefix_set_mut.keys.capacity(), 4); // Capacity is 4 (before deduplication) let mut prefix_set = prefix_set_mut.freeze(); assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([1, 2]))); @@ -300,8 +300,8 @@ mod tests { prefix_set_mut.insert(Nibbles::from_nibbles([4, 5, 6])); prefix_set_mut.insert(Nibbles::from_nibbles([1, 2, 3])); // Duplicate - assert_eq!(prefix_set_mut.keys.len(), 4); // Length should be 3 (including duplicate) - assert_eq!(prefix_set_mut.keys.capacity(), 101); // Capacity should be 101 (including duplicate) + assert_eq!(prefix_set_mut.keys.len(), 4); // Length is 4 (before deduplication) + assert_eq!(prefix_set_mut.keys.capacity(), 101); // Capacity is 101 
(before deduplication) let mut prefix_set = prefix_set_mut.freeze(); assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([1, 2]))); From 53ef7a386c702fe7ad98378a0592949e3bbe7cd5 Mon Sep 17 00:00:00 2001 From: Avory Date: Thu, 16 Oct 2025 23:53:36 +0300 Subject: [PATCH 092/371] docs: fix duplicate method comments in ChainInfoTracker (#18929) --- crates/chain-state/src/chain_info.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/chain-state/src/chain_info.rs b/crates/chain-state/src/chain_info.rs index a8a08430566..dd6afc8db1a 100644 --- a/crates/chain-state/src/chain_info.rs +++ b/crates/chain-state/src/chain_info.rs @@ -77,22 +77,22 @@ where self.inner.finalized_block.borrow().clone() } - /// Returns the canonical head of the chain. + /// Returns the `BlockNumHash` of the canonical head. pub fn get_canonical_num_hash(&self) -> BlockNumHash { self.inner.canonical_head.read().num_hash() } - /// Returns the canonical head of the chain. + /// Returns the block number of the canonical head. pub fn get_canonical_block_number(&self) -> BlockNumber { self.inner.canonical_head_number.load(Ordering::Relaxed) } - /// Returns the safe header of the chain. + /// Returns the `BlockNumHash` of the safe header. pub fn get_safe_num_hash(&self) -> Option<BlockNumHash> { self.inner.safe_block.borrow().as_ref().map(SealedHeader::num_hash) } - /// Returns the finalized header of the chain. + /// Returns the `BlockNumHash` of the finalized header. pub fn get_finalized_num_hash(&self) -> Option<BlockNumHash> { self.inner.finalized_block.borrow().as_ref().map(SealedHeader::num_hash) } From 48d8298e1f030156b0182393d5810193fe43c5b1 Mon Sep 17 00:00:00 2001 From: stevencartavia <112043913+stevencartavia@users.noreply.github.com> Date: Thu, 16 Oct 2025 15:02:26 -0600 Subject: [PATCH 093/371] feat: add Pool::remove_transaction(hash) (#19098) --- crates/transaction-pool/src/traits.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 9552646652b..2b9d8bae8ab 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -429,6 +429,20 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { /// Consumer: Utility fn all_transaction_hashes(&self) -> Vec<TxHash>; + /// Removes a single transaction corresponding to the given hash. + /// + /// Note: This removes the transaction as if it got discarded (_not_ mined). + /// + /// Returns the removed transaction if it was found in the pool. + /// + /// Consumer: Utility + fn remove_transaction( + &self, + hash: TxHash, + ) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> { + self.remove_transactions(vec![hash]).pop() + } + /// Removes all transactions corresponding to the given hashes. /// /// Note: This removes the transactions as if they got discarded (_not_ mined).
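A short, hypothetical usage sketch for the `remove_transaction` default method introduced in PATCH 093; the helper function and generic parameter are invented for illustration and are not part of the patch:

```rust
use alloy_primitives::TxHash;
use reth_transaction_pool::TransactionPool;

/// Hypothetical helper: discards a single transaction by hash and reports
/// whether it was actually present in the pool.
fn evict_by_hash<P: TransactionPool>(pool: &P, hash: TxHash) -> bool {
    // Uses the trait's new default implementation, which delegates to
    // `remove_transactions(vec![hash])` and pops the single result.
    pool.remove_transaction(hash).is_some()
}
```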
From a8e387bd10905a8066ff663efcbe6265faab8748 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 16 Oct 2025 23:07:40 +0200 Subject: [PATCH 094/371] chore: init state touchups (#19066) --- crates/cli/commands/src/init_state/mod.rs | 40 ++++++++++++++++++++--- crates/storage/db-common/src/init.rs | 14 +++++--- 2 files changed, 45 insertions(+), 9 deletions(-) diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index 2b79bb4c092..ff1dce5a7cf 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -2,7 +2,7 @@ use crate::common::{AccessRights, CliHeader, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_consensus::BlockHeader as AlloyBlockHeader; -use alloy_primitives::{B256, U256}; +use alloy_primitives::{Sealable, B256, U256}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; @@ -64,7 +64,7 @@ pub struct InitStateCommand { /// Hash of the header. #[arg(long, value_name = "HEADER_HASH", verbatim_doc_comment)] - pub header_hash: Option, + pub header_hash: Option, } impl> InitStateCommand { @@ -90,9 +90,7 @@ impl> InitStateC ::BlockHeader, >(&header)?; - let header_hash = - self.header_hash.ok_or_else(|| eyre::eyre!("Header hash must be provided"))?; - let header_hash = B256::from_str(&header_hash)?; + let header_hash = self.header_hash.unwrap_or_else(|| header.hash_slow()); let total_difficulty = self .total_difficulty @@ -146,3 +144,35 @@ impl InitStateCommand { Some(&self.env.chain) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::b256; + use reth_ethereum_cli::chainspec::EthereumChainSpecParser; + + #[test] + fn parse_init_state_command_with_without_evm() { + let cmd: InitStateCommand = InitStateCommand::parse_from([ + "reth", + "--chain", + "sepolia", + "--without-evm", + "--header", + "header.rlp", + "--total-difficulty", + "12345", + "--header-hash", + "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + "state.jsonl", + ]); + assert_eq!(cmd.state.to_str().unwrap(), "state.jsonl"); + assert!(cmd.without_evm); + assert_eq!(cmd.header.unwrap().to_str().unwrap(), "header.rlp"); + assert_eq!(cmd.total_difficulty.unwrap(), "12345"); + assert_eq!( + cmd.header_hash.unwrap(), + b256!("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") + ); + } +} diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 87f009356a0..8b24f0f8d19 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -9,7 +9,9 @@ use reth_config::config::EtlConfig; use reth_db_api::{tables, transaction::DbTxMut, DatabaseError}; use reth_etl::Collector; use reth_execution_errors::StateRootError; -use reth_primitives_traits::{Account, Bytecode, GotExpected, NodePrimitives, StorageEntry}; +use reth_primitives_traits::{ + Account, Bytecode, GotExpected, NodePrimitives, SealedHeader, StorageEntry, +}; use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, @@ -389,13 +391,16 @@ where } let block = provider_rw.last_block_number()?; + let hash = provider_rw .block_hash(block)? .ok_or_else(|| eyre::eyre!("Block hash not found for block {}", block))?; - let expected_state_root = provider_rw + let header = provider_rw .header_by_number(block)? 
- .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))? - .state_root(); + .map(SealedHeader::seal_slow) + .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))?; + + let expected_state_root = header.state_root(); // first line can be state root let dump_state_root = parse_state_root(&mut reader)?; @@ -403,6 +408,7 @@ where error!(target: "reth::cli", ?dump_state_root, ?expected_state_root, + header=?header.num_hash(), "State root from state dump does not match state root in current header." ); return Err(InitStorageError::StateRootMismatch(GotExpected { From 73af3002866b5c14450c9cadfb78d9788f4ef826 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roman=20Hodul=C3=A1k?= Date: Fri, 17 Oct 2025 10:45:00 +0200 Subject: [PATCH 095/371] fix(cli): Remove duplicit static file header and transaction append (#19103) --- crates/cli/commands/Cargo.toml | 1 + crates/cli/commands/src/init_state/mod.rs | 17 +------ .../commands/src/init_state/without_evm.rs | 47 ++++++++++++++----- .../optimism/cli/src/commands/init_state.rs | 3 +- docs/vocs/docs/pages/cli/reth/init-state.mdx | 3 -- 5 files changed, 40 insertions(+), 31 deletions(-) diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 3ae7ae15fb9..da1a5318f25 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -99,6 +99,7 @@ proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] reth-ethereum-cli.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } tempfile.workspace = true [features] diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index ff1dce5a7cf..4b5c51585b3 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -2,7 +2,7 @@ use crate::common::{AccessRights, CliHeader, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_consensus::BlockHeader as AlloyBlockHeader; -use alloy_primitives::{Sealable, B256, U256}; +use alloy_primitives::{Sealable, B256}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; @@ -13,7 +13,7 @@ use reth_provider::{ BlockNumReader, DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; -use std::{io::BufReader, path::PathBuf, str::FromStr, sync::Arc}; +use std::{io::BufReader, path::PathBuf, sync::Arc}; use tracing::info; pub mod without_evm; @@ -58,10 +58,6 @@ pub struct InitStateCommand { #[arg(long, value_name = "HEADER_FILE", verbatim_doc_comment)] pub header: Option, - /// Total difficulty of the header. - #[arg(long, value_name = "TOTAL_DIFFICULTY", verbatim_doc_comment)] - pub total_difficulty: Option, - /// Hash of the header. 
#[arg(long, value_name = "HEADER_HASH", verbatim_doc_comment)] pub header_hash: Option, @@ -92,18 +88,12 @@ impl> InitStateC let header_hash = self.header_hash.unwrap_or_else(|| header.hash_slow()); - let total_difficulty = self - .total_difficulty - .ok_or_else(|| eyre::eyre!("Total difficulty must be provided"))?; - let total_difficulty = U256::from_str(&total_difficulty)?; - let last_block_number = provider_rw.last_block_number()?; if last_block_number == 0 { without_evm::setup_without_evm( &provider_rw, SealedHeader::new(header, header_hash), - total_difficulty, |number| { let mut header = <::BlockHeader>::default(); @@ -160,8 +150,6 @@ mod tests { "--without-evm", "--header", "header.rlp", - "--total-difficulty", - "12345", "--header-hash", "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", "state.jsonl", @@ -169,7 +157,6 @@ mod tests { assert_eq!(cmd.state.to_str().unwrap(), "state.jsonl"); assert!(cmd.without_evm); assert_eq!(cmd.header.unwrap().to_str().unwrap(), "header.rlp"); - assert_eq!(cmd.total_difficulty.unwrap(), "12345"); assert_eq!( cmd.header_hash.unwrap(), b256!("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index 29b1848b122..de6320fc86e 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -36,7 +36,6 @@ where pub fn setup_without_evm( provider_rw: &Provider, header: SealedHeader<::BlockHeader>, - total_difficulty: U256, header_factory: F, ) -> ProviderResult<()> where @@ -56,7 +55,7 @@ where info!(target: "reth::cli", "Appending first valid block."); - append_first_block(provider_rw, &header, total_difficulty)?; + append_first_block(provider_rw, &header)?; for stage in StageId::ALL { provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number()))?; @@ -74,7 +73,6 @@ where fn append_first_block( provider_rw: &Provider, header: &SealedHeaderFor, - total_difficulty: U256, ) -> ProviderResult<()> where Provider: BlockWriter::Block> @@ -91,16 +89,8 @@ where let sf_provider = provider_rw.static_file_provider(); - sf_provider.latest_writer(StaticFileSegment::Headers)?.append_header( - header, - total_difficulty, - &header.hash(), - )?; - sf_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(header.number())?; - sf_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(header.number())?; - Ok(()) } @@ -179,6 +169,8 @@ mod tests { use super::*; use alloy_consensus::Header; use alloy_primitives::{address, b256}; + use reth_db_common::init::init_genesis; + use reth_provider::{test_utils::create_test_provider_factory, DatabaseProviderFactory}; use std::io::Write; use tempfile::NamedTempFile; @@ -219,4 +211,37 @@ mod tests { ); assert_eq!(header.beneficiary, address!("71562b71999873db5b286df957af199ec94617f7")); } + + #[test] + fn test_setup_without_evm_succeeds() { + let header_rlp = 
"0xf90212a00d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dda01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479471562b71999873db5b286df957af199ec94617f7a0f496f3d199c51a1aaee67dac95f24d92ac13c60d25181e1eecd6eca5ddf32ac0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808206a4840365908a808468e975f09ad983011003846765746888676f312e32352e308664617277696ea06f485a167165ec12e0ab3e6ab59a7b88560b90306ac98a26eb294abf95a8c59b88000000000000000007"; + let header_bytes = + alloy_primitives::hex::decode(header_rlp.trim_start_matches("0x")).unwrap(); + + let mut temp_file = NamedTempFile::new().unwrap(); + temp_file.write_all(&header_bytes).unwrap(); + temp_file.flush().unwrap(); + + let header: Header = read_header_from_file(temp_file.path()).unwrap(); + let header_hash = b256!("4f05e4392969fc82e41f6d6a8cea379323b0b2d3ddf7def1a33eec03883e3a33"); + + let provider_factory = create_test_provider_factory(); + + init_genesis(&provider_factory).unwrap(); + + let provider_rw = provider_factory.database_provider_rw().unwrap(); + + setup_without_evm(&provider_rw, SealedHeader::new(header, header_hash), |number| Header { + number, + ..Default::default() + }) + .unwrap(); + + let static_files = provider_factory.static_file_provider(); + let writer = static_files.latest_writer(StaticFileSegment::Headers).unwrap(); + let actual_next_height = writer.next_block_number(); + let expected_next_height = 1701; + + assert_eq!(actual_next_height, expected_next_height); + } } diff --git a/crates/optimism/cli/src/commands/init_state.rs b/crates/optimism/cli/src/commands/init_state.rs index 0d065c29442..7af17ca3523 100644 --- a/crates/optimism/cli/src/commands/init_state.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -7,7 +7,7 @@ use reth_cli_commands::common::{AccessRights, CliHeader, CliNodeTypes, Environme use reth_db_common::init::init_from_state_dump; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::{ - bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD}, + bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH}, OpPrimitives, }; use reth_primitives_traits::SealedHeader; @@ -58,7 +58,6 @@ impl> InitStateCommandOp { reth_cli_commands::init_state::without_evm::setup_without_evm( &provider_rw, SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), - BEDROCK_HEADER_TTD, |number| { let mut header = Header::default(); header.set_number(number); diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 86132c163d4..2e030fb3c05 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -80,9 +80,6 @@ Database: --header Header file containing the header in an RLP encoded format. - --total-difficulty - Total difficulty of the header. - --header-hash Hash of the header. 
From 3af2c93fc6adf973e79b1378b5ea5c6a5dc1c7ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Fri, 17 Oct 2025 11:24:19 +0200 Subject: [PATCH 096/371] feat(cli): add method `CliRunner::block_on` (#19088) --- crates/cli/runner/src/lib.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/crates/cli/runner/src/lib.rs b/crates/cli/runner/src/lib.rs index 4f8e13ce8cb..79dc6b21142 100644 --- a/crates/cli/runner/src/lib.rs +++ b/crates/cli/runner/src/lib.rs @@ -36,11 +36,15 @@ impl CliRunner { pub const fn from_runtime(tokio_runtime: tokio::runtime::Runtime) -> Self { Self { tokio_runtime } } -} -// === impl CliRunner === + /// Executes an async block on the runtime and blocks until completion. + pub fn block_on(&self, fut: F) -> T + where + F: Future, + { + self.tokio_runtime.block_on(fut) + } -impl CliRunner { /// Executes the given _async_ command on the tokio runtime until the command future resolves or /// until the process receives a `SIGINT` or `SIGTERM` signal. /// From a2c50947b84063ed1b1ffa43fa6ce00b9b3981c6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 17 Oct 2025 11:42:18 +0200 Subject: [PATCH 097/371] chore: exhaustive match for builtin tracer (#19105) --- crates/rpc/rpc/src/debug.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 00a89c10831..62aa625b9f2 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -420,6 +420,11 @@ where Ok(frame.into()) } + _ => { + // Note: this match is non-exhaustive in case we need to add support for + // additional tracers + Err(EthApiError::Unsupported("unsupported tracer").into()) + } }, #[cfg(not(feature = "js-tracer"))] GethDebugTracerType::JsTracer(_) => { @@ -839,6 +844,11 @@ where return Ok((frame.into(), res.state)); } + _ => { + // Note: this match is non-exhaustive in case we need to add support for + // additional tracers + Err(EthApiError::Unsupported("unsupported tracer").into()) + } }, #[cfg(not(feature = "js-tracer"))] GethDebugTracerType::JsTracer(_) => { From ff68bfe935cb0125ba492b1f8748c8a0f13fa62e Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 17 Oct 2025 06:05:18 -0400 Subject: [PATCH 098/371] chore: lower ecies instrument calls to trace (#19004) --- crates/net/ecies/src/algorithm.rs | 4 ++-- crates/net/ecies/src/stream.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 350cd3f7ed4..dae5e501695 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -499,7 +499,7 @@ impl ECIES { } /// Read and verify an auth message from the input data. - #[tracing::instrument(skip_all)] + #[tracing::instrument(level = "trace", skip_all)] pub fn read_auth(&mut self, data: &mut [u8]) -> Result<(), ECIESError> { self.remote_init_msg = Some(Bytes::copy_from_slice(data)); let unencrypted = self.decrypt_message(data)?; @@ -571,7 +571,7 @@ impl ECIES { } /// Read and verify an ack message from the input data. 
- #[tracing::instrument(skip_all)] + #[tracing::instrument(level = "trace", skip_all)] pub fn read_ack(&mut self, data: &mut [u8]) -> Result<(), ECIESError> { self.remote_init_msg = Some(Bytes::copy_from_slice(data)); let unencrypted = self.decrypt_message(data)?; diff --git a/crates/net/ecies/src/stream.rs b/crates/net/ecies/src/stream.rs index 9915fc42e6a..830f3f5ddef 100644 --- a/crates/net/ecies/src/stream.rs +++ b/crates/net/ecies/src/stream.rs @@ -40,7 +40,7 @@ where Io: AsyncRead + AsyncWrite + Unpin, { /// Connect to an `ECIES` server - #[instrument(skip(transport, secret_key))] + #[instrument(level = "trace", skip(transport, secret_key))] pub async fn connect( transport: Io, secret_key: SecretKey, From 4c7b1ed9d4a8d88d35bf271526fcb796e5c7175e Mon Sep 17 00:00:00 2001 From: futreall <86553580+futreall@users.noreply.github.com> Date: Fri, 17 Oct 2025 12:11:11 +0200 Subject: [PATCH 099/371] fix: add revm-state to dev-dependencies of chain-state crate (#19044) --- crates/chain-state/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index cba12995015..d21c83ae7c4 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -54,6 +54,7 @@ reth-testing-utils.workspace = true alloy-signer.workspace = true alloy-signer-local.workspace = true rand.workspace = true +revm-state.workspace = true criterion.workspace = true [features] From e46a9bc40c276095e498d87b5368faee8ec7b53c Mon Sep 17 00:00:00 2001 From: maradini77 <140460067+maradini77@users.noreply.github.com> Date: Fri, 17 Oct 2025 12:47:14 +0200 Subject: [PATCH 100/371] fix(sim): clamp bundle timeout to max instead of falling back to default (#18840) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/eth/sim_bundle.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index c738a64c2d5..328ea29193f 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -426,7 +426,7 @@ where let timeout = override_timeout .map(Duration::from_secs) - .filter(|&custom_duration| custom_duration <= MAX_SIM_TIMEOUT) + .map(|d| d.min(MAX_SIM_TIMEOUT)) .unwrap_or(DEFAULT_SIM_TIMEOUT); let bundle_res = From cfb26912d39c1fa18635a1984dd0c267e6ac2543 Mon Sep 17 00:00:00 2001 From: sashass1315 Date: Fri, 17 Oct 2025 13:59:49 +0300 Subject: [PATCH 101/371] fix(cli): remove redundant EthChainSpec bound in run_with_components (#19106) --- crates/ethereum/cli/src/app.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/crates/ethereum/cli/src/app.rs b/crates/ethereum/cli/src/app.rs index c0e2e4662ca..ab3682be6dc 100644 --- a/crates/ethereum/cli/src/app.rs +++ b/crates/ethereum/cli/src/app.rs @@ -82,10 +82,7 @@ where ) -> Result<()>, ) -> Result<()> where - N: CliNodeTypes< - Primitives: NodePrimitives, - ChainSpec: Hardforks + EthChainSpec, - >, + N: CliNodeTypes, ChainSpec: Hardforks>, C: ChainSpecParser, { let runner = match self.runner.take() { From ca26219aa6c504724ec044ee604523df0514532b Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 17 Oct 2025 15:45:23 +0400 Subject: [PATCH 102/371] feat: convert blobs at RPC (#19084) --- Cargo.lock | 123 +++++++++--------- Cargo.toml | 54 ++++---- crates/e2e-test-utils/src/rpc.rs | 10 +- crates/e2e-test-utils/src/transaction.rs | 12 +- crates/ethereum/node/tests/e2e/blobs.rs | 54 ++++++++ crates/rpc/rpc-eth-types/src/error/mod.rs | 2 +- crates/rpc/rpc/src/eth/core.rs | 14 
+- crates/rpc/rpc/src/eth/helpers/transaction.rs | 63 ++++++++- .../src/blobstore/converter.rs | 30 +++++ crates/transaction-pool/src/blobstore/mod.rs | 2 + 10 files changed, 263 insertions(+), 101 deletions(-) create mode 100644 crates/transaction-pool/src/blobstore/converter.rs diff --git a/Cargo.lock b/Cargo.lock index 58d9146a654..6427833300e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a0dd3ed764953a6b20458b2b7abbfdc93d20d14b38babe1a70fe631a443a9f1" +checksum = "b9b151e38e42f1586a01369ec52a6934702731d07e8509a7307331b09f6c46dc" dependencies = [ "alloy-eips", "alloy-primitives", @@ -139,9 +139,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9556182afa73cddffa91e64a5aa9508d5e8c912b3a15f26998d2388a824d2c7b" +checksum = "6e2d5e8668ef6215efdb7dcca6f22277b4e483a5650e05f5de22b2350971f4b8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -154,9 +154,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03df5cb3b428ac96b386ad64c11d5c6e87a5505682cf1fbd6f8f773e9eda04f6" +checksum = "630288cf4f3a34a8c6bc75c03dce1dbd47833138f65f37d53a1661eafc96b83f" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -237,9 +237,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "305fa99b538ca7006b0c03cfed24ec6d82beda67aac857ef4714be24231d15e6" +checksum = "e5434834adaf64fa20a6fb90877bc1d33214c41b055cc49f82189c98614368cc" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -285,9 +285,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1421f6c9d15e5b86afbfe5865ca84dea3b9f77173a0963c1a2ee4e626320ada9" +checksum = "919a8471cfbed7bcd8cf1197a57dda583ce0e10c6385f6ff4e8b41304b223392" dependencies = [ "alloy-eips", "alloy-primitives", @@ -325,9 +325,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65f763621707fa09cece30b73ecc607eb43fd7a72451fe3b46f645b905086926" +checksum = "d7c69f6c9c68a1287c9d5ff903d0010726934de0dac10989be37b75a29190d55" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -340,9 +340,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f59a869fa4b4c3a7f08b1c8cb79aec61c29febe6e24a24fe0fcfded8a9b5703" +checksum = "8eaf2ae05219e73e0979cb2cf55612aafbab191d130f203079805eaf881cca58" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -366,9 +366,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "223612259a080160ce839a4e5df0125ca403a1d5e7206cc911cea54af5d769aa" +checksum = "e58f4f345cef483eab7374f2b6056973c7419ffe8ad35e994b7a7f5d8e0c7ba4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -440,9 +440,9 @@ dependencies = [ 
[[package]] name = "alloy-provider" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77818b7348bd5486491a5297579dbfe5f706a81f8e1f5976393025f1e22a7c7d" +checksum = "de2597751539b1cc8fe4204e5325f9a9ed83fcacfb212018dfcfa7877e76de21" dependencies = [ "alloy-chains", "alloy-consensus", @@ -485,9 +485,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "249b45103a66c9ad60ad8176b076106d03a2399a37f0ee7b0e03692e6b354cb9" +checksum = "06e45a68423e732900a0c824b8e22237db461b79d2e472dd68b7547c16104427" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -529,9 +529,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2430d5623e428dd012c6c2156ae40b7fe638d6fca255e3244e0fba51fa698e93" +checksum = "edf8eb8be597cfa8c312934d2566ec4516f066d69164f9212d7a148979fdcfd8" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -555,9 +555,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e131624d08a25cfc40557041e7dc42e1182fa1153e7592d120f769a1edce56" +checksum = "339af7336571dd39ae3a15bde08ae6a647e62f75350bd415832640268af92c06" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -568,9 +568,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59407723b1850ebaa49e46d10c2ba9c10c10b3aedf2f7e97015ee23c3f4e639" +checksum = "19b33cdc0483d236cdfff763dae799ccef9646e94fb549a74f7adac6a7f7bb86" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -580,9 +580,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65e3266095e6d8e8028aab5f439c6b8736c5147314f7e606c61597e014cb8a0" +checksum = "83d98fb386a462e143f5efa64350860af39950c49e7c0cbdba419c16793116ef" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -592,9 +592,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07429a1099cd17227abcddb91b5e38c960aaeb02a6967467f5bb561fbe716ac6" +checksum = "fbde0801a32d21c5f111f037bee7e22874836fba7add34ed4a6919932dd7cf23" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -603,13 +603,14 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59e0e876b20eb9debf316d3e875536f389070635250f22b5a678cf4632a3e0cf" +checksum = "55c8d51ebb7c5fa8be8ea739a3933c5bfea08777d2d662b30b2109ac5ca71e6b" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", + "derive_more", "ethereum_ssz", "ethereum_ssz_derive", "serde", @@ -622,9 +623,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeff305b7d10cc1c888456d023e7bb8a5ea82e9e42b951e37619b88cc1a1486d" +checksum = 
"388cf910e66bd4f309a81ef746dcf8f9bca2226e3577890a8d56c5839225cf46" dependencies = [ "alloy-primitives", "derive_more", @@ -634,9 +635,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222ecadcea6aac65e75e32b6735635ee98517aa63b111849ee01ae988a71d685" +checksum = "605ec375d91073851f566a3082548af69a28dca831b27a8be7c1b4c49f5c6ca2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -655,9 +656,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7d47bca1a2a1541e4404aa38b7e262bb4dffd9ac23b4f178729a4ddc5a5caa" +checksum = "361cd87ead4ba7659bda8127902eda92d17fa7ceb18aba1676f7be10f7222487" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -668,7 +669,7 @@ dependencies = [ "alloy-serde", "alloy-sol-types", "arbitrary", - "itertools 0.14.0", + "itertools 0.13.0", "serde", "serde_json", "serde_with", @@ -677,9 +678,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a60d4baadd3f278faa4e2305cca095dfd4ab286e071b768ff09181d8ae215" +checksum = "1397926d8d06a2531578bafc3e0ec78f97a02f0e6d1631c67d80d22af6a3af02" dependencies = [ "alloy-consensus", "alloy-eips", @@ -692,9 +693,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c331c8e48665607682e8a9549a2347c13674d4fbcbdc342e7032834eba2424f4" +checksum = "de4e95fb0572b97b17751d0fdf5cdc42b0050f9dd9459eddd1bf2e2fbfed0a33" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -706,9 +707,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864f41befa90102d4e02327679699a7e9510930e2924c529e31476086609fa89" +checksum = "cddde1bbd4feeb0d363ae7882af1e2e7955ef77c17f933f31402aad9343b57c5" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -718,9 +719,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8468f1a7f9ee3bae73c24eead0239abea720dbf7779384b9c7e20d51bfb6b0" +checksum = "64600fc6c312b7e0ba76f73a381059af044f4f21f43e07f51f1fa76c868fe302" dependencies = [ "alloy-primitives", "arbitrary", @@ -730,9 +731,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53410a18a61916e2c073a6519499514e027b01e77eeaf96acd1df7cf96ef6bb2" +checksum = "5772858492b26f780468ae693405f895d6a27dea6e3eab2c36b6217de47c2647" dependencies = [ "alloy-primitives", "async-trait", @@ -745,9 +746,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6006c4cbfa5d08cadec1fcabea6cb56dc585a30a9fce40bcf81e307d6a71c8e" +checksum = "f4195b803d0a992d8dbaab2ca1986fc86533d4bc80967c0cce7668b26ad99ef9" dependencies = [ "alloy-consensus", "alloy-network", @@ -834,9 +835,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.37" +version = "1.0.41" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d94ee404368a3d9910dfe61b203e888c6b0e151a50e147f95da8baff9f9c7763" +checksum = "025a940182bddaeb594c26fe3728525ae262d0806fe6a4befdf5d7bc13d54bce" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -858,9 +859,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2f8a6338d594f6c6481292215ee8f2fd7b986c80aba23f3f44e761a8658de78" +checksum = "e3b5064d1e1e1aabc918b5954e7fb8154c39e77ec6903a581b973198b26628fa" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -873,9 +874,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17a37a8ca18006fa0a58c7489645619ff58cfa073f2b29c4e052c9bd114b123a" +checksum = "d47962f3f1d9276646485458dc842b4e35675f42111c9d814ae4711c664c8300" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -893,9 +894,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.0.37" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "679b0122b7bca9d4dc5eb2c0549677a3c53153f6e232f23f4b3ba5575f74ebde" +checksum = "9476a36a34e2fb51b6746d009c53d309a186a825aa95435407f0e07149f4ad2d" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -931,9 +932,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bf39928a5e70c9755d6811a2928131b53ba785ad37c8bf85c90175b5d43b818" +checksum = "f8e52276fdb553d3c11563afad2898f4085165e4093604afe3d78b69afbf408f" dependencies = [ "alloy-primitives", "darling 0.21.3", diff --git a/Cargo.toml b/Cargo.toml index a781e3b6047..7d75c8da560 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -494,33 +494,33 @@ alloy-trie = { version = "0.9.1", default-features = false } alloy-hardforks = "0.4.0" -alloy-consensus = { version = "1.0.37", default-features = false } -alloy-contract = { version = "1.0.37", default-features = false } -alloy-eips = { version = "1.0.37", default-features = false } -alloy-genesis = { version = "1.0.37", default-features = false } -alloy-json-rpc = { version = "1.0.37", default-features = false } -alloy-network = { version = "1.0.37", default-features = false } -alloy-network-primitives = { version = "1.0.37", default-features = false } -alloy-provider = { version = "1.0.37", features = ["reqwest"], default-features = false } -alloy-pubsub = { version = "1.0.37", default-features = false } -alloy-rpc-client = { version = "1.0.37", default-features = false } -alloy-rpc-types = { version = "1.0.37", features = ["eth"], default-features = false } -alloy-rpc-types-admin = { version = "1.0.37", default-features = false } -alloy-rpc-types-anvil = { version = "1.0.37", default-features = false } -alloy-rpc-types-beacon = { version = "1.0.37", default-features = false } -alloy-rpc-types-debug = { version = "1.0.37", default-features = false } -alloy-rpc-types-engine = { version = "1.0.37", default-features = false } -alloy-rpc-types-eth = { version = "1.0.37", default-features = false } -alloy-rpc-types-mev = { version = "1.0.37", default-features = false } -alloy-rpc-types-trace = { version = "1.0.37", default-features = false } -alloy-rpc-types-txpool = { version = "1.0.37", default-features = false } -alloy-serde = { version = 
"1.0.37", default-features = false } -alloy-signer = { version = "1.0.37", default-features = false } -alloy-signer-local = { version = "1.0.37", default-features = false } -alloy-transport = { version = "1.0.37" } -alloy-transport-http = { version = "1.0.37", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.0.37", default-features = false } -alloy-transport-ws = { version = "1.0.37", default-features = false } +alloy-consensus = { version = "1.0.41", default-features = false } +alloy-contract = { version = "1.0.41", default-features = false } +alloy-eips = { version = "1.0.41", default-features = false } +alloy-genesis = { version = "1.0.41", default-features = false } +alloy-json-rpc = { version = "1.0.41", default-features = false } +alloy-network = { version = "1.0.41", default-features = false } +alloy-network-primitives = { version = "1.0.41", default-features = false } +alloy-provider = { version = "1.0.41", features = ["reqwest"], default-features = false } +alloy-pubsub = { version = "1.0.41", default-features = false } +alloy-rpc-client = { version = "1.0.41", default-features = false } +alloy-rpc-types = { version = "1.0.41", features = ["eth"], default-features = false } +alloy-rpc-types-admin = { version = "1.0.41", default-features = false } +alloy-rpc-types-anvil = { version = "1.0.41", default-features = false } +alloy-rpc-types-beacon = { version = "1.0.41", default-features = false } +alloy-rpc-types-debug = { version = "1.0.41", default-features = false } +alloy-rpc-types-engine = { version = "1.0.41", default-features = false } +alloy-rpc-types-eth = { version = "1.0.41", default-features = false } +alloy-rpc-types-mev = { version = "1.0.41", default-features = false } +alloy-rpc-types-trace = { version = "1.0.41", default-features = false } +alloy-rpc-types-txpool = { version = "1.0.41", default-features = false } +alloy-serde = { version = "1.0.41", default-features = false } +alloy-signer = { version = "1.0.41", default-features = false } +alloy-signer-local = { version = "1.0.41", default-features = false } +alloy-transport = { version = "1.0.41" } +alloy-transport-http = { version = "1.0.41", features = ["reqwest-rustls-tls"], default-features = false } +alloy-transport-ipc = { version = "1.0.41", default-features = false } +alloy-transport-ws = { version = "1.0.41", default-features = false } # op alloy-op-evm = { version = "0.22.0", default-features = false } diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 96dda811735..ff030c390b9 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -1,4 +1,5 @@ -use alloy_consensus::TxEnvelope; +use alloy_consensus::{EthereumTxEnvelope, TxEip4844Variant}; +use alloy_eips::eip7594::BlobTransactionSidecarVariant; use alloy_network::eip2718::Decodable2718; use alloy_primitives::{Bytes, B256}; use reth_chainspec::EthereumHardforks; @@ -30,9 +31,12 @@ where } /// Retrieves a transaction envelope by its hash - pub async fn envelope_by_hash(&self, hash: B256) -> eyre::Result { + pub async fn envelope_by_hash( + &self, + hash: B256, + ) -> eyre::Result>> { let tx = self.inner.debug_api().raw_transaction(hash).await?.unwrap(); let tx = tx.to_vec(); - Ok(TxEnvelope::decode_2718(&mut tx.as_ref()).unwrap()) + Ok(EthereumTxEnvelope::decode_2718(&mut tx.as_ref()).unwrap()) } } diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index 54f98469242..dd49ac76195 100644 --- 
a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -1,5 +1,7 @@ -use alloy_consensus::{EnvKzgSettings, SidecarBuilder, SimpleCoder, TxEip4844Variant, TxEnvelope}; -use alloy_eips::eip7702::SignedAuthorization; +use alloy_consensus::{ + EnvKzgSettings, EthereumTxEnvelope, SidecarBuilder, SimpleCoder, TxEip4844Variant, TxEnvelope, +}; +use alloy_eips::{eip7594::BlobTransactionSidecarVariant, eip7702::SignedAuthorization}; use alloy_network::{ eip2718::Encodable2718, Ethereum, EthereumWallet, TransactionBuilder, TransactionBuilder4844, }; @@ -146,11 +148,13 @@ impl TransactionTestContext { /// Validates the sidecar of a given tx envelope and returns the versioned hashes #[track_caller] - pub fn validate_sidecar(tx: TxEnvelope) -> Vec { + pub fn validate_sidecar( + tx: EthereumTxEnvelope>, + ) -> Vec { let proof_setting = EnvKzgSettings::Default; match tx { - TxEnvelope::Eip4844(signed) => match signed.tx() { + EthereumTxEnvelope::Eip4844(signed) => match signed.tx() { TxEip4844Variant::TxEip4844WithSidecar(tx) => { tx.validate_blob(proof_setting.get()).unwrap(); tx.sidecar.versioned_hashes().collect() diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs index 8fd9d08d2dc..e7f62e64668 100644 --- a/crates/ethereum/node/tests/e2e/blobs.rs +++ b/crates/ethereum/node/tests/e2e/blobs.rs @@ -1,9 +1,11 @@ use crate::utils::eth_payload_attributes; +use alloy_eips::Decodable2718; use alloy_genesis::Genesis; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, }; +use reth_ethereum_primitives::PooledTransactionVariant; use reth_node_builder::{NodeBuilder, NodeHandle}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; @@ -82,3 +84,55 @@ async fn can_handle_blobs() -> eyre::Result<()> { Ok(()) } + +#[tokio::test] +async fn can_send_legacy_sidecar_post_activation() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default().chain(MAINNET.chain).genesis(genesis).osaka_activated().build(), + ); + let genesis_hash = chain_spec.genesis_hash(); + let node_config = NodeConfig::test() + .with_chain(chain_spec) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(EthereumNode::default()) + .launch() + .await?; + + let mut node = NodeTestContext::new(node, eth_payload_attributes).await?; + + let wallets = Wallet::new(2).wallet_gen(); + let blob_wallet = wallets.first().unwrap(); + + // build blob tx + let blob_tx = TransactionTestContext::tx_with_blobs_bytes(1, blob_wallet.clone()).await?; + + let tx = PooledTransactionVariant::decode_2718_exact(&blob_tx).unwrap(); + assert!(tx.as_eip4844().unwrap().tx().sidecar.is_eip4844()); + + // inject blob tx to the pool + let blob_tx_hash = node.rpc.inject_tx(blob_tx).await?; + // fetch it from rpc + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // assert that sidecar was converted to eip7594 + assert!(envelope.as_eip4844().unwrap().tx().sidecar().unwrap().is_eip7594()); + // validate sidecar + 
TransactionTestContext::validate_sidecar(envelope); + + // build a payload + let blob_payload = node.new_payload().await?; + + // submit the blob payload + let blob_block_hash = node.submit_payload(blob_payload).await?; + + node.update_forkchoice(genesis_hash, blob_block_hash).await?; + + Ok(()) +} diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index 196461d18ce..d3ba342c37a 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -69,7 +69,7 @@ pub enum EthApiError { InvalidTransactionSignature, /// Errors related to the transaction pool #[error(transparent)] - PoolError(RpcPoolError), + PoolError(#[from] RpcPoolError), /// Header not found for block hash/number/tag #[error("header not found")] HeaderNotFound(BlockId), diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 61082f4f929..e3850a67f54 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -30,8 +30,8 @@ use reth_tasks::{ TaskSpawner, TokioTaskExecutor, }; use reth_transaction_pool::{ - noop::NoopTransactionPool, AddedTransactionOutcome, BatchTxProcessor, BatchTxRequest, - TransactionPool, + blobstore::BlobSidecarConverter, noop::NoopTransactionPool, AddedTransactionOutcome, + BatchTxProcessor, BatchTxRequest, TransactionPool, }; use tokio::sync::{broadcast, mpsc, Mutex}; @@ -315,6 +315,9 @@ pub struct EthApiInner { /// Timeout duration for `send_raw_transaction_sync` RPC method. send_raw_transaction_sync_timeout: Duration, + + /// Blob sidecar converter + blob_sidecar_converter: BlobSidecarConverter, } impl EthApiInner @@ -382,6 +385,7 @@ where tx_batch_sender, pending_block_kind, send_raw_transaction_sync_timeout, + blob_sidecar_converter: BlobSidecarConverter::new(), } } } @@ -553,6 +557,12 @@ where pub const fn send_raw_transaction_sync_timeout(&self) -> Duration { self.send_raw_transaction_sync_timeout } + + /// Returns a handle to the blob sidecar converter. 
+ #[inline] + pub const fn blob_sidecar_converter(&self) -> &BlobSidecarConverter { + &self.blob_sidecar_converter + } } #[cfg(test)] diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 4fa39112166..39758f68d77 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -3,14 +3,22 @@ use std::time::Duration; use crate::EthApi; +use alloy_consensus::BlobTransactionValidationError; +use alloy_eips::{eip7594::BlobTransactionSidecarVariant, BlockId, Typed2718}; use alloy_primitives::{hex, Bytes, B256}; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_primitives_traits::AlloyBlockHeader; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction}, FromEvmError, RpcNodeCore, }; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; -use reth_transaction_pool::{AddedTransactionOutcome, PoolTransaction, TransactionPool}; +use reth_rpc_eth_types::{error::RpcPoolError, utils::recover_raw_transaction, EthApiError}; +use reth_storage_api::BlockReaderIdExt; +use reth_transaction_pool::{ + error::Eip4844PoolTransactionError, AddedTransactionOutcome, EthBlobTransactionSidecar, + EthPoolTransaction, PoolTransaction, TransactionPool, +}; impl EthTransactions for EthApi where @@ -34,7 +42,56 @@ where async fn send_raw_transaction(&self, tx: Bytes) -> Result { let recovered = recover_raw_transaction(&tx)?; - let pool_transaction = ::Transaction::from_pooled(recovered); + let mut pool_transaction = + ::Transaction::from_pooled(recovered); + + // TODO: remove this after Osaka transition + // Convert legacy blob sidecars to EIP-7594 format + if pool_transaction.is_eip4844() { + let EthBlobTransactionSidecar::Present(sidecar) = pool_transaction.take_blob() else { + return Err(EthApiError::PoolError(RpcPoolError::Eip4844( + Eip4844PoolTransactionError::MissingEip4844BlobSidecar, + ))); + }; + + let sidecar = match sidecar { + BlobTransactionSidecarVariant::Eip4844(sidecar) => { + let latest = self + .provider() + .latest_header()? + .ok_or(EthApiError::HeaderNotFound(BlockId::latest()))?; + // Convert to EIP-7594 if next block is Osaka + if self + .provider() + .chain_spec() + .is_osaka_active_at_timestamp(latest.timestamp().saturating_add(12)) + { + BlobTransactionSidecarVariant::Eip7594( + self.blob_sidecar_converter().convert(sidecar).await.ok_or_else( + || { + RpcPoolError::Eip4844( + Eip4844PoolTransactionError::InvalidEip4844Blob( + BlobTransactionValidationError::InvalidProof, + ), + ) + }, + )?, + ) + } else { + BlobTransactionSidecarVariant::Eip4844(sidecar) + } + } + sidecar => sidecar, + }; + + pool_transaction = + EthPoolTransaction::try_from_eip4844(pool_transaction.into_consensus(), sidecar) + .ok_or_else(|| { + RpcPoolError::Eip4844( + Eip4844PoolTransactionError::MissingEip4844BlobSidecar, + ) + })?; + } // forward the transaction to the specific endpoint if configured. 
if let Some(client) = self.raw_tx_forwarder() { diff --git a/crates/transaction-pool/src/blobstore/converter.rs b/crates/transaction-pool/src/blobstore/converter.rs new file mode 100644 index 00000000000..3f6abc56bff --- /dev/null +++ b/crates/transaction-pool/src/blobstore/converter.rs @@ -0,0 +1,30 @@ +use alloy_consensus::{BlobTransactionSidecar, EnvKzgSettings}; +use alloy_eips::eip7594::BlobTransactionSidecarEip7594; +use tokio::sync::Semaphore; + +// We allow up to 5 concurrent conversions to avoid excessive memory usage. +static SEMAPHORE: Semaphore = Semaphore::const_new(5); + +/// A simple semaphore-based blob sidecar converter. +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub struct BlobSidecarConverter; + +impl BlobSidecarConverter { + /// Creates a new blob sidecar converter. + pub const fn new() -> Self { + Self + } + + /// Converts the blob sidecar to the EIP-7594 format. + pub async fn convert( + &self, + sidecar: BlobTransactionSidecar, + ) -> Option { + let _permit = SEMAPHORE.acquire().await.ok()?; + tokio::task::spawn_blocking(move || sidecar.try_into_7594(EnvKzgSettings::Default.get())) + .await + .ok()? + .ok() + } +} diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index 29844994bc0..ee7eb45af0f 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -5,6 +5,7 @@ use alloy_eips::{ eip7594::BlobTransactionSidecarVariant, }; use alloy_primitives::B256; +pub use converter::BlobSidecarConverter; pub use disk::{DiskFileBlobStore, DiskFileBlobStoreConfig, OpenDiskFileBlobStore}; pub use mem::InMemoryBlobStore; pub use noop::NoopBlobStore; @@ -17,6 +18,7 @@ use std::{ }; pub use tracker::{BlobStoreCanonTracker, BlobStoreUpdates}; +mod converter; pub mod disk; mod mem; mod noop; From 1634535e0039834eed9868dbf9febaafefe12ac5 Mon Sep 17 00:00:00 2001 From: crazykissshout Date: Fri, 17 Oct 2025 16:40:26 +0200 Subject: [PATCH 103/371] fix: add bundle and transaction context to call_many errors (#18127) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 25 +++++++++++++++---- crates/rpc/rpc-eth-types/src/error/mod.rs | 29 ++++++++++++++++++++++ 2 files changed, 49 insertions(+), 5 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index c6203da3f4e..221fef3680f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -300,7 +300,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } // transact all bundles - for bundle in bundles { + for (bundle_index, bundle) in bundles.into_iter().enumerate() { let Bundle { transactions, block_override } = bundle; if transactions.is_empty() { // Skip empty bundles @@ -311,15 +311,30 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let block_overrides = block_override.map(Box::new); // transact all transactions in the bundle - for tx in transactions { + for (tx_index, tx) in transactions.into_iter().enumerate() { // Apply overrides, state overrides are only applied for the first tx in the // request let overrides = EvmOverrides::new(state_override.take(), block_overrides.clone()); - let (current_evm_env, prepared_tx) = - this.prepare_call_env(evm_env.clone(), tx, &mut db, overrides)?; - let res = this.transact(&mut db, current_evm_env, prepared_tx)?; + let (current_evm_env, prepared_tx) = this + 
.prepare_call_env(evm_env.clone(), tx, &mut db, overrides) + .map_err(|err| { + Self::Error::from_eth_err(EthApiError::call_many_error( + bundle_index, + tx_index, + err.into(), + )) + })?; + let res = this.transact(&mut db, current_evm_env, prepared_tx).map_err( + |err| { + Self::Error::from_eth_err(EthApiError::call_many_error( + bundle_index, + tx_index, + err.into(), + )) + }, + )?; match ensure_success::<_, Self::Error>(res.result) { Ok(output) => { diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index d3ba342c37a..fdb5f8f190f 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -186,6 +186,16 @@ pub enum EthApiError { /// Error thrown when batch tx send channel fails #[error("Batch transaction sender channel closed")] BatchTxSendError, + /// Error that occurred during `call_many` execution with bundle and transaction context + #[error("call_many error in bundle {bundle_index} and transaction {tx_index}: {}", .error.message())] + CallManyError { + /// Bundle index where the error occurred + bundle_index: usize, + /// Transaction index within the bundle where the error occurred + tx_index: usize, + /// The underlying error object + error: jsonrpsee_types::ErrorObject<'static>, + }, /// Any other error #[error("{0}")] Other(Box), @@ -197,6 +207,15 @@ impl EthApiError { Self::Other(Box::new(err)) } + /// Creates a new [`EthApiError::CallManyError`] variant. + pub const fn call_many_error( + bundle_index: usize, + tx_index: usize, + error: jsonrpsee_types::ErrorObject<'static>, + ) -> Self { + Self::CallManyError { bundle_index, tx_index, error } + } + /// Returns `true` if error is [`RpcInvalidTransactionError::GasTooHigh`] pub const fn is_gas_too_high(&self) -> bool { matches!( @@ -304,6 +323,16 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { EthApiError::BatchTxSendError => { internal_rpc_err("Batch transaction sender channel closed".to_string()) } + EthApiError::CallManyError { bundle_index, tx_index, error } => { + jsonrpsee_types::error::ErrorObject::owned( + error.code(), + format!( + "call_many error in bundle {bundle_index} and transaction {tx_index}: {}", + error.message() + ), + error.data(), + ) + } } } } From 928d91dbf9bf4aeb460ac62af8dcbaaca65fbc48 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 17 Oct 2025 16:45:24 +0200 Subject: [PATCH 104/371] chore: add comment section for claude (#19108) --- CLAUDE.md | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) diff --git a/CLAUDE.md b/CLAUDE.md index 99282fbf864..c7a709c6713 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -236,6 +236,85 @@ Common refactoring pattern: - Add trait bounds for flexibility - Enable reuse across different chain types (Ethereum, Optimism) +#### When to Comment + +Write comments that remain valuable after the PR is merged. Future readers won't have PR context - they only see the current code. + +##### ✅ DO: Add Value + +**Explain WHY and non-obvious behavior:** +```rust +// Process must handle allocations atomically to prevent race conditions +// between dealloc on drop and concurrent limit checks +unsafe impl GlobalAlloc for LimitedAllocator { ... } + +// Binary search requires sorted input. Panics on unsorted slices. 
+fn find_index(items: &[Item], target: &Item) -> Option + +// Timeout set to 5s to match EVM block processing limits +const TRACER_TIMEOUT: Duration = Duration::from_secs(5); +``` + +**Document constraints and assumptions:** +```rust +/// Returns heap size estimate. +/// +/// Note: May undercount shared references (Rc/Arc). For precise +/// accounting, combine with an allocator-based approach. +fn deep_size_of(&self) -> usize +``` + +**Explain complex logic:** +```rust +// We reset limits at task start because tokio reuses threads in +// spawn_blocking pool. Without reset, second task inherits first +// task's allocation count and immediately hits limit. +THREAD_ALLOCATED.with(|allocated| allocated.set(0)); +``` + +##### ❌ DON'T: Describe Changes +```rust +// ❌ BAD - Describes the change, not the code +// Changed from Vec to HashMap for O(1) lookups + +// ✅ GOOD - Explains the decision +// HashMap provides O(1) symbol lookups during trace replay +``` +```rust +// ❌ BAD - PR-specific context +// Fix for issue #234 where memory wasn't freed + +// ✅ GOOD - Documents the actual behavior +// Explicitly drop allocations before limit check to ensure +// accurate accounting +``` +```rust +// ❌ BAD - States the obvious +// Increment counter +counter += 1; + +// ✅ GOOD - Explains non-obvious purpose +// Track allocations across all threads for global limit enforcement +GLOBAL_COUNTER.fetch_add(1, Ordering::SeqCst); +``` + +✅ **Comment when:** +- Non-obvious behavior or edge cases +- Performance trade-offs +- Safety requirements (unsafe blocks must always be documented) +- Limitations or gotchas +- Why simpler alternatives don't work + +❌ **Don't comment when:** +- Code is self-explanatory +- Just restating the code in English +- Describing what changed in this PR + +##### The Test: "Will this make sense in 6 months?" + +Before adding a comment, ask: Would someone reading just the current code (no PR, no history) find this helpful? 
+ + ### Example Contribution Workflow Let's say you want to fix a bug where external IP resolution fails on startup: From 1b830e9ed1a28003e7cc0505f347adf2a03dd8b3 Mon Sep 17 00:00:00 2001 From: Dharm Singh <153282211+dharmvr1@users.noreply.github.com> Date: Fri, 17 Oct 2025 20:49:21 +0530 Subject: [PATCH 105/371] feat: derive dev accounts from mnemonic in dev mode (#18299) Co-authored-by: Arsenii Kulikov --- crates/node/builder/src/rpc.rs | 8 +-- crates/node/core/src/args/dev.rs | 69 ++++++++++++++++++-- crates/node/core/src/node_config.rs | 4 +- crates/optimism/rpc/src/eth/mod.rs | 24 ++----- crates/rpc/rpc-convert/src/transaction.rs | 6 +- crates/rpc/rpc-eth-api/src/helpers/mod.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/signer.rs | 8 --- crates/rpc/rpc-eth-api/src/types.rs | 24 +++---- crates/rpc/rpc/Cargo.toml | 2 +- crates/rpc/rpc/src/eth/helpers/signer.rs | 55 +++++++++------- docs/vocs/docs/pages/cli/reth/node.mdx | 5 ++ 11 files changed, 126 insertions(+), 81 deletions(-) diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 70adcc83d69..ed0a3fb64d4 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -23,8 +23,8 @@ use reth_node_core::{ version::{version_metadata, CLIENT_CODE}, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadStore}; -use reth_rpc::eth::{core::EthRpcConverterFor, EthApiTypes, FullEthApiServer}; -use reth_rpc_api::{eth::helpers::AddDevSigners, IntoEngineApiRpcModule}; +use reth_rpc::eth::{core::EthRpcConverterFor, DevSigner, EthApiTypes, FullEthApiServer}; +use reth_rpc_api::{eth::helpers::EthTransactions, IntoEngineApiRpcModule}; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, config::RethRpcServerConfig, @@ -991,7 +991,8 @@ where // in dev mode we generate 20 random dev-signer accounts if config.dev.dev { - registry.eth_api().with_dev_accounts(); + let signers = DevSigner::from_mnemonic(config.dev.dev_mnemonic.as_str(), 20); + registry.eth_api().signers().write().extend(signers); } let mut registry = RpcRegistry { registry }; @@ -1163,7 +1164,6 @@ pub trait EthApiBuilder: Default + Send + 'static { /// The Ethapi implementation this builder will build. type EthApi: EthApiTypes + FullEthApiServer - + AddDevSigners + Unpin + 'static; diff --git a/crates/node/core/src/args/dev.rs b/crates/node/core/src/args/dev.rs index b6a01745257..d62ff1c5dce 100644 --- a/crates/node/core/src/args/dev.rs +++ b/crates/node/core/src/args/dev.rs @@ -5,8 +5,10 @@ use std::time::Duration; use clap::Args; use humantime::parse_duration; +const DEFAULT_MNEMONIC: &str = "test test test test test test test test test test test junk"; + /// Parameters for Dev testnet configuration -#[derive(Debug, Args, PartialEq, Eq, Default, Clone, Copy)] +#[derive(Debug, Args, PartialEq, Eq, Clone)] #[command(next_help_heading = "Dev testnet")] pub struct DevArgs { /// Start the node in dev mode @@ -39,6 +41,28 @@ pub struct DevArgs { verbatim_doc_comment )] pub block_time: Option, + + /// Derive dev accounts from a fixed mnemonic instead of random ones. 
+ #[arg( + long = "dev.mnemonic", + help_heading = "Dev testnet", + value_name = "MNEMONIC", + requires = "dev", + verbatim_doc_comment, + default_value = DEFAULT_MNEMONIC + )] + pub dev_mnemonic: String, +} + +impl Default for DevArgs { + fn default() -> Self { + Self { + dev: false, + block_max_transactions: None, + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + } } #[cfg(test)] @@ -56,13 +80,37 @@ mod tests { #[test] fn test_parse_dev_args() { let args = CommandParser::::parse_from(["reth"]).args; - assert_eq!(args, DevArgs { dev: false, block_max_transactions: None, block_time: None }); + assert_eq!( + args, + DevArgs { + dev: false, + block_max_transactions: None, + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + ); let args = CommandParser::::parse_from(["reth", "--dev"]).args; - assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None }); + assert_eq!( + args, + DevArgs { + dev: true, + block_max_transactions: None, + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + ); let args = CommandParser::::parse_from(["reth", "--auto-mine"]).args; - assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None }); + assert_eq!( + args, + DevArgs { + dev: true, + block_max_transactions: None, + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + ); let args = CommandParser::::parse_from([ "reth", @@ -71,7 +119,15 @@ mod tests { "2", ]) .args; - assert_eq!(args, DevArgs { dev: true, block_max_transactions: Some(2), block_time: None }); + assert_eq!( + args, + DevArgs { + dev: true, + block_max_transactions: Some(2), + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + ); let args = CommandParser::::parse_from(["reth", "--dev", "--dev.block-time", "1s"]).args; @@ -80,7 +136,8 @@ mod tests { DevArgs { dev: true, block_max_transactions: None, - block_time: Some(std::time::Duration::from_secs(1)) + block_time: Some(std::time::Duration::from_secs(1)), + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), } ); } diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 94dbecb649c..7b487a1fa71 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -272,7 +272,7 @@ impl NodeConfig { } /// Set the dev args for the node - pub const fn with_dev(mut self, dev: DevArgs) -> Self { + pub fn with_dev(mut self, dev: DevArgs) -> Self { self.dev = dev; self } @@ -519,7 +519,7 @@ impl Clone for NodeConfig { builder: self.builder.clone(), debug: self.debug.clone(), db: self.db, - dev: self.dev, + dev: self.dev.clone(), pruning: self.pruning.clone(), datadir: self.datadir.clone(), engine: self.engine.clone(), diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index e10c5152473..04887d98f4c 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -26,19 +26,19 @@ use reth_optimism_flashblocks::{ ExecutionPayloadBaseV1, FlashBlockBuildInfo, FlashBlockCompleteSequenceRx, FlashBlockService, InProgressFlashBlockRx, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; -use reth_rpc::eth::{core::EthApiInner, DevSigner}; +use reth_rpc::eth::core::EthApiInner; use reth_rpc_eth_api::{ helpers::{ - pending_block::BuildPendingEnv, AddDevSigners, EthApiSpec, EthFees, EthState, LoadFee, - LoadPendingBlock, LoadState, SpawnBlocking, Trace, + pending_block::BuildPendingEnv, EthApiSpec, EthFees, EthState, LoadFee, LoadPendingBlock, + 
LoadState, SpawnBlocking, Trace, }, EthApiTypes, FromEvmError, FullEthApiServer, RpcConvert, RpcConverter, RpcNodeCore, - RpcNodeCoreExt, RpcTypes, SignableTxRequest, + RpcNodeCoreExt, RpcTypes, }; use reth_rpc_eth_types::{ EthStateCache, FeeHistoryCache, GasPriceOracle, PendingBlock, PendingBlockEnvOrigin, }; -use reth_storage_api::{ProviderHeader, ProviderTx}; +use reth_storage_api::ProviderHeader; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, @@ -335,18 +335,6 @@ where { } -impl AddDevSigners for OpEthApi -where - N: RpcNodeCore, - Rpc: RpcConvert< - Network: RpcTypes>>, - >, -{ - fn with_dev_accounts(&self) { - *self.inner.eth_api.signers().write() = DevSigner::random_signers(20) - } -} - impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() @@ -483,7 +471,7 @@ where NetworkT: RpcTypes, OpRpcConvert: RpcConvert, OpEthApi>: - FullEthApiServer + AddDevSigners, + FullEthApiServer, { type EthApi = OpEthApi>; diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index a89104bcbaf..046acbda544 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -2,7 +2,7 @@ use crate::{ fees::{CallFees, CallFeesError}, - RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, + RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, SignableTxRequest, }; use alloy_consensus::{ error::ValueError, transaction::Recovered, EthereumTxEnvelope, Sealable, TxEip4844, @@ -128,7 +128,7 @@ pub trait RpcConvert: Send + Sync + Unpin + Debug + DynClone + 'static { /// Associated upper layer JSON-RPC API network requests and responses to convert from and into /// types of [`Self::Primitives`]. - type Network: RpcTypes + Send + Sync + Unpin + Clone + Debug; + type Network: RpcTypes>>; /// An associated RPC conversion error. type Error: error::Error + Into>; @@ -901,7 +901,7 @@ impl RpcConvert for RpcConverter where N: NodePrimitives, - Network: RpcTypes + Send + Sync + Unpin + Clone + Debug, + Network: RpcTypes>, Evm: ConfigureEvm + 'static, Receipt: ReceiptConverter< N, diff --git a/crates/rpc/rpc-eth-api/src/helpers/mod.rs b/crates/rpc/rpc-eth-api/src/helpers/mod.rs index 29223d78913..19a72ccafb7 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/mod.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/mod.rs @@ -34,7 +34,7 @@ pub use call::{Call, EthCall}; pub use fee::{EthFees, LoadFee}; pub use pending_block::LoadPendingBlock; pub use receipt::LoadReceipt; -pub use signer::{AddDevSigners, EthSigner}; +pub use signer::EthSigner; pub use spec::EthApiSpec; pub use state::{EthState, LoadState}; pub use trace::Trace; diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs index 4060be138e0..c54c8943c0a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/signer.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs @@ -32,11 +32,3 @@ pub trait EthSigner: Send + Sync + DynClone { } dyn_clone::clone_trait_object!( EthSigner); - -/// Adds 20 random dev signers for access via the API. Used in dev mode. -#[auto_impl::auto_impl(&)] -pub trait AddDevSigners { - /// Generates 20 random developer accounts. - /// Used in DEV mode. 
- fn with_dev_accounts(&self); -} diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 22100520016..ed4fcfa5c80 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -2,11 +2,9 @@ use crate::{AsEthApiError, FromEthApiError, RpcNodeCore}; use alloy_rpc_types_eth::Block; -use reth_chain_state::CanonStateSubscriptions; -use reth_rpc_convert::RpcConvert; +use reth_rpc_convert::{RpcConvert, SignableTxRequest}; pub use reth_rpc_convert::{RpcTransaction, RpcTxReq, RpcTypes}; -use reth_storage_api::{ProviderTx, ReceiptProvider, TransactionsProvider}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use reth_storage_api::ProviderTx; use std::{ error::Error, fmt::{self}, @@ -52,12 +50,11 @@ pub type RpcError = ::Error; /// Helper trait holds necessary trait bounds on [`EthApiTypes`] to implement `eth` API. pub trait FullEthApiTypes where - Self: RpcNodeCore< - Provider: TransactionsProvider + ReceiptProvider + CanonStateSubscriptions, - Pool: TransactionPool< - Transaction: PoolTransaction>, + Self: RpcNodeCore + + EthApiTypes< + NetworkTypes: RpcTypes< + TransactionRequest: SignableTxRequest>, >, - > + EthApiTypes< RpcConvert: RpcConvert< Primitives = Self::Primitives, Network = Self::NetworkTypes, @@ -68,12 +65,11 @@ where } impl FullEthApiTypes for T where - T: RpcNodeCore< - Provider: TransactionsProvider + ReceiptProvider + CanonStateSubscriptions, - Pool: TransactionPool< - Transaction: PoolTransaction>, + T: RpcNodeCore + + EthApiTypes< + NetworkTypes: RpcTypes< + TransactionRequest: SignableTxRequest>, >, - > + EthApiTypes< RpcConvert: RpcConvert< Primitives = ::Primitives, Network = Self::NetworkTypes, diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index c47c383f057..e028e47448d 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -45,7 +45,7 @@ reth-trie-common.workspace = true alloy-evm = { workspace = true, features = ["overrides"] } alloy-consensus.workspace = true alloy-signer.workspace = true -alloy-signer-local.workspace = true +alloy-signer-local = { workspace = true, features = ["mnemonic"] } alloy-eips = { workspace = true, features = ["kzg"] } alloy-dyn-abi.workspace = true alloy-genesis.workspace = true diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index 60d6a151f9b..2c18245d542 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -1,33 +1,14 @@ //! An abstraction over ethereum signers. 
-use std::collections::HashMap; - -use crate::EthApi; use alloy_dyn_abi::TypedData; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{eip191_hash_message, Address, Signature, B256}; use alloy_signer::SignerSync; -use alloy_signer_local::PrivateKeySigner; -use reth_rpc_convert::{RpcConvert, RpcTypes, SignableTxRequest}; -use reth_rpc_eth_api::{ - helpers::{signer::Result, AddDevSigners, EthSigner}, - FromEvmError, RpcNodeCore, -}; -use reth_rpc_eth_types::{EthApiError, SignError}; -use reth_storage_api::ProviderTx; - -impl AddDevSigners for EthApi -where - N: RpcNodeCore, - EthApiError: FromEvmError, - Rpc: RpcConvert< - Network: RpcTypes>>, - >, -{ - fn with_dev_accounts(&self) { - *self.inner.signers().write() = DevSigner::random_signers(20) - } -} +use alloy_signer_local::{coins_bip39::English, MnemonicBuilder, PrivateKeySigner}; +use reth_rpc_convert::SignableTxRequest; +use reth_rpc_eth_api::helpers::{signer::Result, EthSigner}; +use reth_rpc_eth_types::SignError; +use std::collections::HashMap; /// Holds developer keys #[derive(Debug, Clone)] @@ -55,6 +36,32 @@ impl DevSigner { signers } + /// Generates dev signers deterministically from a fixed mnemonic. + /// Uses the Ethereum derivation path: `m/44'/60'/0'/0/{index}` + pub fn from_mnemonic>( + mnemonic: &str, + num: u32, + ) -> Vec + 'static>> { + let mut signers = Vec::with_capacity(num as usize); + + for i in 0..num { + let sk = MnemonicBuilder::::default() + .phrase(mnemonic) + .index(i) + .expect("invalid derivation path") + .build() + .expect("failed to build signer from mnemonic"); + + let address = sk.address(); + let addresses = vec![address]; + let accounts = HashMap::from([(address, sk)]); + + signers.push(Box::new(Self { addresses, accounts }) as Box>); + } + + signers + } + fn get_key(&self, account: Address) -> Result<&PrivateKeySigner> { self.accounts.get(&account).ok_or(SignError::NoAccount) } diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 9b46593a3de..5d07845a8e1 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -734,6 +734,11 @@ Dev testnet: Parses strings using [`humantime::parse_duration`] --dev.block-time 12s + --dev.mnemonic + Derive dev accounts from a fixed mnemonic instead of random ones. + + [default: "test test test test test test test test test test test junk"] + Pruning: --full Run full node. 
Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored From d1f6637a5aeb04f1a39dfffe3dba9d96326d6d95 Mon Sep 17 00:00:00 2001 From: YK Date: Fri, 17 Oct 2025 23:46:17 +0800 Subject: [PATCH 106/371] refactor: naming fix for multiproof dispatch (#19102) --- crates/trie/parallel/src/proof.rs | 2 +- crates/trie/parallel/src/proof_task.rs | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index ffa7aa4dc31..3ea5994488a 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -101,7 +101,7 @@ impl ParallelProof { ); self.proof_worker_handle - .queue_storage_proof(input) + .dispatch_storage_proof(input) .map_err(|e| ParallelStateRootError::Other(e.to_string())) } diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 5c26f6d99c3..b66b7bbaa4f 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -958,8 +958,8 @@ impl ProofWorkerHandle { Self { storage_work_tx, account_work_tx } } - /// Queue a storage proof computation - pub fn queue_storage_proof( + /// Dispatch a storage proof computation to storage worker pool + pub fn dispatch_storage_proof( &self, input: StorageProofInput, ) -> Result, ProviderError> { @@ -988,8 +988,8 @@ impl ProofWorkerHandle { Ok(rx) } - /// Internal: Queue blinded storage node request - fn queue_blinded_storage_node( + /// Dispatch blinded storage node request to storage worker pool + pub(crate) fn dispatch_blinded_storage_node( &self, account: B256, path: Nibbles, @@ -1004,8 +1004,8 @@ impl ProofWorkerHandle { Ok(rx) } - /// Internal: Queue blinded account node request - fn queue_blinded_account_node( + /// Dispatch blinded account node request to account worker pool + pub(crate) fn dispatch_blinded_account_node( &self, path: Nibbles, ) -> Result, ProviderError> { @@ -1055,13 +1055,13 @@ impl TrieNodeProvider for ProofTaskTrieNodeProvider { match self { Self::AccountNode { handle } => { let rx = handle - .queue_blinded_account_node(*path) + .dispatch_blinded_account_node(*path) .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? } Self::StorageNode { handle, account } => { let rx = handle - .queue_blinded_storage_node(*account, *path) + .dispatch_blinded_storage_node(*account, *path) .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? 
} From 6a918f4cab86c56b3fa3d22e8e9da53a09effd86 Mon Sep 17 00:00:00 2001 From: leopardracer <136604165+leopardracer@users.noreply.github.com> Date: Fri, 17 Oct 2025 18:48:22 +0300 Subject: [PATCH 107/371] fix: Deduplicate hashed storage preparation in MemoryOverlayStateProvider (#19087) --- crates/chain-state/src/memory_overlay.rs | 28 +++++++++++------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index 2e1efd1ed1b..254edb248b4 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -61,6 +61,13 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> { ) }) } + + fn merged_hashed_storage(&self, address: Address, storage: HashedStorage) -> HashedStorage { + let state = &self.trie_input().state; + let mut hashed = state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed.extend(&storage); + hashed + } } impl BlockHashReader for MemoryOverlayStateProviderRef<'_, N> { @@ -145,11 +152,8 @@ impl StateRootProvider for MemoryOverlayStateProviderRef<'_, impl StorageRootProvider for MemoryOverlayStateProviderRef<'_, N> { // TODO: Currently this does not reuse available in-memory trie nodes. fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult { - let state = &self.trie_input().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_root(address, hashed_storage) + let merged = self.merged_hashed_storage(address, storage); + self.historical.storage_root(address, merged) } // TODO: Currently this does not reuse available in-memory trie nodes. @@ -159,11 +163,8 @@ impl StorageRootProvider for MemoryOverlayStateProviderRef<'_ slot: B256, storage: HashedStorage, ) -> ProviderResult { - let state = &self.trie_input().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_proof(address, slot, hashed_storage) + let merged = self.merged_hashed_storage(address, storage); + self.historical.storage_proof(address, slot, merged) } // TODO: Currently this does not reuse available in-memory trie nodes. 
@@ -173,11 +174,8 @@ impl StorageRootProvider for MemoryOverlayStateProviderRef<'_ slots: &[B256], storage: HashedStorage, ) -> ProviderResult { - let state = &self.trie_input().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_multiproof(address, slots, hashed_storage) + let merged = self.merged_hashed_storage(address, storage); + self.historical.storage_multiproof(address, slots, merged) } } From a5618f57a8a8f80f13739cae640340dd3600b3c6 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 17 Oct 2025 21:34:38 +0400 Subject: [PATCH 108/371] feat: convert pooled blobs transition (#19095) --- crates/ethereum/node/tests/e2e/blobs.rs | 116 +++++++++++++++++++- crates/node/builder/src/components/pool.rs | 10 +- crates/transaction-pool/src/maintain.rs | 98 +++++++++++++++-- crates/transaction-pool/src/validate/eth.rs | 41 ++++++- 4 files changed, 251 insertions(+), 14 deletions(-) diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs index e7f62e64668..1c088e33da6 100644 --- a/crates/ethereum/node/tests/e2e/blobs.rs +++ b/crates/ethereum/node/tests/e2e/blobs.rs @@ -5,13 +5,17 @@ use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, }; +use reth_ethereum_engine_primitives::BlobSidecars; use reth_ethereum_primitives::PooledTransactionVariant; use reth_node_builder::{NodeBuilder, NodeHandle}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; use reth_tasks::TaskManager; use reth_transaction_pool::TransactionPool; -use std::sync::Arc; +use std::{ + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; #[tokio::test] async fn can_handle_blobs() -> eyre::Result<()> { @@ -136,3 +140,113 @@ async fn can_send_legacy_sidecar_post_activation() -> eyre::Result<()> { Ok(()) } + +#[tokio::test] +async fn blob_conversion_at_osaka() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let current_timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); + // Osaka activates in 2 slots + let osaka_timestamp = current_timestamp + 24; + + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(genesis) + .prague_activated() + .with_osaka_at(osaka_timestamp) + .build(), + ); + let genesis_hash = chain_spec.genesis_hash(); + let node_config = NodeConfig::test() + .with_chain(chain_spec) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(EthereumNode::default()) + .launch() + .await?; + + let mut node = NodeTestContext::new(node, eth_payload_attributes).await?; + + let mut wallets = Wallet::new(3).wallet_gen(); + let first = wallets.pop().unwrap(); + let second = wallets.pop().unwrap(); + + // build a dummy payload at `current_timestamp` + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallets.pop().unwrap()).await; + node.rpc.inject_tx(raw_tx).await?; + node.payload.timestamp = current_timestamp - 1; + node.advance_block().await?; + + // build blob txs + let first_blob = 
TransactionTestContext::tx_with_blobs_bytes(1, first.clone()).await?; + let second_blob = TransactionTestContext::tx_with_blobs_bytes(1, second.clone()).await?; + + // assert both txs have legacy sidecars + assert!(PooledTransactionVariant::decode_2718_exact(&first_blob) + .unwrap() + .as_eip4844() + .unwrap() + .tx() + .sidecar + .is_eip4844()); + assert!(PooledTransactionVariant::decode_2718_exact(&second_blob) + .unwrap() + .as_eip4844() + .unwrap() + .tx() + .sidecar + .is_eip4844()); + + // inject first blob tx to the pool + let blob_tx_hash = node.rpc.inject_tx(first_blob).await?; + // fetch it from rpc + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // assert that it still has a legacy sidecar + assert!(envelope.as_eip4844().unwrap().tx().sidecar().unwrap().is_eip4844()); + // validate sidecar + TransactionTestContext::validate_sidecar(envelope); + + // build last Prague payload + node.payload.timestamp = current_timestamp + 11; + let prague_payload = node.new_payload().await?; + assert!(matches!(prague_payload.sidecars(), BlobSidecars::Eip4844(_))); + + // inject second blob tx to the pool + let blob_tx_hash = node.rpc.inject_tx(second_blob).await?; + // fetch it from rpc + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // assert that it still has a legacy sidecar + assert!(envelope.as_eip4844().unwrap().tx().sidecar().unwrap().is_eip4844()); + // validate sidecar + TransactionTestContext::validate_sidecar(envelope); + + tokio::time::sleep(Duration::from_secs(11)).await; + + // fetch second blob tx from rpc again + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // assert that it was converted to eip7594 + assert!(envelope.as_eip4844().unwrap().tx().sidecar().unwrap().is_eip7594()); + // validate sidecar + TransactionTestContext::validate_sidecar(envelope); + + // submit the Prague payload + node.update_forkchoice(genesis_hash, node.submit_payload(prague_payload).await?).await?; + + // Build first Osaka payload + node.payload.timestamp = osaka_timestamp - 1; + let osaka_payload = node.new_payload().await?; + + // Assert that it includes the second blob tx with eip7594 sidecar + assert!(osaka_payload.block().body().transactions().any(|tx| *tx.hash() == blob_tx_hash)); + assert!(matches!(osaka_payload.sidecars(), BlobSidecars::Eip7594(_))); + + node.update_forkchoice(genesis_hash, node.submit_payload(osaka_payload).await?).await?; + + Ok(()) +} diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index 9be184bc9c0..a261f02c756 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -3,7 +3,8 @@ use crate::{BuilderContext, FullNodeTypes}; use alloy_primitives::Address; use reth_chain_state::CanonStateSubscriptions; -use reth_node_api::TxTy; +use reth_chainspec::EthereumHardforks; +use reth_node_api::{NodeTypes, TxTy}; use reth_transaction_pool::{ blobstore::DiskFileBlobStore, CoinbaseTipOrdering, PoolConfig, PoolTransaction, SubPoolLimit, TransactionPool, TransactionValidationTaskExecutor, TransactionValidator, @@ -125,8 +126,9 @@ impl<'a, Node: FullNodeTypes, V> TxPoolBuilder<'a, Node, V> { } } -impl<'a, Node: FullNodeTypes, V> TxPoolBuilder<'a, Node, TransactionValidationTaskExecutor> +impl<'a, Node, V> TxPoolBuilder<'a, Node, TransactionValidationTaskExecutor> where + Node: FullNodeTypes>, V: TransactionValidator + 'static, V::Transaction: PoolTransaction> + reth_transaction_pool::EthPoolTransaction, @@ -227,7 +229,7 @@ fn 
spawn_pool_maintenance_task( pool_config: &PoolConfig, ) -> eyre::Result<()> where - Node: FullNodeTypes, + Node: FullNodeTypes>, Pool: reth_transaction_pool::TransactionPoolExt + Clone + 'static, Pool::Transaction: PoolTransaction>, { @@ -259,7 +261,7 @@ pub fn spawn_maintenance_tasks( pool_config: &PoolConfig, ) -> eyre::Result<()> where - Node: FullNodeTypes, + Node: FullNodeTypes>, Pool: reth_transaction_pool::TransactionPoolExt + Clone + 'static, Pool::Transaction: PoolTransaction>, { diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 732d55d0c3f..aa0366341a6 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -1,11 +1,12 @@ //! Support for maintaining the state of the transaction pool use crate::{ - blobstore::{BlobStoreCanonTracker, BlobStoreUpdates}, + blobstore::{BlobSidecarConverter, BlobStoreCanonTracker, BlobStoreUpdates}, error::PoolError, metrics::MaintainPoolMetrics, traits::{CanonicalStateUpdate, EthPoolTransaction, TransactionPool, TransactionPoolExt}, - BlockInfo, PoolTransaction, PoolUpdateKind, TransactionOrigin, + AllPoolTransactions, BlobTransactionSidecarVariant, BlockInfo, PoolTransaction, PoolUpdateKind, + TransactionOrigin, }; use alloy_consensus::{transaction::TxHashRef, BlockHeader, Typed2718}; use alloy_eips::{BlockNumberOrTag, Decodable2718, Encodable2718}; @@ -16,7 +17,7 @@ use futures_util::{ FutureExt, Stream, StreamExt, }; use reth_chain_state::CanonStateNotification; -use reth_chainspec::{ChainSpecProvider, EthChainSpec}; +use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_execution_types::ChangedAccount; use reth_fs_util::FsPathError; use reth_primitives_traits::{ @@ -103,12 +104,12 @@ where N: NodePrimitives, Client: StateProviderFactory + BlockReaderIdExt
- + ChainSpecProvider> + + ChainSpecProvider + EthereumHardforks> + Clone + 'static, P: TransactionPoolExt> + 'static, St: Stream> + Send + Unpin + 'static, - Tasks: TaskSpawner + 'static, + Tasks: TaskSpawner + Clone + 'static, { async move { maintain_transaction_pool(client, pool, events, task_spawner, config).await; @@ -129,12 +130,12 @@ pub async fn maintain_transaction_pool( N: NodePrimitives, Client: StateProviderFactory + BlockReaderIdExt
- + ChainSpecProvider> + + ChainSpecProvider + EthereumHardforks> + Clone + 'static, P: TransactionPoolExt> + 'static, St: Stream> + Send + Unpin + 'static, - Tasks: TaskSpawner + 'static, + Tasks: TaskSpawner + Clone + 'static, { let metrics = MaintainPoolMetrics::default(); let MaintainPoolConfig { max_update_depth, max_reload_accounts, .. } = config; @@ -494,6 +495,89 @@ pub async fn maintain_transaction_pool( // keep track of mined blob transactions blob_store_tracker.add_new_chain_blocks(&blocks); + + // If Osaka activates in 2 slots, we need to convert blobs to the new format. + if !chain_spec.is_osaka_active_at_timestamp(tip.timestamp()) && + !chain_spec.is_osaka_active_at_timestamp(tip.timestamp().saturating_add(12)) && + chain_spec.is_osaka_active_at_timestamp(tip.timestamp().saturating_add(24)) + { + let pool = pool.clone(); + let spawner = task_spawner.clone(); + let client = client.clone(); + task_spawner.spawn(Box::pin(async move { + // Start converting no earlier than 4 seconds into the current slot to ensure + // that our pool only contains valid transactions for the next block (as + // it's not Osaka yet). + tokio::time::sleep(Duration::from_secs(4)).await; + + let mut interval = tokio::time::interval(Duration::from_secs(1)); + loop { + // Loop and replace blob transactions until we reach the Osaka transition + // block, after which no legacy blobs are going to be accepted. + let last_iteration = + client.latest_header().ok().flatten().is_none_or(|header| { + client + .chain_spec() + .is_osaka_active_at_timestamp(header.timestamp()) + }); + + let AllPoolTransactions { pending, queued } = pool.all_transactions(); + for tx in pending + .into_iter() + .chain(queued) + .filter(|tx| tx.transaction.is_eip4844()) + { + let tx_hash = *tx.transaction.hash(); + + // Fetch sidecar from the pool + let Ok(Some(sidecar)) = pool.get_blob(tx_hash) else { + continue; + }; + // Ensure it is a legacy blob + if !sidecar.is_eip4844() { + continue; + } + // Remove transaction and sidecar from the pool; both are in memory + // now + let Some(tx) = pool.remove_transactions(vec![tx_hash]).pop() else { + continue; + }; + pool.delete_blob(tx_hash); + + let BlobTransactionSidecarVariant::Eip4844(sidecar) = + Arc::unwrap_or_clone(sidecar) + else { + continue; + }; + + let converter = BlobSidecarConverter::new(); + let pool = pool.clone(); + spawner.spawn(Box::pin(async move { + // Convert sidecar to EIP-7594 format + let Some(sidecar) = converter.convert(sidecar).await else { + return; + }; + + // Re-insert transaction with the new sidecar + let origin = tx.origin; + let Some(tx) = EthPoolTransaction::try_from_eip4844( + tx.transaction.clone_into_consensus(), + sidecar.into(), + ) else { + return; + }; + let _ = pool.add_transaction(origin, tx).await; + })); + } + + if last_iteration { + break; + } + + interval.tick().await; + } + })); + } } } } diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 9eab8767d6d..038c820bfe9 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -39,7 +39,7 @@ use std::{ atomic::{AtomicBool, AtomicU64}, Arc, }, - time::Instant, + time::{Instant, SystemTime}, }; use tokio::sync::Mutex; @@ -673,7 +673,7 @@ where Eip4844PoolTransactionError::UnexpectedEip4844SidecarAfterOsaka, )) } - } else if sidecar.is_eip7594() { + } else if sidecar.is_eip7594() && !self.allow_7594_sidecars() { return Err(InvalidPoolTransactionError::Eip4844(
Eip4844PoolTransactionError::UnexpectedEip7594SidecarBeforeOsaka, )) @@ -745,6 +745,10 @@ where self.fork_tracker.osaka.store(true, std::sync::atomic::Ordering::Relaxed); } + self.fork_tracker + .tip_timestamp + .store(new_tip_block.timestamp(), std::sync::atomic::Ordering::Relaxed); + if let Some(blob_params) = self.chain_spec().blob_params_at_timestamp(new_tip_block.timestamp()) { @@ -759,6 +763,24 @@ where fn max_gas_limit(&self) -> u64 { self.block_gas_limit.load(std::sync::atomic::Ordering::Relaxed) } + + /// Returns whether EIP-7594 sidecars are allowed + fn allow_7594_sidecars(&self) -> bool { + let tip_timestamp = self.fork_tracker.tip_timestamp(); + + // If next block is Osaka, allow 7594 sidecars + if self.chain_spec().is_osaka_active_at_timestamp(tip_timestamp.saturating_add(12)) { + true + } else if self.chain_spec().is_osaka_active_at_timestamp(tip_timestamp.saturating_add(24)) { + let current_timestamp = + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); + + // Allow after 4 seconds into last non-Osaka slot + current_timestamp >= tip_timestamp.saturating_add(4) + } else { + false + } + } } impl TransactionValidator for EthTransactionValidator @@ -811,6 +833,8 @@ pub struct EthTransactionValidatorBuilder { prague: bool, /// Fork indicator whether we are in the Osaka hardfork. osaka: bool, + /// Timestamp of the tip block. + tip_timestamp: u64, /// Max blob count at the block's timestamp. max_blob_count: u64, /// Whether using EIP-2718 type transactions is allowed @@ -885,6 +909,8 @@ impl EthTransactionValidatorBuilder { // osaka not yet activated osaka: false, + tip_timestamp: 0, + // max blob count is prague by default max_blob_count: BlobParams::prague().max_blobs_per_tx, @@ -1012,6 +1038,7 @@ impl EthTransactionValidatorBuilder { self.cancun = self.client.chain_spec().is_cancun_active_at_timestamp(timestamp); self.prague = self.client.chain_spec().is_prague_active_at_timestamp(timestamp); self.osaka = self.client.chain_spec().is_osaka_active_at_timestamp(timestamp); + self.tip_timestamp = timestamp; self.max_blob_count = self .client .chain_spec() @@ -1072,6 +1099,7 @@ impl EthTransactionValidatorBuilder { cancun, prague, osaka, + tip_timestamp, eip2718, eip1559, eip4844, @@ -1094,6 +1122,7 @@ impl EthTransactionValidatorBuilder { cancun: AtomicBool::new(cancun), prague: AtomicBool::new(prague), osaka: AtomicBool::new(osaka), + tip_timestamp: AtomicU64::new(tip_timestamp), max_blob_count: AtomicU64::new(max_blob_count), }; @@ -1175,6 +1204,8 @@ pub struct ForkTracker { pub osaka: AtomicBool, /// Tracks max blob count per transaction at the block's timestamp. pub max_blob_count: AtomicU64, + /// Tracks the timestamp of the tip block. + pub tip_timestamp: AtomicU64, } impl ForkTracker { @@ -1198,6 +1229,11 @@ impl ForkTracker { self.osaka.load(std::sync::atomic::Ordering::Relaxed) } + /// Returns the timestamp of the tip block. + pub fn tip_timestamp(&self) -> u64 { + self.tip_timestamp.load(std::sync::atomic::Ordering::Relaxed) + } + /// Returns the max allowed blob count per transaction. 
pub fn max_blob_count(&self) -> u64 { self.max_blob_count.load(std::sync::atomic::Ordering::Relaxed) @@ -1272,6 +1308,7 @@ mod tests { cancun: false.into(), prague: false.into(), osaka: false.into(), + tip_timestamp: 0.into(), max_blob_count: 0.into(), }; From 4a32bc0fe5ddc70c49384219c9a939485b42436b Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Fri, 17 Oct 2025 22:20:12 +0100 Subject: [PATCH 109/371] feat(engine): improve payload validator tracing spans (#18960) Co-authored-by: Claude Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> --- crates/engine/tree/src/chain.rs | 2 +- crates/engine/tree/src/tree/cached_state.rs | 18 ++- crates/engine/tree/src/tree/metrics.rs | 2 +- crates/engine/tree/src/tree/mod.rs | 15 ++- .../tree/src/tree/payload_processor/mod.rs | 26 +++- .../src/tree/payload_processor/multiproof.rs | 5 +- .../src/tree/payload_processor/prewarm.rs | 40 +++++- .../src/tree/payload_processor/sparse_trie.rs | 28 ++++- .../engine/tree/src/tree/payload_validator.rs | 115 +++++++++++------- crates/net/ecies/src/codec.rs | 4 +- .../src/segments/user/account_history.rs | 2 +- .../prune/prune/src/segments/user/receipts.rs | 2 +- .../src/segments/user/receipts_by_logs.rs | 2 +- .../src/segments/user/sender_recovery.rs | 2 +- .../src/segments/user/storage_history.rs | 2 +- .../src/segments/user/transaction_lookup.rs | 2 +- crates/rpc/ipc/src/server/ipc.rs | 4 +- crates/rpc/ipc/src/server/mod.rs | 2 +- crates/rpc/rpc/src/engine.rs | 2 +- crates/trie/db/src/state.rs | 3 +- crates/trie/parallel/src/proof_task.rs | 2 +- crates/trie/sparse-parallel/src/trie.rs | 18 ++- crates/trie/sparse/Cargo.toml | 2 +- crates/trie/sparse/src/state.rs | 15 ++- crates/trie/sparse/src/trie.rs | 14 ++- crates/trie/trie/src/hashed_cursor/mock.rs | 4 +- crates/trie/trie/src/node_iter.rs | 3 +- crates/trie/trie/src/trie_cursor/mock.rs | 8 +- crates/trie/trie/src/walker.rs | 6 +- 29 files changed, 249 insertions(+), 101 deletions(-) diff --git a/crates/engine/tree/src/chain.rs b/crates/engine/tree/src/chain.rs index e2893bb976a..d1e63a6b3d9 100644 --- a/crates/engine/tree/src/chain.rs +++ b/crates/engine/tree/src/chain.rs @@ -71,7 +71,7 @@ where /// Internal function used to advance the chain. /// /// Polls the `ChainOrchestrator` for the next event. 
- #[tracing::instrument(level = "debug", name = "ChainOrchestrator::poll", skip(self, cx))] + #[tracing::instrument(name = "ChainOrchestrator::poll", skip(self, cx))] fn poll_next_event(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index ffd7f49c6fc..3e9cda38f13 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -18,7 +18,7 @@ use reth_trie::{ MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; use std::{sync::Arc, time::Duration}; -use tracing::trace; +use tracing::{debug_span, instrument, trace}; pub(crate) type Cache = mini_moka::sync::Cache; @@ -354,6 +354,7 @@ impl ExecutionCache { } /// Invalidates the storage for all addresses in the set + #[instrument(level = "debug", target = "engine::tree", skip_all, fields(accounts = addresses.len()))] pub(crate) fn invalidate_storages(&self, addresses: HashSet<&Address>) { // NOTE: this must collect because the invalidate function should not be called while we // hold an iter for it @@ -385,12 +386,25 @@ impl ExecutionCache { /// ## Error Handling /// /// Returns an error if the state updates are inconsistent and should be discarded. + #[instrument(level = "debug", target = "engine::tree", skip_all)] pub(crate) fn insert_state(&self, state_updates: &BundleState) -> Result<(), ()> { + let _enter = + debug_span!(target: "engine::tree", "contracts", len = state_updates.contracts.len()) + .entered(); // Insert bytecodes for (code_hash, bytecode) in &state_updates.contracts { self.code_cache.insert(*code_hash, Some(Bytecode(bytecode.clone()))); } - + drop(_enter); + + let _enter = debug_span!( + target: "engine::tree", + "accounts", + accounts = state_updates.state.len(), + storages = + state_updates.state.values().map(|account| account.storage.len()).sum::() + ) + .entered(); let mut invalidated_accounts = HashSet::default(); for (addr, account) in &state_updates.state { // If the account was not modified, as in not changed and not destroyed, then we have diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index c014d8ba15e..1d1e208b0a6 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -79,7 +79,7 @@ impl EngineApiMetrics { for tx in transactions { let tx = tx?; let span = - debug_span!(target: "engine::tree", "execute_tx", tx_hash=?tx.tx().tx_hash()); + debug_span!(target: "engine::tree", "execute tx", tx_hash=?tx.tx().tx_hash()); let _enter = span.enter(); trace!(target: "engine::tree", "Executing transaction"); executor.execute_transaction(tx)?; diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index e66b2a8892e..a189b643f98 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -496,7 +496,12 @@ where /// /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and /// returns an error if an internal error occurred. 
- #[instrument(level = "trace", skip_all, fields(block_hash = %payload.block_hash(), block_num = %payload.block_number(),), target = "engine::tree")] + #[instrument( + level = "debug", + target = "engine::tree", + skip_all, + fields(block_hash = %payload.block_hash(), block_num = %payload.block_number()), + )] fn on_new_payload( &mut self, payload: T::ExecutionData, @@ -577,6 +582,7 @@ where /// - `Valid`: Payload successfully validated and inserted /// - `Syncing`: Parent missing, payload buffered for later /// - Error status: Payload is invalid + #[instrument(level = "debug", target = "engine::tree", skip_all)] fn try_insert_payload( &mut self, payload: T::ExecutionData, @@ -970,7 +976,7 @@ where /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). /// /// Returns an error if an internal error occurred like a database error. - #[instrument(level = "trace", skip_all, fields(head = % state.head_block_hash, safe = % state.safe_block_hash,finalized = % state.finalized_block_hash), target = "engine::tree")] + #[instrument(level = "debug", target = "engine::tree", skip_all, fields(head = % state.head_block_hash, safe = % state.safe_block_hash,finalized = % state.finalized_block_hash))] fn on_forkchoice_updated( &mut self, state: ForkchoiceState, @@ -1972,7 +1978,7 @@ where } /// Attempts to connect any buffered blocks that are connected to the given parent hash. - #[instrument(level = "trace", skip(self), target = "engine::tree")] + #[instrument(level = "debug", target = "engine::tree", skip(self))] fn try_connect_buffered_blocks( &mut self, parent: BlockNumHash, @@ -2281,7 +2287,7 @@ where /// Returns an event with the appropriate action to take, such as: /// - download more missing blocks /// - try to canonicalize the target if the `block` is the tracked target (head) block. - #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number(),), target = "engine::tree")] + #[instrument(level = "debug", target = "engine::tree", skip_all, fields(block_hash = %block.hash(), block_num = %block.number()))] fn on_downloaded_block( &mut self, block: RecoveredBlock, @@ -2387,6 +2393,7 @@ where /// Returns `InsertPayloadOk::Inserted(BlockStatus::Valid)` on successful execution, /// `InsertPayloadOk::AlreadySeen` if the block already exists, or /// `InsertPayloadOk::Inserted(BlockStatus::Disconnected)` if parent state is missing. + #[instrument(level = "debug", target = "engine::tree", skip_all, fields(block_id))] fn insert_block_or_payload( &mut self, block_id: BlockWithParent, diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index d2e48a49899..8d6230dd82f 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -45,7 +45,7 @@ use std::sync::{ mpsc::{self, channel, Sender}, Arc, }; -use tracing::{debug, instrument, warn}; +use tracing::{debug, debug_span, instrument, warn}; mod configured_sparse_trie; pub mod executor; @@ -167,6 +167,12 @@ where /// This returns a handle to await the final state root and to interact with the tasks (e.g. 
/// canceling) #[allow(clippy::type_complexity)] + #[instrument( + level = "debug", + target = "engine::tree::payload_processor", + name = "payload processor", + skip_all + )] pub fn spawn>( &mut self, env: ExecutionEnv, @@ -236,7 +242,9 @@ where ); // spawn multi-proof task + let span = tracing::Span::current(); self.executor.spawn_blocking(move || { + let _enter = span.entered(); multi_proof_task.run(); }); @@ -257,6 +265,7 @@ where /// Spawns a task that exclusively handles cache prewarming for transaction execution. /// /// Returns a [`PayloadHandle`] to communicate with the task. + #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] pub(super) fn spawn_cache_exclusive>( &self, env: ExecutionEnv, @@ -353,7 +362,9 @@ where // spawn pre-warm task { let to_prewarm_task = to_prewarm_task.clone(); + let span = debug_span!(target: "engine::tree::payload_processor", "prewarm task"); self.executor.spawn_blocking(move || { + let _enter = span.entered(); prewarm_task.run(transactions, to_prewarm_task); }); } @@ -370,7 +381,7 @@ where /// /// If the given hash is different then what is recently cached, then this will create a new /// instance. - #[instrument(target = "engine::caching", skip(self))] + #[instrument(level = "debug", target = "engine::caching", skip(self))] fn cache_for(&self, parent_hash: B256) -> SavedCache { if let Some(cache) = self.execution_cache.get_cache_for(parent_hash) { debug!("reusing execution cache"); @@ -383,6 +394,7 @@ where } /// Spawns the [`SparseTrieTask`] for this payload processor. + #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] fn spawn_sparse_trie_task( &self, sparse_trie_rx: mpsc::Receiver, @@ -421,13 +433,18 @@ where sparse_state_trie, ); + let span = tracing::Span::current(); self.executor.spawn_blocking(move || { + let _enter = span.entered(); + let (result, trie) = task.run(); // Send state root computation result let _ = state_root_tx.send(result); - // Clear the SparseStateTrie and replace it back into the mutex _after_ sending results - // to the next step, so that time spent clearing doesn't block the step after this one. + // Clear the SparseStateTrie and replace it back into the mutex _after_ sending + // results to the next step, so that time spent clearing doesn't block the step after + // this one. + let _enter = debug_span!(target: "engine::tree::payload_processor", "clear").entered(); cleared_sparse_trie.lock().replace(ClearedSparseStateTrie::from_state_trie(trie)); }); } @@ -452,6 +469,7 @@ impl PayloadHandle { /// # Panics /// /// If payload processing was started without background tasks. + #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] pub fn state_root(&mut self) -> Result { self.state_root .take() diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index a528b759570..163714483fd 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -32,7 +32,7 @@ use std::{ }, time::{Duration, Instant}, }; -use tracing::{debug, error, trace}; +use tracing::{debug, error, instrument, trace}; /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the /// state. @@ -718,6 +718,7 @@ impl MultiProofTask { /// Handles request for proof prefetch. /// /// Returns a number of proofs that were spawned. 
+ #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip_all, fields(accounts = targets.len()))] fn on_prefetch_proof(&mut self, targets: MultiProofTargets) -> u64 { let proof_targets = self.get_prefetch_proof_targets(targets); self.fetched_proof_targets.extend_ref(&proof_targets); @@ -844,6 +845,7 @@ impl MultiProofTask { /// Handles state updates. /// /// Returns a number of proofs that were spawned. + #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip(self, update), fields(accounts = update.len()))] fn on_state_update(&mut self, source: StateChangeSource, update: EvmState) -> u64 { let hashed_state_update = evm_state_to_hashed_post_state(update); @@ -973,6 +975,7 @@ impl MultiProofTask { /// currently being calculated, or if there are any pending proofs in the proof sequencer /// left to be revealed by checking the pending tasks. /// 6. This task exits after all pending proofs are processed. + #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip_all)] pub(crate) fn run(mut self) { // TODO convert those into fields let mut prefetch_proofs_requested = 0; diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 44293614d3d..de8a88a167b 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -39,7 +39,7 @@ use std::{ }, time::Instant, }; -use tracing::{debug, trace, warn}; +use tracing::{debug, debug_span, instrument, trace, warn}; /// A wrapper for transactions that includes their index in the block. #[derive(Clone)] @@ -139,8 +139,11 @@ where let ctx = self.ctx.clone(); let max_concurrency = self.max_concurrency; let transaction_count_hint = self.transaction_count_hint; + let span = tracing::Span::current(); self.executor.spawn_blocking(move || { + let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", parent: span, "spawn_all").entered(); + let (done_tx, done_rx) = mpsc::channel(); let mut executing = 0usize; @@ -157,8 +160,8 @@ where }; // Only spawn initial workers as needed - for _ in 0..workers_needed { - handles.push(ctx.spawn_worker(&executor, actions_tx.clone(), done_tx.clone())); + for i in 0..workers_needed { + handles.push(ctx.spawn_worker(i, &executor, actions_tx.clone(), done_tx.clone())); } let mut tx_index = 0usize; @@ -248,6 +251,7 @@ where /// the new, warmed cache to be inserted. /// /// This method is called from `run()` only after all execution tasks are complete. + #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn save_cache(self, state: BundleState) { let start = Instant::now(); @@ -284,6 +288,12 @@ where /// /// This will execute the transactions until all transactions have been processed or the task /// was cancelled. + #[instrument( + level = "debug", + target = "engine::tree::payload_processor::prewarm", + name = "prewarm", + skip_all + )] pub(super) fn run( self, pending: mpsc::Receiver + Clone + Send + 'static>, @@ -364,6 +374,7 @@ where { /// Splits this context into an evm, an evm config, metrics, and the atomic bool for terminating /// execution. 
+ #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn evm_for_ctx(self) -> Option<(EvmFor, PrewarmMetrics, Arc)> { let Self { env, @@ -380,7 +391,7 @@ where Ok(provider) => provider, Err(err) => { trace!( - target: "engine::tree", + target: "engine::tree::payload_processor::prewarm", %err, "Failed to build state provider in prewarm thread" ); @@ -429,6 +440,7 @@ where /// /// Note: There are no ordering guarantees; this does not reflect the state produced by /// sequential execution. + #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn transact_batch( self, txs: mpsc::Receiver>, @@ -439,7 +451,15 @@ where { let Some((mut evm, metrics, terminate_execution)) = self.evm_for_ctx() else { return }; - while let Ok(IndexedTransaction { index, tx }) = txs.recv() { + while let Ok(IndexedTransaction { index, tx }) = { + let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", "recv tx") + .entered(); + txs.recv() + } { + let _enter = + debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm tx", index, tx_hash=%tx.tx().tx_hash()) + .entered(); + // If the task was cancelled, stop execution, send an empty result to notify the task, // and exit. if terminate_execution.load(Ordering::Relaxed) { @@ -467,12 +487,18 @@ where }; metrics.execution_duration.record(start.elapsed()); + drop(_enter); + // Only send outcome for transactions after the first txn // as the main execution will be just as fast if index > 0 { + let _enter = + debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm outcome", index, tx_hash=%tx.tx().tx_hash()) + .entered(); let (targets, storage_targets) = multiproof_targets_from_state(res.state); metrics.prefetch_storage_targets.record(storage_targets as f64); let _ = sender.send(PrewarmTaskEvent::Outcome { proof_targets: Some(targets) }); + drop(_enter); } metrics.total_runtime.record(start.elapsed()); @@ -485,6 +511,7 @@ where /// Spawns a worker task for transaction execution and returns its sender channel. fn spawn_worker( &self, + idx: usize, executor: &WorkloadExecutor, actions_tx: Sender, done_tx: Sender<()>, @@ -494,8 +521,11 @@ where { let (tx, rx) = mpsc::channel(); let ctx = self.clone(); + let span = + debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm worker", idx); executor.spawn_blocking(move || { + let _enter = span.entered(); ctx.transact_batch(rx, actions_tx, done_tx); }); diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index c16f7b6e4f4..6302abde5fb 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -15,7 +15,7 @@ use std::{ sync::mpsc, time::{Duration, Instant}, }; -use tracing::{debug, trace, trace_span}; +use tracing::{debug, debug_span, instrument, trace}; /// A task responsible for populating the sparse trie. pub(super) struct SparseTrieTask @@ -61,6 +61,11 @@ where /// /// - State root computation outcome. /// - `SparseStateTrie` that needs to be cleared and reused to avoid reallocations. 
+ #[instrument( + level = "debug", + target = "engine::tree::payload_processor::sparse_trie", + skip_all + )] pub(super) fn run( mut self, ) -> (Result, SparseStateTrie) { @@ -80,10 +85,14 @@ where while let Ok(mut update) = self.updates.recv() { num_iterations += 1; let mut num_updates = 1; + let _enter = + debug_span!(target: "engine::tree::payload_processor::sparse_trie", "drain updates") + .entered(); while let Ok(next) = self.updates.try_recv() { update.extend(next); num_updates += 1; } + drop(_enter); debug!( target: "engine::root", @@ -130,6 +139,7 @@ pub struct StateRootComputeOutcome { } /// Updates the sparse trie with the given proofs and state, and returns the elapsed time. +#[instrument(level = "debug", target = "engine::tree::payload_processor::sparse_trie", skip_all)] pub(crate) fn update_sparse_trie( trie: &mut SparseStateTrie, SparseTrieUpdate { mut state, multiproof }: SparseTrieUpdate, @@ -155,6 +165,7 @@ where ); // Update storage slots with new values and calculate storage roots. + let span = tracing::Span::current(); let (tx, rx) = mpsc::channel(); state .storages @@ -162,14 +173,16 @@ where .map(|(address, storage)| (address, storage, trie.take_storage_trie(&address))) .par_bridge() .map(|(address, storage, storage_trie)| { - let span = trace_span!(target: "engine::root::sparse", "Storage trie", ?address); - let _enter = span.enter(); - trace!(target: "engine::root::sparse", "Updating storage"); + let _enter = + debug_span!(target: "engine::tree::payload_processor::sparse_trie", parent: span.clone(), "storage trie", ?address) + .entered(); + + trace!(target: "engine::tree::payload_processor::sparse_trie", "Updating storage"); let storage_provider = blinded_provider_factory.storage_node_provider(address); let mut storage_trie = storage_trie.ok_or(SparseTrieErrorKind::Blind)?; if storage.wiped { - trace!(target: "engine::root::sparse", "Wiping storage"); + trace!(target: "engine::tree::payload_processor::sparse_trie", "Wiping storage"); storage_trie.wipe()?; } @@ -187,7 +200,7 @@ where continue; } - trace!(target: "engine::root::sparse", ?slot_nibbles, "Updating storage slot"); + trace!(target: "engine::tree::payload_processor::sparse_trie", ?slot_nibbles, "Updating storage slot"); storage_trie.update_leaf( slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec(), @@ -219,6 +232,9 @@ where let mut removed_accounts = Vec::new(); // Update account storage roots + let _enter = + tracing::debug_span!(target: "engine::tree::payload_processor::sparse_trie", "account trie") + .entered(); for result in rx { let (address, storage_trie) = result?; trie.insert_storage_trie(address, storage_trie); diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 4a3d45af8fd..253c6c0e183 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -44,9 +44,8 @@ use reth_trie::{ }; use reth_trie_db::DatabaseHashedPostState; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; -use revm::context::Block; use std::{collections::HashMap, sync::Arc, time::Instant}; -use tracing::{debug, debug_span, error, info, trace, warn}; +use tracing::{debug, debug_span, error, info, instrument, trace, warn}; /// Context providing access to tree state during validation. 
/// @@ -289,7 +288,7 @@ where V: PayloadValidator, { debug!( - target: "engine::tree", + target: "engine::tree::payload_validator", ?execution_err, block = ?input.num_hash(), "Block execution failed, checking for header validation errors" @@ -324,6 +323,15 @@ where /// - Block execution /// - State root computation /// - Fork detection + #[instrument( + level = "debug", + target = "engine::tree::payload_validator", + skip_all, + fields( + parent = ?input.parent_hash(), + block_num_hash = ?input.num_hash() + ) + )] pub fn validate_block_with_state>>( &mut self, input: BlockOrPayload, @@ -366,7 +374,9 @@ where let parent_hash = input.parent_hash(); let block_num_hash = input.num_hash(); - trace!(target: "engine::tree", block=?block_num_hash, parent=?parent_hash, "Fetching block state provider"); + trace!(target: "engine::tree::payload_validator", "Fetching block state provider"); + let _enter = + debug_span!(target: "engine::tree::payload_validator", "state provider").entered(); let Some(provider_builder) = ensure_ok!(self.state_provider_builder(parent_hash, ctx.state())) else { @@ -377,8 +387,8 @@ where ) .into()) }; - let state_provider = ensure_ok!(provider_builder.build()); + drop(_enter); // fetch parent block let Some(parent_block) = ensure_ok!(self.sealed_header_by_hash(parent_hash, ctx.state())) @@ -390,7 +400,9 @@ where .into()) }; - let evm_env = self.evm_env_for(&input).map_err(NewPayloadError::other)?; + let evm_env = debug_span!(target: "engine::tree::payload_validator", "evm env") + .in_scope(|| self.evm_env_for(&input)) + .map_err(NewPayloadError::other)?; let env = ExecutionEnv { evm_env, hash: input.hash(), parent_hash: input.parent_hash() }; @@ -400,8 +412,7 @@ where let strategy = state_root_plan.strategy; debug!( - target: "engine::tree", - block=?block_num_hash, + target: "engine::tree::payload_validator", ?strategy, "Deciding which state root algorithm to run" ); @@ -417,7 +428,6 @@ where persisting_kind, parent_hash, ctx.state(), - block_num_hash, strategy, )); @@ -452,7 +462,7 @@ where block ); - debug!(target: "engine::tree", block=?block_num_hash, "Calculating block state root"); + debug!(target: "engine::tree::payload_validator", "Calculating block state root"); let root_time = Instant::now(); @@ -460,17 +470,17 @@ where match strategy { StateRootStrategy::StateRootTask => { - debug!(target: "engine::tree", block=?block_num_hash, "Using sparse trie state root algorithm"); + debug!(target: "engine::tree::payload_validator", "Using sparse trie state root algorithm"); match handle.state_root() { Ok(StateRootComputeOutcome { state_root, trie_updates }) => { let elapsed = root_time.elapsed(); - info!(target: "engine::tree", ?state_root, ?elapsed, "State root task finished"); + info!(target: "engine::tree::payload_validator", ?state_root, ?elapsed, "State root task finished"); // we double check the state root here for good measure if state_root == block.header().state_root() { maybe_state_root = Some((state_root, trie_updates, elapsed)) } else { warn!( - target: "engine::tree", + target: "engine::tree::payload_validator", ?state_root, block_state_root = ?block.header().state_root(), "State root task returned incorrect state root" @@ -478,12 +488,12 @@ where } } Err(error) => { - debug!(target: "engine::tree", %error, "State root task failed"); + debug!(target: "engine::tree::payload_validator", %error, "State root task failed"); } } } StateRootStrategy::Parallel => { - debug!(target: "engine::tree", block=?block_num_hash, "Using parallel state root algorithm"); + 
debug!(target: "engine::tree::payload_validator", "Using parallel state root algorithm"); match self.compute_state_root_parallel( persisting_kind, block.parent_hash(), @@ -493,8 +503,7 @@ where Ok(result) => { let elapsed = root_time.elapsed(); info!( - target: "engine::tree", - block = ?block_num_hash, + target: "engine::tree::payload_validator", regular_state_root = ?result.0, ?elapsed, "Regular root task finished" @@ -502,7 +511,7 @@ where maybe_state_root = Some((result.0, result.1, elapsed)); } Err(error) => { - debug!(target: "engine::tree", %error, "Parallel state root computation failed"); + debug!(target: "engine::tree::payload_validator", %error, "Parallel state root computation failed"); } } } @@ -519,9 +528,9 @@ where } else { // fallback is to compute the state root regularly in sync if self.config.state_root_fallback() { - debug!(target: "engine::tree", block=?block_num_hash, "Using state root fallback for testing"); + debug!(target: "engine::tree::payload_validator", "Using state root fallback for testing"); } else { - warn!(target: "engine::tree", block=?block_num_hash, ?persisting_kind, "Failed to compute state root in parallel"); + warn!(target: "engine::tree::payload_validator", ?persisting_kind, "Failed to compute state root in parallel"); self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); } @@ -533,7 +542,7 @@ where }; self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); - debug!(target: "engine::tree", ?root_elapsed, block=?block_num_hash, "Calculated state root"); + debug!(target: "engine::tree::payload_validator", ?root_elapsed, "Calculated state root"); // ensure state root matches if state_root != block.header().state_root() { @@ -587,12 +596,12 @@ where /// and block body itself. 
fn validate_block_inner(&self, block: &RecoveredBlock) -> Result<(), ConsensusError> { if let Err(e) = self.consensus.validate_header(block.sealed_header()) { - error!(target: "engine::tree", ?block, "Failed to validate header {}: {e}", block.hash()); + error!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {}: {e}", block.hash()); return Err(e) } if let Err(e) = self.consensus.validate_block_pre_execution(block.sealed_block()) { - error!(target: "engine::tree", ?block, "Failed to validate block {}: {e}", block.hash()); + error!(target: "engine::tree::payload_validator", ?block, "Failed to validate block {}: {e}", block.hash()); return Err(e) } @@ -600,6 +609,7 @@ where } /// Executes a block with the given state provider + #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn execute_block( &mut self, state_provider: S, @@ -614,11 +624,7 @@ where T: PayloadTypes>, Evm: ConfigureEngineEvm, { - let num_hash = NumHash::new(env.evm_env.block_env.number().to(), env.hash); - - let span = debug_span!(target: "engine::tree", "execute_block", num = ?num_hash.number, hash = ?num_hash.hash); - let _enter = span.enter(); - debug!(target: "engine::tree", "Executing block"); + debug!(target: "engine::tree::payload_validator", "Executing block"); let mut db = State::builder() .with_database(StateProviderDatabase::new(&state_provider)) @@ -657,7 +663,7 @@ where )?; let execution_finish = Instant::now(); let execution_time = execution_finish.duration_since(execution_start); - debug!(target: "engine::tree", elapsed = ?execution_time, number=?num_hash.number, "Executed block"); + debug!(target: "engine::tree::payload_validator", elapsed = ?execution_time, "Executed block"); Ok(output) } @@ -669,6 +675,7 @@ where /// Returns `Err(_)` if error was encountered during computation. /// `Err(ProviderError::ConsistentView(_))` can be safely ignored and fallback computation /// should be used instead. + #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn compute_state_root_parallel( &self, persisting_kind: PersistingKind, @@ -709,7 +716,7 @@ where { let start = Instant::now(); - trace!(target: "engine::tree", block=?block.num_hash(), "Validating block consensus"); + trace!(target: "engine::tree::payload_validator", block=?block.num_hash(), "Validating block consensus"); // validate block consensus rules if let Err(e) = self.validate_block_inner(block) { return Err(e.into()) @@ -719,7 +726,7 @@ where if let Err(e) = self.consensus.validate_header_against_parent(block.sealed_header(), parent_block) { - warn!(target: "engine::tree", ?block, "Failed to validate header {} against parent: {e}", block.hash()); + warn!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {} against parent: {e}", block.hash()); return Err(e.into()) } @@ -759,6 +766,12 @@ where /// The method handles strategy fallbacks if the preferred approach fails, ensuring /// block execution always completes with a valid state root. 
#[allow(clippy::too_many_arguments)] + #[instrument( + level = "debug", + target = "engine::tree::payload_validator", + skip_all, + fields(strategy) + )] fn spawn_payload_processor>( &mut self, env: ExecutionEnv, @@ -767,7 +780,6 @@ where persisting_kind: PersistingKind, parent_hash: B256, state: &EngineApiTreeState, - block_num_hash: NumHash, strategy: StateRootStrategy, ) -> Result< ( @@ -821,8 +833,7 @@ where Err((error, txs, env, provider_builder)) => { // Failed to spawn proof workers, fallback to parallel state root error!( - target: "engine::tree", - block=?block_num_hash, + target: "engine::tree::payload_validator", ?error, "Failed to spawn proof workers, falling back to parallel state root" ); @@ -840,8 +851,7 @@ where // prewarming for transaction execution } else { debug!( - target: "engine::tree", - block=?block_num_hash, + target: "engine::tree::payload_validator", "Disabling state root task due to non-empty prefix sets" ); ( @@ -884,7 +894,7 @@ where state: &EngineApiTreeState, ) -> ProviderResult>> { if let Some((historical, blocks)) = state.tree_state.blocks_by_hash(hash) { - debug!(target: "engine::tree", %hash, %historical, "found canonical state for block in memory, creating provider builder"); + debug!(target: "engine::tree::payload_validator", %hash, %historical, "found canonical state for block in memory, creating provider builder"); // the block leads back to the canonical chain return Ok(Some(StateProviderBuilder::new( self.provider.clone(), @@ -895,17 +905,18 @@ where // Check if the block is persisted if let Some(header) = self.provider.header(hash)? { - debug!(target: "engine::tree", %hash, number = %header.number(), "found canonical state for block in database, creating provider builder"); + debug!(target: "engine::tree::payload_validator", %hash, number = %header.number(), "found canonical state for block in database, creating provider builder"); // For persisted blocks, we create a builder that will fetch state directly from the // database return Ok(Some(StateProviderBuilder::new(self.provider.clone(), hash, None))) } - debug!(target: "engine::tree", %hash, "no canonical state found for block"); + debug!(target: "engine::tree::payload_validator", %hash, "no canonical state found for block"); Ok(None) } /// Determines the state root computation strategy based on persistence state and configuration. + #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn plan_state_root_computation>>( &self, input: &BlockOrPayload, @@ -939,7 +950,7 @@ where }; debug!( - target: "engine::tree", + target: "engine::tree::payload_validator", block=?input.num_hash(), ?strategy, "Planned state root computation strategy" @@ -979,6 +990,12 @@ where /// block. /// 3. Once in-memory blocks are collected and optionally filtered, we compute the /// [`HashedPostState`] from them. + #[instrument( + level = "debug", + target = "engine::tree::payload_validator", + skip_all, + fields(persisting_kind, parent_hash) + )] fn compute_trie_input( &self, persisting_kind: PersistingKind, @@ -999,6 +1016,9 @@ where // If the current block is a descendant of the currently persisting blocks, then we need to // filter in-memory blocks, so that none of them are already persisted in the database. + let _enter = + debug_span!(target: "engine::tree::payload_validator", "filter in-memory blocks", len = blocks.len()) + .entered(); if persisting_kind.is_descendant() { // Iterate over the blocks from oldest to newest. 
while let Some(block) = blocks.last() { @@ -1023,11 +1043,13 @@ where parent_hash.into() }; } + drop(_enter); - if blocks.is_empty() { - debug!(target: "engine::tree", %parent_hash, "Parent found on disk"); + let blocks_empty = blocks.is_empty(); + if blocks_empty { + debug!(target: "engine::tree::payload_validator", "Parent found on disk"); } else { - debug!(target: "engine::tree", %parent_hash, %historical, blocks = blocks.len(), "Parent found in memory"); + debug!(target: "engine::tree::payload_validator", %historical, blocks = blocks.len(), "Parent found in memory"); } // Convert the historical block to the block number. @@ -1035,12 +1057,15 @@ where .convert_hash_or_number(historical)? .ok_or_else(|| ProviderError::BlockHashNotFound(historical.as_hash().unwrap()))?; + let _enter = + debug_span!(target: "engine::tree::payload_validator", "revert state", blocks_empty) + .entered(); // Retrieve revert state for historical block. let (revert_state, revert_trie) = if block_number == best_block_number { // We do not check against the `last_block_number` here because // `HashedPostState::from_reverts` / `trie_reverts` only use the database tables, and // not static files. - debug!(target: "engine::tree", block_number, best_block_number, "Empty revert state"); + debug!(target: "engine::tree::payload_validator", block_number, best_block_number, "Empty revert state"); (HashedPostState::default(), TrieUpdatesSorted::default()) } else { let revert_state = HashedPostState::from_reverts::( @@ -1050,7 +1075,7 @@ where .map_err(ProviderError::from)?; let revert_trie = provider.trie_reverts(block_number + 1)?; debug!( - target: "engine::tree", + target: "engine::tree::payload_validator", block_number, best_block_number, accounts = revert_state.accounts.len(), diff --git a/crates/net/ecies/src/codec.rs b/crates/net/ecies/src/codec.rs index b5a10284cf2..c4c45366c66 100644 --- a/crates/net/ecies/src/codec.rs +++ b/crates/net/ecies/src/codec.rs @@ -58,7 +58,7 @@ impl Decoder for ECIESCodec { type Item = IngressECIESValue; type Error = ECIESError; - #[instrument(level = "trace", skip_all, fields(peer=?self.ecies.remote_id, state=?self.state))] + #[instrument(skip_all, fields(peer=?self.ecies.remote_id, state=?self.state))] fn decode(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { loop { match self.state { @@ -150,7 +150,7 @@ impl Decoder for ECIESCodec { impl Encoder for ECIESCodec { type Error = io::Error; - #[instrument(level = "trace", skip(self, buf), fields(peer=?self.ecies.remote_id, state=?self.state))] + #[instrument(skip(self, buf), fields(peer=?self.ecies.remote_id, state=?self.state))] fn encode(&mut self, item: EgressECIESValue, buf: &mut BytesMut) -> Result<(), Self::Error> { match item { EgressECIESValue::Auth => { diff --git a/crates/prune/prune/src/segments/user/account_history.rs b/crates/prune/prune/src/segments/user/account_history.rs index 3c18cd1befc..317337f050e 100644 --- a/crates/prune/prune/src/segments/user/account_history.rs +++ b/crates/prune/prune/src/segments/user/account_history.rs @@ -45,7 +45,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let range = match input.get_next_block_range() { Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/receipts.rs b/crates/prune/prune/src/segments/user/receipts.rs index ecb0f3423be..03faddc1d5b 100644 
--- a/crates/prune/prune/src/segments/user/receipts.rs +++ b/crates/prune/prune/src/segments/user/receipts.rs @@ -42,7 +42,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { crate::segments::receipts::prune(provider, input) } diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index 0849db52518..8fd6d1e73a5 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -45,7 +45,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { // Contract log filtering removes every receipt possible except the ones in the list. So, // for the other receipts it's as if they had a `PruneMode::Distance()` of diff --git a/crates/prune/prune/src/segments/user/sender_recovery.rs b/crates/prune/prune/src/segments/user/sender_recovery.rs index 35ee487203a..9fbad8c428c 100644 --- a/crates/prune/prune/src/segments/user/sender_recovery.rs +++ b/crates/prune/prune/src/segments/user/sender_recovery.rs @@ -37,7 +37,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let tx_range = match input.get_next_tx_num_range(provider)? { Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/storage_history.rs b/crates/prune/prune/src/segments/user/storage_history.rs index ee7447c37da..a4ad37bf789 100644 --- a/crates/prune/prune/src/segments/user/storage_history.rs +++ b/crates/prune/prune/src/segments/user/storage_history.rs @@ -47,7 +47,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let range = match input.get_next_block_range() { Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index e218f623ed5..0055f8abd22 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -38,7 +38,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune( &self, provider: &Provider, diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index 19992ead498..fda19c7cb31 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -27,7 +27,7 @@ pub(crate) struct Batch { // Batch responses must be sent back as a single message so we read the results from each // request in the batch and read the results off of a new channel, `rx_batch`, and then send the // complete batch response back to the client over `tx`. 
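The surrounding hunks all use the `tracing` crate's `#[instrument]` attribute, so here is a minimal, self-contained sketch of that API — assuming only the standard `tracing` and `tracing-subscriber` crates; the function and field names are illustrative, not taken from reth. When no `level` argument is given, the generated span defaults to INFO, while `ret(level = "trace")` still records the return value, but as a separate TRACE-level event:

use tracing::instrument;

// The span is created at the default INFO level (no `level` argument).
// The return value is captured too, but only as a TRACE event.
#[instrument(
    target = "engine::tree::payload_validator",
    skip_all,
    fields(number = block_number),
    ret(level = "trace")
)]
fn validate(block_number: u64) -> bool {
    block_number > 0
}

fn main() {
    // Emit everything down to TRACE so both the span and the `ret` event show up.
    tracing_subscriber::fmt().with_max_level(tracing::Level::TRACE).init();
    validate(42);
}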
-#[instrument(name = "batch", skip(b), level = "TRACE")] +#[instrument(name = "batch", skip(b))] pub(crate) async fn process_batch_request( b: Batch, max_response_body_size: usize, @@ -98,7 +98,7 @@ where } } -#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service), level = "TRACE")] +#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service))] pub(crate) async fn execute_call_with_tracing<'a, S>( req: Request<'a>, rpc_service: &S, diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index b6114938d2b..6e6b092c408 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -443,7 +443,7 @@ struct ProcessConnection<'a, HttpMiddleware, RpcMiddleware> { } /// Spawns the IPC connection onto a new task -#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id), level = "INFO")] +#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id))] fn process_connection( params: ProcessConnection<'_, HttpMiddleware, RpcMiddleware>, ) where diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index a0e0bd30931..7865659ece7 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -16,7 +16,7 @@ use tracing_futures::Instrument; macro_rules! engine_span { () => { - tracing::trace_span!(target: "rpc", "engine") + tracing::info_span!(target: "rpc", "engine") }; } diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 256ee20794e..6d37c5f3413 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -20,7 +20,7 @@ use std::{ collections::HashMap, ops::{RangeBounds, RangeInclusive}, }; -use tracing::debug; +use tracing::{debug, instrument}; /// Extends [`StateRoot`] with operations specific for working with a database transaction. 
pub trait DatabaseStateRoot<'a, TX>: Sized { @@ -226,6 +226,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } impl DatabaseHashedPostState for HashedPostState { + #[instrument(target = "trie::db", skip(tx), fields(range))] fn from_reverts( tx: &TX, range: impl RangeBounds, diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index b66b7bbaa4f..b3269f21fbb 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -693,7 +693,7 @@ where multi_added_removed_keys.unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); let added_removed_keys = multi_added_removed_keys.get_storage(&hashed_address); - let span = tracing::trace_span!( + let span = tracing::info_span!( target: "trie::proof_task", "Storage proof calculation", hashed_address = ?hashed_address, diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 472624f99d7..c49675cf018 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -741,13 +741,24 @@ impl SparseTrieInterface for ParallelSparseTrie { // Update subtrie hashes in parallel { use rayon::iter::{IntoParallelIterator, ParallelIterator}; + use tracing::info_span; + let (tx, rx) = mpsc::channel(); let branch_node_tree_masks = &self.branch_node_tree_masks; let branch_node_hash_masks = &self.branch_node_hash_masks; + let span = tracing::Span::current(); changed_subtries .into_par_iter() .map(|mut changed_subtrie| { + let _enter = info_span!( + target: "trie::sparse::parallel", + parent: span.clone(), + "subtrie", + index = changed_subtrie.index + ) + .entered(); + #[cfg(feature = "metrics")] let start = std::time::Instant::now(); changed_subtrie.subtrie.update_hashes( @@ -1282,6 +1293,7 @@ impl ParallelSparseTrie { /// Drains any [`SparseTrieUpdatesAction`]s from the given subtrie, and applies each action to /// the given `updates` set. If the given set is None then this is a no-op. + #[instrument(target = "trie::sparse::parallel", skip_all)] fn apply_subtrie_update_actions( &mut self, update_actions: impl Iterator, @@ -1305,7 +1317,7 @@ impl ParallelSparseTrie { } /// Updates hashes for the upper subtrie, using nodes from both upper and lower subtries. - #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, ret)] + #[instrument(target = "trie::parallel_sparse", skip_all, ret(level = "trace"))] fn update_upper_subtrie_hashes(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { trace!(target: "trie::parallel_sparse", "Updating upper subtrie hashes"); @@ -1383,6 +1395,7 @@ impl ParallelSparseTrie { /// /// IMPORTANT: The method removes the subtries from `lower_subtries`, and the caller is /// responsible for returning them back into the array. + #[instrument(target = "trie::sparse::parallel", skip_all, fields(prefix_set_len = prefix_set.len()))] fn take_changed_lower_subtries( &mut self, prefix_set: &mut PrefixSet, @@ -1539,6 +1552,7 @@ impl ParallelSparseTrie { /// Return updated subtries back to the trie after executing any actions required on the /// top-level `SparseTrieUpdates`. + #[instrument(target = "trie::sparse::parallel", skip_all)] fn insert_changed_subtries( &mut self, changed_subtries: impl IntoIterator, @@ -2026,7 +2040,7 @@ impl SparseSubtrie { /// # Panics /// /// If the node at the root path does not exist. 
- #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, fields(root = ?self.path), ret)] + #[instrument(target = "trie::parallel_sparse", skip_all, fields(root = ?self.path), ret(level = "trace"))] fn update_hashes( &mut self, prefix_set: &mut PrefixSet, diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 6fac7c5faad..b2c7ee0f566 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-primitives-traits.workspace = true reth-execution-errors.workspace = true reth-trie-common.workspace = true -tracing.workspace = true +tracing = { workspace = true, features = ["attributes"] } alloy-trie.workspace = true # alloy diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index fde4810da57..c9edf9b7cb9 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -18,7 +18,7 @@ use reth_trie_common::{ DecodedMultiProof, DecodedStorageMultiProof, MultiProof, Nibbles, RlpNode, StorageMultiProof, TrieAccount, TrieMask, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE, }; -use tracing::trace; +use tracing::{instrument, trace}; /// Provides type-safe re-use of cleared [`SparseStateTrie`]s, which helps to save allocations /// across payload runs. @@ -208,6 +208,14 @@ where /// Reveal unknown trie paths from decoded multiproof. /// NOTE: This method does not extensively validate the proof. + #[instrument( + target = "trie::sparse", + skip_all, + fields( + account_nodes = multiproof.account_subtree.len(), + storages = multiproof.storages.len() + ) + )] pub fn reveal_decoded_multiproof( &mut self, multiproof: DecodedMultiProof, @@ -532,6 +540,7 @@ where /// Calculates the hashes of subtries. /// /// If the trie has not been revealed, this function does nothing. + #[instrument(target = "trie::sparse", skip_all)] pub fn calculate_subtries(&mut self) { if let SparseTrie::Revealed(trie) = &mut self.state { trie.update_subtrie_hashes(); @@ -584,6 +593,7 @@ where } /// Returns sparse trie root and trie updates if the trie has been revealed. + #[instrument(target = "trie::sparse", skip_all)] pub fn root_with_updates( &mut self, provider_factory: impl TrieNodeProviderFactory, @@ -679,6 +689,7 @@ where /// /// Returns false if the new account info and storage trie are empty, indicating the account /// leaf should be removed. + #[instrument(target = "trie::sparse", skip_all)] pub fn update_account( &mut self, address: B256, @@ -721,6 +732,7 @@ where /// /// Returns false if the new storage root is empty, and the account info was already empty, /// indicating the account leaf should be removed. + #[instrument(target = "trie::sparse", skip_all)] pub fn update_account_storage_root( &mut self, address: B256, @@ -768,6 +780,7 @@ where } /// Remove the account leaf node. + #[instrument(target = "trie::sparse", skip_all)] pub fn remove_account_leaf( &mut self, path: &Nibbles, diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index cbffe5e7563..ce069cc9005 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -24,7 +24,7 @@ use reth_trie_common::{ TrieNode, CHILD_INDEX_RANGE, EMPTY_ROOT_HASH, }; use smallvec::SmallVec; -use tracing::{debug, trace}; +use tracing::{debug, instrument, trace}; /// The level below which the sparse trie hashes are calculated in /// [`SerialSparseTrie::update_subtrie_hashes`]. @@ -175,6 +175,7 @@ impl SparseTrie { /// and resetting the trie to only contain an empty root node. 
/// /// Note: This method will error if the trie is blinded. + #[instrument(target = "trie::sparse", skip_all)] pub fn wipe(&mut self) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; revealed.wipe(); @@ -191,6 +192,7 @@ impl SparseTrie { /// /// - `Some(B256)` with the calculated root hash if the trie is revealed. /// - `None` if the trie is still blind. + #[instrument(target = "trie::sparse", skip_all)] pub fn root(&mut self) -> Option { Some(self.as_revealed_mut()?.root()) } @@ -230,6 +232,7 @@ impl SparseTrie { /// # Errors /// /// Returns an error if the trie is still blind, or if the update fails. + #[instrument(target = "trie::sparse", skip_all)] pub fn update_leaf( &mut self, path: Nibbles, @@ -246,6 +249,7 @@ impl SparseTrie { /// # Errors /// /// Returns an error if the trie is still blind, or if the leaf cannot be removed + #[instrument(target = "trie::sparse", skip_all)] pub fn remove_leaf( &mut self, path: &Nibbles, @@ -573,14 +577,13 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } + #[instrument(target = "trie::sparse::serial", skip(self, provider))] fn update_leaf( &mut self, full_path: Nibbles, value: Vec, provider: P, ) -> SparseTrieResult<()> { - trace!(target: "trie::sparse", ?full_path, ?value, "update_leaf called"); - self.prefix_set.insert(full_path); let existing = self.values.insert(full_path, value); if existing.is_some() { @@ -712,6 +715,7 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } + #[instrument(target = "trie::sparse::serial", skip(self, provider))] fn remove_leaf( &mut self, full_path: &Nibbles, @@ -897,6 +901,7 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } + #[instrument(target = "trie::sparse::serial", skip(self))] fn root(&mut self) -> B256 { // Take the current prefix set let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); @@ -1324,6 +1329,7 @@ impl SerialSparseTrie { /// /// This function identifies all nodes that have changed (based on the prefix set) at the given /// depth and recalculates their RLP representation. + #[instrument(target = "trie::sparse::serial", skip(self))] pub fn update_rlp_node_level(&mut self, depth: usize) { // Take the current prefix set let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); @@ -1369,6 +1375,7 @@ impl SerialSparseTrie { /// specified depth. /// - A `PrefixSetMut` containing paths shallower than the specified depth that still need to be /// tracked for future updates. + #[instrument(target = "trie::sparse::serial", skip(self))] fn get_changed_nodes_at_depth( &self, prefix_set: &mut PrefixSet, @@ -1455,6 +1462,7 @@ impl SerialSparseTrie { /// # Panics /// /// If the node at provided path does not exist. + #[instrument(target = "trie::sparse::serial", skip_all, ret(level = "trace"))] pub fn rlp_node( &mut self, prefix_set: &mut PrefixSet, diff --git a/crates/trie/trie/src/hashed_cursor/mock.rs b/crates/trie/trie/src/hashed_cursor/mock.rs index 895bf852a22..308f05e4c8a 100644 --- a/crates/trie/trie/src/hashed_cursor/mock.rs +++ b/crates/trie/trie/src/hashed_cursor/mock.rs @@ -101,7 +101,7 @@ impl MockHashedCursor { impl HashedCursor for MockHashedCursor { type Value = T; - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn seek(&mut self, key: B256) -> Result, DatabaseError> { // Find the first key that is greater than or equal to the given key. 
let entry = self.values.iter().find_map(|(k, v)| (k >= &key).then(|| (*k, v.clone()))); @@ -115,7 +115,7 @@ impl HashedCursor for MockHashedCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn next(&mut self) -> Result, DatabaseError> { let mut iter = self.values.iter(); // Jump to the first key that has a prefix of the current key if it's set, or to the first diff --git a/crates/trie/trie/src/node_iter.rs b/crates/trie/trie/src/node_iter.rs index 862176c803a..e11cd51f790 100644 --- a/crates/trie/trie/src/node_iter.rs +++ b/crates/trie/trie/src/node_iter.rs @@ -191,11 +191,10 @@ where /// /// NOTE: The iteration will start from the key of the previous hashed entry if it was supplied. #[instrument( - level = "trace", target = "trie::node_iter", skip_all, fields(trie_type = ?self.trie_type), - ret + ret(level = "trace") )] pub fn try_next( &mut self, diff --git a/crates/trie/trie/src/trie_cursor/mock.rs b/crates/trie/trie/src/trie_cursor/mock.rs index 4b0b7f699dc..add2d7ddef3 100644 --- a/crates/trie/trie/src/trie_cursor/mock.rs +++ b/crates/trie/trie/src/trie_cursor/mock.rs @@ -103,7 +103,7 @@ impl MockTrieCursor { } impl TrieCursor for MockTrieCursor { - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn seek_exact( &mut self, key: Nibbles, @@ -119,7 +119,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn seek( &mut self, key: Nibbles, @@ -136,7 +136,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn next(&mut self) -> Result, DatabaseError> { let mut iter = self.trie_nodes.iter(); // Jump to the first key that has a prefix of the current key if it's set, or to the first @@ -155,7 +155,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn current(&mut self) -> Result, DatabaseError> { Ok(self.current_key) } diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index f12bf46f748..0ea466437f5 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -157,7 +157,7 @@ impl> TrieWalker { } /// Returns the next unprocessed key in the trie along with its raw [`Nibbles`] representation. - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] pub fn next_unprocessed_key(&self) -> Option<(B256, Nibbles)> { self.key() .and_then(|key| if self.can_skip_current_node { key.increment() } else { Some(*key) }) @@ -297,7 +297,7 @@ impl> TrieWalker { } /// Consumes the next node in the trie, updating the stack. - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn consume_node(&mut self) -> Result<(), DatabaseError> { let Some((key, node)) = self.node(false)? else { // If no next node is found, clear the stack. @@ -343,7 +343,7 @@ impl> TrieWalker { } /// Moves to the next sibling node in the trie, updating the stack. 
- #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn move_to_next_sibling( &mut self, allow_root_to_child_nibble: bool, From 63f560705cdfd3cc46af57fbec13d2fad9075dcf Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 18 Oct 2025 03:56:56 -0400 Subject: [PATCH 110/371] feat: add capacity metrics for tries (#19117) --- .../configured_sparse_trie.rs | 14 ++++ crates/trie/sparse-parallel/src/lower.rs | 16 ++++ crates/trie/sparse-parallel/src/trie.rs | 25 +++++++ crates/trie/sparse/src/metrics.rs | 39 ++++++++-- crates/trie/sparse/src/state.rs | 74 ++++++++++++++++++- crates/trie/sparse/src/traits.rs | 6 ++ crates/trie/sparse/src/trie.rs | 24 ++++++ 7 files changed, 187 insertions(+), 11 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs index 176cffcd8fa..90e8928dba2 100644 --- a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs @@ -172,4 +172,18 @@ impl SparseTrieInterface for ConfiguredSparseTrie { Self::Parallel(trie) => trie.updates_ref(), } } + + fn node_capacity(&self) -> usize { + match self { + Self::Serial(trie) => trie.node_capacity(), + Self::Parallel(trie) => trie.node_capacity(), + } + } + + fn value_capacity(&self) -> usize { + match self { + Self::Serial(trie) => trie.value_capacity(), + Self::Parallel(trie) => trie.value_capacity(), + } + } } diff --git a/crates/trie/sparse-parallel/src/lower.rs b/crates/trie/sparse-parallel/src/lower.rs index 449c3a7b29b..b5454dd3970 100644 --- a/crates/trie/sparse-parallel/src/lower.rs +++ b/crates/trie/sparse-parallel/src/lower.rs @@ -106,4 +106,20 @@ impl LowerSparseSubtrie { Self::Revealed(_) | Self::Blind(_) => None, } } + + /// Returns the capacity of any maps containing trie nodes + pub(crate) fn node_capacity(&self) -> usize { + match self { + Self::Revealed(trie) | Self::Blind(Some(trie)) => trie.node_capacity(), + Self::Blind(None) => 0, + } + } + + /// Returns the capacity of any maps containing trie values + pub(crate) fn value_capacity(&self) -> usize { + match self { + Self::Revealed(trie) | Self::Blind(Some(trie)) => trie.value_capacity(), + Self::Blind(None) => 0, + } + } } diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index c49675cf018..b15eb7f4edb 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -873,6 +873,16 @@ impl SparseTrieInterface for ParallelSparseTrie { } } } + + fn node_capacity(&self) -> usize { + self.upper_subtrie.node_capacity() + + self.lower_subtries.iter().map(|trie| trie.node_capacity()).sum::() + } + + fn value_capacity(&self) -> usize { + self.upper_subtrie.value_capacity() + + self.lower_subtries.iter().map(|trie| trie.value_capacity()).sum::() + } } impl ParallelSparseTrie { @@ -2091,6 +2101,16 @@ impl SparseSubtrie { self.nodes.clear(); self.inner.clear(); } + + /// Returns the capacity of the map containing trie nodes. + pub(crate) fn node_capacity(&self) -> usize { + self.nodes.capacity() + } + + /// Returns the capacity of the map containing trie values. 
+ pub(crate) fn value_capacity(&self) -> usize { + self.inner.value_capacity() + } } /// Helper type for [`SparseSubtrie`] to mutably access only a subset of fields from the original @@ -2424,6 +2444,11 @@ impl SparseSubtrieInner { self.values.clear(); self.buffers.clear(); } + + /// Returns the capacity of the map storing leaf values + fn value_capacity(&self) -> usize { + self.values.capacity() + } } /// Represents the outcome of processing a node during leaf insertion diff --git a/crates/trie/sparse/src/metrics.rs b/crates/trie/sparse/src/metrics.rs index 430a831a2f7..3f39e6df6f9 100644 --- a/crates/trie/sparse/src/metrics.rs +++ b/crates/trie/sparse/src/metrics.rs @@ -1,5 +1,6 @@ //! Metrics for the sparse state trie +use metrics::Gauge; use reth_metrics::{metrics::Histogram, Metrics}; /// Metrics for the sparse state trie @@ -15,24 +16,24 @@ pub(crate) struct SparseStateTrieMetrics { pub(crate) multiproof_skipped_storage_nodes: u64, /// Number of total storage nodes, including those that were skipped. pub(crate) multiproof_total_storage_nodes: u64, - /// The actual metrics we will record into the histogram - pub(crate) histograms: SparseStateTrieHistograms, + /// The actual metrics we will record + pub(crate) inner_metrics: SparseStateTrieInnerMetrics, } impl SparseStateTrieMetrics { /// Record the metrics into the histograms pub(crate) fn record(&mut self) { use core::mem::take; - self.histograms + self.inner_metrics .multiproof_skipped_account_nodes .record(take(&mut self.multiproof_skipped_account_nodes) as f64); - self.histograms + self.inner_metrics .multiproof_total_account_nodes .record(take(&mut self.multiproof_total_account_nodes) as f64); - self.histograms + self.inner_metrics .multiproof_skipped_storage_nodes .record(take(&mut self.multiproof_skipped_storage_nodes) as f64); - self.histograms + self.inner_metrics .multiproof_total_storage_nodes .record(take(&mut self.multiproof_total_storage_nodes) as f64); } @@ -56,12 +57,28 @@ impl SparseStateTrieMetrics { pub(crate) const fn increment_total_storage_nodes(&mut self, count: u64) { self.multiproof_total_storage_nodes += count; } + + /// Set the value capacity for the sparse state trie + pub(crate) fn set_value_capacity(&self, capacity: usize) { + self.inner_metrics.value_capacity.set(capacity as f64); + } + + /// Set the node capacity for the sparse state trie + pub(crate) fn set_node_capacity(&self, capacity: usize) { + self.inner_metrics.node_capacity.set(capacity as f64); + } + + /// Set the number of cleared and active storage tries + pub(crate) fn set_storage_trie_metrics(&self, cleared: usize, active: usize) { + self.inner_metrics.cleared_storage_tries.set(cleared as f64); + self.inner_metrics.active_storage_tries.set(active as f64); + } } /// Metrics for the sparse state trie #[derive(Metrics)] #[metrics(scope = "sparse_state_trie")] -pub(crate) struct SparseStateTrieHistograms { +pub(crate) struct SparseStateTrieInnerMetrics { /// Histogram of account nodes that were skipped during a multiproof reveal due to being /// redundant (i.e. they were already revealed) pub(crate) multiproof_skipped_account_nodes: Histogram, @@ -72,4 +89,12 @@ pub(crate) struct SparseStateTrieHistograms { pub(crate) multiproof_skipped_storage_nodes: Histogram, /// Histogram of total storage nodes, including those that were skipped. 
pub(crate) multiproof_total_storage_nodes: Histogram, + /// Gauge for the trie's node capacity + pub(crate) node_capacity: Gauge, + /// Gauge for the trie's value capacity + pub(crate) value_capacity: Gauge, + /// The current number of cleared storage tries. + pub(crate) cleared_storage_tries: Gauge, + /// The number of currently active storage tries, i.e., not cleared + pub(crate) active_storage_tries: Gauge, } diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index c9edf9b7cb9..aef552da3dd 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -585,9 +585,17 @@ where &mut self, provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult { - // record revealed node metrics + // record revealed node metrics and capacity metrics #[cfg(feature = "metrics")] - self.metrics.record(); + { + self.metrics.record(); + self.metrics.set_node_capacity(self.node_capacity()); + self.metrics.set_value_capacity(self.value_capacity()); + self.metrics.set_storage_trie_metrics( + self.storage.cleared_tries.len(), + self.storage.tries.len(), + ); + } Ok(self.revealed_trie_mut(provider_factory)?.root()) } @@ -598,9 +606,17 @@ where &mut self, provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<(B256, TrieUpdates)> { - // record revealed node metrics + // record revealed node metrics and capacity metrics #[cfg(feature = "metrics")] - self.metrics.record(); + { + self.metrics.record(); + self.metrics.set_node_capacity(self.node_capacity()); + self.metrics.set_value_capacity(self.value_capacity()); + self.metrics.set_storage_trie_metrics( + self.storage.cleared_tries.len(), + self.storage.tries.len(), + ); + } let storage_tries = self.storage_trie_updates(); let revealed = self.revealed_trie_mut(provider_factory)?; @@ -805,6 +821,16 @@ where storage_trie.remove_leaf(slot, provider)?; Ok(()) } + + /// The sum of the account trie's node capacity and the storage tries' node capacity + pub fn node_capacity(&self) -> usize { + self.state.node_capacity() + self.storage.total_node_capacity() + } + + /// The sum of the account trie's value capacity and the storage tries' value capacity + pub fn value_capacity(&self) -> usize { + self.state.value_capacity() + self.storage.total_value_capacity() + } } /// The fields of [`SparseStateTrie`] related to storage tries. This is kept separate from the rest @@ -880,6 +906,46 @@ impl StorageTries { .remove(account) .unwrap_or_else(|| self.cleared_revealed_paths.pop().unwrap_or_default()) } + + /// Sums the total node capacity in `cleared_tries` + fn total_cleared_tries_node_capacity(&self) -> usize { + self.cleared_tries.iter().map(|trie| trie.node_capacity()).sum() + } + + /// Sums the total value capacity in `cleared_tries` + fn total_cleared_tries_value_capacity(&self) -> usize { + self.cleared_tries.iter().map(|trie| trie.value_capacity()).sum() + } + + /// Calculates the sum of the active storage trie node capacity, ie the tries in `tries` + fn total_active_tries_node_capacity(&self) -> usize { + self.tries.values().map(|trie| trie.node_capacity()).sum() + } + + /// Calculates the sum of the active storage trie value capacity, ie the tries in `tries` + fn total_active_tries_value_capacity(&self) -> usize { + self.tries.values().map(|trie| trie.value_capacity()).sum() + } + + /// Calculates the sum of active and cleared storage trie node capacity, i.e. 
the sum of + /// * [`StorageTries::total_active_tries_node_capacity`], and + /// * [`StorageTries::total_cleared_tries_node_capacity`] + /// * the default trie's node capacity + fn total_node_capacity(&self) -> usize { + self.total_active_tries_node_capacity() + + self.total_cleared_tries_node_capacity() + + self.default_trie.node_capacity() + } + + /// Calculates the sum of active and cleared storage trie value capacity, i.e. the sum of + /// * [`StorageTries::total_active_tries_value_capacity`], and + /// * [`StorageTries::total_cleared_tries_value_capacity`], and + /// * the default trie's value capacity + fn total_value_capacity(&self) -> usize { + self.total_active_tries_value_capacity() + + self.total_cleared_tries_value_capacity() + + self.default_trie.value_capacity() + } } #[derive(Debug, PartialEq, Eq, Default)] diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 300ac39c1b6..8fdbb78d876 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -222,6 +222,12 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { /// /// This is useful for reusing the trie without needing to reallocate memory. fn clear(&mut self); + + /// This returns the capacity of any inner data structures which store nodes. + fn node_capacity(&self) -> usize; + + /// This returns the capacity of any inner data structures which store leaf values. + fn value_capacity(&self) -> usize; } /// Struct for passing around branch node mask information. diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index ce069cc9005..737da842254 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -259,6 +259,22 @@ impl SparseTrie { revealed.remove_leaf(path, provider)?; Ok(()) } + + /// Returns the allocated capacity for sparse trie nodes. + pub fn node_capacity(&self) -> usize { + match self { + Self::Blind(Some(trie)) | Self::Revealed(trie) => trie.node_capacity(), + _ => 0, + } + } + + /// Returns the allocated capacity for sparse trie values. + pub fn value_capacity(&self) -> usize { + match self { + Self::Blind(Some(trie)) | Self::Revealed(trie) => trie.value_capacity(), + _ => 0, + } + } } /// The representation of revealed sparse trie. @@ -1064,6 +1080,14 @@ impl SparseTrieInterface for SerialSparseTrie { // If we get here, there's no leaf at the target path Ok(LeafLookup::NonExistent) } + + fn node_capacity(&self) -> usize { + self.nodes.capacity() + } + + fn value_capacity(&self) -> usize { + self.values.capacity() + } } impl SerialSparseTrie { From 8d91b9e443644819a59c269355f30480118503bd Mon Sep 17 00:00:00 2001 From: GarmashAlex Date: Sat, 18 Oct 2025 11:34:29 +0300 Subject: [PATCH 111/371] feat(cli): Reuse a single StaticFileProducer across file import chunks (#18964) Co-authored-by: Matthias Seitz --- crates/cli/commands/src/import_core.rs | 5 ++++- crates/optimism/cli/src/commands/import.rs | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/cli/commands/src/import_core.rs b/crates/cli/commands/src/import_core.rs index 2370ebaa039..98f888bb9e3 100644 --- a/crates/cli/commands/src/import_core.rs +++ b/crates/cli/commands/src/import_core.rs @@ -102,6 +102,9 @@ where .sealed_header(provider_factory.last_block_number()?)? .expect("should have genesis"); + let static_file_producer = + StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); + while let Some(file_client) = reader.next_chunk::>(consensus.clone(), Some(sealed_header)).await? 
{ @@ -121,7 +124,7 @@ where provider_factory.clone(), &consensus, Arc::new(file_client), - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()), + static_file_producer.clone(), import_config.no_state, executor.clone(), )?; diff --git a/crates/optimism/cli/src/commands/import.rs b/crates/optimism/cli/src/commands/import.rs index 0fd1d64ac12..74656511af1 100644 --- a/crates/optimism/cli/src/commands/import.rs +++ b/crates/optimism/cli/src/commands/import.rs @@ -71,6 +71,9 @@ impl> ImportOpCommand { .sealed_header(provider_factory.last_block_number()?)? .expect("should have genesis"); + let static_file_producer = + StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); + while let Some(mut file_client) = reader.next_chunk::>(consensus.clone(), Some(sealed_header)).await? { @@ -100,7 +103,7 @@ impl> ImportOpCommand { provider_factory.clone(), &consensus, Arc::new(file_client), - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()), + static_file_producer.clone(), true, OpExecutorProvider::optimism(provider_factory.chain_spec()), )?; From 46228d0a182e8b21da17dfd186279422758c8306 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Sat, 18 Oct 2025 05:41:56 -0300 Subject: [PATCH 112/371] feat(stateless): make UncompressedPublicKey serializable (#19115) Signed-off-by: Ignacio Hagopian --- crates/stateless/src/recover_block.rs | 15 ++++++++++++++- testing/ef-tests/src/cases/blockchain_test.rs | 6 +++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/crates/stateless/src/recover_block.rs b/crates/stateless/src/recover_block.rs index b402cb3724f..15db1fe55e1 100644 --- a/crates/stateless/src/recover_block.rs +++ b/crates/stateless/src/recover_block.rs @@ -2,15 +2,28 @@ use crate::validation::StatelessValidationError; use alloc::vec::Vec; use alloy_consensus::BlockHeader; use alloy_primitives::{Address, Signature, B256}; +use core::ops::Deref; use reth_chainspec::EthereumHardforks; use reth_ethereum_primitives::{Block, TransactionSigned}; use reth_primitives_traits::{Block as _, RecoveredBlock}; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, Bytes}; #[cfg(all(feature = "k256", feature = "secp256k1"))] use k256 as _; /// Serialized uncompressed public key -pub type UncompressedPublicKey = [u8; 65]; +#[serde_as] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UncompressedPublicKey(#[serde_as(as = "Bytes")] pub [u8; 65]); + +impl Deref for UncompressedPublicKey { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} /// Verifies all transactions in a block against a list of public keys and signatures. 
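As a reference for the `serde_with` pattern used for the 65-byte key above, a minimal sketch — assuming only the `serde`, `serde_with`, and (for the round-trip demo) `serde_json` crates; the `DemoKey` name is illustrative. `serde` only derives fixed-size arrays up to 32 elements, so the `Bytes` adapter supplies the implementation for `[u8; 65]`:

use serde::{Deserialize, Serialize};
use serde_with::{serde_as, Bytes};

// `#[serde_as]` must sit above the `derive` so the field attribute expands first.
#[serde_as]
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
struct DemoKey(#[serde_as(as = "Bytes")] [u8; 65]);

fn main() {
    let key = DemoKey([7u8; 65]);
    let json = serde_json::to_string(&key).unwrap();
    let back: DemoKey = serde_json::from_str(&json).unwrap();
    assert_eq!(back, key);
}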
/// diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 5519846458c..c54ef2ad7b1 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -433,7 +433,11 @@ where .map(|(i, tx)| { tx.signature() .recover_from_prehash(&tx.signature_hash()) - .map(|keys| keys.to_encoded_point(false).as_bytes().try_into().unwrap()) + .map(|keys| { + UncompressedPublicKey( + keys.to_encoded_point(false).as_bytes().try_into().unwrap(), + ) + }) .map_err(|e| format!("failed to recover signature for tx #{i}: {e}").into()) }) .collect::, _>>() From a8ef47d14cfb2fc8a5f511f9c0d6574f9b478a0b Mon Sep 17 00:00:00 2001 From: Dmitry <98899785+mdqst@users.noreply.github.com> Date: Sat, 18 Oct 2025 12:21:10 +0300 Subject: [PATCH 113/371] docs: fix wrong label for `--color=auto` (#19110) Co-authored-by: Matthias Seitz --- crates/node/core/src/args/log.rs | 2 +- docs/vocs/docs/pages/cli/reth.mdx | 2 +- docs/vocs/docs/pages/cli/reth/config.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/checksum.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/clear.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/diff.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/drop.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/get.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/list.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/path.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/stats.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/version.mdx | 2 +- docs/vocs/docs/pages/cli/reth/download.mdx | 2 +- docs/vocs/docs/pages/cli/reth/dump-genesis.mdx | 2 +- docs/vocs/docs/pages/cli/reth/export-era.mdx | 2 +- docs/vocs/docs/pages/cli/reth/import-era.mdx | 2 +- docs/vocs/docs/pages/cli/reth/import.mdx | 2 +- docs/vocs/docs/pages/cli/reth/init-state.mdx | 2 +- docs/vocs/docs/pages/cli/reth/init.mdx | 2 +- docs/vocs/docs/pages/cli/reth/node.mdx | 2 +- docs/vocs/docs/pages/cli/reth/p2p.mdx | 2 +- docs/vocs/docs/pages/cli/reth/p2p/body.mdx | 2 +- docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx | 2 +- docs/vocs/docs/pages/cli/reth/p2p/header.mdx | 2 +- docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx | 2 +- docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx | 2 +- docs/vocs/docs/pages/cli/reth/prune.mdx | 2 +- docs/vocs/docs/pages/cli/reth/re-execute.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage/unwind.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx | 2 +- 45 files changed, 45 insertions(+), 45 deletions(-) diff --git a/crates/node/core/src/args/log.rs b/crates/node/core/src/args/log.rs index 99fefc11445..20c60362d7b 100644 --- a/crates/node/core/src/args/log.rs +++ b/crates/node/core/src/args/log.rs @@ -139,7 +139,7 @@ impl LogArgs { 
pub enum ColorMode { /// Colors on Always, - /// Colors on + /// Auto-detect Auto, /// Colors off Never, diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index feb4e8bf50d..0344c23bf2c 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -96,7 +96,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index 6c7cf532995..adc08cd96e6 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -82,7 +82,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 04b779c0f13..91397e0f7e9 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -147,7 +147,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index d4a32382302..834fd42e447 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -99,7 +99,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 5f1f9935b0f..0b64cefb71b 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -91,7 +91,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index 324e6f15ca2..eb4120a34cb 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -90,7 +90,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index 375692f315f..913c6fcc5eb 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -93,7 +93,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index 24c2493d6c8..b5120d7409a 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -126,7 +126,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index 58f4e3771b9..e0a54dcac35 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -89,7 +89,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git 
a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index 93d12e2130e..0d027754d59 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -91,7 +91,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 7f1a6e2a121..2ea1ea48f2e 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -99,7 +99,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index 7ec416f4a4d..21e08493453 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -99,7 +99,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index 7a9ee35145e..55e14d822cd 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -132,7 +132,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index 113fbb21509..3f95c5761d9 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -86,7 +86,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index e4fd2eeb118..d972bcccd54 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -89,7 +89,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index cb100a63e4f..1fd305c4e63 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -99,7 +99,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index 88616890e51..c2b50b8944f 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -86,7 +86,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index f6b75e785d2..1890b95821d 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -144,7 +144,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx 
b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index 48ccb4855a6..4791d561980 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -85,7 +85,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 0f769e77599..430e0948a99 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -150,7 +150,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 71742b25b33..c0d03852de9 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -145,7 +145,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 80621a4deac..b5795a6e1d7 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -146,7 +146,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 2e030fb3c05..1ba1affc519 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -166,7 +166,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index 81be59d6789..11777b1f6e6 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -134,7 +134,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 5d07845a8e1..a752f76b019 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -981,7 +981,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 2fc4aa30849..4138656604d 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -83,7 +83,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index 10efb9b85d7..63f77913f9c 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -303,7 +303,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index 7541ba55651..578932411f6 100644 --- 
a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -94,7 +94,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index f854ab9000b..f9b3276ced0 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -303,7 +303,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index 1d287c7cf09..8bf19d3ecab 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -80,7 +80,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index d4f07885fea..de13e93b561 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -80,7 +80,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 202a14b2e19..bc5d0385697 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -134,7 +134,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index 2bb23f77d23..dc3bcbe4627 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -147,7 +147,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index eed32a608be..85f2559de4d 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -83,7 +83,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 02385552032..923fd5ff955 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -149,7 +149,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 6dbee5df10c..2466edcb966 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -141,7 +141,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index 13819423bfd..c79571b31c3 100644 --- 
a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -98,7 +98,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index 73b24e9ba46..c2480bae00f 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -98,7 +98,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index a5b3c0f4ff6..423771b183b 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -98,7 +98,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index e6deadb2581..211f4e59979 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -98,7 +98,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index d561eb3ce79..9eae5963a17 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -370,7 +370,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index fa62d0546d6..ab5776e2e5b 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -142,7 +142,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index 2799b752fef..500cb3197fb 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -90,7 +90,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index d2056f7e349..4ec68dbb1ec 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -90,7 +90,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] From a718752bf5703778993a9b660c0ea578da41a4bc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 18 Oct 2025 11:22:02 +0200 Subject: [PATCH 114/371] chore: fix clippy (#19118) --- crates/chain-state/src/in_memory.rs | 2 +- crates/net/discv4/src/lib.rs | 2 +- crates/net/network/src/transactions/fetcher.rs | 4 +--- 
crates/node/builder/src/launch/engine.rs | 1 + crates/transaction-pool/src/test_utils/pool.rs | 2 +- 5 files changed, 5 insertions(+), 6 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 5b2f666657b..a6c85538107 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -570,7 +570,7 @@ pub struct BlockState { /// The executed block that determines the state after this block has been executed. block: ExecutedBlock, /// The block's parent block if it exists. - parent: Option>>, + parent: Option>, } impl BlockState { diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 3686d7bf690..83106cbbe6e 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -2402,7 +2402,7 @@ pub enum DiscoveryUpdate { /// Node that was removed from the table Removed(PeerId), /// A series of updates - Batch(Vec), + Batch(Vec), } #[cfg(test)] diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index df088bfbf46..a112e8cac89 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -284,9 +284,7 @@ impl TransactionFetcher { // folds size based on expected response size and adds selected hashes to the request // list and the other hashes to the surplus list - loop { - let Some((hash, metadata)) = hashes_from_announcement_iter.next() else { break }; - + for (hash, metadata) in hashes_from_announcement_iter.by_ref() { let Some((_ty, size)) = metadata else { unreachable!("this method is called upon reception of an eth68 announcement") }; diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 02fb505b077..3b43f5f3299 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -236,6 +236,7 @@ impl EngineNodeLauncher { info!(target: "reth::cli", "Consensus engine initialized"); + #[allow(clippy::needless_continue)] let events = stream_select!( event_sender.new_listener().map(Into::into), pipeline_events.map(Into::into), diff --git a/crates/transaction-pool/src/test_utils/pool.rs b/crates/transaction-pool/src/test_utils/pool.rs index 6af440f086a..ab7bebae2f5 100644 --- a/crates/transaction-pool/src/test_utils/pool.rs +++ b/crates/transaction-pool/src/test_utils/pool.rs @@ -188,7 +188,7 @@ pub(crate) enum Scenario { HigherNonce { onchain: u64, nonce: u64 }, Multi { // Execute multiple test scenarios - scenario: Vec, + scenario: Vec, }, } From 10ed1844e4adefb0531800e589f5ef6b50c21e82 Mon Sep 17 00:00:00 2001 From: GarmashAlex Date: Sat, 18 Oct 2025 18:11:15 +0300 Subject: [PATCH 115/371] fix(net): correct error messages for decrypt and header paths (#19039) --- crates/net/ecies/src/error.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/net/ecies/src/error.rs b/crates/net/ecies/src/error.rs index 9dabfc16183..a93b731fee6 100644 --- a/crates/net/ecies/src/error.rs +++ b/crates/net/ecies/src/error.rs @@ -33,7 +33,7 @@ pub enum ECIESErrorImpl { #[error(transparent)] IO(std::io::Error), /// Error when checking the HMAC tag against the tag on the message being decrypted - #[error("tag check failure in read_header")] + #[error("tag check failure in decrypt_message")] TagCheckDecryptFailed, /// Error when checking the HMAC tag against the tag on the header #[error("tag check failure in read_header")] @@ -47,8 +47,8 @@ pub enum ECIESErrorImpl { /// Error when 
parsing ACK data #[error("invalid ack data")] InvalidAckData, - /// Error when reading the header if its length is <3 - #[error("invalid body data")] + /// Error when reading/parsing the `RLPx` header + #[error("invalid header")] InvalidHeader, /// Error when interacting with secp256k1 #[error(transparent)] From 67bf37babdb1439d911b0fee06305d8a9248f1d1 Mon Sep 17 00:00:00 2001 From: Micke <155267459+reallesee@users.noreply.github.com> Date: Sat, 18 Oct 2025 17:17:43 +0200 Subject: [PATCH 116/371] chore: remove redundant collect in debug trace (#19121) --- crates/rpc/rpc/src/debug.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 62aa625b9f2..066f7180c85 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -169,8 +169,6 @@ where .iter() .map(|tx| tx.recover_signer().map_err(Eth::Error::from_eth_err)) .collect::, _>>()? - .into_iter() - .collect() } else { block .body() @@ -178,8 +176,6 @@ where .iter() .map(|tx| tx.recover_signer_unchecked().map_err(Eth::Error::from_eth_err)) .collect::, _>>()? - .into_iter() - .collect() }; self.trace_block(Arc::new(block.into_recovered_with_signers(senders)), evm_env, opts).await From 2f9281b6c10674525b3bc7820898b770258d2f67 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 19 Oct 2025 18:44:55 +0200 Subject: [PATCH 117/371] chore(deps): weekly `cargo update` (#19126) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> Co-authored-by: Matthias Seitz --- Cargo.lock | 1214 ++++++++++----------- Cargo.toml | 13 +- crates/optimism/rpc/src/eth/receipt.rs | 4 + examples/custom-node/src/primitives/tx.rs | 2 +- 4 files changed, 612 insertions(+), 621 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6427833300e..b406ede9b87 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,15 +2,6 @@ # It is not intended for manual editing. 
version = 4 -[[package]] -name = "addr2line" -version = "0.25.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" -dependencies = [ - "gimli", -] - [[package]] name = "adler2" version = "2.0.1" @@ -59,7 +50,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ "cfg-if", - "getrandom 0.3.3", + "getrandom 0.3.4", "once_cell", "version_check", "zerocopy", @@ -97,9 +88,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7aacbb0ac0f76aaa64d1e1412f778c0574f241e4073b2a3e09c605884c9b90" +checksum = "bf01dd83a1ca5e4807d0ca0223c9615e211ce5db0a9fd1443c2778cacf89b546" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -134,7 +125,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -171,7 +162,7 @@ dependencies = [ "futures", "futures-util", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -203,7 +194,7 @@ dependencies = [ "crc", "rand 0.8.5", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -232,7 +223,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -257,14 +248,14 @@ dependencies = [ "serde", "serde_with", "sha2", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-evm" -version = "0.22.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24a48fa6a4a5a69ae8e46c0ae60851602c5016baa3379d076c76e4c2f3b889f7" +checksum = "dbb19405755c6f94c9bb856f2b1449767074b7e2002e1ab2be0a79b9b28db322" dependencies = [ "alloy-consensus", "alloy-eips", @@ -280,7 +271,7 @@ dependencies = [ "op-alloy-rpc-types-engine", "op-revm", "revm", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -334,7 +325,7 @@ dependencies = [ "http", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] @@ -361,7 +352,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -379,9 +370,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.22.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1e0abe910a26d1b3686f4f6ad58287ce8c7fb85b08603d8c832869f02eb3d79" +checksum = "f059cf29d7f15b3e6581ceb6eda06a16d8ed4b55adc02b0677add3fd381db6bb" dependencies = [ "alloy-consensus", "alloy-eips", @@ -392,7 +383,7 @@ dependencies = [ "op-alloy-consensus", "op-revm", "revm", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -421,9 +412,9 @@ dependencies = [ "const-hex", "derive_more", "foldhash 0.2.0", - "getrandom 0.3.3", + "getrandom 0.3.4", "hashbrown 0.16.0", - "indexmap 2.11.4", + "indexmap 2.12.0", "itoa", "k256", "keccak-asm", @@ -432,7 +423,7 @@ dependencies = [ "proptest-derive 0.6.0", "rand 0.9.2", "ruint", - "rustc-hash 2.1.1", + "rustc-hash", "serde", "sha3", "tiny-keccak", @@ -476,7 +467,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", "url", @@ -524,7 +515,7 @@ checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -616,7 +607,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", "tree_hash", "tree_hash_derive", ] @@ -669,11 +660,11 @@ dependencies = [ "alloy-serde", "alloy-sol-types", "arbitrary", - "itertools 0.13.0", + "itertools 0.14.0", "serde", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -702,7 +693,7 @@ dependencies = [ "alloy-serde", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -741,7 +732,7 @@ dependencies = [ "either", "elliptic-curve", "k256", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -759,7 +750,7 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror 2.0.16", + "thiserror 2.0.17", "zeroize", ] @@ -774,7 +765,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -786,11 +777,11 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.11.4", + "indexmap 2.12.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", "syn-solidity", "tiny-keccak", ] @@ -807,7 +798,7 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", "syn-solidity", ] @@ -849,7 +840,7 @@ dependencies = [ "parking_lot", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tower", "tracing", @@ -940,7 +931,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -960,9 +951,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.20" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" dependencies = [ "anstyle", "anstyle-parse", @@ -975,9 +966,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" [[package]] name = "anstyle-parse" @@ -1025,7 +1016,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -1167,7 +1158,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -1205,7 +1196,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -1294,7 +1285,7 @@ checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -1411,7 +1402,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -1422,7 +1413,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -1460,7 +1451,7 @@ checksum = 
"ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -1477,29 +1468,14 @@ checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973" [[package]] name = "backon" -version = "1.5.2" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "592277618714fbcecda9a02ba7a8781f319d26532a88553bbacc77ba5d2b3a8d" +checksum = "cffb0e931875b666fc4fcb20fee52e9bbd1ef836fd9e9e04ec21555f9f85f7ef" dependencies = [ "fastrand 2.3.0", "tokio", ] -[[package]] -name = "backtrace" -version = "0.3.76" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-link 0.2.0", -] - [[package]] name = "base-x" version = "0.2.11" @@ -1512,6 +1488,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" +[[package]] +name = "base256emoji" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e9430d9a245a77c92176e649af6e275f20839a48389859d1661e9a128d077c" +dependencies = [ + "const-str", + "match-lookup", +] + [[package]] name = "base64" version = "0.13.1" @@ -1579,9 +1565,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.70.1" +version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" +checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" dependencies = [ "bitflags 2.9.4", "cexpr", @@ -1590,16 +1576,16 @@ dependencies = [ "proc-macro2", "quote", "regex", - "rustc-hash 1.1.0", + "rustc-hash", "shlex", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] name = "bindgen" -version = "0.71.1" +version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ "bitflags 2.9.4", "cexpr", @@ -1608,9 +1594,9 @@ dependencies = [ "proc-macro2", "quote", "regex", - "rustc-hash 2.1.1", + "rustc-hash", "shlex", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -1713,9 +1699,9 @@ dependencies = [ "boa_interner", "boa_macros", "boa_string", - "indexmap 2.11.4", + "indexmap 2.12.0", "num-bigint", - "rustc-hash 2.1.1", + "rustc-hash", ] [[package]] @@ -1739,7 +1725,7 @@ dependencies = [ "fast-float2", "hashbrown 0.15.5", "icu_normalizer 1.5.0", - "indexmap 2.11.4", + "indexmap 2.12.0", "intrusive-collections", "itertools 0.13.0", "num-bigint", @@ -1751,7 +1737,7 @@ dependencies = [ "portable-atomic", "rand 0.8.5", "regress", - "rustc-hash 2.1.1", + "rustc-hash", "ryu-js", "serde", "serde_json", @@ -1759,7 +1745,7 @@ dependencies = [ "static_assertions", "tap", "thin-vec", - "thiserror 2.0.16", + "thiserror 2.0.17", "time", ] @@ -1785,10 +1771,10 @@ dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.15.5", - "indexmap 2.11.4", + "indexmap 2.12.0", "once_cell", "phf 0.11.3", - "rustc-hash 2.1.1", + "rustc-hash", "static_assertions", ] @@ -1800,7 +1786,7 @@ checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 
2.0.107", "synstructure", ] @@ -1820,7 +1806,7 @@ dependencies = [ "num-bigint", "num-traits", "regress", - "rustc-hash 2.1.1", + "rustc-hash", ] [[package]] @@ -1837,7 +1823,7 @@ checksum = "7debc13fbf7997bf38bf8e9b20f1ad5e2a7d27a900e1f6039fe244ce30f589b5" dependencies = [ "fast-float2", "paste", - "rustc-hash 2.1.1", + "rustc-hash", "sptr", "static_assertions", ] @@ -1913,22 +1899,22 @@ checksum = "175812e0be2bccb6abe50bb8d566126198344f707e304f45c648fd8f2cc0365e" [[package]] name = "bytemuck" -version = "1.23.2" +version = "1.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3995eaeebcdf32f91f980d360f78732ddc061097ab4e39991ae7a6ace9194677" +checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.10.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f154e572231cb6ba2bd1176980827e3d5dc04cc183a75dea38109fbdd672d29" +checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -1948,9 +1934,9 @@ dependencies = [ [[package]] name = "c-kzg" -version = "2.1.4" +version = "2.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "137a2a2878ed823ef1bd73e5441e245602aae5360022113b8ad259ca4b5b8727" +checksum = "e00bf4b112b07b505472dbefd19e37e53307e2bfed5a79e0cc161d58ccd0e687" dependencies = [ "arbitrary", "blst", @@ -1964,9 +1950,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1de8bc0aa9e9385ceb3bf0c152e3a9b9544f6c4a912c8ae504e80c1f0368603" +checksum = "276a59bf2b2c967788139340c9f0c5b12d7fd6630315c15c217e559de85d2609" dependencies = [ "serde_core", ] @@ -2004,7 +1990,7 @@ dependencies = [ "semver 1.0.27", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -2056,9 +2042,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "cfg_aliases" @@ -2077,7 +2063,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -2130,9 +2116,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.48" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2134bb3ea021b78629caa971416385309e0131b351b25e01dc16fb54e1b5fae" +checksum = "f4512b90fa68d3a9932cea5184017c5d200f5921df706d45e853537dea51508f" dependencies = [ "clap_builder", "clap_derive", @@ -2140,9 +2126,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.48" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2ba64afa3c0a6df7fa517765e31314e983f51dda798ffba27b988194fb65dc9" +checksum = "0025e98baa12e766c67ba13ff4695a887a1eba19569aad00a472546795bd6730" dependencies = [ "anstream", "anstyle", @@ -2152,21 +2138,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.47" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] name = "clap_lex" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" +checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "cmake" @@ -2386,9 +2372,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6407bff74dea37e0fa3dc1c1c974e5d46405f0c987bf9997a0762adce71eda6" +checksum = "3bb320cac8a0750d7f25280aa97b09c26edfe161164238ecbbb31092b079e735" dependencies = [ "cfg-if", "cpufeatures", @@ -2402,11 +2388,17 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "const-str" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f421161cb492475f1661ddc9815a745a1c894592070661180fdec3d4872e9c3" + [[package]] name = "const_format" -version = "0.2.34" +version = "0.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +checksum = "7faa7469a93a566e9ccc1c73fe783b4a65c274c5ace346038dca9c39fe0030ad" dependencies = [ "const_format_proc_macros", ] @@ -2609,21 +2601,21 @@ dependencies = [ [[package]] name = "csv" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf" +checksum = "52cd9d68cf7efc6ddfaaee42e7288d3a99d613d4b50f76ce9827ae0c6e14f938" dependencies = [ "csv-core", "itoa", "ryu", - "serde", + "serde_core", ] [[package]] name = "csv-core" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d02f3b0da4c6504f86e9cd789d8dbafab48c2321be74e9987593de5a894d93d" +checksum = "704a3c26996a80471189265814dbc2c257598b96b8a7feae2d31ace646bb9782" dependencies = [ "memchr", ] @@ -2661,7 +2653,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -2695,7 +2687,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -2710,7 +2702,7 @@ dependencies = [ "quote", "serde", "strsim", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -2721,7 +2713,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -2732,7 +2724,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -2785,7 +2777,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -2844,7 +2836,7 @@ checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" dependencies 
= [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -2855,7 +2847,7 @@ checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -2876,7 +2868,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -2886,7 +2878,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -2907,7 +2899,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", "unicode-xid", ] @@ -2966,7 +2958,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.2", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -3021,7 +3013,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -3100,7 +3092,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -3140,7 +3132,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "walkdir", ] @@ -3208,7 +3200,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -3228,7 +3220,7 @@ checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -3244,7 +3236,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -3282,9 +3274,9 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca8ba45b63c389c6e115b095ca16381534fdcc03cf58176a3f8554db2dbe19b" +checksum = "0dcddb2554d19cde19b099fadddde576929d7a4d0c1cd3512d1fd95cf174375c" dependencies = [ "alloy-primitives", "ethereum_serde_utils", @@ -3297,14 +3289,14 @@ dependencies = [ [[package]] name = "ethereum_ssz_derive" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd55d08012b4e0dfcc92b8d6081234df65f2986ad34cc76eeed69c5e2ce7506" +checksum = "a657b6b3b7e153637dc6bdc6566ad9279d9ee11a15b12cfb24a2e04360637e9f" dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -3328,7 +3320,7 @@ dependencies = [ "reth-ethereum", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -3371,7 +3363,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -3417,7 +3409,7 @@ dependencies = [ "reth-payload-builder", "reth-tracing", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] @@ -3487,7 +3479,7 @@ dependencies = [ "revm", "revm-primitives", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -3831,9 +3823,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.2" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" +checksum = "dc5a4e564e38c699f2880d3fda590bedc2e69f3f84cd48b457bd892ce61d0aa9" dependencies = [ "crc32fast", "miniz_oxide", @@ -3952,7 +3944,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -4053,15 +4045,15 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", "js-sys", "libc", "r-efi", - "wasi 0.14.7+wasi-0.2.4", + "wasip2", "wasm-bindgen", ] @@ -4075,12 +4067,6 @@ dependencies = [ "polyval", ] -[[package]] -name = "gimli" -version = "0.32.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" - [[package]] name = "git2" version = "0.20.2" @@ -4179,7 +4165,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.11.4", + "indexmap 2.12.0", "slab", "tokio", "tokio-util", @@ -4188,12 +4174,13 @@ dependencies = [ [[package]] name = "half" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", + "zerocopy", ] [[package]] @@ -4309,7 +4296,7 @@ dependencies = [ "rand 0.9.2", "ring", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tinyvec", "tokio", "tracing", @@ -4333,7 +4320,7 @@ dependencies = [ "resolv-conf", "serde", "smallvec", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -4489,7 +4476,7 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", - "webpki-roots 1.0.2", + "webpki-roots 1.0.3", ] [[package]] @@ -4510,7 +4497,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.0", + "socket2 0.6.1", "tokio", "tower-service", "tracing", @@ -4528,7 +4515,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.62.1", + "windows-core 0.62.2", ] [[package]] @@ -4741,7 +4728,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -4798,7 +4785,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -4839,9 +4826,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.11.4" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" +checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" dependencies = [ "arbitrary", "equivalent", @@ -4902,7 +4889,7 @@ dependencies = [ "indoc", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -4938,17 +4925,6 @@ dependencies = [ "memoffset", ] -[[package]] -name = "io-uring" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b" -dependencies = [ - "bitflags 
2.9.4", - "cfg-if", - "libc", -] - [[package]] name = "ipconfig" version = "0.3.2" @@ -5055,7 +5031,7 @@ version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "libc", ] @@ -5104,7 +5080,7 @@ dependencies = [ "rustls-pki-types", "rustls-platform-verifier", "soketto", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-rustls", "tokio-util", @@ -5129,10 +5105,10 @@ dependencies = [ "parking_lot", "pin-project", "rand 0.9.2", - "rustc-hash 2.1.1", + "rustc-hash", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tower", @@ -5157,7 +5133,7 @@ dependencies = [ "rustls-platform-verifier", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tower", "url", @@ -5173,7 +5149,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -5195,7 +5171,7 @@ dependencies = [ "serde", "serde_json", "soketto", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -5212,7 +5188,7 @@ dependencies = [ "http", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -5318,9 +5294,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.176" +version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58f929b4d672ea937a23a1ab494143d968337a5f47e56d0815df1e0890ddf174" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "libgit2-sys" @@ -5341,7 +5317,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -5364,18 +5340,18 @@ dependencies = [ "multihash", "quick-protobuf", "sha2", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", "zeroize", ] [[package]] name = "libproc" -version = "0.14.10" +version = "0.14.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78a09b56be5adbcad5aa1197371688dc6bb249a26da3bca2011ee2fb987ebfb" +checksum = "a54ad7278b8bc5301d5ffd2a94251c004feb971feba96c971ea4063645990757" dependencies = [ - "bindgen 0.70.1", + "bindgen 0.72.1", "errno", "libc", ] @@ -5451,11 +5427,10 @@ checksum = "f5e54036fe321fd421e10d732f155734c4e4afd610dd556d9a82833ab3ee0bed" [[package]] name = "lock_api" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ - "autocfg", "scopeguard", "serde", ] @@ -5530,9 +5505,9 @@ checksum = "08ab2867e3eeeca90e844d1940eab391c9dc5228783db2ed999acbc0a9ed375a" [[package]] name = "mach2" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d640282b302c0bb0a2a8e0233ead9035e3bed871f0b7e81fe4a1ec829765db44" +checksum = "6a1b95cd5421ec55b445b5ae102f5ea0e768de1f82bd3001e11f426c269c3aea" dependencies = [ "libc", ] @@ -5545,7 +5520,18 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", +] 
+ +[[package]] +name = "match-lookup" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1265724d8cb29dbbc2b0f06fffb8bf1a8c0cf73a78eede9ba73a4a66c52a981e" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] @@ -5600,7 +5586,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -5610,7 +5596,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd7399781913e5393588a8d8c6a2867bf85fb38eaf2502fdce465aad2dc6f034" dependencies = [ "base64 0.22.1", - "indexmap 2.11.4", + "indexmap 2.12.0", "metrics", "metrics-util", "quanta", @@ -5619,18 +5605,18 @@ dependencies = [ [[package]] name = "metrics-process" -version = "2.4.0" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a82c8add4382f29a122fa64fff1891453ed0f6b2867d971e7d60cb8dfa322ff" +checksum = "f615e08e049bd14a44c4425415782efb9bcd479fc1e19ddeb971509074c060d0" dependencies = [ "libc", "libproc", "mach2", "metrics", "once_cell", - "procfs", + "procfs 0.18.0", "rlimit", - "windows 0.58.0", + "windows 0.62.2", ] [[package]] @@ -5642,7 +5628,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.5", - "indexmap 2.11.4", + "indexmap 2.12.0", "metrics", "ordered-float", "quanta", @@ -5666,7 +5652,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -5716,6 +5702,7 @@ checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", "serde", + "simd-adler32", ] [[package]] @@ -5796,11 +5783,12 @@ dependencies = [ [[package]] name = "multibase" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +checksum = "8694bb4835f452b0e3bb06dbebb1d6fc5385b6ca1caf2e55fd165c042390ec77" dependencies = [ "base-x", + "base256emoji", "data-encoding", "data-encoding-macro", ] @@ -5860,11 +5848,11 @@ dependencies = [ [[package]] name = "nu-ansi-term" -version = "0.50.1" +version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -5977,7 +5965,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -5991,9 +5979,9 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa11e84403164a9f12982ab728f3c67c6fd4ab5b5f0254ffc217bdbd3b28ab0" +checksum = "2c4b5ecbd0beec843101bffe848217f770e8b8da81d8355b7d6e226f2199b3dc" dependencies = [ "alloy-rlp", "arbitrary", @@ -6004,15 +5992,6 @@ dependencies = [ "smallvec", ] -[[package]] -name = "object" -version = "0.37.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" -dependencies = [ - "memchr", -] - [[package]] name = "once_cell" version = "1.21.3" @@ -6037,9 +6016,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "op-alloy-consensus" -version = "0.20.0" +version = "0.21.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a501241474c3118833d6195312ae7eb7cc90bbb0d5f524cbb0b06619e49ff67" +checksum = "cf1fc8aa0e2f5b136d101630be009e4e6dbdd1f17bc3ce670f431511600d2930" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6052,7 +6031,7 @@ dependencies = [ "derive_more", "serde", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -6063,9 +6042,9 @@ checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc" [[package]] name = "op-alloy-network" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f80108e3b36901200a4c5df1db1ee9ef6ce685b59ea79d7be1713c845e3765da" +checksum = "7c5cca341184dbfcb49dbc124e5958e6a857499f04782907e5d969abb644e0b6" dependencies = [ "alloy-consensus", "alloy-network", @@ -6079,9 +6058,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8eb878fc5ea95adb5abe55fb97475b3eb0dcc77dfcd6f61bd626a68ae0bdba1" +checksum = "190e9884a69012d4abc26d1c0bc60fe01d57899ab5417c8f38105ffaaab4149b" dependencies = [ "alloy-primitives", "jsonrpsee", @@ -6089,9 +6068,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "753d6f6b03beca1ba9cbd344c05fee075a2ce715ee9d61981c10b9c764a824a2" +checksum = "274972c3c5e911b6675f6794ea0476b05e0bc1ea7e464f99ec2dc01b76d2eeb6" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6104,14 +6083,14 @@ dependencies = [ "op-alloy-consensus", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "op-alloy-rpc-types-engine" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14e50c94013a1d036a529df259151991dbbd6cf8dc215e3b68b784f95eec60e6" +checksum = "860edb8d5a8d54bbcdabcbd8642c45b974351ce4e10ed528dd4508eee2a43833" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6126,7 +6105,7 @@ dependencies = [ "op-alloy-consensus", "serde", "snap", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -6149,9 +6128,9 @@ dependencies = [ [[package]] name = "op-revm" -version = "11.1.0" +version = "11.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a2811256cd65560453ea6f7174b1b6caa7909cb5652cf05dc7d8144c5e4b38" +checksum = "b1d721c4c196273dd135ea5b823cd573ea8735cd3c5f2c19fcb91ee3af655351" dependencies = [ "auto_impl", "revm", @@ -6180,7 +6159,7 @@ dependencies = [ "futures-sink", "js-sys", "pin-project-lite", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] @@ -6210,7 +6189,7 @@ dependencies = [ "opentelemetry_sdk", "prost", "reqwest", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] @@ -6245,7 +6224,7 @@ dependencies = [ "opentelemetry", "percent-encoding", "rand 0.9.2", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -6312,7 +6291,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -6323,9 +6302,9 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" +checksum = 
"93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", "parking_lot_core", @@ -6333,15 +6312,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.11" +version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-targets 0.52.6", + "windows-link 0.2.1", ] [[package]] @@ -6362,12 +6341,12 @@ dependencies = [ [[package]] name = "pem" -version = "3.0.5" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" dependencies = [ "base64 0.22.1", - "serde", + "serde_core", ] [[package]] @@ -6378,12 +6357,11 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" -version = "2.8.2" +version = "2.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e0a3a33733faeaf8651dfee72dd0f388f0c8e5ad496a3478fa5a922f49cfa8" +checksum = "989e7521a040efde50c3ab6bbadafbe15ab6dc042686926be59ac35d74607df4" dependencies = [ "memchr", - "thiserror 2.0.16", "ucd-trie", ] @@ -6448,7 +6426,7 @@ dependencies = [ "phf_shared 0.11.3", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -6461,7 +6439,7 @@ dependencies = [ "phf_shared 0.13.1", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -6499,7 +6477,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -6632,7 +6610,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -6661,7 +6639,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.6", + "toml_edit 0.23.7", ] [[package]] @@ -6683,7 +6661,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -6705,10 +6683,21 @@ dependencies = [ "chrono", "flate2", "hex", - "procfs-core", + "procfs-core 0.17.0", "rustix 0.38.44", ] +[[package]] +name = "procfs" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25485360a54d6861439d60facef26de713b1e126bf015ec8f98239467a2b82f7" +dependencies = [ + "bitflags 2.9.4", + "procfs-core 0.18.0", + "rustix 1.1.2", +] + [[package]] name = "procfs-core" version = "0.17.0" @@ -6720,6 +6709,16 @@ dependencies = [ "hex", ] +[[package]] +name = "procfs-core" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6401bf7b6af22f78b563665d15a22e9aef27775b79b149a66ca022468a4e405" +dependencies = [ + "bitflags 2.9.4", + "hex", +] + [[package]] name = "proptest" version = "1.8.0" @@ -6758,7 +6757,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -6769,7 +6768,7 
@@ checksum = "095a99f75c69734802359b682be8daaf8980296731f6470434ea2c652af1dd30" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -6792,7 +6791,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -6847,10 +6846,10 @@ dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.1.1", + "rustc-hash", "rustls", - "socket2 0.6.0", - "thiserror 2.0.16", + "socket2 0.6.1", + "thiserror 2.0.17", "tokio", "tracing", "web-time", @@ -6863,15 +6862,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ "bytes", - "getrandom 0.3.3", + "getrandom 0.3.4", "lru-slab", "rand 0.9.2", "ring", - "rustc-hash 2.1.1", + "rustc-hash", "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.16", + "thiserror 2.0.17", "tinyvec", "tracing", "web-time", @@ -6886,16 +6885,16 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.6.0", + "socket2 0.6.1", "tracing", "windows-sys 0.60.2", ] [[package]] name = "quote" -version = "1.0.40" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" dependencies = [ "proc-macro2", ] @@ -7002,7 +7001,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "serde", ] @@ -7091,9 +7090,9 @@ checksum = "d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" [[package]] name = "redox_syscall" -version = "0.5.17" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ "bitflags 2.9.4", ] @@ -7117,34 +7116,34 @@ checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" dependencies = [ "getrandom 0.2.16", "libredox", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "ref-cast" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a0ae411dbe946a674d89546582cea4ba2bb8defac896622d6496f14c23ba5cf" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] name = "regex" -version = "1.11.3" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", "memchr", @@ -7154,9 +7153,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.11" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", @@ -7165,9 +7164,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "regress" @@ -7187,9 +7186,9 @@ checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "reqwest" -version = "0.12.23" +version = "0.12.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" dependencies = [ "base64 0.22.1", "bytes", @@ -7225,7 +7224,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.2", + "webpki-roots 1.0.3", ] [[package]] @@ -7337,7 +7336,7 @@ dependencies = [ "reth-tracing", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tower", "tracing", @@ -7514,7 +7513,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "snmalloc-rs", - "thiserror 2.0.16", + "thiserror 2.0.17", "tikv-jemallocator", "tracy-client", ] @@ -7550,7 +7549,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -7579,7 +7578,7 @@ dependencies = [ "auto_impl", "reth-execution-types", "reth-primitives-traits", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -7646,13 +7645,13 @@ dependencies = [ "reth-static-file-types", "reth-storage-errors", "reth-tracing", - "rustc-hash 2.1.1", + "rustc-hash", "serde", "serde_json", "strum 0.27.2", "sysinfo", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -7711,7 +7710,7 @@ dependencies = [ "reth-trie-db", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] @@ -7752,7 +7751,7 @@ dependencies = [ "schnellru", "secp256k1 0.30.0", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -7778,7 +7777,7 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1 0.30.0", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -7805,7 +7804,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -7842,7 +7841,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -7928,7 +7927,7 @@ dependencies = [ "secp256k1 0.30.0", "sha2", "sha3", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -7979,7 +7978,7 @@ dependencies = [ "reth-primitives-traits", "reth-trie-common", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] @@ -8079,7 +8078,7 @@ dependencies = [ "schnellru", "serde_json", "smallvec", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -8129,7 +8128,7 @@ dependencies = [ "snap", "tempfile", "test-case", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] @@ -8183,7 +8182,7 @@ dependencies = [ "reth-consensus", "reth-execution-errors", "reth-storage-errors", - "thiserror 2.0.16", + "thiserror 2.0.17", ] 
[[package]] @@ -8217,7 +8216,7 @@ dependencies = [ "serde", "snap", "test-fuzz", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -8246,7 +8245,7 @@ dependencies = [ "reth-ethereum-primitives", "reth-primitives-traits", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -8342,7 +8341,7 @@ dependencies = [ "serde", "serde_json", "sha2", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -8355,7 +8354,7 @@ dependencies = [ "arbitrary", "auto_impl", "once_cell", - "rustc-hash 2.1.1", + "rustc-hash", ] [[package]] @@ -8480,7 +8479,7 @@ dependencies = [ "alloy-rlp", "nybbles", "reth-storage-errors", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -8541,7 +8540,7 @@ dependencies = [ "rmp-serde", "secp256k1 0.30.0", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-util", "tracing", @@ -8574,7 +8573,7 @@ dependencies = [ "reth-tasks", "reth-transaction-pool", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] @@ -8601,7 +8600,7 @@ version = "1.8.2" dependencies = [ "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -8651,7 +8650,7 @@ dependencies = [ "reth-tracing", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -8673,7 +8672,7 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] @@ -8712,7 +8711,7 @@ dependencies = [ "reqwest", "reth-tracing", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -8764,12 +8763,12 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "reth-transaction-pool", - "rustc-hash 2.1.1", + "rustc-hash", "schnellru", "secp256k1 0.30.0", "serde", "smallvec", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -8796,7 +8795,7 @@ dependencies = [ "reth-network-types", "reth-tokio-util", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", ] @@ -8835,7 +8834,7 @@ dependencies = [ "secp256k1 0.30.0", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "url", ] @@ -8866,7 +8865,7 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", "zstd", ] @@ -9009,7 +9008,7 @@ dependencies = [ "serde", "shellexpand", "strum 0.27.2", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "toml", "tracing", @@ -9086,7 +9085,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-tungstenite", @@ -9128,7 +9127,7 @@ dependencies = [ "metrics-exporter-prometheus", "metrics-process", "metrics-util", - "procfs", + "procfs 0.17.0", "reqwest", "reth-metrics", "reth-tasks", @@ -9214,7 +9213,7 @@ dependencies = [ "serde", "serde_json", "tar-no-std", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -9293,7 +9292,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "revm", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] @@ -9323,7 +9322,7 @@ dependencies = [ "reth-rpc-eth-api", "reth-storage-errors", "revm", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -9467,7 +9466,7 @@ dependencies = [ "revm", "serde", "sha2", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] @@ -9553,7 +9552,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tower", @@ 
-9604,7 +9603,7 @@ dependencies = [ "reth-storage-api", "reth-transaction-pool", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -9656,7 +9655,7 @@ dependencies = [ "reth-errors", "reth-primitives-traits", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] @@ -9734,7 +9733,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -9811,8 +9810,8 @@ dependencies = [ "reth-testing-utils", "reth-tokio-util", "reth-tracing", - "rustc-hash 2.1.1", - "thiserror 2.0.16", + "rustc-hash", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -9831,7 +9830,7 @@ dependencies = [ "reth-codecs", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "toml", ] @@ -9976,7 +9975,7 @@ dependencies = [ "serde", "serde_json", "sha2", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tower", @@ -10077,7 +10076,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-util", "tower", @@ -10109,7 +10108,7 @@ dependencies = [ "reth-storage-api", "revm-context", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -10163,7 +10162,7 @@ dependencies = [ "reth-testing-utils", "reth-transaction-pool", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -10252,7 +10251,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -10342,7 +10341,7 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -10370,7 +10369,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -10416,7 +10415,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -10488,7 +10487,7 @@ dependencies = [ "reth-prune-types", "reth-static-file-types", "revm-database-interface", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -10531,7 +10530,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", "tracing-futures", @@ -10626,15 +10625,15 @@ dependencies = [ "reth-storage-api", "reth-tasks", "reth-tracing", - "revm-interpreter", + "revm-interpreter 27.0.2", "revm-primitives", - "rustc-hash 2.1.1", + "rustc-hash", "schnellru", "serde", "serde_json", "smallvec", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -10756,7 +10755,7 @@ dependencies = [ "reth-trie-common", "reth-trie-db", "reth-trie-sparse", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -10832,9 +10831,9 @@ dependencies = [ [[package]] name = "revm" -version = "30.1.1" +version = "30.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ca37fd2db4a76e4fb805b583ca3500ad9f6789b8d069473c70d8182ed5547d6" +checksum = "76df793c6ef3bef8f88f05b3873ebebce1494385a3ce8f58ad2e2e111aa0de11" dependencies = [ "revm-bytecode", "revm-context", @@ -10843,7 +10842,7 @@ dependencies = [ "revm-database-interface", "revm-handler", "revm-inspector", - "revm-interpreter", + "revm-interpreter 28.0.0", "revm-precompile", "revm-primitives", "revm-state", @@ -10863,9 +10862,9 @@ dependencies = [ [[package]] name = "revm-context" -version = "10.1.1" +version 
= "10.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94dffb17f4ac19cc3e7ace5b9bb69406b53a2d2e74a0a0c6b56591762aa7c30a" +checksum = "7adcce0c14cf59b7128de34185a0fbf8f63309539b9263b35ead870d73584114" dependencies = [ "bitvec", "cfg-if", @@ -10880,9 +10879,9 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "11.1.1" +version = "11.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fc1793e0092475f28d9cc4e663ff45846bc06d034c5ca33d89b6556143e2930" +checksum = "7d620a9725e443c171fb195a074331fa4a745fa5cbb0018b4bbf42619e64b563" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -10896,9 +10895,9 @@ dependencies = [ [[package]] name = "revm-database" -version = "9.0.1" +version = "9.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637ceeefe76c93a69a1453e98272150ad10691d801b51033a68d5d03a6268f6a" +checksum = "fdefd7f40835e992bab40a245124cb1243e6c7a1c4659798827c809a59b0fea9" dependencies = [ "alloy-eips", "revm-bytecode", @@ -10910,9 +10909,9 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "8.0.2" +version = "8.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f318a603e1179e57c72ceca6e37f8d44c7b9ab7caec1feffc1202b42f25f4ac4" +checksum = "aa488a73ac2738f11478650cdf1a0f263864c09d5f0e9bf6309e891a05323c60" dependencies = [ "auto_impl", "either", @@ -10923,9 +10922,9 @@ dependencies = [ [[package]] name = "revm-handler" -version = "11.1.1" +version = "11.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "085ec3b976336478c29d96ec222445c964badefe0fd408a61da7079cb168b9c7" +checksum = "b1d8049b2fbff6636150f4740c95369aa174e41b0383034e0e256cfdffcfcd23" dependencies = [ "auto_impl", "derive-where", @@ -10933,7 +10932,7 @@ dependencies = [ "revm-context", "revm-context-interface", "revm-database-interface", - "revm-interpreter", + "revm-interpreter 28.0.0", "revm-precompile", "revm-primitives", "revm-state", @@ -10942,16 +10941,16 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "11.1.1" +version = "11.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a9b5f2375e5a90f289669e7403f96b0fff21052116f3ed1e7cc7759327127e" +checksum = "e2a21dd773b654ec7e080025eecef4ac84c711150d1bd36acadf0546f471329a" dependencies = [ "auto_impl", "either", "revm-context", "revm-database-interface", "revm-handler", - "revm-interpreter", + "revm-interpreter 28.0.0", "revm-primitives", "revm-state", "serde", @@ -10960,9 +10959,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.31.0" +version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce1228a7989cc3d9af84c0de2abe39680a252c265877e67d2f0fb4f392cb690" +checksum = "782c38fa94f99b4b15f1690bffc2c3cbf06a0f460cf163b470d126914b47d343" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -10975,14 +10974,27 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", +] + +[[package]] +name = "revm-interpreter" +version = "27.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0834fc25c020061f0f801d8de8bb53c88a63631cca5884a6c65b90c85e241138" +dependencies = [ + "revm-bytecode", + "revm-context-interface", + "revm-primitives", + "revm-state", + "serde", ] [[package]] name = "revm-interpreter" -version = "27.0.1" +version = "28.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a8301ef34c8c242ecc040a5b0880fb04df3caaf844d81920a48c0073fd7d5d1" +checksum = "f1de5c790122f8ded67992312af8acd41ccfcee629b25b819e10c5b1f69caf57" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -11030,9 +11042,9 @@ dependencies = [ [[package]] name = "revm-state" -version = "8.0.1" +version = "8.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef7e3342f602a1a7a38d15e140ec08d1dc4f4d703c4196aadfd1744b2008e915" +checksum = "9e6bd5e669b02007872a8ca2643a14e308fe1739ee4475d74122587c3388a06a" dependencies = [ "bitflags 2.9.4", "revm-bytecode", @@ -11171,7 +11183,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.106", + "syn 2.0.107", "unicode-ident", ] @@ -11228,12 +11240,6 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - [[package]] name = "rustc-hash" version = "2.1.1" @@ -11290,14 +11296,14 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] name = "rustls" -version = "0.23.32" +version = "0.23.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" +checksum = "751e04a496ca00bb97a5e043158d23d66b5aabf2e1d5aa2a0aaebb1aafe6f82c" dependencies = [ "log", "once_cell", @@ -11310,9 +11316,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" dependencies = [ "openssl-probe", "rustls-pki-types", @@ -11359,9 +11365,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.103.6" +version = "0.103.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8572f3c2cb9934231157b45499fc41e1f58c589fdfb81a844ba873265e80f8eb" +checksum = "e10b3f4191e8a80e6b43eebabfac91e5dcecebb27a71f04e820c47ec41d314bf" dependencies = [ "ring", "rustls-pki-types", @@ -11376,9 +11382,9 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "rusty-fork" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2" dependencies = [ "fnv", "quick-error", @@ -11413,7 +11419,7 @@ version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -11521,9 +11527,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.5.0" +version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc198e42d9b7510827939c9a15f5062a0c913f3371d765977e586d2fe6c16f4a" +checksum = 
"b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ "bitflags 2.9.4", "core-foundation", @@ -11618,7 +11624,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -11627,7 +11633,7 @@ version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ - "indexmap 2.11.4", + "indexmap 2.12.0", "itoa", "memchr", "ryu", @@ -11669,19 +11675,18 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.14.1" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c522100790450cf78eeac1507263d0a350d4d5b30df0c8e1fe051a10c22b376e" +checksum = "6093cd8c01b25262b84927e0f7151692158fab02d961e04c979d3903eba7ecc5" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.11.4", + "indexmap 2.12.0", "schemars 0.9.0", "schemars 1.0.4", - "serde", - "serde_derive", + "serde_core", "serde_json", "serde_with_macros", "time", @@ -11689,14 +11694,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.14.1" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327ada00f7d64abaac1e55a6911e90cf665aa051b9a561c7006c157f4633135e" +checksum = "a7e6c180db0816026a61afa1cff5344fb7ebded7e4d3062772179f2501481c27" dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -11815,6 +11820,12 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + [[package]] name = "similar" version = "2.7.0" @@ -11844,7 +11855,7 @@ checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror 2.0.16", + "thiserror 2.0.17", "time", ] @@ -11928,12 +11939,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -11970,9 +11981,9 @@ checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" [[package]] name = "stable_deref_trait" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "static_assertions" @@ -12014,7 +12025,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -12026,7 +12037,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -12048,9 +12059,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.106" +version = "2.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +checksum = 
"2a26dbd934e5451d21ef060c018dae56fc073894c5a7896f882928a76e6d081b" dependencies = [ "proc-macro2", "quote", @@ -12066,7 +12077,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -12086,7 +12097,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -12143,10 +12154,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ "fastrand 2.3.0", - "getrandom 0.3.3", + "getrandom 0.3.4", "once_cell", "rustix 1.1.2", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -12167,7 +12178,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -12178,7 +12189,7 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", "test-case-core", ] @@ -12218,7 +12229,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -12251,11 +12262,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.16" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - "thiserror-impl 2.0.16", + "thiserror-impl 2.0.17", ] [[package]] @@ -12266,18 +12277,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] name = "thiserror-impl" -version = "2.0.16" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -12300,9 +12311,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f21f216790c8df74ce3ab25b534e0718da5a1916719771d3fec23315c99e468b" +checksum = "661f1f6a57b3a36dc9174a2c10f19513b4866816e13425d3e418b11cc37bc24c" dependencies = [ "libc", "paste", @@ -12311,9 +12322,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" +version = "0.6.1+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3c60906412afa9c2b5b5a48ca6a5abe5736aec9eb48ad05037a677e52e4e2d" +checksum = "cd8aa5b2ab86a2cefa406d889139c162cbb230092f7d1d7cbc1716405d852a3b" dependencies = [ "cc", "libc", @@ -12321,9 +12332,9 @@ dependencies = [ [[package]] name = "tikv-jemallocator" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cec5ff18518d81584f477e9bfdf957f5bb0979b0bac3af4ca30b5b3ae2d2865" +checksum = "0359b4327f954e0567e69fb191cf1436617748813819c94b8cd4a431422d053a" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -12419,33 +12430,30 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = 
"tokio" -version = "1.47.1" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ - "backtrace", "bytes", - "io-uring", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", - "slab", - "socket2 0.6.0", + "socket2 0.6.1", "tokio-macros", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -12525,9 +12533,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f1085dec27c2b6632b04c80b3bb1b4300d6495d1e129693bdda7d91e72eec1" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" dependencies = [ "serde_core", ] @@ -12538,7 +12546,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.11.4", + "indexmap 2.12.0", "serde", "serde_spanned", "toml_datetime 0.6.11", @@ -12548,21 +12556,21 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.23.6" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3effe7c0e86fdff4f69cdd2ccc1b96f933e24811c5441d44904e8683e27184b" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ - "indexmap 2.11.4", - "toml_datetime 0.7.2", + "indexmap 2.12.0", + "toml_datetime 0.7.3", "toml_parser", "winnow", ] [[package]] name = "toml_parser" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cf893c33be71572e0e9aa6dd15e6677937abd686b066eac3f8cd3531688a627" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" dependencies = [ "winnow", ] @@ -12614,7 +12622,7 @@ dependencies = [ "futures-core", "futures-util", "hdrhistogram", - "indexmap 2.11.4", + "indexmap 2.12.0", "pin-project-lite", "slab", "sync_wrapper", @@ -12700,7 +12708,7 @@ checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -12768,7 +12776,7 @@ dependencies = [ "opentelemetry_sdk", "rustversion", "smallvec", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", "tracing-core", "tracing-log", @@ -12860,7 +12868,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -12875,9 +12883,9 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8f7726da4807b58ea5c96fdc122f80702030edc33b35aff9190a51148ccc85" +checksum = "dd69c5aa8f924c7519d6372789a74eac5b94fb0f8fcf0d4a97eb0bfc3e785f39" [[package]] name = "try-lock" @@ -12900,15 +12908,15 @@ dependencies = [ "rustls", "rustls-pki-types", "sha1", - "thiserror 2.0.16", + "thiserror 2.0.17", "utf-8", ] [[package]] name = 
"typenum" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" [[package]] name = "ucd-trie" @@ -13063,7 +13071,7 @@ version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "js-sys", "wasm-bindgen", ] @@ -13141,7 +13149,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -13190,15 +13198,6 @@ version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" -[[package]] -name = "wasi" -version = "0.14.7+wasi-0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "883478de20367e224c0090af9cf5f9fa85bed63a95c1abf3afc5c083ebc06e8c" -dependencies = [ - "wasip2", -] - [[package]] name = "wasip2" version = "1.0.1+wasi-0.2.4" @@ -13231,7 +13230,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", "wasm-bindgen-shared", ] @@ -13266,7 +13265,7 @@ checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -13333,14 +13332,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" dependencies = [ - "webpki-root-certs 1.0.2", + "webpki-root-certs 1.0.3", ] [[package]] name = "webpki-root-certs" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4ffd8df1c57e87c325000a3d6ef93db75279dc3a231125aac571650f22b12a" +checksum = "05d651ec480de84b762e7be71e6efa7461699c19d9e2c272c8d93455f567786e" dependencies = [ "rustls-pki-types", ] @@ -13351,23 +13350,23 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.2", + "webpki-roots 1.0.3", ] [[package]] name = "webpki-roots" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" +checksum = "32b130c0d2d49f8b6889abc456e795e82525204f27c42cf767cf0d7734e089b8" dependencies = [ "rustls-pki-types", ] [[package]] name = "widestring" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d" +checksum = "72069c3113ab32ab29e5584db3c6ec55d416895e60715417b5b883a357c3e471" [[package]] name = "winapi" @@ -13391,7 +13390,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -13412,25 +13411,27 @@ dependencies = [ [[package]] name = "windows" -version = "0.58.0" +version = "0.61.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" dependencies = [ - "windows-core 0.58.0", - "windows-targets 0.52.6", + "windows-collections 0.2.0", + "windows-core 0.61.2", + "windows-future 0.2.1", + "windows-link 0.1.3", + "windows-numerics 0.2.0", ] [[package]] name = "windows" -version = "0.61.3" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" +checksum = "527fadee13e0c05939a6a05d5bd6eec6cd2e3dbd648b9f8e447c6518133d8580" dependencies = [ - "windows-collections", - "windows-core 0.61.2", - "windows-future", - "windows-link 0.1.3", - "windows-numerics", + "windows-collections 0.3.2", + "windows-core 0.62.2", + "windows-future 0.3.2", + "windows-numerics 0.3.1", ] [[package]] @@ -13443,27 +13444,23 @@ dependencies = [ ] [[package]] -name = "windows-core" -version = "0.57.0" +name = "windows-collections" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" +checksum = "23b2d95af1a8a14a3c7367e1ed4fc9c20e0a26e79551b1454d72583c97cc6610" dependencies = [ - "windows-implement 0.57.0", - "windows-interface 0.57.0", - "windows-result 0.1.2", - "windows-targets 0.52.6", + "windows-core 0.62.2", ] [[package]] name = "windows-core" -version = "0.58.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" dependencies = [ - "windows-implement 0.58.0", - "windows-interface 0.58.0", - "windows-result 0.2.0", - "windows-strings 0.1.0", + "windows-implement 0.57.0", + "windows-interface 0.57.0", + "windows-result 0.1.2", "windows-targets 0.52.6", ] @@ -13473,8 +13470,8 @@ version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ - "windows-implement 0.60.1", - "windows-interface 0.59.2", + "windows-implement 0.60.2", + "windows-interface 0.59.3", "windows-link 0.1.3", "windows-result 0.3.4", "windows-strings 0.4.2", @@ -13482,15 +13479,15 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.62.1" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6844ee5416b285084d3d3fffd743b925a6c9385455f64f6d4fa3031c4c2749a9" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ - "windows-implement 0.60.1", - "windows-interface 0.59.2", - "windows-link 0.2.0", - "windows-result 0.4.0", - "windows-strings 0.5.0", + "windows-implement 0.60.2", + "windows-interface 0.59.3", + "windows-link 0.2.1", + "windows-result 0.4.1", + "windows-strings 0.5.1", ] [[package]] @@ -13501,40 +13498,40 @@ checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" dependencies = [ "windows-core 0.61.2", "windows-link 0.1.3", - "windows-threading", + "windows-threading 0.1.0", ] [[package]] -name = "windows-implement" -version = "0.57.0" +name = "windows-future" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" 
+checksum = "e1d6f90251fe18a279739e78025bd6ddc52a7e22f921070ccdc67dde84c605cb" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", + "windows-core 0.62.2", + "windows-link 0.2.1", + "windows-threading 0.2.1", ] [[package]] name = "windows-implement" -version = "0.58.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] name = "windows-implement" -version = "0.60.1" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb307e42a74fb6de9bf3a02d9712678b22399c87e6fa869d6dfcd8c1b7754e0" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -13545,29 +13542,18 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", -] - -[[package]] -name = "windows-interface" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] name = "windows-interface" -version = "0.59.2" +version = "0.59.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0abd1ddbc6964ac14db11c7213d6532ef34bd9aa042c2e5935f59d7908b46a5" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -13578,9 +13564,9 @@ checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-link" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-numerics" @@ -13593,19 +13579,20 @@ dependencies = [ ] [[package]] -name = "windows-result" -version = "0.1.2" +name = "windows-numerics" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +checksum = "6e2e40844ac143cdb44aead537bbf727de9b044e107a0f1220392177d15b0f26" dependencies = [ - "windows-targets 0.52.6", + "windows-core 0.62.2", + "windows-link 0.2.1", ] [[package]] name = "windows-result" -version = "0.2.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" dependencies = [ "windows-targets 0.52.6", ] @@ -13621,21 +13608,11 @@ dependencies = [ [[package]] name = "windows-result" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7084dcc306f89883455a206237404d3eaf961e5bd7e0f312f7c91f57eb44167f" -dependencies = [ - "windows-link 0.2.0", -] - -[[package]] -name = "windows-strings" -version = "0.1.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ - "windows-result 0.2.0", - "windows-targets 0.52.6", + "windows-link 0.2.1", ] [[package]] @@ -13649,11 +13626,11 @@ dependencies = [ [[package]] name = "windows-strings" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7218c655a553b0bed4426cf54b20d7ba363ef543b52d515b3e48d7fd55318dda" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -13698,16 +13675,16 @@ version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ - "windows-targets 0.53.4", + "windows-targets 0.53.5", ] [[package]] name = "windows-sys" -version = "0.61.1" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f109e41dd4a3c848907eb83d5a42ea98b3769495597450cf6d153507b166f0f" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -13758,19 +13735,19 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.4" +version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d42b7b7f66d2a06854650af09cfdf8713e427a439c97ad65a6375318033ac4b" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows-link 0.2.0", - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "windows-link 0.2.1", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", ] [[package]] @@ -13782,6 +13759,15 @@ dependencies = [ "windows-link 0.1.3", ] +[[package]] +name = "windows-threading" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3949bd5b99cafdf1c7ca86b43ca564028dfe27d66958f2470940f73d86d75b37" +dependencies = [ + "windows-link 0.2.1", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -13802,9 +13788,9 @@ checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" @@ -13826,9 +13812,9 @@ checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_aarch64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" @@ -13850,9 +13836,9 
@@ checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" [[package]] name = "windows_i686_gnullvm" @@ -13862,9 +13848,9 @@ checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" @@ -13886,9 +13872,9 @@ checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" @@ -13910,9 +13896,9 @@ checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" @@ -13934,9 +13920,9 @@ checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" @@ -13958,9 +13944,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" @@ -14018,7 +14004,7 @@ dependencies = [ "pharos", "rustc_version 0.4.1", "send_wrapper 0.6.0", - "thiserror 2.0.16", + "thiserror 2.0.17", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -14081,7 +14067,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", "synstructure", ] @@ -14093,7 +14079,7 @@ checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", "synstructure", ] @@ -14114,7 +14100,7 @@ checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -14134,15 +14120,15 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.106", + "syn 2.0.107", "synstructure", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" dependencies = [ "zeroize_derive", ] @@ -14155,7 +14141,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -14199,7 +14185,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] @@ -14210,7 +14196,7 @@ checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.106", + "syn 2.0.107", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 7d75c8da560..414e387ee28 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -525,11 +525,11 @@ alloy-transport-ws = { version = "1.0.41", default-features = false } # op alloy-op-evm = { version = "0.22.0", default-features = false } alloy-op-hardforks = "0.4.0" -op-alloy-rpc-types = { version = "0.20.0", default-features = false } -op-alloy-rpc-types-engine = { version = "0.20.0", default-features = false } -op-alloy-network = { version = "0.20.0", default-features = false } -op-alloy-consensus = { version = "0.20.0", default-features = false } -op-alloy-rpc-jsonrpsee = { version = "0.20.0", default-features = false } +op-alloy-rpc-types = { version = "0.21.0", default-features = false } +op-alloy-rpc-types-engine = { version = "0.21.0", default-features = false } +op-alloy-network = { version = "0.21.0", default-features = false } +op-alloy-consensus = { version = "0.21.0", default-features = false } +op-alloy-rpc-jsonrpsee = { version = "0.21.0", default-features = false } op-alloy-flz = { version = "0.13.1", default-features = false } # misc @@ -550,7 +550,8 @@ dirs-next = "2.0.0" dyn-clone = "1.0.17" eyre = "0.6" fdlimit = "0.3.0" -generic-array = "0.14" +# pinned until downstream crypto libs migrate to 1.0 because 0.14.8 marks all types as deprecated +generic-array = "=0.14.7" humantime = "2.1" humantime-serde = "1.1" itertools = { version = "0.14", default-features = false } diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 775e79d5aff..f8910c22a33 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -249,6 +249,7 @@ impl OpReceiptFieldsBuilder { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, + da_footprint_gas_scalar: None, }, deposit_nonce, deposit_receipt_version, @@ -364,6 +365,7 @@ mod test { l1_blob_base_fee_scalar: Some(1014213), operator_fee_scalar: None, operator_fee_constant: None, + da_footprint_gas_scalar: None, }, deposit_nonce: None, deposit_receipt_version: None, @@ -407,6 +409,7 @@ mod test { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, + .. } = receipt_meta.l1_block_info; assert_eq!( @@ -537,6 +540,7 @@ mod test { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, + .. 
} = receipt_meta.l1_block_info; assert_eq!(l1_gas_price, Some(14121491676), "incorrect l1 base fee (former gas price)"); diff --git a/examples/custom-node/src/primitives/tx.rs b/examples/custom-node/src/primitives/tx.rs index f04bcc8862f..7c282922f48 100644 --- a/examples/custom-node/src/primitives/tx.rs +++ b/examples/custom-node/src/primitives/tx.rs @@ -33,7 +33,7 @@ impl RlpBincode for CustomTransaction {} impl reth_codecs::alloy::transaction::Envelope for CustomTransaction { fn signature(&self) -> &Signature { match self { - CustomTransaction::Op(tx) => tx.signature(), + CustomTransaction::Op(tx) => reth_codecs::alloy::transaction::Envelope::signature(tx), CustomTransaction::Payment(tx) => tx.signature(), } } From e185025447e7e5b59e1451f0bfb71987723bc618 Mon Sep 17 00:00:00 2001 From: leopardracer <136604165+leopardracer@users.noreply.github.com> Date: Mon, 20 Oct 2025 11:57:03 +0300 Subject: [PATCH 118/371] fix: Remove duplicate debug log in write_blocks_to_rlp (#19132) --- crates/e2e-test-utils/src/test_rlp_utils.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/e2e-test-utils/src/test_rlp_utils.rs b/crates/e2e-test-utils/src/test_rlp_utils.rs index b33b598fd0b..bcfb9faa9d8 100644 --- a/crates/e2e-test-utils/src/test_rlp_utils.rs +++ b/crates/e2e-test-utils/src/test_rlp_utils.rs @@ -157,7 +157,6 @@ pub fn write_blocks_to_rlp(blocks: &[SealedBlock], path: &Path) -> std::io::Resu ); // Debug: check what's in the encoded data - debug!(target: "e2e::import", "Block {} encoded to {} bytes", i, buf.len()); if buf.len() < 20 { debug!(target: "e2e::import", " Raw bytes: {:?}", &buf); } else { From 11c449feb02b5239a59d2dbffe9e5851206b74d5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 20 Oct 2025 11:29:09 +0200 Subject: [PATCH 119/371] feat: add helper apply fns (#19122) --- crates/node/core/src/node_config.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 7b487a1fa71..e3b98f4bd0f 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -191,6 +191,22 @@ impl NodeConfig { self } + /// Apply a function to the config. + pub fn apply(self, f: F) -> Self + where + F: FnOnce(Self) -> Self, + { + f(self) + } + + /// Applies a fallible function to the config. + pub fn try_apply(self, f: F) -> Result + where + F: FnOnce(Self) -> Result, + { + f(self) + } + /// Sets --dev mode for the node [`NodeConfig::dev`], if `dev` is true. 
pub const fn set_dev(self, dev: bool) -> Self { if dev { From c5a52c7d44dede41d2ebb84f1e3c69da4470c346 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 20 Oct 2025 14:50:51 +0400 Subject: [PATCH 120/371] fix(e2e): gracefully wait for payload (#19137) --- crates/e2e-test-utils/src/payload.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index b3f9b027fba..4e185ce9693 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -57,8 +57,9 @@ impl PayloadTestContext { /// Wait until the best built payload is ready pub async fn wait_for_built_payload(&self, payload_id: PayloadId) { loop { - let payload = self.payload_builder.best_payload(payload_id).await.unwrap().unwrap(); - if payload.block().body().transactions().is_empty() { + let payload = + self.payload_builder.best_payload(payload_id).await.transpose().ok().flatten(); + if payload.is_none_or(|p| p.block().body().transactions().is_empty()) { tokio::time::sleep(std::time::Duration::from_millis(20)).await; continue } From 79c11ff5674e12a02307219c276c1e4b24a3ff5b Mon Sep 17 00:00:00 2001 From: Andrew Huang Date: Mon, 20 Oct 2025 05:06:15 -0700 Subject: [PATCH 121/371] =?UTF-8?q?fix:=20Add=20support=20for=20init-state?= =?UTF-8?q?=20for=20op-reth=20chains=20that=20are=20not=20op-mainnet?= =?UTF-8?q?=E2=80=A6=20(#19116)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Matthias Seitz --- .../optimism/cli/src/commands/init_state.rs | 88 +++++++++++-------- 1 file changed, 52 insertions(+), 36 deletions(-) diff --git a/crates/optimism/cli/src/commands/init_state.rs b/crates/optimism/cli/src/commands/init_state.rs index 7af17ca3523..950f60193f0 100644 --- a/crates/optimism/cli/src/commands/init_state.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -12,8 +12,8 @@ use reth_optimism_primitives::{ }; use reth_primitives_traits::SealedHeader; use reth_provider::{ - BlockNumReader, ChainSpecProvider, DBProvider, DatabaseProviderFactory, - StaticFileProviderFactory, StaticFileWriter, + BlockNumReader, DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, + StaticFileWriter, }; use std::{io::BufReader, sync::Arc}; use tracing::info; @@ -24,12 +24,11 @@ pub struct InitStateCommandOp { #[command(flatten)] init_state: reth_cli_commands::init_state::InitStateCommand, - /// **Optimism Mainnet Only** - /// - /// Specifies whether to initialize the state without relying on OVM historical data. + /// Specifies whether to initialize the state without relying on OVM or EVM historical data. /// /// When enabled, and before inserting the state, it creates a dummy chain up to the last OVM - /// block (#105235062) (14GB / 90 seconds). It then, appends the Bedrock block. + /// block (#105235062) (14GB / 90 seconds). It then, appends the Bedrock block. This is + /// hardcoded for OP mainnet, for other OP chains you will need to pass in a header. /// /// - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be /// ignored. @@ -40,42 +39,59 @@ pub struct InitStateCommandOp { impl> InitStateCommandOp { /// Execute the `init` command pub async fn execute>( - self, + mut self, ) -> eyre::Result<()> { - info!(target: "reth::cli", "Reth init-state starting"); + // If using --without-ovm for OP mainnet, handle the special case with hardcoded Bedrock + // header. Otherwise delegate to the base InitStateCommand implementation. 
+ if self.without_ovm { + if self.init_state.env.chain.is_optimism_mainnet() { + return self.execute_with_bedrock_header::(); + } + + // For non-mainnet OP chains with --without-ovm, use the base implementation + // by setting the without_evm flag + self.init_state.without_evm = true; + } + + self.init_state.execute::().await + } - let Environment { config, provider_factory, .. } = - self.init_state.env.init::(AccessRights::RW)?; + /// Execute init-state with hardcoded Bedrock header for OP mainnet. + fn execute_with_bedrock_header< + N: CliNodeTypes, + >( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "Reth init-state starting for OP mainnet"); + let env = self.init_state.env.init::(AccessRights::RW)?; + let Environment { config, provider_factory, .. } = env; let static_file_provider = provider_factory.static_file_provider(); let provider_rw = provider_factory.database_provider_rw()?; - // OP-Mainnet may want to bootstrap a chain without OVM historical data - if provider_factory.chain_spec().is_optimism_mainnet() && self.without_ovm { - let last_block_number = provider_rw.last_block_number()?; - - if last_block_number == 0 { - reth_cli_commands::init_state::without_evm::setup_without_evm( - &provider_rw, - SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), - |number| { - let mut header = Header::default(); - header.set_number(number); - header - }, - )?; - - // SAFETY: it's safe to commit static files, since in the event of a crash, they - // will be unwound according to database checkpoints. - // - // Necessary to commit, so the BEDROCK_HEADER is accessible to provider_rw and - // init_state_dump - static_file_provider.commit()?; - } else if last_block_number > 0 && last_block_number < BEDROCK_HEADER.number { - return Err(eyre::eyre!( - "Data directory should be empty when calling init-state with --without-ovm." - )) - } + let last_block_number = provider_rw.last_block_number()?; + + if last_block_number == 0 { + reth_cli_commands::init_state::without_evm::setup_without_evm( + &provider_rw, + SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), + |number| { + let mut header = Header::default(); + header.set_number(number); + header + }, + )?; + + // SAFETY: it's safe to commit static files, since in the event of a crash, they + // will be unwound according to database checkpoints. + // + // Necessary to commit, so the BEDROCK_HEADER is accessible to provider_rw and + // init_state_dump + static_file_provider.commit()?; + } else if last_block_number > 0 && last_block_number < BEDROCK_HEADER.number { + return Err(eyre::eyre!( + "Data directory should be empty when calling init-state with --without-ovm." 
+ )) } info!(target: "reth::cli", "Initiating state dump"); From 8eb5461dad9e5ec88044caf1acf9f8e042728220 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Mon, 20 Oct 2025 14:18:24 +0200 Subject: [PATCH 122/371] chore(trie): Add lifetime to cursors returned from Trie/HashedCursorFactorys (#19114) --- crates/cli/commands/src/db/repair_trie.rs | 4 +- .../provider/src/providers/state/overlay.rs | 117 +++++++++--------- crates/trie/db/src/hashed_cursor.rs | 17 ++- crates/trie/db/src/trie_cursor.rs | 16 ++- crates/trie/trie/src/hashed_cursor/mock.rs | 16 ++- crates/trie/trie/src/hashed_cursor/mod.rs | 15 ++- crates/trie/trie/src/hashed_cursor/noop.rs | 14 ++- .../trie/trie/src/hashed_cursor/post_state.rs | 18 ++- crates/trie/trie/src/trie_cursor/in_memory.rs | 19 ++- crates/trie/trie/src/trie_cursor/mock.rs | 14 ++- crates/trie/trie/src/trie_cursor/mod.rs | 17 ++- crates/trie/trie/src/trie_cursor/noop.rs | 15 ++- crates/trie/trie/src/verify.rs | 21 ++-- 13 files changed, 185 insertions(+), 118 deletions(-) diff --git a/crates/cli/commands/src/db/repair_trie.rs b/crates/cli/commands/src/db/repair_trie.rs index e5b7db0e2f0..f7dea67b76f 100644 --- a/crates/cli/commands/src/db/repair_trie.rs +++ b/crates/cli/commands/src/db/repair_trie.rs @@ -52,7 +52,7 @@ fn verify_only(provider_factory: ProviderFactory) -> eyre // Create the verifier let hashed_cursor_factory = DatabaseHashedCursorFactory::new(&tx); let trie_cursor_factory = DatabaseTrieCursorFactory::new(&tx); - let verifier = Verifier::new(trie_cursor_factory, hashed_cursor_factory)?; + let verifier = Verifier::new(&trie_cursor_factory, hashed_cursor_factory)?; let mut inconsistent_nodes = 0; let start_time = Instant::now(); @@ -136,7 +136,7 @@ fn verify_and_repair( let trie_cursor_factory = DatabaseTrieCursorFactory::new(tx); // Create the verifier - let verifier = Verifier::new(trie_cursor_factory, hashed_cursor_factory)?; + let verifier = Verifier::new(&trie_cursor_factory, hashed_cursor_factory)?; let mut inconsistent_nodes = 0; let start_time = Instant::now(); diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 71c1a693193..046072ef5fe 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -145,95 +145,98 @@ where /// using the in-memory overlay factories. #[derive(Debug, Clone)] pub struct OverlayStateProvider { - /// The in-memory trie cursor factory that wraps the database cursor factory. - trie_cursor_factory: - InMemoryTrieCursorFactory, Arc>, - /// The hashed cursor factory that wraps the database cursor factory. - hashed_cursor_factory: HashedPostStateCursorFactory< - DatabaseHashedCursorFactory, - Arc, - >, + provider: Provider, + trie_updates: Arc, + hashed_post_state: Arc, } impl OverlayStateProvider where - Provider: DBProvider + Clone, + Provider: DBProvider, { /// Create new overlay state provider. The `Provider` must be cloneable, which generally means /// it should be wrapped in an `Arc`. 
- pub fn new( + pub const fn new( provider: Provider, trie_updates: Arc, hashed_post_state: Arc, ) -> Self { - // Create the trie cursor factory - let db_trie_cursor_factory = DatabaseTrieCursorFactory::new(provider.clone().into_tx()); - let trie_cursor_factory = - InMemoryTrieCursorFactory::new(db_trie_cursor_factory, trie_updates); - - // Create the hashed cursor factory - let db_hashed_cursor_factory = DatabaseHashedCursorFactory::new(provider.into_tx()); - let hashed_cursor_factory = - HashedPostStateCursorFactory::new(db_hashed_cursor_factory, hashed_post_state); - - Self { trie_cursor_factory, hashed_cursor_factory } + Self { provider, trie_updates, hashed_post_state } } } impl TrieCursorFactory for OverlayStateProvider where - Provider: DBProvider + Clone, - InMemoryTrieCursorFactory, Arc>: - TrieCursorFactory, + Provider: DBProvider, { - type AccountTrieCursor = , - Arc, - > as TrieCursorFactory>::AccountTrieCursor; - - type StorageTrieCursor = , - Arc, - > as TrieCursorFactory>::StorageTrieCursor; - - fn account_trie_cursor(&self) -> Result { - self.trie_cursor_factory.account_trie_cursor() + type AccountTrieCursor<'a> + = , + &'a TrieUpdatesSorted, + > as TrieCursorFactory>::AccountTrieCursor<'a> + where + Self: 'a; + + type StorageTrieCursor<'a> + = , + &'a TrieUpdatesSorted, + > as TrieCursorFactory>::StorageTrieCursor<'a> + where + Self: 'a; + + fn account_trie_cursor(&self) -> Result, DatabaseError> { + let db_trie_cursor_factory = DatabaseTrieCursorFactory::new(self.provider.tx_ref()); + let trie_cursor_factory = + InMemoryTrieCursorFactory::new(db_trie_cursor_factory, self.trie_updates.as_ref()); + trie_cursor_factory.account_trie_cursor() } fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result { - self.trie_cursor_factory.storage_trie_cursor(hashed_address) + ) -> Result, DatabaseError> { + let db_trie_cursor_factory = DatabaseTrieCursorFactory::new(self.provider.tx_ref()); + let trie_cursor_factory = + InMemoryTrieCursorFactory::new(db_trie_cursor_factory, self.trie_updates.as_ref()); + trie_cursor_factory.storage_trie_cursor(hashed_address) } } impl HashedCursorFactory for OverlayStateProvider where - Provider: DBProvider + Clone, - HashedPostStateCursorFactory< - DatabaseHashedCursorFactory, - Arc, - >: HashedCursorFactory, + Provider: DBProvider, { - type AccountCursor = , - Arc, - > as HashedCursorFactory>::AccountCursor; - - type StorageCursor = , - Arc, - > as HashedCursorFactory>::StorageCursor; - - fn hashed_account_cursor(&self) -> Result { - self.hashed_cursor_factory.hashed_account_cursor() + type AccountCursor<'a> + = , + &'a Arc, + > as HashedCursorFactory>::AccountCursor<'a> + where + Self: 'a; + + type StorageCursor<'a> + = , + &'a Arc, + > as HashedCursorFactory>::StorageCursor<'a> + where + Self: 'a; + + fn hashed_account_cursor(&self) -> Result, DatabaseError> { + let db_hashed_cursor_factory = DatabaseHashedCursorFactory::new(self.provider.tx_ref()); + let hashed_cursor_factory = + HashedPostStateCursorFactory::new(db_hashed_cursor_factory, &self.hashed_post_state); + hashed_cursor_factory.hashed_account_cursor() } fn hashed_storage_cursor( &self, hashed_address: B256, - ) -> Result { - self.hashed_cursor_factory.hashed_storage_cursor(hashed_address) + ) -> Result, DatabaseError> { + let db_hashed_cursor_factory = DatabaseHashedCursorFactory::new(self.provider.tx_ref()); + let hashed_cursor_factory = + HashedPostStateCursorFactory::new(db_hashed_cursor_factory, &self.hashed_post_state); + 
hashed_cursor_factory.hashed_storage_cursor(hashed_address) } } diff --git a/crates/trie/db/src/hashed_cursor.rs b/crates/trie/db/src/hashed_cursor.rs index 06e6914275c..4fe3d57429f 100644 --- a/crates/trie/db/src/hashed_cursor.rs +++ b/crates/trie/db/src/hashed_cursor.rs @@ -20,18 +20,23 @@ impl DatabaseHashedCursorFactory { } impl HashedCursorFactory for DatabaseHashedCursorFactory<&TX> { - type AccountCursor = DatabaseHashedAccountCursor<::Cursor>; - type StorageCursor = - DatabaseHashedStorageCursor<::DupCursor>; - - fn hashed_account_cursor(&self) -> Result { + type AccountCursor<'a> + = DatabaseHashedAccountCursor<::Cursor> + where + Self: 'a; + type StorageCursor<'a> + = DatabaseHashedStorageCursor<::DupCursor> + where + Self: 'a; + + fn hashed_account_cursor(&self) -> Result, DatabaseError> { Ok(DatabaseHashedAccountCursor(self.0.cursor_read::()?)) } fn hashed_storage_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { Ok(DatabaseHashedStorageCursor::new( self.0.cursor_dup_read::()?, hashed_address, diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs index b1e9032fc0f..d05c3fd92da 100644 --- a/crates/trie/db/src/trie_cursor.rs +++ b/crates/trie/db/src/trie_cursor.rs @@ -26,18 +26,24 @@ impl TrieCursorFactory for DatabaseTrieCursorFactory<&TX> where TX: DbTx, { - type AccountTrieCursor = DatabaseAccountTrieCursor<::Cursor>; - type StorageTrieCursor = - DatabaseStorageTrieCursor<::DupCursor>; + type AccountTrieCursor<'a> + = DatabaseAccountTrieCursor<::Cursor> + where + Self: 'a; - fn account_trie_cursor(&self) -> Result { + type StorageTrieCursor<'a> + = DatabaseStorageTrieCursor<::DupCursor> + where + Self: 'a; + + fn account_trie_cursor(&self) -> Result, DatabaseError> { Ok(DatabaseAccountTrieCursor::new(self.0.cursor_read::()?)) } fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { Ok(DatabaseStorageTrieCursor::new( self.0.cursor_dup_read::()?, hashed_address, diff --git a/crates/trie/trie/src/hashed_cursor/mock.rs b/crates/trie/trie/src/hashed_cursor/mock.rs index 308f05e4c8a..f091ae6ffe5 100644 --- a/crates/trie/trie/src/hashed_cursor/mock.rs +++ b/crates/trie/trie/src/hashed_cursor/mock.rs @@ -55,17 +55,23 @@ impl MockHashedCursorFactory { } impl HashedCursorFactory for MockHashedCursorFactory { - type AccountCursor = MockHashedCursor; - type StorageCursor = MockHashedCursor; - - fn hashed_account_cursor(&self) -> Result { + type AccountCursor<'a> + = MockHashedCursor + where + Self: 'a; + type StorageCursor<'a> + = MockHashedCursor + where + Self: 'a; + + fn hashed_account_cursor(&self) -> Result, DatabaseError> { Ok(MockHashedCursor::new(self.hashed_accounts.clone(), self.visited_account_keys.clone())) } fn hashed_storage_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { Ok(MockHashedCursor::new( self.hashed_storage_tries .get(&hashed_address) diff --git a/crates/trie/trie/src/hashed_cursor/mod.rs b/crates/trie/trie/src/hashed_cursor/mod.rs index 7917f675452..6c4788a3360 100644 --- a/crates/trie/trie/src/hashed_cursor/mod.rs +++ b/crates/trie/trie/src/hashed_cursor/mod.rs @@ -14,23 +14,29 @@ pub mod noop; pub mod mock; /// The factory trait for creating cursors over the hashed state. +#[auto_impl::auto_impl(&)] pub trait HashedCursorFactory { /// The hashed account cursor type. - type AccountCursor: HashedCursor; + type AccountCursor<'a>: HashedCursor + where + Self: 'a; /// The hashed storage cursor type. 
- type StorageCursor: HashedStorageCursor; + type StorageCursor<'a>: HashedStorageCursor + where + Self: 'a; /// Returns a cursor for iterating over all hashed accounts in the state. - fn hashed_account_cursor(&self) -> Result; + fn hashed_account_cursor(&self) -> Result, DatabaseError>; /// Returns a cursor for iterating over all hashed storage entries in the state. fn hashed_storage_cursor( &self, hashed_address: B256, - ) -> Result; + ) -> Result, DatabaseError>; } /// The cursor for iterating over hashed entries. +#[auto_impl::auto_impl(&mut)] pub trait HashedCursor { /// Value returned by the cursor. type Value: std::fmt::Debug; @@ -44,6 +50,7 @@ pub trait HashedCursor { } /// The cursor for iterating over hashed storage entries. +#[auto_impl::auto_impl(&mut)] pub trait HashedStorageCursor: HashedCursor { /// Returns `true` if there are no entries for a given key. fn is_storage_empty(&mut self) -> Result; diff --git a/crates/trie/trie/src/hashed_cursor/noop.rs b/crates/trie/trie/src/hashed_cursor/noop.rs index 58b78dc245f..e5bc44f0f5c 100644 --- a/crates/trie/trie/src/hashed_cursor/noop.rs +++ b/crates/trie/trie/src/hashed_cursor/noop.rs @@ -9,17 +9,23 @@ use reth_storage_errors::db::DatabaseError; pub struct NoopHashedCursorFactory; impl HashedCursorFactory for NoopHashedCursorFactory { - type AccountCursor = NoopHashedAccountCursor; - type StorageCursor = NoopHashedStorageCursor; + type AccountCursor<'a> + = NoopHashedAccountCursor + where + Self: 'a; + type StorageCursor<'a> + = NoopHashedStorageCursor + where + Self: 'a; - fn hashed_account_cursor(&self) -> Result { + fn hashed_account_cursor(&self) -> Result, DatabaseError> { Ok(NoopHashedAccountCursor::default()) } fn hashed_storage_cursor( &self, _hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { Ok(NoopHashedStorageCursor::default()) } } diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index e81aa4af22a..896251f3634 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -19,15 +19,21 @@ impl HashedPostStateCursorFactory { } } -impl<'a, CF, T> HashedCursorFactory for HashedPostStateCursorFactory +impl<'overlay, CF, T> HashedCursorFactory for HashedPostStateCursorFactory where CF: HashedCursorFactory, T: AsRef, { - type AccountCursor = HashedPostStateAccountCursor<'a, CF::AccountCursor>; - type StorageCursor = HashedPostStateStorageCursor<'a, CF::StorageCursor>; - - fn hashed_account_cursor(&self) -> Result { + type AccountCursor<'cursor> + = HashedPostStateAccountCursor<'overlay, CF::AccountCursor<'cursor>> + where + Self: 'cursor; + type StorageCursor<'cursor> + = HashedPostStateStorageCursor<'overlay, CF::StorageCursor<'cursor>> + where + Self: 'cursor; + + fn hashed_account_cursor(&self) -> Result, DatabaseError> { let cursor = self.cursor_factory.hashed_account_cursor()?; Ok(HashedPostStateAccountCursor::new(cursor, &self.post_state.as_ref().accounts)) } @@ -35,7 +41,7 @@ where fn hashed_storage_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { let cursor = self.cursor_factory.hashed_storage_cursor(hashed_address)?; Ok(HashedPostStateStorageCursor::new( cursor, diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index 7f1b933e206..1c7f179ad0a 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -20,15 +20,22 @@ impl 
InMemoryTrieCursorFactory { } } -impl<'a, CF, T> TrieCursorFactory for InMemoryTrieCursorFactory +impl<'overlay, CF, T> TrieCursorFactory for InMemoryTrieCursorFactory where - CF: TrieCursorFactory, + CF: TrieCursorFactory + 'overlay, T: AsRef, { - type AccountTrieCursor = InMemoryTrieCursor<'a, CF::AccountTrieCursor>; - type StorageTrieCursor = InMemoryTrieCursor<'a, CF::StorageTrieCursor>; + type AccountTrieCursor<'cursor> + = InMemoryTrieCursor<'overlay, CF::AccountTrieCursor<'cursor>> + where + Self: 'cursor; - fn account_trie_cursor(&self) -> Result { + type StorageTrieCursor<'cursor> + = InMemoryTrieCursor<'overlay, CF::StorageTrieCursor<'cursor>> + where + Self: 'cursor; + + fn account_trie_cursor(&self) -> Result, DatabaseError> { let cursor = self.cursor_factory.account_trie_cursor()?; Ok(InMemoryTrieCursor::new(Some(cursor), self.trie_updates.as_ref().account_nodes_ref())) } @@ -36,7 +43,7 @@ where fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { // if the storage trie has no updates then we use this as the in-memory overlay. static EMPTY_UPDATES: Vec<(Nibbles, Option)> = Vec::new(); diff --git a/crates/trie/trie/src/trie_cursor/mock.rs b/crates/trie/trie/src/trie_cursor/mock.rs index add2d7ddef3..313df0443e3 100644 --- a/crates/trie/trie/src/trie_cursor/mock.rs +++ b/crates/trie/trie/src/trie_cursor/mock.rs @@ -52,11 +52,17 @@ impl MockTrieCursorFactory { } impl TrieCursorFactory for MockTrieCursorFactory { - type AccountTrieCursor = MockTrieCursor; - type StorageTrieCursor = MockTrieCursor; + type AccountTrieCursor<'a> + = MockTrieCursor + where + Self: 'a; + type StorageTrieCursor<'a> + = MockTrieCursor + where + Self: 'a; /// Generates a mock account trie cursor. - fn account_trie_cursor(&self) -> Result { + fn account_trie_cursor(&self) -> Result, DatabaseError> { Ok(MockTrieCursor::new(self.account_trie_nodes.clone(), self.visited_account_keys.clone())) } @@ -64,7 +70,7 @@ impl TrieCursorFactory for MockTrieCursorFactory { fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { Ok(MockTrieCursor::new( self.storage_tries .get(&hashed_address) diff --git a/crates/trie/trie/src/trie_cursor/mod.rs b/crates/trie/trie/src/trie_cursor/mod.rs index 269611150d6..05a6c09e948 100644 --- a/crates/trie/trie/src/trie_cursor/mod.rs +++ b/crates/trie/trie/src/trie_cursor/mod.rs @@ -24,24 +24,29 @@ pub use self::{depth_first::DepthFirstTrieIterator, in_memory::*, subnode::Curso #[auto_impl::auto_impl(&)] pub trait TrieCursorFactory { /// The account trie cursor type. - type AccountTrieCursor: TrieCursor; + type AccountTrieCursor<'a>: TrieCursor + where + Self: 'a; + /// The storage trie cursor type. - type StorageTrieCursor: TrieCursor; + type StorageTrieCursor<'a>: TrieCursor + where + Self: 'a; /// Create an account trie cursor. - fn account_trie_cursor(&self) -> Result; + fn account_trie_cursor(&self) -> Result, DatabaseError>; /// Create a storage tries cursor. fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result; + ) -> Result, DatabaseError>; } /// A cursor for traversing stored trie nodes. The cursor must iterate over keys in /// lexicographical order. -#[auto_impl::auto_impl(&mut, Box)] -pub trait TrieCursor: Send + Sync { +#[auto_impl::auto_impl(&mut)] +pub trait TrieCursor { /// Move the cursor to the key and return if it is an exact match. 
fn seek_exact( &mut self, diff --git a/crates/trie/trie/src/trie_cursor/noop.rs b/crates/trie/trie/src/trie_cursor/noop.rs index de409c59fe1..a00a18e4f00 100644 --- a/crates/trie/trie/src/trie_cursor/noop.rs +++ b/crates/trie/trie/src/trie_cursor/noop.rs @@ -9,11 +9,18 @@ use reth_storage_errors::db::DatabaseError; pub struct NoopTrieCursorFactory; impl TrieCursorFactory for NoopTrieCursorFactory { - type AccountTrieCursor = NoopAccountTrieCursor; - type StorageTrieCursor = NoopStorageTrieCursor; + type AccountTrieCursor<'a> + = NoopAccountTrieCursor + where + Self: 'a; + + type StorageTrieCursor<'a> + = NoopStorageTrieCursor + where + Self: 'a; /// Generates a noop account trie cursor. - fn account_trie_cursor(&self) -> Result { + fn account_trie_cursor(&self) -> Result, DatabaseError> { Ok(NoopAccountTrieCursor::default()) } @@ -21,7 +28,7 @@ impl TrieCursorFactory for NoopTrieCursorFactory { fn storage_trie_cursor( &self, _hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { Ok(NoopStorageTrieCursor::default()) } } diff --git a/crates/trie/trie/src/verify.rs b/crates/trie/trie/src/verify.rs index 96059211458..4299a669165 100644 --- a/crates/trie/trie/src/verify.rs +++ b/crates/trie/trie/src/verify.rs @@ -301,21 +301,24 @@ impl SingleVerifier> { /// database tables as the source of truth. This will iteratively recompute the entire trie based /// on the hashed state, and produce any discovered [`Output`]s via the `next` method. #[derive(Debug)] -pub struct Verifier { - trie_cursor_factory: T, +pub struct Verifier<'a, T: TrieCursorFactory, H> { + trie_cursor_factory: &'a T, hashed_cursor_factory: H, branch_node_iter: StateRootBranchNodesIter, outputs: Vec, - account: SingleVerifier>, - storage: Option<(B256, SingleVerifier>)>, + account: SingleVerifier>>, + storage: Option<(B256, SingleVerifier>>)>, complete: bool, } -impl Verifier { +impl<'a, T: TrieCursorFactory, H: HashedCursorFactory + Clone> Verifier<'a, T, H> { /// Creates a new verifier instance. 
- pub fn new(trie_cursor_factory: T, hashed_cursor_factory: H) -> Result { + pub fn new( + trie_cursor_factory: &'a T, + hashed_cursor_factory: H, + ) -> Result { Ok(Self { - trie_cursor_factory: trie_cursor_factory.clone(), + trie_cursor_factory, hashed_cursor_factory: hashed_cursor_factory.clone(), branch_node_iter: StateRootBranchNodesIter::new(hashed_cursor_factory), outputs: Default::default(), @@ -326,7 +329,7 @@ impl Verifier Verifier { +impl<'a, T: TrieCursorFactory, H: HashedCursorFactory + Clone> Verifier<'a, T, H> { fn new_storage( &mut self, account: B256, @@ -444,7 +447,7 @@ impl Verifier { } } -impl Iterator for Verifier { +impl<'a, T: TrieCursorFactory, H: HashedCursorFactory + Clone> Iterator for Verifier<'a, T, H> { type Item = Result; fn next(&mut self) -> Option { From ca19c19b385d3e6d68838dc9933fec0495c6c78d Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Mon, 20 Oct 2025 15:04:05 +0200 Subject: [PATCH 123/371] chore: fix+update nix flake (#19142) --- flake.lock | 18 +++++++++--------- flake.nix | 5 +++++ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/flake.lock b/flake.lock index 704d14161e0..fd2bf9ac61e 100644 --- a/flake.lock +++ b/flake.lock @@ -2,11 +2,11 @@ "nodes": { "crane": { "locked": { - "lastModified": 1754269165, - "narHash": "sha256-0tcS8FHd4QjbCVoxN9jI+PjHgA4vc/IjkUSp+N3zy0U=", + "lastModified": 1760924934, + "narHash": "sha256-tuuqY5aU7cUkR71sO2TraVKK2boYrdW3gCSXUkF4i44=", "owner": "ipetkov", "repo": "crane", - "rev": "444e81206df3f7d92780680e45858e31d2f07a08", + "rev": "c6b4d5308293d0d04fcfeee92705017537cad02f", "type": "github" }, "original": { @@ -23,11 +23,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1755499663, - "narHash": "sha256-OxHGov+A4qR4kpO3e1I3LFR78IAKvDFnWoWsDWvFhKU=", + "lastModified": 1760942671, + "narHash": "sha256-LyO+TwzM7C8TJJkgbqC+BMnPiJX8XHQJmssTWS2Ze9k=", "owner": "nix-community", "repo": "fenix", - "rev": "d1ff4457857ad551e8d6c7c79324b44fac518b8b", + "rev": "b5e669194d67dbd4c659c40bb67476d9285b9a13", "type": "github" }, "original": { @@ -63,11 +63,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1755004716, - "narHash": "sha256-TbhPR5Fqw5LjAeI3/FOPhNNFQCF3cieKCJWWupeZmiA=", + "lastModified": 1760898410, + "narHash": "sha256-bTMk3D0V+6t3qOjXUfWSwjztEuLoAsgtAtqp6/wwfOc=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "b2a58b8c6eff3c3a2c8b5c70dbf69ead78284194", + "rev": "c7e7eb9dc42df01016d795b0fd3a9ae87b7ada1c", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 7550edc31e3..512b69e3660 100644 --- a/flake.nix +++ b/flake.nix @@ -120,6 +120,11 @@ rustNightly.rustfmt pkgs.cargo-nextest ]; + + # Remove the hardening added by nix to fix jmalloc compilation error. 
+ # More info: https://github.com/tikv/jemallocator/issues/108 + hardeningDisable = [ "fortify" ]; + } overrides); } ); From 20f807778d2ec3dcc98ebd6e0a15f197dc8dd5f6 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 20 Oct 2025 17:15:10 +0400 Subject: [PATCH 124/371] Revert "refactor: unify `Pipeline` creation codepaths" (#19143) --- crates/cli/commands/src/common.rs | 12 +- crates/node/builder/src/launch/common.rs | 144 +++++++++++++++++- crates/node/builder/src/launch/engine.rs | 19 +-- crates/stages/api/src/pipeline/mod.rs | 87 ++--------- crates/stages/stages/src/stages/mod.rs | 44 +++--- .../provider/src/providers/database/mod.rs | 5 - .../src/providers/static_file/manager.rs | 11 +- 7 files changed, 186 insertions(+), 136 deletions(-) diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 25f32f63a2b..1ceba8f57da 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -24,7 +24,7 @@ use reth_provider::{ providers::{BlockchainProvider, NodeTypesForProvider, StaticFileProvider}, ProviderFactory, StaticFileProviderFactory, }; -use reth_stages::{sets::DefaultStages, Pipeline}; +use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; @@ -126,6 +126,7 @@ impl EnvironmentArgs { where C: ChainSpecParser, { + let has_receipt_pruning = config.prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); let prune_modes = config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(); let factory = ProviderFactory::>>::new( @@ -136,8 +137,9 @@ impl EnvironmentArgs { .with_prune_modes(prune_modes.clone()); // Check for consistency between database and static files. - if let Some(unwind_target) = - factory.static_file_provider().check_consistency(&factory.provider()?)? + if let Some(unwind_target) = factory + .static_file_provider() + .check_consistency(&factory.provider()?, has_receipt_pruning)? { if factory.db_ref().is_read_only()? { warn!(target: "reth::cli", ?unwind_target, "Inconsistent storage. Restart node to heal."); @@ -148,7 +150,7 @@ impl EnvironmentArgs { // instead. assert_ne!( unwind_target, - 0, + PipelineTarget::Unwind(0), "A static file <> database inconsistency was found that would trigger an unwind to block 0" ); @@ -173,7 +175,7 @@ impl EnvironmentArgs { // Move all applicable data from database to static files. 
pipeline.move_to_static_files()?; - pipeline.unwind(unwind_target, None)?; + pipeline.unwind(unwind_target.unwind_target().expect("should exist"), None)?; } Ok(factory) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 2d1fb6924d8..b43dc2a2a6a 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -41,10 +41,12 @@ use eyre::Context; use rayon::ThreadPoolBuilder; use reth_chainspec::{Chain, EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; +use reth_consensus::noop::NoopConsensus; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitStorageError}; +use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_engine_local::MiningMode; -use reth_evm::ConfigureEvm; +use reth_evm::{noop::NoopEvmConfig, ConfigureEvm}; use reth_exex::ExExManagerHandle; use reth_fs_util as fs; use reth_network_p2p::headers::client::HeadersClient; @@ -65,19 +67,25 @@ use reth_node_metrics::{ }; use reth_provider::{ providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider}, - BlockNumReader, BlockReaderIdExt, ProviderError, ProviderFactory, ProviderResult, - StaticFileProviderFactory, + BlockHashReader, BlockNumReader, BlockReaderIdExt, ProviderError, ProviderFactory, + ProviderResult, StageCheckpointReader, StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_builder::config::RethRpcServerConfig; use reth_rpc_layer::JwtSecret; -use reth_stages::{stages::EraImportSource, MetricEvent}; +use reth_stages::{ + sets::DefaultStages, stages::EraImportSource, MetricEvent, PipelineBuilder, PipelineTarget, + StageId, +}; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, error, info, warn}; use reth_transaction_pool::TransactionPool; use std::{sync::Arc, thread::available_parallelism}; -use tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; +use tokio::sync::{ + mpsc::{unbounded_channel, UnboundedSender}, + oneshot, watch, +}; use futures::{future::Either, stream, Stream, StreamExt}; use reth_node_ethstats::EthStatsService; @@ -458,13 +466,70 @@ where N: ProviderNodeTypes, Evm: ConfigureEvm + 'static, { - Ok(ProviderFactory::new( + let factory = ProviderFactory::new( self.right().clone(), self.chain_spec(), StaticFileProvider::read_write(self.data_dir().static_files())?, ) .with_prune_modes(self.prune_modes()) - .with_static_files_metrics()) + .with_static_files_metrics(); + + let has_receipt_pruning = + self.toml_config().prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); + + // Check for consistency between database and static files. If it fails, it unwinds to + // the first block that's consistent between database and static files. + if let Some(unwind_target) = factory + .static_file_provider() + .check_consistency(&factory.provider()?, has_receipt_pruning)? + { + // Highly unlikely to happen, and given its destructive nature, it's better to panic + // instead. 
+ assert_ne!( + unwind_target, + PipelineTarget::Unwind(0), + "A static file <> database inconsistency was found that would trigger an unwind to block 0" + ); + + info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check."); + + let (_tip_tx, tip_rx) = watch::channel(B256::ZERO); + + // Builds an unwind-only pipeline + let pipeline = PipelineBuilder::default() + .add_stages(DefaultStages::new( + factory.clone(), + tip_rx, + Arc::new(NoopConsensus::default()), + NoopHeaderDownloader::default(), + NoopBodiesDownloader::default(), + NoopEvmConfig::::default(), + self.toml_config().stages.clone(), + self.prune_modes(), + None, + )) + .build( + factory.clone(), + StaticFileProducer::new(factory.clone(), self.prune_modes()), + ); + + // Unwinds to block + let (tx, rx) = oneshot::channel(); + + // Pipeline should be run as blocking and panic if it fails. + self.task_executor().spawn_critical_blocking( + "pipeline task", + Box::pin(async move { + let (_, result) = pipeline.run_as_fut(Some(unwind_target)).await; + let _ = tx.send(result); + }), + ); + rx.await?.inspect_err(|err| { + error!(target: "reth::cli", unwind_target = %unwind_target, %err, "failed to run unwind") + })?; + } + + Ok(factory) } /// Creates a new [`ProviderFactory`] and attaches it to the launch context. @@ -787,6 +852,21 @@ where &self.node_adapter().provider } + /// Returns the initial backfill to sync to at launch. + /// + /// This returns the configured `debug.tip` if set, otherwise it will check if backfill was + /// previously interrupted and returns the block hash of the last checkpoint, see also + /// [`Self::check_pipeline_consistency`] + pub fn initial_backfill_target(&self) -> ProviderResult> { + let mut initial_target = self.node_config().debug.tip; + + if initial_target.is_none() { + initial_target = self.check_pipeline_consistency()?; + } + + Ok(initial_target) + } + /// Returns true if the node should terminate after the initial backfill run. /// /// This is the case if any of these configs are set: @@ -800,7 +880,7 @@ where /// /// This checks for OP-Mainnet and ensures we have all the necessary data to progress (past /// bedrock height) - pub fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> { + fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> { if self.chain_spec().is_optimism() && !self.is_dev() && self.chain_id() == Chain::optimism_mainnet() @@ -818,6 +898,54 @@ where Ok(()) } + /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less + /// than the checkpoint of the first stage). + /// + /// This will return the pipeline target if: + /// * the pipeline was interrupted during its previous run + /// * a new stage was added + /// * stage data was dropped manually through `reth stage drop ...` + /// + /// # Returns + /// + /// A target block hash if the pipeline is inconsistent, otherwise `None`. + pub fn check_pipeline_consistency(&self) -> ProviderResult> { + // If no target was provided, check if the stages are congruent - check if the + // checkpoint of the last stage matches the checkpoint of the first. + let first_stage_checkpoint = self + .blockchain_db() + .get_stage_checkpoint(*StageId::ALL.first().unwrap())? + .unwrap_or_default() + .block_number; + + // Skip the first stage as we've already retrieved it and comparing all other checkpoints + // against it. 
+ for stage_id in StageId::ALL.iter().skip(1) { + let stage_checkpoint = self + .blockchain_db() + .get_stage_checkpoint(*stage_id)? + .unwrap_or_default() + .block_number; + + // If the checkpoint of any stage is less than the checkpoint of the first stage, + // retrieve and return the block hash of the latest header and use it as the target. + if stage_checkpoint < first_stage_checkpoint { + debug!( + target: "consensus::engine", + first_stage_checkpoint, + inconsistent_stage_id = %stage_id, + inconsistent_stage_checkpoint = stage_checkpoint, + "Pipeline sync progress is inconsistent" + ); + return self.blockchain_db().block_hash(first_stage_checkpoint); + } + } + + self.ensure_chain_specific_db_checks()?; + + Ok(None) + } + /// Expire the pre-merge transactions if the node is configured to do so and the chain has a /// merge block. /// diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 3b43f5f3299..556eb5670d1 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -117,6 +117,9 @@ impl EngineNodeLauncher { })? .with_components(components_builder, on_component_initialized).await?; + // Try to expire pre-merge transaction history if configured + ctx.expire_pre_merge_transactions()?; + // spawn exexs if any let maybe_exex_manager_handle = ctx.launch_exex(installed_exex).await?; @@ -138,7 +141,7 @@ impl EngineNodeLauncher { let consensus = Arc::new(ctx.components().consensus().clone()); - let mut pipeline = build_networked_pipeline( + let pipeline = build_networked_pipeline( &ctx.toml_config().stages, network_client.clone(), consensus.clone(), @@ -154,18 +157,7 @@ impl EngineNodeLauncher { )?; // The new engine writes directly to static files. This ensures that they're up to the tip. - pipeline.ensure_static_files_consistency().await?; - - // Try to expire pre-merge transaction history if configured - ctx.expire_pre_merge_transactions()?; - - let initial_target = if let Some(tip) = ctx.node_config().debug.tip { - Some(tip) - } else { - pipeline.initial_backfill_target()? - }; - - ctx.ensure_chain_specific_db_checks()?; + pipeline.move_to_static_files()?; let pipeline_events = pipeline.events(); @@ -258,6 +250,7 @@ impl EngineNodeLauncher { add_ons.launch_add_ons(add_ons_ctx).await?; // Run consensus engine to completion + let initial_target = ctx.initial_backfill_target()?; let mut built_payloads = ctx .components() .payload_builder_handle() diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index ac35a489031..e8542c36da6 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -9,7 +9,7 @@ use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH; use reth_provider::{ providers::ProviderNodeTypes, BlockHashReader, BlockNumReader, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, DatabaseProviderFactory, ProviderFactory, - PruneCheckpointReader, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, + PruneCheckpointReader, StageCheckpointReader, StageCheckpointWriter, }; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; @@ -31,7 +31,7 @@ use crate::{ }; pub use builder::*; use progress::*; -use reth_errors::{ProviderResult, RethResult}; +use reth_errors::RethResult; pub use set::*; /// A container for a queued stage. 
@@ -101,6 +101,12 @@ impl Pipeline { PipelineBuilder::default() } + /// Return the minimum block number achieved by + /// any stage during the execution of the pipeline. + pub const fn minimum_block_number(&self) -> Option { + self.progress.minimum_block_number + } + /// Set tip for reverse sync. #[track_caller] pub fn set_tip(&self, tip: B256) { @@ -121,7 +127,9 @@ impl Pipeline { ) -> &mut dyn Stage< as DatabaseProviderFactory>::ProviderRW> { &mut self.stages[idx] } +} +impl Pipeline { /// Registers progress metrics for each registered stage pub fn register_metrics(&mut self) -> Result<(), PipelineError> { let Some(metrics_tx) = &mut self.metrics_tx else { return Ok(()) }; @@ -282,81 +290,6 @@ impl Pipeline { Ok(()) } - /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less - /// than the checkpoint of the first stage). - /// - /// This will return the pipeline target if: - /// * the pipeline was interrupted during its previous run - /// * a new stage was added - /// * stage data was dropped manually through `reth stage drop ...` - /// - /// # Returns - /// - /// A target block hash if the pipeline is inconsistent, otherwise `None`. - pub fn initial_backfill_target(&self) -> ProviderResult> { - let provider = self.provider_factory.provider()?; - - // If no target was provided, check if the stages are congruent - check if the - // checkpoint of the last stage matches the checkpoint of the first. - let first_stage_checkpoint = provider - .get_stage_checkpoint(self.stages.first().unwrap().id())? - .unwrap_or_default() - .block_number; - - // Skip the first stage as we've already retrieved it and comparing all other checkpoints - // against it. - for stage in self.stages.iter().skip(1) { - let stage_id = stage.id(); - - let stage_checkpoint = - provider.get_stage_checkpoint(stage_id)?.unwrap_or_default().block_number; - - // If the checkpoint of any stage is less than the checkpoint of the first stage, - // retrieve and return the block hash of the latest header and use it as the target. - if stage_checkpoint < first_stage_checkpoint { - debug!( - target: "consensus::engine", - first_stage_checkpoint, - inconsistent_stage_id = %stage_id, - inconsistent_stage_checkpoint = stage_checkpoint, - "Pipeline sync progress is inconsistent" - ); - return provider.block_hash(first_stage_checkpoint); - } - } - - Ok(None) - } - - /// Checks for consistency between database and static files. If it fails, it unwinds to - /// the first block that's consistent between database and static files. - pub async fn ensure_static_files_consistency(&mut self) -> Result<(), PipelineError> { - let maybe_unwind_target = self - .provider_factory - .static_file_provider() - .check_consistency(&self.provider_factory.provider()?)?; - - self.move_to_static_files()?; - - if let Some(unwind_target) = maybe_unwind_target { - // Highly unlikely to happen, and given its destructive nature, it's better to panic - // instead. - assert_ne!( - unwind_target, - 0, - "A static file <> database inconsistency was found that would trigger an unwind to block 0" - ); - - info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check."); - - self.unwind(unwind_target, None).inspect_err(|err| { - error!(target: "reth::cli", unwind_target = %unwind_target, %err, "failed to run unwind") - })?; - } - - Ok(()) - } - /// Unwind the stages to the target block (exclusive). 
/// /// If the unwind is due to a bad block the number of that block should be specified. diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 40c4cb91368..58fa7cfb324 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -75,7 +75,9 @@ mod tests { StaticFileProviderFactory, StorageReader, }; use reth_prune_types::{PruneMode, PruneModes}; - use reth_stages_api::{ExecInput, ExecutionStageThresholds, Stage, StageCheckpoint, StageId}; + use reth_stages_api::{ + ExecInput, ExecutionStageThresholds, PipelineTarget, Stage, StageCheckpoint, StageId, + }; use reth_static_file_types::StaticFileSegment; use reth_testing_utils::generators::{ self, random_block, random_block_range, random_receipt, BlockRangeParams, @@ -302,7 +304,7 @@ mod tests { prune_count: usize, segment: StaticFileSegment, is_full_node: bool, - expected: Option, + expected: Option, ) { // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. @@ -324,18 +326,11 @@ mod tests { // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. - let mut provider = db.factory.database_provider_ro().unwrap(); - if is_full_node { - provider.set_prune_modes(PruneModes { - receipts: Some(PruneMode::Full), - ..Default::default() - }); - } let mut static_file_provider = db.factory.static_file_provider(); static_file_provider = StaticFileProvider::read_write(static_file_provider.path()).unwrap(); assert!(matches!( static_file_provider - .check_consistency(&provider), + .check_consistency(&db.factory.database_provider_ro().unwrap(), is_full_node,), Ok(e) if e == expected )); } @@ -346,7 +341,7 @@ mod tests { db: &TestStageDB, stage_id: StageId, checkpoint_block_number: BlockNumber, - expected: Option, + expected: Option, ) { let provider_rw = db.factory.provider_rw().unwrap(); provider_rw @@ -357,15 +352,18 @@ mod tests { assert!(matches!( db.factory .static_file_provider() - .check_consistency(&db.factory.database_provider_ro().unwrap()), + .check_consistency(&db.factory.database_provider_ro().unwrap(), false,), Ok(e) if e == expected )); } /// Inserts a dummy value at key and compare the check consistency result against the expected /// one. 
- fn update_db_and_check>(db: &TestStageDB, key: u64, expected: Option) - where + fn update_db_and_check>( + db: &TestStageDB, + key: u64, + expected: Option, + ) where ::Value: Default, { update_db_with_and_check::(db, key, expected, &Default::default()); @@ -376,7 +374,7 @@ mod tests { fn update_db_with_and_check>( db: &TestStageDB, key: u64, - expected: Option, + expected: Option, value: &T::Value, ) { let provider_rw = db.factory.provider_rw().unwrap(); @@ -387,7 +385,7 @@ mod tests { assert!(matches!( db.factory .static_file_provider() - .check_consistency(&db.factory.database_provider_ro().unwrap()), + .check_consistency(&db.factory.database_provider_ro().unwrap(), false), Ok(e) if e == expected )); } @@ -398,7 +396,7 @@ mod tests { let db_provider = db.factory.database_provider_ro().unwrap(); assert!(matches!( - db.factory.static_file_provider().check_consistency(&db_provider), + db.factory.static_file_provider().check_consistency(&db_provider, false), Ok(None) )); } @@ -420,7 +418,7 @@ mod tests { 1, StaticFileSegment::Receipts, archive_node, - Some(88), + Some(PipelineTarget::Unwind(88)), ); simulate_behind_checkpoint_corruption( @@ -428,7 +426,7 @@ mod tests { 3, StaticFileSegment::Headers, archive_node, - Some(86), + Some(PipelineTarget::Unwind(86)), ); } @@ -477,7 +475,7 @@ mod tests { ); // When a checkpoint is ahead, we request a pipeline unwind. - save_checkpoint_and_check(&db, StageId::Headers, 91, Some(block)); + save_checkpoint_and_check(&db, StageId::Headers, 91, Some(PipelineTarget::Unwind(block))); } #[test] @@ -490,7 +488,7 @@ mod tests { .unwrap(); // Creates a gap of one header: static_file db - update_db_and_check::(&db, current + 2, Some(89)); + update_db_and_check::(&db, current + 2, Some(PipelineTarget::Unwind(89))); // Fill the gap, and ensure no unwind is necessary. update_db_and_check::(&db, current + 1, None); @@ -509,7 +507,7 @@ mod tests { update_db_with_and_check::( &db, current + 2, - Some(89), + Some(PipelineTarget::Unwind(89)), &TxLegacy::default().into_signed(Signature::test_signature()).into(), ); @@ -532,7 +530,7 @@ mod tests { .unwrap(); // Creates a gap of one receipt: static_file db - update_db_and_check::(&db, current + 2, Some(89)); + update_db_and_check::(&db, current + 2, Some(PipelineTarget::Unwind(89))); // Fill the gap, and ensure no unwind is necessary. update_db_and_check::(&db, current + 1, None); diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index bd6b1e0f472..df0bc33c461 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -111,11 +111,6 @@ impl ProviderFactory { pub fn into_db(self) -> N::DB { self.db } - - /// Returns reference to the prune modes. 
- pub const fn prune_modes_ref(&self) -> &PruneModes { - &self.prune_modes - } } impl>> ProviderFactory { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 800c761718a..434d3836fb2 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -37,7 +37,7 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; use reth_node_types::{FullNodePrimitives, NodePrimitives}; use reth_primitives_traits::{RecoveredBlock, SealedHeader, SignedTransaction}; -use reth_stages_types::StageId; +use reth_stages_types::{PipelineTarget, StageId}; use reth_static_file_types::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE, @@ -731,14 +731,15 @@ impl StaticFileProvider { /// * its highest block should match the stage checkpoint block number if it's equal or higher /// than the corresponding database table last entry. /// - /// Returns a [`Option`] with block number to unwind to if any healing is further required. + /// Returns a [`Option`] of [`PipelineTarget::Unwind`] if any healing is further required. /// /// WARNING: No static file writer should be held before calling this function, otherwise it /// will deadlock. pub fn check_consistency( &self, provider: &Provider, - ) -> ProviderResult> + has_receipt_pruning: bool, + ) -> ProviderResult> where Provider: DBProvider + BlockReader + StageCheckpointReader + ChainSpecProvider, N: NodePrimitives, @@ -775,7 +776,7 @@ impl StaticFileProvider { }; for segment in StaticFileSegment::iter() { - if provider.prune_modes_ref().has_receipts_pruning() && segment.is_receipts() { + if has_receipt_pruning && segment.is_receipts() { // Pruned nodes (including full node) do not store receipts as static files. continue } @@ -886,7 +887,7 @@ impl StaticFileProvider { } } - Ok(unwind_target) + Ok(unwind_target.map(PipelineTarget::Unwind)) } /// Checks consistency of the latest static file segment and throws an error if at fault. 
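The `check_pipeline_consistency` logic restored by the patch above reduces to a simple rule: if any later stage's checkpoint has fallen behind the checkpoint of the first stage, the first stage's checkpoint block becomes the backfill target again. What follows is a minimal, self-contained sketch of that comparison and is not part of the patch or of reth's API: it uses plain integers for the stage checkpoints and returns the target height directly, whereas the code above reads checkpoints from the database and returns the block hash at that height.

fn inconsistent_target(checkpoints: &[u64]) -> Option<u64> {
    // The first stage defines the height every later stage should have reached.
    let first = *checkpoints.first()?;
    // If any later stage is behind it, resume the pipeline up to the first stage's height.
    checkpoints[1..].iter().any(|&c| c < first).then_some(first)
}

fn main() {
    // The first stage reached block 100 but a later stage only reached 80: target 100 again.
    assert_eq!(inconsistent_target(&[100, 100, 80]), Some(100));
    // All stages agree: no backfill target is needed.
    assert_eq!(inconsistent_target(&[100, 100, 100]), None);
    // An interrupted run where only the first stage advanced is handled the same way.
    assert_eq!(inconsistent_target(&[120, 100, 100]), Some(120));
}
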
From 6ee53922d054776817645d5d61e0bc83079d3d60 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Mon, 20 Oct 2025 16:05:16 +0200 Subject: [PATCH 125/371] fix(prune): Disable pruning limits (#19141) --- Cargo.lock | 1 - crates/node/builder/src/launch/common.rs | 2 -- crates/prune/prune/Cargo.toml | 1 - crates/prune/prune/src/builder.rs | 6 +----- 4 files changed, 1 insertion(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b406ede9b87..73f63b42f11 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9795,7 +9795,6 @@ dependencies = [ "itertools 0.14.0", "metrics", "rayon", - "reth-chainspec", "reth-config", "reth-db", "reth-db-api", diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index b43dc2a2a6a..969479bfa6c 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -429,8 +429,6 @@ impl LaunchContextWith Self { Self::default() @@ -135,7 +131,7 @@ impl Default for PrunerBuilder { Self { block_interval: 5, segments: PruneModes::default(), - delete_limit: MAINNET_PRUNE_DELETE_LIMIT, + delete_limit: usize::MAX, timeout: None, finished_exex_height: watch::channel(FinishedExExHeight::NoExExs).1, } From 90e0d37367d4828c88f80e7c24733e8b9c076ac9 Mon Sep 17 00:00:00 2001 From: Skylar Ray <137945430+sky-coderay@users.noreply.github.com> Date: Mon, 20 Oct 2025 17:19:55 +0300 Subject: [PATCH 126/371] fix: remove tautological assertions in validator tests (#19134) --- crates/engine/tree/src/tree/tests.rs | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index 49ce5ab9cf1..7c40680c809 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -1391,13 +1391,8 @@ fn test_validate_block_synchronous_strategy_during_persistence() { let genesis_hash = MAINNET.genesis_hash(); let valid_block = block_factory.create_valid_block(genesis_hash); - // Call validate_block_with_state directly - // This should execute the Synchronous strategy logic during active persistence - let result = test_harness.validate_block_direct(valid_block); - - // Verify validation was attempted (may fail due to test environment limitations) - // The key test is that the Synchronous strategy path is executed during persistence - assert!(result.is_ok() || result.is_err(), "Validation should complete") + // Test that Synchronous strategy executes during active persistence without panicking + let _result = test_harness.validate_block_direct(valid_block); } /// Test multiple validation scenarios including valid, consensus-invalid, and execution-invalid @@ -1411,15 +1406,9 @@ fn test_validate_block_multiple_scenarios() { let mut block_factory = TestBlockFactory::new(MAINNET.as_ref().clone()); let genesis_hash = MAINNET.genesis_hash(); - // Scenario 1: Valid block validation (may fail due to test environment limitations) + // Scenario 1: Valid block validation (test execution, not result) let valid_block = block_factory.create_valid_block(genesis_hash); - let result1 = test_harness.validate_block_direct(valid_block); - // Note: Valid blocks might fail in test environment due to missing provider data, - // but the important thing is that the validation logic executes without panicking - assert!( - result1.is_ok() || result1.is_err(), - "Valid block validation should complete (may fail due to test environment)" - ); + let _result1 = test_harness.validate_block_direct(valid_block); // Scenario 2: 
Block with consensus issues should be rejected let consensus_invalid = block_factory.create_invalid_consensus_block(genesis_hash); From be2306da31b6c4d56735c56a3409d8bd2734d534 Mon Sep 17 00:00:00 2001 From: 0xMushow <105550256+0xMushow@users.noreply.github.com> Date: Mon, 20 Oct 2025 16:41:10 +0200 Subject: [PATCH 127/371] chore(config): clean up gas limit code (#19144) --- crates/node/core/src/cli/config.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/crates/node/core/src/cli/config.rs b/crates/node/core/src/cli/config.rs index 657b8cac1f9..8c29c4745e9 100644 --- a/crates/node/core/src/cli/config.rs +++ b/crates/node/core/src/cli/config.rs @@ -42,10 +42,9 @@ pub trait PayloadBuilderConfig { } match chain.kind() { - ChainKind::Named(NamedChain::Sepolia | NamedChain::Holesky | NamedChain::Hoodi) => { - ETHEREUM_BLOCK_GAS_LIMIT_60M - } - ChainKind::Named(NamedChain::Mainnet) => ETHEREUM_BLOCK_GAS_LIMIT_60M, + ChainKind::Named( + NamedChain::Mainnet | NamedChain::Sepolia | NamedChain::Holesky | NamedChain::Hoodi, + ) => ETHEREUM_BLOCK_GAS_LIMIT_60M, _ => ETHEREUM_BLOCK_GAS_LIMIT_36M, } } From e198a38d628bdb60650f75d2a2a5724bbb543fe0 Mon Sep 17 00:00:00 2001 From: malik Date: Mon, 20 Oct 2025 16:04:31 +0100 Subject: [PATCH 128/371] perf: batch byte for serialization (#19096) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + Cargo.toml | 1 + crates/trie/common/Cargo.toml | 5 ++++- crates/trie/common/src/nibbles.rs | 19 +++++++++---------- crates/trie/common/src/storage.rs | 4 ++-- 5 files changed, 17 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 73f63b42f11..228c0783058 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10683,6 +10683,7 @@ dependencies = [ "alloy-serde", "alloy-trie", "arbitrary", + "arrayvec", "bincode 1.3.3", "bytes", "codspeed-criterion-compat", diff --git a/Cargo.toml b/Cargo.toml index 414e387ee28..08041015646 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -534,6 +534,7 @@ op-alloy-flz = { version = "0.13.1", default-features = false } # misc either = { version = "1.15.0", default-features = false } +arrayvec = { version = "0.7.6", default-features = false } aquamarine = "0.6" auto_impl = "1" backon = { version = "1.2", default-features = false, features = ["std-blocking-sleep", "tokio-sleep"] } diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 0aa93adb598..f10e53a8389 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -23,6 +23,7 @@ reth-codecs = { workspace = true, optional = true } alloy-rpc-types-eth = { workspace = true, optional = true } alloy-serde = { workspace = true, optional = true } +arrayvec = { workspace = true, optional = true } bytes = { workspace = true, optional = true } derive_more.workspace = true itertools = { workspace = true, features = ["use_alloc"] } @@ -83,10 +84,12 @@ std = [ "serde_json/std", "revm-database/std", "revm-state/std", + "arrayvec?/std", ] eip1186 = ["alloy-rpc-types-eth/serde", "dep:alloy-serde"] serde = [ "dep:serde", + "arrayvec?/serde", "bytes?/serde", "nybbles/serde", "alloy-primitives/serde", @@ -98,7 +101,7 @@ serde = [ "revm-database/serde", "revm-state/serde", ] -reth-codec = ["dep:reth-codecs", "dep:bytes"] +reth-codec = ["dep:reth-codecs", "dep:bytes", "dep:arrayvec"] serde-bincode-compat = [ "serde", "reth-primitives-traits/serde-bincode-compat", diff --git a/crates/trie/common/src/nibbles.rs b/crates/trie/common/src/nibbles.rs index 7d9e6670beb..82d710395f9 100644 --- a/crates/trie/common/src/nibbles.rs +++ 
b/crates/trie/common/src/nibbles.rs @@ -28,10 +28,9 @@ impl reth_codecs::Compact for StoredNibbles { where B: bytes::BufMut + AsMut<[u8]>, { - for i in self.0.iter() { - buf.put_u8(i); - } - self.0.len() + let bytes = self.0.iter().collect::>(); + buf.put_slice(&bytes); + bytes.len() } fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) { @@ -78,14 +77,14 @@ impl reth_codecs::Compact for StoredNibblesSubKey { { assert!(self.0.len() <= 64); - // right-pad with zeros - for i in self.0.iter() { - buf.put_u8(i); - } + let bytes = self.0.iter().collect::>(); + buf.put_slice(&bytes); + + // Right-pad with zeros static ZERO: &[u8; 64] = &[0; 64]; - buf.put_slice(&ZERO[self.0.len()..]); + buf.put_slice(&ZERO[bytes.len()..]); - buf.put_u8(self.0.len() as u8); + buf.put_u8(bytes.len() as u8); 64 + 1 } diff --git a/crates/trie/common/src/storage.rs b/crates/trie/common/src/storage.rs index 557b9e4a606..1e567393864 100644 --- a/crates/trie/common/src/storage.rs +++ b/crates/trie/common/src/storage.rs @@ -1,4 +1,4 @@ -use super::{BranchNodeCompact, Nibbles, StoredNibblesSubKey}; +use super::{BranchNodeCompact, StoredNibblesSubKey}; /// Account storage trie node. /// @@ -61,7 +61,7 @@ impl reth_codecs::Compact for TrieChangeSetsEntry { if len == 0 { // Return an empty entry without trying to parse anything return ( - Self { nibbles: StoredNibblesSubKey::from(Nibbles::default()), node: None }, + Self { nibbles: StoredNibblesSubKey::from(super::Nibbles::default()), node: None }, buf, ) } From 915b627f4f1e485628e2d7b955f9ecdab7a8b2de Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Mon, 20 Oct 2025 18:06:23 +0200 Subject: [PATCH 129/371] fix: Revert "feat(engine): improve payload validator tracing spans (#18960)" (#19145) --- crates/engine/tree/src/chain.rs | 2 +- crates/engine/tree/src/tree/cached_state.rs | 18 +-- crates/engine/tree/src/tree/metrics.rs | 2 +- crates/engine/tree/src/tree/mod.rs | 15 +-- .../tree/src/tree/payload_processor/mod.rs | 26 +--- .../src/tree/payload_processor/multiproof.rs | 5 +- .../src/tree/payload_processor/prewarm.rs | 40 +----- .../src/tree/payload_processor/sparse_trie.rs | 28 +---- .../engine/tree/src/tree/payload_validator.rs | 115 +++++++----------- crates/net/ecies/src/codec.rs | 4 +- .../src/segments/user/account_history.rs | 2 +- .../prune/prune/src/segments/user/receipts.rs | 2 +- .../src/segments/user/receipts_by_logs.rs | 2 +- .../src/segments/user/sender_recovery.rs | 2 +- .../src/segments/user/storage_history.rs | 2 +- .../src/segments/user/transaction_lookup.rs | 2 +- crates/rpc/ipc/src/server/ipc.rs | 4 +- crates/rpc/ipc/src/server/mod.rs | 2 +- crates/rpc/rpc/src/engine.rs | 2 +- crates/trie/db/src/state.rs | 3 +- crates/trie/parallel/src/proof_task.rs | 2 +- crates/trie/sparse-parallel/src/trie.rs | 18 +-- crates/trie/sparse/Cargo.toml | 2 +- crates/trie/sparse/src/state.rs | 15 +-- crates/trie/sparse/src/trie.rs | 14 +-- crates/trie/trie/src/hashed_cursor/mock.rs | 4 +- crates/trie/trie/src/node_iter.rs | 3 +- crates/trie/trie/src/trie_cursor/mock.rs | 8 +- crates/trie/trie/src/walker.rs | 6 +- 29 files changed, 101 insertions(+), 249 deletions(-) diff --git a/crates/engine/tree/src/chain.rs b/crates/engine/tree/src/chain.rs index d1e63a6b3d9..e2893bb976a 100644 --- a/crates/engine/tree/src/chain.rs +++ b/crates/engine/tree/src/chain.rs @@ -71,7 +71,7 @@ where /// Internal function used to advance the chain. /// /// Polls the `ChainOrchestrator` for the next event. 
- #[tracing::instrument(name = "ChainOrchestrator::poll", skip(self, cx))] + #[tracing::instrument(level = "debug", name = "ChainOrchestrator::poll", skip(self, cx))] fn poll_next_event(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index 3e9cda38f13..ffd7f49c6fc 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -18,7 +18,7 @@ use reth_trie::{ MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; use std::{sync::Arc, time::Duration}; -use tracing::{debug_span, instrument, trace}; +use tracing::trace; pub(crate) type Cache = mini_moka::sync::Cache; @@ -354,7 +354,6 @@ impl ExecutionCache { } /// Invalidates the storage for all addresses in the set - #[instrument(level = "debug", target = "engine::tree", skip_all, fields(accounts = addresses.len()))] pub(crate) fn invalidate_storages(&self, addresses: HashSet<&Address>) { // NOTE: this must collect because the invalidate function should not be called while we // hold an iter for it @@ -386,25 +385,12 @@ impl ExecutionCache { /// ## Error Handling /// /// Returns an error if the state updates are inconsistent and should be discarded. - #[instrument(level = "debug", target = "engine::tree", skip_all)] pub(crate) fn insert_state(&self, state_updates: &BundleState) -> Result<(), ()> { - let _enter = - debug_span!(target: "engine::tree", "contracts", len = state_updates.contracts.len()) - .entered(); // Insert bytecodes for (code_hash, bytecode) in &state_updates.contracts { self.code_cache.insert(*code_hash, Some(Bytecode(bytecode.clone()))); } - drop(_enter); - - let _enter = debug_span!( - target: "engine::tree", - "accounts", - accounts = state_updates.state.len(), - storages = - state_updates.state.values().map(|account| account.storage.len()).sum::() - ) - .entered(); + let mut invalidated_accounts = HashSet::default(); for (addr, account) in &state_updates.state { // If the account was not modified, as in not changed and not destroyed, then we have diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 1d1e208b0a6..c014d8ba15e 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -79,7 +79,7 @@ impl EngineApiMetrics { for tx in transactions { let tx = tx?; let span = - debug_span!(target: "engine::tree", "execute tx", tx_hash=?tx.tx().tx_hash()); + debug_span!(target: "engine::tree", "execute_tx", tx_hash=?tx.tx().tx_hash()); let _enter = span.enter(); trace!(target: "engine::tree", "Executing transaction"); executor.execute_transaction(tx)?; diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index a189b643f98..e66b2a8892e 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -496,12 +496,7 @@ where /// /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and /// returns an error if an internal error occurred. 
- #[instrument( - level = "debug", - target = "engine::tree", - skip_all, - fields(block_hash = %payload.block_hash(), block_num = %payload.block_number()), - )] + #[instrument(level = "trace", skip_all, fields(block_hash = %payload.block_hash(), block_num = %payload.block_number(),), target = "engine::tree")] fn on_new_payload( &mut self, payload: T::ExecutionData, @@ -582,7 +577,6 @@ where /// - `Valid`: Payload successfully validated and inserted /// - `Syncing`: Parent missing, payload buffered for later /// - Error status: Payload is invalid - #[instrument(level = "debug", target = "engine::tree", skip_all)] fn try_insert_payload( &mut self, payload: T::ExecutionData, @@ -976,7 +970,7 @@ where /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). /// /// Returns an error if an internal error occurred like a database error. - #[instrument(level = "debug", target = "engine::tree", skip_all, fields(head = % state.head_block_hash, safe = % state.safe_block_hash,finalized = % state.finalized_block_hash))] + #[instrument(level = "trace", skip_all, fields(head = % state.head_block_hash, safe = % state.safe_block_hash,finalized = % state.finalized_block_hash), target = "engine::tree")] fn on_forkchoice_updated( &mut self, state: ForkchoiceState, @@ -1978,7 +1972,7 @@ where } /// Attempts to connect any buffered blocks that are connected to the given parent hash. - #[instrument(level = "debug", target = "engine::tree", skip(self))] + #[instrument(level = "trace", skip(self), target = "engine::tree")] fn try_connect_buffered_blocks( &mut self, parent: BlockNumHash, @@ -2287,7 +2281,7 @@ where /// Returns an event with the appropriate action to take, such as: /// - download more missing blocks /// - try to canonicalize the target if the `block` is the tracked target (head) block. - #[instrument(level = "debug", target = "engine::tree", skip_all, fields(block_hash = %block.hash(), block_num = %block.number()))] + #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number(),), target = "engine::tree")] fn on_downloaded_block( &mut self, block: RecoveredBlock, @@ -2393,7 +2387,6 @@ where /// Returns `InsertPayloadOk::Inserted(BlockStatus::Valid)` on successful execution, /// `InsertPayloadOk::AlreadySeen` if the block already exists, or /// `InsertPayloadOk::Inserted(BlockStatus::Disconnected)` if parent state is missing. - #[instrument(level = "debug", target = "engine::tree", skip_all, fields(block_id))] fn insert_block_or_payload( &mut self, block_id: BlockWithParent, diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 8d6230dd82f..d2e48a49899 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -45,7 +45,7 @@ use std::sync::{ mpsc::{self, channel, Sender}, Arc, }; -use tracing::{debug, debug_span, instrument, warn}; +use tracing::{debug, instrument, warn}; mod configured_sparse_trie; pub mod executor; @@ -167,12 +167,6 @@ where /// This returns a handle to await the final state root and to interact with the tasks (e.g. 
/// canceling) #[allow(clippy::type_complexity)] - #[instrument( - level = "debug", - target = "engine::tree::payload_processor", - name = "payload processor", - skip_all - )] pub fn spawn>( &mut self, env: ExecutionEnv, @@ -242,9 +236,7 @@ where ); // spawn multi-proof task - let span = tracing::Span::current(); self.executor.spawn_blocking(move || { - let _enter = span.entered(); multi_proof_task.run(); }); @@ -265,7 +257,6 @@ where /// Spawns a task that exclusively handles cache prewarming for transaction execution. /// /// Returns a [`PayloadHandle`] to communicate with the task. - #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] pub(super) fn spawn_cache_exclusive>( &self, env: ExecutionEnv, @@ -362,9 +353,7 @@ where // spawn pre-warm task { let to_prewarm_task = to_prewarm_task.clone(); - let span = debug_span!(target: "engine::tree::payload_processor", "prewarm task"); self.executor.spawn_blocking(move || { - let _enter = span.entered(); prewarm_task.run(transactions, to_prewarm_task); }); } @@ -381,7 +370,7 @@ where /// /// If the given hash is different then what is recently cached, then this will create a new /// instance. - #[instrument(level = "debug", target = "engine::caching", skip(self))] + #[instrument(target = "engine::caching", skip(self))] fn cache_for(&self, parent_hash: B256) -> SavedCache { if let Some(cache) = self.execution_cache.get_cache_for(parent_hash) { debug!("reusing execution cache"); @@ -394,7 +383,6 @@ where } /// Spawns the [`SparseTrieTask`] for this payload processor. - #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] fn spawn_sparse_trie_task( &self, sparse_trie_rx: mpsc::Receiver, @@ -433,18 +421,13 @@ where sparse_state_trie, ); - let span = tracing::Span::current(); self.executor.spawn_blocking(move || { - let _enter = span.entered(); - let (result, trie) = task.run(); // Send state root computation result let _ = state_root_tx.send(result); - // Clear the SparseStateTrie and replace it back into the mutex _after_ sending - // results to the next step, so that time spent clearing doesn't block the step after - // this one. - let _enter = debug_span!(target: "engine::tree::payload_processor", "clear").entered(); + // Clear the SparseStateTrie and replace it back into the mutex _after_ sending results + // to the next step, so that time spent clearing doesn't block the step after this one. cleared_sparse_trie.lock().replace(ClearedSparseStateTrie::from_state_trie(trie)); }); } @@ -469,7 +452,6 @@ impl PayloadHandle { /// # Panics /// /// If payload processing was started without background tasks. - #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] pub fn state_root(&mut self) -> Result { self.state_root .take() diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 163714483fd..a528b759570 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -32,7 +32,7 @@ use std::{ }, time::{Duration, Instant}, }; -use tracing::{debug, error, instrument, trace}; +use tracing::{debug, error, trace}; /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the /// state. @@ -718,7 +718,6 @@ impl MultiProofTask { /// Handles request for proof prefetch. /// /// Returns a number of proofs that were spawned. 
- #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip_all, fields(accounts = targets.len()))] fn on_prefetch_proof(&mut self, targets: MultiProofTargets) -> u64 { let proof_targets = self.get_prefetch_proof_targets(targets); self.fetched_proof_targets.extend_ref(&proof_targets); @@ -845,7 +844,6 @@ impl MultiProofTask { /// Handles state updates. /// /// Returns a number of proofs that were spawned. - #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip(self, update), fields(accounts = update.len()))] fn on_state_update(&mut self, source: StateChangeSource, update: EvmState) -> u64 { let hashed_state_update = evm_state_to_hashed_post_state(update); @@ -975,7 +973,6 @@ impl MultiProofTask { /// currently being calculated, or if there are any pending proofs in the proof sequencer /// left to be revealed by checking the pending tasks. /// 6. This task exits after all pending proofs are processed. - #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip_all)] pub(crate) fn run(mut self) { // TODO convert those into fields let mut prefetch_proofs_requested = 0; diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index de8a88a167b..44293614d3d 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -39,7 +39,7 @@ use std::{ }, time::Instant, }; -use tracing::{debug, debug_span, instrument, trace, warn}; +use tracing::{debug, trace, warn}; /// A wrapper for transactions that includes their index in the block. #[derive(Clone)] @@ -139,11 +139,8 @@ where let ctx = self.ctx.clone(); let max_concurrency = self.max_concurrency; let transaction_count_hint = self.transaction_count_hint; - let span = tracing::Span::current(); self.executor.spawn_blocking(move || { - let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", parent: span, "spawn_all").entered(); - let (done_tx, done_rx) = mpsc::channel(); let mut executing = 0usize; @@ -160,8 +157,8 @@ where }; // Only spawn initial workers as needed - for i in 0..workers_needed { - handles.push(ctx.spawn_worker(i, &executor, actions_tx.clone(), done_tx.clone())); + for _ in 0..workers_needed { + handles.push(ctx.spawn_worker(&executor, actions_tx.clone(), done_tx.clone())); } let mut tx_index = 0usize; @@ -251,7 +248,6 @@ where /// the new, warmed cache to be inserted. /// /// This method is called from `run()` only after all execution tasks are complete. - #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn save_cache(self, state: BundleState) { let start = Instant::now(); @@ -288,12 +284,6 @@ where /// /// This will execute the transactions until all transactions have been processed or the task /// was cancelled. - #[instrument( - level = "debug", - target = "engine::tree::payload_processor::prewarm", - name = "prewarm", - skip_all - )] pub(super) fn run( self, pending: mpsc::Receiver + Clone + Send + 'static>, @@ -374,7 +364,6 @@ where { /// Splits this context into an evm, an evm config, metrics, and the atomic bool for terminating /// execution. 
- #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn evm_for_ctx(self) -> Option<(EvmFor, PrewarmMetrics, Arc)> { let Self { env, @@ -391,7 +380,7 @@ where Ok(provider) => provider, Err(err) => { trace!( - target: "engine::tree::payload_processor::prewarm", + target: "engine::tree", %err, "Failed to build state provider in prewarm thread" ); @@ -440,7 +429,6 @@ where /// /// Note: There are no ordering guarantees; this does not reflect the state produced by /// sequential execution. - #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn transact_batch( self, txs: mpsc::Receiver>, @@ -451,15 +439,7 @@ where { let Some((mut evm, metrics, terminate_execution)) = self.evm_for_ctx() else { return }; - while let Ok(IndexedTransaction { index, tx }) = { - let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", "recv tx") - .entered(); - txs.recv() - } { - let _enter = - debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm tx", index, tx_hash=%tx.tx().tx_hash()) - .entered(); - + while let Ok(IndexedTransaction { index, tx }) = txs.recv() { // If the task was cancelled, stop execution, send an empty result to notify the task, // and exit. if terminate_execution.load(Ordering::Relaxed) { @@ -487,18 +467,12 @@ where }; metrics.execution_duration.record(start.elapsed()); - drop(_enter); - // Only send outcome for transactions after the first txn // as the main execution will be just as fast if index > 0 { - let _enter = - debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm outcome", index, tx_hash=%tx.tx().tx_hash()) - .entered(); let (targets, storage_targets) = multiproof_targets_from_state(res.state); metrics.prefetch_storage_targets.record(storage_targets as f64); let _ = sender.send(PrewarmTaskEvent::Outcome { proof_targets: Some(targets) }); - drop(_enter); } metrics.total_runtime.record(start.elapsed()); @@ -511,7 +485,6 @@ where /// Spawns a worker task for transaction execution and returns its sender channel. fn spawn_worker( &self, - idx: usize, executor: &WorkloadExecutor, actions_tx: Sender, done_tx: Sender<()>, @@ -521,11 +494,8 @@ where { let (tx, rx) = mpsc::channel(); let ctx = self.clone(); - let span = - debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm worker", idx); executor.spawn_blocking(move || { - let _enter = span.entered(); ctx.transact_batch(rx, actions_tx, done_tx); }); diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index 6302abde5fb..c16f7b6e4f4 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -15,7 +15,7 @@ use std::{ sync::mpsc, time::{Duration, Instant}, }; -use tracing::{debug, debug_span, instrument, trace}; +use tracing::{debug, trace, trace_span}; /// A task responsible for populating the sparse trie. pub(super) struct SparseTrieTask @@ -61,11 +61,6 @@ where /// /// - State root computation outcome. /// - `SparseStateTrie` that needs to be cleared and reused to avoid reallocations. 
- #[instrument( - level = "debug", - target = "engine::tree::payload_processor::sparse_trie", - skip_all - )] pub(super) fn run( mut self, ) -> (Result, SparseStateTrie) { @@ -85,14 +80,10 @@ where while let Ok(mut update) = self.updates.recv() { num_iterations += 1; let mut num_updates = 1; - let _enter = - debug_span!(target: "engine::tree::payload_processor::sparse_trie", "drain updates") - .entered(); while let Ok(next) = self.updates.try_recv() { update.extend(next); num_updates += 1; } - drop(_enter); debug!( target: "engine::root", @@ -139,7 +130,6 @@ pub struct StateRootComputeOutcome { } /// Updates the sparse trie with the given proofs and state, and returns the elapsed time. -#[instrument(level = "debug", target = "engine::tree::payload_processor::sparse_trie", skip_all)] pub(crate) fn update_sparse_trie( trie: &mut SparseStateTrie, SparseTrieUpdate { mut state, multiproof }: SparseTrieUpdate, @@ -165,7 +155,6 @@ where ); // Update storage slots with new values and calculate storage roots. - let span = tracing::Span::current(); let (tx, rx) = mpsc::channel(); state .storages @@ -173,16 +162,14 @@ where .map(|(address, storage)| (address, storage, trie.take_storage_trie(&address))) .par_bridge() .map(|(address, storage, storage_trie)| { - let _enter = - debug_span!(target: "engine::tree::payload_processor::sparse_trie", parent: span.clone(), "storage trie", ?address) - .entered(); - - trace!(target: "engine::tree::payload_processor::sparse_trie", "Updating storage"); + let span = trace_span!(target: "engine::root::sparse", "Storage trie", ?address); + let _enter = span.enter(); + trace!(target: "engine::root::sparse", "Updating storage"); let storage_provider = blinded_provider_factory.storage_node_provider(address); let mut storage_trie = storage_trie.ok_or(SparseTrieErrorKind::Blind)?; if storage.wiped { - trace!(target: "engine::tree::payload_processor::sparse_trie", "Wiping storage"); + trace!(target: "engine::root::sparse", "Wiping storage"); storage_trie.wipe()?; } @@ -200,7 +187,7 @@ where continue; } - trace!(target: "engine::tree::payload_processor::sparse_trie", ?slot_nibbles, "Updating storage slot"); + trace!(target: "engine::root::sparse", ?slot_nibbles, "Updating storage slot"); storage_trie.update_leaf( slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec(), @@ -232,9 +219,6 @@ where let mut removed_accounts = Vec::new(); // Update account storage roots - let _enter = - tracing::debug_span!(target: "engine::tree::payload_processor::sparse_trie", "account trie") - .entered(); for result in rx { let (address, storage_trie) = result?; trie.insert_storage_trie(address, storage_trie); diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 253c6c0e183..4a3d45af8fd 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -44,8 +44,9 @@ use reth_trie::{ }; use reth_trie_db::DatabaseHashedPostState; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; +use revm::context::Block; use std::{collections::HashMap, sync::Arc, time::Instant}; -use tracing::{debug, debug_span, error, info, instrument, trace, warn}; +use tracing::{debug, debug_span, error, info, trace, warn}; /// Context providing access to tree state during validation. 
/// @@ -288,7 +289,7 @@ where V: PayloadValidator, { debug!( - target: "engine::tree::payload_validator", + target: "engine::tree", ?execution_err, block = ?input.num_hash(), "Block execution failed, checking for header validation errors" @@ -323,15 +324,6 @@ where /// - Block execution /// - State root computation /// - Fork detection - #[instrument( - level = "debug", - target = "engine::tree::payload_validator", - skip_all, - fields( - parent = ?input.parent_hash(), - block_num_hash = ?input.num_hash() - ) - )] pub fn validate_block_with_state>>( &mut self, input: BlockOrPayload, @@ -374,9 +366,7 @@ where let parent_hash = input.parent_hash(); let block_num_hash = input.num_hash(); - trace!(target: "engine::tree::payload_validator", "Fetching block state provider"); - let _enter = - debug_span!(target: "engine::tree::payload_validator", "state provider").entered(); + trace!(target: "engine::tree", block=?block_num_hash, parent=?parent_hash, "Fetching block state provider"); let Some(provider_builder) = ensure_ok!(self.state_provider_builder(parent_hash, ctx.state())) else { @@ -387,8 +377,8 @@ where ) .into()) }; + let state_provider = ensure_ok!(provider_builder.build()); - drop(_enter); // fetch parent block let Some(parent_block) = ensure_ok!(self.sealed_header_by_hash(parent_hash, ctx.state())) @@ -400,9 +390,7 @@ where .into()) }; - let evm_env = debug_span!(target: "engine::tree::payload_validator", "evm env") - .in_scope(|| self.evm_env_for(&input)) - .map_err(NewPayloadError::other)?; + let evm_env = self.evm_env_for(&input).map_err(NewPayloadError::other)?; let env = ExecutionEnv { evm_env, hash: input.hash(), parent_hash: input.parent_hash() }; @@ -412,7 +400,8 @@ where let strategy = state_root_plan.strategy; debug!( - target: "engine::tree::payload_validator", + target: "engine::tree", + block=?block_num_hash, ?strategy, "Deciding which state root algorithm to run" ); @@ -428,6 +417,7 @@ where persisting_kind, parent_hash, ctx.state(), + block_num_hash, strategy, )); @@ -462,7 +452,7 @@ where block ); - debug!(target: "engine::tree::payload_validator", "Calculating block state root"); + debug!(target: "engine::tree", block=?block_num_hash, "Calculating block state root"); let root_time = Instant::now(); @@ -470,17 +460,17 @@ where match strategy { StateRootStrategy::StateRootTask => { - debug!(target: "engine::tree::payload_validator", "Using sparse trie state root algorithm"); + debug!(target: "engine::tree", block=?block_num_hash, "Using sparse trie state root algorithm"); match handle.state_root() { Ok(StateRootComputeOutcome { state_root, trie_updates }) => { let elapsed = root_time.elapsed(); - info!(target: "engine::tree::payload_validator", ?state_root, ?elapsed, "State root task finished"); + info!(target: "engine::tree", ?state_root, ?elapsed, "State root task finished"); // we double check the state root here for good measure if state_root == block.header().state_root() { maybe_state_root = Some((state_root, trie_updates, elapsed)) } else { warn!( - target: "engine::tree::payload_validator", + target: "engine::tree", ?state_root, block_state_root = ?block.header().state_root(), "State root task returned incorrect state root" @@ -488,12 +478,12 @@ where } } Err(error) => { - debug!(target: "engine::tree::payload_validator", %error, "State root task failed"); + debug!(target: "engine::tree", %error, "State root task failed"); } } } StateRootStrategy::Parallel => { - debug!(target: "engine::tree::payload_validator", "Using parallel state root algorithm"); + 
debug!(target: "engine::tree", block=?block_num_hash, "Using parallel state root algorithm"); match self.compute_state_root_parallel( persisting_kind, block.parent_hash(), @@ -503,7 +493,8 @@ where Ok(result) => { let elapsed = root_time.elapsed(); info!( - target: "engine::tree::payload_validator", + target: "engine::tree", + block = ?block_num_hash, regular_state_root = ?result.0, ?elapsed, "Regular root task finished" @@ -511,7 +502,7 @@ where maybe_state_root = Some((result.0, result.1, elapsed)); } Err(error) => { - debug!(target: "engine::tree::payload_validator", %error, "Parallel state root computation failed"); + debug!(target: "engine::tree", %error, "Parallel state root computation failed"); } } } @@ -528,9 +519,9 @@ where } else { // fallback is to compute the state root regularly in sync if self.config.state_root_fallback() { - debug!(target: "engine::tree::payload_validator", "Using state root fallback for testing"); + debug!(target: "engine::tree", block=?block_num_hash, "Using state root fallback for testing"); } else { - warn!(target: "engine::tree::payload_validator", ?persisting_kind, "Failed to compute state root in parallel"); + warn!(target: "engine::tree", block=?block_num_hash, ?persisting_kind, "Failed to compute state root in parallel"); self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); } @@ -542,7 +533,7 @@ where }; self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); - debug!(target: "engine::tree::payload_validator", ?root_elapsed, "Calculated state root"); + debug!(target: "engine::tree", ?root_elapsed, block=?block_num_hash, "Calculated state root"); // ensure state root matches if state_root != block.header().state_root() { @@ -596,12 +587,12 @@ where /// and block body itself. 
fn validate_block_inner(&self, block: &RecoveredBlock) -> Result<(), ConsensusError> { if let Err(e) = self.consensus.validate_header(block.sealed_header()) { - error!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {}: {e}", block.hash()); + error!(target: "engine::tree", ?block, "Failed to validate header {}: {e}", block.hash()); return Err(e) } if let Err(e) = self.consensus.validate_block_pre_execution(block.sealed_block()) { - error!(target: "engine::tree::payload_validator", ?block, "Failed to validate block {}: {e}", block.hash()); + error!(target: "engine::tree", ?block, "Failed to validate block {}: {e}", block.hash()); return Err(e) } @@ -609,7 +600,6 @@ where } /// Executes a block with the given state provider - #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn execute_block( &mut self, state_provider: S, @@ -624,7 +614,11 @@ where T: PayloadTypes>, Evm: ConfigureEngineEvm, { - debug!(target: "engine::tree::payload_validator", "Executing block"); + let num_hash = NumHash::new(env.evm_env.block_env.number().to(), env.hash); + + let span = debug_span!(target: "engine::tree", "execute_block", num = ?num_hash.number, hash = ?num_hash.hash); + let _enter = span.enter(); + debug!(target: "engine::tree", "Executing block"); let mut db = State::builder() .with_database(StateProviderDatabase::new(&state_provider)) @@ -663,7 +657,7 @@ where )?; let execution_finish = Instant::now(); let execution_time = execution_finish.duration_since(execution_start); - debug!(target: "engine::tree::payload_validator", elapsed = ?execution_time, "Executed block"); + debug!(target: "engine::tree", elapsed = ?execution_time, number=?num_hash.number, "Executed block"); Ok(output) } @@ -675,7 +669,6 @@ where /// Returns `Err(_)` if error was encountered during computation. /// `Err(ProviderError::ConsistentView(_))` can be safely ignored and fallback computation /// should be used instead. - #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn compute_state_root_parallel( &self, persisting_kind: PersistingKind, @@ -716,7 +709,7 @@ where { let start = Instant::now(); - trace!(target: "engine::tree::payload_validator", block=?block.num_hash(), "Validating block consensus"); + trace!(target: "engine::tree", block=?block.num_hash(), "Validating block consensus"); // validate block consensus rules if let Err(e) = self.validate_block_inner(block) { return Err(e.into()) @@ -726,7 +719,7 @@ where if let Err(e) = self.consensus.validate_header_against_parent(block.sealed_header(), parent_block) { - warn!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {} against parent: {e}", block.hash()); + warn!(target: "engine::tree", ?block, "Failed to validate header {} against parent: {e}", block.hash()); return Err(e.into()) } @@ -766,12 +759,6 @@ where /// The method handles strategy fallbacks if the preferred approach fails, ensuring /// block execution always completes with a valid state root. 
#[allow(clippy::too_many_arguments)] - #[instrument( - level = "debug", - target = "engine::tree::payload_validator", - skip_all, - fields(strategy) - )] fn spawn_payload_processor>( &mut self, env: ExecutionEnv, @@ -780,6 +767,7 @@ where persisting_kind: PersistingKind, parent_hash: B256, state: &EngineApiTreeState, + block_num_hash: NumHash, strategy: StateRootStrategy, ) -> Result< ( @@ -833,7 +821,8 @@ where Err((error, txs, env, provider_builder)) => { // Failed to spawn proof workers, fallback to parallel state root error!( - target: "engine::tree::payload_validator", + target: "engine::tree", + block=?block_num_hash, ?error, "Failed to spawn proof workers, falling back to parallel state root" ); @@ -851,7 +840,8 @@ where // prewarming for transaction execution } else { debug!( - target: "engine::tree::payload_validator", + target: "engine::tree", + block=?block_num_hash, "Disabling state root task due to non-empty prefix sets" ); ( @@ -894,7 +884,7 @@ where state: &EngineApiTreeState, ) -> ProviderResult>> { if let Some((historical, blocks)) = state.tree_state.blocks_by_hash(hash) { - debug!(target: "engine::tree::payload_validator", %hash, %historical, "found canonical state for block in memory, creating provider builder"); + debug!(target: "engine::tree", %hash, %historical, "found canonical state for block in memory, creating provider builder"); // the block leads back to the canonical chain return Ok(Some(StateProviderBuilder::new( self.provider.clone(), @@ -905,18 +895,17 @@ where // Check if the block is persisted if let Some(header) = self.provider.header(hash)? { - debug!(target: "engine::tree::payload_validator", %hash, number = %header.number(), "found canonical state for block in database, creating provider builder"); + debug!(target: "engine::tree", %hash, number = %header.number(), "found canonical state for block in database, creating provider builder"); // For persisted blocks, we create a builder that will fetch state directly from the // database return Ok(Some(StateProviderBuilder::new(self.provider.clone(), hash, None))) } - debug!(target: "engine::tree::payload_validator", %hash, "no canonical state found for block"); + debug!(target: "engine::tree", %hash, "no canonical state found for block"); Ok(None) } /// Determines the state root computation strategy based on persistence state and configuration. - #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn plan_state_root_computation>>( &self, input: &BlockOrPayload, @@ -950,7 +939,7 @@ where }; debug!( - target: "engine::tree::payload_validator", + target: "engine::tree", block=?input.num_hash(), ?strategy, "Planned state root computation strategy" @@ -990,12 +979,6 @@ where /// block. /// 3. Once in-memory blocks are collected and optionally filtered, we compute the /// [`HashedPostState`] from them. - #[instrument( - level = "debug", - target = "engine::tree::payload_validator", - skip_all, - fields(persisting_kind, parent_hash) - )] fn compute_trie_input( &self, persisting_kind: PersistingKind, @@ -1016,9 +999,6 @@ where // If the current block is a descendant of the currently persisting blocks, then we need to // filter in-memory blocks, so that none of them are already persisted in the database. - let _enter = - debug_span!(target: "engine::tree::payload_validator", "filter in-memory blocks", len = blocks.len()) - .entered(); if persisting_kind.is_descendant() { // Iterate over the blocks from oldest to newest. 
while let Some(block) = blocks.last() { @@ -1043,13 +1023,11 @@ where parent_hash.into() }; } - drop(_enter); - let blocks_empty = blocks.is_empty(); - if blocks_empty { - debug!(target: "engine::tree::payload_validator", "Parent found on disk"); + if blocks.is_empty() { + debug!(target: "engine::tree", %parent_hash, "Parent found on disk"); } else { - debug!(target: "engine::tree::payload_validator", %historical, blocks = blocks.len(), "Parent found in memory"); + debug!(target: "engine::tree", %parent_hash, %historical, blocks = blocks.len(), "Parent found in memory"); } // Convert the historical block to the block number. @@ -1057,15 +1035,12 @@ where .convert_hash_or_number(historical)? .ok_or_else(|| ProviderError::BlockHashNotFound(historical.as_hash().unwrap()))?; - let _enter = - debug_span!(target: "engine::tree::payload_validator", "revert state", blocks_empty) - .entered(); // Retrieve revert state for historical block. let (revert_state, revert_trie) = if block_number == best_block_number { // We do not check against the `last_block_number` here because // `HashedPostState::from_reverts` / `trie_reverts` only use the database tables, and // not static files. - debug!(target: "engine::tree::payload_validator", block_number, best_block_number, "Empty revert state"); + debug!(target: "engine::tree", block_number, best_block_number, "Empty revert state"); (HashedPostState::default(), TrieUpdatesSorted::default()) } else { let revert_state = HashedPostState::from_reverts::( @@ -1075,7 +1050,7 @@ where .map_err(ProviderError::from)?; let revert_trie = provider.trie_reverts(block_number + 1)?; debug!( - target: "engine::tree::payload_validator", + target: "engine::tree", block_number, best_block_number, accounts = revert_state.accounts.len(), diff --git a/crates/net/ecies/src/codec.rs b/crates/net/ecies/src/codec.rs index c4c45366c66..b5a10284cf2 100644 --- a/crates/net/ecies/src/codec.rs +++ b/crates/net/ecies/src/codec.rs @@ -58,7 +58,7 @@ impl Decoder for ECIESCodec { type Item = IngressECIESValue; type Error = ECIESError; - #[instrument(skip_all, fields(peer=?self.ecies.remote_id, state=?self.state))] + #[instrument(level = "trace", skip_all, fields(peer=?self.ecies.remote_id, state=?self.state))] fn decode(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { loop { match self.state { @@ -150,7 +150,7 @@ impl Decoder for ECIESCodec { impl Encoder for ECIESCodec { type Error = io::Error; - #[instrument(skip(self, buf), fields(peer=?self.ecies.remote_id, state=?self.state))] + #[instrument(level = "trace", skip(self, buf), fields(peer=?self.ecies.remote_id, state=?self.state))] fn encode(&mut self, item: EgressECIESValue, buf: &mut BytesMut) -> Result<(), Self::Error> { match item { EgressECIESValue::Auth => { diff --git a/crates/prune/prune/src/segments/user/account_history.rs b/crates/prune/prune/src/segments/user/account_history.rs index 317337f050e..3c18cd1befc 100644 --- a/crates/prune/prune/src/segments/user/account_history.rs +++ b/crates/prune/prune/src/segments/user/account_history.rs @@ -45,7 +45,7 @@ where PrunePurpose::User } - #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] + #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let range = match input.get_next_block_range() { Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/receipts.rs b/crates/prune/prune/src/segments/user/receipts.rs index 03faddc1d5b..ecb0f3423be 100644 
--- a/crates/prune/prune/src/segments/user/receipts.rs +++ b/crates/prune/prune/src/segments/user/receipts.rs @@ -42,7 +42,7 @@ where PrunePurpose::User } - #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] + #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { crate::segments::receipts::prune(provider, input) } diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index 8fd6d1e73a5..0849db52518 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -45,7 +45,7 @@ where PrunePurpose::User } - #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] + #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { // Contract log filtering removes every receipt possible except the ones in the list. So, // for the other receipts it's as if they had a `PruneMode::Distance()` of diff --git a/crates/prune/prune/src/segments/user/sender_recovery.rs b/crates/prune/prune/src/segments/user/sender_recovery.rs index 9fbad8c428c..35ee487203a 100644 --- a/crates/prune/prune/src/segments/user/sender_recovery.rs +++ b/crates/prune/prune/src/segments/user/sender_recovery.rs @@ -37,7 +37,7 @@ where PrunePurpose::User } - #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] + #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let tx_range = match input.get_next_tx_num_range(provider)? { Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/storage_history.rs b/crates/prune/prune/src/segments/user/storage_history.rs index a4ad37bf789..ee7447c37da 100644 --- a/crates/prune/prune/src/segments/user/storage_history.rs +++ b/crates/prune/prune/src/segments/user/storage_history.rs @@ -47,7 +47,7 @@ where PrunePurpose::User } - #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] + #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let range = match input.get_next_block_range() { Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index 0055f8abd22..e218f623ed5 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -38,7 +38,7 @@ where PrunePurpose::User } - #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] + #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune( &self, provider: &Provider, diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index fda19c7cb31..19992ead498 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -27,7 +27,7 @@ pub(crate) struct Batch { // Batch responses must be sent back as a single message so we read the results from each // request in the batch and read the results off of a new channel, `rx_batch`, and then send the // complete batch response back to the client over `tx`. 
-#[instrument(name = "batch", skip(b))] +#[instrument(name = "batch", skip(b), level = "TRACE")] pub(crate) async fn process_batch_request( b: Batch, max_response_body_size: usize, @@ -98,7 +98,7 @@ where } } -#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service))] +#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service), level = "TRACE")] pub(crate) async fn execute_call_with_tracing<'a, S>( req: Request<'a>, rpc_service: &S, diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 6e6b092c408..b6114938d2b 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -443,7 +443,7 @@ struct ProcessConnection<'a, HttpMiddleware, RpcMiddleware> { } /// Spawns the IPC connection onto a new task -#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id))] +#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id), level = "INFO")] fn process_connection( params: ProcessConnection<'_, HttpMiddleware, RpcMiddleware>, ) where diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index 7865659ece7..a0e0bd30931 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -16,7 +16,7 @@ use tracing_futures::Instrument; macro_rules! engine_span { () => { - tracing::info_span!(target: "rpc", "engine") + tracing::trace_span!(target: "rpc", "engine") }; } diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 6d37c5f3413..256ee20794e 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -20,7 +20,7 @@ use std::{ collections::HashMap, ops::{RangeBounds, RangeInclusive}, }; -use tracing::{debug, instrument}; +use tracing::debug; /// Extends [`StateRoot`] with operations specific for working with a database transaction. 
pub trait DatabaseStateRoot<'a, TX>: Sized { @@ -226,7 +226,6 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } impl DatabaseHashedPostState for HashedPostState { - #[instrument(target = "trie::db", skip(tx), fields(range))] fn from_reverts( tx: &TX, range: impl RangeBounds, diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index b3269f21fbb..b66b7bbaa4f 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -693,7 +693,7 @@ where multi_added_removed_keys.unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); let added_removed_keys = multi_added_removed_keys.get_storage(&hashed_address); - let span = tracing::info_span!( + let span = tracing::trace_span!( target: "trie::proof_task", "Storage proof calculation", hashed_address = ?hashed_address, diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index b15eb7f4edb..e99bc584ec4 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -741,24 +741,13 @@ impl SparseTrieInterface for ParallelSparseTrie { // Update subtrie hashes in parallel { use rayon::iter::{IntoParallelIterator, ParallelIterator}; - use tracing::info_span; - let (tx, rx) = mpsc::channel(); let branch_node_tree_masks = &self.branch_node_tree_masks; let branch_node_hash_masks = &self.branch_node_hash_masks; - let span = tracing::Span::current(); changed_subtries .into_par_iter() .map(|mut changed_subtrie| { - let _enter = info_span!( - target: "trie::sparse::parallel", - parent: span.clone(), - "subtrie", - index = changed_subtrie.index - ) - .entered(); - #[cfg(feature = "metrics")] let start = std::time::Instant::now(); changed_subtrie.subtrie.update_hashes( @@ -1303,7 +1292,6 @@ impl ParallelSparseTrie { /// Drains any [`SparseTrieUpdatesAction`]s from the given subtrie, and applies each action to /// the given `updates` set. If the given set is None then this is a no-op. - #[instrument(target = "trie::sparse::parallel", skip_all)] fn apply_subtrie_update_actions( &mut self, update_actions: impl Iterator, @@ -1327,7 +1315,7 @@ impl ParallelSparseTrie { } /// Updates hashes for the upper subtrie, using nodes from both upper and lower subtries. - #[instrument(target = "trie::parallel_sparse", skip_all, ret(level = "trace"))] + #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, ret)] fn update_upper_subtrie_hashes(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { trace!(target: "trie::parallel_sparse", "Updating upper subtrie hashes"); @@ -1405,7 +1393,6 @@ impl ParallelSparseTrie { /// /// IMPORTANT: The method removes the subtries from `lower_subtries`, and the caller is /// responsible for returning them back into the array. - #[instrument(target = "trie::sparse::parallel", skip_all, fields(prefix_set_len = prefix_set.len()))] fn take_changed_lower_subtries( &mut self, prefix_set: &mut PrefixSet, @@ -1562,7 +1549,6 @@ impl ParallelSparseTrie { /// Return updated subtries back to the trie after executing any actions required on the /// top-level `SparseTrieUpdates`. - #[instrument(target = "trie::sparse::parallel", skip_all)] fn insert_changed_subtries( &mut self, changed_subtries: impl IntoIterator, @@ -2050,7 +2036,7 @@ impl SparseSubtrie { /// # Panics /// /// If the node at the root path does not exist. 
- #[instrument(target = "trie::parallel_sparse", skip_all, fields(root = ?self.path), ret(level = "trace"))] + #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, fields(root = ?self.path), ret)] fn update_hashes( &mut self, prefix_set: &mut PrefixSet, diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index b2c7ee0f566..6fac7c5faad 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-primitives-traits.workspace = true reth-execution-errors.workspace = true reth-trie-common.workspace = true -tracing = { workspace = true, features = ["attributes"] } +tracing.workspace = true alloy-trie.workspace = true # alloy diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index aef552da3dd..08e868d2a40 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -18,7 +18,7 @@ use reth_trie_common::{ DecodedMultiProof, DecodedStorageMultiProof, MultiProof, Nibbles, RlpNode, StorageMultiProof, TrieAccount, TrieMask, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE, }; -use tracing::{instrument, trace}; +use tracing::trace; /// Provides type-safe re-use of cleared [`SparseStateTrie`]s, which helps to save allocations /// across payload runs. @@ -208,14 +208,6 @@ where /// Reveal unknown trie paths from decoded multiproof. /// NOTE: This method does not extensively validate the proof. - #[instrument( - target = "trie::sparse", - skip_all, - fields( - account_nodes = multiproof.account_subtree.len(), - storages = multiproof.storages.len() - ) - )] pub fn reveal_decoded_multiproof( &mut self, multiproof: DecodedMultiProof, @@ -540,7 +532,6 @@ where /// Calculates the hashes of subtries. /// /// If the trie has not been revealed, this function does nothing. - #[instrument(target = "trie::sparse", skip_all)] pub fn calculate_subtries(&mut self) { if let SparseTrie::Revealed(trie) = &mut self.state { trie.update_subtrie_hashes(); @@ -601,7 +592,6 @@ where } /// Returns sparse trie root and trie updates if the trie has been revealed. - #[instrument(target = "trie::sparse", skip_all)] pub fn root_with_updates( &mut self, provider_factory: impl TrieNodeProviderFactory, @@ -705,7 +695,6 @@ where /// /// Returns false if the new account info and storage trie are empty, indicating the account /// leaf should be removed. - #[instrument(target = "trie::sparse", skip_all)] pub fn update_account( &mut self, address: B256, @@ -748,7 +737,6 @@ where /// /// Returns false if the new storage root is empty, and the account info was already empty, /// indicating the account leaf should be removed. - #[instrument(target = "trie::sparse", skip_all)] pub fn update_account_storage_root( &mut self, address: B256, @@ -796,7 +784,6 @@ where } /// Remove the account leaf node. - #[instrument(target = "trie::sparse", skip_all)] pub fn remove_account_leaf( &mut self, path: &Nibbles, diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 737da842254..d3c83c48a09 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -24,7 +24,7 @@ use reth_trie_common::{ TrieNode, CHILD_INDEX_RANGE, EMPTY_ROOT_HASH, }; use smallvec::SmallVec; -use tracing::{debug, instrument, trace}; +use tracing::{debug, trace}; /// The level below which the sparse trie hashes are calculated in /// [`SerialSparseTrie::update_subtrie_hashes`]. @@ -175,7 +175,6 @@ impl SparseTrie { /// and resetting the trie to only contain an empty root node. 
/// /// Note: This method will error if the trie is blinded. - #[instrument(target = "trie::sparse", skip_all)] pub fn wipe(&mut self) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; revealed.wipe(); @@ -192,7 +191,6 @@ impl SparseTrie { /// /// - `Some(B256)` with the calculated root hash if the trie is revealed. /// - `None` if the trie is still blind. - #[instrument(target = "trie::sparse", skip_all)] pub fn root(&mut self) -> Option { Some(self.as_revealed_mut()?.root()) } @@ -232,7 +230,6 @@ impl SparseTrie { /// # Errors /// /// Returns an error if the trie is still blind, or if the update fails. - #[instrument(target = "trie::sparse", skip_all)] pub fn update_leaf( &mut self, path: Nibbles, @@ -249,7 +246,6 @@ impl SparseTrie { /// # Errors /// /// Returns an error if the trie is still blind, or if the leaf cannot be removed - #[instrument(target = "trie::sparse", skip_all)] pub fn remove_leaf( &mut self, path: &Nibbles, @@ -593,13 +589,14 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } - #[instrument(target = "trie::sparse::serial", skip(self, provider))] fn update_leaf( &mut self, full_path: Nibbles, value: Vec, provider: P, ) -> SparseTrieResult<()> { + trace!(target: "trie::sparse", ?full_path, ?value, "update_leaf called"); + self.prefix_set.insert(full_path); let existing = self.values.insert(full_path, value); if existing.is_some() { @@ -731,7 +728,6 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } - #[instrument(target = "trie::sparse::serial", skip(self, provider))] fn remove_leaf( &mut self, full_path: &Nibbles, @@ -917,7 +913,6 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } - #[instrument(target = "trie::sparse::serial", skip(self))] fn root(&mut self) -> B256 { // Take the current prefix set let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); @@ -1353,7 +1348,6 @@ impl SerialSparseTrie { /// /// This function identifies all nodes that have changed (based on the prefix set) at the given /// depth and recalculates their RLP representation. - #[instrument(target = "trie::sparse::serial", skip(self))] pub fn update_rlp_node_level(&mut self, depth: usize) { // Take the current prefix set let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); @@ -1399,7 +1393,6 @@ impl SerialSparseTrie { /// specified depth. /// - A `PrefixSetMut` containing paths shallower than the specified depth that still need to be /// tracked for future updates. - #[instrument(target = "trie::sparse::serial", skip(self))] fn get_changed_nodes_at_depth( &self, prefix_set: &mut PrefixSet, @@ -1486,7 +1479,6 @@ impl SerialSparseTrie { /// # Panics /// /// If the node at provided path does not exist. - #[instrument(target = "trie::sparse::serial", skip_all, ret(level = "trace"))] pub fn rlp_node( &mut self, prefix_set: &mut PrefixSet, diff --git a/crates/trie/trie/src/hashed_cursor/mock.rs b/crates/trie/trie/src/hashed_cursor/mock.rs index f091ae6ffe5..aca1c303d69 100644 --- a/crates/trie/trie/src/hashed_cursor/mock.rs +++ b/crates/trie/trie/src/hashed_cursor/mock.rs @@ -107,7 +107,7 @@ impl MockHashedCursor { impl HashedCursor for MockHashedCursor { type Value = T; - #[instrument(skip(self), ret(level = "trace"))] + #[instrument(level = "trace", skip(self), ret)] fn seek(&mut self, key: B256) -> Result, DatabaseError> { // Find the first key that is greater than or equal to the given key. 
let entry = self.values.iter().find_map(|(k, v)| (k >= &key).then(|| (*k, v.clone()))); @@ -121,7 +121,7 @@ impl HashedCursor for MockHashedCursor { Ok(entry) } - #[instrument(skip(self), ret(level = "trace"))] + #[instrument(level = "trace", skip(self), ret)] fn next(&mut self) -> Result, DatabaseError> { let mut iter = self.values.iter(); // Jump to the first key that has a prefix of the current key if it's set, or to the first diff --git a/crates/trie/trie/src/node_iter.rs b/crates/trie/trie/src/node_iter.rs index e11cd51f790..862176c803a 100644 --- a/crates/trie/trie/src/node_iter.rs +++ b/crates/trie/trie/src/node_iter.rs @@ -191,10 +191,11 @@ where /// /// NOTE: The iteration will start from the key of the previous hashed entry if it was supplied. #[instrument( + level = "trace", target = "trie::node_iter", skip_all, fields(trie_type = ?self.trie_type), - ret(level = "trace") + ret )] pub fn try_next( &mut self, diff --git a/crates/trie/trie/src/trie_cursor/mock.rs b/crates/trie/trie/src/trie_cursor/mock.rs index 313df0443e3..e4504ee4f9c 100644 --- a/crates/trie/trie/src/trie_cursor/mock.rs +++ b/crates/trie/trie/src/trie_cursor/mock.rs @@ -109,7 +109,7 @@ impl MockTrieCursor { } impl TrieCursor for MockTrieCursor { - #[instrument(skip(self), ret(level = "trace"))] + #[instrument(level = "trace", skip(self), ret)] fn seek_exact( &mut self, key: Nibbles, @@ -125,7 +125,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(skip(self), ret(level = "trace"))] + #[instrument(level = "trace", skip(self), ret)] fn seek( &mut self, key: Nibbles, @@ -142,7 +142,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(skip(self), ret(level = "trace"))] + #[instrument(level = "trace", skip(self), ret)] fn next(&mut self) -> Result, DatabaseError> { let mut iter = self.trie_nodes.iter(); // Jump to the first key that has a prefix of the current key if it's set, or to the first @@ -161,7 +161,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(skip(self), ret(level = "trace"))] + #[instrument(level = "trace", skip(self), ret)] fn current(&mut self) -> Result, DatabaseError> { Ok(self.current_key) } diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index 0ea466437f5..f12bf46f748 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -157,7 +157,7 @@ impl> TrieWalker { } /// Returns the next unprocessed key in the trie along with its raw [`Nibbles`] representation. - #[instrument(skip(self), ret(level = "trace"))] + #[instrument(level = "trace", skip(self), ret)] pub fn next_unprocessed_key(&self) -> Option<(B256, Nibbles)> { self.key() .and_then(|key| if self.can_skip_current_node { key.increment() } else { Some(*key) }) @@ -297,7 +297,7 @@ impl> TrieWalker { } /// Consumes the next node in the trie, updating the stack. - #[instrument(skip(self), ret(level = "trace"))] + #[instrument(level = "trace", skip(self), ret)] fn consume_node(&mut self) -> Result<(), DatabaseError> { let Some((key, node)) = self.node(false)? else { // If no next node is found, clear the stack. @@ -343,7 +343,7 @@ impl> TrieWalker { } /// Moves to the next sibling node in the trie, updating the stack. 
- #[instrument(skip(self), ret(level = "trace"))] + #[instrument(level = "trace", skip(self), ret)] fn move_to_next_sibling( &mut self, allow_root_to_child_nibble: bool, From 49bbcdc38c74427f6da79b6ac31f5d34065527dd Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 20 Oct 2025 13:09:57 -0400 Subject: [PATCH 130/371] chore: rm high frequency otel-related debug logs (#19147) --- crates/tracing/src/layers.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index d27bbc96b6e..156bd8c8253 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -20,12 +20,16 @@ pub(crate) type BoxedLayer = Box + Send + Sync>; /// Default [directives](Directive) for [`EnvFilter`] which disables high-frequency debug logs from /// `hyper`, `hickory-resolver`, `jsonrpsee-server`, and `discv5`. -const DEFAULT_ENV_FILTER_DIRECTIVES: [&str; 5] = [ +const DEFAULT_ENV_FILTER_DIRECTIVES: [&str; 9] = [ "hyper::proto::h1=off", "hickory_resolver=off", "hickory_proto=off", "discv5=off", "jsonrpsee-server=off", + "opentelemetry-otlp=off", + "opentelemetry_sdk=off", + "opentelemetry-http=off", + "hyper_util::client::legacy::pool=off", ]; /// Manages the collection of layers for a tracing subscriber. From 792b82d8956d91255c022abf7ea39a81781d0796 Mon Sep 17 00:00:00 2001 From: Alex Pikme <30472093+reject-i@users.noreply.github.com> Date: Tue, 21 Oct 2025 11:48:46 +0300 Subject: [PATCH 131/371] perf: fix redundant Arc clone in file_client tests (#19170) --- crates/net/downloaders/src/file_client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 34c2f56b75c..de3d8f8f1f4 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -815,7 +815,7 @@ mod tests { // construct headers downloader and use first header let mut header_downloader = ReverseHeadersDownloaderBuilder::default() - .build(Arc::clone(&Arc::new(client)), Arc::new(TestConsensus::default())); + .build(Arc::new(client), Arc::new(TestConsensus::default())); header_downloader.update_local_head(local_header.clone()); header_downloader.update_sync_target(SyncTarget::Tip(sync_target_hash)); @@ -890,7 +890,7 @@ mod tests { // construct headers downloader and use first header let mut header_downloader = ReverseHeadersDownloaderBuilder::default() - .build(Arc::clone(&Arc::new(client)), Arc::new(TestConsensus::default())); + .build(Arc::new(client), Arc::new(TestConsensus::default())); header_downloader.update_local_head(local_header.clone()); header_downloader.update_sync_target(SyncTarget::Tip(sync_target_hash)); From f0c0b3db4e47c167244654a5f03e5e388c9505fe Mon Sep 17 00:00:00 2001 From: MozirDmitriy Date: Tue, 21 Oct 2025 13:21:36 +0300 Subject: [PATCH 132/371] feat(storage): replace unreachable todo!() with explicit unreachable!() in compact derive (#19152) --- crates/storage/codecs/derive/src/compact/structs.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/storage/codecs/derive/src/compact/structs.rs b/crates/storage/codecs/derive/src/compact/structs.rs index f8ebda33499..4bafe730624 100644 --- a/crates/storage/codecs/derive/src/compact/structs.rs +++ b/crates/storage/codecs/derive/src/compact/structs.rs @@ -155,7 +155,7 @@ impl<'a> StructHandler<'a> { let (#name, new_buf) = #ident_type::#from_compact_ident(buf, flags.#len() as usize); }); } 
else { - todo!() + unreachable!("flag-type fields are always compact in Compact derive") } self.lines.push(quote! { buf = new_buf; From e21048314c375b3df2801d384bd3f826c878e7a8 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 21 Oct 2025 11:56:36 +0100 Subject: [PATCH 133/371] chore: remove total difficulty from `HeaderProvider` (#19151) --- .../commands/src/init_state/without_evm.rs | 4 +- crates/era-utils/src/export.rs | 5 +- crates/era-utils/src/history.rs | 49 ++++++++++++------- crates/era-utils/src/lib.rs | 3 +- .../net/downloaders/src/bodies/test_utils.rs | 6 +-- crates/node/core/src/node_config.rs | 10 +--- crates/rpc/rpc-eth-types/src/error/mod.rs | 1 - crates/stages/stages/src/stages/era.rs | 20 ++------ crates/stages/stages/src/stages/headers.rs | 35 ++----------- crates/stages/stages/src/stages/merkle.rs | 2 +- .../stages/stages/src/test_utils/test_db.rs | 4 +- .../static-file/src/segments/headers.rs | 14 ++---- crates/storage/db-common/src/init.rs | 3 +- crates/storage/errors/src/provider.rs | 3 -- .../src/providers/blockchain_provider.rs | 22 +-------- .../provider/src/providers/consistent.rs | 33 +------------ .../src/providers/database/metrics.rs | 4 -- .../provider/src/providers/database/mod.rs | 14 ++---- .../src/providers/database/provider.rs | 36 ++------------ .../provider/src/providers/static_file/jar.rs | 17 +------ .../src/providers/static_file/manager.rs | 27 +--------- .../provider/src/providers/static_file/mod.rs | 14 +----- .../src/providers/static_file/writer.rs | 15 +++++- .../storage/provider/src/test_utils/mock.rs | 18 ------- crates/storage/rpc-provider/src/lib.rs | 20 -------- crates/storage/storage-api/src/header.rs | 8 +-- crates/storage/storage-api/src/noop.rs | 10 +--- examples/db-access/src/main.rs | 5 -- 28 files changed, 87 insertions(+), 315 deletions(-) diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index de6320fc86e..8da0bde068c 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -1,5 +1,5 @@ use alloy_consensus::BlockHeader; -use alloy_primitives::{BlockNumber, B256, U256}; +use alloy_primitives::{BlockNumber, B256}; use alloy_rlp::Decodable; use reth_codecs::Compact; use reth_node_builder::NodePrimitives; @@ -133,7 +133,7 @@ where for block_num in 1..=target_height { // TODO: should we fill with real parent_hash? let header = header_factory(block_num); - writer.append_header(&header, U256::ZERO, &B256::ZERO)?; + writer.append_header(&header, &B256::ZERO)?; } Ok(()) }); diff --git a/crates/era-utils/src/export.rs b/crates/era-utils/src/export.rs index 670a534ba01..6ccdba24262 100644 --- a/crates/era-utils/src/export.rs +++ b/crates/era-utils/src/export.rs @@ -1,6 +1,7 @@ //! Logic to export from database era1 block history //! and injecting them into era1 files with `Era1Writer`. +use crate::calculate_td_by_number; use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, B256, U256}; use eyre::{eyre, Result}; @@ -114,9 +115,7 @@ where let mut total_difficulty = if config.first_block_number > 0 { let prev_block_number = config.first_block_number - 1; - provider - .header_td_by_number(prev_block_number)? - .ok_or_else(|| eyre!("Total difficulty not found for block {prev_block_number}"))? + calculate_td_by_number(provider, prev_block_number)? 
} else { U256::ZERO }; diff --git a/crates/era-utils/src/history.rs b/crates/era-utils/src/history.rs index b1c3cd309c0..58d5e383c37 100644 --- a/crates/era-utils/src/history.rs +++ b/crates/era-utils/src/history.rs @@ -1,3 +1,4 @@ +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber, U256}; use futures_util::{Stream, StreamExt}; use reth_db_api::{ @@ -19,15 +20,15 @@ use reth_etl::Collector; use reth_fs_util as fs; use reth_primitives_traits::{Block, FullBlockBody, FullBlockHeader, NodePrimitives}; use reth_provider::{ - providers::StaticFileProviderRWRefMut, BlockWriter, ProviderError, StaticFileProviderFactory, + providers::StaticFileProviderRWRefMut, BlockReader, BlockWriter, StaticFileProviderFactory, StaticFileSegment, StaticFileWriter, }; use reth_stages_types::{ CheckpointBlockRange, EntitiesCheckpoint, HeadersCheckpoint, StageCheckpoint, StageId, }; use reth_storage_api::{ - errors::ProviderResult, DBProvider, DatabaseProviderFactory, HeaderProvider, - NodePrimitivesProvider, StageCheckpointWriter, + errors::ProviderResult, DBProvider, DatabaseProviderFactory, NodePrimitivesProvider, + StageCheckpointWriter, }; use std::{ collections::Bound, @@ -82,11 +83,6 @@ where .get_highest_static_file_block(StaticFileSegment::Headers) .unwrap_or_default(); - // Find the latest total difficulty - let mut td = static_file_provider - .header_td_by_number(height)? - .ok_or(ProviderError::TotalDifficultyNotFound(height))?; - while let Some(meta) = rx.recv()? { let from = height; let provider = provider_factory.database_provider_rw()?; @@ -96,7 +92,6 @@ where &mut static_file_provider.latest_writer(StaticFileSegment::Headers)?, &provider, hash_collector, - &mut td, height.., )?; @@ -146,7 +141,7 @@ where /// Extracts block headers and bodies from `meta` and appends them using `writer` and `provider`. /// -/// Adds on to `total_difficulty` and collects hash to height using `hash_collector`. +/// Collects hash to height using `hash_collector`. /// /// Skips all blocks below the [`start_bound`] of `block_numbers` and stops when reaching past the /// [`end_bound`] or the end of the file. @@ -160,7 +155,6 @@ pub fn process( writer: &mut StaticFileProviderRWRefMut<'_,
<P as NodePrimitivesProvider>
::Primitives>, provider: &P, hash_collector: &mut Collector, - total_difficulty: &mut U256, block_numbers: impl RangeBounds, ) -> eyre::Result where @@ -182,7 +176,7 @@ where as Box) -> eyre::Result<(BH, BB)>>); let iter = ProcessIter { iter, era: meta }; - process_iter(iter, writer, provider, hash_collector, total_difficulty, block_numbers) + process_iter(iter, writer, provider, hash_collector, block_numbers) } type ProcessInnerIter = @@ -271,7 +265,6 @@ pub fn process_iter( writer: &mut StaticFileProviderRWRefMut<'_,
<P as NodePrimitivesProvider>
::Primitives>, provider: &P, hash_collector: &mut Collector, - total_difficulty: &mut U256, block_numbers: impl RangeBounds, ) -> eyre::Result where @@ -311,11 +304,8 @@ where let hash = header.hash_slow(); last_header_number = number; - // Increase total difficulty - *total_difficulty += header.difficulty(); - // Append to Headers segment - writer.append_header(&header, *total_difficulty, &hash)?; + writer.append_header(&header, &hash)?; // Write bodies to database. provider.append_block_bodies(vec![(header.number(), Some(body))])?; @@ -382,3 +372,28 @@ where Ok(()) } + +/// Calculates the total difficulty for a given block number by summing the difficulty +/// of all blocks from genesis to the given block. +/// +/// Very expensive - iterates through all blocks in batches of 1000. +/// +/// Returns an error if any block is missing. +pub fn calculate_td_by_number
<P>
(provider: &P, num: BlockNumber) -> eyre::Result +where + P: BlockReader, +{ + let mut total_difficulty = U256::ZERO; + let mut start = 0; + + while start <= num { + let end = (start + 1000 - 1).min(num); + + total_difficulty += + provider.headers_range(start..=end)?.iter().map(|h| h.difficulty()).sum::(); + + start = end + 1; + } + + Ok(total_difficulty) +} diff --git a/crates/era-utils/src/lib.rs b/crates/era-utils/src/lib.rs index 966709d2f21..13a5ceefe92 100644 --- a/crates/era-utils/src/lib.rs +++ b/crates/era-utils/src/lib.rs @@ -14,5 +14,6 @@ pub use export::{export, ExportConfig}; /// Imports history from ERA files. pub use history::{ - build_index, decode, import, open, process, process_iter, save_stage_checkpoints, ProcessIter, + build_index, calculate_td_by_number, decode, import, open, process, process_iter, + save_stage_checkpoints, ProcessIter, }; diff --git a/crates/net/downloaders/src/bodies/test_utils.rs b/crates/net/downloaders/src/bodies/test_utils.rs index a7172ec1a00..513226a2c91 100644 --- a/crates/net/downloaders/src/bodies/test_utils.rs +++ b/crates/net/downloaders/src/bodies/test_utils.rs @@ -3,7 +3,7 @@ #![allow(dead_code)] use alloy_consensus::BlockHeader; -use alloy_primitives::{B256, U256}; +use alloy_primitives::B256; use reth_ethereum_primitives::BlockBody; use reth_network_p2p::bodies::response::BlockResponse; use reth_primitives_traits::{Block, SealedBlock, SealedHeader}; @@ -55,9 +55,7 @@ pub(crate) fn insert_headers( .expect("failed to create writer"); for header in headers { - writer - .append_header(header.header(), U256::ZERO, &header.hash()) - .expect("failed to append header"); + writer.append_header(header.header(), &header.hash()).expect("failed to append header"); } drop(writer); provider_rw.commit().expect("failed to commit"); diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index e3b98f4bd0f..ba888346035 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -10,7 +10,7 @@ use crate::{ }; use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, B256}; +use alloy_primitives::{BlockNumber, B256, U256}; use eyre::eyre; use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_config::config::PruneConfig; @@ -346,12 +346,6 @@ impl NodeConfig { .header_by_number(head)? .expect("the header for the latest block is missing, database is corrupt"); - let total_difficulty = provider - .header_td_by_number(head)? - // total difficulty is effectively deprecated, but still required in some places, e.g. - // p2p - .unwrap_or_default(); - let hash = provider .block_hash(head)? 
.expect("the hash for the latest block is missing, database is corrupt"); @@ -360,7 +354,7 @@ impl NodeConfig { number: head, hash, difficulty: header.difficulty(), - total_difficulty, + total_difficulty: U256::ZERO, timestamp: header.timestamp(), }) } diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index fdb5f8f190f..c8645aa0325 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -462,7 +462,6 @@ impl From for EthApiError { } ProviderError::BestBlockNotFound => Self::HeaderNotFound(BlockId::latest()), ProviderError::BlockNumberForTransactionIndexNotFound => Self::UnknownBlockOrTxIndex, - ProviderError::TotalDifficultyNotFound(num) => Self::HeaderNotFound(num.into()), ProviderError::FinalizedBlockNotFound => Self::HeaderNotFound(BlockId::finalized()), ProviderError::SafeBlockNotFound => Self::HeaderNotFound(BlockId::safe()), err => Self::Internal(err.into()), diff --git a/crates/stages/stages/src/stages/era.rs b/crates/stages/stages/src/stages/era.rs index 971bc11f897..e4f25325a42 100644 --- a/crates/stages/stages/src/stages/era.rs +++ b/crates/stages/stages/src/stages/era.rs @@ -10,12 +10,11 @@ use reth_era_utils as era; use reth_etl::Collector; use reth_primitives_traits::{FullBlockBody, FullBlockHeader, NodePrimitives}; use reth_provider::{ - BlockReader, BlockWriter, DBProvider, HeaderProvider, StageCheckpointWriter, - StaticFileProviderFactory, StaticFileWriter, + BlockReader, BlockWriter, DBProvider, StageCheckpointWriter, StaticFileProviderFactory, + StaticFileWriter, }; use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_static_file_types::StaticFileSegment; -use reth_storage_errors::ProviderError; use std::{ fmt::{Debug, Formatter}, iter, @@ -176,11 +175,6 @@ where .get_highest_static_file_block(StaticFileSegment::Headers) .unwrap_or_default(); - // Find the latest total difficulty - let mut td = static_file_provider - .header_td_by_number(last_header_number)? - .ok_or(ProviderError::TotalDifficultyNotFound(last_header_number))?; - // Although headers were downloaded in reverse order, the collector iterates it in // ascending order let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; @@ -190,7 +184,6 @@ where &mut writer, provider, &mut self.hash_collector, - &mut td, last_header_number..=input.target(), ) .map_err(|e| StageError::Fatal(e.into()))?; @@ -336,7 +329,7 @@ mod tests { }; use reth_ethereum_primitives::TransactionSigned; use reth_primitives_traits::{SealedBlock, SealedHeader}; - use reth_provider::{BlockNumReader, TransactionsProvider}; + use reth_provider::{BlockNumReader, HeaderProvider, TransactionsProvider}; use reth_testing_utils::generators::{ random_block_range, random_signed_tx, BlockRangeParams, }; @@ -447,9 +440,6 @@ mod tests { match output { Some(output) if output.checkpoint.block_number > initial_checkpoint => { let provider = self.db.factory.provider()?; - let mut td = provider - .header_td_by_number(initial_checkpoint.saturating_sub(1))? - .unwrap_or_default(); for block_num in initial_checkpoint.. 
output @@ -469,10 +459,6 @@ mod tests { assert!(header.is_some()); let header = SealedHeader::seal_slow(header.unwrap()); assert_eq!(header.hash(), hash); - - // validate the header total difficulty - td += header.difficulty; - assert_eq!(provider.header_td_by_number(block_num)?, Some(td)); } self.validate_db_blocks( diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index d3e690dc516..74709e81421 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -16,15 +16,14 @@ use reth_network_p2p::headers::{ }; use reth_primitives_traits::{serde_bincode_compat, FullBlockHeader, NodePrimitives, SealedHeader}; use reth_provider::{ - providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderProvider, - HeaderSyncGapProvider, StaticFileProviderFactory, + providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderSyncGapProvider, + StaticFileProviderFactory, }; use reth_stages_api::{ CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput, HeadersCheckpoint, Stage, StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, }; use reth_static_file_types::StaticFileSegment; -use reth_storage_errors::provider::ProviderError; use std::task::{ready, Context, Poll}; use tokio::sync::watch; @@ -107,11 +106,6 @@ where .get_highest_static_file_block(StaticFileSegment::Headers) .unwrap_or_default(); - // Find the latest total difficulty - let mut td = static_file_provider - .header_td_by_number(last_header_number)? - .ok_or(ProviderError::TotalDifficultyNotFound(last_header_number))?; - // Although headers were downloaded in reverse order, the collector iterates it in ascending // order let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; @@ -134,11 +128,8 @@ where } last_header_number = header.number(); - // Increase total difficulty - td += header.difficulty(); - // Append to Headers segment - writer.append_header(header, td, header_hash)?; + writer.append_header(header, header_hash)?; } info!(target: "sync::stages::headers", total = total_headers, "Writing headers hash index"); @@ -415,7 +406,7 @@ mod tests { ReverseHeadersDownloader, ReverseHeadersDownloaderBuilder, }; use reth_network_p2p::test_utils::{TestHeaderDownloader, TestHeadersClient}; - use reth_provider::{test_utils::MockNodeTypesWithDB, BlockNumReader}; + use reth_provider::{test_utils::MockNodeTypesWithDB, BlockNumReader, HeaderProvider}; use tokio::sync::watch; pub(crate) struct HeadersTestRunner { @@ -493,9 +484,6 @@ mod tests { match output { Some(output) if output.checkpoint.block_number > initial_checkpoint => { let provider = self.db.factory.provider()?; - let mut td = provider - .header_td_by_number(initial_checkpoint.saturating_sub(1))? 
- .unwrap_or_default(); for block_num in initial_checkpoint..output.checkpoint.block_number { // look up the header hash @@ -509,10 +497,6 @@ mod tests { assert!(header.is_some()); let header = SealedHeader::seal_slow(header.unwrap()); assert_eq!(header.hash(), hash); - - // validate the header total difficulty - td += header.difficulty; - assert_eq!(provider.header_td_by_number(block_num)?, Some(td)); } } _ => self.check_no_header_entry_above(initial_checkpoint)?, @@ -635,16 +619,7 @@ mod tests { let static_file_provider = provider.static_file_provider(); let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers).unwrap(); for header in sealed_headers { - let ttd = if header.number() == 0 { - header.difficulty() - } else { - let parent_block_number = header.number() - 1; - let parent_ttd = - provider.header_td_by_number(parent_block_number).unwrap().unwrap_or_default(); - parent_ttd + header.difficulty() - }; - - writer.append_header(header.header(), ttd, &header.hash()).unwrap(); + writer.append_header(header.header(), &header.hash()).unwrap(); } drop(writer); diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index b4f24db7c58..a3a3ac88483 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -738,7 +738,7 @@ mod tests { let hash = last_header.hash_slow(); writer.prune_headers(1).unwrap(); writer.commit().unwrap(); - writer.append_header(&last_header, U256::ZERO, &hash).unwrap(); + writer.append_header(&last_header, &hash).unwrap(); writer.commit().unwrap(); Ok(blocks) diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index f38f77b2247..c88aa4574c0 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -160,11 +160,11 @@ impl TestStageDB { for block_number in 0..header.number { let mut prev = header.clone_header(); prev.number = block_number; - writer.append_header(&prev, U256::ZERO, &B256::ZERO)?; + writer.append_header(&prev, &B256::ZERO)?; } } - writer.append_header(header.header(), td, &header.hash())?; + writer.append_header(header.header(), &header.hash())?; } else { tx.put::(header.number, header.hash())?; tx.put::(header.number, td.into())?; diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs index 5232061caaf..990e33ee52a 100644 --- a/crates/static-file/static-file/src/segments/headers.rs +++ b/crates/static-file/static-file/src/segments/headers.rs @@ -36,25 +36,17 @@ where )?; let headers_walker = headers_cursor.walk_range(block_range.clone())?; - let mut header_td_cursor = - provider.tx_ref().cursor_read::()?; - let header_td_walker = header_td_cursor.walk_range(block_range.clone())?; - let mut canonical_headers_cursor = provider.tx_ref().cursor_read::()?; let canonical_headers_walker = canonical_headers_cursor.walk_range(block_range)?; - for ((header_entry, header_td_entry), canonical_header_entry) in - headers_walker.zip(header_td_walker).zip(canonical_headers_walker) - { + for (header_entry, canonical_header_entry) in headers_walker.zip(canonical_headers_walker) { let (header_block, header) = header_entry?; - let (header_td_block, header_td) = header_td_entry?; let (canonical_header_block, canonical_header) = canonical_header_entry?; - debug_assert_eq!(header_block, header_td_block); - debug_assert_eq!(header_td_block, canonical_header_block); + 
debug_assert_eq!(header_block, canonical_header_block); - static_file_writer.append_header(&header, header_td.0, &canonical_header)?; + static_file_writer.append_header(&header, &canonical_header)?; } Ok(()) diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 8b24f0f8d19..de55cea3c99 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -347,9 +347,8 @@ where match static_file_provider.block_hash(0) { Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => { - let (difficulty, hash) = (header.difficulty(), block_hash); let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; - writer.append_header(header, difficulty, &hash)?; + writer.append_header(header, &block_hash)?; } Ok(Some(_)) => {} Err(e) => return Err(e), diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index 47cc630bcb6..9630a1b2a64 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -58,9 +58,6 @@ pub enum ProviderError { /// The account address. address: Address, }, - /// The total difficulty for a block is missing. - #[error("total difficulty not found for block #{_0}")] - TotalDifficultyNotFound(BlockNumber), /// When required header related data was not found but was required. #[error("no header found for {_0:?}")] HeaderNotFound(BlockHashOrNumber), diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 512b8569de2..9dbbed9e88c 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -10,7 +10,7 @@ use crate::{ }; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; -use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ BlockState, CanonicalInMemoryState, ForkChoiceNotifications, ForkChoiceSubscriptions, @@ -176,14 +176,6 @@ impl HeaderProvider for BlockchainProvider { self.consistent_provider()?.header_by_number(num) } - fn header_td(&self, hash: BlockHash) -> ProviderResult> { - self.consistent_provider()?.header_td(hash) - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - self.consistent_provider()?.header_td_by_number(number) - } - fn headers_range( &self, range: impl RangeBounds, @@ -1280,24 +1272,12 @@ mod tests { BlockRangeParams::default(), )?; - let database_block = database_blocks.first().unwrap().clone(); - let in_memory_block = in_memory_blocks.last().unwrap().clone(); // make sure that the finalized block is on db let finalized_block = database_blocks.get(database_blocks.len() - 3).unwrap(); provider.set_finalized(finalized_block.clone_sealed_header()); let blocks = [database_blocks, in_memory_blocks].concat(); - assert_eq!( - provider.header_td_by_number(database_block.number)?, - Some(database_block.difficulty) - ); - - assert_eq!( - provider.header_td_by_number(in_memory_block.number)?, - Some(in_memory_block.difficulty) - ); - assert_eq!( provider.sealed_headers_while(0..=10, |header| header.number <= 8)?, blocks diff --git a/crates/storage/provider/src/providers/consistent.rs 
b/crates/storage/provider/src/providers/consistent.rs index 66a35e5e9b1..67113fc5c0c 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -13,7 +13,7 @@ use alloy_eips::{ }; use alloy_primitives::{ map::{hash_map, HashMap}, - Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256, + Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, }; use reth_chain_state::{BlockState, CanonicalInMemoryState, MemoryOverlayStateProviderRef}; use reth_chainspec::ChainInfo; @@ -663,37 +663,6 @@ impl HeaderProvider for ConsistentProvider { ) } - fn header_td(&self, hash: BlockHash) -> ProviderResult> { - if let Some(num) = self.block_number(hash)? { - self.header_td_by_number(num) - } else { - Ok(None) - } - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - let number = if self.head_block.as_ref().map(|b| b.block_on_chain(number.into())).is_some() - { - // If the block exists in memory, we should return a TD for it. - // - // The canonical in memory state should only store post-merge blocks. Post-merge blocks - // have zero difficulty. This means we can use the total difficulty for the last - // finalized block number if present (so that we are not affected by reorgs), if not the - // last number in the database will be used. - if let Some(last_finalized_num_hash) = - self.canonical_in_memory_state.get_finalized_num_hash() - { - last_finalized_num_hash.number - } else { - self.last_block_number()? - } - } else { - // Otherwise, return what we have on disk for the input block - number - }; - self.storage_provider.header_td_by_number(number) - } - fn headers_range( &self, range: impl RangeBounds, diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index 4923b51db37..4daac3dfddb 100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ b/crates/storage/provider/src/providers/database/metrics.rs @@ -45,7 +45,6 @@ pub(crate) enum Action { InsertBlockBodyIndices, InsertTransactionBlocks, GetNextTxNum, - GetParentTD, } /// Database provider metrics @@ -71,8 +70,6 @@ struct DatabaseProviderMetrics { insert_tx_blocks: Histogram, /// Duration of get next tx num get_next_tx_num: Histogram, - /// Duration of get parent TD - get_parent_td: Histogram, } impl DatabaseProviderMetrics { @@ -88,7 +85,6 @@ impl DatabaseProviderMetrics { Action::InsertBlockBodyIndices => self.insert_block_body_indices.record(duration), Action::InsertTransactionBlocks => self.insert_tx_blocks.record(duration), Action::GetNextTxNum => self.get_next_tx_num.record(duration), - Action::GetParentTD => self.get_parent_td.record(duration), } } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index df0bc33c461..873b10b0cfc 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -9,7 +9,7 @@ use crate::{ }; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use core::fmt; use reth_chainspec::ChainInfo; use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; @@ -242,14 +242,6 @@ impl HeaderProvider for ProviderFactory { self.static_file_provider.header_by_number(num) } - fn header_td(&self, 
hash: BlockHash) -> ProviderResult> { - self.provider()?.header_td(hash) - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - self.static_file_provider.header_td_by_number(number) - } - fn headers_range( &self, range: impl RangeBounds, @@ -585,7 +577,7 @@ mod tests { BlockHashReader, BlockNumReader, BlockWriter, DBProvider, HeaderSyncGapProvider, TransactionsProvider, }; - use alloy_primitives::{TxNumber, B256, U256}; + use alloy_primitives::{TxNumber, B256}; use assert_matches::assert_matches; use reth_chainspec::ChainSpecBuilder; use reth_db::{ @@ -730,7 +722,7 @@ mod tests { let static_file_provider = provider.static_file_provider(); let mut static_file_writer = static_file_provider.latest_writer(StaticFileSegment::Headers).unwrap(); - static_file_writer.append_header(head.header(), U256::ZERO, &head.hash()).unwrap(); + static_file_writer.append_header(head.header(), &head.hash()).unwrap(); static_file_writer.commit().unwrap(); drop(static_file_writer); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 235bf57a4a4..4bb710abfef 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -28,12 +28,12 @@ use alloy_eips::BlockHashOrNumber; use alloy_primitives::{ keccak256, map::{hash_map, B256Map, HashMap, HashSet}, - Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256, + Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, }; use itertools::Itertools; use rayon::slice::ParallelSliceMut; use reth_chain_state::ExecutedBlock; -use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, EthereumHardforks}; +use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, database::Database, @@ -971,26 +971,6 @@ impl HeaderProvider for DatabasePro self.static_file_provider.header_by_number(num) } - fn header_td(&self, block_hash: BlockHash) -> ProviderResult> { - if let Some(num) = self.block_number(block_hash)? { - self.header_td_by_number(num) - } else { - Ok(None) - } - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - if self.chain_spec.is_paris_active_at_block(number) && - let Some(td) = self.chain_spec.final_paris_total_difficulty() - { - // if this block is higher than the final paris(merge) block, return the final paris - // difficulty - return Ok(Some(td)) - } - - self.static_file_provider.header_td_by_number(number) - } - fn headers_range( &self, range: impl RangeBounds, @@ -2833,19 +2813,9 @@ impl BlockWrite let mut durations_recorder = metrics::DurationsRecorder::default(); - // total difficulty - let ttd = if block_number == 0 { - block.header().difficulty() - } else { - let parent_block_number = block_number - 1; - let parent_ttd = self.header_td_by_number(parent_block_number)?.unwrap_or_default(); - durations_recorder.record_relative(metrics::Action::GetParentTD); - parent_ttd + block.header().difficulty() - }; - self.static_file_provider .get_writer(block_number, StaticFileSegment::Headers)? 
- .append_header(block.header(), ttd, &block.hash())?; + .append_header(block.header(), &block.hash())?; self.tx.put::(block.hash(), block_number)?; durations_recorder.record_relative(metrics::Action::InsertHeaderNumbers); diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 9906583f900..2cd7ec98ae9 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -8,11 +8,10 @@ use crate::{ }; use alloy_consensus::transaction::{SignerRecoverable, TransactionMeta}; use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; -use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use reth_chainspec::ChainInfo; use reth_db::static_file::{ - BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, TDWithHashMask, - TotalDifficultyMask, TransactionMask, + BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, TransactionMask, }; use reth_db_api::table::{Decompress, Value}; use reth_node_types::NodePrimitives; @@ -101,18 +100,6 @@ impl> HeaderProvider for StaticFileJarProv self.cursor()?.get_one::>(num.into()) } - fn header_td(&self, block_hash: BlockHash) -> ProviderResult> { - Ok(self - .cursor()? - .get_two::((&block_hash).into())? - .filter(|(_, hash)| hash == &block_hash) - .map(|(td, _)| td.into())) - } - - fn header_td_by_number(&self, num: BlockNumber) -> ProviderResult> { - Ok(self.cursor()?.get_one::(num.into())?.map(Into::into)) - } - fn headers_range( &self, range: impl RangeBounds, diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 434d3836fb2..cd7f5c16d91 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -12,9 +12,7 @@ use alloy_consensus::{ Header, }; use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; -use alloy_primitives::{ - b256, keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256, -}; +use alloy_primitives::{b256, keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use dashmap::DashMap; use notify::{RecommendedWatcher, RecursiveMode, Watcher}; use parking_lot::RwLock; @@ -23,7 +21,7 @@ use reth_db::{ lockfile::StorageLock, static_file::{ iter_static_files, BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, - StaticFileCursor, TDWithHashMask, TransactionMask, + StaticFileCursor, TransactionMask, }, }; use reth_db_api::{ @@ -1403,27 +1401,6 @@ impl> HeaderProvider for StaticFileProvide }) } - fn header_td(&self, block_hash: BlockHash) -> ProviderResult> { - self.find_static_file(StaticFileSegment::Headers, |jar_provider| { - Ok(jar_provider - .cursor()? - .get_two::((&block_hash).into())? 
- .and_then(|(td, hash)| (hash == block_hash).then_some(td.0))) - }) - } - - fn header_td_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) - .and_then(|provider| provider.header_td_by_number(num)) - .or_else(|err| { - if let ProviderError::MissingStaticFileBlock(_, _) = err { - Ok(None) - } else { - Err(err) - } - }) - } - fn headers_range( &self, range: impl RangeBounds, diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 1c3bfd58a79..afb2836abe4 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -118,12 +118,10 @@ mod tests { { let manager = factory.static_file_provider(); let mut writer = manager.latest_writer(StaticFileSegment::Headers).unwrap(); - let mut td = U256::ZERO; for header in headers.clone() { - td += header.header().difficulty; let hash = header.hash(); - writer.append_header(&header.unseal(), td, &hash).unwrap(); + writer.append_header(&header.unseal(), &hash).unwrap(); } writer.commit().unwrap(); } @@ -148,12 +146,6 @@ mod tests { // Compare Header assert_eq!(header, db_provider.header(header_hash).unwrap().unwrap()); assert_eq!(header, jar_provider.header_by_number(header.number).unwrap().unwrap()); - - // Compare HeaderTerminalDifficulties - assert_eq!( - db_provider.header_td(header_hash).unwrap().unwrap(), - jar_provider.header_td_by_number(header.number).unwrap().unwrap() - ); } } } @@ -180,9 +172,7 @@ mod tests { let mut header = Header::default(); for num in 0..=tip { header.number = num; - header_writer - .append_header(&header, U256::default(), &BlockHash::default()) - .unwrap(); + header_writer.append_header(&header, &BlockHash::default()).unwrap(); } header_writer.commit().unwrap(); } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index b9c17f82920..7b0ae9ce11c 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -531,7 +531,20 @@ impl StaticFileProviderRW { /// blocks. /// /// Returns the current [`BlockNumber`] as seen in the static file. - pub fn append_header( + pub fn append_header(&mut self, header: &N::BlockHeader, hash: &BlockHash) -> ProviderResult<()> + where + N::BlockHeader: Compact, + { + self.append_header_with_td(header, U256::ZERO, hash) + } + + /// Appends header to static file with a specified total difficulty. + /// + /// It **CALLS** `increment_block()` since the number of headers is equal to the number of + /// blocks. + /// + /// Returns the current [`BlockNumber`] as seen in the static file. 
+ pub fn append_header_with_td( &mut self, header: &N::BlockHeader, total_difficulty: U256, diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 3e33e2b0509..4b3829cf8ed 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -292,24 +292,6 @@ impl HeaderP Ok(lock.values().find(|h| h.number() == num).cloned()) } - fn header_td(&self, hash: BlockHash) -> ProviderResult> { - let lock = self.headers.lock(); - Ok(lock.get(&hash).map(|target| { - lock.values() - .filter(|h| h.number() < target.number()) - .fold(target.difficulty(), |td, h| td + h.difficulty()) - })) - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - let lock = self.headers.lock(); - let sum = lock - .values() - .filter(|h| h.number() <= number) - .fold(U256::ZERO, |td, h| td + h.difficulty()); - Ok(Some(sum)) - } - fn headers_range( &self, range: impl RangeBounds, diff --git a/crates/storage/rpc-provider/src/lib.rs b/crates/storage/rpc-provider/src/lib.rs index ed6e49eefbd..6e5bd17218b 100644 --- a/crates/storage/rpc-provider/src/lib.rs +++ b/crates/storage/rpc-provider/src/lib.rs @@ -364,18 +364,6 @@ where Ok(Some(sealed_header.into_header())) } - fn header_td(&self, hash: BlockHash) -> ProviderResult> { - let header = self.header(hash).map_err(ProviderError::other)?; - - Ok(header.map(|b| b.difficulty())) - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - let header = self.header_by_number(number).map_err(ProviderError::other)?; - - Ok(header.map(|b| b.difficulty())) - } - fn headers_range( &self, _range: impl RangeBounds, @@ -1674,14 +1662,6 @@ where Err(ProviderError::UnsupportedProvider) } - fn header_td(&self, _hash: BlockHash) -> Result, ProviderError> { - Err(ProviderError::UnsupportedProvider) - } - - fn header_td_by_number(&self, _number: BlockNumber) -> Result, ProviderError> { - Err(ProviderError::UnsupportedProvider) - } - fn headers_range( &self, _range: impl RangeBounds, diff --git a/crates/storage/storage-api/src/header.rs b/crates/storage/storage-api/src/header.rs index 7e3133ec712..39b2eef9031 100644 --- a/crates/storage/storage-api/src/header.rs +++ b/crates/storage/storage-api/src/header.rs @@ -1,6 +1,6 @@ use alloc::vec::Vec; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockHash, BlockNumber, U256}; +use alloy_primitives::{BlockHash, BlockNumber}; use core::ops::RangeBounds; use reth_primitives_traits::{BlockHeader, SealedHeader}; use reth_storage_errors::provider::ProviderResult; @@ -44,12 +44,6 @@ pub trait HeaderProvider: Send + Sync { } } - /// Get total difficulty by block hash. - fn header_td(&self, hash: BlockHash) -> ProviderResult>; - - /// Get total difficulty by block number. 
- fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult>; - /// Get headers in range of block numbers fn headers_range( &self, diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 6b70a5260a6..e538e1216e8 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -15,7 +15,7 @@ use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{ - Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, + Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, }; use core::{ fmt::Debug, @@ -356,14 +356,6 @@ impl HeaderProvider for NoopProvider { Ok(None) } - fn header_td(&self, _hash: BlockHash) -> ProviderResult> { - Ok(None) - } - - fn header_td_by_number(&self, _number: BlockNumber) -> ProviderResult> { - Ok(None) - } - fn headers_range( &self, _range: impl RangeBounds, diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 93896accbbc..339aa1ae3d1 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -66,11 +66,6 @@ fn header_provider_example(provider: T, number: u64) -> eyre: provider.header(sealed_header.hash())?.ok_or(eyre::eyre!("header by hash not found"))?; assert_eq!(sealed_header.header(), &header_by_hash); - // The header's total difficulty is stored in a separate table, so we have a separate call for - // it. This is not needed for post PoS transition chains. - let td = provider.header_td_by_number(number)?.ok_or(eyre::eyre!("header td not found"))?; - assert!(!td.is_zero()); - // Can query headers by range as well, already sealed! 
let headers = provider.sealed_headers_range(100..200)?; assert_eq!(headers.len(), 100); From 7263a7b4ebef623210a8dee706431702c8e81f26 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Tue, 21 Oct 2025 12:59:11 +0100 Subject: [PATCH 134/371] fix(cli): prune config saving to file (#19174) --- crates/cli/commands/src/common.rs | 5 ++-- crates/cli/commands/src/prune.rs | 13 +++++---- crates/cli/commands/src/stage/run.rs | 2 +- crates/cli/commands/src/stage/unwind.rs | 2 +- crates/config/src/config.rs | 18 +++++++----- crates/node/builder/src/launch/common.rs | 36 +++++++++++------------- crates/node/builder/src/launch/engine.rs | 2 +- crates/node/builder/src/setup.rs | 8 ++---- crates/node/core/src/args/pruning.rs | 7 +++-- crates/prune/types/src/target.rs | 4 +++ 10 files changed, 51 insertions(+), 46 deletions(-) diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 1ceba8f57da..5b8cfce7716 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -126,9 +126,8 @@ impl EnvironmentArgs { where C: ChainSpecParser, { - let has_receipt_pruning = config.prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); - let prune_modes = - config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(); + let has_receipt_pruning = config.prune.has_receipts_pruning(); + let prune_modes = config.prune.segments.clone(); let factory = ProviderFactory::>>::new( db, self.chain.clone(), diff --git a/crates/cli/commands/src/prune.rs b/crates/cli/commands/src/prune.rs index de60fbfdb3b..cae0fa00901 100644 --- a/crates/cli/commands/src/prune.rs +++ b/crates/cli/commands/src/prune.rs @@ -1,5 +1,5 @@ //! Command that runs pruning without any limits. -use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; @@ -18,22 +18,23 @@ pub struct PruneCommand { impl> PruneCommand { /// Execute the `prune` command pub async fn execute>(self) -> eyre::Result<()> { - let Environment { config, provider_factory, .. } = self.env.init::(AccessRights::RW)?; - let prune_config = config.prune.unwrap_or_default(); + let env = self.env.init::(AccessRights::RW)?; + let provider_factory = env.provider_factory; + let config = env.config.prune; // Copy data from database to static files info!(target: "reth::cli", "Copying data from database to static files..."); let static_file_producer = - StaticFileProducer::new(provider_factory.clone(), prune_config.segments.clone()); + StaticFileProducer::new(provider_factory.clone(), config.segments.clone()); let lowest_static_file_height = static_file_producer.lock().copy_to_static_files()?.min_block_num(); info!(target: "reth::cli", ?lowest_static_file_height, "Copied data from database to static files"); // Delete data which has been copied to static files. 
if let Some(prune_tip) = lowest_static_file_height { - info!(target: "reth::cli", ?prune_tip, ?prune_config, "Pruning data from database..."); + info!(target: "reth::cli", ?prune_tip, ?config, "Pruning data from database..."); // Run the pruner according to the configuration, and don't enforce any limits on it - let mut pruner = PrunerBuilder::new(prune_config) + let mut pruner = PrunerBuilder::new(config) .delete_limit(usize::MAX) .build_with_provider_factory(provider_factory); diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 4e577af06be..010277480f5 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -151,7 +151,7 @@ impl let batch_size = self.batch_size.unwrap_or(self.to.saturating_sub(self.from) + 1); let etl_config = config.stages.etl.clone(); - let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); + let prune_modes = config.prune.segments.clone(); let (mut exec_stage, mut unwind_stage): (Box>, Option>>) = match self.stage { diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index ba9a00b11e2..ffd8e330062 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -85,7 +85,7 @@ impl> Command evm_config: impl ConfigureEvm + 'static, ) -> Result, eyre::Error> { let stage_conf = &config.stages; - let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); + let prune_modes = config.prune.segments.clone(); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 7ea5569834c..5ff2431bb56 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -23,8 +23,8 @@ pub struct Config { // TODO(onbjerg): Can we make this easier to maintain when we add/remove stages? pub stages: StageConfig, /// Configuration for pruning. - #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))] - pub prune: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub prune: PruneConfig, /// Configuration for the discovery service. pub peers: PeersConfig, /// Configuration for peer sessions. @@ -33,8 +33,8 @@ pub struct Config { impl Config { /// Sets the pruning configuration. - pub fn update_prune_config(&mut self, prune_config: PruneConfig) { - self.prune = Some(prune_config); + pub fn set_prune_config(&mut self, prune_config: PruneConfig) { + self.prune = prune_config; } } @@ -445,6 +445,11 @@ impl Default for PruneConfig { } impl PruneConfig { + /// Returns whether this configuration is the default one. + pub fn is_default(&self) -> bool { + self == &Self::default() + } + /// Returns whether there is any kind of receipt pruning configuration. pub fn has_receipts_pruning(&self) -> bool { self.segments.receipts.is_some() || !self.segments.receipts_log_filter.is_empty() @@ -452,8 +457,7 @@ impl PruneConfig { /// Merges another `PruneConfig` into this one, taking values from the other config if and only /// if the corresponding value in this config is not set. - pub fn merge(&mut self, other: Option) { - let Some(other) = other else { return }; + pub fn merge(&mut self, other: Self) { let Self { block_interval, segments: @@ -1030,7 +1034,7 @@ receipts = 'full' }; let original_filter = config1.segments.receipts_log_filter.clone(); - config1.merge(Some(config2)); + config1.merge(config2); // Check that the configuration has been merged. 
Any configuration present in config1 // should not be overwritten by config2 diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 969479bfa6c..dd3cdbf756d 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -159,7 +159,7 @@ impl LaunchContext { let mut toml_config = reth_config::Config::from_path(&config_path) .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; - Self::save_pruning_config_if_full_node(&mut toml_config, config, &config_path)?; + Self::save_pruning_config(&mut toml_config, config, &config_path)?; info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); @@ -169,8 +169,9 @@ impl LaunchContext { Ok(toml_config) } - /// Save prune config to the toml file if node is a full node. - fn save_pruning_config_if_full_node( + /// Save prune config to the toml file if node is a full node or has custom pruning CLI + /// arguments. + fn save_pruning_config( reth_config: &mut reth_config::Config, config: &NodeConfig, config_path: impl AsRef, @@ -178,14 +179,14 @@ impl LaunchContext { where ChainSpec: EthChainSpec + reth_chainspec::EthereumHardforks, { - if reth_config.prune.is_none() { - if let Some(prune_config) = config.prune_config() { - reth_config.update_prune_config(prune_config); + if let Some(prune_config) = config.prune_config() { + if reth_config.prune != prune_config { + reth_config.set_prune_config(prune_config); info!(target: "reth::cli", "Saving prune config to toml file"); reth_config.save(config_path.as_ref())?; } - } else if config.prune_config().is_none() { - warn!(target: "reth::cli", "Prune configs present in config file but --full not provided. Running as a Full node"); + } else if !reth_config.prune.is_default() { + warn!(target: "reth::cli", "Pruning configuration is present in the config file, but no CLI arguments are provided. 
Using config from file."); } Ok(()) } @@ -401,7 +402,7 @@ impl LaunchContextWith Option + pub fn prune_config(&self) -> PruneConfig where ChainSpec: reth_chainspec::EthereumHardforks, { @@ -412,7 +413,7 @@ impl LaunchContextWith LaunchContextWith LaunchContextWith( provider_factory: ProviderFactory, task_executor: &TaskExecutor, metrics_tx: reth_stages::MetricEventsSender, - prune_config: Option, + prune_config: PruneConfig, max_block: Option, static_file_producer: StaticFileProducer>, evm_config: Evm, @@ -85,7 +85,7 @@ pub fn build_pipeline( consensus: Arc>, max_block: Option, metrics_tx: reth_stages::MetricEventsSender, - prune_config: Option, + prune_config: PruneConfig, static_file_producer: StaticFileProducer>, evm_config: Evm, exex_manager_handle: ExExManagerHandle, @@ -106,8 +106,6 @@ where let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let prune_modes = prune_config.map(|prune| prune.segments).unwrap_or_default(); - let pipeline = builder .with_tip_sender(tip_tx) .with_metrics_tx(metrics_tx) @@ -120,7 +118,7 @@ where body_downloader, evm_config.clone(), stage_config.clone(), - prune_modes, + prune_config.segments, era_import_source, ) .set(ExecutionStage::new( diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index 846e4e6b203..42c30cf6fce 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -6,7 +6,7 @@ use clap::{builder::RangedU64ValueParser, Args}; use reth_chainspec::EthereumHardforks; use reth_config::config::PruneConfig; use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE}; -use std::collections::BTreeMap; +use std::{collections::BTreeMap, ops::Not}; /// Parameters for pruning and full node #[derive(Debug, Clone, Args, PartialEq, Eq, Default)] @@ -107,6 +107,9 @@ pub struct PruningArgs { impl PruningArgs { /// Returns pruning configuration. + /// + /// Returns [`None`] if no parameters are specified and default pruning configuration should be + /// used. pub fn prune_config(&self, chain_spec: &ChainSpec) -> Option where ChainSpec: EthereumHardforks, @@ -163,7 +166,7 @@ impl PruningArgs { config.segments.receipts.take(); } - Some(config) + config.is_default().not().then_some(config) } fn bodies_prune_mode(&self, chain_spec: &ChainSpec) -> Option diff --git a/crates/prune/types/src/target.rs b/crates/prune/types/src/target.rs index 657cf6a37c5..3ff18554a9b 100644 --- a/crates/prune/types/src/target.rs +++ b/crates/prune/types/src/target.rs @@ -104,6 +104,10 @@ pub struct PruneModes { /// /// The [`BlockNumber`](`crate::BlockNumber`) represents the starting block from which point /// onwards the receipts are preserved. 
+ #[cfg_attr( + any(test, feature = "serde"), + serde(skip_serializing_if = "ReceiptsLogPruneConfig::is_empty") + )] pub receipts_log_filter: ReceiptsLogPruneConfig, } From 936baf12320ecd0a0c4dab3afd36ac526c62515d Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 21 Oct 2025 16:05:38 +0400 Subject: [PATCH 135/371] refactor: remove `FullNodePrimitives` (#19176) --- Cargo.lock | 6 ++-- crates/engine/local/Cargo.toml | 4 +-- crates/engine/local/src/miner.rs | 2 +- crates/ethereum/node/tests/e2e/dev.rs | 3 +- crates/exex/exex/src/backfill/stream.rs | 4 +-- crates/exex/exex/src/backfill/test_utils.rs | 8 ++--- crates/node/builder/src/builder/mod.rs | 4 +-- crates/node/types/src/lib.rs | 2 +- crates/optimism/flashblocks/Cargo.toml | 4 +-- crates/optimism/flashblocks/src/consensus.rs | 3 +- crates/optimism/flashblocks/src/lib.rs | 8 +++++ crates/optimism/flashblocks/src/payload.rs | 2 +- crates/primitives-traits/src/block/mod.rs | 12 ++----- crates/primitives-traits/src/lib.rs | 2 +- crates/primitives-traits/src/node.rs | 31 +++---------------- .../stages/src/stages/hashing_account.rs | 2 +- .../provider/src/providers/database/chain.rs | 8 ++--- crates/storage/provider/src/providers/mod.rs | 6 ++-- .../src/providers/static_file/manager.rs | 8 ++--- crates/storage/storage-api/src/chain.rs | 10 +++--- 20 files changed, 52 insertions(+), 77 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 228c0783058..5a07fa205da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7951,7 +7951,7 @@ dependencies = [ "reth-optimism-chainspec", "reth-payload-builder", "reth-payload-primitives", - "reth-provider", + "reth-storage-api", "reth-transaction-pool", "tokio", "tokio-stream", @@ -9340,20 +9340,20 @@ dependencies = [ "futures-util", "metrics", "reth-chain-state", + "reth-engine-primitives", "reth-errors", "reth-evm", "reth-execution-types", "reth-metrics", - "reth-node-api", "reth-optimism-evm", "reth-optimism-payload-builder", "reth-optimism-primitives", + "reth-payload-primitives", "reth-primitives-traits", "reth-revm", "reth-rpc-eth-types", "reth-storage-api", "reth-tasks", - "reth-trie", "ringbuffer", "serde", "serde_json", diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index 98793a24b21..dd708dee905 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -11,11 +11,11 @@ exclude.workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-engine-primitives.workspace = true +reth-engine-primitives = { workspace = true, features = ["std"] } reth-ethereum-engine-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true -reth-provider.workspace = true +reth-storage-api.workspace = true reth-transaction-pool.workspace = true # alloy diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 818848000f6..d6298502fb5 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -10,7 +10,7 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{ BuiltPayload, EngineApiMessageVersion, PayloadAttributesBuilder, PayloadKind, PayloadTypes, }; -use reth_provider::BlockReader; +use reth_storage_api::BlockReader; use reth_transaction_pool::TransactionPool; use std::{ collections::VecDeque, diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index 5ccd74ecb24..bf022a514e8 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -3,7 +3,7 
@@ use alloy_genesis::Genesis; use alloy_primitives::{b256, hex, Address}; use futures::StreamExt; use reth_chainspec::ChainSpec; -use reth_node_api::{BlockBody, FullNodeComponents, FullNodePrimitives, NodeTypes}; +use reth_node_api::{BlockBody, FullNodeComponents}; use reth_node_builder::{rpc::RethRpcAddOns, FullNode, NodeBuilder, NodeConfig, NodeHandle}; use reth_node_core::args::DevArgs; use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; @@ -81,7 +81,6 @@ async fn assert_chain_advances(node: &FullNode) where N: FullNodeComponents, AddOns: RethRpcAddOns, - N::Types: NodeTypes, { let mut notifications = node.provider.canonical_state_stream(); diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index aa7cacdba4a..9d50737f5aa 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -256,7 +256,7 @@ mod tests { use reth_ethereum_primitives::{Block, BlockBody, Transaction}; use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::{ - crypto::secp256k1::public_key_to_address, Block as _, FullNodePrimitives, + crypto::secp256k1::public_key_to_address, Block as _, NodePrimitives, }; use reth_provider::{ providers::{BlockchainProvider, ProviderNodeTypes}, @@ -395,7 +395,7 @@ mod tests { ) -> Result<()> where N: ProviderNodeTypes< - Primitives: FullNodePrimitives< + Primitives: NodePrimitives< Block = reth_ethereum_primitives::Block, BlockBody = reth_ethereum_primitives::BlockBody, Receipt = reth_ethereum_primitives::Receipt, diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index a3d82428822..e489a98abf7 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -10,7 +10,7 @@ use reth_evm::{ ConfigureEvm, }; use reth_evm_ethereum::EthEvmConfig; -use reth_node_api::FullNodePrimitives; +use reth_node_api::NodePrimitives; use reth_primitives_traits::{Block as _, RecoveredBlock}; use reth_provider::{ providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, @@ -58,7 +58,7 @@ pub(crate) fn execute_block_and_commit_to_database( ) -> eyre::Result> where N: ProviderNodeTypes< - Primitives: FullNodePrimitives< + Primitives: NodePrimitives< Block = reth_ethereum_primitives::Block, BlockBody = reth_ethereum_primitives::BlockBody, Receipt = reth_ethereum_primitives::Receipt, @@ -169,7 +169,7 @@ pub(crate) fn blocks_and_execution_outputs( > where N: ProviderNodeTypes< - Primitives: FullNodePrimitives< + Primitives: NodePrimitives< Block = reth_ethereum_primitives::Block, BlockBody = reth_ethereum_primitives::BlockBody, Receipt = reth_ethereum_primitives::Receipt, @@ -193,7 +193,7 @@ pub(crate) fn blocks_and_execution_outcome( ) -> eyre::Result<(Vec>, ExecutionOutcome)> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives< + N::Primitives: NodePrimitives< Block = reth_ethereum_primitives::Block, Receipt = reth_ethereum_primitives::Receipt, >, diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index fb22a82795e..8f01f251b53 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -21,8 +21,7 @@ use reth_network::{ NetworkPrimitives, }; use reth_node_api::{ - FullNodePrimitives, FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, - NodeTypesWithDBAdapter, + FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, NodeTypesWithDBAdapter, }; use reth_node_core::{ 
cli::config::{PayloadBuilderConfig, RethTransactionPoolConfig}, @@ -397,7 +396,6 @@ where >>::Components, >, >, - N::Primitives: FullNodePrimitives, EngineNodeLauncher: LaunchNode< NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns>, >, diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index daa4d11153a..b5b38f48c7d 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -11,7 +11,7 @@ use core::{fmt::Debug, marker::PhantomData}; pub use reth_primitives_traits::{ - Block, BlockBody, FullBlock, FullNodePrimitives, FullReceipt, FullSignedTx, NodePrimitives, + Block, BlockBody, FullBlock, FullReceipt, FullSignedTx, NodePrimitives, }; use reth_chainspec::EthChainSpec; diff --git a/crates/optimism/flashblocks/Cargo.toml b/crates/optimism/flashblocks/Cargo.toml index 532cd4d6962..977e28d37e1 100644 --- a/crates/optimism/flashblocks/Cargo.toml +++ b/crates/optimism/flashblocks/Cargo.toml @@ -16,17 +16,17 @@ reth-optimism-primitives = { workspace = true, features = ["serde"] } reth-optimism-evm.workspace = true reth-chain-state = { workspace = true, features = ["serde"] } reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-engine-primitives = { workspace = true, features = ["std"] } reth-execution-types = { workspace = true, features = ["serde"] } reth-evm.workspace = true reth-revm.workspace = true reth-optimism-payload-builder.workspace = true reth-rpc-eth-types.workspace = true reth-errors.workspace = true +reth-payload-primitives.workspace = true reth-storage-api.workspace = true -reth-node-api.workspace = true reth-tasks.workspace = true reth-metrics.workspace = true -reth-trie.workspace = true # alloy alloy-eips = { workspace = true, features = ["serde"] } diff --git a/crates/optimism/flashblocks/src/consensus.rs b/crates/optimism/flashblocks/src/consensus.rs index 353eddbf4cc..60314d2f6c8 100644 --- a/crates/optimism/flashblocks/src/consensus.rs +++ b/crates/optimism/flashblocks/src/consensus.rs @@ -1,7 +1,8 @@ use crate::FlashBlockCompleteSequenceRx; use alloy_primitives::B256; -use reth_node_api::{ConsensusEngineHandle, EngineApiMessageVersion}; +use reth_engine_primitives::ConsensusEngineHandle; use reth_optimism_payload_builder::OpPayloadTypes; +use reth_payload_primitives::EngineApiMessageVersion; use ringbuffer::{AllocRingBuffer, RingBuffer}; use tracing::warn; diff --git a/crates/optimism/flashblocks/src/lib.rs b/crates/optimism/flashblocks/src/lib.rs index 11647039930..d36ddb21fca 100644 --- a/crates/optimism/flashblocks/src/lib.rs +++ b/crates/optimism/flashblocks/src/lib.rs @@ -1,5 +1,13 @@ //! A downstream integration of Flashblocks. 
+#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + pub use payload::{ ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashBlock, FlashBlockDecoder, Metadata, diff --git a/crates/optimism/flashblocks/src/payload.rs b/crates/optimism/flashblocks/src/payload.rs index f7d8a38c964..da81ada016a 100644 --- a/crates/optimism/flashblocks/src/payload.rs +++ b/crates/optimism/flashblocks/src/payload.rs @@ -3,9 +3,9 @@ use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{bytes, Address, Bloom, Bytes, B256, U256}; use alloy_rpc_types_engine::PayloadId; use derive_more::Deref; -use reth_node_api::NodePrimitives; use reth_optimism_evm::OpNextBlockEnvAttributes; use reth_optimism_primitives::OpReceipt; +use reth_primitives_traits::NodePrimitives; use reth_rpc_eth_types::PendingBlock; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 2aeade9bc17..7705512d633 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -50,17 +50,9 @@ pub mod serde_bincode_compat { } /// Helper trait that unifies all behaviour required by block to support full node operations. -pub trait FullBlock: - Block + alloy_rlp::Encodable + alloy_rlp::Decodable -{ -} +pub trait FullBlock: Block {} -impl FullBlock for T where - T: Block - + alloy_rlp::Encodable - + alloy_rlp::Decodable -{ -} +impl FullBlock for T where T: Block {} /// Helper trait to access [`BlockBody::Transaction`] given a [`Block`]. pub type BlockTx = <::Body as BlockBody>::Transaction; diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 1cc56ce2cb9..67df9637fa4 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -188,7 +188,7 @@ pub use size::InMemorySize; /// Node traits pub mod node; -pub use node::{BlockTy, BodyTy, FullNodePrimitives, HeaderTy, NodePrimitives, ReceiptTy, TxTy}; +pub use node::{BlockTy, BodyTy, HeaderTy, NodePrimitives, ReceiptTy, TxTy}; /// Helper trait that requires de-/serialize implementation since `serde` feature is enabled. #[cfg(feature = "serde")] diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 1f5bfed139e..f23ff222ab6 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -1,6 +1,5 @@ use crate::{ - Block, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, FullSignedTx, - MaybeSerdeBincodeCompat, Receipt, + FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, FullSignedTx, MaybeSerdeBincodeCompat, }; use core::fmt; @@ -13,7 +12,8 @@ pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static { /// Block primitive. - type Block: Block

+ MaybeSerdeBincodeCompat;
+    type Block: FullBlock<Header = Self::BlockHeader, Body = Self::BlockBody>
+ + MaybeSerdeBincodeCompat; /// Block header primitive. type BlockHeader: FullBlockHeader; /// Block body primitive. @@ -24,30 +24,7 @@ pub trait NodePrimitives: /// format that includes the signature and can be included in a block. type SignedTx: FullSignedTx; /// A receipt. - type Receipt: Receipt; -} -/// Helper trait that sets trait bounds on [`NodePrimitives`]. -pub trait FullNodePrimitives -where - Self: NodePrimitives< - Block: FullBlock
, - BlockHeader: FullBlockHeader, - BlockBody: FullBlockBody, - SignedTx: FullSignedTx, - Receipt: FullReceipt, - >, -{ -} - -impl FullNodePrimitives for T where - T: NodePrimitives< - Block: FullBlock
, - BlockHeader: FullBlockHeader, - BlockBody: FullBlockBody, - SignedTx: FullSignedTx, - Receipt: FullReceipt, - > -{ + type Receipt: FullReceipt; } /// Helper adapter type for accessing [`NodePrimitives`] block header types. diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index cc86db14d38..1e48f2d38e0 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -64,7 +64,7 @@ impl AccountHashingStage { opts: SeedOpts, ) -> Result, StageError> where - N::Primitives: reth_primitives_traits::FullNodePrimitives< + N::Primitives: reth_primitives_traits::NodePrimitives< Block = reth_ethereum_primitives::Block, BlockHeader = reth_primitives_traits::Header, >, diff --git a/crates/storage/provider/src/providers/database/chain.rs b/crates/storage/provider/src/providers/database/chain.rs index 2da32d9a05f..9ce3861eb3c 100644 --- a/crates/storage/provider/src/providers/database/chain.rs +++ b/crates/storage/provider/src/providers/database/chain.rs @@ -1,12 +1,12 @@ use crate::{providers::NodeTypesForProvider, DatabaseProvider}; use reth_db_api::transaction::{DbTx, DbTxMut}; -use reth_node_types::FullNodePrimitives; +use reth_node_types::NodePrimitives; use reth_primitives_traits::{FullBlockHeader, FullSignedTx}; use reth_storage_api::{ChainStorageReader, ChainStorageWriter, EmptyBodyStorage, EthStorage}; /// Trait that provides access to implementations of [`ChainStorage`] -pub trait ChainStorage: Send + Sync { +pub trait ChainStorage: Send + Sync { /// Provides access to the chain reader. fn reader(&self) -> impl ChainStorageReader, Primitives> where @@ -24,7 +24,7 @@ impl ChainStorage for EthStorage where T: FullSignedTx, H: FullBlockHeader, - N: FullNodePrimitives< + N: NodePrimitives< Block = alloy_consensus::Block, BlockHeader = H, BlockBody = alloy_consensus::BlockBody, @@ -52,7 +52,7 @@ impl ChainStorage for EmptyBodyStorage where T: FullSignedTx, H: FullBlockHeader, - N: FullNodePrimitives< + N: NodePrimitives< Block = alloy_consensus::Block, BlockHeader = H, BlockBody = alloy_consensus::BlockBody, diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 5a950bbd7d2..41e8121991b 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -2,7 +2,7 @@ use reth_chainspec::EthereumHardforks; use reth_db_api::table::Value; -use reth_node_types::{FullNodePrimitives, NodeTypes, NodeTypesWithDB}; +use reth_node_types::{NodePrimitives, NodeTypes, NodeTypesWithDB}; mod database; pub use database::*; @@ -36,7 +36,7 @@ where Self: NodeTypes< ChainSpec: EthereumHardforks, Storage: ChainStorage, - Primitives: FullNodePrimitives, + Primitives: NodePrimitives, >, { } @@ -45,7 +45,7 @@ impl NodeTypesForProvider for T where T: NodeTypes< ChainSpec: EthereumHardforks, Storage: ChainStorage, - Primitives: FullNodePrimitives, + Primitives: NodePrimitives, > { } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index cd7f5c16d91..76fa45f5a56 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -33,7 +33,7 @@ use reth_db_api::{ }; use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; -use reth_node_types::{FullNodePrimitives, 
NodePrimitives}; +use reth_node_types::NodePrimitives; use reth_primitives_traits::{RecoveredBlock, SealedHeader, SignedTransaction}; use reth_stages_types::{PipelineTarget, StageId}; use reth_static_file_types::{ @@ -1524,8 +1524,8 @@ impl> Rec } } -impl> - TransactionsProviderExt for StaticFileProvider +impl> TransactionsProviderExt + for StaticFileProvider { fn transaction_hashes_by_range( &self, @@ -1723,7 +1723,7 @@ impl BlockNumReader for StaticFileProvider { /* Cannot be successfully implemented but must exist for trait requirements */ -impl> BlockReader +impl> BlockReader for StaticFileProvider { type Block = N::Block; diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs index 63e6bdba738..5b159715ad2 100644 --- a/crates/storage/storage-api/src/chain.rs +++ b/crates/storage/storage-api/src/chain.rs @@ -14,7 +14,7 @@ use reth_db_api::{ use reth_db_models::StoredBlockWithdrawals; use reth_ethereum_primitives::TransactionSigned; use reth_primitives_traits::{ - Block, BlockBody, FullBlockHeader, FullNodePrimitives, SignedTransaction, + Block, BlockBody, FullBlockHeader, NodePrimitives, SignedTransaction, }; use reth_storage_errors::provider::ProviderResult; @@ -40,11 +40,11 @@ pub trait BlockBodyWriter { } /// Trait that implements how chain-specific types are written to the storage. -pub trait ChainStorageWriter: +pub trait ChainStorageWriter: BlockBodyWriter::Body> { } -impl ChainStorageWriter for T where +impl ChainStorageWriter for T where T: BlockBodyWriter::Body> { } @@ -73,11 +73,11 @@ pub trait BlockBodyReader { } /// Trait that implements how chain-specific types are read from storage. -pub trait ChainStorageReader: +pub trait ChainStorageReader: BlockBodyReader { } -impl ChainStorageReader for T where +impl ChainStorageReader for T where T: BlockBodyReader { } From dbceffdcf4b74ba003af2ad83737b3c14a6bfc7f Mon Sep 17 00:00:00 2001 From: David Klank <155117116+davidjsonn@users.noreply.github.com> Date: Tue, 21 Oct 2025 15:37:58 +0300 Subject: [PATCH 136/371] refactor(ipc): simplify RpcServiceCfg from enum to struct (#19180) --- crates/rpc/ipc/src/server/mod.rs | 2 +- crates/rpc/ipc/src/server/rpc_service.rs | 43 ++++++------------------ 2 files changed, 11 insertions(+), 34 deletions(-) diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index b6114938d2b..b86037628ea 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -391,7 +391,7 @@ where fn call(&mut self, request: String) -> Self::Future { trace!("{:?}", request); - let cfg = RpcServiceCfg::CallsAndSubscriptions { + let cfg = RpcServiceCfg { bounded_subscriptions: BoundedSubscriptions::new( self.inner.server_cfg.max_subscriptions_per_connection, ), diff --git a/crates/rpc/ipc/src/server/rpc_service.rs b/crates/rpc/ipc/src/server/rpc_service.rs index 75bd53ad6d5..f7fcdace4c4 100644 --- a/crates/rpc/ipc/src/server/rpc_service.rs +++ b/crates/rpc/ipc/src/server/rpc_service.rs @@ -25,17 +25,11 @@ pub struct RpcService { } /// Configuration of the `RpcService`. -#[allow(dead_code)] #[derive(Clone, Debug)] -pub(crate) enum RpcServiceCfg { - /// The server supports only calls. - OnlyCalls, - /// The server supports both method calls and subscriptions. 
- CallsAndSubscriptions { - bounded_subscriptions: BoundedSubscriptions, - sink: MethodSink, - id_provider: Arc, - }, +pub(crate) struct RpcServiceCfg { + pub(crate) bounded_subscriptions: BoundedSubscriptions, + pub(crate) sink: MethodSink, + pub(crate) id_provider: Arc, } impl RpcService { @@ -82,30 +76,20 @@ impl RpcServiceT for RpcService { ResponseFuture::future(fut) } MethodCallback::Subscription(callback) => { - let RpcServiceCfg::CallsAndSubscriptions { - bounded_subscriptions, - sink, - id_provider, - } = &self.cfg - else { - tracing::warn!(id = ?id, method = %name, "Attempted subscription on a service not configured for subscriptions."); - let rp = - MethodResponse::error(id, ErrorObject::from(ErrorCode::InternalError)); - return ResponseFuture::ready(rp); - }; - - if let Some(p) = bounded_subscriptions.acquire() { + let cfg = &self.cfg; + + if let Some(p) = cfg.bounded_subscriptions.acquire() { let conn_state = SubscriptionState { conn_id, - id_provider: &**id_provider, + id_provider: &*cfg.id_provider, subscription_permit: p, }; let fut = - callback(id.clone(), params, sink.clone(), conn_state, extensions); + callback(id.clone(), params, cfg.sink.clone(), conn_state, extensions); ResponseFuture::future(fut) } else { - let max = bounded_subscriptions.max(); + let max = cfg.bounded_subscriptions.max(); let rp = MethodResponse::error(id, reject_too_many_subscriptions(max)); ResponseFuture::ready(rp) } @@ -114,13 +98,6 @@ impl RpcServiceT for RpcService { // Don't adhere to any resource or subscription limits; always let unsubscribing // happen! - let RpcServiceCfg::CallsAndSubscriptions { .. } = self.cfg else { - tracing::warn!(id = ?id, method = %name, "Attempted unsubscription on a service not configured for subscriptions."); - let rp = - MethodResponse::error(id, ErrorObject::from(ErrorCode::InternalError)); - return ResponseFuture::ready(rp); - }; - let rp = callback(id, params, conn_id, max_response_body_size, extensions); ResponseFuture::ready(rp) } From 93b63bc765d35af3086708d569d303850a9f8225 Mon Sep 17 00:00:00 2001 From: Brawn Date: Tue, 21 Oct 2025 15:45:37 +0300 Subject: [PATCH 137/371] chore: fix incorrect hex value in comment (0x2A instead of 0x7E) (#19181) --- examples/custom-node/src/primitives/tx.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/custom-node/src/primitives/tx.rs b/examples/custom-node/src/primitives/tx.rs index 7c282922f48..fe763e079e5 100644 --- a/examples/custom-node/src/primitives/tx.rs +++ b/examples/custom-node/src/primitives/tx.rs @@ -23,7 +23,7 @@ pub enum CustomTransaction { /// A regular Optimism transaction as defined by [`OpTxEnvelope`]. #[envelope(flatten)] Op(OpTxEnvelope), - /// A [`TxPayment`] tagged with type 0x7E. + /// A [`TxPayment`] tagged with type 0x2A (decimal 42). 
#[envelope(ty = 42)] Payment(Signed), } From 01820fdaf7f927fad382275234278482c680733e Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 21 Oct 2025 15:04:19 +0200 Subject: [PATCH 138/371] feat(e2e): add builder API for configuring test node setups (#19146) --- crates/e2e-test-utils/src/lib.rs | 141 ++---------- crates/e2e-test-utils/src/setup_builder.rs | 210 ++++++++++++++++++ .../tests/e2e-testsuite/main.rs | 37 +++ 3 files changed, 265 insertions(+), 123 deletions(-) create mode 100644 crates/e2e-test-utils/src/setup_builder.rs diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index a51b78ae654..e7b83cb3ad9 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -1,23 +1,19 @@ //! Utilities for end-to-end tests. use node::NodeTestContext; -use reth_chainspec::{ChainSpec, EthChainSpec}; +use reth_chainspec::ChainSpec; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_engine_local::LocalPayloadAttributesBuilder; use reth_network_api::test_utils::PeersHandleProvider; use reth_node_builder::{ components::NodeComponentsBuilder, rpc::{EngineValidatorAddOn, RethRpcAddOns}, - EngineNodeLauncher, FullNodeTypesAdapter, Node, NodeAdapter, NodeBuilder, NodeComponents, - NodeConfig, NodeHandle, NodePrimitives, NodeTypes, NodeTypesWithDBAdapter, - PayloadAttributesBuilder, PayloadTypes, + FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, NodePrimitives, NodeTypes, + NodeTypesWithDBAdapter, PayloadAttributesBuilder, PayloadTypes, }; -use reth_node_core::args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}; use reth_provider::providers::{BlockchainProvider, NodeTypesForProvider}; -use reth_rpc_server_types::RpcModuleSelection; use reth_tasks::TaskManager; use std::sync::Arc; -use tracing::{span, Level}; use wallet::Wallet; /// Wrapper type to create test nodes @@ -45,6 +41,10 @@ mod rpc; /// Utilities for creating and writing RLP test data pub mod test_rlp_utils; +/// Builder for configuring test node setups +mod setup_builder; +pub use setup_builder::E2ETestSetupBuilder; + /// Creates the initial setup with `num_nodes` started and interconnected. 
pub async fn setup( num_nodes: usize, @@ -53,60 +53,14 @@ pub async fn setup( attributes_generator: impl Fn(u64) -> <::Payload as PayloadTypes>::PayloadBuilderAttributes + Send + Sync + Copy + 'static, ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where - N: Default + Node> + NodeTypesForProvider, - N::ComponentsBuilder: NodeComponentsBuilder< - TmpNodeAdapter, - Components: NodeComponents, Network: PeersHandleProvider>, - >, - N::AddOns: RethRpcAddOns> + EngineValidatorAddOn>, + N: NodeBuilderHelper, LocalPayloadAttributesBuilder: PayloadAttributesBuilder<<::Payload as PayloadTypes>::PayloadAttributes>, { - let tasks = TaskManager::current(); - let exec = tasks.executor(); - - let network_config = NetworkArgs { - discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, - ..NetworkArgs::default() - }; - - // Create nodes and peer them - let mut nodes: Vec> = Vec::with_capacity(num_nodes); - - for idx in 0..num_nodes { - let node_config = NodeConfig::new(chain_spec.clone()) - .with_network(network_config.clone()) - .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()) - .set_dev(is_dev); - - let span = span!(Level::INFO, "node", idx); - let _enter = span.enter(); - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) - .testing_node(exec.clone()) - .node(Default::default()) - .launch() - .await?; - - let mut node = NodeTestContext::new(node, attributes_generator).await?; - - // Connect each node in a chain. - if let Some(previous_node) = nodes.last_mut() { - previous_node.connect(&mut node).await; - } - - // Connect last node with the first if there are more than two - if idx + 1 == num_nodes && - num_nodes > 2 && - let Some(first_node) = nodes.first_mut() - { - node.connect(first_node).await; - } - - nodes.push(node); - } - - Ok((nodes, tasks, Wallet::default().with_chain_id(chain_spec.chain().into()))) + E2ETestSetupBuilder::new(num_nodes, chain_spec, attributes_generator) + .with_node_config_modifier(move |config| config.set_dev(is_dev)) + .build() + .await } /// Creates the initial setup with `num_nodes` started and interconnected. 
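// A rough usage sketch of the builder that both setup helpers above now delegate to.
// Assumptions (borrowed from the test added at the end of this patch): an async test
// body returning eyre::Result, an EthereumNode target, and a `chain_spec` that has
// already been built.
let (nodes, _tasks, _wallet) = E2ETestSetupBuilder::<EthereumNode, _>::new(
    2,
    chain_spec,
    |_| EthPayloadBuilderAttributes::default(),
)
// Optional knobs; by default nodes are interconnected and TreeConfig::default() is used.
.with_node_config_modifier(|config| config.set_dev(true))
.with_tree_config_modifier(|config| config.with_persistence_threshold(0))
.with_connect_nodes(true)
.build()
.await?;
assert_eq!(nodes.len(), 2);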
@@ -155,71 +109,12 @@ where LocalPayloadAttributesBuilder: PayloadAttributesBuilder<::PayloadAttributes>, { - let tasks = TaskManager::current(); - let exec = tasks.executor(); - - let network_config = NetworkArgs { - discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, - ..NetworkArgs::default() - }; - - // Create nodes and peer them - let mut nodes: Vec> = Vec::with_capacity(num_nodes); - - for idx in 0..num_nodes { - let node_config = NodeConfig::new(chain_spec.clone()) - .with_network(network_config.clone()) - .with_unused_ports() - .with_rpc( - RpcServerArgs::default() - .with_unused_ports() - .with_http() - .with_http_api(RpcModuleSelection::All), - ) - .set_dev(is_dev); - - let span = span!(Level::INFO, "node", idx); - let _enter = span.enter(); - let node = N::default(); - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) - .testing_node(exec.clone()) - .with_types_and_provider::>() - .with_components(node.components_builder()) - .with_add_ons(node.add_ons()) - .launch_with_fn(|builder| { - let launcher = EngineNodeLauncher::new( - builder.task_executor().clone(), - builder.config().datadir(), - tree_config.clone(), - ); - builder.launch_with(launcher) - }) - .await?; - - let mut node = NodeTestContext::new(node, attributes_generator).await?; - - let genesis = node.block_hash(0); - node.update_forkchoice(genesis, genesis).await?; - - // Connect each node in a chain if requested. - if connect_nodes { - if let Some(previous_node) = nodes.last_mut() { - previous_node.connect(&mut node).await; - } - - // Connect last node with the first if there are more than two - if idx + 1 == num_nodes && - num_nodes > 2 && - let Some(first_node) = nodes.first_mut() - { - node.connect(first_node).await; - } - } - - nodes.push(node); - } - - Ok((nodes, tasks, Wallet::default().with_chain_id(chain_spec.chain().into()))) + E2ETestSetupBuilder::new(num_nodes, chain_spec, attributes_generator) + .with_tree_config_modifier(move |_| tree_config.clone()) + .with_node_config_modifier(move |config| config.set_dev(is_dev)) + .with_connect_nodes(connect_nodes) + .build() + .await } // Type aliases diff --git a/crates/e2e-test-utils/src/setup_builder.rs b/crates/e2e-test-utils/src/setup_builder.rs new file mode 100644 index 00000000000..8de2280fe41 --- /dev/null +++ b/crates/e2e-test-utils/src/setup_builder.rs @@ -0,0 +1,210 @@ +//! Builder for configuring and creating test node setups. +//! +//! This module provides a flexible builder API for setting up test nodes with custom +//! configurations through closures that modify `NodeConfig` and `TreeConfig`. + +use crate::{node::NodeTestContext, wallet::Wallet, NodeBuilderHelper, NodeHelperType, TmpDB}; +use reth_chainspec::EthChainSpec; +use reth_engine_local::LocalPayloadAttributesBuilder; +use reth_node_builder::{ + EngineNodeLauncher, NodeBuilder, NodeConfig, NodeHandle, NodeTypes, NodeTypesWithDBAdapter, + PayloadAttributesBuilder, PayloadTypes, +}; +use reth_node_core::args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}; +use reth_provider::providers::BlockchainProvider; +use reth_rpc_server_types::RpcModuleSelection; +use reth_tasks::TaskManager; +use std::sync::Arc; +use tracing::{span, Level}; + +/// Type alias for tree config modifier closure +type TreeConfigModifier = + Box reth_node_api::TreeConfig + Send + Sync>; + +/// Type alias for node config modifier closure +type NodeConfigModifier = Box) -> NodeConfig + Send + Sync>; + +/// Builder for configuring and creating test node setups. 
+/// +/// This builder allows customizing test node configurations through closures that +/// modify `NodeConfig` and `TreeConfig`. It avoids code duplication by centralizing +/// the node creation logic. +pub struct E2ETestSetupBuilder +where + N: NodeBuilderHelper, + F: Fn(u64) -> <::Payload as PayloadTypes>::PayloadBuilderAttributes + + Send + + Sync + + Copy + + 'static, + LocalPayloadAttributesBuilder: + PayloadAttributesBuilder<::PayloadAttributes>, +{ + num_nodes: usize, + chain_spec: Arc, + attributes_generator: F, + connect_nodes: bool, + tree_config_modifier: Option, + node_config_modifier: Option>, +} + +impl E2ETestSetupBuilder +where + N: NodeBuilderHelper, + F: Fn(u64) -> <::Payload as PayloadTypes>::PayloadBuilderAttributes + + Send + + Sync + + Copy + + 'static, + LocalPayloadAttributesBuilder: + PayloadAttributesBuilder<::PayloadAttributes>, +{ + /// Creates a new builder with the required parameters. + pub fn new(num_nodes: usize, chain_spec: Arc, attributes_generator: F) -> Self { + Self { + num_nodes, + chain_spec, + attributes_generator, + connect_nodes: true, + tree_config_modifier: None, + node_config_modifier: None, + } + } + + /// Sets whether nodes should be interconnected (default: true). + pub const fn with_connect_nodes(mut self, connect_nodes: bool) -> Self { + self.connect_nodes = connect_nodes; + self + } + + /// Sets a modifier function for the tree configuration. + /// + /// The closure receives the base tree config and returns a modified version. + pub fn with_tree_config_modifier(mut self, modifier: G) -> Self + where + G: Fn(reth_node_api::TreeConfig) -> reth_node_api::TreeConfig + Send + Sync + 'static, + { + self.tree_config_modifier = Some(Box::new(modifier)); + self + } + + /// Sets a modifier function for the node configuration. + /// + /// The closure receives the base node config and returns a modified version. + pub fn with_node_config_modifier(mut self, modifier: G) -> Self + where + G: Fn(NodeConfig) -> NodeConfig + Send + Sync + 'static, + { + self.node_config_modifier = Some(Box::new(modifier)); + self + } + + /// Builds and launches the test nodes. 
+ pub async fn build( + self, + ) -> eyre::Result<( + Vec>>>, + TaskManager, + Wallet, + )> { + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let network_config = NetworkArgs { + discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, + ..NetworkArgs::default() + }; + + // Apply tree config modifier if present + let tree_config = if let Some(modifier) = self.tree_config_modifier { + modifier(reth_node_api::TreeConfig::default()) + } else { + reth_node_api::TreeConfig::default() + }; + + let mut nodes: Vec> = Vec::with_capacity(self.num_nodes); + + for idx in 0..self.num_nodes { + // Create base node config + let base_config = NodeConfig::new(self.chain_spec.clone()) + .with_network(network_config.clone()) + .with_unused_ports() + .with_rpc( + RpcServerArgs::default() + .with_unused_ports() + .with_http() + .with_http_api(RpcModuleSelection::All), + ); + + // Apply node config modifier if present + let node_config = if let Some(modifier) = &self.node_config_modifier { + modifier(base_config) + } else { + base_config + }; + + let span = span!(Level::INFO, "node", idx); + let _enter = span.enter(); + let node = N::default(); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) + .testing_node(exec.clone()) + .with_types_and_provider::>() + .with_components(node.components_builder()) + .with_add_ons(node.add_ons()) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + builder.task_executor().clone(), + builder.config().datadir(), + tree_config.clone(), + ); + builder.launch_with(launcher) + }) + .await?; + + let mut node = NodeTestContext::new(node, self.attributes_generator).await?; + + let genesis = node.block_hash(0); + node.update_forkchoice(genesis, genesis).await?; + + // Connect nodes if requested + if self.connect_nodes { + if let Some(previous_node) = nodes.last_mut() { + previous_node.connect(&mut node).await; + } + + // Connect last node with the first if there are more than two + if idx + 1 == self.num_nodes && + self.num_nodes > 2 && + let Some(first_node) = nodes.first_mut() + { + node.connect(first_node).await; + } + } + + nodes.push(node); + } + + Ok((nodes, tasks, Wallet::default().with_chain_id(self.chain_spec.chain().into()))) + } +} + +impl std::fmt::Debug for E2ETestSetupBuilder +where + N: NodeBuilderHelper, + F: Fn(u64) -> <::Payload as PayloadTypes>::PayloadBuilderAttributes + + Send + + Sync + + Copy + + 'static, + LocalPayloadAttributesBuilder: + PayloadAttributesBuilder<::PayloadAttributes>, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("E2ETestSetupBuilder") + .field("num_nodes", &self.num_nodes) + .field("connect_nodes", &self.connect_nodes) + .field("tree_config_modifier", &self.tree_config_modifier.as_ref().map(|_| "")) + .field("node_config_modifier", &self.node_config_modifier.as_ref().map(|_| "")) + .finish_non_exhaustive() + } +} diff --git a/crates/e2e-test-utils/tests/e2e-testsuite/main.rs b/crates/e2e-test-utils/tests/e2e-testsuite/main.rs index 5cd1bfe8c6c..04422ba34ad 100644 --- a/crates/e2e-test-utils/tests/e2e-testsuite/main.rs +++ b/crates/e2e-test-utils/tests/e2e-testsuite/main.rs @@ -15,9 +15,11 @@ use reth_e2e_test_utils::{ setup::{NetworkSetup, Setup}, Environment, TestBuilder, }, + E2ETestSetupBuilder, }; use reth_node_api::TreeConfig; use reth_node_ethereum::{EthEngineTypes, EthereumNode}; +use reth_payload_builder::EthPayloadBuilderAttributes; use std::sync::Arc; use tempfile::TempDir; use tracing::debug; 
@@ -349,3 +351,38 @@ async fn test_testsuite_multinode_block_production() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn test_setup_builder_with_custom_tree_config() -> Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) + .cancun_activated() + .build(), + ); + + let (nodes, _tasks, _wallet) = + E2ETestSetupBuilder::::new(1, chain_spec, |_| { + EthPayloadBuilderAttributes::default() + }) + .with_tree_config_modifier(|config| { + config.with_persistence_threshold(0).with_memory_block_buffer_target(5) + }) + .build() + .await?; + + assert_eq!(nodes.len(), 1); + + let genesis_hash = nodes[0].block_hash(0); + assert_ne!(genesis_hash, B256::ZERO); + + Ok(()) +} From 645672916a23996a293860ce26557d0ecfb67377 Mon Sep 17 00:00:00 2001 From: maradini77 <140460067+maradini77@users.noreply.github.com> Date: Tue, 21 Oct 2025 17:53:08 +0200 Subject: [PATCH 139/371] fix: remove unnecessary trait bounds in extend_sorted_vec helper (#19154) --- crates/trie/common/src/utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/trie/common/src/utils.rs b/crates/trie/common/src/utils.rs index e5d16d3ef51..5a2234fe26b 100644 --- a/crates/trie/common/src/utils.rs +++ b/crates/trie/common/src/utils.rs @@ -11,7 +11,7 @@ use alloc::vec::Vec; /// 5. Appending and re-sorting only if new items were added pub(crate) fn extend_sorted_vec(target: &mut Vec<(K, V)>, other: &[(K, V)]) where - K: Clone + Ord + core::hash::Hash + Eq, + K: Clone + Ord, V: Clone, { if other.is_empty() { From 563ae0d30b7be763d2199ecaa0503c69c946b92e Mon Sep 17 00:00:00 2001 From: 0xsensei Date: Tue, 21 Oct 2025 22:25:08 +0530 Subject: [PATCH 140/371] fix: drop support for total difficulty table (#16660) Co-authored-by: Aditya Pandey Co-authored-by: Matthias Seitz Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> --- crates/cli/commands/src/stage/drop.rs | 1 - .../cli/commands/src/stage/dump/execution.rs | 7 --- .../cli/commands/src/test_vectors/tables.rs | 1 - .../prune/src/segments/static_file/headers.rs | 45 +++++++------------ crates/prune/types/src/segment.rs | 3 +- crates/rpc/rpc-engine-api/src/error.rs | 14 +----- crates/stages/stages/benches/setup/mod.rs | 14 +----- crates/stages/stages/src/stages/bodies.rs | 2 +- crates/stages/stages/src/stages/era.rs | 6 +-- crates/stages/stages/src/stages/execution.rs | 1 - crates/stages/stages/src/stages/finish.rs | 4 +- crates/stages/stages/src/stages/headers.rs | 9 +--- .../stages/stages/src/test_utils/test_db.rs | 26 +++-------- crates/storage/db-api/src/tables/mod.rs | 3 +- crates/storage/db/benches/criterion.rs | 2 - .../src/providers/database/provider.rs | 1 - .../provider/src/providers/static_file/mod.rs | 9 +--- .../storage/provider/src/test_utils/blocks.rs | 4 -- 18 files changed, 33 insertions(+), 119 deletions(-) diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 5a01ad1fed6..0da3493cbb0 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -70,7 +70,6 @@ impl Command { StageEnum::Headers => { tx.clear::()?; tx.clear::>>()?; - tx.clear::()?; tx.clear::()?; reset_stage_checkpoint(tx, StageId::Headers)?; diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 
921af75c78b..9e8e68e9800 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -69,13 +69,6 @@ fn import_tables_with_range( to, ) })??; - output_db.update(|tx| { - tx.import_table_with_range::( - &db_tool.provider_factory.db_ref().tx()?, - Some(from), - to, - ) - })??; output_db.update(|tx| { tx.import_table_with_range::( &db_tool.provider_factory.db_ref().tx()?, diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index ef34e5b5e84..10b94695399 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -69,7 +69,6 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { generate!([ (CanonicalHeaders, PER_TABLE, TABLE), - (HeaderTerminalDifficulties, PER_TABLE, TABLE), (HeaderNumbers, PER_TABLE, TABLE), (Headers
, PER_TABLE, TABLE), (BlockBodyIndices, PER_TABLE, TABLE), diff --git a/crates/prune/prune/src/segments/static_file/headers.rs b/crates/prune/prune/src/segments/static_file/headers.rs index 9f3c291bf44..19b255ed3d3 100644 --- a/crates/prune/prune/src/segments/static_file/headers.rs +++ b/crates/prune/prune/src/segments/static_file/headers.rs @@ -21,7 +21,9 @@ use std::num::NonZeroUsize; use tracing::trace; /// Number of header tables to prune in one step -const HEADER_TABLES_TO_PRUNE: usize = 3; +/// +/// Note: `HeaderTerminalDifficulties` is no longer pruned after Paris/Merge as it's read-only +const HEADER_TABLES_TO_PRUNE: usize = 2; #[derive(Debug)] pub struct Headers { @@ -72,9 +74,6 @@ where .tx_ref() .cursor_write::::BlockHeader>>( )?; - - let mut header_tds_cursor = - provider.tx_ref().cursor_write::()?; let mut canonical_headers_cursor = provider.tx_ref().cursor_write::()?; @@ -86,7 +85,6 @@ where provider, &mut limiter, headers_cursor.walk_range(range.clone())?, - header_tds_cursor.walk_range(range.clone())?, canonical_headers_cursor.walk_range(range)?, ); @@ -111,6 +109,7 @@ where }) } } + type Walker<'a, Provider, T> = RangeWalker<'a, T, <::Tx as DbTxMut>::CursorMut>; @@ -127,7 +126,6 @@ where Provider, tables::Headers<::BlockHeader>, >, - header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>, canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>, } @@ -149,10 +147,9 @@ where Provider, tables::Headers<::BlockHeader>, >, - header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>, canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>, ) -> Self { - Self { provider, limiter, headers_walker, header_tds_walker, canonical_headers_walker } + Self { provider, limiter, headers_walker, canonical_headers_walker } } } @@ -168,7 +165,6 @@ where } let mut pruned_block_headers = None; - let mut pruned_block_td = None; let mut pruned_block_canonical = None; if let Err(err) = self.provider.tx_ref().prune_table_with_range_step( @@ -180,15 +176,6 @@ where return Some(Err(err.into())) } - if let Err(err) = self.provider.tx_ref().prune_table_with_range_step( - &mut self.header_tds_walker, - self.limiter, - &mut |_| false, - &mut |row| pruned_block_td = Some(row.0), - ) { - return Some(Err(err.into())) - } - if let Err(err) = self.provider.tx_ref().prune_table_with_range_step( &mut self.canonical_headers_walker, self.limiter, @@ -198,7 +185,7 @@ where return Some(Err(err.into())) } - if ![pruned_block_headers, pruned_block_td, pruned_block_canonical].iter().all_equal() { + if ![pruned_block_headers, pruned_block_canonical].iter().all_equal() { return Some(Err(PrunerError::InconsistentData( "All headers-related tables should be pruned up to the same height", ))) @@ -216,7 +203,7 @@ mod tests { static_file::headers::HEADER_TABLES_TO_PRUNE, PruneInput, PruneLimiter, Segment, SegmentOutput, }; - use alloy_primitives::{BlockNumber, B256, U256}; + use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use reth_db_api::{tables, transaction::DbTx}; use reth_provider::{ @@ -241,18 +228,17 @@ mod tests { let headers = random_header_range(&mut rng, 0..100, B256::ZERO); let tx = db.factory.provider_rw().unwrap().into_tx(); for header in &headers { - TestStageDB::insert_header(None, &tx, header, U256::ZERO).unwrap(); + TestStageDB::insert_header(None, &tx, header).unwrap(); } tx.commit().unwrap(); assert_eq!(db.table::().unwrap().len(), headers.len()); assert_eq!(db.table::().unwrap().len(), headers.len()); - 
assert_eq!(db.table::().unwrap().len(), headers.len()); let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| { let segment = super::Headers::new(db.factory.static_file_provider()); let prune_mode = PruneMode::Before(to_block); - let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(6); let input = PruneInput { previous_checkpoint: db .factory @@ -311,10 +297,6 @@ mod tests { db.table::().unwrap().len(), headers.len() - (last_pruned_block_number + 1) as usize ); - assert_eq!( - db.table::().unwrap().len(), - headers.len() - (last_pruned_block_number + 1) as usize - ); assert_eq!( db.factory.provider().unwrap().get_prune_checkpoint(PruneSegment::Headers).unwrap(), Some(PruneCheckpoint { @@ -325,11 +307,16 @@ mod tests { ); }; + // First test: Prune with limit of 6 entries + // This will prune blocks 0-2 (3 blocks × 2 tables = 6 entries) test_prune( 3, - (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 9), + (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 6), ); - test_prune(3, (PruneProgress::Finished, 3)); + + // Second test: Prune remaining blocks + // This will prune block 3 (1 block × 2 tables = 2 entries) + test_prune(3, (PruneProgress::Finished, 2)); } #[test] diff --git a/crates/prune/types/src/segment.rs b/crates/prune/types/src/segment.rs index 0d60d900137..c5cbecd4ccd 100644 --- a/crates/prune/types/src/segment.rs +++ b/crates/prune/types/src/segment.rs @@ -24,8 +24,7 @@ pub enum PruneSegment { AccountHistory, /// Prune segment responsible for the `StorageChangeSets` and `StoragesHistory` tables. StorageHistory, - /// Prune segment responsible for the `CanonicalHeaders`, `Headers` and - /// `HeaderTerminalDifficulties` tables. + /// Prune segment responsible for the `CanonicalHeaders`, `Headers` tables. Headers, /// Prune segment responsible for the `Transactions` table. Transactions, diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 2578b2f44e5..6155c004c36 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{B256, U256}; +use alloy_primitives::B256; use alloy_rpc_types_engine::{ ForkchoiceUpdateError, INVALID_FORK_CHOICE_STATE_ERROR, INVALID_FORK_CHOICE_STATE_ERROR_MSG, INVALID_PAYLOAD_ATTRIBUTES_ERROR, INVALID_PAYLOAD_ATTRIBUTES_ERROR_MSG, @@ -59,17 +59,6 @@ pub enum EngineApiError { /// Requested number of items count: u64, }, - /// Terminal total difficulty mismatch during transition configuration exchange. - #[error( - "invalid transition terminal total difficulty: \ - execution: {execution}, consensus: {consensus}" - )] - TerminalTD { - /// Execution terminal total difficulty value. - execution: U256, - /// Consensus terminal total difficulty value. - consensus: U256, - }, /// Terminal block hash mismatch during transition configuration exchange. #[error( "invalid transition terminal block hash: \ @@ -202,7 +191,6 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { } }, // Any other server error - EngineApiError::TerminalTD { .. } | EngineApiError::TerminalBlockHash { .. 
} | EngineApiError::NewPayload(_) | EngineApiError::Internal(_) | diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index 01d7571e0da..b6010dd6f39 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -1,12 +1,7 @@ #![expect(unreachable_pub)] -use alloy_primitives::{Address, B256, U256}; +use alloy_primitives::{Address, B256}; use itertools::concat; use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv}; -use reth_db_api::{ - cursor::DbCursorRO, - tables, - transaction::{DbTx, DbTxMut}, -}; use reth_primitives_traits::{Account, SealedBlock, SealedHeader}; use reth_provider::{ test_utils::MockNodeTypesWithDB, DBProvider, DatabaseProvider, DatabaseProviderFactory, @@ -198,13 +193,6 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { ); db.insert_blocks(blocks.iter(), StorageKind::Static).unwrap(); - - // initialize TD - db.commit(|tx| { - let (head, _) = tx.cursor_read::()?.first()?.unwrap_or_default(); - Ok(tx.put::(head, U256::from(0).into())?) - }) - .unwrap(); } db diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index d1386dded4b..7b6090ca86b 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -580,7 +580,7 @@ mod tests { ..Default::default() }, ); - self.db.insert_headers_with_td(blocks.iter().map(|block| block.sealed_header()))?; + self.db.insert_headers(blocks.iter().map(|block| block.sealed_header()))?; if let Some(progress) = blocks.get(start as usize) { // Insert last progress data { diff --git a/crates/stages/stages/src/stages/era.rs b/crates/stages/stages/src/stages/era.rs index e4f25325a42..10598f90112 100644 --- a/crates/stages/stages/src/stages/era.rs +++ b/crates/stages/stages/src/stages/era.rs @@ -384,7 +384,7 @@ mod tests { ..Default::default() }, ); - self.db.insert_headers_with_td(blocks.iter().map(|block| block.sealed_header()))?; + self.db.insert_headers(blocks.iter().map(|block| block.sealed_header()))?; if let Some(progress) = blocks.get(start as usize) { // Insert last progress data { @@ -499,10 +499,6 @@ mod tests { .ensure_no_entry_above_by_value::(block, |val| val)?; self.db.ensure_no_entry_above::(block, |key| key)?; self.db.ensure_no_entry_above::(block, |key| key)?; - self.db.ensure_no_entry_above::( - block, - |num| num, - )?; Ok(()) } diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index ed50572d58b..1666e79baf3 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -39,7 +39,6 @@ use super::missing_static_data_error; /// Input tables: /// - [`tables::CanonicalHeaders`] get next block to execute. /// - [`tables::Headers`] get for revm environment variables. 
-/// - [`tables::HeaderTerminalDifficulties`] /// - [`tables::BlockBodyIndices`] to get tx number /// - [`tables::Transactions`] to execute /// diff --git a/crates/stages/stages/src/stages/finish.rs b/crates/stages/stages/src/stages/finish.rs index 1b9e624b41b..8d676c35b99 100644 --- a/crates/stages/stages/src/stages/finish.rs +++ b/crates/stages/stages/src/stages/finish.rs @@ -72,7 +72,7 @@ mod tests { let start = input.checkpoint().block_number; let mut rng = generators::rng(); let head = random_header(&mut rng, start, None); - self.db.insert_headers_with_td(std::iter::once(&head))?; + self.db.insert_headers(std::iter::once(&head))?; // use previous progress as seed size let end = input.target.unwrap_or_default() + 1; @@ -82,7 +82,7 @@ mod tests { } let mut headers = random_header_range(&mut rng, start + 1..end, head.hash()); - self.db.insert_headers_with_td(headers.iter())?; + self.db.insert_headers(headers.iter())?; headers.insert(0, head); Ok(headers) } diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 74709e81421..8ad39be5eb8 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -333,9 +333,6 @@ where (input.unwind_to + 1).., )?; provider.tx_ref().unwind_table_by_num::(input.unwind_to)?; - provider - .tx_ref() - .unwind_table_by_num::(input.unwind_to)?; let unfinalized_headers_unwound = provider.tx_ref().unwind_table_by_num::(input.unwind_to)?; @@ -460,7 +457,7 @@ mod tests { let start = input.checkpoint().block_number; let headers = random_header_range(&mut rng, 0..start + 1, B256::ZERO); let head = headers.last().cloned().unwrap(); - self.db.insert_headers_with_td(headers.iter())?; + self.db.insert_headers(headers.iter())?; // use previous checkpoint as seed size let end = input.target.unwrap_or_default() + 1; @@ -551,10 +548,6 @@ mod tests { .ensure_no_entry_above_by_value::(block, |val| val)?; self.db.ensure_no_entry_above::(block, |key| key)?; self.db.ensure_no_entry_above::(block, |key| key)?; - self.db.ensure_no_entry_above::( - block, - |num| num, - )?; Ok(()) } diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index c88aa4574c0..3fe1c7f1f97 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{keccak256, Address, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{keccak256, Address, BlockNumber, TxHash, TxNumber, B256}; use reth_chainspec::MAINNET; use reth_db::{ test_utils::{create_test_rw_db, create_test_rw_db_with_path, create_test_static_files_dir}, @@ -150,7 +150,6 @@ impl TestStageDB { writer: Option<&mut StaticFileProviderRWRefMut<'_, EthPrimitives>>, tx: &TX, header: &SealedHeader, - td: U256, ) -> ProviderResult<()> { if let Some(writer) = writer { // Backfill: some tests start at a forward block number, but static files require no @@ -167,7 +166,6 @@ impl TestStageDB { writer.append_header(header.header(), &header.hash())?; } else { tx.put::(header.number, header.hash())?; - tx.put::(header.number, td.into())?; tx.put::(header.number, header.header().clone())?; } @@ -175,20 +173,16 @@ impl TestStageDB { Ok(()) } - fn insert_headers_inner<'a, I, const TD: bool>(&self, headers: I) -> ProviderResult<()> + fn insert_headers_inner<'a, I>(&self, headers: I) -> ProviderResult<()> where I: IntoIterator, { let provider = self.factory.static_file_provider(); let mut writer = 
provider.latest_writer(StaticFileSegment::Headers)?; let tx = self.factory.provider_rw()?.into_tx(); - let mut td = U256::ZERO; for header in headers { - if TD { - td += header.difficulty; - } - Self::insert_header(Some(&mut writer), &tx, header, td)?; + Self::insert_header(Some(&mut writer), &tx, header)?; } writer.commit()?; @@ -203,17 +197,7 @@ impl TestStageDB { where I: IntoIterator, { - self.insert_headers_inner::(headers) - } - - /// Inserts total difficulty of headers into the corresponding static file and tables. - /// - /// Superset functionality of [`TestStageDB::insert_headers`]. - pub fn insert_headers_with_td<'a, I>(&self, headers: I) -> ProviderResult<()> - where - I: IntoIterator, - { - self.insert_headers_inner::(headers) + self.insert_headers_inner::(headers) } /// Insert ordered collection of [`SealedBlock`] into corresponding tables. @@ -240,7 +224,7 @@ impl TestStageDB { .then(|| provider.latest_writer(StaticFileSegment::Headers).unwrap()); blocks.iter().try_for_each(|block| { - Self::insert_header(headers_writer.as_mut(), &tx, block.sealed_header(), U256::ZERO) + Self::insert_header(headers_writer.as_mut(), &tx, block.sealed_header()) })?; if let Some(mut writer) = headers_writer { diff --git a/crates/storage/db-api/src/tables/mod.rs b/crates/storage/db-api/src/tables/mod.rs index cd678260128..cf2a20fff04 100644 --- a/crates/storage/db-api/src/tables/mod.rs +++ b/crates/storage/db-api/src/tables/mod.rs @@ -308,7 +308,8 @@ tables! { type Value = HeaderHash; } - /// Stores the total difficulty from a block header. + /// Stores the total difficulty from block headers. + /// Note: Deprecated. table HeaderTerminalDifficulties { type Key = BlockNumber; type Value = CompactU256; diff --git a/crates/storage/db/benches/criterion.rs b/crates/storage/db/benches/criterion.rs index 64d6fbdbfdf..7d62384c164 100644 --- a/crates/storage/db/benches/criterion.rs +++ b/crates/storage/db/benches/criterion.rs @@ -31,7 +31,6 @@ pub fn db(c: &mut Criterion) { group.warm_up_time(std::time::Duration::from_millis(200)); measure_table_db::(&mut group); - measure_table_db::(&mut group); measure_table_db::(&mut group); measure_table_db::(&mut group); measure_table_db::(&mut group); @@ -48,7 +47,6 @@ pub fn serialization(c: &mut Criterion) { group.warm_up_time(std::time::Duration::from_millis(200)); measure_table_serialization::(&mut group); - measure_table_serialization::(&mut group); measure_table_serialization::(&mut group); measure_table_serialization::(&mut group); measure_table_serialization::(&mut group); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 4bb710abfef..31e87b46e62 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -2788,7 +2788,6 @@ impl BlockWrite /// tables: /// * [`StaticFileSegment::Headers`] /// * [`tables::HeaderNumbers`] - /// * [`tables::HeaderTerminalDifficulties`] /// * [`tables::BlockBodyIndices`] /// /// If there are transactions in the block, the following static file segments and tables will diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index afb2836abe4..3c25f157bb3 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -58,12 +58,10 @@ mod tests { test_utils::create_test_provider_factory, HeaderProvider, 
StaticFileProviderFactory, }; use alloy_consensus::{Header, SignableTransaction, Transaction, TxLegacy}; - use alloy_primitives::{BlockHash, Signature, TxNumber, B256, U256}; + use alloy_primitives::{BlockHash, Signature, TxNumber, B256}; use rand::seq::SliceRandom; use reth_db::test_utils::create_test_static_files_dir; - use reth_db_api::{ - transaction::DbTxMut, CanonicalHeaders, HeaderNumbers, HeaderTerminalDifficulties, Headers, - }; + use reth_db_api::{transaction::DbTxMut, CanonicalHeaders, HeaderNumbers, Headers}; use reth_ethereum_primitives::{EthPrimitives, Receipt, TransactionSigned}; use reth_static_file_types::{ find_fixed_range, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, @@ -102,14 +100,11 @@ mod tests { let mut provider_rw = factory.provider_rw().unwrap(); let tx = provider_rw.tx_mut(); - let mut td = U256::ZERO; for header in headers.clone() { - td += header.header().difficulty; let hash = header.hash(); tx.put::(header.number, hash).unwrap(); tx.put::(header.number, header.clone_header()).unwrap(); - tx.put::(header.number, td.into()).unwrap(); tx.put::(hash, header.number).unwrap(); } provider_rw.commit().unwrap(); diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 818b97e0c15..0b27c5dc992 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -31,10 +31,6 @@ pub fn assert_genesis_block( assert_eq!(tx.table::().unwrap(), vec![(h, n)]); assert_eq!(tx.table::().unwrap(), vec![(n, h)]); - assert_eq!( - tx.table::().unwrap(), - vec![(n, g.difficulty.into())] - ); assert_eq!( tx.table::().unwrap(), vec![(0, StoredBlockBodyIndices::default())] From 2c086f0ed366446a94ef1b106f7debbd07844395 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 21 Oct 2025 21:18:45 +0200 Subject: [PATCH 141/371] chore: rm generic array dep from discv4 (#19140) --- Cargo.lock | 2 -- crates/net/discv4/Cargo.toml | 2 -- crates/net/discv4/src/node.rs | 4 +--- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a07fa205da..b9de70bfded 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4013,7 +4013,6 @@ version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ - "serde", "typenum", "version_check", "zeroize", @@ -7739,7 +7738,6 @@ dependencies = [ "assert_matches", "discv5", "enr", - "generic-array", "itertools 0.14.0", "parking_lot", "rand 0.8.5", diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index 20691a6d929..fadda2b6348 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -35,7 +35,6 @@ tracing.workspace = true thiserror.workspace = true parking_lot.workspace = true rand_08 = { workspace = true, optional = true } -generic-array.workspace = true serde = { workspace = true, optional = true } itertools.workspace = true @@ -53,7 +52,6 @@ serde = [ "alloy-primitives/serde", "discv5/serde", "enr/serde", - "generic-array/serde", "parking_lot/serde", "rand_08?/serde", "secp256k1/serde", diff --git a/crates/net/discv4/src/node.rs b/crates/net/discv4/src/node.rs index 242c3883228..7e993ff8333 100644 --- a/crates/net/discv4/src/node.rs +++ b/crates/net/discv4/src/node.rs @@ -1,5 +1,4 @@ use alloy_primitives::keccak256; -use generic_array::GenericArray; use reth_network_peers::{NodeRecord, PeerId}; /// The key type for the table. 
@@ -15,8 +14,7 @@ impl From for NodeKey { impl From for discv5::Key { fn from(value: NodeKey) -> Self { let hash = keccak256(value.0.as_slice()); - let hash = *GenericArray::from_slice(hash.as_slice()); - Self::new_raw(value, hash) + Self::new_raw(value, hash.0.into()) } } From 21785a30e8917fdd43fd8160c1bc74e19a7ee020 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 21 Oct 2025 21:20:09 +0200 Subject: [PATCH 142/371] test: add node record parse test (#19172) --- crates/net/peers/src/node_record.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/crates/net/peers/src/node_record.rs b/crates/net/peers/src/node_record.rs index 0b1ef38b3dd..641f2d274dc 100644 --- a/crates/net/peers/src/node_record.rs +++ b/crates/net/peers/src/node_record.rs @@ -309,6 +309,18 @@ mod tests { } } + #[test] + fn test_node_record() { + let url = "enode://fc8a2ff614e848c0af4c99372a81b8655edb8e11b617cffd0aab1a0691bcca66ca533626a528ee567f05f70c8cb529bda2c0a864cc0aec638a367fd2bb8e49fb@127.0.0.1:35481?discport=0"; + let node: NodeRecord = url.parse().unwrap(); + assert_eq!(node, NodeRecord { + address: IpAddr::V4([127,0,0, 1].into()), + tcp_port: 35481, + udp_port: 0, + id: "0xfc8a2ff614e848c0af4c99372a81b8655edb8e11b617cffd0aab1a0691bcca66ca533626a528ee567f05f70c8cb529bda2c0a864cc0aec638a367fd2bb8e49fb".parse().unwrap(), + }) + } + #[test] fn test_url_parse() { let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; From 876346d143e5fde3ac9aca855e051d493e18b5d2 Mon Sep 17 00:00:00 2001 From: Alex Pikme <30472093+reject-i@users.noreply.github.com> Date: Tue, 21 Oct 2025 22:12:57 +0300 Subject: [PATCH 143/371] fix: add arrayvec to dev-dependencies in reth-trie-common (#19192) --- crates/trie/common/Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index f10e53a8389..2fcc23ab53b 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -53,6 +53,7 @@ alloy-genesis.workspace = true alloy-primitives = { workspace = true, features = ["getrandom"] } alloy-trie = { workspace = true, features = ["arbitrary", "serde"] } bytes.workspace = true +arrayvec.workspace = true hash-db.workspace = true plain_hasher.workspace = true arbitrary = { workspace = true, features = ["derive"] } @@ -75,6 +76,7 @@ std = [ "alloy-rpc-types-eth?/std", "alloy-serde?/std", "alloy-trie/std", + "arrayvec?/std", "bytes?/std", "derive_more/std", "nybbles/std", @@ -84,7 +86,6 @@ std = [ "serde_json/std", "revm-database/std", "revm-state/std", - "arrayvec?/std", ] eip1186 = ["alloy-rpc-types-eth/serde", "dep:alloy-serde"] serde = [ From ba6d593aa0b91a3dbb66f83b353567cebfa1861c Mon Sep 17 00:00:00 2001 From: Dmitry <98899785+mdqst@users.noreply.github.com> Date: Tue, 21 Oct 2025 22:13:25 +0300 Subject: [PATCH 144/371] chore: fix misleading log message for body size check (#19173) --- crates/net/ecies/src/codec.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/ecies/src/codec.rs b/crates/net/ecies/src/codec.rs index b5a10284cf2..938e44d9385 100644 --- a/crates/net/ecies/src/codec.rs +++ b/crates/net/ecies/src/codec.rs @@ -110,7 +110,7 @@ impl Decoder for ECIESCodec { self.ecies.read_header(&mut buf.split_to(ECIES::header_len()))?; if body_size > MAX_INITIAL_HANDSHAKE_SIZE { - trace!(?body_size, max=?MAX_INITIAL_HANDSHAKE_SIZE, "Header exceeds max initial handshake 
size"); + trace!(?body_size, max=?MAX_INITIAL_HANDSHAKE_SIZE, "Body exceeds max initial handshake size"); return Err(ECIESErrorImpl::InitialHeaderBodyTooLarge { body_size, max_body_size: MAX_INITIAL_HANDSHAKE_SIZE, From 1d58ae1ff8ae35c078c33f2ab18ec0222df2ae9d Mon Sep 17 00:00:00 2001 From: Merkel Tranjes <140164174+rnkrtt@users.noreply.github.com> Date: Tue, 21 Oct 2025 21:15:13 +0200 Subject: [PATCH 145/371] feat: improve oversized data error message (#19190) --- crates/ethereum/payload/src/lib.rs | 8 ++++---- crates/rpc/rpc-eth-types/src/error/mod.rs | 15 +++++++++++---- crates/transaction-pool/src/error.rs | 13 +++++++++---- crates/transaction-pool/src/validate/eth.rs | 13 ++++++++----- 4 files changed, 32 insertions(+), 17 deletions(-) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 7f40e983bc8..5b3eb9cfcbd 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -232,10 +232,10 @@ where if is_osaka && estimated_block_size_with_tx > MAX_RLP_BLOCK_SIZE { best_txs.mark_invalid( &pool_tx, - InvalidPoolTransactionError::OversizedData( - estimated_block_size_with_tx, - MAX_RLP_BLOCK_SIZE, - ), + InvalidPoolTransactionError::OversizedData { + size: estimated_block_size_with_tx, + limit: MAX_RLP_BLOCK_SIZE, + }, ); continue; } diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index c8645aa0325..ef65e4ccc2b 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -930,8 +930,13 @@ pub enum RpcPoolError { #[error("negative value")] NegativeValue, /// When oversized data is encountered - #[error("oversized data")] - OversizedData, + #[error("oversized data: transaction size {size}, limit {limit}")] + OversizedData { + /// Size of the transaction/input data that exceeded the limit. + size: usize, + /// Configured limit that was exceeded. + limit: usize, + }, /// When the max initcode size is exceeded #[error("max initcode size exceeded")] ExceedsMaxInitCodeSize, @@ -973,7 +978,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { RpcPoolError::MaxTxGasLimitExceeded | RpcPoolError::ExceedsFeeCap { .. } | RpcPoolError::NegativeValue | - RpcPoolError::OversizedData | + RpcPoolError::OversizedData { .. } | RpcPoolError::ExceedsMaxInitCodeSize | RpcPoolError::PoolTransactionError(_) | RpcPoolError::Eip4844(_) | @@ -1017,7 +1022,9 @@ impl From for RpcPoolError { InvalidPoolTransactionError::IntrinsicGasTooLow => { Self::Invalid(RpcInvalidTransactionError::GasTooLow) } - InvalidPoolTransactionError::OversizedData(_, _) => Self::OversizedData, + InvalidPoolTransactionError::OversizedData { size, limit } => { + Self::OversizedData { size, limit } + } InvalidPoolTransactionError::Underpriced => Self::Underpriced, InvalidPoolTransactionError::Eip2681 => { Self::Invalid(RpcInvalidTransactionError::NonceMaxValue) diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 6360817caa1..3bcbb4cd0ab 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -237,8 +237,13 @@ pub enum InvalidPoolTransactionError { /// Thrown if the input data of a transaction is greater /// than some meaningful limit a user might use. This is not a consensus error /// making the transaction invalid, rather a DOS protection. 
- #[error("input data too large")] - OversizedData(usize, usize), + #[error("oversized data: transaction size {size}, limit {limit}")] + OversizedData { + /// Size of the transaction/input data that exceeded the limit. + size: usize, + /// Configured limit that was exceeded. + limit: usize, + }, /// Thrown if the transaction's fee is below the minimum fee #[error("transaction underpriced")] Underpriced, @@ -335,7 +340,7 @@ impl InvalidPoolTransactionError { } Self::ExceedsFeeCap { max_tx_fee_wei: _, tx_fee_cap_wei: _ } => true, Self::ExceedsMaxInitCodeSize(_, _) => true, - Self::OversizedData(_, _) => true, + Self::OversizedData { .. } => true, Self::Underpriced => { // local setting false @@ -393,7 +398,7 @@ impl InvalidPoolTransactionError { /// Returns `true` if an import failed due to an oversized transaction pub const fn is_oversized(&self) -> bool { - matches!(self, Self::OversizedData(_, _)) + matches!(self, Self::OversizedData { .. }) } /// Returns `true` if an import failed due to nonce gap. diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 038c820bfe9..1436093d5bf 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -326,10 +326,10 @@ where if tx_input_len > self.max_tx_input_bytes { return Err(TransactionValidationOutcome::Invalid( transaction, - InvalidPoolTransactionError::OversizedData( - tx_input_len, - self.max_tx_input_bytes, - ), + InvalidPoolTransactionError::OversizedData { + size: tx_input_len, + limit: self.max_tx_input_bytes, + }, )) } } else { @@ -338,7 +338,10 @@ where if tx_size > self.max_tx_input_bytes { return Err(TransactionValidationOutcome::Invalid( transaction, - InvalidPoolTransactionError::OversizedData(tx_size, self.max_tx_input_bytes), + InvalidPoolTransactionError::OversizedData { + size: tx_size, + limit: self.max_tx_input_bytes, + }, )) } } From c6af584b007df2e84e62204ccfde6f71eeabaeeb Mon Sep 17 00:00:00 2001 From: Avory Date: Tue, 21 Oct 2025 22:19:56 +0300 Subject: [PATCH 146/371] docs: improve SealedBlockRecoveryError documentation (#19120) --- crates/primitives-traits/src/block/error.rs | 31 +++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/crates/primitives-traits/src/block/error.rs b/crates/primitives-traits/src/block/error.rs index f61d352bba4..ccb727ce88a 100644 --- a/crates/primitives-traits/src/block/error.rs +++ b/crates/primitives-traits/src/block/error.rs @@ -3,6 +3,37 @@ use crate::transaction::signed::RecoveryError; /// Type alias for [`BlockRecoveryError`] with a [`SealedBlock`](crate::SealedBlock) value. +/// +/// This error type is specifically used when recovering a sealed block fails. +/// It contains the original sealed block that could not be recovered, allowing +/// callers to inspect the problematic block or attempt recovery with different +/// parameters. 
+/// +/// # Example +/// +/// ```rust +/// use alloy_consensus::{Block, BlockBody, Header, Signed, TxEnvelope, TxLegacy}; +/// use alloy_primitives::{Signature, B256}; +/// use reth_primitives_traits::{block::error::SealedBlockRecoveryError, SealedBlock}; +/// +/// // Create a simple block for demonstration +/// let header = Header::default(); +/// let tx = TxLegacy::default(); +/// let signed_tx = Signed::new_unchecked(tx, Signature::test_signature(), B256::ZERO); +/// let envelope = TxEnvelope::Legacy(signed_tx); +/// let body = BlockBody { transactions: vec![envelope], ommers: vec![], withdrawals: None }; +/// let block = Block::new(header, body); +/// let sealed_block = SealedBlock::new_unchecked(block, B256::ZERO); +/// +/// // Simulate a block recovery operation that fails +/// let block_recovery_result: Result<(), SealedBlockRecoveryError<_>> = +/// Err(SealedBlockRecoveryError::new(sealed_block)); +/// +/// // When block recovery fails, you get the error with the original block +/// let error = block_recovery_result.unwrap_err(); +/// let failed_block = error.into_inner(); +/// // Now you can inspect the failed block or try recovery again +/// ``` pub type SealedBlockRecoveryError = BlockRecoveryError>; /// Error when recovering a block from [`SealedBlock`](crate::SealedBlock) to From e810df943b962dae2587d1c8fb11b9acd4a59747 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 21 Oct 2025 18:14:16 -0400 Subject: [PATCH 147/371] feat(engine): improve payload validator tracing spans 2 (#19155) --- crates/engine/tree/src/chain.rs | 2 +- crates/engine/tree/src/tree/cached_state.rs | 18 ++- crates/engine/tree/src/tree/metrics.rs | 2 +- crates/engine/tree/src/tree/mod.rs | 15 ++- .../tree/src/tree/payload_processor/mod.rs | 26 +++- .../src/tree/payload_processor/multiproof.rs | 18 ++- .../src/tree/payload_processor/prewarm.rs | 40 +++++- .../src/tree/payload_processor/sparse_trie.rs | 28 +++- .../engine/tree/src/tree/payload_validator.rs | 123 +++++++++++------- crates/node/core/src/args/trace.rs | 2 +- .../src/segments/user/account_history.rs | 2 +- .../prune/prune/src/segments/user/receipts.rs | 2 +- .../src/segments/user/receipts_by_logs.rs | 2 +- .../src/segments/user/sender_recovery.rs | 2 +- .../src/segments/user/storage_history.rs | 2 +- .../src/segments/user/transaction_lookup.rs | 2 +- crates/rpc/ipc/src/server/ipc.rs | 4 +- crates/rpc/ipc/src/server/mod.rs | 2 +- crates/rpc/rpc/src/engine.rs | 2 +- crates/trie/db/src/state.rs | 3 +- crates/trie/parallel/src/proof_task.rs | 45 +++++-- crates/trie/sparse-parallel/src/trie.rs | 18 ++- crates/trie/sparse/Cargo.toml | 2 +- crates/trie/sparse/src/state.rs | 15 ++- crates/trie/sparse/src/trie.rs | 14 +- crates/trie/trie/src/hashed_cursor/mock.rs | 4 +- crates/trie/trie/src/trie_cursor/mock.rs | 8 +- docs/vocs/docs/pages/cli/reth.mdx | 2 +- docs/vocs/docs/pages/cli/reth/config.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/checksum.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/clear.mdx | 2 +- .../docs/pages/cli/reth/db/clear/mdbx.mdx | 2 +- .../pages/cli/reth/db/clear/static-file.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/diff.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/drop.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/get.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx | 2 +- .../pages/cli/reth/db/get/static-file.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/list.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/path.mdx | 2 +- 
.../docs/pages/cli/reth/db/repair-trie.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/stats.mdx | 2 +- docs/vocs/docs/pages/cli/reth/db/version.mdx | 2 +- docs/vocs/docs/pages/cli/reth/download.mdx | 2 +- .../vocs/docs/pages/cli/reth/dump-genesis.mdx | 2 +- docs/vocs/docs/pages/cli/reth/export-era.mdx | 2 +- docs/vocs/docs/pages/cli/reth/import-era.mdx | 2 +- docs/vocs/docs/pages/cli/reth/import.mdx | 2 +- docs/vocs/docs/pages/cli/reth/init-state.mdx | 2 +- docs/vocs/docs/pages/cli/reth/init.mdx | 2 +- docs/vocs/docs/pages/cli/reth/node.mdx | 2 +- docs/vocs/docs/pages/cli/reth/p2p.mdx | 2 +- docs/vocs/docs/pages/cli/reth/p2p/body.mdx | 2 +- .../vocs/docs/pages/cli/reth/p2p/bootnode.mdx | 2 +- docs/vocs/docs/pages/cli/reth/p2p/header.mdx | 2 +- docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx | 2 +- .../docs/pages/cli/reth/p2p/rlpx/ping.mdx | 2 +- docs/vocs/docs/pages/cli/reth/prune.mdx | 2 +- docs/vocs/docs/pages/cli/reth/re-execute.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 2 +- .../cli/reth/stage/dump/account-hashing.mdx | 2 +- .../pages/cli/reth/stage/dump/execution.mdx | 2 +- .../docs/pages/cli/reth/stage/dump/merkle.mdx | 2 +- .../cli/reth/stage/dump/storage-hashing.mdx | 2 +- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 2 +- .../vocs/docs/pages/cli/reth/stage/unwind.mdx | 2 +- .../cli/reth/stage/unwind/num-blocks.mdx | 2 +- .../pages/cli/reth/stage/unwind/to-block.mdx | 2 +- 71 files changed, 340 insertions(+), 151 deletions(-) diff --git a/crates/engine/tree/src/chain.rs b/crates/engine/tree/src/chain.rs index e2893bb976a..3e6207c9d40 100644 --- a/crates/engine/tree/src/chain.rs +++ b/crates/engine/tree/src/chain.rs @@ -71,7 +71,7 @@ where /// Internal function used to advance the chain. /// /// Polls the `ChainOrchestrator` for the next event. - #[tracing::instrument(level = "debug", name = "ChainOrchestrator::poll", skip(self, cx))] + #[tracing::instrument(level = "debug", target = "engine::tree::chain_orchestrator", skip_all)] fn poll_next_event(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index ffd7f49c6fc..c1bb028cab2 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -18,7 +18,7 @@ use reth_trie::{ MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; use std::{sync::Arc, time::Duration}; -use tracing::trace; +use tracing::{debug_span, instrument, trace}; pub(crate) type Cache = mini_moka::sync::Cache; @@ -354,6 +354,7 @@ impl ExecutionCache { } /// Invalidates the storage for all addresses in the set + #[instrument(level = "debug", target = "engine::caching", skip_all, fields(accounts = addresses.len()))] pub(crate) fn invalidate_storages(&self, addresses: HashSet<&Address>) { // NOTE: this must collect because the invalidate function should not be called while we // hold an iter for it @@ -385,12 +386,25 @@ impl ExecutionCache { /// ## Error Handling /// /// Returns an error if the state updates are inconsistent and should be discarded. 
+ #[instrument(level = "debug", target = "engine::caching", skip_all)] pub(crate) fn insert_state(&self, state_updates: &BundleState) -> Result<(), ()> { + let _enter = + debug_span!(target: "engine::tree", "contracts", len = state_updates.contracts.len()) + .entered(); // Insert bytecodes for (code_hash, bytecode) in &state_updates.contracts { self.code_cache.insert(*code_hash, Some(Bytecode(bytecode.clone()))); } - + drop(_enter); + + let _enter = debug_span!( + target: "engine::tree", + "accounts", + accounts = state_updates.state.len(), + storages = + state_updates.state.values().map(|account| account.storage.len()).sum::() + ) + .entered(); let mut invalidated_accounts = HashSet::default(); for (addr, account) in &state_updates.state { // If the account was not modified, as in not changed and not destroyed, then we have diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index c014d8ba15e..1d1e208b0a6 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -79,7 +79,7 @@ impl EngineApiMetrics { for tx in transactions { let tx = tx?; let span = - debug_span!(target: "engine::tree", "execute_tx", tx_hash=?tx.tx().tx_hash()); + debug_span!(target: "engine::tree", "execute tx", tx_hash=?tx.tx().tx_hash()); let _enter = span.enter(); trace!(target: "engine::tree", "Executing transaction"); executor.execute_transaction(tx)?; diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index e66b2a8892e..a189b643f98 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -496,7 +496,12 @@ where /// /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and /// returns an error if an internal error occurred. - #[instrument(level = "trace", skip_all, fields(block_hash = %payload.block_hash(), block_num = %payload.block_number(),), target = "engine::tree")] + #[instrument( + level = "debug", + target = "engine::tree", + skip_all, + fields(block_hash = %payload.block_hash(), block_num = %payload.block_number()), + )] fn on_new_payload( &mut self, payload: T::ExecutionData, @@ -577,6 +582,7 @@ where /// - `Valid`: Payload successfully validated and inserted /// - `Syncing`: Parent missing, payload buffered for later /// - Error status: Payload is invalid + #[instrument(level = "debug", target = "engine::tree", skip_all)] fn try_insert_payload( &mut self, payload: T::ExecutionData, @@ -970,7 +976,7 @@ where /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). /// /// Returns an error if an internal error occurred like a database error. - #[instrument(level = "trace", skip_all, fields(head = % state.head_block_hash, safe = % state.safe_block_hash,finalized = % state.finalized_block_hash), target = "engine::tree")] + #[instrument(level = "debug", target = "engine::tree", skip_all, fields(head = % state.head_block_hash, safe = % state.safe_block_hash,finalized = % state.finalized_block_hash))] fn on_forkchoice_updated( &mut self, state: ForkchoiceState, @@ -1972,7 +1978,7 @@ where } /// Attempts to connect any buffered blocks that are connected to the given parent hash. 
- #[instrument(level = "trace", skip(self), target = "engine::tree")] + #[instrument(level = "debug", target = "engine::tree", skip(self))] fn try_connect_buffered_blocks( &mut self, parent: BlockNumHash, @@ -2281,7 +2287,7 @@ where /// Returns an event with the appropriate action to take, such as: /// - download more missing blocks /// - try to canonicalize the target if the `block` is the tracked target (head) block. - #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number(),), target = "engine::tree")] + #[instrument(level = "debug", target = "engine::tree", skip_all, fields(block_hash = %block.hash(), block_num = %block.number()))] fn on_downloaded_block( &mut self, block: RecoveredBlock, @@ -2387,6 +2393,7 @@ where /// Returns `InsertPayloadOk::Inserted(BlockStatus::Valid)` on successful execution, /// `InsertPayloadOk::AlreadySeen` if the block already exists, or /// `InsertPayloadOk::Inserted(BlockStatus::Disconnected)` if parent state is missing. + #[instrument(level = "debug", target = "engine::tree", skip_all, fields(block_id))] fn insert_block_or_payload( &mut self, block_id: BlockWithParent, diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index d2e48a49899..090be01a0ec 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -45,7 +45,7 @@ use std::sync::{ mpsc::{self, channel, Sender}, Arc, }; -use tracing::{debug, instrument, warn}; +use tracing::{debug, debug_span, instrument, warn}; mod configured_sparse_trie; pub mod executor; @@ -167,6 +167,12 @@ where /// This returns a handle to await the final state root and to interact with the tasks (e.g. /// canceling) #[allow(clippy::type_complexity)] + #[instrument( + level = "debug", + target = "engine::tree::payload_processor", + name = "payload processor", + skip_all + )] pub fn spawn>( &mut self, env: ExecutionEnv, @@ -187,6 +193,7 @@ where + Clone + 'static, { + let span = tracing::Span::current(); let (to_sparse_trie, sparse_trie_rx) = channel(); // spawn multiproof task, save the trie input let (trie_input, state_root_config) = MultiProofConfig::from_input(trie_input); @@ -237,6 +244,7 @@ where // spawn multi-proof task self.executor.spawn_blocking(move || { + let _enter = span.entered(); multi_proof_task.run(); }); @@ -257,6 +265,7 @@ where /// Spawns a task that exclusively handles cache prewarming for transaction execution. /// /// Returns a [`PayloadHandle`] to communicate with the task. + #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] pub(super) fn spawn_cache_exclusive>( &self, env: ExecutionEnv, @@ -353,7 +362,9 @@ where // spawn pre-warm task { let to_prewarm_task = to_prewarm_task.clone(); + let span = debug_span!(target: "engine::tree::payload_processor", "prewarm task"); self.executor.spawn_blocking(move || { + let _enter = span.entered(); prewarm_task.run(transactions, to_prewarm_task); }); } @@ -370,7 +381,7 @@ where /// /// If the given hash is different then what is recently cached, then this will create a new /// instance. 
- #[instrument(target = "engine::caching", skip(self))] + #[instrument(level = "debug", target = "engine::caching", skip(self))] fn cache_for(&self, parent_hash: B256) -> SavedCache { if let Some(cache) = self.execution_cache.get_cache_for(parent_hash) { debug!("reusing execution cache"); @@ -383,6 +394,7 @@ where } /// Spawns the [`SparseTrieTask`] for this payload processor. + #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] fn spawn_sparse_trie_task( &self, sparse_trie_rx: mpsc::Receiver, @@ -421,13 +433,18 @@ where sparse_state_trie, ); + let span = tracing::Span::current(); self.executor.spawn_blocking(move || { + let _enter = span.entered(); + let (result, trie) = task.run(); // Send state root computation result let _ = state_root_tx.send(result); - // Clear the SparseStateTrie and replace it back into the mutex _after_ sending results - // to the next step, so that time spent clearing doesn't block the step after this one. + // Clear the SparseStateTrie and replace it back into the mutex _after_ sending + // results to the next step, so that time spent clearing doesn't block the step after + // this one. + let _enter = debug_span!(target: "engine::tree::payload_processor", "clear").entered(); cleared_sparse_trie.lock().replace(ClearedSparseStateTrie::from_state_trie(trie)); }); } @@ -452,6 +469,7 @@ impl PayloadHandle { /// # Panics /// /// If payload processing was started without background tasks. + #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] pub fn state_root(&mut self) -> Result { self.state_root .take() diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index a528b759570..815ca72fbf0 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -32,7 +32,7 @@ use std::{ }, time::{Duration, Instant}, }; -use tracing::{debug, error, trace}; +use tracing::{debug, error, instrument, trace}; /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the /// state. @@ -718,6 +718,7 @@ impl MultiProofTask { /// Handles request for proof prefetch. /// /// Returns a number of proofs that were spawned. + #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip_all, fields(accounts = targets.len()))] fn on_prefetch_proof(&mut self, targets: MultiProofTargets) -> u64 { let proof_targets = self.get_prefetch_proof_targets(targets); self.fetched_proof_targets.extend_ref(&proof_targets); @@ -779,7 +780,7 @@ impl MultiProofTask { let all_proofs_processed = proofs_processed >= state_update_proofs_requested + prefetch_proofs_requested; let no_pending = !self.proof_sequencer.has_pending(); - debug!( + trace!( target: "engine::root", proofs_processed, state_update_proofs_requested, @@ -844,6 +845,7 @@ impl MultiProofTask { /// Handles state updates. /// /// Returns a number of proofs that were spawned. + #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip(self, update), fields(accounts = update.len()))] fn on_state_update(&mut self, source: StateChangeSource, update: EvmState) -> u64 { let hashed_state_update = evm_state_to_hashed_post_state(update); @@ -973,6 +975,12 @@ impl MultiProofTask { /// currently being calculated, or if there are any pending proofs in the proof sequencer /// left to be revealed by checking the pending tasks. /// 6. 
This task exits after all pending proofs are processed. + #[instrument( + level = "debug", + name = "MultiProofTask::run", + target = "engine::tree::payload_processor::multiproof", + skip_all + )] pub(crate) fn run(mut self) { // TODO convert those into fields let mut prefetch_proofs_requested = 0; @@ -1008,7 +1016,7 @@ impl MultiProofTask { let storage_targets = targets.values().map(|slots| slots.len()).sum::(); prefetch_proofs_requested += self.on_prefetch_proof(targets); - debug!( + trace!( target: "engine::root", account_targets, storage_targets, @@ -1029,7 +1037,7 @@ impl MultiProofTask { let len = update.len(); state_update_proofs_requested += self.on_state_update(source, update); - debug!( + trace!( target: "engine::root", ?source, len, @@ -1091,7 +1099,7 @@ impl MultiProofTask { .proof_calculation_duration_histogram .record(proof_calculated.elapsed); - debug!( + trace!( target: "engine::root", sequence = proof_calculated.sequence_number, total_proofs = proofs_processed, diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 44293614d3d..de8a88a167b 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -39,7 +39,7 @@ use std::{ }, time::Instant, }; -use tracing::{debug, trace, warn}; +use tracing::{debug, debug_span, instrument, trace, warn}; /// A wrapper for transactions that includes their index in the block. #[derive(Clone)] @@ -139,8 +139,11 @@ where let ctx = self.ctx.clone(); let max_concurrency = self.max_concurrency; let transaction_count_hint = self.transaction_count_hint; + let span = tracing::Span::current(); self.executor.spawn_blocking(move || { + let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", parent: span, "spawn_all").entered(); + let (done_tx, done_rx) = mpsc::channel(); let mut executing = 0usize; @@ -157,8 +160,8 @@ where }; // Only spawn initial workers as needed - for _ in 0..workers_needed { - handles.push(ctx.spawn_worker(&executor, actions_tx.clone(), done_tx.clone())); + for i in 0..workers_needed { + handles.push(ctx.spawn_worker(i, &executor, actions_tx.clone(), done_tx.clone())); } let mut tx_index = 0usize; @@ -248,6 +251,7 @@ where /// the new, warmed cache to be inserted. /// /// This method is called from `run()` only after all execution tasks are complete. + #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn save_cache(self, state: BundleState) { let start = Instant::now(); @@ -284,6 +288,12 @@ where /// /// This will execute the transactions until all transactions have been processed or the task /// was cancelled. + #[instrument( + level = "debug", + target = "engine::tree::payload_processor::prewarm", + name = "prewarm", + skip_all + )] pub(super) fn run( self, pending: mpsc::Receiver + Clone + Send + 'static>, @@ -364,6 +374,7 @@ where { /// Splits this context into an evm, an evm config, metrics, and the atomic bool for terminating /// execution. 
+ #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn evm_for_ctx(self) -> Option<(EvmFor, PrewarmMetrics, Arc)> { let Self { env, @@ -380,7 +391,7 @@ where Ok(provider) => provider, Err(err) => { trace!( - target: "engine::tree", + target: "engine::tree::payload_processor::prewarm", %err, "Failed to build state provider in prewarm thread" ); @@ -429,6 +440,7 @@ where /// /// Note: There are no ordering guarantees; this does not reflect the state produced by /// sequential execution. + #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn transact_batch( self, txs: mpsc::Receiver>, @@ -439,7 +451,15 @@ where { let Some((mut evm, metrics, terminate_execution)) = self.evm_for_ctx() else { return }; - while let Ok(IndexedTransaction { index, tx }) = txs.recv() { + while let Ok(IndexedTransaction { index, tx }) = { + let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", "recv tx") + .entered(); + txs.recv() + } { + let _enter = + debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm tx", index, tx_hash=%tx.tx().tx_hash()) + .entered(); + // If the task was cancelled, stop execution, send an empty result to notify the task, // and exit. if terminate_execution.load(Ordering::Relaxed) { @@ -467,12 +487,18 @@ where }; metrics.execution_duration.record(start.elapsed()); + drop(_enter); + // Only send outcome for transactions after the first txn // as the main execution will be just as fast if index > 0 { + let _enter = + debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm outcome", index, tx_hash=%tx.tx().tx_hash()) + .entered(); let (targets, storage_targets) = multiproof_targets_from_state(res.state); metrics.prefetch_storage_targets.record(storage_targets as f64); let _ = sender.send(PrewarmTaskEvent::Outcome { proof_targets: Some(targets) }); + drop(_enter); } metrics.total_runtime.record(start.elapsed()); @@ -485,6 +511,7 @@ where /// Spawns a worker task for transaction execution and returns its sender channel. fn spawn_worker( &self, + idx: usize, executor: &WorkloadExecutor, actions_tx: Sender, done_tx: Sender<()>, @@ -494,8 +521,11 @@ where { let (tx, rx) = mpsc::channel(); let ctx = self.clone(); + let span = + debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm worker", idx); executor.spawn_blocking(move || { + let _enter = span.entered(); ctx.transact_batch(rx, actions_tx, done_tx); }); diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index c16f7b6e4f4..6302abde5fb 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -15,7 +15,7 @@ use std::{ sync::mpsc, time::{Duration, Instant}, }; -use tracing::{debug, trace, trace_span}; +use tracing::{debug, debug_span, instrument, trace}; /// A task responsible for populating the sparse trie. pub(super) struct SparseTrieTask @@ -61,6 +61,11 @@ where /// /// - State root computation outcome. /// - `SparseStateTrie` that needs to be cleared and reused to avoid reallocations. 
+ #[instrument( + level = "debug", + target = "engine::tree::payload_processor::sparse_trie", + skip_all + )] pub(super) fn run( mut self, ) -> (Result, SparseStateTrie) { @@ -80,10 +85,14 @@ where while let Ok(mut update) = self.updates.recv() { num_iterations += 1; let mut num_updates = 1; + let _enter = + debug_span!(target: "engine::tree::payload_processor::sparse_trie", "drain updates") + .entered(); while let Ok(next) = self.updates.try_recv() { update.extend(next); num_updates += 1; } + drop(_enter); debug!( target: "engine::root", @@ -130,6 +139,7 @@ pub struct StateRootComputeOutcome { } /// Updates the sparse trie with the given proofs and state, and returns the elapsed time. +#[instrument(level = "debug", target = "engine::tree::payload_processor::sparse_trie", skip_all)] pub(crate) fn update_sparse_trie( trie: &mut SparseStateTrie, SparseTrieUpdate { mut state, multiproof }: SparseTrieUpdate, @@ -155,6 +165,7 @@ where ); // Update storage slots with new values and calculate storage roots. + let span = tracing::Span::current(); let (tx, rx) = mpsc::channel(); state .storages @@ -162,14 +173,16 @@ where .map(|(address, storage)| (address, storage, trie.take_storage_trie(&address))) .par_bridge() .map(|(address, storage, storage_trie)| { - let span = trace_span!(target: "engine::root::sparse", "Storage trie", ?address); - let _enter = span.enter(); - trace!(target: "engine::root::sparse", "Updating storage"); + let _enter = + debug_span!(target: "engine::tree::payload_processor::sparse_trie", parent: span.clone(), "storage trie", ?address) + .entered(); + + trace!(target: "engine::tree::payload_processor::sparse_trie", "Updating storage"); let storage_provider = blinded_provider_factory.storage_node_provider(address); let mut storage_trie = storage_trie.ok_or(SparseTrieErrorKind::Blind)?; if storage.wiped { - trace!(target: "engine::root::sparse", "Wiping storage"); + trace!(target: "engine::tree::payload_processor::sparse_trie", "Wiping storage"); storage_trie.wipe()?; } @@ -187,7 +200,7 @@ where continue; } - trace!(target: "engine::root::sparse", ?slot_nibbles, "Updating storage slot"); + trace!(target: "engine::tree::payload_processor::sparse_trie", ?slot_nibbles, "Updating storage slot"); storage_trie.update_leaf( slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec(), @@ -219,6 +232,9 @@ where let mut removed_accounts = Vec::new(); // Update account storage roots + let _enter = + tracing::debug_span!(target: "engine::tree::payload_processor::sparse_trie", "account trie") + .entered(); for result in rx { let (address, storage_trie) = result?; trie.insert_storage_trie(address, storage_trie); diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 4a3d45af8fd..2770d9a3f9d 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -44,9 +44,8 @@ use reth_trie::{ }; use reth_trie_db::DatabaseHashedPostState; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; -use revm::context::Block; use std::{collections::HashMap, sync::Arc, time::Instant}; -use tracing::{debug, debug_span, error, info, trace, warn}; +use tracing::{debug, debug_span, error, info, instrument, trace, warn}; /// Context providing access to tree state during validation. 
/// @@ -289,7 +288,7 @@ where V: PayloadValidator, { debug!( - target: "engine::tree", + target: "engine::tree::payload_validator", ?execution_err, block = ?input.num_hash(), "Block execution failed, checking for header validation errors" @@ -324,6 +323,15 @@ where /// - Block execution /// - State root computation /// - Fork detection + #[instrument( + level = "debug", + target = "engine::tree::payload_validator", + skip_all, + fields( + parent = ?input.parent_hash(), + type_name = ?input.type_name(), + ) + )] pub fn validate_block_with_state>>( &mut self, input: BlockOrPayload, @@ -366,7 +374,9 @@ where let parent_hash = input.parent_hash(); let block_num_hash = input.num_hash(); - trace!(target: "engine::tree", block=?block_num_hash, parent=?parent_hash, "Fetching block state provider"); + trace!(target: "engine::tree::payload_validator", "Fetching block state provider"); + let _enter = + debug_span!(target: "engine::tree::payload_validator", "state provider").entered(); let Some(provider_builder) = ensure_ok!(self.state_provider_builder(parent_hash, ctx.state())) else { @@ -377,8 +387,8 @@ where ) .into()) }; - let state_provider = ensure_ok!(provider_builder.build()); + drop(_enter); // fetch parent block let Some(parent_block) = ensure_ok!(self.sealed_header_by_hash(parent_hash, ctx.state())) @@ -390,7 +400,9 @@ where .into()) }; - let evm_env = self.evm_env_for(&input).map_err(NewPayloadError::other)?; + let evm_env = debug_span!(target: "engine::tree::payload_validator", "evm env") + .in_scope(|| self.evm_env_for(&input)) + .map_err(NewPayloadError::other)?; let env = ExecutionEnv { evm_env, hash: input.hash(), parent_hash: input.parent_hash() }; @@ -400,8 +412,7 @@ where let strategy = state_root_plan.strategy; debug!( - target: "engine::tree", - block=?block_num_hash, + target: "engine::tree::payload_validator", ?strategy, "Deciding which state root algorithm to run" ); @@ -417,7 +428,6 @@ where persisting_kind, parent_hash, ctx.state(), - block_num_hash, strategy, )); @@ -452,7 +462,7 @@ where block ); - debug!(target: "engine::tree", block=?block_num_hash, "Calculating block state root"); + debug!(target: "engine::tree::payload_validator", "Calculating block state root"); let root_time = Instant::now(); @@ -460,17 +470,17 @@ where match strategy { StateRootStrategy::StateRootTask => { - debug!(target: "engine::tree", block=?block_num_hash, "Using sparse trie state root algorithm"); + debug!(target: "engine::tree::payload_validator", "Using sparse trie state root algorithm"); match handle.state_root() { Ok(StateRootComputeOutcome { state_root, trie_updates }) => { let elapsed = root_time.elapsed(); - info!(target: "engine::tree", ?state_root, ?elapsed, "State root task finished"); + info!(target: "engine::tree::payload_validator", ?state_root, ?elapsed, "State root task finished"); // we double check the state root here for good measure if state_root == block.header().state_root() { maybe_state_root = Some((state_root, trie_updates, elapsed)) } else { warn!( - target: "engine::tree", + target: "engine::tree::payload_validator", ?state_root, block_state_root = ?block.header().state_root(), "State root task returned incorrect state root" @@ -478,12 +488,12 @@ where } } Err(error) => { - debug!(target: "engine::tree", %error, "State root task failed"); + debug!(target: "engine::tree::payload_validator", %error, "State root task failed"); } } } StateRootStrategy::Parallel => { - debug!(target: "engine::tree", block=?block_num_hash, "Using parallel state root algorithm"); + 
debug!(target: "engine::tree::payload_validator", "Using parallel state root algorithm"); match self.compute_state_root_parallel( persisting_kind, block.parent_hash(), @@ -493,8 +503,7 @@ where Ok(result) => { let elapsed = root_time.elapsed(); info!( - target: "engine::tree", - block = ?block_num_hash, + target: "engine::tree::payload_validator", regular_state_root = ?result.0, ?elapsed, "Regular root task finished" @@ -502,7 +511,7 @@ where maybe_state_root = Some((result.0, result.1, elapsed)); } Err(error) => { - debug!(target: "engine::tree", %error, "Parallel state root computation failed"); + debug!(target: "engine::tree::payload_validator", %error, "Parallel state root computation failed"); } } } @@ -519,9 +528,9 @@ where } else { // fallback is to compute the state root regularly in sync if self.config.state_root_fallback() { - debug!(target: "engine::tree", block=?block_num_hash, "Using state root fallback for testing"); + debug!(target: "engine::tree::payload_validator", "Using state root fallback for testing"); } else { - warn!(target: "engine::tree", block=?block_num_hash, ?persisting_kind, "Failed to compute state root in parallel"); + warn!(target: "engine::tree::payload_validator", ?persisting_kind, "Failed to compute state root in parallel"); self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); } @@ -533,7 +542,7 @@ where }; self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); - debug!(target: "engine::tree", ?root_elapsed, block=?block_num_hash, "Calculated state root"); + debug!(target: "engine::tree::payload_validator", ?root_elapsed, "Calculated state root"); // ensure state root matches if state_root != block.header().state_root() { @@ -587,12 +596,12 @@ where /// and block body itself. 
fn validate_block_inner(&self, block: &RecoveredBlock) -> Result<(), ConsensusError> { if let Err(e) = self.consensus.validate_header(block.sealed_header()) { - error!(target: "engine::tree", ?block, "Failed to validate header {}: {e}", block.hash()); + error!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {}: {e}", block.hash()); return Err(e) } if let Err(e) = self.consensus.validate_block_pre_execution(block.sealed_block()) { - error!(target: "engine::tree", ?block, "Failed to validate block {}: {e}", block.hash()); + error!(target: "engine::tree::payload_validator", ?block, "Failed to validate block {}: {e}", block.hash()); return Err(e) } @@ -600,6 +609,7 @@ where } /// Executes a block with the given state provider + #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn execute_block( &mut self, state_provider: S, @@ -614,11 +624,7 @@ where T: PayloadTypes>, Evm: ConfigureEngineEvm, { - let num_hash = NumHash::new(env.evm_env.block_env.number().to(), env.hash); - - let span = debug_span!(target: "engine::tree", "execute_block", num = ?num_hash.number, hash = ?num_hash.hash); - let _enter = span.enter(); - debug!(target: "engine::tree", "Executing block"); + debug!(target: "engine::tree::payload_validator", "Executing block"); let mut db = State::builder() .with_database(StateProviderDatabase::new(&state_provider)) @@ -657,7 +663,7 @@ where )?; let execution_finish = Instant::now(); let execution_time = execution_finish.duration_since(execution_start); - debug!(target: "engine::tree", elapsed = ?execution_time, number=?num_hash.number, "Executed block"); + debug!(target: "engine::tree::payload_validator", elapsed = ?execution_time, "Executed block"); Ok(output) } @@ -669,6 +675,7 @@ where /// Returns `Err(_)` if error was encountered during computation. /// `Err(ProviderError::ConsistentView(_))` can be safely ignored and fallback computation /// should be used instead. + #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn compute_state_root_parallel( &self, persisting_kind: PersistingKind, @@ -709,7 +716,7 @@ where { let start = Instant::now(); - trace!(target: "engine::tree", block=?block.num_hash(), "Validating block consensus"); + trace!(target: "engine::tree::payload_validator", block=?block.num_hash(), "Validating block consensus"); // validate block consensus rules if let Err(e) = self.validate_block_inner(block) { return Err(e.into()) @@ -719,7 +726,7 @@ where if let Err(e) = self.consensus.validate_header_against_parent(block.sealed_header(), parent_block) { - warn!(target: "engine::tree", ?block, "Failed to validate header {} against parent: {e}", block.hash()); + warn!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {} against parent: {e}", block.hash()); return Err(e.into()) } @@ -759,6 +766,12 @@ where /// The method handles strategy fallbacks if the preferred approach fails, ensuring /// block execution always completes with a valid state root. 
#[allow(clippy::too_many_arguments)] + #[instrument( + level = "debug", + target = "engine::tree::payload_validator", + skip_all, + fields(strategy) + )] fn spawn_payload_processor>( &mut self, env: ExecutionEnv, @@ -767,7 +780,6 @@ where persisting_kind: PersistingKind, parent_hash: B256, state: &EngineApiTreeState, - block_num_hash: NumHash, strategy: StateRootStrategy, ) -> Result< ( @@ -821,8 +833,7 @@ where Err((error, txs, env, provider_builder)) => { // Failed to spawn proof workers, fallback to parallel state root error!( - target: "engine::tree", - block=?block_num_hash, + target: "engine::tree::payload_validator", ?error, "Failed to spawn proof workers, falling back to parallel state root" ); @@ -840,8 +851,7 @@ where // prewarming for transaction execution } else { debug!( - target: "engine::tree", - block=?block_num_hash, + target: "engine::tree::payload_validator", "Disabling state root task due to non-empty prefix sets" ); ( @@ -884,7 +894,7 @@ where state: &EngineApiTreeState, ) -> ProviderResult>> { if let Some((historical, blocks)) = state.tree_state.blocks_by_hash(hash) { - debug!(target: "engine::tree", %hash, %historical, "found canonical state for block in memory, creating provider builder"); + debug!(target: "engine::tree::payload_validator", %hash, %historical, "found canonical state for block in memory, creating provider builder"); // the block leads back to the canonical chain return Ok(Some(StateProviderBuilder::new( self.provider.clone(), @@ -895,17 +905,18 @@ where // Check if the block is persisted if let Some(header) = self.provider.header(hash)? { - debug!(target: "engine::tree", %hash, number = %header.number(), "found canonical state for block in database, creating provider builder"); + debug!(target: "engine::tree::payload_validator", %hash, number = %header.number(), "found canonical state for block in database, creating provider builder"); // For persisted blocks, we create a builder that will fetch state directly from the // database return Ok(Some(StateProviderBuilder::new(self.provider.clone(), hash, None))) } - debug!(target: "engine::tree", %hash, "no canonical state found for block"); + debug!(target: "engine::tree::payload_validator", %hash, "no canonical state found for block"); Ok(None) } /// Determines the state root computation strategy based on persistence state and configuration. + #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn plan_state_root_computation>>( &self, input: &BlockOrPayload, @@ -939,7 +950,7 @@ where }; debug!( - target: "engine::tree", + target: "engine::tree::payload_validator", block=?input.num_hash(), ?strategy, "Planned state root computation strategy" @@ -979,6 +990,12 @@ where /// block. /// 3. Once in-memory blocks are collected and optionally filtered, we compute the /// [`HashedPostState`] from them. + #[instrument( + level = "debug", + target = "engine::tree::payload_validator", + skip_all, + fields(persisting_kind, parent_hash) + )] fn compute_trie_input( &self, persisting_kind: PersistingKind, @@ -999,6 +1016,9 @@ where // If the current block is a descendant of the currently persisting blocks, then we need to // filter in-memory blocks, so that none of them are already persisted in the database. + let _enter = + debug_span!(target: "engine::tree::payload_validator", "filter in-memory blocks", len = blocks.len()) + .entered(); if persisting_kind.is_descendant() { // Iterate over the blocks from oldest to newest. 
while let Some(block) = blocks.last() { @@ -1023,11 +1043,13 @@ where parent_hash.into() }; } + drop(_enter); - if blocks.is_empty() { - debug!(target: "engine::tree", %parent_hash, "Parent found on disk"); + let blocks_empty = blocks.is_empty(); + if blocks_empty { + debug!(target: "engine::tree::payload_validator", "Parent found on disk"); } else { - debug!(target: "engine::tree", %parent_hash, %historical, blocks = blocks.len(), "Parent found in memory"); + debug!(target: "engine::tree::payload_validator", %historical, blocks = blocks.len(), "Parent found in memory"); } // Convert the historical block to the block number. @@ -1035,12 +1057,15 @@ where .convert_hash_or_number(historical)? .ok_or_else(|| ProviderError::BlockHashNotFound(historical.as_hash().unwrap()))?; + let _enter = + debug_span!(target: "engine::tree::payload_validator", "revert state", blocks_empty) + .entered(); // Retrieve revert state for historical block. let (revert_state, revert_trie) = if block_number == best_block_number { // We do not check against the `last_block_number` here because // `HashedPostState::from_reverts` / `trie_reverts` only use the database tables, and // not static files. - debug!(target: "engine::tree", block_number, best_block_number, "Empty revert state"); + debug!(target: "engine::tree::payload_validator", block_number, best_block_number, "Empty revert state"); (HashedPostState::default(), TrieUpdatesSorted::default()) } else { let revert_state = HashedPostState::from_reverts::( @@ -1050,7 +1075,7 @@ where .map_err(ProviderError::from)?; let revert_trie = provider.trie_reverts(block_number + 1)?; debug!( - target: "engine::tree", + target: "engine::tree::payload_validator", block_number, best_block_number, accounts = revert_state.accounts.len(), @@ -1232,4 +1257,12 @@ impl BlockOrPayload { Self::Block(block) => block.block_with_parent(), } } + + /// Returns a string showing whether or not this is a block or payload. 
+ pub const fn type_name(&self) -> &'static str { + match self { + Self::Payload(_) => "payload", + Self::Block(_) => "block", + } + } } diff --git a/crates/node/core/src/args/trace.rs b/crates/node/core/src/args/trace.rs index 2e37feb6739..45bc9c9029c 100644 --- a/crates/node/core/src/args/trace.rs +++ b/crates/node/core/src/args/trace.rs @@ -39,7 +39,7 @@ pub struct TraceArgs { long = "tracing-otlp.filter", global = true, value_name = "FILTER", - default_value = "TRACE", + default_value = "debug", help_heading = "Tracing" )] pub otlp_filter: EnvFilter, diff --git a/crates/prune/prune/src/segments/user/account_history.rs b/crates/prune/prune/src/segments/user/account_history.rs index 3c18cd1befc..317337f050e 100644 --- a/crates/prune/prune/src/segments/user/account_history.rs +++ b/crates/prune/prune/src/segments/user/account_history.rs @@ -45,7 +45,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let range = match input.get_next_block_range() { Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/receipts.rs b/crates/prune/prune/src/segments/user/receipts.rs index ecb0f3423be..03faddc1d5b 100644 --- a/crates/prune/prune/src/segments/user/receipts.rs +++ b/crates/prune/prune/src/segments/user/receipts.rs @@ -42,7 +42,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { crate::segments::receipts::prune(provider, input) } diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index 0849db52518..8fd6d1e73a5 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -45,7 +45,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { // Contract log filtering removes every receipt possible except the ones in the list. So, // for the other receipts it's as if they had a `PruneMode::Distance()` of diff --git a/crates/prune/prune/src/segments/user/sender_recovery.rs b/crates/prune/prune/src/segments/user/sender_recovery.rs index 35ee487203a..9fbad8c428c 100644 --- a/crates/prune/prune/src/segments/user/sender_recovery.rs +++ b/crates/prune/prune/src/segments/user/sender_recovery.rs @@ -37,7 +37,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let tx_range = match input.get_next_tx_num_range(provider)? 
{ Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/storage_history.rs b/crates/prune/prune/src/segments/user/storage_history.rs index ee7447c37da..a4ad37bf789 100644 --- a/crates/prune/prune/src/segments/user/storage_history.rs +++ b/crates/prune/prune/src/segments/user/storage_history.rs @@ -47,7 +47,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let range = match input.get_next_block_range() { Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index e218f623ed5..0055f8abd22 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -38,7 +38,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune( &self, provider: &Provider, diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index 19992ead498..fda19c7cb31 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -27,7 +27,7 @@ pub(crate) struct Batch { // Batch responses must be sent back as a single message so we read the results from each // request in the batch and read the results off of a new channel, `rx_batch`, and then send the // complete batch response back to the client over `tx`. -#[instrument(name = "batch", skip(b), level = "TRACE")] +#[instrument(name = "batch", skip(b))] pub(crate) async fn process_batch_request( b: Batch, max_response_body_size: usize, @@ -98,7 +98,7 @@ where } } -#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service), level = "TRACE")] +#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service))] pub(crate) async fn execute_call_with_tracing<'a, S>( req: Request<'a>, rpc_service: &S, diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index b86037628ea..75431b915a5 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -443,7 +443,7 @@ struct ProcessConnection<'a, HttpMiddleware, RpcMiddleware> { } /// Spawns the IPC connection onto a new task -#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id), level = "INFO")] +#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id))] fn process_connection( params: ProcessConnection<'_, HttpMiddleware, RpcMiddleware>, ) where diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index a0e0bd30931..7865659ece7 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -16,7 +16,7 @@ use tracing_futures::Instrument; macro_rules! 
engine_span { () => { - tracing::trace_span!(target: "rpc", "engine") + tracing::info_span!(target: "rpc", "engine") }; } diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 256ee20794e..6d37c5f3413 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -20,7 +20,7 @@ use std::{ collections::HashMap, ops::{RangeBounds, RangeInclusive}, }; -use tracing::debug; +use tracing::{debug, instrument}; /// Extends [`StateRoot`] with operations specific for working with a database transaction. pub trait DatabaseStateRoot<'a, TX>: Sized { @@ -226,6 +226,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } impl DatabaseHashedPostState for HashedPostState { + #[instrument(target = "trie::db", skip(tx), fields(range))] fn from_reverts( tx: &TX, range: impl RangeBounds, diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index b66b7bbaa4f..6525500a2a2 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -57,7 +57,7 @@ use std::{ time::Instant, }; use tokio::runtime::Handle; -use tracing::trace; +use tracing::{debug_span, trace}; #[cfg(feature = "metrics")] use crate::proof_task_metrics::ProofTaskTrieMetrics; @@ -300,10 +300,16 @@ fn account_worker_loop( while let Ok(job) = work_rx.recv() { match job { AccountWorkerJob::AccountMultiproof { mut input, result_sender } => { - trace!( + let span = tracing::debug_span!( target: "trie::proof_task", - worker_id, + "Account multiproof calculation", targets = input.targets.len(), + worker_id, + ); + let _span_guard = span.enter(); + + trace!( + target: "trie::proof_task", "Processing account multiproof" ); @@ -370,18 +376,24 @@ fn account_worker_loop( trace!( target: "trie::proof_task", - worker_id, proof_time_us = proof_elapsed.as_micros(), total_processed = account_proofs_processed, "Account multiproof completed" ); + drop(_span_guard); } AccountWorkerJob::BlindedAccountNode { path, result_sender } => { - trace!( + let span = tracing::debug_span!( target: "trie::proof_task", - worker_id, + "Blinded account node calculation", ?path, + worker_id, + ); + let _span_guard = span.enter(); + + trace!( + target: "trie::proof_task", "Processing blinded account node" ); @@ -403,12 +415,11 @@ fn account_worker_loop( trace!( target: "trie::proof_task", - worker_id, - ?path, node_time_us = elapsed.as_micros(), total_processed = account_nodes_processed, "Blinded account node completed" ); + drop(_span_guard); } } } @@ -693,7 +704,7 @@ where multi_added_removed_keys.unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); let added_removed_keys = multi_added_removed_keys.get_storage(&hashed_address); - let span = tracing::trace_span!( + let span = tracing::debug_span!( target: "trie::proof_task", "Storage proof calculation", hashed_address = ?hashed_address, @@ -889,8 +900,13 @@ impl ProofWorkerHandle { "Spawning proof worker pools" ); + let storage_worker_parent = + debug_span!(target: "trie::proof_task", "Storage worker tasks", ?storage_worker_count); + let _guard = storage_worker_parent.enter(); + // Spawn storage workers for worker_id in 0..storage_worker_count { + let parent_span = debug_span!(target: "trie::proof_task", "Storage worker", ?worker_id); let view_clone = view.clone(); let task_ctx_clone = task_ctx.clone(); let work_rx_clone = storage_work_rx.clone(); @@ -899,6 +915,7 @@ impl ProofWorkerHandle { #[cfg(feature = "metrics")] let metrics = ProofTaskTrieMetrics::default(); + let _guard = parent_span.enter(); storage_worker_loop( 
view_clone, task_ctx_clone, @@ -916,8 +933,15 @@ impl ProofWorkerHandle { ); } + drop(_guard); + + let account_worker_parent = + debug_span!(target: "trie::proof_task", "Account worker tasks", ?account_worker_count); + let _guard = account_worker_parent.enter(); + // Spawn account workers for worker_id in 0..account_worker_count { + let parent_span = debug_span!(target: "trie::proof_task", "Account worker", ?worker_id); let view_clone = view.clone(); let task_ctx_clone = task_ctx.clone(); let work_rx_clone = account_work_rx.clone(); @@ -927,6 +951,7 @@ impl ProofWorkerHandle { #[cfg(feature = "metrics")] let metrics = ProofTaskTrieMetrics::default(); + let _guard = parent_span.enter(); account_worker_loop( view_clone, task_ctx_clone, @@ -945,6 +970,8 @@ impl ProofWorkerHandle { ); } + drop(_guard); + Self::new_handle(storage_work_tx, account_work_tx) } diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index e99bc584ec4..5e5a838f414 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -741,13 +741,24 @@ impl SparseTrieInterface for ParallelSparseTrie { // Update subtrie hashes in parallel { use rayon::iter::{IntoParallelIterator, ParallelIterator}; + use tracing::debug_span; + let (tx, rx) = mpsc::channel(); let branch_node_tree_masks = &self.branch_node_tree_masks; let branch_node_hash_masks = &self.branch_node_hash_masks; + let span = tracing::Span::current(); changed_subtries .into_par_iter() .map(|mut changed_subtrie| { + let _enter = debug_span!( + target: "trie::parallel_sparse", + parent: span.clone(), + "subtrie", + index = changed_subtrie.index + ) + .entered(); + #[cfg(feature = "metrics")] let start = std::time::Instant::now(); changed_subtrie.subtrie.update_hashes( @@ -1292,6 +1303,7 @@ impl ParallelSparseTrie { /// Drains any [`SparseTrieUpdatesAction`]s from the given subtrie, and applies each action to /// the given `updates` set. If the given set is None then this is a no-op. + #[instrument(target = "trie::parallel_sparse", skip_all)] fn apply_subtrie_update_actions( &mut self, update_actions: impl Iterator, @@ -1315,7 +1327,7 @@ impl ParallelSparseTrie { } /// Updates hashes for the upper subtrie, using nodes from both upper and lower subtries. - #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, ret)] + #[instrument(target = "trie::parallel_sparse", skip_all, ret(level = "trace"))] fn update_upper_subtrie_hashes(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { trace!(target: "trie::parallel_sparse", "Updating upper subtrie hashes"); @@ -1393,6 +1405,7 @@ impl ParallelSparseTrie { /// /// IMPORTANT: The method removes the subtries from `lower_subtries`, and the caller is /// responsible for returning them back into the array. + #[instrument(target = "trie::parallel_sparse", skip_all, fields(prefix_set_len = prefix_set.len()))] fn take_changed_lower_subtries( &mut self, prefix_set: &mut PrefixSet, @@ -1549,6 +1562,7 @@ impl ParallelSparseTrie { /// Return updated subtries back to the trie after executing any actions required on the /// top-level `SparseTrieUpdates`. + #[instrument(target = "trie::parallel_sparse", skip_all)] fn insert_changed_subtries( &mut self, changed_subtries: impl IntoIterator, @@ -2036,7 +2050,7 @@ impl SparseSubtrie { /// # Panics /// /// If the node at the root path does not exist. 
- #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, fields(root = ?self.path), ret)] + #[instrument(target = "trie::parallel_sparse", skip_all, fields(root = ?self.path), ret(level = "trace"))] fn update_hashes( &mut self, prefix_set: &mut PrefixSet, diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 6fac7c5faad..b2c7ee0f566 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-primitives-traits.workspace = true reth-execution-errors.workspace = true reth-trie-common.workspace = true -tracing.workspace = true +tracing = { workspace = true, features = ["attributes"] } alloy-trie.workspace = true # alloy diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 08e868d2a40..aef552da3dd 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -18,7 +18,7 @@ use reth_trie_common::{ DecodedMultiProof, DecodedStorageMultiProof, MultiProof, Nibbles, RlpNode, StorageMultiProof, TrieAccount, TrieMask, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE, }; -use tracing::trace; +use tracing::{instrument, trace}; /// Provides type-safe re-use of cleared [`SparseStateTrie`]s, which helps to save allocations /// across payload runs. @@ -208,6 +208,14 @@ where /// Reveal unknown trie paths from decoded multiproof. /// NOTE: This method does not extensively validate the proof. + #[instrument( + target = "trie::sparse", + skip_all, + fields( + account_nodes = multiproof.account_subtree.len(), + storages = multiproof.storages.len() + ) + )] pub fn reveal_decoded_multiproof( &mut self, multiproof: DecodedMultiProof, @@ -532,6 +540,7 @@ where /// Calculates the hashes of subtries. /// /// If the trie has not been revealed, this function does nothing. + #[instrument(target = "trie::sparse", skip_all)] pub fn calculate_subtries(&mut self) { if let SparseTrie::Revealed(trie) = &mut self.state { trie.update_subtrie_hashes(); @@ -592,6 +601,7 @@ where } /// Returns sparse trie root and trie updates if the trie has been revealed. + #[instrument(target = "trie::sparse", skip_all)] pub fn root_with_updates( &mut self, provider_factory: impl TrieNodeProviderFactory, @@ -695,6 +705,7 @@ where /// /// Returns false if the new account info and storage trie are empty, indicating the account /// leaf should be removed. + #[instrument(target = "trie::sparse", skip_all)] pub fn update_account( &mut self, address: B256, @@ -737,6 +748,7 @@ where /// /// Returns false if the new storage root is empty, and the account info was already empty, /// indicating the account leaf should be removed. + #[instrument(target = "trie::sparse", skip_all)] pub fn update_account_storage_root( &mut self, address: B256, @@ -784,6 +796,7 @@ where } /// Remove the account leaf node. + #[instrument(target = "trie::sparse", skip_all)] pub fn remove_account_leaf( &mut self, path: &Nibbles, diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index d3c83c48a09..737da842254 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -24,7 +24,7 @@ use reth_trie_common::{ TrieNode, CHILD_INDEX_RANGE, EMPTY_ROOT_HASH, }; use smallvec::SmallVec; -use tracing::{debug, trace}; +use tracing::{debug, instrument, trace}; /// The level below which the sparse trie hashes are calculated in /// [`SerialSparseTrie::update_subtrie_hashes`]. @@ -175,6 +175,7 @@ impl SparseTrie { /// and resetting the trie to only contain an empty root node. 
/// /// Note: This method will error if the trie is blinded. + #[instrument(target = "trie::sparse", skip_all)] pub fn wipe(&mut self) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; revealed.wipe(); @@ -191,6 +192,7 @@ impl SparseTrie { /// /// - `Some(B256)` with the calculated root hash if the trie is revealed. /// - `None` if the trie is still blind. + #[instrument(target = "trie::sparse", skip_all)] pub fn root(&mut self) -> Option { Some(self.as_revealed_mut()?.root()) } @@ -230,6 +232,7 @@ impl SparseTrie { /// # Errors /// /// Returns an error if the trie is still blind, or if the update fails. + #[instrument(target = "trie::sparse", skip_all)] pub fn update_leaf( &mut self, path: Nibbles, @@ -246,6 +249,7 @@ impl SparseTrie { /// # Errors /// /// Returns an error if the trie is still blind, or if the leaf cannot be removed + #[instrument(target = "trie::sparse", skip_all)] pub fn remove_leaf( &mut self, path: &Nibbles, @@ -589,14 +593,13 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } + #[instrument(target = "trie::sparse::serial", skip(self, provider))] fn update_leaf( &mut self, full_path: Nibbles, value: Vec, provider: P, ) -> SparseTrieResult<()> { - trace!(target: "trie::sparse", ?full_path, ?value, "update_leaf called"); - self.prefix_set.insert(full_path); let existing = self.values.insert(full_path, value); if existing.is_some() { @@ -728,6 +731,7 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } + #[instrument(target = "trie::sparse::serial", skip(self, provider))] fn remove_leaf( &mut self, full_path: &Nibbles, @@ -913,6 +917,7 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } + #[instrument(target = "trie::sparse::serial", skip(self))] fn root(&mut self) -> B256 { // Take the current prefix set let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); @@ -1348,6 +1353,7 @@ impl SerialSparseTrie { /// /// This function identifies all nodes that have changed (based on the prefix set) at the given /// depth and recalculates their RLP representation. + #[instrument(target = "trie::sparse::serial", skip(self))] pub fn update_rlp_node_level(&mut self, depth: usize) { // Take the current prefix set let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); @@ -1393,6 +1399,7 @@ impl SerialSparseTrie { /// specified depth. /// - A `PrefixSetMut` containing paths shallower than the specified depth that still need to be /// tracked for future updates. + #[instrument(target = "trie::sparse::serial", skip(self))] fn get_changed_nodes_at_depth( &self, prefix_set: &mut PrefixSet, @@ -1479,6 +1486,7 @@ impl SerialSparseTrie { /// # Panics /// /// If the node at provided path does not exist. + #[instrument(target = "trie::sparse::serial", skip_all, ret(level = "trace"))] pub fn rlp_node( &mut self, prefix_set: &mut PrefixSet, diff --git a/crates/trie/trie/src/hashed_cursor/mock.rs b/crates/trie/trie/src/hashed_cursor/mock.rs index aca1c303d69..f091ae6ffe5 100644 --- a/crates/trie/trie/src/hashed_cursor/mock.rs +++ b/crates/trie/trie/src/hashed_cursor/mock.rs @@ -107,7 +107,7 @@ impl MockHashedCursor { impl HashedCursor for MockHashedCursor { type Value = T; - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn seek(&mut self, key: B256) -> Result, DatabaseError> { // Find the first key that is greater than or equal to the given key. 
let entry = self.values.iter().find_map(|(k, v)| (k >= &key).then(|| (*k, v.clone()))); @@ -121,7 +121,7 @@ impl HashedCursor for MockHashedCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn next(&mut self) -> Result, DatabaseError> { let mut iter = self.values.iter(); // Jump to the first key that has a prefix of the current key if it's set, or to the first diff --git a/crates/trie/trie/src/trie_cursor/mock.rs b/crates/trie/trie/src/trie_cursor/mock.rs index e4504ee4f9c..313df0443e3 100644 --- a/crates/trie/trie/src/trie_cursor/mock.rs +++ b/crates/trie/trie/src/trie_cursor/mock.rs @@ -109,7 +109,7 @@ impl MockTrieCursor { } impl TrieCursor for MockTrieCursor { - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn seek_exact( &mut self, key: Nibbles, @@ -125,7 +125,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn seek( &mut self, key: Nibbles, @@ -142,7 +142,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn next(&mut self) -> Result, DatabaseError> { let mut iter = self.trie_nodes.iter(); // Jump to the first key that has a prefix of the current key if it's set, or to the first @@ -161,7 +161,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn current(&mut self) -> Result, DatabaseError> { Ok(self.current_key) } diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 0344c23bf2c..041d494523c 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -131,5 +131,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index adc08cd96e6..96bdcf7a98c 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -117,5 +117,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 91397e0f7e9..f2a49420837 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -182,5 +182,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index 834fd42e447..c86273aacf4 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -134,5 +134,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 0b64cefb71b..88fd92763f8 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -126,5 +126,5 @@ Tracing: Defaults to TRACE if not specified. 
- [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index eb4120a34cb..c467fe9d3dd 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -125,5 +125,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index 913c6fcc5eb..d4b59a05223 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -128,5 +128,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index b5120d7409a..4bb81ac07c9 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -161,5 +161,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index e0a54dcac35..c75a889458b 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -124,5 +124,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index 0d027754d59..8c20c7e311a 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -126,5 +126,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 2ea1ea48f2e..3b8df2f3a4f 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -134,5 +134,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index 21e08493453..3980903c65d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -134,5 +134,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index 55e14d822cd..16131a95a17 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -167,5 +167,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index 3f95c5761d9..0c09f5be69b 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -121,5 +121,5 @@ Tracing: Defaults to TRACE if not specified. 
- [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index d972bcccd54..9c08ff331ed 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -124,5 +124,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index 1fd305c4e63..47695e1b22a 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -134,5 +134,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index c2b50b8944f..7611b69946d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -121,5 +121,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index 1890b95821d..b18faa93205 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -179,5 +179,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index 4791d561980..bf5b0ac534c 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -120,5 +120,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 430e0948a99..cd413c12841 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -185,5 +185,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index c0d03852de9..7d62409a638 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -180,5 +180,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index b5795a6e1d7..8e3e1cdb0a2 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -181,5 +181,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 1ba1affc519..49c0e098098 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -201,5 +201,5 @@ Tracing: Defaults to TRACE if not specified. 
- [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index 11777b1f6e6..ac1c7ff254b 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -169,5 +169,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index a752f76b019..1f2ce545bc0 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -1016,5 +1016,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 4138656604d..b81c00a0382 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -118,5 +118,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index 63f77913f9c..fd28a37ebb1 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -338,5 +338,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index 578932411f6..63baa86d367 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -129,5 +129,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index f9b3276ced0..f9f94497547 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -338,5 +338,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index 8bf19d3ecab..78d6dd8d3ba 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -115,5 +115,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index de13e93b561..2089c92461e 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -115,5 +115,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index bc5d0385697..8f5828e8a67 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -169,5 +169,5 @@ Tracing: Defaults to TRACE if not specified. 
- [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index dc3bcbe4627..56a7e3558c4 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -182,5 +182,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index 85f2559de4d..822f0f0c2db 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -118,5 +118,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 923fd5ff955..037495979a0 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -184,5 +184,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 2466edcb966..8484379fe36 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -176,5 +176,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index c79571b31c3..079804ff088 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -133,5 +133,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index c2480bae00f..7aee318e1ac 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -133,5 +133,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index 423771b183b..17b2b7c9515 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -133,5 +133,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index 211f4e59979..de64aa51c33 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -133,5 +133,5 @@ Tracing: Defaults to TRACE if not specified. 
- [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 9eae5963a17..5407938072f 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -405,5 +405,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index ab5776e2e5b..2d2f94d6801 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -177,5 +177,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index 500cb3197fb..a376af84012 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -125,5 +125,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index 4ec68dbb1ec..ce62c643600 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -125,5 +125,5 @@ Tracing: Defaults to TRACE if not specified. - [default: TRACE] + [default: debug] ``` \ No newline at end of file From 60e3eded5e0a19dd03b9efb16e08ca58ccb47eb9 Mon Sep 17 00:00:00 2001 From: YK Date: Wed, 22 Oct 2025 14:53:55 +0800 Subject: [PATCH 148/371] refactor: decouple max proof task concurrency from inflight proof limits (#19171) --- crates/engine/primitives/src/config.rs | 24 ------------------- .../tree/src/tree/payload_processor/mod.rs | 5 ---- .../src/tree/payload_processor/multiproof.rs | 22 ++++++++--------- crates/node/core/src/args/engine.rs | 10 ++------ crates/node/core/src/node_config.rs | 3 +-- docs/cli/help.rs | 5 ---- docs/vocs/docs/pages/cli/reth/node.mdx | 5 ---- 7 files changed, 14 insertions(+), 60 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 6f759036eb2..0b9b7d9f821 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -6,9 +6,6 @@ pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; /// How close to the canonical head we persist blocks. pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0; -/// Default maximum concurrency for on-demand proof tasks (blinded nodes) -pub const DEFAULT_MAX_PROOF_TASK_CONCURRENCY: u64 = 256; - /// Minimum number of workers we allow configuring explicitly. pub const MIN_WORKER_COUNT: usize = 32; @@ -102,8 +99,6 @@ pub struct TreeConfig { cross_block_cache_size: u64, /// Whether the host has enough parallelism to run state root task. has_enough_parallelism: bool, - /// Maximum number of concurrent proof tasks - max_proof_task_concurrency: u64, /// Whether multiproof task should chunk proof targets. multiproof_chunking_enabled: bool, /// Multiproof task chunk size for proof targets. 
@@ -153,7 +148,6 @@ impl Default for TreeConfig { state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE, has_enough_parallelism: has_enough_parallelism(), - max_proof_task_concurrency: DEFAULT_MAX_PROOF_TASK_CONCURRENCY, multiproof_chunking_enabled: true, multiproof_chunk_size: DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE, reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES, @@ -184,7 +178,6 @@ impl TreeConfig { state_provider_metrics: bool, cross_block_cache_size: u64, has_enough_parallelism: bool, - max_proof_task_concurrency: u64, multiproof_chunking_enabled: bool, multiproof_chunk_size: usize, reserved_cpu_cores: usize, @@ -196,7 +189,6 @@ impl TreeConfig { storage_worker_count: usize, account_worker_count: usize, ) -> Self { - assert!(max_proof_task_concurrency > 0, "max_proof_task_concurrency must be at least 1"); Self { persistence_threshold, memory_block_buffer_target, @@ -210,7 +202,6 @@ impl TreeConfig { state_provider_metrics, cross_block_cache_size, has_enough_parallelism, - max_proof_task_concurrency, multiproof_chunking_enabled, multiproof_chunk_size, reserved_cpu_cores, @@ -249,11 +240,6 @@ impl TreeConfig { self.max_execute_block_batch_size } - /// Return the maximum proof task concurrency. - pub const fn max_proof_task_concurrency(&self) -> u64 { - self.max_proof_task_concurrency - } - /// Return whether the multiproof task chunking is enabled. pub const fn multiproof_chunking_enabled(&self) -> bool { self.multiproof_chunking_enabled @@ -420,16 +406,6 @@ impl TreeConfig { self } - /// Setter for maximum number of concurrent proof tasks. - pub const fn with_max_proof_task_concurrency( - mut self, - max_proof_task_concurrency: u64, - ) -> Self { - assert!(max_proof_task_concurrency > 0, "max_proof_task_concurrency must be at least 1"); - self.max_proof_task_concurrency = max_proof_task_concurrency; - self - } - /// Setter for whether multiproof task should chunk proof targets. pub const fn with_multiproof_chunking_enabled( mut self, diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 090be01a0ec..cdac92ed675 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -207,7 +207,6 @@ where ); let storage_worker_count = config.storage_worker_count(); let account_worker_count = config.account_worker_count(); - let max_proof_task_concurrency = config.max_proof_task_concurrency() as usize; let proof_handle = ProofWorkerHandle::new( self.executor.handle().clone(), consistent_view, @@ -216,15 +215,11 @@ where account_worker_count, ); - // We set it to half of the proof task concurrency, because often for each multiproof we - // spawn one Tokio task for the account proof, and one Tokio task for the storage proof. 
- let max_multi_proof_task_concurrency = max_proof_task_concurrency / 2; let multi_proof_task = MultiProofTask::new( state_root_config, self.executor.clone(), proof_handle.clone(), to_sparse_trie, - max_multi_proof_task_concurrency, config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()), ); diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 815ca72fbf0..1e5b226f591 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -34,6 +34,10 @@ use std::{ }; use tracing::{debug, error, instrument, trace}; +/// Default upper bound for inflight multiproof calculations. These would be sitting in the queue +/// waiting to be processed. +const DEFAULT_MULTIPROOF_INFLIGHT_LIMIT: usize = 128; + /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the /// state. #[derive(Default, Debug)] @@ -338,8 +342,8 @@ impl MultiproofInput { /// availability has been signaled. #[derive(Debug)] pub struct MultiproofManager { - /// Maximum number of concurrent calculations. - max_concurrent: usize, + /// Maximum number of proof calculations allowed to be inflight at once. + inflight_limit: usize, /// Currently running calculations. inflight: usize, /// Queued calculations. @@ -370,11 +374,10 @@ impl MultiproofManager { executor: WorkloadExecutor, metrics: MultiProofTaskMetrics, proof_worker_handle: ProofWorkerHandle, - max_concurrent: usize, ) -> Self { Self { - pending: VecDeque::with_capacity(max_concurrent), - max_concurrent, + pending: VecDeque::with_capacity(DEFAULT_MULTIPROOF_INFLIGHT_LIMIT), + inflight_limit: DEFAULT_MULTIPROOF_INFLIGHT_LIMIT, executor, inflight: 0, metrics, @@ -384,11 +387,10 @@ impl MultiproofManager { } const fn is_full(&self) -> bool { - self.inflight >= self.max_concurrent + self.inflight >= self.inflight_limit } - /// Spawns a new multiproof calculation or enqueues it for later if - /// `max_concurrent` are already inflight. + /// Spawns a new multiproof calculation or enqueues it if the inflight limit is reached. 
fn spawn_or_queue(&mut self, input: PendingMultiproofTask) { // If there are no proof targets, we can just send an empty multiproof back immediately if input.proof_targets_is_empty() { @@ -685,7 +687,6 @@ impl MultiProofTask { executor: WorkloadExecutor, proof_worker_handle: ProofWorkerHandle, to_sparse_trie: Sender, - max_concurrency: usize, chunk_size: Option, ) -> Self { let (tx, rx) = channel(); @@ -704,7 +705,6 @@ impl MultiProofTask { executor, metrics.clone(), proof_worker_handle, - max_concurrency, ), metrics, } @@ -1239,7 +1239,7 @@ mod tests { ProofWorkerHandle::new(executor.handle().clone(), consistent_view, task_ctx, 1, 1); let channel = channel(); - MultiProofTask::new(config, executor, proof_handle, channel.0, 1, None) + MultiProofTask::new(config, executor, proof_handle, channel.0, Some(1)) } #[test] diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index c82b1b03a15..29535f2c1df 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -4,8 +4,8 @@ use clap::Args; use reth_engine_primitives::{TreeConfig, DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE}; use crate::node_config::{ - DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, DEFAULT_MAX_PROOF_TASK_CONCURRENCY, - DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, + DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, + DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, }; /// Parameters for configuring the engine driver. @@ -63,10 +63,6 @@ pub struct EngineArgs { #[arg(long = "engine.accept-execution-requests-hash")] pub accept_execution_requests_hash: bool, - /// Configure the maximum number of concurrent proof tasks - #[arg(long = "engine.max-proof-task-concurrency", default_value_t = DEFAULT_MAX_PROOF_TASK_CONCURRENCY)] - pub max_proof_task_concurrency: u64, - /// Whether multiproof task should chunk proof targets. #[arg(long = "engine.multiproof-chunking", default_value = "true")] pub multiproof_chunking_enabled: bool, @@ -135,7 +131,6 @@ impl Default for EngineArgs { state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, accept_execution_requests_hash: false, - max_proof_task_concurrency: DEFAULT_MAX_PROOF_TASK_CONCURRENCY, multiproof_chunking_enabled: true, multiproof_chunk_size: DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE, reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES, @@ -162,7 +157,6 @@ impl EngineArgs { .with_state_provider_metrics(self.state_provider_metrics) .with_always_compare_trie_updates(self.state_root_task_compare_updates) .with_cross_block_cache_size(self.cross_block_cache_size * 1024 * 1024) - .with_max_proof_task_concurrency(self.max_proof_task_concurrency) .with_multiproof_chunking_enabled(self.multiproof_chunking_enabled) .with_multiproof_chunk_size(self.multiproof_chunk_size) .with_reserved_cpu_cores(self.reserved_cpu_cores) diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index ba888346035..61eb29db38b 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -34,8 +34,7 @@ use tracing::*; use crate::args::{EraArgs, MetricArgs}; pub use reth_engine_primitives::{ - DEFAULT_MAX_PROOF_TASK_CONCURRENCY, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, - DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, + DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, }; /// Default size of cross-block cache in megabytes. 
diff --git a/docs/cli/help.rs b/docs/cli/help.rs index 05e61eef740..0474d00e723 100755 --- a/docs/cli/help.rs +++ b/docs/cli/help.rs @@ -269,11 +269,6 @@ fn preprocess_help(s: &str) -> Cow<'_, str> { r"(rpc.max-tracing-requests \n.*\n.*\n.*\n.*\n.*)\[default: \d+\]", r"$1[default: ]", ), - // Handle engine.max-proof-task-concurrency dynamic default - ( - r"(engine\.max-proof-task-concurrency.*)\[default: \d+\]", - r"$1[default: ]", - ), // Handle engine.reserved-cpu-cores dynamic default ( r"(engine\.reserved-cpu-cores.*)\[default: \d+\]", diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 1f2ce545bc0..30a2d3edffb 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -840,11 +840,6 @@ Engine: --engine.accept-execution-requests-hash Enables accepting requests hash instead of an array of requests in `engine_newPayloadV4` - --engine.max-proof-task-concurrency - Configure the maximum number of concurrent proof tasks - - [default: 256] - --engine.multiproof-chunking Whether multiproof task should chunk proof targets From ada053aa67a46a5580a804fac4b6234da2d9c6d8 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Wed, 22 Oct 2025 09:10:47 +0100 Subject: [PATCH 149/371] chore: remove rkrasiuk from codeowners (#19206) --- .github/CODEOWNERS | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index ffbd600db7e..eed64b157f3 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,12 +1,12 @@ * @gakonst -crates/blockchain-tree-api/ @rakita @rkrasiuk @mattsse @Rjected -crates/blockchain-tree/ @rakita @rkrasiuk @mattsse @Rjected -crates/chain-state/ @fgimenez @mattsse @rkrasiuk +crates/blockchain-tree-api/ @rakita @mattsse @Rjected +crates/blockchain-tree/ @rakita @mattsse @Rjected +crates/chain-state/ @fgimenez @mattsse crates/chainspec/ @Rjected @joshieDo @mattsse crates/cli/ @mattsse -crates/consensus/ @rkrasiuk @mattsse @Rjected +crates/consensus/ @mattsse @Rjected crates/e2e-test-utils/ @mattsse @Rjected @klkvr @fgimenez -crates/engine/ @rkrasiuk @mattsse @Rjected @fgimenez @mediocregopher @yongkangc +crates/engine/ @mattsse @Rjected @fgimenez @mediocregopher @yongkangc crates/era/ @mattsse @RomanHodulak crates/errors/ @mattsse crates/ethereum-forks/ @mattsse @Rjected @@ -15,17 +15,17 @@ crates/etl/ @joshieDo @shekhirin crates/evm/ @rakita @mattsse @Rjected crates/exex/ @shekhirin crates/net/ @mattsse @Rjected -crates/net/downloaders/ @rkrasiuk +crates/net/downloaders/ @Rjected crates/node/ @mattsse @Rjected @klkvr crates/optimism/ @mattsse @Rjected @fgimenez crates/payload/ @mattsse @Rjected crates/primitives-traits/ @Rjected @RomanHodulak @mattsse @klkvr crates/primitives/ @Rjected @mattsse @klkvr crates/prune/ @shekhirin @joshieDo -crates/ress @rkrasiuk +crates/ress @shekhirin @Rjected crates/revm/ @mattsse @rakita crates/rpc/ @mattsse @Rjected @RomanHodulak -crates/stages/ @rkrasiuk @shekhirin @mediocregopher +crates/stages/ @shekhirin @mediocregopher crates/static-file/ @joshieDo @shekhirin crates/storage/codecs/ @joshieDo crates/storage/db-api/ @joshieDo @rakita @@ -35,10 +35,10 @@ crates/storage/errors/ @rakita crates/storage/libmdbx-rs/ @rakita @shekhirin crates/storage/nippy-jar/ @joshieDo @shekhirin crates/storage/provider/ @rakita @joshieDo @shekhirin -crates/storage/storage-api/ @joshieDo @rkrasiuk +crates/storage/storage-api/ @joshieDo crates/tasks/ @mattsse 
crates/tokio-util/ @fgimenez crates/transaction-pool/ @mattsse @yongkangc -crates/trie/ @rkrasiuk @Rjected @shekhirin @mediocregopher +crates/trie/ @Rjected @shekhirin @mediocregopher etc/ @Rjected @shekhirin .github/ @gakonst @DaniPopes From 4d3c1631202830206ebadf3789cb1f146fa9f2af Mon Sep 17 00:00:00 2001 From: robinsdan <115981357+robinsdan@users.noreply.github.com> Date: Wed, 22 Oct 2025 16:46:26 +0800 Subject: [PATCH 150/371] perf(net): convert Bytes to BytesMut to avoid reallocation (#19204) --- crates/net/eth-wire/src/multiplex.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 9eb4f15f0bc..058dfe311e3 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -332,9 +332,9 @@ impl ProtocolProxy { return Ok(msg); } - let mut masked = Vec::from(msg); + let mut masked: BytesMut = msg.into(); masked[0] = masked[0].checked_add(offset).ok_or(io::ErrorKind::InvalidInput)?; - Ok(masked.into()) + Ok(masked.freeze()) } /// Unmasks the message ID of a message received from the wire. From b5df3f31b28adda9879b086b7d91b0863f300eec Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Wed, 22 Oct 2025 11:22:11 +0100 Subject: [PATCH 151/371] refactor(prune): remove receipts log filter segment (#19184) --- Cargo.lock | 2 - crates/config/src/config.rs | 32 +- crates/exex/exex/src/backfill/factory.rs | 2 +- crates/node/builder/src/launch/common.rs | 6 +- crates/node/core/Cargo.toml | 1 - crates/node/core/src/args/error.rs | 22 -- crates/node/core/src/args/mod.rs | 1 - crates/node/core/src/args/pruning.rs | 175 +-------- crates/prune/prune/Cargo.toml | 1 - crates/prune/prune/src/builder.rs | 2 +- crates/prune/prune/src/segments/mod.rs | 4 +- crates/prune/prune/src/segments/set.rs | 12 +- crates/prune/prune/src/segments/user/mod.rs | 2 - .../src/segments/user/receipts_by_logs.rs | 364 ------------------ crates/prune/types/src/lib.rs | 301 --------------- crates/prune/types/src/target.rs | 28 +- crates/stages/stages/src/stages/execution.rs | 26 +- .../static-file/src/static_file_producer.rs | 16 +- .../provider/src/providers/database/mod.rs | 2 +- .../src/providers/database/provider.rs | 26 +- docs/vocs/docs/pages/cli/reth/node.mdx | 3 - 21 files changed, 67 insertions(+), 961 deletions(-) delete mode 100644 crates/node/core/src/args/error.rs delete mode 100644 crates/prune/prune/src/segments/user/receipts_by_logs.rs diff --git a/Cargo.lock b/Cargo.lock index b9de70bfded..6e672b6f684 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9006,7 +9006,6 @@ dependencies = [ "serde", "shellexpand", "strum 0.27.2", - "thiserror 2.0.17", "tokio", "toml", "tracing", @@ -9786,7 +9785,6 @@ dependencies = [ name = "reth-prune" version = "1.8.2" dependencies = [ - "alloy-consensus", "alloy-eips", "alloy-primitives", "assert_matches", diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 5ff2431bb56..dd2e7046b0c 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -33,7 +33,7 @@ pub struct Config { impl Config { /// Sets the pruning configuration. - pub fn set_prune_config(&mut self, prune_config: PruneConfig) { + pub const fn set_prune_config(&mut self, prune_config: PruneConfig) { self.prune = prune_config; } } @@ -451,13 +451,14 @@ impl PruneConfig { } /// Returns whether there is any kind of receipt pruning configuration. 
- pub fn has_receipts_pruning(&self) -> bool { - self.segments.receipts.is_some() || !self.segments.receipts_log_filter.is_empty() + pub const fn has_receipts_pruning(&self) -> bool { + self.segments.receipts.is_some() } /// Merges another `PruneConfig` into this one, taking values from the other config if and only /// if the corresponding value in this config is not set. pub fn merge(&mut self, other: Self) { + #[expect(deprecated)] let Self { block_interval, segments: @@ -469,7 +470,7 @@ impl PruneConfig { storage_history, bodies_history, merkle_changesets, - receipts_log_filter, + receipts_log_filter: (), }, } = other; @@ -487,10 +488,6 @@ impl PruneConfig { self.segments.bodies_history = self.segments.bodies_history.or(bodies_history); // Merkle changesets is not optional, so we just replace it if provided self.segments.merkle_changesets = merkle_changesets; - - if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() { - self.segments.receipts_log_filter = receipts_log_filter; - } } } @@ -517,10 +514,9 @@ where mod tests { use super::{Config, EXTENSION}; use crate::PruneConfig; - use alloy_primitives::Address; use reth_network_peers::TrustedPeer; - use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig}; - use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration}; + use reth_prune_types::{PruneMode, PruneModes}; + use std::{path::Path, str::FromStr, time::Duration}; fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) { let temp_dir = tempfile::tempdir().unwrap(); @@ -1009,10 +1005,8 @@ receipts = 'full' storage_history: Some(PruneMode::Before(5000)), bodies_history: None, merkle_changesets: PruneMode::Before(0), - receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([( - Address::random(), - PruneMode::Full, - )])), + #[expect(deprecated)] + receipts_log_filter: (), }, }; @@ -1026,14 +1020,11 @@ receipts = 'full' storage_history: Some(PruneMode::Distance(3000)), bodies_history: None, merkle_changesets: PruneMode::Distance(10000), - receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([ - (Address::random(), PruneMode::Distance(1000)), - (Address::random(), PruneMode::Before(2000)), - ])), + #[expect(deprecated)] + receipts_log_filter: (), }, }; - let original_filter = config1.segments.receipts_log_filter.clone(); config1.merge(config2); // Check that the configuration has been merged. 
Any configuration present in config1 @@ -1045,7 +1036,6 @@ receipts = 'full' assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000))); assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000))); assert_eq!(config1.segments.merkle_changesets, PruneMode::Distance(10000)); - assert_eq!(config1.segments.receipts_log_filter, original_filter); } #[test] diff --git a/crates/exex/exex/src/backfill/factory.rs b/crates/exex/exex/src/backfill/factory.rs index d9a51bc47a7..29734b905e2 100644 --- a/crates/exex/exex/src/backfill/factory.rs +++ b/crates/exex/exex/src/backfill/factory.rs @@ -39,7 +39,7 @@ impl BackfillJobFactory { } /// Sets the prune modes - pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + pub const fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { self.prune_modes = prune_modes; self } diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index dd3cdbf756d..190cfdc8817 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -406,13 +406,14 @@ impl LaunchContextWith, - // Receipts Log Filter - /// Configure receipts log filter. Format: - /// <`address`>:<`prune_mode`>... where <`prune_mode`> can be 'full', 'distance:<`blocks`>', or - /// 'before:<`block_number`>' - #[arg(long = "prune.receiptslogfilter", value_name = "FILTER_CONFIG", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance", "receipts_before"], value_parser = parse_receipts_log_filter)] - pub receipts_log_filter: Option, + /// Receipts Log Filter + #[arg(long = "prune.receiptslogfilter", value_name = "FILTER_CONFIG", hide = true)] + #[deprecated] + pub receipts_log_filter: Option, // Account History /// Prunes all account history. @@ -130,7 +129,8 @@ impl PruningArgs { // TODO: set default to pre-merge block if available bodies_history: None, merkle_changesets: PruneMode::Distance(MINIMUM_PRUNING_DISTANCE), - receipts_log_filter: Default::default(), + #[expect(deprecated)] + receipts_log_filter: (), }, } } @@ -157,13 +157,14 @@ impl PruningArgs { if let Some(mode) = self.storage_history_prune_mode() { config.segments.storage_history = Some(mode); } - if let Some(receipt_logs) = - self.receipts_log_filter.as_ref().filter(|c| !c.is_empty()).cloned() - { - config.segments.receipts_log_filter = receipt_logs; - // need to remove the receipts segment filter entirely because that takes precedence - // over the logs filter - config.segments.receipts.take(); + + // Log warning if receipts_log_filter is set (deprecated feature) + #[expect(deprecated)] + if self.receipts_log_filter.is_some() { + tracing::warn!( + target: "reth::cli", + "The --prune.receiptslogfilter flag is deprecated and has no effect. It will be removed in a future release." + ); } config.is_default().not().then_some(config) @@ -251,141 +252,3 @@ impl PruningArgs { } } } - -/// Parses `,` separated pruning info into [`ReceiptsLogPruneConfig`]. -pub(crate) fn parse_receipts_log_filter( - value: &str, -) -> Result { - let mut config = BTreeMap::new(); - // Split out each of the filters. - let filters = value.split(','); - for filter in filters { - let parts: Vec<&str> = filter.split(':').collect(); - if parts.len() < 2 { - return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string())); - } - // Parse the address - let address = parts[0] - .parse::
() - .map_err(|_| ReceiptsLogError::InvalidAddress(parts[0].to_string()))?; - - // Parse the prune mode - let prune_mode = match parts[1] { - "full" => PruneMode::Full, - s if s.starts_with("distance") => { - if parts.len() < 3 { - return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string())); - } - let distance = - parts[2].parse::().map_err(ReceiptsLogError::InvalidDistance)?; - PruneMode::Distance(distance) - } - s if s.starts_with("before") => { - if parts.len() < 3 { - return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string())); - } - let block_number = - parts[2].parse::().map_err(ReceiptsLogError::InvalidBlockNumber)?; - PruneMode::Before(block_number) - } - _ => return Err(ReceiptsLogError::InvalidPruneMode(parts[1].to_string())), - }; - config.insert(address, prune_mode); - } - Ok(ReceiptsLogPruneConfig(config)) -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::address; - use clap::Parser; - - /// A helper type to parse Args more easily - #[derive(Parser)] - struct CommandParser { - #[command(flatten)] - args: T, - } - - #[test] - fn pruning_args_sanity_check() { - let args = CommandParser::::parse_from([ - "reth", - "--prune.receiptslogfilter", - "0x0000000000000000000000000000000000000003:before:5000000", - ]) - .args; - let mut config = ReceiptsLogPruneConfig::default(); - config.0.insert( - address!("0x0000000000000000000000000000000000000003"), - PruneMode::Before(5000000), - ); - assert_eq!(args.receipts_log_filter, Some(config)); - } - - #[test] - fn parse_receiptslogfilter() { - let default_args = PruningArgs::default(); - let args = CommandParser::::parse_from(["reth"]).args; - assert_eq!(args, default_args); - } - - #[test] - fn test_parse_receipts_log_filter() { - let filter1 = "0x0000000000000000000000000000000000000001:full"; - let filter2 = "0x0000000000000000000000000000000000000002:distance:1000"; - let filter3 = "0x0000000000000000000000000000000000000003:before:5000000"; - let filters = [filter1, filter2, filter3].join(","); - - // Args can be parsed. - let result = parse_receipts_log_filter(&filters); - assert!(result.is_ok()); - let config = result.unwrap(); - assert_eq!(config.0.len(), 3); - - // Check that the args were parsed correctly. 
- let addr1: Address = "0x0000000000000000000000000000000000000001".parse().unwrap(); - let addr2: Address = "0x0000000000000000000000000000000000000002".parse().unwrap(); - let addr3: Address = "0x0000000000000000000000000000000000000003".parse().unwrap(); - - assert_eq!(config.0.get(&addr1), Some(&PruneMode::Full)); - assert_eq!(config.0.get(&addr2), Some(&PruneMode::Distance(1000))); - assert_eq!(config.0.get(&addr3), Some(&PruneMode::Before(5000000))); - } - - #[test] - fn test_parse_receipts_log_filter_invalid_filter_format() { - let result = parse_receipts_log_filter("invalid_format"); - assert!(matches!(result, Err(ReceiptsLogError::InvalidFilterFormat(_)))); - } - - #[test] - fn test_parse_receipts_log_filter_invalid_address() { - let result = parse_receipts_log_filter("invalid_address:full"); - assert!(matches!(result, Err(ReceiptsLogError::InvalidAddress(_)))); - } - - #[test] - fn test_parse_receipts_log_filter_invalid_prune_mode() { - let result = - parse_receipts_log_filter("0x0000000000000000000000000000000000000000:invalid_mode"); - assert!(matches!(result, Err(ReceiptsLogError::InvalidPruneMode(_)))); - } - - #[test] - fn test_parse_receipts_log_filter_invalid_distance() { - let result = parse_receipts_log_filter( - "0x0000000000000000000000000000000000000000:distance:invalid_distance", - ); - assert!(matches!(result, Err(ReceiptsLogError::InvalidDistance(_)))); - } - - #[test] - fn test_parse_receipts_log_filter_invalid_block_number() { - let result = parse_receipts_log_filter( - "0x0000000000000000000000000000000000000000:before:invalid_block", - ); - assert!(matches!(result, Err(ReceiptsLogError::InvalidBlockNumber(_)))); - } -} diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index a2d82c26923..615a793bb89 100644 --- a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -24,7 +24,6 @@ reth-primitives-traits.workspace = true reth-static-file-types.workspace = true # ethereum -alloy-consensus.workspace = true alloy-eips.workspace = true # metrics diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index 78283710e15..f61aa6bd46d 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -43,7 +43,7 @@ impl PrunerBuilder { } /// Sets the configuration for every part of the data that can be pruned. - pub fn segments(mut self, segments: PruneModes) -> Self { + pub const fn segments(mut self, segments: PruneModes) -> Self { self.segments = segments; self } diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index dc175254453..f0f688a7c86 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -15,8 +15,8 @@ pub use static_file::{ use std::{fmt::Debug, ops::RangeInclusive}; use tracing::error; pub use user::{ - AccountHistory, MerkleChangeSets, Receipts as UserReceipts, ReceiptsByLogs, SenderRecovery, - StorageHistory, TransactionLookup, + AccountHistory, MerkleChangeSets, Receipts as UserReceipts, SenderRecovery, StorageHistory, + TransactionLookup, }; /// A segment represents a pruning of some portion of the data. 
diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 72847219b09..e551a8de9a1 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -1,6 +1,6 @@ use crate::segments::{ - AccountHistory, MerkleChangeSets, ReceiptsByLogs, Segment, SenderRecovery, StorageHistory, - TransactionLookup, UserReceipts, + AccountHistory, MerkleChangeSets, Segment, SenderRecovery, StorageHistory, TransactionLookup, + UserReceipts, }; use alloy_eips::eip2718::Encodable2718; use reth_db_api::{table::Value, transaction::DbTxMut}; @@ -61,6 +61,7 @@ where static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { + #[expect(deprecated)] let PruneModes { sender_recovery, transaction_lookup, @@ -69,7 +70,7 @@ where storage_history, bodies_history: _, merkle_changesets, - receipts_log_filter, + receipts_log_filter: (), } = prune_modes; Self::default() @@ -87,11 +88,6 @@ where .segment_opt(storage_history.map(StorageHistory::new)) // User receipts .segment_opt(receipts.map(UserReceipts::new)) - // Receipts by logs - .segment_opt( - (!receipts_log_filter.is_empty()) - .then(|| ReceiptsByLogs::new(receipts_log_filter.clone())), - ) // Transaction lookup .segment_opt(transaction_lookup.map(TransactionLookup::new)) // Sender recovery diff --git a/crates/prune/prune/src/segments/user/mod.rs b/crates/prune/prune/src/segments/user/mod.rs index c25bc6bc764..bdbc27f22f0 100644 --- a/crates/prune/prune/src/segments/user/mod.rs +++ b/crates/prune/prune/src/segments/user/mod.rs @@ -2,7 +2,6 @@ mod account_history; mod history; mod merkle_change_sets; mod receipts; -mod receipts_by_logs; mod sender_recovery; mod storage_history; mod transaction_lookup; @@ -10,7 +9,6 @@ mod transaction_lookup; pub use account_history::AccountHistory; pub use merkle_change_sets::MerkleChangeSets; pub use receipts::Receipts; -pub use receipts_by_logs::ReceiptsByLogs; pub use sender_recovery::SenderRecovery; pub use storage_history::StorageHistory; pub use transaction_lookup::TransactionLookup; diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs deleted file mode 100644 index 8fd6d1e73a5..00000000000 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ /dev/null @@ -1,364 +0,0 @@ -use crate::{ - db_ext::DbTxPruneExt, - segments::{PruneInput, Segment}, - PrunerError, -}; -use alloy_consensus::TxReceipt; -use reth_db_api::{table::Value, tables, transaction::DbTxMut}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{ - BlockReader, DBProvider, NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider, -}; -use reth_prune_types::{ - PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, SegmentOutput, - MINIMUM_PRUNING_DISTANCE, -}; -use tracing::{instrument, trace}; -#[derive(Debug)] -pub struct ReceiptsByLogs { - config: ReceiptsLogPruneConfig, -} - -impl ReceiptsByLogs { - pub const fn new(config: ReceiptsLogPruneConfig) -> Self { - Self { config } - } -} - -impl Segment for ReceiptsByLogs -where - Provider: DBProvider - + PruneCheckpointWriter - + TransactionsProvider - + BlockReader - + NodePrimitivesProvider>, -{ - fn segment(&self) -> PruneSegment { - PruneSegment::ContractLogs - } - - fn mode(&self) -> Option { - None - } - - fn purpose(&self) -> PrunePurpose { - PrunePurpose::User - } - - #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] - fn prune(&self, provider: &Provider, 
input: PruneInput) -> Result { - // Contract log filtering removes every receipt possible except the ones in the list. So, - // for the other receipts it's as if they had a `PruneMode::Distance()` of - // `MINIMUM_PRUNING_DISTANCE`. - let to_block = PruneMode::Distance(MINIMUM_PRUNING_DISTANCE) - .prune_target_block(input.to_block, PruneSegment::ContractLogs, PrunePurpose::User)? - .map(|(bn, _)| bn) - .unwrap_or_default(); - - // Get status checkpoint from latest run - let mut last_pruned_block = - input.previous_checkpoint.and_then(|checkpoint| checkpoint.block_number); - - let initial_last_pruned_block = last_pruned_block; - - let mut from_tx_number = match initial_last_pruned_block { - Some(block) => provider - .block_body_indices(block)? - .map(|block| block.last_tx_num() + 1) - .unwrap_or(0), - None => 0, - }; - - // Figure out what receipts have already been pruned, so we can have an accurate - // `address_filter` - let address_filter = self.config.group_by_block(input.to_block, last_pruned_block)?; - - // Splits all transactions in different block ranges. Each block range will have its own - // filter address list and will check it while going through the table - // - // Example: - // For an `address_filter` such as: - // { block9: [a1, a2], block20: [a3, a4, a5] } - // - // The following structures will be created in the exact order as showed: - // `block_ranges`: [ - // (block0, block8, 0 addresses), - // (block9, block19, 2 addresses), - // (block20, to_block, 5 addresses) - // ] - // `filtered_addresses`: [a1, a2, a3, a4, a5] - // - // The first range will delete all receipts between block0 - block8 - // The second range will delete all receipts between block9 - 19, except the ones with - // emitter logs from these addresses: [a1, a2]. - // The third range will delete all receipts between block20 - to_block, except the ones with - // emitter logs from these addresses: [a1, a2, a3, a4, a5] - let mut block_ranges = vec![]; - let mut blocks_iter = address_filter.iter().peekable(); - let mut filtered_addresses = vec![]; - - while let Some((start_block, addresses)) = blocks_iter.next() { - filtered_addresses.extend_from_slice(addresses); - - // This will clear all receipts before the first appearance of a contract log or since - // the block after the last pruned one. - if block_ranges.is_empty() { - let init = last_pruned_block.map(|b| b + 1).unwrap_or_default(); - if init < *start_block { - block_ranges.push((init, *start_block - 1, 0)); - } - } - - let end_block = - blocks_iter.peek().map(|(next_block, _)| *next_block - 1).unwrap_or(to_block); - - // Addresses in lower block ranges, are still included in the inclusion list for future - // ranges. - block_ranges.push((*start_block, end_block, filtered_addresses.len())); - } - - trace!( - target: "pruner", - ?block_ranges, - ?filtered_addresses, - "Calculated block ranges and filtered addresses", - ); - - let mut limiter = input.limiter; - - let mut done = true; - let mut pruned = 0; - let mut last_pruned_transaction = None; - for (start_block, end_block, num_addresses) in block_ranges { - let block_range = start_block..=end_block; - - // Calculate the transaction range from this block range - let tx_range_end = match provider.block_body_indices(end_block)? { - Some(body) => body.last_tx_num(), - None => { - trace!( - target: "pruner", - ?block_range, - "No receipts to prune." 
- ); - continue - } - }; - let tx_range = from_tx_number..=tx_range_end; - - // Delete receipts, except the ones in the inclusion list - let mut last_skipped_transaction = 0; - let deleted; - (deleted, done) = provider.tx_ref().prune_table_with_range::::Receipt, - >>( - tx_range, - &mut limiter, - |(tx_num, receipt)| { - let skip = num_addresses > 0 && - receipt.logs().iter().any(|log| { - filtered_addresses[..num_addresses].contains(&&log.address) - }); - - if skip { - last_skipped_transaction = *tx_num; - } - skip - }, - |row| last_pruned_transaction = Some(row.0), - )?; - - trace!(target: "pruner", %deleted, %done, ?block_range, "Pruned receipts"); - - pruned += deleted; - - // For accurate checkpoints we need to know that we have checked every transaction. - // Example: we reached the end of the range, and the last receipt is supposed to skip - // its deletion. - let last_pruned_transaction = *last_pruned_transaction - .insert(last_pruned_transaction.unwrap_or_default().max(last_skipped_transaction)); - - last_pruned_block = Some( - provider - .transaction_block(last_pruned_transaction)? - .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? - // If there's more receipts to prune, set the checkpoint block number to - // previous, so we could finish pruning its receipts on the - // next run. - .saturating_sub(if done { 0 } else { 1 }), - ); - - if limiter.is_limit_reached() { - done &= end_block == to_block; - break - } - - from_tx_number = last_pruned_transaction + 1; - } - - // If there are contracts using `PruneMode::Distance(_)` there will be receipts before - // `to_block` that become eligible to be pruned in future runs. Therefore, our checkpoint is - // not actually `to_block`, but the `lowest_block_with_distance` from any contract. - // This ensures that in future pruner runs we can prune all these receipts between the - // previous `lowest_block_with_distance` and the new one using - // `get_next_tx_num_range_from_checkpoint`. - // - // Only applies if we were able to prune everything intended for this run, otherwise the - // checkpoint is the `last_pruned_block`. - let prune_mode_block = self - .config - .lowest_block_with_distance(input.to_block, initial_last_pruned_block)? 
- .unwrap_or(to_block); - - provider.save_prune_checkpoint( - PruneSegment::ContractLogs, - PruneCheckpoint { - block_number: Some(prune_mode_block.min(last_pruned_block.unwrap_or(u64::MAX))), - tx_number: last_pruned_transaction, - prune_mode: PruneMode::Before(prune_mode_block), - }, - )?; - - let progress = limiter.progress(done); - - Ok(SegmentOutput { progress, pruned, checkpoint: None }) - } -} - -#[cfg(test)] -mod tests { - use crate::segments::{PruneInput, PruneLimiter, ReceiptsByLogs, Segment}; - use alloy_primitives::B256; - use assert_matches::assert_matches; - use reth_db_api::{cursor::DbCursorRO, tables, transaction::DbTx}; - use reth_primitives_traits::InMemorySize; - use reth_provider::{ - DBProvider, DatabaseProviderFactory, PruneCheckpointReader, TransactionsProvider, - }; - use reth_prune_types::{PruneMode, PruneSegment, ReceiptsLogPruneConfig}; - use reth_stages::test_utils::{StorageKind, TestStageDB}; - use reth_testing_utils::generators::{ - self, random_block_range, random_eoa_account, random_log, random_receipt, BlockRangeParams, - }; - use std::collections::BTreeMap; - - #[test] - fn prune_receipts_by_logs() { - reth_tracing::init_test_tracing(); - - let db = TestStageDB::default(); - let mut rng = generators::rng(); - - let tip = 20000; - let blocks = [ - random_block_range( - &mut rng, - 0..=100, - BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..5, ..Default::default() }, - ), - random_block_range( - &mut rng, - (100 + 1)..=(tip - 100), - BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, - ), - random_block_range( - &mut rng, - (tip - 100 + 1)..=tip, - BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..5, ..Default::default() }, - ), - ] - .concat(); - db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks"); - - let mut receipts = Vec::new(); - - let (deposit_contract_addr, _) = random_eoa_account(&mut rng); - for block in &blocks { - receipts.reserve_exact(block.body().size()); - for (txi, transaction) in block.body().transactions.iter().enumerate() { - let mut receipt = random_receipt(&mut rng, transaction, Some(1), None); - receipt.logs.push(random_log( - &mut rng, - (txi == (block.transaction_count() - 1)).then_some(deposit_contract_addr), - Some(1), - )); - receipts.push((receipts.len() as u64, receipt)); - } - } - db.insert_receipts(receipts).expect("insert receipts"); - - assert_eq!( - db.table::().unwrap().len(), - blocks.iter().map(|block| block.transaction_count()).sum::() - ); - assert_eq!( - db.table::().unwrap().len(), - db.table::().unwrap().len() - ); - - let run_prune = || { - let provider = db.factory.database_provider_rw().unwrap(); - - let prune_before_block: usize = 20; - let prune_mode = PruneMode::Before(prune_before_block as u64); - let receipts_log_filter = - ReceiptsLogPruneConfig(BTreeMap::from([(deposit_contract_addr, prune_mode)])); - - let limiter = PruneLimiter::default().set_deleted_entries_limit(10); - - let result = ReceiptsByLogs::new(receipts_log_filter).prune( - &provider, - PruneInput { - previous_checkpoint: db - .factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::ContractLogs) - .unwrap(), - to_block: tip, - limiter, - }, - ); - provider.commit().expect("commit"); - - assert_matches!(result, Ok(_)); - let output = result.unwrap(); - - let (pruned_block, pruned_tx) = db - .factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::ContractLogs) - .unwrap() - .map(|checkpoint| (checkpoint.block_number.unwrap(), 
checkpoint.tx_number.unwrap())) - .unwrap_or_default(); - - // All receipts are in the end of the block - let unprunable = pruned_block.saturating_sub(prune_before_block as u64 - 1); - - assert_eq!( - db.table::().unwrap().len(), - blocks.iter().map(|block| block.transaction_count()).sum::() - - ((pruned_tx + 1) - unprunable) as usize - ); - - output.progress.is_finished() - }; - - while !run_prune() {} - - let provider = db.factory.provider().unwrap(); - let mut cursor = provider.tx_ref().cursor_read::().unwrap(); - let walker = cursor.walk(None).unwrap(); - for receipt in walker { - let (tx_num, receipt) = receipt.unwrap(); - - // Either we only find our contract, or the receipt is part of the unprunable receipts - // set by tip - 128 - assert!( - receipt.logs.iter().any(|l| l.address == deposit_contract_addr) || - provider.transaction_block(tx_num).unwrap().unwrap() > tip - 128, - ); - } - } -} diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 315063278b2..a588693892a 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -18,10 +18,6 @@ mod pruner; mod segment; mod target; -use alloc::{collections::BTreeMap, vec::Vec}; -use alloy_primitives::{Address, BlockNumber}; -use core::ops::Deref; - pub use checkpoint::PruneCheckpoint; pub use event::PrunerEvent; pub use mode::PruneMode; @@ -31,300 +27,3 @@ pub use pruner::{ }; pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError}; pub use target::{PruneModes, UnwindTargetPrunedError, MINIMUM_PRUNING_DISTANCE}; - -/// Configuration for pruning receipts not associated with logs emitted by the specified contracts. -#[derive(Debug, Clone, PartialEq, Eq, Default)] -#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] -pub struct ReceiptsLogPruneConfig(pub BTreeMap); - -impl ReceiptsLogPruneConfig { - /// Checks if the configuration is empty - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Given the `tip` block number, consolidates the structure so it can easily be queried for - /// filtering across a range of blocks. - /// - /// Example: - /// - /// `{ addrA: Before(872), addrB: Before(500), addrC: Distance(128) }` - /// - /// for `tip: 1000`, gets transformed to a map such as: - /// - /// `{ 500: [addrB], 872: [addrA, addrC] }` - /// - /// The [`BlockNumber`] key of the new map should be viewed as `PruneMode::Before(block)`, which - /// makes the previous result equivalent to - /// - /// `{ Before(500): [addrB], Before(872): [addrA, addrC] }` - pub fn group_by_block( - &self, - tip: BlockNumber, - pruned_block: Option, - ) -> Result>, PruneSegmentError> { - let mut map = BTreeMap::new(); - let base_block = pruned_block.unwrap_or_default() + 1; - - for (address, mode) in &self.0 { - // Getting `None`, means that there is nothing to prune yet, so we need it to include in - // the BTreeMap (block = 0), otherwise it will be excluded. - // Reminder that this BTreeMap works as an inclusion list that excludes (prunes) all - // other receipts. - // - // Reminder, that we increment because the [`BlockNumber`] key of the new map should be - // viewed as `PruneMode::Before(block)` - let block = base_block.max( - mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)? - .map(|(block, _)| block) - .unwrap_or_default() + - 1, - ); - - map.entry(block).or_insert_with(Vec::new).push(address) - } - Ok(map) - } - - /// Returns the lowest block where we start filtering logs which use `PruneMode::Distance(_)`. 
- pub fn lowest_block_with_distance( - &self, - tip: BlockNumber, - pruned_block: Option, - ) -> Result, PruneSegmentError> { - let pruned_block = pruned_block.unwrap_or_default(); - let mut lowest = None; - - for mode in self.values() { - if mode.is_distance() && - let Some((block, _)) = - mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)? - { - lowest = Some(lowest.unwrap_or(u64::MAX).min(block)); - } - } - - Ok(lowest.map(|lowest| lowest.max(pruned_block))) - } -} - -impl Deref for ReceiptsLogPruneConfig { - type Target = BTreeMap; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_group_by_block_empty_config() { - let config = ReceiptsLogPruneConfig(BTreeMap::new()); - let tip = 1000; - let pruned_block = None; - - let result = config.group_by_block(tip, pruned_block).unwrap(); - assert!(result.is_empty(), "The result should be empty when the config is empty"); - } - - #[test] - fn test_group_by_block_single_entry() { - let mut config_map = BTreeMap::new(); - let address = Address::new([1; 20]); - let prune_mode = PruneMode::Before(500); - config_map.insert(address, prune_mode); - - let config = ReceiptsLogPruneConfig(config_map); - // Big tip to have something to prune for the target block - let tip = 3000000; - let pruned_block = Some(400); - - let result = config.group_by_block(tip, pruned_block).unwrap(); - - // Expect one entry with block 500 and the corresponding address - assert_eq!(result.len(), 1); - assert_eq!(result[&500], vec![&address], "Address should be grouped under block 500"); - - // Tip smaller than the target block, so that we have nothing to prune for the block - let tip = 300; - let pruned_block = Some(400); - - let result = config.group_by_block(tip, pruned_block).unwrap(); - - // Expect one entry with block 400 and the corresponding address - assert_eq!(result.len(), 1); - assert_eq!(result[&401], vec![&address], "Address should be grouped under block 400"); - } - - #[test] - fn test_group_by_block_multiple_entries() { - let mut config_map = BTreeMap::new(); - let address1 = Address::new([1; 20]); - let address2 = Address::new([2; 20]); - let prune_mode1 = PruneMode::Before(600); - let prune_mode2 = PruneMode::Before(800); - config_map.insert(address1, prune_mode1); - config_map.insert(address2, prune_mode2); - - let config = ReceiptsLogPruneConfig(config_map); - let tip = 900000; - let pruned_block = Some(400); - - let result = config.group_by_block(tip, pruned_block).unwrap(); - - // Expect two entries: one for block 600 and another for block 800 - assert_eq!(result.len(), 2); - assert_eq!(result[&600], vec![&address1], "Address1 should be grouped under block 600"); - assert_eq!(result[&800], vec![&address2], "Address2 should be grouped under block 800"); - } - - #[test] - fn test_group_by_block_with_distance_prune_mode() { - let mut config_map = BTreeMap::new(); - let address = Address::new([1; 20]); - let prune_mode = PruneMode::Distance(100000); - config_map.insert(address, prune_mode); - - let config = ReceiptsLogPruneConfig(config_map); - let tip = 100100; - // Pruned block is smaller than the target block - let pruned_block = Some(50); - - let result = config.group_by_block(tip, pruned_block).unwrap(); - - // Expect the entry to be grouped under block 100 (tip - distance) - assert_eq!(result.len(), 1); - assert_eq!(result[&101], vec![&address], "Address should be grouped under block 100"); - - let tip = 100100; - // Pruned block is larger than the target 
block - let pruned_block = Some(800); - - let result = config.group_by_block(tip, pruned_block).unwrap(); - - // Expect the entry to be grouped under block 800 which is larger than tip - distance - assert_eq!(result.len(), 1); - assert_eq!(result[&801], vec![&address], "Address should be grouped under block 800"); - } - - #[test] - fn test_lowest_block_with_distance_empty_config() { - let config = ReceiptsLogPruneConfig(BTreeMap::new()); - let tip = 1000; - let pruned_block = None; - - let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); - assert_eq!(result, None, "The result should be None when the config is empty"); - } - - #[test] - fn test_lowest_block_with_distance_no_distance_mode() { - let mut config_map = BTreeMap::new(); - let address = Address::new([1; 20]); - let prune_mode = PruneMode::Before(500); - config_map.insert(address, prune_mode); - - let config = ReceiptsLogPruneConfig(config_map); - let tip = 1000; - let pruned_block = None; - - let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); - assert_eq!(result, None, "The result should be None when there are no Distance modes"); - } - - #[test] - fn test_lowest_block_with_distance_single_entry() { - let mut config_map = BTreeMap::new(); - let address = Address::new([1; 20]); - let prune_mode = PruneMode::Distance(100000); - config_map.insert(address, prune_mode); - - let config = ReceiptsLogPruneConfig(config_map); - - let tip = 100100; - let pruned_block = Some(400); - - // Expect the lowest block to be 400 as 400 > 100100 - 100000 (tip - distance) - assert_eq!( - config.lowest_block_with_distance(tip, pruned_block).unwrap(), - Some(400), - "The lowest block should be 400" - ); - - let tip = 100100; - let pruned_block = Some(50); - - // Expect the lowest block to be 100 as 100 > 50 (pruned block) - assert_eq!( - config.lowest_block_with_distance(tip, pruned_block).unwrap(), - Some(100), - "The lowest block should be 100" - ); - } - - #[test] - fn test_lowest_block_with_distance_multiple_entries_last() { - let mut config_map = BTreeMap::new(); - let address1 = Address::new([1; 20]); - let address2 = Address::new([2; 20]); - let prune_mode1 = PruneMode::Distance(100100); - let prune_mode2 = PruneMode::Distance(100300); - config_map.insert(address1, prune_mode1); - config_map.insert(address2, prune_mode2); - - let config = ReceiptsLogPruneConfig(config_map); - let tip = 200300; - let pruned_block = Some(100); - - // The lowest block should be 200300 - 100300 = 100000: - // - First iteration will return 100200 => 200300 - 100100 = 100200 - // - Second iteration will return 100000 => 200300 - 100300 = 100000 < 100200 - // - Final result is 100000 - assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); - } - - #[test] - fn test_lowest_block_with_distance_multiple_entries_first() { - let mut config_map = BTreeMap::new(); - let address1 = Address::new([1; 20]); - let address2 = Address::new([2; 20]); - let prune_mode1 = PruneMode::Distance(100400); - let prune_mode2 = PruneMode::Distance(100300); - config_map.insert(address1, prune_mode1); - config_map.insert(address2, prune_mode2); - - let config = ReceiptsLogPruneConfig(config_map); - let tip = 200300; - let pruned_block = Some(100); - - // The lowest block should be 200300 - 100400 = 99900: - // - First iteration, lowest block is 200300 - 100400 = 99900 - // - Second iteration, lowest block is still 99900 < 200300 - 100300 = 100000 - // - Final result is 99900 - 
assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(99900)); - } - - #[test] - fn test_lowest_block_with_distance_multiple_entries_pruned_block() { - let mut config_map = BTreeMap::new(); - let address1 = Address::new([1; 20]); - let address2 = Address::new([2; 20]); - let prune_mode1 = PruneMode::Distance(100400); - let prune_mode2 = PruneMode::Distance(100300); - config_map.insert(address1, prune_mode1); - config_map.insert(address2, prune_mode2); - - let config = ReceiptsLogPruneConfig(config_map); - let tip = 200300; - let pruned_block = Some(100000); - - // The lowest block should be 100000 because: - // - Lowest is 200300 - 100400 = 99900 < 200300 - 100300 = 100000 - // - Lowest is compared to the pruned block 100000: 100000 > 99900 - // - Finally the lowest block is 100000 - assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); - } -} diff --git a/crates/prune/types/src/target.rs b/crates/prune/types/src/target.rs index 3ff18554a9b..bb61c006cdc 100644 --- a/crates/prune/types/src/target.rs +++ b/crates/prune/types/src/target.rs @@ -2,7 +2,7 @@ use alloy_primitives::BlockNumber; use derive_more::Display; use thiserror::Error; -use crate::{PruneCheckpoint, PruneMode, PruneSegment, ReceiptsLogPruneConfig}; +use crate::{PruneCheckpoint, PruneMode, PruneSegment}; /// Minimum distance from the tip necessary for the node to work correctly: /// 1. Minimum 2 epochs (32 blocks per epoch) required to handle any reorg according to the @@ -99,16 +99,10 @@ pub struct PruneModes { ) )] pub merkle_changesets: PruneMode, - /// Receipts pruning configuration by retaining only those receipts that contain logs emitted - /// by the specified addresses, discarding others. This setting is overridden by `receipts`. - /// - /// The [`BlockNumber`](`crate::BlockNumber`) represents the starting block from which point - /// onwards the receipts are preserved. - #[cfg_attr( - any(test, feature = "serde"), - serde(skip_serializing_if = "ReceiptsLogPruneConfig::is_empty") - )] - pub receipts_log_filter: ReceiptsLogPruneConfig, + /// Receipts log filtering has been deprecated and will be removed in a future release. + #[deprecated] + #[cfg_attr(any(test, feature = "serde"), serde(skip))] + pub receipts_log_filter: (), } impl Default for PruneModes { @@ -121,14 +115,15 @@ impl Default for PruneModes { storage_history: None, bodies_history: None, merkle_changesets: default_merkle_changesets_mode(), - receipts_log_filter: ReceiptsLogPruneConfig::default(), + #[expect(deprecated)] + receipts_log_filter: (), } } } impl PruneModes { /// Sets pruning to all targets. - pub fn all() -> Self { + pub const fn all() -> Self { Self { sender_recovery: Some(PruneMode::Full), transaction_lookup: Some(PruneMode::Full), @@ -137,13 +132,14 @@ impl PruneModes { storage_history: Some(PruneMode::Full), bodies_history: Some(PruneMode::Full), merkle_changesets: PruneMode::Full, - receipts_log_filter: Default::default(), + #[expect(deprecated)] + receipts_log_filter: (), } } /// Returns whether there is any kind of receipt pruning configuration. 
- pub fn has_receipts_pruning(&self) -> bool { - self.receipts.is_some() || !self.receipts_log_filter.is_empty() + pub const fn has_receipts_pruning(&self) -> bool { + self.receipts.is_some() } /// Returns an error if we can't unwind to the targeted block because the target block is diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 1666e79baf3..adfc87c5ccc 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -660,7 +660,7 @@ where mod tests { use super::*; use crate::{stages::MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD, test_utils::TestStageDB}; - use alloy_primitives::{address, hex_literal::hex, keccak256, Address, B256, U256}; + use alloy_primitives::{address, hex_literal::hex, keccak256, B256, U256}; use alloy_rlp::Decodable; use assert_matches::assert_matches; use reth_chainspec::ChainSpecBuilder; @@ -677,9 +677,7 @@ mod tests { DatabaseProviderFactory, ReceiptProvider, StaticFileProviderFactory, }; use reth_prune::PruneModes; - use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig}; use reth_stages_api::StageUnitCheckpoint; - use std::collections::BTreeMap; fn stage() -> ExecutionStage { let evm_config = @@ -896,20 +894,11 @@ mod tests { // If there is a pruning configuration, then it's forced to use the database. // This way we test both cases. let modes = [None, Some(PruneModes::default())]; - let random_filter = ReceiptsLogPruneConfig(BTreeMap::from([( - Address::random(), - PruneMode::Distance(100000), - )])); // Tests node with database and node with static files - for mut mode in modes { + for mode in modes { let mut provider = factory.database_provider_rw().unwrap(); - if let Some(mode) = &mut mode { - // Simulating a full node where we write receipts to database - mode.receipts_log_filter = random_filter.clone(); - } - let mut execution_stage = stage(); provider.set_prune_modes(mode.clone().unwrap_or_default()); @@ -1033,18 +1022,9 @@ mod tests { // If there is a pruning configuration, then it's forced to use the database. // This way we test both cases. 
let modes = [None, Some(PruneModes::default())]; - let random_filter = ReceiptsLogPruneConfig(BTreeMap::from([( - Address::random(), - PruneMode::Before(100000), - )])); // Tests node with database and node with static files - for mut mode in modes { - if let Some(mode) = &mut mode { - // Simulating a full node where we write receipts to database - mode.receipts_log_filter = random_filter.clone(); - } - + for mode in modes { // Test Execution let mut execution_stage = stage(); provider.set_prune_modes(mode.clone().unwrap_or_default()); diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index b6d205a42e1..185fbf7c498 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -207,19 +207,17 @@ where headers: finalized_block_numbers.headers.and_then(|finalized_block_number| { self.get_static_file_target(highest_static_files.headers, finalized_block_number) }), - // StaticFile receipts only if they're not pruned according to the user configuration - receipts: if self.prune_modes.receipts.is_none() && - self.prune_modes.receipts_log_filter.is_empty() - { - finalized_block_numbers.receipts.and_then(|finalized_block_number| { + receipts: finalized_block_numbers + .receipts + // StaticFile receipts only if they're not pruned according to the user + // configuration + .filter(|_| !self.prune_modes.has_receipts_pruning()) + .and_then(|finalized_block_number| { self.get_static_file_target( highest_static_files.receipts, finalized_block_number, ) - }) - } else { - None - }, + }), transactions: finalized_block_numbers.transactions.and_then(|finalized_block_number| { self.get_static_file_target( highest_static_files.transactions, diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 873b10b0cfc..5d3b5280cda 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -96,7 +96,7 @@ impl ProviderFactory { } /// Sets the pruning configuration for an existing [`ProviderFactory`]. - pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + pub const fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { self.prune_modes = prune_modes; self } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 31e87b46e62..d5e49d822b2 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -22,7 +22,7 @@ use crate::{ }; use alloy_consensus::{ transaction::{SignerRecoverable, TransactionMeta, TxHashRef}, - BlockHeader, TxReceipt, + BlockHeader, }; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{ @@ -214,7 +214,7 @@ impl DatabaseProvider { #[cfg(feature = "test-utils")] /// Sets the prune modes for provider. 
- pub fn set_prune_modes(&mut self, prune_modes: PruneModes) { + pub const fn set_prune_modes(&mut self, prune_modes: PruneModes) { self.prune_modes = prune_modes; } } @@ -1621,20 +1621,11 @@ impl StateWriter .then(|| self.static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)) .transpose()?; - let has_contract_log_filter = !self.prune_modes.receipts_log_filter.is_empty(); - let contract_log_pruner = self.prune_modes.receipts_log_filter.group_by_block(tip, None)?; - // All receipts from the last 128 blocks are required for blockchain tree, even with // [`PruneSegment::ContractLogs`]. let prunable_receipts = PruneMode::Distance(MINIMUM_PRUNING_DISTANCE).should_prune(first_block, tip); - // Prepare set of addresses which logs should not be pruned. - let mut allowed_addresses: HashSet = HashSet::new(); - for (_, addresses) in contract_log_pruner.range(..first_block) { - allowed_addresses.extend(addresses.iter().copied()); - } - for (idx, (receipts, first_tx_index)) in execution_outcome.receipts.iter().zip(block_indices).enumerate() { @@ -1654,21 +1645,8 @@ impl StateWriter continue } - // If there are new addresses to retain after this block number, track them - if let Some(new_addresses) = contract_log_pruner.get(&block_number) { - allowed_addresses.extend(new_addresses.iter().copied()); - } - for (idx, receipt) in receipts.iter().enumerate() { let receipt_idx = first_tx_index + idx as u64; - // Skip writing receipt if log filter is active and it does not have any logs to - // retain - if prunable_receipts && - has_contract_log_filter && - !receipt.logs().iter().any(|log| allowed_addresses.contains(&log.address)) - { - continue - } if let Some(writer) = &mut receipts_static_writer { writer.append_receipt(receipt_idx, receipt)?; diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 30a2d3edffb..3fc6988dc69 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -776,9 +776,6 @@ Pruning: --prune.receipts.before Prune receipts before the specified block number. The specified block number is not pruned - --prune.receiptslogfilter - Configure receipts log filter. Format: <`address`>:<`prune_mode`>... 
where <`prune_mode`> can be 'full', 'distance:<`blocks`>', or 'before:<`block_number`>' - --prune.accounthistory.full Prunes all account history From 0ea75f5edf0bb18c2a1efdc4ee296a03f1c2dce0 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 22 Oct 2025 15:21:59 +0400 Subject: [PATCH 152/371] fix: small features fix (#19212) --- crates/ethereum/primitives/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/ethereum/primitives/Cargo.toml b/crates/ethereum/primitives/Cargo.toml index efa8b945f95..3bf9e8f3a48 100644 --- a/crates/ethereum/primitives/Cargo.toml +++ b/crates/ethereum/primitives/Cargo.toml @@ -73,6 +73,7 @@ reth-codec = [ "dep:reth-zstd-compressors", ] arbitrary = [ + "std", "dep:arbitrary", "alloy-consensus/arbitrary", "alloy-consensus/k256", From bb620736b9fb37dfa5daa2f21073b9682a207b26 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 22 Oct 2025 13:29:00 +0200 Subject: [PATCH 153/371] perf: check prewarm termination multiple times (#19214) --- .../src/tree/payload_processor/prewarm.rs | 26 ++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index de8a88a167b..ca9ff7f47e0 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -233,8 +233,19 @@ where }); } + /// Returns true if prewarming was terminated and no more transactions should be prewarmed. + fn is_execution_terminated(&self) -> bool { + self.ctx.terminate_execution.load(Ordering::Relaxed) + } + /// If configured and the tx returned proof targets, emit the targets the transaction produced fn send_multi_proof_targets(&self, targets: Option) { + if self.is_execution_terminated() { + // if execution is already terminated then we dont need to send more proof fetch + // messages + return + } + if let Some((proof_targets, to_multi_proof)) = targets.zip(self.to_multi_proof.as_ref()) { let _ = to_multi_proof.send(MultiProofMessage::PrefetchProofs(proof_targets)); } @@ -308,6 +319,7 @@ where match event { PrewarmTaskEvent::TerminateTransactionExecution => { // stop tx processing + debug!(target: "engine::tree::prewarm", "Terminating prewarm execution"); self.ctx.terminate_execution.store(true, Ordering::Relaxed); } PrewarmTaskEvent::Outcome { proof_targets } => { @@ -338,7 +350,7 @@ where } } - trace!(target: "engine::tree::prewarm", "Completed prewarm execution"); + debug!(target: "engine::tree::prewarm", "Completed prewarm execution"); // save caches and finish if let Some(Some(state)) = final_block_output { @@ -460,6 +472,9 @@ where debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm tx", index, tx_hash=%tx.tx().tx_hash()) .entered(); + // create the tx env + let start = Instant::now(); + // If the task was cancelled, stop execution, send an empty result to notify the task, // and exit. if terminate_execution.load(Ordering::Relaxed) { @@ -467,8 +482,6 @@ where break } - // create the tx env - let start = Instant::now(); let res = match evm.transact(&tx) { Ok(res) => res, Err(err) => { @@ -489,6 +502,13 @@ where drop(_enter); + // If the task was cancelled, stop execution, send an empty result to notify the task, + // and exit. 
+ if terminate_execution.load(Ordering::Relaxed) { + let _ = sender.send(PrewarmTaskEvent::Outcome { proof_targets: None }); + break + } + // Only send outcome for transactions after the first txn // as the main execution will be just as fast if index > 0 { From 56d8cea93915863468bbcafd025b80bca220ea61 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 22 Oct 2025 13:40:56 +0200 Subject: [PATCH 154/371] chore: only alloc required capacity (#19217) --- crates/engine/tree/src/tree/payload_processor/prewarm.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index ca9ff7f47e0..e57a2aeaa86 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -147,9 +147,6 @@ where let (done_tx, done_rx) = mpsc::channel(); let mut executing = 0usize; - // Initialize worker handles container - let mut handles = Vec::with_capacity(max_concurrency); - // When transaction_count_hint is 0, it means the count is unknown. In this case, spawn // max workers to handle potentially many transactions in parallel rather // than bottlenecking on a single worker. @@ -159,6 +156,9 @@ where transaction_count_hint.min(max_concurrency) }; + // Initialize worker handles container + let mut handles = Vec::with_capacity(workers_needed); + // Only spawn initial workers as needed for i in 0..workers_needed { handles.push(ctx.spawn_worker(i, &executor, actions_tx.clone(), done_tx.clone())); From 7a98145defadcc656d33b17902d7f7ba024036d8 Mon Sep 17 00:00:00 2001 From: greg <82421016+greged93@users.noreply.github.com> Date: Wed, 22 Oct 2025 13:58:01 +0200 Subject: [PATCH 155/371] fix: captured impl trait lifetime (#19216) Signed-off-by: Gregory Edison --- crates/node/builder/src/node.rs | 10 +++++++--- crates/rpc/rpc-builder/src/auth.rs | 4 +++- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index ca44ad9523d..1cc50c4ba6f 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -179,14 +179,16 @@ where /// Returns the [`EngineApiClient`] interface for the authenticated engine API. /// /// This will send authenticated http requests to the node's auth server. - pub fn engine_http_client(&self) -> impl EngineApiClient { + pub fn engine_http_client(&self) -> impl EngineApiClient + use { self.auth_server_handle().http_client() } /// Returns the [`EngineApiClient`] interface for the authenticated engine API. /// /// This will send authenticated ws requests to the node's auth server. - pub async fn engine_ws_client(&self) -> impl EngineApiClient { + pub async fn engine_ws_client( + &self, + ) -> impl EngineApiClient + use { self.auth_server_handle().ws_client().await } @@ -194,7 +196,9 @@ where /// /// This will send not authenticated IPC requests to the node's auth server. #[cfg(unix)] - pub async fn engine_ipc_client(&self) -> Option> { + pub async fn engine_ipc_client( + &self, + ) -> Option + use> { self.auth_server_handle().ipc_client().await } } diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 777081a7e6f..0d0a6165ff7 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -354,7 +354,9 @@ impl AuthServerHandle { /// Returns a http client connected to the server. /// /// This client uses the JWT token to authenticate requests. 
- pub fn http_client(&self) -> impl SubscriptionClientT + Clone + Send + Sync + Unpin + 'static { + pub fn http_client( + &self, + ) -> impl SubscriptionClientT + use<> + Clone + Send + Sync + Unpin + 'static { // Create a middleware that adds a new JWT token to every request. let secret_layer = AuthClientLayer::new(self.secret); let middleware = tower::ServiceBuilder::default().layer(secret_layer); From 35b28ea54362a7907e8e847e291bed36c8de1bb0 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Wed, 22 Oct 2025 14:30:26 +0200 Subject: [PATCH 156/371] fix: OverlayStateProviderFactory: validating trie changeset range and revert target (#19207) --- .../provider/src/providers/state/overlay.rs | 79 ++++++++++++++----- crates/storage/provider/src/traits/full.rs | 16 ++-- crates/trie/common/src/hashed_state.rs | 17 ++++ crates/trie/common/src/updates.rs | 16 ++++ 4 files changed, 103 insertions(+), 25 deletions(-) diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 046072ef5fe..98bd17aa4f9 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -1,8 +1,11 @@ use alloy_primitives::{BlockNumber, B256}; use reth_db_api::DatabaseError; use reth_errors::ProviderError; +use reth_prune_types::PruneSegment; use reth_stages_types::StageId; -use reth_storage_api::{DBProvider, DatabaseProviderFactory, StageCheckpointReader, TrieReader}; +use reth_storage_api::{ + DBProvider, DatabaseProviderFactory, PruneCheckpointReader, StageCheckpointReader, TrieReader, +}; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, @@ -13,6 +16,7 @@ use reth_trie_db::{ DatabaseHashedCursorFactory, DatabaseHashedPostState, DatabaseTrieCursorFactory, }; use std::sync::Arc; +use tracing::debug; /// Factory for creating overlay state providers with optional reverts and overlays. /// @@ -33,26 +37,31 @@ pub struct OverlayStateProviderFactory { impl OverlayStateProviderFactory where F: DatabaseProviderFactory, - F::Provider: Clone + TrieReader + StageCheckpointReader, + F::Provider: Clone + TrieReader + StageCheckpointReader + PruneCheckpointReader, { /// Create a new overlay state provider factory pub const fn new(factory: F) -> Self { Self { factory, block_number: None, trie_overlay: None, hashed_state_overlay: None } } - /// Set the block number for collecting reverts + /// Set the block number for collecting reverts. All state will be reverted to the point + /// _after_ this block has been processed. pub const fn with_block_number(mut self, block_number: Option) -> Self { self.block_number = block_number; self } - /// Set the trie overlay + /// Set the trie overlay. + /// + /// This overlay will be applied on top of any reverts applied via `with_block_number`. pub fn with_trie_overlay(mut self, trie_overlay: Option>) -> Self { self.trie_overlay = trie_overlay; self } /// Set the hashed state overlay + /// + /// This overlay will be applied on top of any reverts applied via `with_block_number`. pub fn with_hashed_state_overlay( mut self, hashed_state_overlay: Option>, @@ -64,25 +73,47 @@ where /// Validates that there are sufficient changesets to revert to the requested block number. /// /// Returns an error if the `MerkleChangeSets` checkpoint doesn't cover the requested block. 
+ /// Takes into account both the stage checkpoint and the prune checkpoint to determine the + /// available data range. fn validate_changesets_availability( &self, provider: &F::Provider, requested_block: BlockNumber, ) -> Result<(), ProviderError> { - // Get the MerkleChangeSets stage checkpoint - let errors propagate as-is - let checkpoint = provider.get_stage_checkpoint(StageId::MerkleChangeSets)?; - - // If there's no checkpoint at all or block range details are missing, we can't revert - let available_range = checkpoint - .and_then(|chk| { - chk.merkle_changesets_stage_checkpoint() - .map(|stage_chk| stage_chk.block_range.from..=chk.block_number) - }) - .ok_or_else(|| ProviderError::InsufficientChangesets { - requested: requested_block, - available: 0..=0, + // Get the MerkleChangeSets stage and prune checkpoints. + let stage_checkpoint = provider.get_stage_checkpoint(StageId::MerkleChangeSets)?; + let prune_checkpoint = provider.get_prune_checkpoint(PruneSegment::MerkleChangeSets)?; + + // Get the upper bound from stage checkpoint + let upper_bound = + stage_checkpoint.as_ref().map(|chk| chk.block_number).ok_or_else(|| { + ProviderError::InsufficientChangesets { + requested: requested_block, + available: 0..=0, + } })?; + // Extract a possible lower bound from stage checkpoint if available + let stage_lower_bound = stage_checkpoint.as_ref().and_then(|chk| { + chk.merkle_changesets_stage_checkpoint().map(|stage_chk| stage_chk.block_range.from) + }); + + // Extract a possible lower bound from prune checkpoint if available + // The prune checkpoint's block_number is the highest pruned block, so data is available + // starting from the next block + let prune_lower_bound = + prune_checkpoint.and_then(|chk| chk.block_number.map(|block| block + 1)); + + // Use the higher of the two lower bounds (or error if neither is available) + let Some(lower_bound) = stage_lower_bound.max(prune_lower_bound) else { + return Err(ProviderError::InsufficientChangesets { + requested: requested_block, + available: 0..=upper_bound, + }) + }; + + let available_range = lower_bound..=upper_bound; + // Check if the requested block is within the available range if !available_range.contains(&requested_block) { return Err(ProviderError::InsufficientChangesets { @@ -105,11 +136,13 @@ where self.validate_changesets_availability(&provider, from_block)?; // Collect trie reverts - let mut trie_updates_mut = provider.trie_reverts(from_block)?; + let mut trie_updates_mut = provider.trie_reverts(from_block + 1)?; // Collect state reverts using HashedPostState::from_reverts - let reverted_state = - HashedPostState::from_reverts::(provider.tx_ref(), from_block..)?; + let reverted_state = HashedPostState::from_reverts::( + provider.tx_ref(), + from_block + 1.., + )?; let mut hashed_state_mut = reverted_state.into_sorted(); // Extend with overlays if provided @@ -121,6 +154,14 @@ where hashed_state_mut.extend_ref(hashed_state_overlay); } + debug!( + target: "providers::state::overlay", + ?from_block, + num_trie_updates = ?trie_updates_mut.total_len(), + num_state_updates = ?hashed_state_mut.total_len(), + "Reverted to target block", + ); + (Arc::new(trie_updates_mut), Arc::new(hashed_state_mut)) } else { // If no block_number, use overlays directly or defaults diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 710ca9400ed..6fe88a6640a 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -2,8 +2,8 @@ use crate::{ 
AccountReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, - DatabaseProviderFactory, HashedPostStateProvider, StageCheckpointReader, StateProviderFactory, - StateReader, StaticFileProviderFactory, TrieReader, + DatabaseProviderFactory, HashedPostStateProvider, PruneCheckpointReader, StageCheckpointReader, + StateProviderFactory, StateReader, StaticFileProviderFactory, TrieReader, }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; @@ -12,8 +12,10 @@ use std::fmt::Debug; /// Helper trait to unify all provider traits for simplicity. pub trait FullProvider: - DatabaseProviderFactory - + NodePrimitivesProvider + DatabaseProviderFactory< + DB = N::DB, + Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, + > + NodePrimitivesProvider + StaticFileProviderFactory + BlockReaderIdExt< Transaction = TxTy, @@ -37,8 +39,10 @@ pub trait FullProvider: } impl FullProvider for T where - T: DatabaseProviderFactory - + NodePrimitivesProvider + T: DatabaseProviderFactory< + DB = N::DB, + Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, + > + NodePrimitivesProvider + StaticFileProviderFactory + BlockReaderIdExt< Transaction = TxTy, diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index 27c2807ad2a..8fb994daddd 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -486,6 +486,13 @@ impl HashedPostStateSorted { &self.storages } + /// Returns the total number of updates including all accounts and storage updates. + pub fn total_len(&self) -> usize { + self.accounts.accounts.len() + + self.accounts.destroyed_accounts.len() + + self.storages.values().map(|storage| storage.len()).sum::() + } + /// Extends this state with contents of another sorted state. /// Entries in `other` take precedence for duplicate keys. pub fn extend_ref(&mut self, other: &Self) { @@ -568,6 +575,16 @@ impl HashedStorageSorted { .sorted_by_key(|entry| *entry.0) } + /// Returns the total number of storage slot updates. + pub fn len(&self) -> usize { + self.non_zero_valued_slots.len() + self.zero_valued_slots.len() + } + + /// Returns `true` if there are no storage slot updates. + pub fn is_empty(&self) -> bool { + self.non_zero_valued_slots.is_empty() && self.zero_valued_slots.is_empty() + } + /// Extends this storage with contents of another sorted storage. /// Entries in `other` take precedence for duplicate keys. pub fn extend_ref(&mut self, other: &Self) { diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index 00a160c4f9f..e3e098ac8e5 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -453,6 +453,12 @@ impl TrieUpdatesSorted { &self.storage_tries } + /// Returns the total number of updates including account nodes and all storage updates. + pub fn total_len(&self) -> usize { + self.account_nodes.len() + + self.storage_tries.values().map(|storage| storage.len()).sum::() + } + /// Extends the trie updates with another set of sorted updates. /// /// This merges the account nodes and storage tries from `other` into `self`. @@ -535,6 +541,16 @@ impl StorageTrieUpdatesSorted { &self.storage_nodes } + /// Returns the total number of storage node updates. + pub const fn len(&self) -> usize { + self.storage_nodes.len() + } + + /// Returns `true` if there are no storage node updates. 
+ pub const fn is_empty(&self) -> bool { + self.storage_nodes.is_empty() + } + /// Extends the storage trie updates with another set of sorted updates. /// /// If `other` is marked as deleted, this will be marked as deleted and all nodes cleared. From 712569d4ce30d9f65cafffdbbea04ee1c1ab4fe9 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Wed, 22 Oct 2025 15:04:16 +0200 Subject: [PATCH 157/371] feat: warning log when blocked on execution cache (#19222) --- .../tree/src/tree/payload_processor/mod.rs | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index cdac92ed675..8ab186dea5b 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -40,10 +40,13 @@ use reth_trie_sparse::{ ClearedSparseStateTrie, SparseStateTrie, SparseTrie, }; use reth_trie_sparse_parallel::{ParallelSparseTrie, ParallelismThresholds}; -use std::sync::{ - atomic::AtomicBool, - mpsc::{self, channel, Sender}, - Arc, +use std::{ + sync::{ + atomic::AtomicBool, + mpsc::{self, channel, Sender}, + Arc, + }, + time::Instant, }; use tracing::{debug, debug_span, instrument, warn}; @@ -596,8 +599,16 @@ impl ExecutionCache { /// A cache is considered available when: /// - It exists and matches the requested parent hash /// - No other tasks are currently using it (checked via Arc reference count) + #[instrument(level = "debug", target = "engine::tree::payload_processor", skip(self))] pub(crate) fn get_cache_for(&self, parent_hash: B256) -> Option { + let start = Instant::now(); let cache = self.inner.read(); + + let elapsed = start.elapsed(); + if elapsed.as_millis() > 5 { + warn!(blocked_for=?elapsed, "Blocked waiting for execution cache mutex"); + } + cache .as_ref() .filter(|c| c.executed_block_hash() == parent_hash && c.is_available()) From 47dc43287f1872c18c9e9b2ee08c4f3ecbfbab23 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Wed, 22 Oct 2025 15:27:03 +0200 Subject: [PATCH 158/371] fix(reth-bench): Lower block channel capacity and make it configurable (#19226) --- bin/reth-bench/src/bench/new_payload_fcu.rs | 24 +++++++++- bin/reth-bench/src/bench/new_payload_only.rs | 47 +++++++++++++++++--- 2 files changed, 65 insertions(+), 6 deletions(-) diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index 0303c7d014d..ce094895ee3 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -33,6 +33,16 @@ pub struct Command { #[arg(long, value_name = "WAIT_TIME", value_parser = parse_duration, verbatim_doc_comment)] wait_time: Option, + /// The size of the block buffer (channel capacity) for prefetching blocks from the RPC + /// endpoint. 
+ #[arg( + long = "rpc-block-buffer-size", + value_name = "RPC_BLOCK_BUFFER_SIZE", + default_value = "20", + verbatim_doc_comment + )] + rpc_block_buffer_size: usize, + #[command(flatten)] benchmark: BenchmarkArgs, } @@ -48,7 +58,12 @@ impl Command { is_optimism, } = BenchContext::new(&self.benchmark, self.rpc_url).await?; - let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); + let buffer_size = self.rpc_block_buffer_size; + + // Use a oneshot channel to propagate errors from the spawned task + let (error_sender, mut error_receiver) = tokio::sync::oneshot::channel(); + let (sender, mut receiver) = tokio::sync::mpsc::channel(buffer_size); + tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { let block_res = block_provider @@ -60,6 +75,7 @@ impl Command { Ok(block) => block, Err(e) => { tracing::error!("Failed to fetch block {next_block}: {e}"); + let _ = error_sender.send(e); break; } }; @@ -69,6 +85,7 @@ impl Command { Ok(result) => result, Err(e) => { tracing::error!("Failed to convert block to new payload: {e}"); + let _ = error_sender.send(e); break; } }; @@ -163,6 +180,11 @@ impl Command { results.push((gas_row, combined_result)); } + // Check if the spawned task encountered an error + if let Ok(error) = error_receiver.try_recv() { + return Err(error); + } + let (gas_output_results, combined_results): (_, Vec) = results.into_iter().unzip(); diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 34fe3780553..3dfa619ec7b 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -13,7 +13,7 @@ use crate::{ use alloy_provider::Provider; use clap::Parser; use csv::Writer; -use eyre::Context; +use eyre::{Context, OptionExt}; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; use std::time::{Duration, Instant}; @@ -26,6 +26,16 @@ pub struct Command { #[arg(long, value_name = "RPC_URL", verbatim_doc_comment)] rpc_url: String, + /// The size of the block buffer (channel capacity) for prefetching blocks from the RPC + /// endpoint. 
+ #[arg( + long = "rpc-block-buffer-size", + value_name = "RPC_BLOCK_BUFFER_SIZE", + default_value = "20", + verbatim_doc_comment + )] + rpc_block_buffer_size: usize, + #[command(flatten)] benchmark: BenchmarkArgs, } @@ -41,7 +51,12 @@ impl Command { is_optimism, } = BenchContext::new(&self.benchmark, self.rpc_url).await?; - let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); + let buffer_size = self.rpc_block_buffer_size; + + // Use a oneshot channel to propagate errors from the spawned task + let (error_sender, mut error_receiver) = tokio::sync::oneshot::channel(); + let (sender, mut receiver) = tokio::sync::mpsc::channel(buffer_size); + tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { let block_res = block_provider @@ -49,13 +64,30 @@ impl Command { .full() .await .wrap_err_with(|| format!("Failed to fetch block by number {next_block}")); - let block = block_res.unwrap().unwrap(); + let block = match block_res.and_then(|opt| opt.ok_or_eyre("Block not found")) { + Ok(block) => block, + Err(e) => { + tracing::error!("Failed to fetch block {next_block}: {e}"); + let _ = error_sender.send(e); + break; + } + }; let header = block.header.clone(); - let (version, params) = block_to_new_payload(block, is_optimism).unwrap(); + let (version, params) = match block_to_new_payload(block, is_optimism) { + Ok(result) => result, + Err(e) => { + tracing::error!("Failed to convert block to new payload: {e}"); + let _ = error_sender.send(e); + break; + } + }; next_block += 1; - sender.send((header, version, params)).await.unwrap(); + if let Err(e) = sender.send((header, version, params)).await { + tracing::error!("Failed to send block data: {e}"); + break; + } } }); @@ -96,6 +128,11 @@ impl Command { results.push((row, new_payload_result)); } + // Check if the spawned task encountered an error + if let Ok(error) = error_receiver.try_recv() { + return Err(error); + } + let (gas_output_results, new_payload_results): (_, Vec) = results.into_iter().unzip(); From 778146cb0116c019af284b89cfa5a63f9b455f02 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 22 Oct 2025 15:36:49 +0200 Subject: [PATCH 159/371] chore: use retrylayer for benchmarkcontext (#19227) --- bin/reth-bench/src/bench/context.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bin/reth-bench/src/bench/context.rs b/bin/reth-bench/src/bench/context.rs index 75c8592ad3c..1d53ce8e1a3 100644 --- a/bin/reth-bench/src/bench/context.rs +++ b/bin/reth-bench/src/bench/context.rs @@ -7,6 +7,7 @@ use alloy_primitives::address; use alloy_provider::{network::AnyNetwork, Provider, RootProvider}; use alloy_rpc_client::ClientBuilder; use alloy_rpc_types_engine::JwtSecret; +use alloy_transport::layers::RetryBackoffLayer; use reqwest::Url; use reth_node_core::args::BenchmarkArgs; use tracing::info; @@ -49,7 +50,9 @@ impl BenchContext { } // set up alloy client for blocks - let client = ClientBuilder::default().http(rpc_url.parse()?); + let client = ClientBuilder::default() + .layer(RetryBackoffLayer::new(10, 800, u64::MAX)) + .http(rpc_url.parse()?); let block_provider = RootProvider::::new(client); // Check if this is an OP chain by checking code at a predeploy address. 
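Editor's note: the following is an illustrative sketch, not part of the patch series. It restates the retry-wrapped RPC client construction that PATCH 159 introduces in reth-bench's BenchContext, so the pattern can be read outside the diff. The URL argument and the use of eyre are placeholders; the RetryBackoffLayer arguments (max retries, initial backoff in milliseconds, compute units per second) mirror the values used in the diff above, and their exact semantics should be checked against alloy's documentation.

    use alloy_provider::{network::AnyNetwork, RootProvider};
    use alloy_rpc_client::ClientBuilder;
    use alloy_transport::layers::RetryBackoffLayer;

    fn example_block_provider(rpc_url: &str) -> eyre::Result<RootProvider<AnyNetwork>> {
        // Wrap the HTTP transport in a retry/backoff layer, as done in the patch:
        // up to 10 retries, 800 ms initial backoff, effectively no CU rate limit.
        let client = ClientBuilder::default()
            .layer(RetryBackoffLayer::new(10, 800, u64::MAX))
            .http(rpc_url.parse()?);
        Ok(RootProvider::<AnyNetwork>::new(client))
    }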
From b9f6068f59085002e8f8fa26a7b7821b53fcf94d Mon Sep 17 00:00:00 2001 From: wizard <112275929+famouswizard@users.noreply.github.com> Date: Wed, 22 Oct 2025 17:04:10 +0300 Subject: [PATCH 160/371] fix: incorrect RPC namespace reference (#19225) --- examples/node-custom-rpc/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/node-custom-rpc/src/main.rs b/examples/node-custom-rpc/src/main.rs index 2af789a989c..7ab271b4cc5 100644 --- a/examples/node-custom-rpc/src/main.rs +++ b/examples/node-custom-rpc/src/main.rs @@ -91,7 +91,7 @@ pub trait TxpoolExtApi { ) -> SubscriptionResult; } -/// The type that implements the `txpool` rpc namespace trait +/// The type that implements the `txpoolExt` rpc namespace trait pub struct TxpoolExt { pool: Pool, } From f438a6cc830a1830d301ad89a107fd19d8c39cde Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 22 Oct 2025 17:02:37 +0200 Subject: [PATCH 161/371] chore: add elapsed info log (#19211) --- crates/engine/tree/src/tree/payload_processor/prewarm.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index e57a2aeaa86..134233233ee 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -270,9 +270,9 @@ where self; let hash = env.hash; + debug!(target: "engine::caching", parent_hash=?hash, "Updating execution cache"); // Perform all cache operations atomically under the lock execution_cache.update_with_guard(|cached| { - // consumes the `SavedCache` held by the prewarming task, which releases its usage guard let (caches, cache_metrics) = saved_cache.split(); let new_cache = SavedCache::new(hash, caches, cache_metrics); @@ -286,13 +286,15 @@ where } new_cache.update_metrics(); - debug!(target: "engine::caching", parent_hash=?new_cache.executed_block_hash(), "Updated execution cache"); // Replace the shared cache with the new one; the previous cache (if any) is dropped. *cached = Some(new_cache); }); - metrics.cache_saving_duration.set(start.elapsed().as_secs_f64()); + let elapsed = start.elapsed(); + debug!(target: "engine::caching", parent_hash=?hash, elapsed=?elapsed, "Updated execution cache"); + + metrics.cache_saving_duration.set(elapsed.as_secs_f64()); } /// Executes the task. 
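Editor's note: another illustrative sketch, not part of the patch series. It condenses the lock-wait warning added in PATCH 157 and the elapsed-time logging from PATCH 161 into one self-contained function. The 5 ms threshold matches the diff; the lock type, log message, and field names here are simplified placeholders (the real code guards the engine's shared execution cache and also records the duration in a metrics gauge).

    use std::{
        sync::RwLock,
        time::{Duration, Instant},
    };
    use tracing::{debug, warn};

    fn read_cache_with_wait_warning<T: Clone>(slot: &RwLock<Option<T>>) -> Option<T> {
        let start = Instant::now();
        let guard = slot.read().expect("execution cache lock poisoned");
        let waited = start.elapsed();
        if waited > Duration::from_millis(5) {
            // Surface unexpectedly long waits on the shared cache, as in the patch.
            warn!(blocked_for = ?waited, "Blocked waiting for execution cache lock");
        }
        debug!(elapsed = ?waited, "Acquired execution cache");
        guard.clone()
    }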
From df0da36bc43e8c96e9a897ca87ba2e335b0a239b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roman=20Hodul=C3=A1k?= Date: Wed, 22 Oct 2025 17:04:08 +0200 Subject: [PATCH 162/371] test(hive): Ignore new failures that are won't fix (#19218) --- .github/assets/hive/expected_failures.yaml | 108 +++++++++++++++++++++ 1 file changed, 108 insertions(+) diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index ae3817cfc3d..2650d9a2d90 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -49,6 +49,8 @@ engine-auth: [] # realistic on mainnet # 7251 related tests - modified contract, not necessarily practical on mainnet, # 7594: https://github.com/paradigmxyz/reth/issues/18975 +# 4844: reth unwinds from block 2 to genesis but tests expect to unwind to block 1 if chain.rlp has an invalid block +# 7610: tests are related to empty account that has storage, close to impossible to trigger # worth re-visiting when more of these related tests are passing eest/consume-engine: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test_engine-zero_nonce]-reth @@ -80,6 +82,62 @@ eest/consume-engine: - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Osaka-blockchain_test_engine-log_argument_withdrawal_credentials_size-value_zero]-reth - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_log_length[fork_Osaka-blockchain_test_engine-slice_bytes_False]-reth - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_log_length[fork_Osaka-blockchain_test_engine-slice_bytes_True]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - 
tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - 
tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - 
tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/cancun/eip4844_blobs/test_blob_txs.py::test_invalid_tx_max_fee_per_blob_gas[fork_Cancun-insufficient_max_fee_per_blob_gas-blockchain_test_engine-account_balance_modifier_1000000000]-reth + - tests/cancun/eip4844_blobs/test_blob_txs.py::test_invalid_tx_max_fee_per_blob_gas[fork_Cancun-invalid_max_fee_per_blob_gas-blockchain_test_engine-account_balance_modifier_1000000000]-reth + - tests/cancun/eip4844_blobs/test_blob_txs.py::test_invalid_tx_max_fee_per_blob_gas[fork_Osaka-insufficient_max_fee_per_blob_gas-blockchain_test_engine-account_balance_modifier_1000000000]-reth + - tests/cancun/eip4844_blobs/test_blob_txs.py::test_invalid_tx_max_fee_per_blob_gas[fork_Osaka-invalid_max_fee_per_blob_gas-blockchain_test_engine-account_balance_modifier_1000000000]-reth + - tests/cancun/eip4844_blobs/test_blob_txs.py::test_invalid_tx_max_fee_per_blob_gas[fork_Prague-insufficient_max_fee_per_blob_gas-blockchain_test_engine-account_balance_modifier_1000000000]-reth + - tests/cancun/eip4844_blobs/test_blob_txs.py::test_invalid_tx_max_fee_per_blob_gas[fork_Prague-invalid_max_fee_per_blob_gas-blockchain_test_engine-account_balance_modifier_1000000000]-reth eest/consume-rlp: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test-zero_nonce]-reth - tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py::test_system_contract_errors[fork_Prague-blockchain_test_engine-system_contract_reaches_gas_limit-system_contract_0x0000bbddc7ce488642fb579f8b00f3a590007251]-reth @@ -117,3 +175,53 @@ eest/consume-rlp: - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-zero_balance]-reth - tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-nonzero_balance]-reth - tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-zero_balance]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_1-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - 
tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_0-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_2-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_1-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_1-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_1-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_2-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_1-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_2-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_0-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_2-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_1-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - 
tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_0-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_1-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_2-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_1-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - 
tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_2-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_1-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_2-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_0-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_0-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_2-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_0-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_0-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_0-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_1-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth From bab9dee55507bb73109ae53d0993e84cae395660 Mon Sep 17 00:00:00 2001 From: Jennifer Date: Wed, 22 Oct 2025 16:16:29 +0100 Subject: [PATCH 163/371] fix: rename consume-* test suite (#19230) --- .github/workflows/hive.yml | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 4b1b36027f2..d606ddab7ab 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -137,43 +137,43 @@ jobs: - debug_ # consume-engine - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/osaka.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/prague.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/cancun.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/shanghai.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/berlin.* - - sim: ethereum/eest/consume-engine + - sim: 
ethereum/eels/consume-engine limit: .*tests/istanbul.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/homestead.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/frontier.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/paris.* # consume-rlp - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/osaka.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/prague.* - sim: ethereum/eest/consume-rlp limit: .*tests/cancun.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/shanghai.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/berlin.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/istanbul.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/homestead.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/frontier.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/paris.* needs: - prepare-reth From fa2f173aacca8e53a55d5f2f4cc2aa9df8314190 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Wed, 22 Oct 2025 17:10:33 +0100 Subject: [PATCH 164/371] chore(storage): remove `UnifiedStorageWriterError` (#19210) --- crates/storage/errors/src/lib.rs | 3 --- crates/storage/errors/src/provider.rs | 5 +---- crates/storage/errors/src/writer.rs | 24 ------------------------ 3 files changed, 1 insertion(+), 31 deletions(-) delete mode 100644 crates/storage/errors/src/writer.rs diff --git a/crates/storage/errors/src/lib.rs b/crates/storage/errors/src/lib.rs index 1a09d745140..eca6cd47a45 100644 --- a/crates/storage/errors/src/lib.rs +++ b/crates/storage/errors/src/lib.rs @@ -21,8 +21,5 @@ pub mod lockfile; pub mod provider; pub use provider::{ProviderError, ProviderResult}; -/// Writer error -pub mod writer; - /// Any error pub mod any; diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index 9630a1b2a64..ed5230c18fb 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -1,4 +1,4 @@ -use crate::{any::AnyError, db::DatabaseError, writer::UnifiedStorageWriterError}; +use crate::{any::AnyError, db::DatabaseError}; use alloc::{boxed::Box, string::String}; use alloy_eips::{BlockHashOrNumber, HashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256}; @@ -125,9 +125,6 @@ pub enum ProviderError { /// Consistent view error. #[error("failed to initialize consistent view: {_0}")] ConsistentView(Box), - /// Storage writer error. - #[error(transparent)] - UnifiedStorageWriterError(#[from] UnifiedStorageWriterError), /// Received invalid output from configured storage implementation. 
#[error("received invalid output from storage")] InvalidStorageOutput, diff --git a/crates/storage/errors/src/writer.rs b/crates/storage/errors/src/writer.rs deleted file mode 100644 index 52a5ba06e5e..00000000000 --- a/crates/storage/errors/src/writer.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::db::DatabaseError; -use reth_static_file_types::StaticFileSegment; - -/// `UnifiedStorageWriter` related errors -#[derive(Clone, Debug, derive_more::Display, PartialEq, Eq, derive_more::Error)] -pub enum UnifiedStorageWriterError { - /// Database writer is missing - #[display("Database writer is missing")] - MissingDatabaseWriter, - /// Static file writer is missing - #[display("Static file writer is missing")] - MissingStaticFileWriter, - /// Static file writer is of wrong segment - #[display("Static file writer is of wrong segment: got {_0}, expected {_1}")] - IncorrectStaticFileWriter(StaticFileSegment, StaticFileSegment), - /// Database-related errors. - Database(DatabaseError), -} - -impl From for UnifiedStorageWriterError { - fn from(error: DatabaseError) -> Self { - Self::Database(error) - } -} From 8119045258eaa6948080c9fae67de52d488c83ea Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 22 Oct 2025 18:29:55 +0200 Subject: [PATCH 165/371] chore(e2e): relax bounds (#19231) --- crates/e2e-test-utils/src/lib.rs | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index e7b83cb3ad9..57d03f70fa5 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -8,8 +8,8 @@ use reth_network_api::test_utils::PeersHandleProvider; use reth_node_builder::{ components::NodeComponentsBuilder, rpc::{EngineValidatorAddOn, RethRpcAddOns}, - FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, NodePrimitives, NodeTypes, - NodeTypesWithDBAdapter, PayloadAttributesBuilder, PayloadTypes, + FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, NodeTypes, NodeTypesWithDBAdapter, + PayloadAttributesBuilder, PayloadTypes, }; use reth_provider::providers::{BlockchainProvider, NodeTypesForProvider}; use reth_tasks::TaskManager; @@ -146,12 +146,6 @@ where >, > + Node< TmpNodeAdapter>>, - Primitives: NodePrimitives< - BlockHeader = alloy_consensus::Header, - BlockBody = alloy_consensus::BlockBody< - ::SignedTx, - >, - >, ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter>>, Components: NodeComponents< @@ -180,12 +174,6 @@ where >, > + Node< TmpNodeAdapter>>, - Primitives: NodePrimitives< - BlockHeader = alloy_consensus::Header, - BlockBody = alloy_consensus::BlockBody< - ::SignedTx, - >, - >, ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter>>, Components: NodeComponents< From 1972ec0949df5d75894c42731c9ee9c2c6b8e8bc Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 22 Oct 2025 12:33:54 -0400 Subject: [PATCH 166/371] revert: "fix(engine): flatten storage cache (#18880)" (#19235) --- crates/engine/tree/src/tree/cached_state.rs | 143 ++++++++++++-------- 1 file changed, 89 insertions(+), 54 deletions(-) diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index c1bb028cab2..bc543d067a0 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -1,8 +1,5 @@ //! Execution cache implementation for block processing. 
-use alloy_primitives::{ - map::{DefaultHashBuilder, HashSet}, - Address, StorageKey, StorageValue, B256, -}; +use alloy_primitives::{Address, StorageKey, StorageValue, B256}; use metrics::Gauge; use mini_moka::sync::CacheBuilder; use reth_errors::ProviderResult; @@ -17,6 +14,7 @@ use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; +use revm_primitives::map::DefaultHashBuilder; use std::{sync::Arc, time::Duration}; use tracing::{debug_span, instrument, trace}; @@ -302,70 +300,65 @@ pub(crate) struct ExecutionCache { /// Cache for contract bytecode, keyed by code hash. code_cache: Cache>, - /// Flattened storage cache: composite key of (`Address`, `StorageKey`) maps directly to - /// values. - storage_cache: Cache<(Address, StorageKey), Option>, + /// Per-account storage cache: outer cache keyed by Address, inner cache tracks that account’s + /// storage slots. + storage_cache: Cache, /// Cache for basic account information (nonce, balance, code hash). account_cache: Cache>, } impl ExecutionCache { - /// Get storage value from flattened cache. + /// Get storage value from hierarchical cache. /// /// Returns a `SlotStatus` indicating whether: - /// - `NotCached`: The storage slot is not in the cache - /// - `Empty`: The slot exists in the cache but is empty + /// - `NotCached`: The account's storage cache doesn't exist + /// - `Empty`: The slot exists in the account's cache but is empty /// - `Value`: The slot exists and has a specific value pub(crate) fn get_storage(&self, address: &Address, key: &StorageKey) -> SlotStatus { - match self.storage_cache.get(&(*address, *key)) { + match self.storage_cache.get(address) { None => SlotStatus::NotCached, - Some(None) => SlotStatus::Empty, - Some(Some(value)) => SlotStatus::Value(value), + Some(account_cache) => account_cache.get_storage(key), } } - /// Insert storage value into flattened cache + /// Insert storage value into hierarchical cache pub(crate) fn insert_storage( &self, address: Address, key: StorageKey, value: Option, ) { - self.storage_cache.insert((address, key), value); + self.insert_storage_bulk(address, [(key, value)]); } - /// Insert multiple storage values into flattened cache for a single account + /// Insert multiple storage values into hierarchical cache for a single account /// - /// This method inserts multiple storage values for the same address directly - /// into the flattened cache. + /// This method is optimized for inserting multiple storage values for the same address + /// by doing the account cache lookup only once instead of for each key-value pair. 
pub(crate) fn insert_storage_bulk(&self, address: Address, storage_entries: I) where I: IntoIterator)>, { + let account_cache = self.storage_cache.get(&address).unwrap_or_else(|| { + let account_cache = AccountStorageCache::default(); + self.storage_cache.insert(address, account_cache.clone()); + account_cache + }); + for (key, value) in storage_entries { - self.storage_cache.insert((address, key), value); + account_cache.insert_storage(key, value); } } + /// Invalidate storage for specific account + pub(crate) fn invalidate_account_storage(&self, address: &Address) { + self.storage_cache.invalidate(address); + } + /// Returns the total number of storage slots cached across all accounts pub(crate) fn total_storage_slots(&self) -> usize { - self.storage_cache.entry_count() as usize - } - - /// Invalidates the storage for all addresses in the set - #[instrument(level = "debug", target = "engine::caching", skip_all, fields(accounts = addresses.len()))] - pub(crate) fn invalidate_storages(&self, addresses: HashSet<&Address>) { - // NOTE: this must collect because the invalidate function should not be called while we - // hold an iter for it - let storage_entries = self - .storage_cache - .iter() - .filter_map(|entry| addresses.contains(&entry.key().0).then_some(*entry.key())) - .collect::>(); - for key in storage_entries { - self.storage_cache.invalidate(&key) - } + self.storage_cache.iter().map(|addr| addr.len()).sum() } /// Inserts the post-execution state changes into the cache. @@ -405,7 +398,6 @@ impl ExecutionCache { state_updates.state.values().map(|account| account.storage.len()).sum::() ) .entered(); - let mut invalidated_accounts = HashSet::default(); for (addr, account) in &state_updates.state { // If the account was not modified, as in not changed and not destroyed, then we have // nothing to do w.r.t. this particular account and can move on @@ -418,7 +410,7 @@ impl ExecutionCache { // Invalidate the account cache entry if destroyed self.account_cache.invalidate(addr); - invalidated_accounts.insert(addr); + self.invalidate_account_storage(addr); continue } @@ -445,9 +437,6 @@ impl ExecutionCache { self.account_cache.insert(*addr, Some(Account::from(account_info))); } - // invalidate storage for all destroyed accounts - self.invalidate_storages(invalidated_accounts); - Ok(()) } } @@ -476,11 +465,11 @@ impl ExecutionCacheBuilder { const TIME_TO_IDLE: Duration = Duration::from_secs(3600); // 1 hour let storage_cache = CacheBuilder::new(self.storage_cache_entries) - .weigher(|_key: &(Address, StorageKey), _value: &Option| -> u32 { - // Size of composite key (Address + StorageKey) + Option - // Address: 20 bytes, StorageKey: 32 bytes, Option: 33 bytes - // Plus some overhead for the hash map entry - 120_u32 + .weigher(|_key: &Address, value: &AccountStorageCache| -> u32 { + // values based on results from measure_storage_cache_overhead test + let base_weight = 39_000; + let slots_weight = value.len() * 218; + (base_weight + slots_weight) as u32 }) .max_capacity(storage_cache_size) .time_to_live(EXPIRY_TIME) @@ -603,6 +592,56 @@ impl SavedCache { } } +/// Cache for an individual account's storage slots. +/// +/// This represents the second level of the hierarchical storage cache. +/// Each account gets its own `AccountStorageCache` to store accessed storage slots. +#[derive(Debug, Clone)] +pub(crate) struct AccountStorageCache { + /// Map of storage keys to their cached values. 
+ slots: Cache>, +} + +impl AccountStorageCache { + /// Create a new [`AccountStorageCache`] + pub(crate) fn new(max_slots: u64) -> Self { + Self { + slots: CacheBuilder::new(max_slots).build_with_hasher(DefaultHashBuilder::default()), + } + } + + /// Get a storage value from this account's cache. + /// - `NotCached`: The slot is not in the cache + /// - `Empty`: The slot is empty + /// - `Value`: The slot has a specific value + pub(crate) fn get_storage(&self, key: &StorageKey) -> SlotStatus { + match self.slots.get(key) { + None => SlotStatus::NotCached, + Some(None) => SlotStatus::Empty, + Some(Some(value)) => SlotStatus::Value(value), + } + } + + /// Insert a storage value + pub(crate) fn insert_storage(&self, key: StorageKey, value: Option) { + self.slots.insert(key, value); + } + + /// Returns the number of slots in the cache + pub(crate) fn len(&self) -> usize { + self.slots.entry_count() as usize + } +} + +impl Default for AccountStorageCache { + fn default() -> Self { + // With weigher and max_capacity in place, this number represents + // the maximum number of entries that can be stored, not the actual + // memory usage which is controlled by storage cache's max_capacity. + Self::new(1_000_000) + } +} + #[cfg(test)] mod tests { use super::*; @@ -677,36 +716,32 @@ mod tests { #[test] fn measure_storage_cache_overhead() { - let (base_overhead, cache) = - measure_allocation(|| ExecutionCacheBuilder::default().build_caches(1000)); - println!("Base ExecutionCache overhead: {base_overhead} bytes"); + let (base_overhead, cache) = measure_allocation(|| AccountStorageCache::new(1000)); + println!("Base AccountStorageCache overhead: {base_overhead} bytes"); let mut rng = rand::rng(); - let address = Address::random(); let key = StorageKey::random(); let value = StorageValue::from(rng.random::()); let (first_slot, _) = measure_allocation(|| { - cache.insert_storage(address, key, Some(value)); + cache.insert_storage(key, Some(value)); }); println!("First slot insertion overhead: {first_slot} bytes"); const TOTAL_SLOTS: usize = 10_000; let (test_slots, _) = measure_allocation(|| { for _ in 0..TOTAL_SLOTS { - let addr = Address::random(); let key = StorageKey::random(); let value = StorageValue::from(rng.random::()); - cache.insert_storage(addr, key, Some(value)); + cache.insert_storage(key, Some(value)); } }); println!("Average overhead over {} slots: {} bytes", TOTAL_SLOTS, test_slots / TOTAL_SLOTS); println!("\nTheoretical sizes:"); - println!("Address size: {} bytes", size_of::
()); println!("StorageKey size: {} bytes", size_of::()); println!("StorageValue size: {} bytes", size_of::()); println!("Option size: {} bytes", size_of::>()); - println!("(Address, StorageKey) size: {} bytes", size_of::<(Address, StorageKey)>()); + println!("Option size: {} bytes", size_of::>()); } #[test] From 4f6cc7a359ba4f7039e4be5dafde727dc6854cdb Mon Sep 17 00:00:00 2001 From: radik878 Date: Wed, 22 Oct 2025 21:20:25 +0300 Subject: [PATCH 167/371] fix(node): remove unused ConsensusLayerHealthEvent variants (#19238) --- crates/node/events/src/cl.rs | 10 +++------- crates/node/events/src/node.rs | 11 ----------- 2 files changed, 3 insertions(+), 18 deletions(-) diff --git a/crates/node/events/src/cl.rs b/crates/node/events/src/cl.rs index bdced7c97d6..99cdc1c245f 100644 --- a/crates/node/events/src/cl.rs +++ b/crates/node/events/src/cl.rs @@ -61,7 +61,7 @@ impl Stream for ConsensusLayerHealthEvents { )) } - // We never had both FCU and transition config exchange. + // We never received any forkchoice updates. return Poll::Ready(Some(ConsensusLayerHealthEvent::NeverSeen)) } } @@ -71,12 +71,8 @@ impl Stream for ConsensusLayerHealthEvents { /// Execution Layer point of view. #[derive(Clone, Copy, Debug)] pub enum ConsensusLayerHealthEvent { - /// Consensus Layer client was never seen. + /// Consensus Layer client was never seen (no forkchoice updates received). NeverSeen, - /// Consensus Layer client has not been seen for a while. - HasNotBeenSeenForAWhile(Duration), - /// Updates from the Consensus Layer client were never received. - NeverReceivedUpdates, - /// Updates from the Consensus Layer client have not been received for a while. + /// Forkchoice updates from the Consensus Layer client have not been received for a while. HaveNotReceivedUpdatesForAWhile(Duration), } diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 3539eae0316..02c7709819e 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -296,17 +296,6 @@ impl NodeState { "Post-merge network, but never seen beacon client. Please launch one to follow the chain!" ) } - ConsensusLayerHealthEvent::HasNotBeenSeenForAWhile(period) => { - warn!( - ?period, - "Post-merge network, but no beacon client seen for a while. Please launch one to follow the chain!" - ) - } - ConsensusLayerHealthEvent::NeverReceivedUpdates => { - warn!( - "Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!" 
- ) - } ConsensusLayerHealthEvent::HaveNotReceivedUpdatesForAWhile(period) => { warn!( ?period, From 346ef408a4bb657de7529c7765c7a5fe77780230 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 22 Oct 2025 22:38:53 +0200 Subject: [PATCH 168/371] chore: swap order for canon stream (#19242) --- crates/optimism/rpc/src/eth/transaction.rs | 2 +- crates/rpc/rpc-eth-api/src/helpers/transaction.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index aa7e8ea60bd..37c05815a61 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -95,8 +95,8 @@ where let this = self.clone(); let timeout_duration = self.send_raw_transaction_sync_timeout(); async move { - let hash = EthTransactions::send_raw_transaction(&this, tx).await?; let mut canonical_stream = this.provider().canonical_state_stream(); + let hash = EthTransactions::send_raw_transaction(&this, tx).await?; let flashblock_rx = this.pending_block_rx(); let mut flashblock_stream = flashblock_rx.map(WatchStream::new); diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 81909b3f36e..2cbf1aff14e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -91,8 +91,8 @@ pub trait EthTransactions: LoadTransaction { let this = self.clone(); let timeout_duration = self.send_raw_transaction_sync_timeout(); async move { - let hash = EthTransactions::send_raw_transaction(&this, tx).await?; let mut stream = this.provider().canonical_state_stream(); + let hash = EthTransactions::send_raw_transaction(&this, tx).await?; tokio::time::timeout(timeout_duration, async { while let Some(notification) = stream.next().await { let chain = notification.committed(); From bcef01ce4724070278cc9366d08e8d40decfa335 Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Wed, 22 Oct 2025 19:28:23 -0400 Subject: [PATCH 169/371] feat(jovian): track da footprint block limit. 
Update basefee calculation (#19048) Co-authored-by: Arsenii Kulikov --- Cargo.lock | 13 +- Cargo.toml | 6 +- crates/optimism/chainspec/src/basefee.rs | 169 +++++++++++++++++++--- crates/optimism/evm/src/build.rs | 8 +- crates/optimism/evm/src/error.rs | 3 + crates/optimism/evm/src/l1.rs | 136 +++++++++++++++-- crates/optimism/payload/Cargo.toml | 1 + crates/optimism/payload/src/builder.rs | 56 +++++-- crates/optimism/rpc/src/eth/receipt.rs | 20 ++- crates/optimism/txpool/src/validator.rs | 4 +- crates/rpc/rpc-convert/src/transaction.rs | 1 - 11 files changed, 358 insertions(+), 59 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6e672b6f684..90aed93b946 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -253,9 +253,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.22.3" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb19405755c6f94c9bb856f2b1449767074b7e2002e1ab2be0a79b9b28db322" +checksum = "83ce19ea6140497670b1b7e721f9a9ce88022fe475a5e4e6a68a403499cca209" dependencies = [ "alloy-consensus", "alloy-eips", @@ -370,9 +370,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.22.3" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f059cf29d7f15b3e6581ceb6eda06a16d8ed4b55adc02b0677add3fd381db6bb" +checksum = "7d7aeaf6051f53880a65b547c43e3b05ee42f68236b1f43f013abfe4eadc47bb" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6127,9 +6127,9 @@ dependencies = [ [[package]] name = "op-revm" -version = "11.1.2" +version = "11.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d721c4c196273dd135ea5b823cd573ea8735cd3c5f2c19fcb91ee3af655351" +checksum = "a33ab6a7bbcfffcbf784de78f14593b6389003f5c69653fcffcc163459a37d69" dependencies = [ "auto_impl", "revm", @@ -9435,6 +9435,7 @@ version = "1.8.2" dependencies = [ "alloy-consensus", "alloy-eips", + "alloy-evm", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", diff --git a/Cargo.toml b/Cargo.toml index 08041015646..ae7956ef489 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -478,14 +478,14 @@ revm-inspector = { version = "11.1.0", default-features = false } revm-context = { version = "10.1.0", default-features = false } revm-context-interface = { version = "11.1.0", default-features = false } revm-database-interface = { version = "8.0.1", default-features = false } -op-revm = { version = "11.1.0", default-features = false } +op-revm = { version = "11.2.0", default-features = false } revm-inspectors = "0.31.0" # eth alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.4.1" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.22.0", default-features = false } +alloy-evm = { version = "0.22.4", default-features = false } alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.4.1" @@ -523,7 +523,7 @@ alloy-transport-ipc = { version = "1.0.41", default-features = false } alloy-transport-ws = { version = "1.0.41", default-features = false } # op -alloy-op-evm = { version = "0.22.0", default-features = false } +alloy-op-evm = { version = "0.22.4", default-features = false } alloy-op-hardforks = "0.4.0" op-alloy-rpc-types = { version = "0.21.0", default-features = false } op-alloy-rpc-types-engine = { version = "0.21.0", default-features = false } diff --git 
a/crates/optimism/chainspec/src/basefee.rs b/crates/optimism/chainspec/src/basefee.rs index 0ef712dc04f..394de296f23 100644 --- a/crates/optimism/chainspec/src/basefee.rs +++ b/crates/optimism/chainspec/src/basefee.rs @@ -1,26 +1,13 @@ //! Base fee related utilities for Optimism chains. +use core::cmp::max; + use alloy_consensus::BlockHeader; +use alloy_eips::calc_next_block_base_fee; use op_alloy_consensus::{decode_holocene_extra_data, decode_jovian_extra_data, EIP1559ParamError}; use reth_chainspec::{BaseFeeParams, EthChainSpec}; use reth_optimism_forks::OpHardforks; -fn next_base_fee_params( - chain_spec: impl EthChainSpec + OpHardforks, - parent: &H, - timestamp: u64, - denominator: u32, - elasticity: u32, -) -> u64 { - let base_fee_params = if elasticity == 0 && denominator == 0 { - chain_spec.base_fee_params_at_timestamp(timestamp) - } else { - BaseFeeParams::new(denominator as u128, elasticity as u128) - }; - - parent.next_block_base_fee(base_fee_params).unwrap_or_default() -} - /// Extracts the Holocene 1599 parameters from the encoded extra data from the parent header. /// /// Caution: Caller must ensure that holocene is active in the parent header. @@ -36,7 +23,13 @@ where { let (elasticity, denominator) = decode_holocene_extra_data(parent.extra_data())?; - Ok(next_base_fee_params(chain_spec, parent, timestamp, denominator, elasticity)) + let base_fee_params = if elasticity == 0 && denominator == 0 { + chain_spec.base_fee_params_at_timestamp(timestamp) + } else { + BaseFeeParams::new(denominator as u128, elasticity as u128) + }; + + Ok(parent.next_block_base_fee(base_fee_params).unwrap_or_default()) } /// Extracts the Jovian 1599 parameters from the encoded extra data from the parent header. @@ -57,8 +50,22 @@ where { let (elasticity, denominator, min_base_fee) = decode_jovian_extra_data(parent.extra_data())?; - let next_base_fee = - next_base_fee_params(chain_spec, parent, timestamp, denominator, elasticity); + let base_fee_params = if elasticity == 0 && denominator == 0 { + chain_spec.base_fee_params_at_timestamp(timestamp) + } else { + BaseFeeParams::new(denominator as u128, elasticity as u128) + }; + + // Starting from Jovian, we use the maximum of the gas used and the blob gas used to calculate + // the next base fee. 
+ let gas_used = max(parent.gas_used(), parent.blob_gas_used().unwrap_or_default()); + + let next_base_fee = calc_next_block_base_fee( + gas_used, + parent.gas_limit(), + parent.base_fee_per_gas().unwrap_or_default(), + base_fee_params, + ); if next_base_fee < min_base_fee { return Ok(min_base_fee); @@ -66,3 +73,127 @@ where Ok(next_base_fee) } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use op_alloy_consensus::encode_jovian_extra_data; + use reth_chainspec::{ChainSpec, ForkCondition, Hardfork}; + use reth_optimism_forks::OpHardfork; + + use crate::{OpChainSpec, BASE_SEPOLIA}; + + use super::*; + + const JOVIAN_TIMESTAMP: u64 = 1900000000; + + fn get_chainspec() -> Arc { + let mut base_sepolia_spec = BASE_SEPOLIA.inner.clone(); + base_sepolia_spec + .hardforks + .insert(OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(JOVIAN_TIMESTAMP)); + Arc::new(OpChainSpec { + inner: ChainSpec { + chain: base_sepolia_spec.chain, + genesis: base_sepolia_spec.genesis, + genesis_header: base_sepolia_spec.genesis_header, + ..Default::default() + }, + }) + } + + #[test] + fn test_next_base_fee_jovian_blob_gas_used_greater_than_gas_used() { + let chain_spec = get_chainspec(); + let mut parent = chain_spec.genesis_header().clone(); + let timestamp = JOVIAN_TIMESTAMP; + + const GAS_LIMIT: u64 = 10_000_000_000; + const BLOB_GAS_USED: u64 = 5_000_000_000; + const GAS_USED: u64 = 1_000_000_000; + const MIN_BASE_FEE: u64 = 100_000_000; + + parent.extra_data = + encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE) + .unwrap(); + parent.blob_gas_used = Some(BLOB_GAS_USED); + parent.gas_used = GAS_USED; + parent.gas_limit = GAS_LIMIT; + + let expected_base_fee = calc_next_block_base_fee( + BLOB_GAS_USED, + parent.gas_limit(), + parent.base_fee_per_gas().unwrap_or_default(), + BaseFeeParams::base_sepolia(), + ); + assert_eq!( + expected_base_fee, + compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap() + ); + assert_ne!( + expected_base_fee, + calc_next_block_base_fee( + GAS_USED, + parent.gas_limit(), + parent.base_fee_per_gas().unwrap_or_default(), + BaseFeeParams::base_sepolia(), + ) + ) + } + + #[test] + fn test_next_base_fee_jovian_blob_gas_used_less_than_gas_used() { + let chain_spec = get_chainspec(); + let mut parent = chain_spec.genesis_header().clone(); + let timestamp = JOVIAN_TIMESTAMP; + + const GAS_LIMIT: u64 = 10_000_000_000; + const BLOB_GAS_USED: u64 = 100_000_000; + const GAS_USED: u64 = 1_000_000_000; + const MIN_BASE_FEE: u64 = 100_000_000; + + parent.extra_data = + encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE) + .unwrap(); + parent.blob_gas_used = Some(BLOB_GAS_USED); + parent.gas_used = GAS_USED; + parent.gas_limit = GAS_LIMIT; + + let expected_base_fee = calc_next_block_base_fee( + GAS_USED, + parent.gas_limit(), + parent.base_fee_per_gas().unwrap_or_default(), + BaseFeeParams::base_sepolia(), + ); + assert_eq!( + expected_base_fee, + compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap() + ); + } + + #[test] + fn test_next_base_fee_jovian_min_base_fee() { + let chain_spec = get_chainspec(); + let mut parent = chain_spec.genesis_header().clone(); + let timestamp = JOVIAN_TIMESTAMP; + + const GAS_LIMIT: u64 = 10_000_000_000; + const BLOB_GAS_USED: u64 = 100_000_000; + const GAS_USED: u64 = 1_000_000_000; + const MIN_BASE_FEE: u64 = 5_000_000_000; + + parent.extra_data = + encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE) + .unwrap(); + parent.blob_gas_used = 
Some(BLOB_GAS_USED); + parent.gas_used = GAS_USED; + parent.gas_limit = GAS_LIMIT; + + let expected_base_fee = MIN_BASE_FEE; + assert_eq!( + expected_base_fee, + compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap() + ); + } +} diff --git a/crates/optimism/evm/src/build.rs b/crates/optimism/evm/src/build.rs index edc877a9a5d..b8fab18833c 100644 --- a/crates/optimism/evm/src/build.rs +++ b/crates/optimism/evm/src/build.rs @@ -46,7 +46,7 @@ impl OpBlockAssembler { evm_env, execution_ctx: ctx, transactions, - output: BlockExecutionResult { receipts, gas_used, .. }, + output: BlockExecutionResult { receipts, gas_used, blob_gas_used, requests: _ }, bundle_state, state_root, state_provider, @@ -80,7 +80,11 @@ impl OpBlockAssembler { }; let (excess_blob_gas, blob_gas_used) = - if self.chain_spec.is_ecotone_active_at_timestamp(timestamp) { + if self.chain_spec.is_jovian_active_at_timestamp(timestamp) { + // In jovian, we're using the blob gas used field to store the current da + // footprint's value. + (Some(0), Some(*blob_gas_used)) + } else if self.chain_spec.is_ecotone_active_at_timestamp(timestamp) { (Some(0), Some(0)) } else { (None, None) diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index 9b694243fac..1a8e76c1490 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -38,6 +38,9 @@ pub enum L1BlockInfoError { /// Operator fee constant conversion error #[error("could not convert operator fee constant")] OperatorFeeConstantConversion, + /// DA foootprint gas scalar constant conversion error + #[error("could not convert DA footprint gas scalar constant")] + DaFootprintGasScalarConversion, /// Optimism hardforks not active #[error("Optimism hardforks are not active")] HardforksNotActive, diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 4165221c987..2afe6e9d3a2 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -2,7 +2,7 @@ use crate::{error::L1BlockInfoError, revm_spec_by_timestamp_after_bedrock, OpBlockExecutionError}; use alloy_consensus::Transaction; -use alloy_primitives::{hex, U256}; +use alloy_primitives::{hex, U16, U256}; use op_revm::L1BlockInfo; use reth_execution_errors::BlockExecutionError; use reth_optimism_forks::OpHardforks; @@ -14,6 +14,10 @@ const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); /// The function selector of the "setL1BlockValuesIsthmus" function in the `L1Block` contract. const L1_BLOCK_ISTHMUS_SELECTOR: [u8; 4] = hex!("098999be"); +/// The function selector of the "setL1BlockValuesJovian" function in the `L1Block` contract. +/// This is the first 4 bytes of `keccak256("setL1BlockValuesJovian()")`. +const L1_BLOCK_JOVIAN_SELECTOR: [u8; 4] = hex!("3db6be2b"); + /// Extracts the [`L1BlockInfo`] from the L2 block. The L1 info transaction is always the first /// transaction in the L2 block. /// @@ -52,11 +56,14 @@ pub fn extract_l1_info_from_tx( /// If the input is shorter than 4 bytes. pub fn parse_l1_info(input: &[u8]) -> Result { // Parse the L1 info transaction into an L1BlockInfo struct, depending on the function selector. 
- // There are currently 3 variants: + // There are currently 4 variants: + // - Jovian // - Isthmus // - Ecotone // - Bedrock - if input[0..4] == L1_BLOCK_ISTHMUS_SELECTOR { + if input[0..4] == L1_BLOCK_JOVIAN_SELECTOR { + parse_l1_info_tx_jovian(input[4..].as_ref()) + } else if input[0..4] == L1_BLOCK_ISTHMUS_SELECTOR { parse_l1_info_tx_isthmus(input[4..].as_ref()) } else if input[0..4] == L1_BLOCK_ECOTONE_SELECTOR { parse_l1_info_tx_ecotone(input[4..].as_ref()) @@ -88,14 +95,12 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result Result Result Result { + if data.len() != 174 { + return Err(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::UnexpectedCalldataLength)); + } + + // https://github.com/ethereum-optimism/op-geth/blob/60038121c7571a59875ff9ed7679c48c9f73405d/core/types/rollup_cost.go#L317-L328 + // + // data layout assumed for Ecotone: + // offset type varname + // 0 + // 4 uint32 _basefeeScalar (start offset in this scope) + // 8 uint32 _blobBaseFeeScalar + // 12 uint64 _sequenceNumber, + // 20 uint64 _timestamp, + // 28 uint64 _l1BlockNumber + // 36 uint256 _basefee, + // 68 uint256 _blobBaseFee, + // 100 bytes32 _hash, + // 132 bytes32 _batcherHash, + // 164 uint32 _operatorFeeScalar + // 168 uint64 _operatorFeeConstant + // 176 uint16 _daFootprintGasScalar + + let l1_base_fee_scalar = U256::try_from_be_slice(&data[..4]) + .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeScalarConversion))?; + let l1_blob_base_fee_scalar = U256::try_from_be_slice(&data[4..8]).ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeScalarConversion) + })?; + let l1_base_fee = U256::try_from_be_slice(&data[32..64]) + .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeConversion))?; + let l1_blob_base_fee = U256::try_from_be_slice(&data[64..96]) + .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeConversion))?; + let operator_fee_scalar = U256::try_from_be_slice(&data[160..164]).ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeScalarConversion) + })?; + let operator_fee_constant = U256::try_from_be_slice(&data[164..172]).ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeConstantConversion) + })?; + let da_footprint_gas_scalar: u16 = U16::try_from_be_slice(&data[172..174]) + .ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::DaFootprintGasScalarConversion) + })? 
+ .to(); - Ok(l1block) + Ok(L1BlockInfo { + l1_base_fee, + l1_base_fee_scalar, + l1_blob_base_fee: Some(l1_blob_base_fee), + l1_blob_base_fee_scalar: Some(l1_blob_base_fee_scalar), + operator_fee_scalar: Some(operator_fee_scalar), + operator_fee_constant: Some(operator_fee_constant), + da_footprint_gas_scalar: Some(da_footprint_gas_scalar), + ..Default::default() + }) } /// An extension trait for [`L1BlockInfo`] that allows us to calculate the L1 cost of a transaction @@ -282,6 +354,7 @@ mod tests { use super::*; use alloy_consensus::{Block, BlockBody}; use alloy_eips::eip2718::Decodable2718; + use alloy_primitives::keccak256; use reth_optimism_chainspec::OP_MAINNET; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::OpTransactionSigned; @@ -308,6 +381,12 @@ mod tests { assert_eq!(l1_info.l1_blob_base_fee_scalar, None); } + #[test] + fn test_verify_set_jovian() { + let hash = &keccak256("setL1BlockValuesJovian()")[..4]; + assert_eq!(hash, L1_BLOCK_JOVIAN_SELECTOR) + } + #[test] fn sanity_l1_block_ecotone() { // rig @@ -408,4 +487,33 @@ mod tests { assert_eq!(l1_block_info.operator_fee_scalar, operator_fee_scalar); assert_eq!(l1_block_info.operator_fee_constant, operator_fee_constant); } + + #[test] + fn parse_l1_info_jovian() { + // L1 block info from a devnet with Isthmus activated + const DATA: &[u8] = &hex!( + "3db6be2b00000558000c5fc500000000000000030000000067a9f765000000000000002900000000000000000000000000000000000000000000000000000000006a6d09000000000000000000000000000000000000000000000000000000000000000172fcc8e8886636bdbe96ba0e4baab67ea7e7811633f52b52e8cf7a5123213b6f000000000000000000000000d3f2c5afb2d76f5579f326b0cd7da5f5a4126c3500004e2000000000000001f4dead" + ); + + // expected l1 block info verified against expected l1 fee and operator fee for tx. + let l1_base_fee = U256::from(6974729); + let l1_base_fee_scalar = U256::from(1368); + let l1_blob_base_fee = Some(U256::from(1)); + let l1_blob_base_fee_scalar = Some(U256::from(810949)); + let operator_fee_scalar = Some(U256::from(20000)); + let operator_fee_constant = Some(U256::from(500)); + let da_footprint_gas_scalar: Option = Some(U16::from(0xdead).to()); + + // test + + let l1_block_info = parse_l1_info(DATA).unwrap(); + + assert_eq!(l1_block_info.l1_base_fee, l1_base_fee); + assert_eq!(l1_block_info.l1_base_fee_scalar, l1_base_fee_scalar); + assert_eq!(l1_block_info.l1_blob_base_fee, l1_blob_base_fee); + assert_eq!(l1_block_info.l1_blob_base_fee_scalar, l1_blob_base_fee_scalar); + assert_eq!(l1_block_info.operator_fee_scalar, operator_fee_scalar); + assert_eq!(l1_block_info.operator_fee_constant, operator_fee_constant); + assert_eq!(l1_block_info.da_footprint_gas_scalar, da_footprint_gas_scalar); + } } diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 8d1875fe753..e75075a12cf 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -44,6 +44,7 @@ op-alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true alloy-rpc-types-debug.workspace = true alloy-consensus.workspace = true +alloy-evm.workspace = true # misc derive_more.workspace = true diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 67b8faf5608..05f33d3b699 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -1,5 +1,4 @@ //! Optimism payload builder implementation. 
- use crate::{ config::{OpBuilderConfig, OpDAConfig}, error::OpPayloadBuilderError, @@ -7,6 +6,7 @@ use crate::{ OpAttributes, OpPayloadBuilderAttributes, OpPayloadPrimitives, }; use alloy_consensus::{BlockHeader, Transaction, Typed2718}; +use alloy_evm::Evm as AlloyEvm; use alloy_primitives::{B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; @@ -14,10 +14,12 @@ use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ + block::BlockExecutorFor, execute::{ BlockBuilder, BlockBuilderOutcome, BlockExecutionError, BlockExecutor, BlockValidationError, }, - ConfigureEvm, Database, Evm, + op_revm::{constants::L1_BLOCK_CONTRACT, L1BlockInfo}, + ConfigureEvm, Database, }; use reth_execution_types::ExecutionOutcome; use reth_optimism_forks::OpHardforks; @@ -340,6 +342,11 @@ impl OpBuilder<'_, Txs> { let mut db = State::builder().with_database(db).with_bundle_update().build(); + // Load the L1 block contract into the database cache. If the L1 block contract is not + // pre-loaded the database will panic when trying to fetch the DA footprint gas + // scalar. + db.load_cache_account(L1_BLOCK_CONTRACT).map_err(BlockExecutionError::other)?; + let mut builder = ctx.block_builder(&mut db)?; // 1. apply pre-execution changes @@ -509,17 +516,27 @@ impl ExecutionInfo { tx_data_limit: Option, block_data_limit: Option, tx_gas_limit: u64, + da_footprint_gas_scalar: Option, ) -> bool { if tx_data_limit.is_some_and(|da_limit| tx_da_size > da_limit) { return true; } - if block_data_limit - .is_some_and(|da_limit| self.cumulative_da_bytes_used + tx_da_size > da_limit) - { + let total_da_bytes_used = self.cumulative_da_bytes_used.saturating_add(tx_da_size); + + if block_data_limit.is_some_and(|da_limit| total_da_bytes_used > da_limit) { return true; } + // Post Jovian: the tx DA footprint must be less than the block gas limit + if let Some(da_footprint_gas_scalar) = da_footprint_gas_scalar { + let tx_da_footprint = + total_da_bytes_used.saturating_mul(da_footprint_gas_scalar as u64); + if tx_da_footprint > block_gas_limit { + return true; + } + } + self.cumulative_gas_used + tx_gas_limit > block_gas_limit } } @@ -586,7 +603,13 @@ where pub fn block_builder<'a, DB: Database>( &'a self, db: &'a mut State, - ) -> Result + 'a, PayloadBuilderError> { + ) -> Result< + impl BlockBuilder< + Primitives = Evm::Primitives, + Executor: BlockExecutorFor<'a, Evm::BlockExecutorFactory, DB>, + > + 'a, + PayloadBuilderError, + > { self.evm_config .builder_for_next_block( db, @@ -649,14 +672,18 @@ where /// Executes the given best transactions and updates the execution info. /// /// Returns `Ok(Some(())` if the job was cancelled. 
- pub fn execute_best_transactions( + pub fn execute_best_transactions( &self, info: &mut ExecutionInfo, - builder: &mut impl BlockBuilder, + builder: &mut Builder, mut best_txs: impl PayloadTransactions< Transaction: PoolTransaction> + OpPooledTx, >, - ) -> Result, PayloadBuilderError> { + ) -> Result, PayloadBuilderError> + where + Builder: BlockBuilder, + <::Evm as AlloyEvm>::DB: Database, + { let block_gas_limit = builder.evm_mut().block().gas_limit(); let block_da_limit = self.da_config.max_da_block_size(); let tx_da_limit = self.da_config.max_da_tx_size(); @@ -666,12 +693,23 @@ where let interop = tx.interop_deadline(); let tx_da_size = tx.estimated_da_size(); let tx = tx.into_consensus(); + + let da_footprint_gas_scalar = self + .chain_spec + .is_jovian_active_at_timestamp(self.attributes().timestamp()) + .then_some( + L1BlockInfo::fetch_da_footprint_gas_scalar(builder.evm_mut().db_mut()).expect( + "DA footprint should always be available from the database post jovian", + ), + ); + if info.is_tx_over_limits( tx_da_size, block_gas_limit, tx_da_limit, block_da_limit, tx.gas_limit(), + da_footprint_gas_scalar, ) { // we can't fit this transaction into the block, so we need to mark it as // invalid which also removes all dependent transaction from diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index f8910c22a33..5d1e8e29794 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -131,10 +131,14 @@ pub struct OpReceiptFieldsBuilder { pub l1_blob_base_fee: Option, /// The current L1 blob base fee scalar. pub l1_blob_base_fee_scalar: Option, + /* ---------------------------------------- Isthmus ---------------------------------------- */ /// The current operator fee scalar. pub operator_fee_scalar: Option, /// The current L1 blob base fee scalar. pub operator_fee_constant: Option, + /* ---------------------------------------- Jovian ----------------------------------------- */ + /// The current DA footprint gas scalar. + pub da_footprint_gas_scalar: Option, } impl OpReceiptFieldsBuilder { @@ -154,6 +158,7 @@ impl OpReceiptFieldsBuilder { l1_blob_base_fee_scalar: None, operator_fee_scalar: None, operator_fee_constant: None, + da_footprint_gas_scalar: None, } } @@ -205,6 +210,8 @@ impl OpReceiptFieldsBuilder { l1_block_info.operator_fee_constant.map(|constant| constant.saturating_to()); } + self.da_footprint_gas_scalar = l1_block_info.da_footprint_gas_scalar; + Ok(self) } @@ -236,6 +243,7 @@ impl OpReceiptFieldsBuilder { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, + da_footprint_gas_scalar, } = self; OpTransactionReceiptFields { @@ -249,7 +257,7 @@ impl OpReceiptFieldsBuilder { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, - da_footprint_gas_scalar: None, + da_footprint_gas_scalar, }, deposit_nonce, deposit_receipt_version, @@ -409,7 +417,7 @@ mod test { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, - .. + da_footprint_gas_scalar, } = receipt_meta.l1_block_info; assert_eq!( @@ -453,6 +461,11 @@ mod test { TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.operator_fee_constant, "incorrect operator fee constant" ); + assert_eq!( + da_footprint_gas_scalar, + TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.da_footprint_gas_scalar, + "incorrect da footprint gas scalar" + ); } #[test] @@ -540,7 +553,7 @@ mod test { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, - .. 
+ da_footprint_gas_scalar, } = receipt_meta.l1_block_info; assert_eq!(l1_gas_price, Some(14121491676), "incorrect l1 base fee (former gas price)"); @@ -552,5 +565,6 @@ mod test { assert_eq!(l1_blob_base_fee_scalar, Some(1055762), "incorrect l1 blob base fee scalar"); assert_eq!(operator_fee_scalar, None, "incorrect operator fee scalar"); assert_eq!(operator_fee_constant, None, "incorrect operator fee constant"); + assert_eq!(da_footprint_gas_scalar, None, "incorrect da footprint gas scalar"); } } diff --git a/crates/optimism/txpool/src/validator.rs b/crates/optimism/txpool/src/validator.rs index 631c4255942..0cec4482a32 100644 --- a/crates/optimism/txpool/src/validator.rs +++ b/crates/optimism/txpool/src/validator.rs @@ -143,8 +143,8 @@ where self.block_info.timestamp.store(header.timestamp(), Ordering::Relaxed); self.block_info.number.store(header.number(), Ordering::Relaxed); - if let Some(Ok(cost_addition)) = tx.map(reth_optimism_evm::extract_l1_info_from_tx) { - *self.block_info.l1_block_info.write() = cost_addition; + if let Some(Ok(l1_block_info)) = tx.map(reth_optimism_evm::extract_l1_info_from_tx) { + *self.block_info.l1_block_info.write() = l1_block_info; } if self.chain_spec().is_interop_active_at_timestamp(header.timestamp()) { diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index 046acbda544..6766ec43fb0 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -1,5 +1,4 @@ //! Compatibility functions for rpc `Transaction` type. - use crate::{ fees::{CallFees, CallFeesError}, RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, SignableTxRequest, From f8845c6fbb8e0fe23e5f69f9514dc2b9415558cb Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 23 Oct 2025 05:36:16 +0100 Subject: [PATCH 170/371] fix(engine): payload processor tracing event targets (#19223) --- .../src/tree/payload_processor/multiproof.rs | 48 +++++++++---------- .../src/tree/payload_processor/prewarm.rs | 16 +++---- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 1e5b226f591..737f57fb345 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -218,7 +218,7 @@ pub(crate) fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostStat for (address, account) in update { if account.is_touched() { let hashed_address = keccak256(address); - trace!(target: "engine::root", ?address, ?hashed_address, "Adding account to state update"); + trace!(target: "engine::tree::payload_processor::multiproof", ?address, ?hashed_address, "Adding account to state update"); let destroyed = account.is_selfdestructed(); let info = if destroyed { None } else { Some(account.info.into()) }; @@ -456,7 +456,7 @@ impl MultiproofManager { let storage_targets = proof_targets.len(); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", proof_sequence_number, ?proof_targets, storage_targets, @@ -475,7 +475,7 @@ impl MultiproofManager { .storage_proof(hashed_address, proof_targets); let elapsed = start.elapsed(); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", proof_sequence_number, ?elapsed, ?source, @@ -529,7 +529,7 @@ impl MultiproofManager { let storage_targets = 
proof_targets.values().map(|slots| slots.len()).sum::(); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", proof_sequence_number, ?proof_targets, account_targets, @@ -567,7 +567,7 @@ impl MultiproofManager { })(); let elapsed = start.elapsed(); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", proof_sequence_number, ?elapsed, ?source, @@ -781,7 +781,7 @@ impl MultiProofTask { proofs_processed >= state_update_proofs_requested + prefetch_proofs_requested; let no_pending = !self.proof_sequencer.has_pending(); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", proofs_processed, state_update_proofs_requested, prefetch_proofs_requested, @@ -836,7 +836,7 @@ impl MultiProofTask { } if duplicates > 0 { - trace!(target: "engine::root", duplicates, "Removed duplicate prefetch proof targets"); + trace!(target: "engine::tree::payload_processor::multiproof", duplicates, "Removed duplicate prefetch proof targets"); } targets @@ -998,18 +998,18 @@ impl MultiProofTask { let mut updates_finished_time = None; loop { - trace!(target: "engine::root", "entering main channel receiving loop"); + trace!(target: "engine::tree::payload_processor::multiproof", "entering main channel receiving loop"); match self.rx.recv() { Ok(message) => match message { MultiProofMessage::PrefetchProofs(targets) => { - trace!(target: "engine::root", "processing MultiProofMessage::PrefetchProofs"); + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::PrefetchProofs"); if first_update_time.is_none() { // record the wait time self.metrics .first_update_wait_time_histogram .record(start.elapsed().as_secs_f64()); first_update_time = Some(Instant::now()); - debug!(target: "engine::root", "Started state root calculation"); + debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); } let account_targets = targets.len(); @@ -1017,7 +1017,7 @@ impl MultiProofTask { targets.values().map(|slots| slots.len()).sum::(); prefetch_proofs_requested += self.on_prefetch_proof(targets); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", account_targets, storage_targets, prefetch_proofs_requested, @@ -1025,20 +1025,20 @@ impl MultiProofTask { ); } MultiProofMessage::StateUpdate(source, update) => { - trace!(target: "engine::root", "processing MultiProofMessage::StateUpdate"); + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::StateUpdate"); if first_update_time.is_none() { // record the wait time self.metrics .first_update_wait_time_histogram .record(start.elapsed().as_secs_f64()); first_update_time = Some(Instant::now()); - debug!(target: "engine::root", "Started state root calculation"); + debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); } let len = update.len(); state_update_proofs_requested += self.on_state_update(source, update); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", ?source, len, ?state_update_proofs_requested, @@ -1046,7 +1046,7 @@ impl MultiProofTask { ); } MultiProofMessage::FinishedStateUpdates => { - trace!(target: "engine::root", "processing MultiProofMessage::FinishedStateUpdates"); + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::FinishedStateUpdates"); updates_finished = true; updates_finished_time = 
Some(Instant::now()); if self.is_done( @@ -1056,14 +1056,14 @@ impl MultiProofTask { updates_finished, ) { debug!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", "State updates finished and all proofs processed, ending calculation" ); break } } MultiProofMessage::EmptyProof { sequence_number, state } => { - trace!(target: "engine::root", "processing MultiProofMessage::EmptyProof"); + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::EmptyProof"); proofs_processed += 1; @@ -1081,14 +1081,14 @@ impl MultiProofTask { updates_finished, ) { debug!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", "State updates finished and all proofs processed, ending calculation" ); break } } MultiProofMessage::ProofCalculated(proof_calculated) => { - trace!(target: "engine::root", "processing + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::ProofCalculated"); // we increment proofs_processed for both state updates and prefetches, @@ -1100,7 +1100,7 @@ impl MultiProofTask { .record(proof_calculated.elapsed); trace!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", sequence = proof_calculated.sequence_number, total_proofs = proofs_processed, "Processing calculated proof" @@ -1121,14 +1121,14 @@ impl MultiProofTask { updates_finished, ) { debug!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", "State updates finished and all proofs processed, ending calculation"); break } } MultiProofMessage::ProofCalculationError(err) => { error!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", ?err, "proof calculation error" ); @@ -1138,14 +1138,14 @@ impl MultiProofTask { Err(_) => { // this means our internal message channel is closed, which shouldn't happen // in normal operation since we hold both ends - error!(target: "engine::root", "Internal message channel closed unexpectedly"); + error!(target: "engine::tree::payload_processor::multiproof", "Internal message channel closed unexpectedly"); return } } } debug!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", total_updates = state_update_proofs_requested, total_proofs = proofs_processed, total_time = ?first_update_time.map(|t|t.elapsed()), diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 134233233ee..abc3bd58351 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -106,7 +106,7 @@ where let (actions_tx, actions_rx) = channel(); trace!( - target: "engine::tree::prewarm", + target: "engine::tree::payload_processor::prewarm", max_concurrency, transaction_count_hint, "Initialized prewarm task" @@ -185,7 +185,7 @@ where for handle in &handles { if let Err(err) = handle.send(indexed_tx.clone()) { warn!( - target: "engine::tree::prewarm", + target: "engine::tree::payload_processor::prewarm", tx_hash = %first_tx_hash, error = %err, "Failed to send deposit transaction to worker" @@ -196,7 +196,7 @@ where // Not a deposit, send to first worker via round-robin if let Err(err) = handles[0].send(indexed_tx) { warn!( - target: "engine::tree::prewarm", + target: "engine::tree::payload_processor::prewarm", task_idx = 0, error = %err, "Failed to send transaction to worker" @@ -213,7 +213,7 @@ where let task_idx = executing % 
workers_needed; if let Err(err) = handles[task_idx].send(indexed_tx) { warn!( - target: "engine::tree::prewarm", + target: "engine::tree::payload_processor::prewarm", task_idx, error = %err, "Failed to send transaction to worker" @@ -329,7 +329,7 @@ where self.send_multi_proof_targets(proof_targets); } PrewarmTaskEvent::Terminate { block_output } => { - trace!(target: "engine::tree::prewarm", "Received termination signal"); + trace!(target: "engine::tree::payload_processor::prewarm", "Received termination signal"); final_block_output = Some(block_output); if finished_execution { @@ -338,7 +338,7 @@ where } } PrewarmTaskEvent::FinishedTxExecution { executed_transactions } => { - trace!(target: "engine::tree::prewarm", "Finished prewarm execution signal"); + trace!(target: "engine::tree::payload_processor::prewarm", "Finished prewarm execution signal"); self.ctx.metrics.transactions.set(executed_transactions as f64); self.ctx.metrics.transactions_histogram.record(executed_transactions as f64); @@ -352,7 +352,7 @@ where } } - debug!(target: "engine::tree::prewarm", "Completed prewarm execution"); + debug!(target: "engine::tree::payload_processor::prewarm", "Completed prewarm execution"); // save caches and finish if let Some(Some(state)) = final_block_output { @@ -488,7 +488,7 @@ where Ok(res) => res, Err(err) => { trace!( - target: "engine::tree::prewarm", + target: "engine::tree::payload_processor::prewarm", %err, tx_hash=%tx.tx().tx_hash(), sender=%tx.signer(), From 4548209e7b00351a993779820b78db86b8628c9b Mon Sep 17 00:00:00 2001 From: YK Date: Thu, 23 Oct 2025 15:19:21 +0800 Subject: [PATCH 171/371] perf: rm pending queue from MultiproofManager (#19178) --- .../src/tree/payload_processor/multiproof.rs | 94 +++++++++---------- crates/trie/parallel/src/proof_task.rs | 86 +++++++++++++++-- 2 files changed, 125 insertions(+), 55 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 737f57fb345..9f136a48125 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -24,7 +24,7 @@ use reth_trie_parallel::{ root::ParallelStateRootError, }; use std::{ - collections::{BTreeMap, VecDeque}, + collections::BTreeMap, ops::DerefMut, sync::{ mpsc::{channel, Receiver, Sender}, @@ -34,10 +34,6 @@ use std::{ }; use tracing::{debug, error, instrument, trace}; -/// Default upper bound for inflight multiproof calculations. These would be sitting in the queue -/// waiting to be processed. -const DEFAULT_MULTIPROOF_INFLIGHT_LIMIT: usize = 128; - /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the /// state. #[derive(Default, Debug)] @@ -337,17 +333,10 @@ impl MultiproofInput { } /// Manages concurrent multiproof calculations. -/// Takes care of not having more calculations in flight than a given maximum -/// concurrency, further calculation requests are queued and spawn later, after -/// availability has been signaled. #[derive(Debug)] pub struct MultiproofManager { - /// Maximum number of proof calculations allowed to be inflight at once. - inflight_limit: usize, /// Currently running calculations. inflight: usize, - /// Queued calculations. - pending: VecDeque, /// Executor for tasks executor: WorkloadExecutor, /// Handle to the proof worker pools (storage and account). 
@@ -376,22 +365,16 @@ impl MultiproofManager { proof_worker_handle: ProofWorkerHandle, ) -> Self { Self { - pending: VecDeque::with_capacity(DEFAULT_MULTIPROOF_INFLIGHT_LIMIT), - inflight_limit: DEFAULT_MULTIPROOF_INFLIGHT_LIMIT, - executor, inflight: 0, + executor, metrics, proof_worker_handle, missed_leaves_storage_roots: Default::default(), } } - const fn is_full(&self) -> bool { - self.inflight >= self.inflight_limit - } - - /// Spawns a new multiproof calculation or enqueues it if the inflight limit is reached. - fn spawn_or_queue(&mut self, input: PendingMultiproofTask) { + /// Spawns a new multiproof calculation. + fn spawn(&mut self, input: PendingMultiproofTask) { // If there are no proof targets, we can just send an empty multiproof back immediately if input.proof_targets_is_empty() { debug!( @@ -402,27 +385,9 @@ impl MultiproofManager { return } - if self.is_full() { - self.pending.push_back(input); - self.metrics.pending_multiproofs_histogram.record(self.pending.len() as f64); - return; - } - self.spawn_multiproof_task(input); } - /// Signals that a multiproof calculation has finished and there's room to - /// spawn a new calculation if needed. - fn on_calculation_complete(&mut self) { - self.inflight = self.inflight.saturating_sub(1); - self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); - - if let Some(input) = self.pending.pop_front() { - self.metrics.pending_multiproofs_histogram.record(self.pending.len() as f64); - self.spawn_multiproof_task(input); - } - } - /// Spawns a multiproof task, dispatching to `spawn_storage_proof` if the input is a storage /// multiproof, and dispatching to `spawn_multiproof` otherwise. fn spawn_multiproof_task(&mut self, input: PendingMultiproofTask) { @@ -508,6 +473,24 @@ impl MultiproofManager { self.inflight += 1; self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + self.metrics + .pending_storage_multiproofs_histogram + .record(self.proof_worker_handle.pending_storage_tasks() as f64); + self.metrics + .pending_account_multiproofs_histogram + .record(self.proof_worker_handle.pending_account_tasks() as f64); + } + + /// Signals that a multiproof calculation has finished. + fn on_calculation_complete(&mut self) { + self.inflight = self.inflight.saturating_sub(1); + self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + self.metrics + .pending_storage_multiproofs_histogram + .record(self.proof_worker_handle.pending_storage_tasks() as f64); + self.metrics + .pending_account_multiproofs_histogram + .record(self.proof_worker_handle.pending_account_tasks() as f64); } /// Spawns a single multiproof calculation task. @@ -598,6 +581,12 @@ impl MultiproofManager { self.inflight += 1; self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + self.metrics + .pending_storage_multiproofs_histogram + .record(self.proof_worker_handle.pending_storage_tasks() as f64); + self.metrics + .pending_account_multiproofs_histogram + .record(self.proof_worker_handle.pending_account_tasks() as f64); } } @@ -606,8 +595,10 @@ impl MultiproofManager { pub(crate) struct MultiProofTaskMetrics { /// Histogram of inflight multiproofs. pub inflight_multiproofs_histogram: Histogram, - /// Histogram of pending multiproofs. - pub pending_multiproofs_histogram: Histogram, + /// Histogram of pending storage multiproofs in the queue. + pub pending_storage_multiproofs_histogram: Histogram, + /// Histogram of pending account multiproofs in the queue. 
+ pub pending_account_multiproofs_histogram: Histogram, /// Histogram of the number of prefetch proof target accounts. pub prefetch_proof_targets_accounts_histogram: Histogram, @@ -657,8 +648,7 @@ pub(crate) struct MultiProofTaskMetrics { #[derive(Debug)] pub(super) struct MultiProofTask { /// The size of proof targets chunk to spawn in one calculation. - /// - /// If [`None`], then chunking is disabled. + /// If None, chunking is disabled and all targets are processed in a single proof. chunk_size: Option, /// Task configuration. config: MultiProofConfig, @@ -738,10 +728,14 @@ impl MultiProofTask { // Process proof targets in chunks. let mut chunks = 0; - let should_chunk = !self.multiproof_manager.is_full(); + + // Only chunk if account or storage workers are available to take advantage of parallelism. + let should_chunk = + self.multiproof_manager.proof_worker_handle.has_available_account_workers() || + self.multiproof_manager.proof_worker_handle.has_available_storage_workers(); let mut spawn = |proof_targets| { - self.multiproof_manager.spawn_or_queue( + self.multiproof_manager.spawn( MultiproofInput { config: self.config.clone(), source: None, @@ -873,10 +867,14 @@ impl MultiProofTask { // Process state updates in chunks. let mut chunks = 0; - let should_chunk = !self.multiproof_manager.is_full(); let mut spawned_proof_targets = MultiProofTargets::default(); + // Only chunk if account or storage workers are available to take advantage of parallelism. + let should_chunk = + self.multiproof_manager.proof_worker_handle.has_available_account_workers() || + self.multiproof_manager.proof_worker_handle.has_available_storage_workers(); + let mut spawn = |hashed_state_update| { let proof_targets = get_proof_targets( &hashed_state_update, @@ -885,7 +883,7 @@ impl MultiProofTask { ); spawned_proof_targets.extend_ref(&proof_targets); - self.multiproof_manager.spawn_or_queue( + self.multiproof_manager.spawn( MultiproofInput { config: self.config.clone(), source: Some(source), @@ -954,7 +952,7 @@ impl MultiProofTask { /// so that the proofs for accounts and storage slots that were already fetched are not /// requested again. /// 2. Using the proof targets, a new multiproof is calculated using - /// [`MultiproofManager::spawn_or_queue`]. + /// [`MultiproofManager::spawn`]. /// * If the list of proof targets is empty, the [`MultiProofMessage::EmptyProof`] message is /// sent back to this task along with the original state update. /// * Otherwise, the multiproof is calculated and the [`MultiProofMessage::ProofCalculated`] diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 6525500a2a2..18e93dc26a4 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -51,6 +51,7 @@ use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}; use std::{ sync::{ + atomic::{AtomicUsize, Ordering}, mpsc::{channel, Receiver, Sender}, Arc, }, @@ -116,6 +117,7 @@ fn storage_worker_loop( task_ctx: ProofTaskCtx, work_rx: CrossbeamReceiver, worker_id: usize, + available_workers: Arc, #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, ) where Factory: DatabaseProviderFactory, @@ -144,7 +146,13 @@ fn storage_worker_loop( let mut storage_proofs_processed = 0u64; let mut storage_nodes_processed = 0u64; + // Initially mark this worker as available. 
+ available_workers.fetch_add(1, Ordering::Relaxed); + while let Ok(job) = work_rx.recv() { + // Mark worker as busy. + available_workers.fetch_sub(1, Ordering::Relaxed); + match job { StorageWorkerJob::StorageProof { input, result_sender } => { let hashed_address = input.hashed_address; @@ -186,6 +194,9 @@ fn storage_worker_loop( total_processed = storage_proofs_processed, "Storage proof completed" ); + + // Mark worker as available again. + available_workers.fetch_add(1, Ordering::Relaxed); } StorageWorkerJob::BlindedStorageNode { account, path, result_sender } => { @@ -224,6 +235,9 @@ fn storage_worker_loop( total_processed = storage_nodes_processed, "Blinded storage node completed" ); + + // Mark worker as available again. + available_workers.fetch_add(1, Ordering::Relaxed); } } } @@ -244,11 +258,9 @@ fn storage_worker_loop( /// /// # Lifecycle /// -/// Each worker: -/// 1. Receives `AccountWorkerJob` from crossbeam unbounded channel -/// 2. Computes result using its dedicated long-lived transaction -/// 3. Sends result directly to original caller via `std::mpsc` -/// 4. Repeats until channel closes (graceful shutdown) +/// Each worker initializes its providers, advertises availability, then loops: +/// receive an account job, mark busy, process the work, respond, and mark available again. +/// The loop ends gracefully once the channel closes. /// /// # Transaction Reuse /// @@ -269,6 +281,7 @@ fn account_worker_loop( work_rx: CrossbeamReceiver, storage_work_tx: CrossbeamSender, worker_id: usize, + available_workers: Arc, #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, ) where Factory: DatabaseProviderFactory, @@ -297,7 +310,13 @@ fn account_worker_loop( let mut account_proofs_processed = 0u64; let mut account_nodes_processed = 0u64; + // Count this worker as available only after successful initialization. + available_workers.fetch_add(1, Ordering::Relaxed); + while let Ok(job) = work_rx.recv() { + // Mark worker as busy. + available_workers.fetch_sub(1, Ordering::Relaxed); + match job { AccountWorkerJob::AccountMultiproof { mut input, result_sender } => { let span = tracing::debug_span!( @@ -381,6 +400,9 @@ fn account_worker_loop( "Account multiproof completed" ); drop(_span_guard); + + // Mark worker as available again. + available_workers.fetch_add(1, Ordering::Relaxed); } AccountWorkerJob::BlindedAccountNode { path, result_sender } => { @@ -420,6 +442,9 @@ fn account_worker_loop( "Blinded account node completed" ); drop(_span_guard); + + // Mark worker as available again. + available_workers.fetch_add(1, Ordering::Relaxed); } } } @@ -866,6 +891,12 @@ pub struct ProofWorkerHandle { storage_work_tx: CrossbeamSender, /// Direct sender to account worker pool account_work_tx: CrossbeamSender, + /// Counter tracking available storage workers. Workers decrement when starting work, + /// increment when finishing. Used to determine whether to chunk multiproofs. + storage_available_workers: Arc, + /// Counter tracking available account workers. Workers decrement when starting work, + /// increment when finishing. Used to determine whether to chunk multiproofs. + account_available_workers: Arc, } impl ProofWorkerHandle { @@ -893,6 +924,11 @@ impl ProofWorkerHandle { let (storage_work_tx, storage_work_rx) = unbounded::(); let (account_work_tx, account_work_rx) = unbounded::(); + // Initialize availability counters at zero. Each worker will increment when it + // successfully initializes, ensuring only healthy workers are counted. 
+ let storage_available_workers = Arc::new(AtomicUsize::new(0)); + let account_available_workers = Arc::new(AtomicUsize::new(0)); + tracing::debug!( target: "trie::proof_task", storage_worker_count, @@ -910,6 +946,7 @@ impl ProofWorkerHandle { let view_clone = view.clone(); let task_ctx_clone = task_ctx.clone(); let work_rx_clone = storage_work_rx.clone(); + let storage_available_workers_clone = storage_available_workers.clone(); executor.spawn_blocking(move || { #[cfg(feature = "metrics")] @@ -921,6 +958,7 @@ impl ProofWorkerHandle { task_ctx_clone, work_rx_clone, worker_id, + storage_available_workers_clone, #[cfg(feature = "metrics")] metrics, ) @@ -946,6 +984,7 @@ impl ProofWorkerHandle { let task_ctx_clone = task_ctx.clone(); let work_rx_clone = account_work_rx.clone(); let storage_work_tx_clone = storage_work_tx.clone(); + let account_available_workers_clone = account_available_workers.clone(); executor.spawn_blocking(move || { #[cfg(feature = "metrics")] @@ -958,6 +997,7 @@ impl ProofWorkerHandle { work_rx_clone, storage_work_tx_clone, worker_id, + account_available_workers_clone, #[cfg(feature = "metrics")] metrics, ) @@ -972,7 +1012,12 @@ impl ProofWorkerHandle { drop(_guard); - Self::new_handle(storage_work_tx, account_work_tx) + Self::new_handle( + storage_work_tx, + account_work_tx, + storage_available_workers, + account_available_workers, + ) } /// Creates a new [`ProofWorkerHandle`] with direct access to worker pools. @@ -981,8 +1026,35 @@ impl ProofWorkerHandle { const fn new_handle( storage_work_tx: CrossbeamSender, account_work_tx: CrossbeamSender, + storage_available_workers: Arc, + account_available_workers: Arc, ) -> Self { - Self { storage_work_tx, account_work_tx } + Self { + storage_work_tx, + account_work_tx, + storage_available_workers, + account_available_workers, + } + } + + /// Returns true if there are available storage workers to process tasks. + pub fn has_available_storage_workers(&self) -> bool { + self.storage_available_workers.load(Ordering::Relaxed) > 0 + } + + /// Returns true if there are available account workers to process tasks. + pub fn has_available_account_workers(&self) -> bool { + self.account_available_workers.load(Ordering::Relaxed) > 0 + } + + /// Returns the number of pending storage tasks in the queue. + pub fn pending_storage_tasks(&self) -> usize { + self.storage_work_tx.len() + } + + /// Returns the number of pending account tasks in the queue. + pub fn pending_account_tasks(&self) -> usize { + self.account_work_tx.len() } /// Dispatch a storage proof computation to storage worker pool From b2236d1db7826921ade7cff141848ef265c2bce6 Mon Sep 17 00:00:00 2001 From: Fallengirl <155266340+Fallengirl@users.noreply.github.com> Date: Thu, 23 Oct 2025 13:20:59 +0200 Subject: [PATCH 172/371] docs: correct Payment tx type from 0x7E to 0x2A (#19255) --- examples/custom-node/src/pool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/custom-node/src/pool.rs b/examples/custom-node/src/pool.rs index 0959b3bcae0..8828803a0f3 100644 --- a/examples/custom-node/src/pool.rs +++ b/examples/custom-node/src/pool.rs @@ -17,7 +17,7 @@ pub enum CustomPooledTransaction { /// A regular Optimism transaction as defined by [`OpPooledTransaction`]. #[envelope(flatten)] Op(OpPooledTransaction), - /// A [`TxPayment`] tagged with type 0x7E. + /// A [`TxPayment`] tagged with type 0x2A (decimal 42). 
#[envelope(ty = 42)] Payment(Signed), } From ce876a96ad30509fb00c2d2c9a7d4cd5b4470a9f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 23 Oct 2025 13:39:12 +0200 Subject: [PATCH 173/371] fix: use network id in p2p command (#19252) --- crates/cli/commands/src/p2p/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index 792d4533856..c72ceca78e6 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -192,6 +192,7 @@ impl DownloadArgs { let net = NetworkConfigBuilder::::new(p2p_secret_key) .peer_config(config.peers_config_with_basic_nodes_from_file(None)) .external_ip_resolver(self.network.nat) + .network_id(self.network.network_id) .boot_nodes(boot_nodes.clone()) .apply(|builder| { self.network.discovery.apply_to_builder(builder, rlpx_socket, boot_nodes) From 71f91cf4eb4b7ab4512885b8c00096b5d4fe10b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roman=20Hodul=C3=A1k?= Date: Thu, 23 Oct 2025 13:43:24 +0200 Subject: [PATCH 174/371] feat(prune): Add an empty `reth-prune-db` crate (#19232) --- Cargo.lock | 4 ++++ Cargo.toml | 1 + crates/prune/db/Cargo.toml | 15 +++++++++++++++ crates/prune/db/src/lib.rs | 1 + 4 files changed, 21 insertions(+) create mode 100644 crates/prune/db/Cargo.toml create mode 100644 crates/prune/db/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 90aed93b946..6839523354a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9812,6 +9812,10 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-prune-db" +version = "1.8.2" + [[package]] name = "reth-prune-types" version = "1.8.2" diff --git a/Cargo.toml b/Cargo.toml index ae7956ef489..324135b2233 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -93,6 +93,7 @@ members = [ "crates/payload/util/", "crates/primitives-traits/", "crates/primitives/", + "crates/prune/db", "crates/prune/prune", "crates/prune/types", "crates/ress/protocol", diff --git a/crates/prune/db/Cargo.toml b/crates/prune/db/Cargo.toml new file mode 100644 index 00000000000..269a87bf7b6 --- /dev/null +++ b/crates/prune/db/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "reth-prune-db" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true +description = "Database integration with prune implementation" + +[dependencies] + +[lints] +workspace = true diff --git a/crates/prune/db/src/lib.rs b/crates/prune/db/src/lib.rs new file mode 100644 index 00000000000..ef777085e54 --- /dev/null +++ b/crates/prune/db/src/lib.rs @@ -0,0 +1 @@ +//! An integration of `reth-prune` with `reth-db`. 
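The worker-availability accounting added in PATCH 171 (#19178) above follows a single pattern: each proof worker registers itself on a shared counter only after it has initialized successfully, decrements the counter while a job is in flight, and increments it again once the job is done, so that `has_available_storage_workers` / `has_available_account_workers` only report healthy, idle workers. Below is a minimal, self-contained sketch of that pattern using only the standard library; the names (`available`, `job_tx`) and the stand-in "work" are illustrative, not reth APIs.

    use std::{
        sync::{
            atomic::{AtomicUsize, Ordering},
            mpsc, Arc,
        },
        thread,
    };

    fn main() {
        // Shared availability counter, starting at zero like the ones in `ProofWorkerHandle`.
        let available = Arc::new(AtomicUsize::new(0));
        let (job_tx, job_rx) = mpsc::channel::<u64>();

        let worker_available = Arc::clone(&available);
        let worker = thread::spawn(move || {
            // Advertise availability only once the worker has finished initializing.
            worker_available.fetch_add(1, Ordering::Relaxed);
            while let Ok(job) = job_rx.recv() {
                // Busy while a job is in flight...
                worker_available.fetch_sub(1, Ordering::Relaxed);
                let _result = job * 2; // stand-in for the actual proof computation
                // ...and available again once it is done.
                worker_available.fetch_add(1, Ordering::Relaxed);
            }
            // Channel closed: graceful shutdown, mirroring the worker loops in the patch.
        });

        job_tx.send(7).unwrap();
        drop(job_tx); // closing the queue lets the worker exit
        worker.join().unwrap();

        // One idle worker is still registered; a dispatcher only needs a `> 0` check.
        assert_eq!(available.load(Ordering::Relaxed), 1);
    }

As in the patch, `Ordering::Relaxed` is sufficient because the counter is only a heuristic: the multiproof task merely checks whether it is non-zero to decide if chunking proof targets across workers is worthwhile.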
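The comment fix in PATCH 172 (#19255) is easy to sanity-check: the custom `Payment` variant is tagged with decimal 42, which is 0x2A, while 0x7E (decimal 126) is the type byte used by OP deposit transactions, so the value in the old comment was misleading. A tiny self-contained check of that arithmetic, with illustrative constant names:

    fn main() {
        // Type byte the custom Payment transaction is tagged with via `#[envelope(ty = 42)]`.
        const PAYMENT_TX_TYPE: u8 = 42; // 0x2A
        // Type byte of OP deposit transactions, which the old doc comment pointed at.
        const OP_DEPOSIT_TX_TYPE: u8 = 0x7E; // 126

        assert_eq!(PAYMENT_TX_TYPE, 0x2A);
        assert_ne!(PAYMENT_TX_TYPE, OP_DEPOSIT_TX_TYPE);
    }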
From c54719145bdab1e78c0ccd854e7e28b0b8019d15 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 23 Oct 2025 14:43:56 +0200 Subject: [PATCH 175/371] fix: use known paris activation blocks in genesis parsing (#19258) --- crates/chainspec/src/spec.rs | 143 +++++++++++++++++++++++++++++------ 1 file changed, 121 insertions(+), 22 deletions(-) diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index a0cccfcc449..e8d16886aac 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -3,7 +3,12 @@ use alloy_evm::eth::spec::EthExecutorSpec; use crate::{ constants::{MAINNET_DEPOSIT_CONTRACT, MAINNET_PRUNE_DELETE_LIMIT}, - holesky, hoodi, sepolia, EthChainSpec, + ethereum::SEPOLIA_PARIS_TTD, + holesky, hoodi, + mainnet::{MAINNET_PARIS_BLOCK, MAINNET_PARIS_TTD}, + sepolia, + sepolia::SEPOLIA_PARIS_BLOCK, + EthChainSpec, }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; @@ -100,7 +105,7 @@ pub static MAINNET: LazyLock> = LazyLock::new(|| { genesis, // paris_block_and_final_difficulty: Some(( - 15537394, + MAINNET_PARIS_BLOCK, U256::from(58_750_003_716_598_352_816_469u128), )), hardforks, @@ -127,7 +132,10 @@ pub static SEPOLIA: LazyLock> = LazyLock::new(|| { ), genesis, // - paris_block_and_final_difficulty: Some((1450409, U256::from(17_000_018_015_853_232u128))), + paris_block_and_final_difficulty: Some(( + SEPOLIA_PARIS_BLOCK, + U256::from(17_000_018_015_853_232u128), + )), hardforks, // https://sepolia.etherscan.io/tx/0x025ecbf81a2f1220da6285d1701dc89fb5a956b62562ee922e1a9efd73eb4b14 deposit_contract: Some(DepositContract::new( @@ -678,26 +686,50 @@ impl From for ChainSpec { // We expect no new networks to be configured with the merge, so we ignore the TTD field // and merge netsplit block from external genesis files. All existing networks that have // merged should have a static ChainSpec already (namely mainnet and sepolia). 
- let paris_block_and_final_difficulty = - if let Some(ttd) = genesis.config.terminal_total_difficulty { - hardforks.push(( - EthereumHardfork::Paris.boxed(), - ForkCondition::TTD { - // NOTE: this will not work properly if the merge is not activated at - // genesis, and there is no merge netsplit block - activation_block_number: genesis - .config - .merge_netsplit_block - .unwrap_or_default(), - total_difficulty: ttd, - fork_block: genesis.config.merge_netsplit_block, - }, - )); + let paris_block_and_final_difficulty = if let Some(ttd) = + genesis.config.terminal_total_difficulty + { + hardforks.push(( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { + // NOTE: this will not work properly if the merge is not activated at + // genesis, and there is no merge netsplit block + activation_block_number: genesis + .config + .merge_netsplit_block + .or_else(|| { + // due to this limitation we can't determine the merge block, + // this is the case for perfnet testing for example + // at the time of this fix, only two networks transitioned: MAINNET + + // SEPOLIA and this parsing from genesis is used for shadowforking, so + // we can reasonably assume that if the TTD and the chainid matches + // those networks we use the activation + // blocks of those networks + match genesis.config.chain_id { + 1 => { + if ttd == MAINNET_PARIS_TTD { + return Some(MAINNET_PARIS_BLOCK) + } + } + 11155111 => { + if ttd == SEPOLIA_PARIS_TTD { + return Some(SEPOLIA_PARIS_BLOCK) + } + } + _ => {} + }; + None + }) + .unwrap_or_default(), + total_difficulty: ttd, + fork_block: genesis.config.merge_netsplit_block, + }, + )); - genesis.config.merge_netsplit_block.map(|block| (block, ttd)) - } else { - None - }; + genesis.config.merge_netsplit_block.map(|block| (block, ttd)) + } else { + None + }; // Time-based hardforks let time_hardfork_opts = [ @@ -2647,4 +2679,71 @@ Post-merge hard forks (timestamp based): }; assert_eq!(hardfork_params, expected); } + + #[test] + fn parse_perf_net_genesis() { + let s = r#"{ + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficulty": 58750000000000000000000, + "terminalTotalDifficultyPassed": true, + "shanghaiTime": 1681338455, + "cancunTime": 1710338135, + "pragueTime": 1746612311, + "ethash": {}, + "depositContractAddress": "0x00000000219ab540356cBB839Cbe05303d7705Fa", + "blobSchedule": { + "cancun": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + }, + "prague": { + "target": 6, + "max": 9, + "baseFeeUpdateFraction": 5007716 + } + } + }, + "nonce": "0x42", + "timestamp": "0x0", + "extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa", + "gasLimit": "0x1388", + "difficulty": "0x400000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "baseFeePerGas": null +}"#; + + let genesis = 
serde_json::from_str::(s).unwrap(); + let chainspec = ChainSpec::from_genesis(genesis); + let activation = chainspec.hardforks.fork(EthereumHardfork::Paris); + assert_eq!( + activation, + ForkCondition::TTD { + activation_block_number: MAINNET_PARIS_BLOCK, + total_difficulty: MAINNET_PARIS_TTD, + fork_block: None, + } + ) + } } From 75931f8772a21b260927f1acce85f8e6a33f034d Mon Sep 17 00:00:00 2001 From: Fallengirl <155266340+Fallengirl@users.noreply.github.com> Date: Thu, 23 Oct 2025 15:13:03 +0200 Subject: [PATCH 176/371] chore: align env filter comment with configured directives (#19237) --- crates/tracing/src/layers.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 156bd8c8253..210c0066308 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -18,8 +18,9 @@ pub type FileWorkerGuard = tracing_appender::non_blocking::WorkerGuard; /// A boxed tracing [Layer]. pub(crate) type BoxedLayer = Box + Send + Sync>; -/// Default [directives](Directive) for [`EnvFilter`] which disables high-frequency debug logs from -/// `hyper`, `hickory-resolver`, `jsonrpsee-server`, and `discv5`. +/// Default [directives](Directive) for [`EnvFilter`] which disable high-frequency debug logs from +/// dependencies such as `hyper`, `hickory-resolver`, `hickory_proto`, `discv5`, `jsonrpsee-server`, +/// the `opentelemetry_*` crates, and `hyper_util::client::legacy::pool`. const DEFAULT_ENV_FILTER_DIRECTIVES: [&str; 9] = [ "hyper::proto::h1=off", "hickory_resolver=off", From 3d3a05386a598500c926d3c1319beba7e1616ea2 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 23 Oct 2025 14:31:15 +0100 Subject: [PATCH 177/371] refactor(static-file): remove unused segments (#19209) --- .../static-file/src/segments/headers.rs | 54 ----------- .../static-file/src/segments/mod.rs | 6 -- .../static-file/src/segments/transactions.rs | 60 ------------ .../static-file/src/static_file_producer.rs | 95 ++++--------------- crates/static-file/types/src/lib.rs | 94 +++--------------- .../src/providers/static_file/manager.rs | 2 - 6 files changed, 33 insertions(+), 278 deletions(-) delete mode 100644 crates/static-file/static-file/src/segments/headers.rs delete mode 100644 crates/static-file/static-file/src/segments/transactions.rs diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs deleted file mode 100644 index 990e33ee52a..00000000000 --- a/crates/static-file/static-file/src/segments/headers.rs +++ /dev/null @@ -1,54 +0,0 @@ -use crate::segments::Segment; -use alloy_primitives::BlockNumber; -use reth_codecs::Compact; -use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{providers::StaticFileWriter, DBProvider, StaticFileProviderFactory}; -use reth_static_file_types::StaticFileSegment; -use reth_storage_errors::provider::ProviderResult; -use std::ops::RangeInclusive; - -/// Static File segment responsible for [`StaticFileSegment::Headers`] part of data. 
-#[derive(Debug, Default)] -pub struct Headers; - -impl Segment for Headers -where - Provider: StaticFileProviderFactory> - + DBProvider, -{ - fn segment(&self) -> StaticFileSegment { - StaticFileSegment::Headers - } - - fn copy_to_static_files( - &self, - provider: Provider, - block_range: RangeInclusive, - ) -> ProviderResult<()> { - let static_file_provider = provider.static_file_provider(); - let mut static_file_writer = - static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Headers)?; - - let mut headers_cursor = provider - .tx_ref() - .cursor_read::::BlockHeader>>( - )?; - let headers_walker = headers_cursor.walk_range(block_range.clone())?; - - let mut canonical_headers_cursor = - provider.tx_ref().cursor_read::()?; - let canonical_headers_walker = canonical_headers_cursor.walk_range(block_range)?; - - for (header_entry, canonical_header_entry) in headers_walker.zip(canonical_headers_walker) { - let (header_block, header) = header_entry?; - let (canonical_header_block, canonical_header) = canonical_header_entry?; - - debug_assert_eq!(header_block, canonical_header_block); - - static_file_writer.append_header(&header, &canonical_header)?; - } - - Ok(()) - } -} diff --git a/crates/static-file/static-file/src/segments/mod.rs b/crates/static-file/static-file/src/segments/mod.rs index fc79effdd5a..a1499a2eaa8 100644 --- a/crates/static-file/static-file/src/segments/mod.rs +++ b/crates/static-file/static-file/src/segments/mod.rs @@ -1,11 +1,5 @@ //! `StaticFile` segment implementations and utilities. -mod transactions; -pub use transactions::Transactions; - -mod headers; -pub use headers::Headers; - mod receipts; pub use receipts::Receipts; diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs deleted file mode 100644 index 74cb58ed708..00000000000 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ /dev/null @@ -1,60 +0,0 @@ -use crate::segments::Segment; -use alloy_primitives::BlockNumber; -use reth_codecs::Compact; -use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{ - providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory, -}; -use reth_static_file_types::StaticFileSegment; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use std::ops::RangeInclusive; - -/// Static File segment responsible for [`StaticFileSegment::Transactions`] part of data. -#[derive(Debug, Default)] -pub struct Transactions; - -impl Segment for Transactions -where - Provider: StaticFileProviderFactory> - + DBProvider - + BlockReader, -{ - fn segment(&self) -> StaticFileSegment { - StaticFileSegment::Transactions - } - - /// Write transactions from database table [`tables::Transactions`] to static files with segment - /// [`StaticFileSegment::Transactions`] for the provided block range. - fn copy_to_static_files( - &self, - provider: Provider, - block_range: RangeInclusive, - ) -> ProviderResult<()> { - let static_file_provider = provider.static_file_provider(); - let mut static_file_writer = static_file_provider - .get_writer(*block_range.start(), StaticFileSegment::Transactions)?; - - for block in block_range { - static_file_writer.increment_block(block)?; - - let block_body_indices = provider - .block_body_indices(block)? 
- .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; - - let mut transactions_cursor = provider.tx_ref().cursor_read::::SignedTx, - >>()?; - let transactions_walker = - transactions_cursor.walk_range(block_body_indices.tx_num_range())?; - - for entry in transactions_walker { - let (tx_number, transaction) = entry?; - - static_file_writer.append_transaction(tx_number, &transaction)?; - } - } - - Ok(()) - } -} diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 185fbf7c498..2e7aa4b9df4 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -131,12 +131,6 @@ where let mut segments = Vec::<(Box>, RangeInclusive)>::new(); - if let Some(block_range) = targets.transactions.clone() { - segments.push((Box::new(segments::Transactions), block_range)); - } - if let Some(block_range) = targets.headers.clone() { - segments.push((Box::new(segments::Headers), block_range)); - } if let Some(block_range) = targets.receipts.clone() { segments.push((Box::new(segments::Receipts), block_range)); } @@ -178,16 +172,11 @@ where /// Returns highest block numbers for all static file segments. pub fn copy_to_static_files(&self) -> ProviderResult { let provider = self.provider.database_provider_ro()?; - let stages_checkpoints = [StageId::Headers, StageId::Execution, StageId::Bodies] - .into_iter() + let stages_checkpoints = std::iter::once(StageId::Execution) .map(|stage| provider.get_stage_checkpoint(stage).map(|c| c.map(|c| c.block_number))) .collect::, _>>()?; - let highest_static_files = HighestStaticFiles { - headers: stages_checkpoints[0], - receipts: stages_checkpoints[1], - transactions: stages_checkpoints[2], - }; + let highest_static_files = HighestStaticFiles { receipts: stages_checkpoints[0] }; let targets = self.get_static_file_targets(highest_static_files)?; self.run(targets)?; @@ -204,26 +193,17 @@ where let highest_static_files = self.provider.static_file_provider().get_highest_static_files(); let targets = StaticFileTargets { - headers: finalized_block_numbers.headers.and_then(|finalized_block_number| { - self.get_static_file_target(highest_static_files.headers, finalized_block_number) - }), - receipts: finalized_block_numbers - .receipts - // StaticFile receipts only if they're not pruned according to the user - // configuration - .filter(|_| !self.prune_modes.has_receipts_pruning()) - .and_then(|finalized_block_number| { + // StaticFile receipts only if they're not pruned according to the user configuration + receipts: if self.prune_modes.receipts.is_none() { + finalized_block_numbers.receipts.and_then(|finalized_block_number| { self.get_static_file_target( highest_static_files.receipts, finalized_block_number, ) - }), - transactions: finalized_block_numbers.transactions.and_then(|finalized_block_number| { - self.get_static_file_target( - highest_static_files.transactions, - finalized_block_number, - ) - }), + }) + } else { + None + }, }; trace!( @@ -313,69 +293,36 @@ mod tests { StaticFileProducerInner::new(provider_factory.clone(), PruneModes::default()); let targets = static_file_producer - .get_static_file_targets(HighestStaticFiles { - headers: Some(1), - receipts: Some(1), - transactions: Some(1), - }) + .get_static_file_targets(HighestStaticFiles { receipts: Some(1) }) .expect("get static file targets"); - assert_eq!( - targets, - StaticFileTargets { - headers: Some(0..=1), - receipts: Some(0..=1), - transactions: 
Some(0..=1) - } - ); + assert_eq!(targets, StaticFileTargets { receipts: Some(0..=1) }); assert_matches!(static_file_producer.run(targets), Ok(_)); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(1), receipts: Some(1), transactions: Some(1) } + HighestStaticFiles { receipts: Some(1) } ); let targets = static_file_producer - .get_static_file_targets(HighestStaticFiles { - headers: Some(3), - receipts: Some(3), - transactions: Some(3), - }) + .get_static_file_targets(HighestStaticFiles { receipts: Some(3) }) .expect("get static file targets"); - assert_eq!( - targets, - StaticFileTargets { - headers: Some(2..=3), - receipts: Some(2..=3), - transactions: Some(2..=3) - } - ); + assert_eq!(targets, StaticFileTargets { receipts: Some(2..=3) }); assert_matches!(static_file_producer.run(targets), Ok(_)); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) } + HighestStaticFiles { receipts: Some(3) } ); let targets = static_file_producer - .get_static_file_targets(HighestStaticFiles { - headers: Some(4), - receipts: Some(4), - transactions: Some(4), - }) + .get_static_file_targets(HighestStaticFiles { receipts: Some(4) }) .expect("get static file targets"); - assert_eq!( - targets, - StaticFileTargets { - headers: Some(4..=4), - receipts: Some(4..=4), - transactions: Some(4..=4) - } - ); + assert_eq!(targets, StaticFileTargets { receipts: Some(4..=4) }); assert_matches!( static_file_producer.run(targets), Err(ProviderError::BlockBodyIndicesNotFound(4)) ); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) } + HighestStaticFiles { receipts: Some(3) } ); } @@ -399,11 +346,7 @@ mod tests { std::thread::sleep(Duration::from_millis(100)); } let targets = locked_producer - .get_static_file_targets(HighestStaticFiles { - headers: Some(1), - receipts: Some(1), - transactions: Some(1), - }) + .get_static_file_targets(HighestStaticFiles { receipts: Some(1) }) .expect("get static file targets"); assert_matches!(locked_producer.run(targets.clone()), Ok(_)); tx.send(targets).unwrap(); diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 53be4f6d1c1..9606b0ec98b 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -27,39 +27,15 @@ pub const DEFAULT_BLOCKS_PER_STATIC_FILE: u64 = 500_000; /// Highest static file block numbers, per data segment. #[derive(Debug, Clone, Copy, Default, Eq, PartialEq)] pub struct HighestStaticFiles { - /// Highest static file block of headers, inclusive. - /// If [`None`], no static file is available. - pub headers: Option, /// Highest static file block of receipts, inclusive. /// If [`None`], no static file is available. pub receipts: Option, - /// Highest static file block of transactions, inclusive. - /// If [`None`], no static file is available. 
- pub transactions: Option, } impl HighestStaticFiles { - /// Returns the highest static file if it exists for a segment - pub const fn highest(&self, segment: StaticFileSegment) -> Option { - match segment { - StaticFileSegment::Headers => self.headers, - StaticFileSegment::Transactions => self.transactions, - StaticFileSegment::Receipts => self.receipts, - } - } - - /// Returns a mutable reference to a static file segment - pub const fn as_mut(&mut self, segment: StaticFileSegment) -> &mut Option { - match segment { - StaticFileSegment::Headers => &mut self.headers, - StaticFileSegment::Transactions => &mut self.transactions, - StaticFileSegment::Receipts => &mut self.receipts, - } - } - /// Returns an iterator over all static file segments fn iter(&self) -> impl Iterator> { - [self.headers, self.transactions, self.receipts].into_iter() + [self.receipts].into_iter() } /// Returns the minimum block of all segments. @@ -76,36 +52,28 @@ impl HighestStaticFiles { /// Static File targets, per data segment, measured in [`BlockNumber`]. #[derive(Debug, Clone, Eq, PartialEq)] pub struct StaticFileTargets { - /// Targeted range of headers. - pub headers: Option>, /// Targeted range of receipts. pub receipts: Option>, - /// Targeted range of transactions. - pub transactions: Option>, } impl StaticFileTargets { /// Returns `true` if any of the targets are [Some]. pub const fn any(&self) -> bool { - self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some() + self.receipts.is_some() } /// Returns `true` if all targets are either [`None`] or has beginning of the range equal to the /// highest static file. pub fn is_contiguous_to_highest_static_files(&self, static_files: HighestStaticFiles) -> bool { - [ - (self.headers.as_ref(), static_files.headers), - (self.receipts.as_ref(), static_files.receipts), - (self.transactions.as_ref(), static_files.transactions), - ] - .iter() - .all(|(target_block_range, highest_static_file_block)| { - target_block_range.is_none_or(|target_block_range| { - *target_block_range.start() == - highest_static_file_block - .map_or(0, |highest_static_file_block| highest_static_file_block + 1) - }) - }) + core::iter::once(&(self.receipts.as_ref(), static_files.receipts)).all( + |(target_block_range, highest_static_file_block)| { + target_block_range.is_none_or(|target_block_range| { + *target_block_range.start() == + highest_static_file_block + .map_or(0, |highest_static_file_block| highest_static_file_block + 1) + }) + }, + ) } } @@ -123,42 +91,9 @@ pub const fn find_fixed_range( mod tests { use super::*; - #[test] - fn test_highest_static_files_highest() { - let files = - HighestStaticFiles { headers: Some(100), receipts: Some(200), transactions: None }; - - // Test for headers segment - assert_eq!(files.highest(StaticFileSegment::Headers), Some(100)); - - // Test for receipts segment - assert_eq!(files.highest(StaticFileSegment::Receipts), Some(200)); - - // Test for transactions segment - assert_eq!(files.highest(StaticFileSegment::Transactions), None); - } - - #[test] - fn test_highest_static_files_as_mut() { - let mut files = HighestStaticFiles::default(); - - // Modify headers value - *files.as_mut(StaticFileSegment::Headers) = Some(150); - assert_eq!(files.headers, Some(150)); - - // Modify receipts value - *files.as_mut(StaticFileSegment::Receipts) = Some(250); - assert_eq!(files.receipts, Some(250)); - - // Modify transactions value - *files.as_mut(StaticFileSegment::Transactions) = Some(350); - assert_eq!(files.transactions, Some(350)); - } - 
#[test] fn test_highest_static_files_min() { - let files = - HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: None }; + let files = HighestStaticFiles { receipts: Some(100) }; // Minimum value among the available segments assert_eq!(files.min_block_num(), Some(100)); @@ -170,11 +105,10 @@ mod tests { #[test] fn test_highest_static_files_max() { - let files = - HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: Some(500) }; + let files = HighestStaticFiles { receipts: Some(100) }; // Maximum value among the available segments - assert_eq!(files.max_block_num(), Some(500)); + assert_eq!(files.max_block_num(), Some(100)); let empty_files = HighestStaticFiles::default(); // No values, should return None diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 76fa45f5a56..d066a704a24 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1062,9 +1062,7 @@ impl StaticFileProvider { /// Gets the highest static file block for all segments. pub fn get_highest_static_files(&self) -> HighestStaticFiles { HighestStaticFiles { - headers: self.get_highest_static_file_block(StaticFileSegment::Headers), receipts: self.get_highest_static_file_block(StaticFileSegment::Receipts), - transactions: self.get_highest_static_file_block(StaticFileSegment::Transactions), } } From f3b9349d6f0722b1c089ea4b9e0c1fb5615025d7 Mon Sep 17 00:00:00 2001 From: Ragnar Date: Thu, 23 Oct 2025 15:34:51 +0200 Subject: [PATCH 178/371] docs: add usage examples and documentation to NoopConsensus (#19194) --- crates/consensus/consensus/src/noop.rs | 28 ++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index 259fae27d67..3d6818ca306 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,9 +1,32 @@ +//! A consensus implementation that does nothing. +//! +//! This module provides `NoopConsensus`, a consensus implementation that performs no validation +//! and always returns `Ok(())` for all validation methods. Useful for testing and scenarios +//! where consensus validation is not required. +//! +//! # Examples +//! +//! ```rust +//! use reth_consensus::noop::NoopConsensus; +//! use std::sync::Arc; +//! +//! let consensus = NoopConsensus::default(); +//! let consensus_arc = NoopConsensus::arc(); +//! ``` +//! +//! # Warning +//! +//! **Not for production use** - provides no security guarantees or consensus validation. + use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; use alloc::sync::Arc; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; /// A Consensus implementation that does nothing. +/// +/// Always returns `Ok(())` for all validation methods. Suitable for testing and scenarios +/// where consensus validation is not required. #[derive(Debug, Copy, Clone, Default)] #[non_exhaustive] pub struct NoopConsensus; @@ -16,10 +39,12 @@ impl NoopConsensus { } impl HeaderValidator for NoopConsensus { + /// Validates a header (no-op implementation). fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { Ok(()) } + /// Validates a header against its parent (no-op implementation). 
fn validate_header_against_parent( &self, _header: &SealedHeader, @@ -32,6 +57,7 @@ impl HeaderValidator for NoopConsensus { impl Consensus for NoopConsensus { type Error = ConsensusError; + /// Validates body against header (no-op implementation). fn validate_body_against_header( &self, _body: &B::Body, @@ -40,12 +66,14 @@ impl Consensus for NoopConsensus { Ok(()) } + /// Validates block before execution (no-op implementation). fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), Self::Error> { Ok(()) } } impl FullConsensus for NoopConsensus { + /// Validates block after execution (no-op implementation). fn validate_block_post_execution( &self, _block: &RecoveredBlock, From 81b1949c3c6eba2c839f6f5982af0b27ccf09a19 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 23 Oct 2025 15:06:04 +0100 Subject: [PATCH 179/371] fix(cli): prune CLI argument names (#19215) --- crates/node/core/src/args/pruning.rs | 33 +++++++++++++++----------- docs/vocs/docs/pages/cli/reth/node.mdx | 26 ++++++++++---------- 2 files changed, 32 insertions(+), 27 deletions(-) diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index b5c782e62bf..2ff67446bbf 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -18,33 +18,33 @@ pub struct PruningArgs { pub full: bool, /// Minimum pruning interval measured in blocks. - #[arg(long, value_parser = RangedU64ValueParser::::new().range(1..),)] + #[arg(long = "prune.block-interval", alias = "block-interval", value_parser = RangedU64ValueParser::::new().range(1..))] pub block_interval: Option, // Sender Recovery /// Prunes all sender recovery data. - #[arg(long = "prune.senderrecovery.full", conflicts_with_all = &["sender_recovery_distance", "sender_recovery_before"])] + #[arg(long = "prune.sender-recovery.full", alias = "prune.senderrecovery.full", conflicts_with_all = &["sender_recovery_distance", "sender_recovery_before"])] pub sender_recovery_full: bool, /// Prune sender recovery data before the `head-N` block number. In other words, keep last N + /// 1 blocks. - #[arg(long = "prune.senderrecovery.distance", value_name = "BLOCKS", conflicts_with_all = &["sender_recovery_full", "sender_recovery_before"])] + #[arg(long = "prune.sender-recovery.distance", alias = "prune.senderrecovery.distance", value_name = "BLOCKS", conflicts_with_all = &["sender_recovery_full", "sender_recovery_before"])] pub sender_recovery_distance: Option, /// Prune sender recovery data before the specified block number. The specified block number is /// not pruned. - #[arg(long = "prune.senderrecovery.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["sender_recovery_full", "sender_recovery_distance"])] + #[arg(long = "prune.sender-recovery.before", alias = "prune.senderrecovery.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["sender_recovery_full", "sender_recovery_distance"])] pub sender_recovery_before: Option, // Transaction Lookup /// Prunes all transaction lookup data. - #[arg(long = "prune.transactionlookup.full", conflicts_with_all = &["transaction_lookup_distance", "transaction_lookup_before"])] + #[arg(long = "prune.transaction-lookup.full", alias = "prune.transactionlookup.full", conflicts_with_all = &["transaction_lookup_distance", "transaction_lookup_before"])] pub transaction_lookup_full: bool, /// Prune transaction lookup data before the `head-N` block number. In other words, keep last N /// + 1 blocks. 
- #[arg(long = "prune.transactionlookup.distance", value_name = "BLOCKS", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_before"])] + #[arg(long = "prune.transaction-lookup.distance", alias = "prune.transactionlookup.distance", value_name = "BLOCKS", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_before"])] pub transaction_lookup_distance: Option, /// Prune transaction lookup data before the specified block number. The specified block number /// is not pruned. - #[arg(long = "prune.transactionlookup.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_distance"])] + #[arg(long = "prune.transaction-lookup.before", alias = "prune.transactionlookup.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_distance"])] pub transaction_lookup_before: Option, // Receipts @@ -61,33 +61,38 @@ pub struct PruningArgs { #[arg(long = "prune.receipts.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance"])] pub receipts_before: Option, /// Receipts Log Filter - #[arg(long = "prune.receiptslogfilter", value_name = "FILTER_CONFIG", hide = true)] + #[arg( + long = "prune.receipts-log-filter", + alias = "prune.receiptslogfilter", + value_name = "FILTER_CONFIG", + hide = true + )] #[deprecated] pub receipts_log_filter: Option, // Account History /// Prunes all account history. - #[arg(long = "prune.accounthistory.full", conflicts_with_all = &["account_history_distance", "account_history_before"])] + #[arg(long = "prune.account-history.full", alias = "prune.accounthistory.full", conflicts_with_all = &["account_history_distance", "account_history_before"])] pub account_history_full: bool, /// Prune account before the `head-N` block number. In other words, keep last N + 1 blocks. - #[arg(long = "prune.accounthistory.distance", value_name = "BLOCKS", conflicts_with_all = &["account_history_full", "account_history_before"])] + #[arg(long = "prune.account-history.distance", alias = "prune.accounthistory.distance", value_name = "BLOCKS", conflicts_with_all = &["account_history_full", "account_history_before"])] pub account_history_distance: Option, /// Prune account history before the specified block number. The specified block number is not /// pruned. - #[arg(long = "prune.accounthistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["account_history_full", "account_history_distance"])] + #[arg(long = "prune.account-history.before", alias = "prune.accounthistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["account_history_full", "account_history_distance"])] pub account_history_before: Option, // Storage History /// Prunes all storage history data. - #[arg(long = "prune.storagehistory.full", conflicts_with_all = &["storage_history_distance", "storage_history_before"])] + #[arg(long = "prune.storage-history.full", alias = "prune.storagehistory.full", conflicts_with_all = &["storage_history_distance", "storage_history_before"])] pub storage_history_full: bool, /// Prune storage history before the `head-N` block number. In other words, keep last N + 1 /// blocks. 
- #[arg(long = "prune.storagehistory.distance", value_name = "BLOCKS", conflicts_with_all = &["storage_history_full", "storage_history_before"])] + #[arg(long = "prune.storage-history.distance", alias = "prune.storagehistory.distance", value_name = "BLOCKS", conflicts_with_all = &["storage_history_full", "storage_history_before"])] pub storage_history_distance: Option, /// Prune storage history before the specified block number. The specified block number is not /// pruned. - #[arg(long = "prune.storagehistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["storage_history_full", "storage_history_distance"])] + #[arg(long = "prune.storage-history.before", alias = "prune.storagehistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["storage_history_full", "storage_history_distance"])] pub storage_history_before: Option, // Bodies diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 3fc6988dc69..7b70afe44c9 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -743,25 +743,25 @@ Pruning: --full Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored - --block-interval + --prune.block-interval Minimum pruning interval measured in blocks - --prune.senderrecovery.full + --prune.sender-recovery.full Prunes all sender recovery data - --prune.senderrecovery.distance + --prune.sender-recovery.distance Prune sender recovery data before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.senderrecovery.before + --prune.sender-recovery.before Prune sender recovery data before the specified block number. The specified block number is not pruned - --prune.transactionlookup.full + --prune.transaction-lookup.full Prunes all transaction lookup data - --prune.transactionlookup.distance + --prune.transaction-lookup.distance Prune transaction lookup data before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.transactionlookup.before + --prune.transaction-lookup.before Prune transaction lookup data before the specified block number. The specified block number is not pruned --prune.receipts.full @@ -776,22 +776,22 @@ Pruning: --prune.receipts.before Prune receipts before the specified block number. The specified block number is not pruned - --prune.accounthistory.full + --prune.account-history.full Prunes all account history - --prune.accounthistory.distance + --prune.account-history.distance Prune account before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.accounthistory.before + --prune.account-history.before Prune account history before the specified block number. The specified block number is not pruned - --prune.storagehistory.full + --prune.storage-history.full Prunes all storage history data - --prune.storagehistory.distance + --prune.storage-history.distance Prune storage history before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.storagehistory.before + --prune.storage-history.before Prune storage history before the specified block number. 
The specified block number is not pruned

   --prune.bodies.pre-merge

From 7b7f563987d76a21aa4980738aa7fc9cb4561e98 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Thu, 23 Oct 2025 10:38:32 -0400
Subject: [PATCH 180/371] fix(engine): shrink tries after clearing (#19159)

---
 .../configured_sparse_trie.rs                 | 14 +++++
 .../tree/src/tree/payload_processor/mod.rs    | 35 +++++++++++-
 crates/trie/sparse-parallel/src/lower.rs      | 22 ++++++++
 crates/trie/sparse-parallel/src/trie.rs       | 55 +++++++++++++++++++
 crates/trie/sparse/src/state.rs               | 51 +++++++++++++++++
 crates/trie/sparse/src/traits.rs              |  8 +++
 crates/trie/sparse/src/trie.rs                | 32 +++++++++++
 7 files changed, 215 insertions(+), 2 deletions(-)

diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs
index 90e8928dba2..9e8f787823a 100644
--- a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs
+++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs
@@ -186,4 +186,18 @@ impl SparseTrieInterface for ConfiguredSparseTrie {
             Self::Parallel(trie) => trie.value_capacity(),
         }
     }
+
+    fn shrink_nodes_to(&mut self, size: usize) {
+        match self {
+            Self::Serial(trie) => trie.shrink_nodes_to(size),
+            Self::Parallel(trie) => trie.shrink_nodes_to(size),
+        }
+    }
+
+    fn shrink_values_to(&mut self, size: usize) {
+        match self {
+            Self::Serial(trie) => trie.shrink_values_to(size),
+            Self::Parallel(trie) => trie.shrink_values_to(size),
+        }
+    }
 }
diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs
index 8ab186dea5b..bf3d7268ea5 100644
--- a/crates/engine/tree/src/tree/payload_processor/mod.rs
+++ b/crates/engine/tree/src/tree/payload_processor/mod.rs
@@ -66,6 +66,29 @@ use configured_sparse_trie::ConfiguredSparseTrie;
 pub const PARALLEL_SPARSE_TRIE_PARALLELISM_THRESHOLDS: ParallelismThresholds =
     ParallelismThresholds { min_revealed_nodes: 100, min_updated_nodes: 100 };
 
+/// Default node capacity for shrinking the sparse trie. This is used to limit the number of trie
+/// nodes in allocated sparse tries.
+///
+/// Node maps have a key of `Nibbles` and value of `SparseNode`.
+/// The `size_of::<Nibbles>` is 40, and `size_of::<SparseNode>` is 80.
+///
+/// If we have 1 million entries of 120 bytes each, this conservative estimate comes out at around
+/// 120MB.
+pub const SPARSE_TRIE_MAX_NODES_SHRINK_CAPACITY: usize = 1_000_000;
+
+/// Default value capacity for shrinking the sparse trie. This is used to limit the number of values
+/// in allocated sparse tries.
+///
+/// There are storage and account values, the largest of the two being account values, which are
+/// essentially `TrieAccount`s.
+///
+/// Account value maps have a key of `Nibbles` and value of `TrieAccount`.
+/// The `size_of::<Nibbles>` is 40, and `size_of::<TrieAccount>` is 104.
+///
+/// If we have 1 million entries of 144 bytes each, this conservative estimate comes out at around
+/// 144MB.
+pub const SPARSE_TRIE_MAX_VALUES_SHRINK_CAPACITY: usize = 1_000_000;
+
 /// Entrypoint for executing the payload.
#[derive(Debug)] pub struct PayloadProcessor @@ -439,11 +462,19 @@ where // Send state root computation result let _ = state_root_tx.send(result); - // Clear the SparseStateTrie and replace it back into the mutex _after_ sending + // Clear the SparseStateTrie, shrink, and replace it back into the mutex _after_ sending // results to the next step, so that time spent clearing doesn't block the step after // this one. let _enter = debug_span!(target: "engine::tree::payload_processor", "clear").entered(); - cleared_sparse_trie.lock().replace(ClearedSparseStateTrie::from_state_trie(trie)); + let mut cleared_trie = ClearedSparseStateTrie::from_state_trie(trie); + + // Shrink the sparse trie so that we don't have ever increasing memory. + cleared_trie.shrink_to( + SPARSE_TRIE_MAX_NODES_SHRINK_CAPACITY, + SPARSE_TRIE_MAX_VALUES_SHRINK_CAPACITY, + ); + + cleared_sparse_trie.lock().replace(cleared_trie); }); } } diff --git a/crates/trie/sparse-parallel/src/lower.rs b/crates/trie/sparse-parallel/src/lower.rs index b5454dd3970..bc8ae006074 100644 --- a/crates/trie/sparse-parallel/src/lower.rs +++ b/crates/trie/sparse-parallel/src/lower.rs @@ -122,4 +122,26 @@ impl LowerSparseSubtrie { Self::Blind(None) => 0, } } + + /// Shrinks the capacity of the subtrie's node storage. + /// Works for both revealed and blind tries with allocated storage. + pub(crate) fn shrink_nodes_to(&mut self, size: usize) { + match self { + Self::Revealed(trie) | Self::Blind(Some(trie)) => { + trie.shrink_nodes_to(size); + } + Self::Blind(None) => {} + } + } + + /// Shrinks the capacity of the subtrie's value storage. + /// Works for both revealed and blind tries with allocated storage. + pub(crate) fn shrink_values_to(&mut self, size: usize) { + match self { + Self::Revealed(trie) | Self::Blind(Some(trie)) => { + trie.shrink_values_to(size); + } + Self::Blind(None) => {} + } + } } diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 5e5a838f414..34c1ff2a963 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -883,6 +883,42 @@ impl SparseTrieInterface for ParallelSparseTrie { self.upper_subtrie.value_capacity() + self.lower_subtries.iter().map(|trie| trie.value_capacity()).sum::() } + + fn shrink_nodes_to(&mut self, size: usize) { + // Distribute the capacity across upper and lower subtries + // + // Always include upper subtrie, plus any lower subtries + let total_subtries = 1 + NUM_LOWER_SUBTRIES; + let size_per_subtrie = size / total_subtries; + + // Shrink the upper subtrie + self.upper_subtrie.shrink_nodes_to(size_per_subtrie); + + // Shrink lower subtries (works for both revealed and blind with allocation) + for subtrie in &mut self.lower_subtries { + subtrie.shrink_nodes_to(size_per_subtrie); + } + + // shrink masks maps + self.branch_node_hash_masks.shrink_to(size); + self.branch_node_tree_masks.shrink_to(size); + } + + fn shrink_values_to(&mut self, size: usize) { + // Distribute the capacity across upper and lower subtries + // + // Always include upper subtrie, plus any lower subtries + let total_subtries = 1 + NUM_LOWER_SUBTRIES; + let size_per_subtrie = size / total_subtries; + + // Shrink the upper subtrie + self.upper_subtrie.shrink_values_to(size_per_subtrie); + + // Shrink lower subtries (works for both revealed and blind with allocation) + for subtrie in &mut self.lower_subtries { + subtrie.shrink_values_to(size_per_subtrie); + } + } } impl ParallelSparseTrie { @@ -2111,6 +2147,16 @@ impl SparseSubtrie { pub(crate) 
fn value_capacity(&self) -> usize { self.inner.value_capacity() } + + /// Shrinks the capacity of the subtrie's node storage. + pub(crate) fn shrink_nodes_to(&mut self, size: usize) { + self.nodes.shrink_to(size); + } + + /// Shrinks the capacity of the subtrie's value storage. + pub(crate) fn shrink_values_to(&mut self, size: usize) { + self.inner.values.shrink_to(size); + } } /// Helper type for [`SparseSubtrie`] to mutably access only a subset of fields from the original @@ -2571,10 +2617,19 @@ impl SparseSubtrieBuffers { /// Clears all buffers. fn clear(&mut self) { self.path_stack.clear(); + self.path_stack.shrink_to_fit(); + self.rlp_node_stack.clear(); + self.rlp_node_stack.shrink_to_fit(); + self.branch_child_buf.clear(); + self.branch_child_buf.shrink_to_fit(); + self.branch_value_stack_buf.clear(); + self.branch_value_stack_buf.shrink_to_fit(); + self.rlp_buf.clear(); + self.rlp_buf.shrink_to_fit(); } } diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index aef552da3dd..a202ebc8b2b 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -43,6 +43,32 @@ where Self(trie) } + /// Shrink the cleared sparse trie's capacity to the given node and value size. + /// This helps reduce memory usage when the trie has excess capacity. + /// The capacity is distributed equally across the account trie and all storage tries. + pub fn shrink_to(&mut self, node_size: usize, value_size: usize) { + // Count total number of storage tries (active + cleared + default) + let storage_tries_count = self.0.storage.tries.len() + self.0.storage.cleared_tries.len(); + + // Total tries = 1 account trie + all storage tries + let total_tries = 1 + storage_tries_count; + + // Distribute capacity equally among all tries + let node_size_per_trie = node_size / total_tries; + let value_size_per_trie = value_size / total_tries; + + // Shrink the account trie + self.0.state.shrink_nodes_to(node_size_per_trie); + self.0.state.shrink_values_to(value_size_per_trie); + + // Give storage tries the remaining capacity after account trie allocation + let storage_node_size = node_size.saturating_sub(node_size_per_trie); + let storage_value_size = value_size.saturating_sub(value_size_per_trie); + + // Shrink all storage tries (they will redistribute internally) + self.0.storage.shrink_to(storage_node_size, storage_value_size); + } + /// Returns the cleared [`SparseStateTrie`], consuming this instance. pub fn into_inner(self) -> SparseStateTrie { self.0 @@ -860,6 +886,31 @@ impl StorageTries { set })); } + + /// Shrinks the capacity of all storage tries (active, cleared, and default) to the given sizes. + /// The capacity is distributed equally among all tries that have allocations. 
+ fn shrink_to(&mut self, node_size: usize, value_size: usize) { + // Count total number of tries with capacity (active + cleared + default) + let active_count = self.tries.len(); + let cleared_count = self.cleared_tries.len(); + let total_tries = 1 + active_count + cleared_count; + + // Distribute capacity equally among all tries + let node_size_per_trie = node_size / total_tries; + let value_size_per_trie = value_size / total_tries; + + // Shrink active storage tries + for trie in self.tries.values_mut() { + trie.shrink_nodes_to(node_size_per_trie); + trie.shrink_values_to(value_size_per_trie); + } + + // Shrink cleared storage tries + for trie in &mut self.cleared_tries { + trie.shrink_nodes_to(node_size_per_trie); + trie.shrink_values_to(value_size_per_trie); + } + } } impl StorageTries { diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 8fdbb78d876..5b7b6193f96 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -228,6 +228,14 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { /// This returns the capacity of any inner data structures which store leaf values. fn value_capacity(&self) -> usize; + + /// Shrink the capacity of the sparse trie's node storage to the given size. + /// This will reduce memory usage if the current capacity is higher than the given size. + fn shrink_nodes_to(&mut self, size: usize); + + /// Shrink the capacity of the sparse trie's value storage to the given size. + /// This will reduce memory usage if the current capacity is higher than the given size. + fn shrink_values_to(&mut self, size: usize); } /// Struct for passing around branch node mask information. diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 737da842254..8500ea400b5 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -275,6 +275,28 @@ impl SparseTrie { _ => 0, } } + + /// Shrinks the capacity of the sparse trie's node storage. + /// Works for both revealed and blind tries with allocated storage. + pub fn shrink_nodes_to(&mut self, size: usize) { + match self { + Self::Blind(Some(trie)) | Self::Revealed(trie) => { + trie.shrink_nodes_to(size); + } + _ => {} + } + } + + /// Shrinks the capacity of the sparse trie's value storage. + /// Works for both revealed and blind tries with allocated storage. + pub fn shrink_values_to(&mut self, size: usize) { + match self { + Self::Blind(Some(trie)) | Self::Revealed(trie) => { + trie.shrink_values_to(size); + } + _ => {} + } + } } /// The representation of revealed sparse trie. 
@@ -1088,6 +1110,16 @@ impl SparseTrieInterface for SerialSparseTrie { fn value_capacity(&self) -> usize { self.values.capacity() } + + fn shrink_nodes_to(&mut self, size: usize) { + self.nodes.shrink_to(size); + self.branch_node_tree_masks.shrink_to(size); + self.branch_node_hash_masks.shrink_to(size); + } + + fn shrink_values_to(&mut self, size: usize) { + self.values.shrink_to(size); + } } impl SerialSparseTrie { From 6739914ce7c9c5fe0947a6b7ff5a300a917afa6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Thu, 23 Oct 2025 16:44:24 +0200 Subject: [PATCH 181/371] feat(otlp-tracing): enable to export traces with grpc export with `tracing-otlp` and `tracing-otlp-protocol` arg (#18985) --- Cargo.lock | 26 +++++++ crates/ethereum/cli/Cargo.toml | 2 + crates/ethereum/cli/src/app.rs | 51 ++++++++++--- crates/node/core/Cargo.toml | 3 +- crates/node/core/src/args/trace.rs | 58 +++++++++------ crates/optimism/cli/Cargo.toml | 3 + crates/optimism/cli/src/app.rs | 50 ++++++++++--- crates/tracing-otlp/Cargo.toml | 3 +- crates/tracing-otlp/src/lib.rs | 71 +++++++++++-------- crates/tracing/src/layers.rs | 12 ++-- docs/vocs/docs/pages/cli/reth.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/config.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/checksum.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/clear.mdx | 18 ++++- .../docs/pages/cli/reth/db/clear/mdbx.mdx | 18 ++++- .../pages/cli/reth/db/clear/static-file.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/diff.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/drop.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/get.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx | 18 ++++- .../pages/cli/reth/db/get/static-file.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/list.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/path.mdx | 18 ++++- .../docs/pages/cli/reth/db/repair-trie.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/stats.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/db/version.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/download.mdx | 18 ++++- .../vocs/docs/pages/cli/reth/dump-genesis.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/export-era.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/import-era.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/import.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/init-state.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/init.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/node.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/p2p.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/p2p/body.mdx | 18 ++++- .../vocs/docs/pages/cli/reth/p2p/bootnode.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/p2p/header.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx | 18 ++++- .../docs/pages/cli/reth/p2p/rlpx/ping.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/prune.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/re-execute.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/stage.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 18 ++++- .../cli/reth/stage/dump/account-hashing.mdx | 18 ++++- .../pages/cli/reth/stage/dump/execution.mdx | 18 ++++- .../docs/pages/cli/reth/stage/dump/merkle.mdx | 18 ++++- .../cli/reth/stage/dump/storage-hashing.mdx | 18 ++++- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 18 ++++- .../vocs/docs/pages/cli/reth/stage/unwind.mdx | 18 ++++- .../cli/reth/stage/unwind/num-blocks.mdx | 18 ++++- .../pages/cli/reth/stage/unwind/to-block.mdx | 18 ++++- 
54 files changed, 910 insertions(+), 161 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6839523354a..dbfc2f99a6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4478,6 +4478,19 @@ dependencies = [ "webpki-roots 1.0.3", ] +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "hyper-util" version = "0.1.17" @@ -6189,6 +6202,8 @@ dependencies = [ "prost", "reqwest", "thiserror 2.0.17", + "tokio", + "tonic", "tracing", ] @@ -8304,8 +8319,10 @@ dependencies = [ "reth-node-metrics", "reth-rpc-server-types", "reth-tracing", + "reth-tracing-otlp", "tempfile", "tracing", + "url", ] [[package]] @@ -9001,6 +9018,7 @@ dependencies = [ "reth-storage-api", "reth-storage-errors", "reth-tracing", + "reth-tracing-otlp", "reth-transaction-pool", "secp256k1 0.30.0", "serde", @@ -9255,11 +9273,13 @@ dependencies = [ "reth-static-file", "reth-static-file-types", "reth-tracing", + "reth-tracing-otlp", "serde", "tempfile", "tokio", "tokio-util", "tracing", + "url", ] [[package]] @@ -10580,6 +10600,7 @@ dependencies = [ name = "reth-tracing-otlp" version = "1.8.2" dependencies = [ + "clap", "eyre", "opentelemetry", "opentelemetry-otlp", @@ -12594,10 +12615,15 @@ dependencies = [ "http", "http-body", "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", "percent-encoding", "pin-project", "sync_wrapper", + "tokio", "tokio-stream", + "tower", "tower-layer", "tower-service", "tracing", diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index e232ea0cdb1..5dbb8bf4cd3 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -23,11 +23,13 @@ reth-node-ethereum.workspace = true reth-node-metrics.workspace = true reth-rpc-server-types.workspace = true reth-tracing.workspace = true +reth-tracing-otlp.workspace = true reth-node-api.workspace = true # misc clap.workspace = true eyre.workspace = true +url.workspace = true tracing.workspace = true [dev-dependencies] diff --git a/crates/ethereum/cli/src/app.rs b/crates/ethereum/cli/src/app.rs index ab3682be6dc..b947d6df1db 100644 --- a/crates/ethereum/cli/src/app.rs +++ b/crates/ethereum/cli/src/app.rs @@ -14,8 +14,10 @@ use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig, EthereumNo use reth_node_metrics::recorder::install_prometheus_recorder; use reth_rpc_server_types::RpcModuleValidator; use reth_tracing::{FileWorkerGuard, Layers}; +use reth_tracing_otlp::OtlpProtocol; use std::{fmt, sync::Arc}; use tracing::info; +use url::Url; /// A wrapper around a parsed CLI that handles command execution. #[derive(Debug)] @@ -96,7 +98,8 @@ where self.cli.logs.log_file_directory.join(chain_spec.chain().to_string()); } - self.init_tracing()?; + self.init_tracing(&runner)?; + // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); @@ -106,18 +109,19 @@ where /// Initializes tracing with the configured options. /// /// If file logging is enabled, this function stores guard to the struct. - pub fn init_tracing(&mut self) -> Result<()> { + /// For gRPC OTLP, it requires tokio runtime context. 
+ pub fn init_tracing(&mut self, runner: &CliRunner) -> Result<()> { if self.guard.is_none() { let mut layers = self.layers.take().unwrap_or_default(); #[cfg(feature = "otlp")] - if let Some(output_type) = &self.cli.traces.otlp { - info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); - layers.with_span_layer( - "reth".to_string(), - output_type.clone(), - self.cli.traces.otlp_filter.clone(), - )?; + { + self.cli.traces.validate()?; + + if let Some(endpoint) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", endpoint); + self.init_otlp_export(&mut layers, endpoint, runner)?; + } } self.guard = self.cli.logs.init_tracing_with_layers(layers)?; @@ -125,6 +129,35 @@ where } Ok(()) } + + /// Initialize OTLP tracing export based on protocol type. + /// + /// For gRPC, `block_on` is required because tonic's channel initialization needs + /// a tokio runtime context, even though `with_span_layer` itself is not async. + #[cfg(feature = "otlp")] + fn init_otlp_export( + &self, + layers: &mut Layers, + endpoint: &Url, + runner: &CliRunner, + ) -> Result<()> { + let endpoint = endpoint.clone(); + let protocol = self.cli.traces.protocol; + let filter_level = self.cli.traces.otlp_filter.clone(); + + match protocol { + OtlpProtocol::Grpc => { + runner.block_on(async { + layers.with_span_layer("reth".to_string(), endpoint, filter_level, protocol) + })?; + } + OtlpProtocol::Http => { + layers.with_span_layer("reth".to_string(), endpoint, filter_level, protocol)?; + } + } + + Ok(()) + } } /// Run CLI commands with the provided runner, components and launcher. diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 1a4f85b6198..b1a472bd9fd 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -58,8 +58,9 @@ url.workspace = true dirs-next.workspace = true shellexpand.workspace = true -# tracing +# obs tracing.workspace = true +reth-tracing-otlp.workspace = true # crypto secp256k1 = { workspace = true, features = ["global-context", "std", "recovery"] } diff --git a/crates/node/core/src/args/trace.rs b/crates/node/core/src/args/trace.rs index 45bc9c9029c..5b5e21502d1 100644 --- a/crates/node/core/src/args/trace.rs +++ b/crates/node/core/src/args/trace.rs @@ -1,17 +1,19 @@ //! Opentelemetry tracing configuration through CLI args. use clap::Parser; -use eyre::{ensure, WrapErr}; +use eyre::WrapErr; use reth_tracing::tracing_subscriber::EnvFilter; +use reth_tracing_otlp::OtlpProtocol; use url::Url; /// CLI arguments for configuring `Opentelemetry` trace and span export. #[derive(Debug, Clone, Parser)] pub struct TraceArgs { - /// Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently - /// only http exporting is supported. + /// Enable `Opentelemetry` tracing export to an OTLP endpoint. /// - /// If no value provided, defaults to `http://localhost:4318/v1/traces`. + /// If no value provided, defaults based on protocol: + /// - HTTP: `http://localhost:4318/v1/traces` + /// - gRPC: `http://localhost:4317` /// /// Example: --tracing-otlp=http://collector:4318/v1/traces #[arg( @@ -28,6 +30,22 @@ pub struct TraceArgs { )] pub otlp: Option, + /// OTLP transport protocol to use for exporting traces. + /// + /// - `http`: expects endpoint path to end with `/v1/traces` + /// - `grpc`: expects endpoint without a path + /// + /// Defaults to HTTP if not specified. 
+ #[arg( + long = "tracing-otlp-protocol", + env = "OTEL_EXPORTER_OTLP_PROTOCOL", + global = true, + value_name = "PROTOCOL", + default_value = "http", + help_heading = "Tracing" + )] + pub protocol: OtlpProtocol, + /// Set a filter directive for the OTLP tracer. This controls the verbosity /// of spans and events sent to the OTLP endpoint. It follows the same /// syntax as the `RUST_LOG` environment variable. @@ -47,25 +65,25 @@ pub struct TraceArgs { impl Default for TraceArgs { fn default() -> Self { - Self { otlp: None, otlp_filter: EnvFilter::from_default_env() } + Self { + otlp: None, + protocol: OtlpProtocol::Http, + otlp_filter: EnvFilter::from_default_env(), + } } } -// Parses and validates an OTLP endpoint url. -fn parse_otlp_endpoint(arg: &str) -> eyre::Result { - let mut url = Url::parse(arg).wrap_err("Invalid URL for OTLP trace output")?; - - // If the path is empty, we set the path. - if url.path() == "/" { - url.set_path("/v1/traces") +impl TraceArgs { + /// Validate the configuration + pub fn validate(&mut self) -> eyre::Result<()> { + if let Some(url) = &mut self.otlp { + self.protocol.validate_endpoint(url)?; + } + Ok(()) } +} - // OTLP url must end with `/v1/traces` per the OTLP specification. - ensure!( - url.path().ends_with("/v1/traces"), - "OTLP trace endpoint must end with /v1/traces, got path: {}", - url.path() - ); - - Ok(url) +// Parses an OTLP endpoint url. +fn parse_otlp_endpoint(arg: &str) -> eyre::Result { + Url::parse(arg).wrap_err("Invalid URL for OTLP trace output") } diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 6ed24ca5823..eb320045337 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -44,6 +44,7 @@ reth-optimism-evm.workspace = true reth-cli-runner.workspace = true reth-node-builder = { workspace = true, features = ["op"] } reth-tracing.workspace = true +reth-tracing-otlp.workspace = true # eth alloy-eips.workspace = true @@ -55,6 +56,7 @@ alloy-rlp.workspace = true futures-util.workspace = true derive_more.workspace = true serde.workspace = true +url.workspace = true clap = { workspace = true, features = ["derive", "env"] } tokio = { workspace = true, features = ["sync", "macros", "time", "rt-multi-thread"] } @@ -105,4 +107,5 @@ serde = [ "reth-optimism-primitives/serde", "reth-primitives-traits/serde", "reth-optimism-chainspec/serde", + "url/serde", ] diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index 621d16c7e13..8567c2b7e5a 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -9,8 +9,10 @@ use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_node::{OpExecutorProvider, OpNode}; use reth_rpc_server_types::RpcModuleValidator; use reth_tracing::{FileWorkerGuard, Layers}; +use reth_tracing_otlp::OtlpProtocol; use std::{fmt, sync::Arc}; use tracing::info; +use url::Url; /// A wrapper around a parsed CLI that handles command execution. #[derive(Debug)] @@ -63,7 +65,8 @@ where self.cli.logs.log_file_directory.join(chain_spec.chain.to_string()); } - self.init_tracing()?; + self.init_tracing(&runner)?; + // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); @@ -114,18 +117,18 @@ where /// Initializes tracing with the configured options. /// /// If file logging is enabled, this function stores guard to the struct. - pub fn init_tracing(&mut self) -> Result<()> { + /// For gRPC OTLP, it requires tokio runtime context. 
+ pub fn init_tracing(&mut self, runner: &CliRunner) -> Result<()> { if self.guard.is_none() { let mut layers = self.layers.take().unwrap_or_default(); #[cfg(feature = "otlp")] - if let Some(output_type) = &self.cli.traces.otlp { - info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); - layers.with_span_layer( - "reth".to_string(), - output_type.clone(), - self.cli.traces.otlp_filter.clone(), - )?; + { + self.cli.traces.validate()?; + if let Some(endpoint) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", endpoint); + self.init_otlp_export(&mut layers, endpoint, runner)?; + } } self.guard = self.cli.logs.init_tracing_with_layers(layers)?; @@ -133,4 +136,33 @@ where } Ok(()) } + + /// Initialize OTLP tracing export based on protocol type. + /// + /// For gRPC, `block_on` is required because tonic's channel initialization needs + /// a tokio runtime context, even though `with_span_layer` itself is not async. + #[cfg(feature = "otlp")] + fn init_otlp_export( + &self, + layers: &mut Layers, + endpoint: &Url, + runner: &CliRunner, + ) -> Result<()> { + let endpoint = endpoint.clone(); + let protocol = self.cli.traces.protocol; + let level_filter = self.cli.traces.otlp_filter.clone(); + + match protocol { + OtlpProtocol::Grpc => { + runner.block_on(async { + layers.with_span_layer("reth".to_string(), endpoint, level_filter, protocol) + })?; + } + OtlpProtocol::Http => { + layers.with_span_layer("reth".to_string(), endpoint, level_filter, protocol)?; + } + } + + Ok(()) + } } diff --git a/crates/tracing-otlp/Cargo.toml b/crates/tracing-otlp/Cargo.toml index 60cee0aa229..5b01095d4ff 100644 --- a/crates/tracing-otlp/Cargo.toml +++ b/crates/tracing-otlp/Cargo.toml @@ -12,13 +12,14 @@ exclude.workspace = true # obs opentelemetry_sdk = { workspace = true, optional = true } opentelemetry = { workspace = true, optional = true } -opentelemetry-otlp = { workspace = true, optional = true } +opentelemetry-otlp = { workspace = true, optional = true, features = ["grpc-tonic"] } opentelemetry-semantic-conventions = { workspace = true, optional = true } tracing-opentelemetry = { workspace = true, optional = true } tracing-subscriber.workspace = true tracing.workspace = true # misc +clap = { workspace = true, features = ["derive"] } eyre.workspace = true url.workspace = true diff --git a/crates/tracing-otlp/src/lib.rs b/crates/tracing-otlp/src/lib.rs index 07415ac2a65..2cfd332a408 100644 --- a/crates/tracing-otlp/src/lib.rs +++ b/crates/tracing-otlp/src/lib.rs @@ -6,7 +6,8 @@ //! applications. It allows for easily capturing and exporting distributed traces to compatible //! backends like Jaeger, Zipkin, or any other OpenTelemetry-compatible tracing system. -use eyre::{ensure, WrapErr}; +use clap::ValueEnum; +use eyre::ensure; use opentelemetry::{global, trace::TracerProvider, KeyValue, Value}; use opentelemetry_otlp::{SpanExporter, WithExportConfig}; use opentelemetry_sdk::{ @@ -20,6 +21,10 @@ use tracing_opentelemetry::OpenTelemetryLayer; use tracing_subscriber::registry::LookupSpan; use url::Url; +// Otlp http endpoint is expected to end with this path. +// See also . +const HTTP_TRACE_ENDPOINT: &str = "/v1/traces"; + /// Creates a tracing [`OpenTelemetryLayer`] that exports spans to an OTLP endpoint. 
/// /// This layer can be added to a [`tracing_subscriber::Registry`] to enable `OpenTelemetry` tracing @@ -27,6 +32,7 @@ use url::Url; pub fn span_layer( service_name: impl Into, endpoint: &Url, + protocol: OtlpProtocol, ) -> eyre::Result> where for<'span> S: Subscriber + LookupSpan<'span>, @@ -35,8 +41,12 @@ where let resource = build_resource(service_name); - let span_exporter = - SpanExporter::builder().with_http().with_endpoint(endpoint.to_string()).build()?; + let span_builder = SpanExporter::builder(); + + let span_exporter = match protocol { + OtlpProtocol::Http => span_builder.with_http().with_endpoint(endpoint.as_str()).build()?, + OtlpProtocol::Grpc => span_builder.with_tonic().with_endpoint(endpoint.as_str()).build()?, + }; let tracer_provider = SdkTracerProvider::builder() .with_resource(resource) @@ -45,7 +55,7 @@ where global::set_tracer_provider(tracer_provider.clone()); - let tracer = tracer_provider.tracer("reth-otlp"); + let tracer = tracer_provider.tracer("reth"); Ok(tracing_opentelemetry::layer().with_tracer(tracer)) } @@ -57,34 +67,37 @@ fn build_resource(service_name: impl Into) -> Resource { .build() } -/// Destination for exported trace spans. -#[derive(Debug, Clone)] -pub enum TraceOutput { - /// Export traces as JSON to stdout. - Stdout, - /// Export traces to an OTLP collector at the specified URL. - Otlp(Url), +/// OTLP transport protocol type +#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)] +pub enum OtlpProtocol { + /// HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + Http, + /// gRPC transport, port 4317 + Grpc, } -impl TraceOutput { - /// Parses the trace output destination from a string. +impl OtlpProtocol { + /// Validate and correct the URL to match protocol requirements. /// - /// Returns `TraceOutput::Stdout` for "stdout", or `TraceOutput::Otlp` for valid OTLP URLs. - /// OTLP URLs must end with `/v1/traces` per the OTLP specification. - pub fn parse(s: &str) -> eyre::Result { - if s == "stdout" { - return Ok(Self::Stdout); + /// For HTTP: Ensures the path ends with `/v1/traces`, appending it if necessary. + /// For gRPC: Ensures the path does NOT include `/v1/traces`. 
+ pub fn validate_endpoint(&self, url: &mut Url) -> eyre::Result<()> { + match self { + Self::Http => { + if !url.path().ends_with(HTTP_TRACE_ENDPOINT) { + let path = url.path().trim_end_matches('/'); + url.set_path(&format!("{}{}", path, HTTP_TRACE_ENDPOINT)); + } + } + Self::Grpc => { + ensure!( + !url.path().ends_with(HTTP_TRACE_ENDPOINT), + "OTLP gRPC endpoint should not include {} path, got: {}", + HTTP_TRACE_ENDPOINT, + url + ); + } } - - let url = Url::parse(s).wrap_err("Invalid URL for trace output")?; - - // OTLP specification requires the `/v1/traces` path for trace endpoints - ensure!( - url.path().ends_with("/v1/traces"), - "OTLP trace endpoint must end with /v1/traces, got path: {}", - url.path() - ); - - Ok(Self::Otlp(url)) + Ok(()) } } diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 210c0066308..660d40ae464 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -1,6 +1,4 @@ use crate::formatter::LogFormat; -#[cfg(feature = "otlp")] -use reth_tracing_otlp::span_layer; use rolling_file::{RollingConditionBasic, RollingFileAppender}; use std::{ fmt, @@ -8,6 +6,11 @@ use std::{ }; use tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{filter::Directive, EnvFilter, Layer, Registry}; +#[cfg(feature = "otlp")] +use { + reth_tracing_otlp::{span_layer, OtlpProtocol}, + url::Url, +}; /// A worker guard returned by the file layer. /// @@ -134,12 +137,13 @@ impl Layers { pub fn with_span_layer( &mut self, service_name: String, - endpoint_exporter: url::Url, + endpoint_exporter: Url, filter: EnvFilter, + otlp_protocol: OtlpProtocol, ) -> eyre::Result<()> { // Create the span provider - let span_layer = span_layer(service_name, &endpoint_exporter) + let span_layer = span_layer(service_name, &endpoint_exporter, otlp_protocol) .map_err(|e| eyre::eyre!("Failed to build OTLP span exporter {}", e))? .with_filter(filter); diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 041d494523c..c35216d6b5c 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -116,14 +116,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
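The endpoint handling added above differs per transport: HTTP/Protobuf export expects the `/v1/traces` path and the patch appends it when missing, while gRPC export takes the bare endpoint and rejects a `/v1/traces` suffix; with no value given, `--tracing-otlp` falls back to `http://localhost:4318/v1/traces` for HTTP and `http://localhost:4317` for gRPC. Below is a minimal standalone sketch of those path rules, assuming only the `url` crate; the constant mirrors the patch, but the helper names `normalize_http` and `check_grpc` are illustrative and not functions in the crate.

```rust
use url::Url;

// Path required by the OTLP HTTP/Protobuf trace exporter (mirrors HTTP_TRACE_ENDPOINT in the patch).
const HTTP_TRACE_ENDPOINT: &str = "/v1/traces";

/// Appends `/v1/traces` to an HTTP endpoint if it is missing (illustrative helper).
fn normalize_http(mut url: Url) -> Url {
    if !url.path().ends_with(HTTP_TRACE_ENDPOINT) {
        let path = url.path().trim_end_matches('/').to_owned();
        url.set_path(&format!("{path}{HTTP_TRACE_ENDPOINT}"));
    }
    url
}

/// Rejects gRPC endpoints that carry the HTTP-only `/v1/traces` path (illustrative helper).
fn check_grpc(url: &Url) -> Result<(), String> {
    if url.path().ends_with(HTTP_TRACE_ENDPOINT) {
        return Err(format!("gRPC endpoint should not include {HTTP_TRACE_ENDPOINT}: {url}"));
    }
    Ok(())
}

fn main() {
    // HTTP: a bare collector URL gains the `/v1/traces` path.
    let http = normalize_http(Url::parse("http://collector:4318").unwrap());
    assert_eq!(http.as_str(), "http://collector:4318/v1/traces");

    // gRPC: the bare endpoint is accepted, the HTTP-style path is not.
    assert!(check_grpc(&Url::parse("http://collector:4317").unwrap()).is_ok());
    assert!(check_grpc(&Url::parse("http://collector:4317/v1/traces").unwrap()).is_err());
}
```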
diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index 96bdcf7a98c..6b3c9e4b657 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -102,14 +102,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index f2a49420837..a7bda7c3da7 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -167,14 +167,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index c86273aacf4..4b8b8ca2cce 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -119,14 +119,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 88fd92763f8..1548558fe39 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -111,14 +111,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index c467fe9d3dd..b48ba180982 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -110,14 +110,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. 
It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index d4b59a05223..9f22178ec4c 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -113,14 +113,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index 4bb81ac07c9..fe7dd7d0bae 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -146,14 +146,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index c75a889458b..c778320f2d8 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -109,14 +109,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index 8c20c7e311a..dfcfcac1886 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -111,14 +111,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 3b8df2f3a4f..981d0c9f9a5 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -119,14 +119,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. 
It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index 3980903c65d..8e045a4cdf1 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -119,14 +119,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index 16131a95a17..3be1cd183b2 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -152,14 +152,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index 0c09f5be69b..a954093dd5d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -106,14 +106,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index 9c08ff331ed..6436afc2133 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -109,14 +109,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index 47695e1b22a..5bd316847c0 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -119,14 +119,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. 
This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index 7611b69946d..c87496d910d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -106,14 +106,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index b18faa93205..f8f1c199de5 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -164,14 +164,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index bf5b0ac534c..7aeaa8db49a 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -105,14 +105,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index cd413c12841..da732cda33b 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -170,14 +170,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 7d62409a638..77afcd5a6b3 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -165,14 +165,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. 
It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 8e3e1cdb0a2..405009c6071 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -166,14 +166,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 49c0e098098..2ef6fdbe838 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -186,14 +186,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index ac1c7ff254b..51dc401d567 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -154,14 +154,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 7b70afe44c9..48b1c75c591 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -993,14 +993,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index b81c00a0382..7b37fdfdaa3 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -103,14 +103,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. 
It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index fd28a37ebb1..bbe6b375e5b 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -323,14 +323,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index 63baa86d367..324b01daac5 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -114,14 +114,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index f9f94497547..533bd71de2e 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -323,14 +323,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index 78d6dd8d3ba..a8ac7fbd0df 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -100,14 +100,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index 2089c92461e..2d136630298 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -100,14 +100,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. 
It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 8f5828e8a67..8dfd3003816 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -154,14 +154,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index 56a7e3558c4..b7371fa4cf6 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -167,14 +167,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index 822f0f0c2db..006c6c74340 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -103,14 +103,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 037495979a0..19e813bec22 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -169,14 +169,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 8484379fe36..20cf8660bf1 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -161,14 +161,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. 
It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index 079804ff088..70fad94ea3a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -118,14 +118,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index 7aee318e1ac..bed5d33329a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -118,14 +118,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index 17b2b7c9515..3bada103c87 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -118,14 +118,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index de64aa51c33..723a54e9272 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -118,14 +118,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 5407938072f..ae57239c9d3 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -390,14 +390,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. 
This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index 2d2f94d6801..a7581b22b3f 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -162,14 +162,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index a376af84012..b04e1920b75 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -110,14 +110,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. - If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index ce62c643600..2c22f8127c1 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -110,14 +110,28 @@ Display: Tracing: --tracing-otlp[=] - Enable `Opentelemetry` tracing export to an OTLP endpoint. Currently only http exporting is supported. + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
- If no value provided, defaults to `http://localhost:4318/v1/traces`. + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` Example: --tracing-otlp=http://collector:4318/v1/traces [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + --tracing-otlp.filter Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. From 4adfa286f7447efe7abff0e6f9038d88b5481b25 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 23 Oct 2025 20:17:26 +0400 Subject: [PATCH 182/371] fix: return hashed peer key as id (#19245) --- crates/rpc/rpc/src/admin.rs | 48 ++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 28 deletions(-) diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index ce548230864..af5e1ae2ef9 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -14,6 +14,7 @@ use reth_network_types::PeerKind; use reth_rpc_api::AdminApiServer; use reth_rpc_server_types::ToRpcResult; use reth_transaction_pool::TransactionPool; +use revm_primitives::keccak256; /// `admin` API implementation. /// @@ -74,34 +75,25 @@ where let mut infos = Vec::with_capacity(peers.len()); for peer in peers { - if let Ok(pk) = id2pk(peer.remote_id) { - infos.push(PeerInfo { - id: pk.to_string(), - name: peer.client_version.to_string(), - enode: peer.enode, - enr: peer.enr, - caps: peer - .capabilities - .capabilities() - .iter() - .map(|cap| cap.to_string()) - .collect(), - network: PeerNetworkInfo { - remote_address: peer.remote_addr, - local_address: peer.local_addr.unwrap_or_else(|| self.network.local_addr()), - inbound: peer.direction.is_incoming(), - trusted: peer.kind.is_trusted(), - static_node: peer.kind.is_static(), - }, - protocols: PeerProtocolInfo { - eth: Some(EthPeerInfo::Info(EthInfo { - version: peer.status.version as u64, - })), - snap: None, - other: Default::default(), - }, - }) - } + infos.push(PeerInfo { + id: keccak256(peer.remote_id.as_slice()).to_string(), + name: peer.client_version.to_string(), + enode: peer.enode, + enr: peer.enr, + caps: peer.capabilities.capabilities().iter().map(|cap| cap.to_string()).collect(), + network: PeerNetworkInfo { + remote_address: peer.remote_addr, + local_address: peer.local_addr.unwrap_or_else(|| self.network.local_addr()), + inbound: peer.direction.is_incoming(), + trusted: peer.kind.is_trusted(), + static_node: peer.kind.is_static(), + }, + protocols: PeerProtocolInfo { + eth: Some(EthPeerInfo::Info(EthInfo { version: peer.status.version as u64 })), + snap: None, + other: Default::default(), + }, + }) } Ok(infos) From 3883df3e6ca648a5d295b1dbb1daaa3d6be29030 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 23 Oct 2025 18:20:55 +0100 Subject: [PATCH 183/371] chore: remove db pruning of header/txs segments (#19260) --- crates/cli/commands/src/stage/drop.rs | 1 - crates/prune/prune/src/segments/mod.rs | 5 +- crates/prune/prune/src/segments/set.rs | 6 +- 
.../prune/src/segments/static_file/headers.rs | 346 ------------------ .../prune/src/segments/static_file/mod.rs | 4 - .../src/segments/static_file/transactions.rs | 225 ------------ .../src/segments/user/transaction_lookup.rs | 39 +- crates/prune/types/src/mode.rs | 6 +- crates/prune/types/src/segment.rs | 8 +- .../src/providers/static_file/manager.rs | 7 + 10 files changed, 41 insertions(+), 606 deletions(-) delete mode 100644 crates/prune/prune/src/segments/static_file/headers.rs delete mode 100644 crates/prune/prune/src/segments/static_file/transactions.rs diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 0da3493cbb0..2c6e911d7bd 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -78,7 +78,6 @@ impl Command { StageEnum::Bodies => { tx.clear::()?; tx.clear::>>()?; - reset_prune_checkpoint(tx, PruneSegment::Transactions)?; tx.clear::()?; tx.clear::>>()?; diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index f0f688a7c86..090f445720f 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -8,10 +8,7 @@ use alloy_primitives::{BlockNumber, TxNumber}; use reth_provider::{errors::provider::ProviderResult, BlockReader, PruneCheckpointWriter}; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; pub use set::SegmentSet; -pub use static_file::{ - Headers as StaticFileHeaders, Receipts as StaticFileReceipts, - Transactions as StaticFileTransactions, -}; +pub use static_file::Receipts as StaticFileReceipts; use std::{fmt::Debug, ops::RangeInclusive}; use tracing::error; pub use user::{ diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index e551a8de9a1..f2a8794df59 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -11,7 +11,7 @@ use reth_provider::{ }; use reth_prune_types::PruneModes; -use super::{StaticFileHeaders, StaticFileReceipts, StaticFileTransactions}; +use super::StaticFileReceipts; /// Collection of [`Segment`]. Thread-safe, allocated on the heap. 
#[derive(Debug)] @@ -74,10 +74,6 @@ where } = prune_modes; Self::default() - // Static file headers - .segment(StaticFileHeaders::new(static_file_provider.clone())) - // Static file transactions - .segment(StaticFileTransactions::new(static_file_provider.clone())) // Static file receipts .segment(StaticFileReceipts::new(static_file_provider)) // Merkle changesets diff --git a/crates/prune/prune/src/segments/static_file/headers.rs b/crates/prune/prune/src/segments/static_file/headers.rs deleted file mode 100644 index 19b255ed3d3..00000000000 --- a/crates/prune/prune/src/segments/static_file/headers.rs +++ /dev/null @@ -1,346 +0,0 @@ -use crate::{ - db_ext::DbTxPruneExt, - segments::{PruneInput, Segment}, - PruneLimiter, PrunerError, -}; -use alloy_primitives::BlockNumber; -use itertools::Itertools; -use reth_db_api::{ - cursor::{DbCursorRO, RangeWalker}, - table::Value, - tables, - transaction::DbTxMut, -}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{providers::StaticFileProvider, DBProvider, StaticFileProviderFactory}; -use reth_prune_types::{ - PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, -}; -use reth_static_file_types::StaticFileSegment; -use std::num::NonZeroUsize; -use tracing::trace; - -/// Number of header tables to prune in one step -/// -/// Note: `HeaderTerminalDifficulties` is no longer pruned after Paris/Merge as it's read-only -const HEADER_TABLES_TO_PRUNE: usize = 2; - -#[derive(Debug)] -pub struct Headers { - static_file_provider: StaticFileProvider, -} - -impl Headers { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { - Self { static_file_provider } - } -} - -impl Segment for Headers -where - Provider: StaticFileProviderFactory> - + DBProvider, -{ - fn segment(&self) -> PruneSegment { - PruneSegment::Headers - } - - fn mode(&self) -> Option { - self.static_file_provider - .get_highest_static_file_block(StaticFileSegment::Headers) - .map(PruneMode::before_inclusive) - } - - fn purpose(&self) -> PrunePurpose { - PrunePurpose::StaticFile - } - - fn prune(&self, provider: &Provider, input: PruneInput) -> Result { - let (block_range_start, block_range_end) = match input.get_next_block_range() { - Some(range) => (*range.start(), *range.end()), - None => { - trace!(target: "pruner", "No headers to prune"); - return Ok(SegmentOutput::done()) - } - }; - - let last_pruned_block = - if block_range_start == 0 { None } else { Some(block_range_start - 1) }; - - let range = last_pruned_block.map_or(0, |block| block + 1)..=block_range_end; - - // let mut headers_cursor = provider.tx_ref().cursor_write::()?; - let mut headers_cursor = provider - .tx_ref() - .cursor_write::::BlockHeader>>( - )?; - let mut canonical_headers_cursor = - provider.tx_ref().cursor_write::()?; - - let mut limiter = input.limiter.floor_deleted_entries_limit_to_multiple_of( - NonZeroUsize::new(HEADER_TABLES_TO_PRUNE).unwrap(), - ); - - let tables_iter = HeaderTablesIter::new( - provider, - &mut limiter, - headers_cursor.walk_range(range.clone())?, - canonical_headers_cursor.walk_range(range)?, - ); - - let mut last_pruned_block: Option = None; - let mut pruned = 0; - for res in tables_iter { - let HeaderTablesIterItem { pruned_block, entries_pruned } = res?; - last_pruned_block = Some(pruned_block); - pruned += entries_pruned; - } - - let done = last_pruned_block == Some(block_range_end); - let progress = limiter.progress(done); - - Ok(SegmentOutput { - progress, - pruned, - checkpoint: Some(SegmentOutputCheckpoint { - block_number: 
last_pruned_block, - tx_number: None, - }), - }) - } -} - -type Walker<'a, Provider, T> = - RangeWalker<'a, T, <::Tx as DbTxMut>::CursorMut>; - -#[allow(missing_debug_implementations)] -struct HeaderTablesIter<'a, Provider> -where - Provider: StaticFileProviderFactory> - + DBProvider, -{ - provider: &'a Provider, - limiter: &'a mut PruneLimiter, - headers_walker: Walker< - 'a, - Provider, - tables::Headers<::BlockHeader>, - >, - canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>, -} - -struct HeaderTablesIterItem { - pruned_block: BlockNumber, - entries_pruned: usize, -} - -impl<'a, Provider> HeaderTablesIter<'a, Provider> -where - Provider: StaticFileProviderFactory> - + DBProvider, -{ - const fn new( - provider: &'a Provider, - limiter: &'a mut PruneLimiter, - headers_walker: Walker< - 'a, - Provider, - tables::Headers<::BlockHeader>, - >, - canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>, - ) -> Self { - Self { provider, limiter, headers_walker, canonical_headers_walker } - } -} - -impl Iterator for HeaderTablesIter<'_, Provider> -where - Provider: StaticFileProviderFactory> - + DBProvider, -{ - type Item = Result; - fn next(&mut self) -> Option { - if self.limiter.is_limit_reached() { - return None - } - - let mut pruned_block_headers = None; - let mut pruned_block_canonical = None; - - if let Err(err) = self.provider.tx_ref().prune_table_with_range_step( - &mut self.headers_walker, - self.limiter, - &mut |_| false, - &mut |row| pruned_block_headers = Some(row.0), - ) { - return Some(Err(err.into())) - } - - if let Err(err) = self.provider.tx_ref().prune_table_with_range_step( - &mut self.canonical_headers_walker, - self.limiter, - &mut |_| false, - &mut |row| pruned_block_canonical = Some(row.0), - ) { - return Some(Err(err.into())) - } - - if ![pruned_block_headers, pruned_block_canonical].iter().all_equal() { - return Some(Err(PrunerError::InconsistentData( - "All headers-related tables should be pruned up to the same height", - ))) - } - - pruned_block_headers.map(move |block| { - Ok(HeaderTablesIterItem { pruned_block: block, entries_pruned: HEADER_TABLES_TO_PRUNE }) - }) - } -} - -#[cfg(test)] -mod tests { - use crate::segments::{ - static_file::headers::HEADER_TABLES_TO_PRUNE, PruneInput, PruneLimiter, Segment, - SegmentOutput, - }; - use alloy_primitives::{BlockNumber, B256}; - use assert_matches::assert_matches; - use reth_db_api::{tables, transaction::DbTx}; - use reth_provider::{ - DBProvider, DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, - StaticFileProviderFactory, - }; - use reth_prune_types::{ - PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment, - SegmentOutputCheckpoint, - }; - use reth_stages::test_utils::TestStageDB; - use reth_testing_utils::{generators, generators::random_header_range}; - use tracing::trace; - - #[test] - fn prune() { - reth_tracing::init_test_tracing(); - - let db = TestStageDB::default(); - let mut rng = generators::rng(); - - let headers = random_header_range(&mut rng, 0..100, B256::ZERO); - let tx = db.factory.provider_rw().unwrap().into_tx(); - for header in &headers { - TestStageDB::insert_header(None, &tx, header).unwrap(); - } - tx.commit().unwrap(); - - assert_eq!(db.table::().unwrap().len(), headers.len()); - assert_eq!(db.table::().unwrap().len(), headers.len()); - - let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| { - let segment = super::Headers::new(db.factory.static_file_provider()); - let prune_mode = 
PruneMode::Before(to_block); - let mut limiter = PruneLimiter::default().set_deleted_entries_limit(6); - let input = PruneInput { - previous_checkpoint: db - .factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::Headers) - .unwrap(), - to_block, - limiter: limiter.clone(), - }; - - let next_block_number_to_prune = db - .factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::Headers) - .unwrap() - .and_then(|checkpoint| checkpoint.block_number) - .map(|block_number| block_number + 1) - .unwrap_or_default(); - - let provider = db.factory.database_provider_rw().unwrap(); - let result = segment.prune(&provider, input.clone()).unwrap(); - limiter.increment_deleted_entries_count_by(result.pruned); - trace!(target: "pruner::test", - expected_prune_progress=?expected_result.0, - expected_pruned=?expected_result.1, - result=?result, - "SegmentOutput" - ); - - assert_matches!( - result, - SegmentOutput {progress, pruned, checkpoint: Some(_)} - if (progress, pruned) == expected_result - ); - provider - .save_prune_checkpoint( - PruneSegment::Headers, - result.checkpoint.unwrap().as_prune_checkpoint(prune_mode), - ) - .unwrap(); - provider.commit().expect("commit"); - - let last_pruned_block_number = to_block.min( - next_block_number_to_prune + - (input.limiter.deleted_entries_limit().unwrap() / HEADER_TABLES_TO_PRUNE - 1) - as u64, - ); - - assert_eq!( - db.table::().unwrap().len(), - headers.len() - (last_pruned_block_number + 1) as usize - ); - assert_eq!( - db.table::().unwrap().len(), - headers.len() - (last_pruned_block_number + 1) as usize - ); - assert_eq!( - db.factory.provider().unwrap().get_prune_checkpoint(PruneSegment::Headers).unwrap(), - Some(PruneCheckpoint { - block_number: Some(last_pruned_block_number), - tx_number: None, - prune_mode - }) - ); - }; - - // First test: Prune with limit of 6 entries - // This will prune blocks 0-2 (3 blocks × 2 tables = 6 entries) - test_prune( - 3, - (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 6), - ); - - // Second test: Prune remaining blocks - // This will prune block 3 (1 block × 2 tables = 2 entries) - test_prune(3, (PruneProgress::Finished, 2)); - } - - #[test] - fn prune_cannot_be_done() { - let db = TestStageDB::default(); - - let limiter = PruneLimiter::default().set_deleted_entries_limit(0); - - let input = PruneInput { - previous_checkpoint: None, - to_block: 1, - // Less than total number of tables for `Headers` segment - limiter, - }; - - let provider = db.factory.database_provider_rw().unwrap(); - let segment = super::Headers::new(db.factory.static_file_provider()); - let result = segment.prune(&provider, input).unwrap(); - assert_eq!( - result, - SegmentOutput::not_done( - PruneInterruptReason::DeletedEntriesLimitReached, - Some(SegmentOutputCheckpoint::default()) - ) - ); - } -} diff --git a/crates/prune/prune/src/segments/static_file/mod.rs b/crates/prune/prune/src/segments/static_file/mod.rs index cb9dc79c6cd..f699dd37c9e 100644 --- a/crates/prune/prune/src/segments/static_file/mod.rs +++ b/crates/prune/prune/src/segments/static_file/mod.rs @@ -1,7 +1,3 @@ -mod headers; mod receipts; -mod transactions; -pub use headers::Headers; pub use receipts::Receipts; -pub use transactions::Transactions; diff --git a/crates/prune/prune/src/segments/static_file/transactions.rs b/crates/prune/prune/src/segments/static_file/transactions.rs deleted file mode 100644 index 115ee2ca39a..00000000000 --- a/crates/prune/prune/src/segments/static_file/transactions.rs +++ /dev/null @@ 
-1,225 +0,0 @@ -use crate::{ - db_ext::DbTxPruneExt, - segments::{PruneInput, Segment}, - PrunerError, -}; -use reth_db_api::{table::Value, tables, transaction::DbTxMut}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{ - providers::StaticFileProvider, BlockReader, DBProvider, StaticFileProviderFactory, - TransactionsProvider, -}; -use reth_prune_types::{ - PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, -}; -use reth_static_file_types::StaticFileSegment; -use tracing::trace; - -/// The type responsible for pruning transactions in the database and history expiry. -#[derive(Debug)] -pub struct Transactions { - static_file_provider: StaticFileProvider, -} - -impl Transactions { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { - Self { static_file_provider } - } -} - -impl Segment for Transactions -where - Provider: DBProvider - + TransactionsProvider - + BlockReader - + StaticFileProviderFactory>, -{ - fn segment(&self) -> PruneSegment { - PruneSegment::Transactions - } - - fn mode(&self) -> Option { - self.static_file_provider - .get_highest_static_file_block(StaticFileSegment::Transactions) - .map(PruneMode::before_inclusive) - } - - fn purpose(&self) -> PrunePurpose { - PrunePurpose::StaticFile - } - - fn prune(&self, provider: &Provider, input: PruneInput) -> Result { - let tx_range = match input.get_next_tx_num_range(provider)? { - Some(range) => range, - None => { - trace!(target: "pruner", "No transactions to prune"); - return Ok(SegmentOutput::done()) - } - }; - - let mut limiter = input.limiter; - - let mut last_pruned_transaction = *tx_range.end(); - let (pruned, done) = provider.tx_ref().prune_table_with_range::::SignedTx, - >>( - tx_range, - &mut limiter, - |_| false, - |row| last_pruned_transaction = row.0, - )?; - trace!(target: "pruner", %pruned, %done, "Pruned transactions"); - - let last_pruned_block = provider - .transaction_block(last_pruned_transaction)? - .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? - // If there's more transactions to prune, set the checkpoint block number to previous, - // so we could finish pruning its transactions on the next run. 
- .checked_sub(if done { 0 } else { 1 }); - - let progress = limiter.progress(done); - - Ok(SegmentOutput { - progress, - pruned, - checkpoint: Some(SegmentOutputCheckpoint { - block_number: last_pruned_block, - tx_number: Some(last_pruned_transaction), - }), - }) - } -} - -#[cfg(test)] -mod tests { - use crate::segments::{PruneInput, PruneLimiter, Segment}; - use alloy_primitives::{BlockNumber, TxNumber, B256}; - use assert_matches::assert_matches; - use itertools::{ - FoldWhile::{Continue, Done}, - Itertools, - }; - use reth_db_api::tables; - use reth_provider::{ - DBProvider, DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, - StaticFileProviderFactory, - }; - use reth_prune_types::{ - PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment, - SegmentOutput, - }; - use reth_stages::test_utils::{StorageKind, TestStageDB}; - use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; - use std::ops::Sub; - - #[test] - fn prune() { - let db = TestStageDB::default(); - let mut rng = generators::rng(); - - let blocks = random_block_range( - &mut rng, - 1..=100, - BlockRangeParams { parent: Some(B256::ZERO), tx_count: 2..3, ..Default::default() }, - ); - db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks"); - - let transactions = - blocks.iter().flat_map(|block| &block.body().transactions).collect::>(); - - assert_eq!(db.table::().unwrap().len(), transactions.len()); - - let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| { - let segment = super::Transactions::new(db.factory.static_file_provider()); - let prune_mode = PruneMode::Before(to_block); - let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); - let input = PruneInput { - previous_checkpoint: db - .factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::Transactions) - .unwrap(), - to_block, - limiter: limiter.clone(), - }; - - let next_tx_number_to_prune = db - .factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::Transactions) - .unwrap() - .and_then(|checkpoint| checkpoint.tx_number) - .map(|tx_number| tx_number + 1) - .unwrap_or_default(); - - let provider = db.factory.database_provider_rw().unwrap(); - let result = segment.prune(&provider, input.clone()).unwrap(); - limiter.increment_deleted_entries_count_by(result.pruned); - - assert_matches!( - result, - SegmentOutput {progress, pruned, checkpoint: Some(_)} - if (progress, pruned) == expected_result - ); - - provider - .save_prune_checkpoint( - PruneSegment::Transactions, - result.checkpoint.unwrap().as_prune_checkpoint(prune_mode), - ) - .unwrap(); - provider.commit().expect("commit"); - - let last_pruned_tx_number = blocks - .iter() - .take(to_block as usize) - .map(|block| block.transaction_count()) - .sum::() - .min( - next_tx_number_to_prune as usize + - input.limiter.deleted_entries_limit().unwrap(), - ) - .sub(1); - - let last_pruned_block_number = blocks - .iter() - .fold_while((0, 0), |(_, mut tx_count), block| { - tx_count += block.transaction_count(); - - if tx_count > last_pruned_tx_number { - Done((block.number, tx_count)) - } else { - Continue((block.number, tx_count)) - } - }) - .into_inner() - .0 - .checked_sub(if result.progress.is_finished() { 0 } else { 1 }); - - assert_eq!( - db.table::().unwrap().len(), - transactions.len() - (last_pruned_tx_number + 1) - ); - assert_eq!( - db.factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::Transactions) - .unwrap(), - 
Some(PruneCheckpoint { - block_number: last_pruned_block_number, - tx_number: Some(last_pruned_tx_number as TxNumber), - prune_mode - }) - ); - }; - - test_prune( - 6, - (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 10), - ); - test_prune(6, (PruneProgress::Finished, 2)); - } -} diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index 0055f8abd22..fed90d84f2d 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -6,8 +6,11 @@ use crate::{ use alloy_eips::eip2718::Encodable2718; use rayon::prelude::*; use reth_db_api::{tables, transaction::DbTxMut}; -use reth_provider::{BlockReader, DBProvider, PruneCheckpointReader}; -use reth_prune_types::{PruneMode, PrunePurpose, PruneSegment, SegmentOutputCheckpoint}; +use reth_provider::{BlockReader, DBProvider, PruneCheckpointReader, StaticFileProviderFactory}; +use reth_prune_types::{ + PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutputCheckpoint, +}; +use reth_static_file_types::StaticFileSegment; use tracing::{debug, instrument, trace}; #[derive(Debug)] @@ -23,8 +26,10 @@ impl TransactionLookup { impl Segment for TransactionLookup where - Provider: - DBProvider + BlockReader + PruneCheckpointReader, + Provider: DBProvider + + BlockReader + + PruneCheckpointReader + + StaticFileProviderFactory, { fn segment(&self) -> PruneSegment { PruneSegment::TransactionLookup @@ -47,18 +52,26 @@ where // It is not possible to prune TransactionLookup data for which we don't have transaction // data. If the TransactionLookup checkpoint is lagging behind (which can happen e.g. when // pre-merge history is dropped and then later tx lookup pruning is enabled) then we can - // only prune from the tx checkpoint and onwards. - if let Some(txs_checkpoint) = provider.get_prune_checkpoint(PruneSegment::Transactions)? && + // only prune from the lowest static file. + if let Some(lowest_range) = + provider.static_file_provider().get_lowest_range(StaticFileSegment::Transactions) && input .previous_checkpoint - .is_none_or(|checkpoint| checkpoint.block_number < txs_checkpoint.block_number) + .is_none_or(|checkpoint| checkpoint.block_number < Some(lowest_range.start())) { - input.previous_checkpoint = Some(txs_checkpoint); - debug!( - target: "pruner", - transactions_checkpoint = ?input.previous_checkpoint, - "No TransactionLookup checkpoint found, using Transactions checkpoint as fallback" - ); + let new_checkpoint = lowest_range.start().saturating_sub(1); + if let Some(body_indices) = provider.block_body_indices(new_checkpoint)? { + input.previous_checkpoint = Some(PruneCheckpoint { + block_number: Some(new_checkpoint), + tx_number: Some(body_indices.last_tx_num()), + prune_mode: self.mode, + }); + debug!( + target: "pruner", + static_file_checkpoint = ?input.previous_checkpoint, + "Using static file transaction checkpoint as TransactionLookup starting point" + ); + } } let (start, end) = match input.get_next_tx_num_range(provider)? 
{ diff --git a/crates/prune/types/src/mode.rs b/crates/prune/types/src/mode.rs index 4c09ccfa639..0565087673d 100644 --- a/crates/prune/types/src/mode.rs +++ b/crates/prune/types/src/mode.rs @@ -129,7 +129,11 @@ mod tests { // Test for a scenario where there are no minimum blocks and Full can be used assert_eq!( - PruneMode::Full.prune_target_block(tip, PruneSegment::Transactions, PrunePurpose::User), + PruneMode::Full.prune_target_block( + tip, + PruneSegment::TransactionLookup, + PrunePurpose::User + ), Ok(Some((tip, PruneMode::Full))), ); } diff --git a/crates/prune/types/src/segment.rs b/crates/prune/types/src/segment.rs index c5cbecd4ccd..542d3042049 100644 --- a/crates/prune/types/src/segment.rs +++ b/crates/prune/types/src/segment.rs @@ -24,10 +24,6 @@ pub enum PruneSegment { AccountHistory, /// Prune segment responsible for the `StorageChangeSets` and `StoragesHistory` tables. StorageHistory, - /// Prune segment responsible for the `CanonicalHeaders`, `Headers` tables. - Headers, - /// Prune segment responsible for the `Transactions` table. - Transactions, /// Prune segment responsible for all rows in `AccountsTrieChangeSets` and /// `StoragesTrieChangeSets` table. MerkleChangeSets, @@ -45,9 +41,7 @@ impl PruneSegment { /// Returns minimum number of blocks to keep in the database for this segment. pub const fn min_blocks(&self, purpose: PrunePurpose) -> u64 { match self { - Self::SenderRecovery | Self::TransactionLookup | Self::Headers | Self::Transactions => { - 0 - } + Self::SenderRecovery | Self::TransactionLookup => 0, Self::Receipts if purpose.is_static_file() => 0, Self::ContractLogs | Self::AccountHistory | diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index d066a704a24..ea7eec9e9d9 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1042,6 +1042,13 @@ impl StaticFileProvider { self.static_files_min_block.read().get(&segment).map(|range| range.end()) } + /// Gets the lowest static file's block range if it exists for a static file segment. + /// + /// If there is nothing on disk for the given segment, this will return [`None`]. + pub fn get_lowest_range(&self, segment: StaticFileSegment) -> Option { + self.static_files_min_block.read().get(&segment).copied() + } + /// Gets the highest static file's block height if it exists for a static file segment. /// /// If there is nothing on disk for the given segment, this will return [`None`]. 
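Note: the TransactionLookup change above no longer borrows the Transactions prune checkpoint; instead it derives a synthetic starting checkpoint from the lowest transaction static file via the new `StaticFileProvider::get_lowest_range`. The following is a minimal sketch of that derivation only, assuming the trait bounds used in the diff (`BlockReader` for `block_body_indices`, `StaticFileProviderFactory` for the static file provider); the free-function name and exact import paths are illustrative, not part of this patch:

    use reth_provider::{errors::provider::ProviderResult, BlockReader, StaticFileProviderFactory};
    use reth_prune_types::{PruneCheckpoint, PruneMode};
    use reth_static_file_types::StaticFileSegment;

    /// Derives a starting checkpoint for TransactionLookup pruning from the lowest
    /// transaction static file, mirroring the fallback added above.
    fn static_file_fallback_checkpoint<Provider>(
        provider: &Provider,
        prune_mode: PruneMode,
    ) -> ProviderResult<Option<PruneCheckpoint>>
    where
        Provider: BlockReader + StaticFileProviderFactory,
    {
        // Lowest block range still backed by Transactions static files.
        let Some(lowest_range) =
            provider.static_file_provider().get_lowest_range(StaticFileSegment::Transactions)
        else {
            return Ok(None);
        };

        // Blocks below the lowest range have no transaction data left, so lookup
        // pruning can only start from the block right before it.
        let block = lowest_range.start().saturating_sub(1);

        Ok(provider.block_body_indices(block)?.map(|indices| PruneCheckpoint {
            block_number: Some(block),
            tx_number: Some(indices.last_tx_num()),
            prune_mode,
        }))
    }

The actual segment only applies this fallback when the existing TransactionLookup checkpoint lags behind the lowest static file range, as shown in the diff.
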
From 5a9c7703d145e6b630ca93989504586ffe2aaf87 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 23 Oct 2025 19:44:06 +0100 Subject: [PATCH 184/371] chore: rm `StaticFileReceipts` pruner (#19265) --- crates/prune/prune/src/segments/mod.rs | 2 - crates/prune/prune/src/segments/receipts.rs | 4 +- crates/prune/prune/src/segments/set.rs | 6 +- .../prune/src/segments/static_file/mod.rs | 3 - .../src/segments/static_file/receipts.rs | 58 ------------------- 5 files changed, 2 insertions(+), 71 deletions(-) delete mode 100644 crates/prune/prune/src/segments/static_file/mod.rs delete mode 100644 crates/prune/prune/src/segments/static_file/receipts.rs diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index 090f445720f..43be33a75d1 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -1,6 +1,5 @@ mod receipts; mod set; -mod static_file; mod user; use crate::{PruneLimiter, PrunerError}; @@ -8,7 +7,6 @@ use alloy_primitives::{BlockNumber, TxNumber}; use reth_provider::{errors::provider::ProviderResult, BlockReader, PruneCheckpointWriter}; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; pub use set::SegmentSet; -pub use static_file::Receipts as StaticFileReceipts; use std::{fmt::Debug, ops::RangeInclusive}; use tracing::error; pub use user::{ diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index 12ad6e2c203..68a12552013 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -1,9 +1,7 @@ -//! Common receipts pruning logic shared between user and static file pruning segments. +//! Common receipts pruning logic. //! //! - [`crate::segments::user::Receipts`] is responsible for pruning receipts according to the //! user-configured settings (for example, on a full node or with a custom prune config) -//! - [`crate::segments::static_file::Receipts`] is responsible for pruning receipts on an archive -//! node after static file producer has finished use crate::{db_ext::DbTxPruneExt, segments::PruneInput, PrunerError}; use reth_db_api::{table::Value, tables, transaction::DbTxMut}; diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index f2a8794df59..4538773d7d2 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -11,8 +11,6 @@ use reth_provider::{ }; use reth_prune_types::PruneModes; -use super::StaticFileReceipts; - /// Collection of [`Segment`]. Thread-safe, allocated on the heap. #[derive(Debug)] pub struct SegmentSet { @@ -58,7 +56,7 @@ where /// Creates a [`SegmentSet`] from an existing components, such as [`StaticFileProvider`] and /// [`PruneModes`]. 
pub fn from_components( - static_file_provider: StaticFileProvider, + _static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { #[expect(deprecated)] @@ -74,8 +72,6 @@ where } = prune_modes; Self::default() - // Static file receipts - .segment(StaticFileReceipts::new(static_file_provider)) // Merkle changesets .segment(MerkleChangeSets::new(merkle_changesets)) // Account history diff --git a/crates/prune/prune/src/segments/static_file/mod.rs b/crates/prune/prune/src/segments/static_file/mod.rs deleted file mode 100644 index f699dd37c9e..00000000000 --- a/crates/prune/prune/src/segments/static_file/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod receipts; - -pub use receipts::Receipts; diff --git a/crates/prune/prune/src/segments/static_file/receipts.rs b/crates/prune/prune/src/segments/static_file/receipts.rs deleted file mode 100644 index 6a84cce9c41..00000000000 --- a/crates/prune/prune/src/segments/static_file/receipts.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::{ - segments::{PruneInput, Segment}, - PrunerError, -}; -use reth_db_api::{table::Value, transaction::DbTxMut}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{ - errors::provider::ProviderResult, providers::StaticFileProvider, BlockReader, DBProvider, - PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, -}; -use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; -use reth_static_file_types::StaticFileSegment; - -#[derive(Debug)] -pub struct Receipts { - static_file_provider: StaticFileProvider, -} - -impl Receipts { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { - Self { static_file_provider } - } -} - -impl Segment for Receipts -where - Provider: StaticFileProviderFactory> - + DBProvider - + PruneCheckpointWriter - + TransactionsProvider - + BlockReader, -{ - fn segment(&self) -> PruneSegment { - PruneSegment::Receipts - } - - fn mode(&self) -> Option { - self.static_file_provider - .get_highest_static_file_block(StaticFileSegment::Receipts) - .map(PruneMode::before_inclusive) - } - - fn purpose(&self) -> PrunePurpose { - PrunePurpose::StaticFile - } - - fn prune(&self, provider: &Provider, input: PruneInput) -> Result { - crate::segments::receipts::prune(provider, input) - } - - fn save_checkpoint( - &self, - provider: &Provider, - checkpoint: PruneCheckpoint, - ) -> ProviderResult<()> { - crate::segments::receipts::save_checkpoint(provider, checkpoint) - } -} From 189b00b1e6c24bfce307f3f4db5a41f7760fb6e3 Mon Sep 17 00:00:00 2001 From: radik878 Date: Thu, 23 Oct 2025 23:03:16 +0300 Subject: [PATCH 185/371] chore(net): remove unnecessary TODO (#19268) --- crates/net/eth-wire/src/multiplex.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 058dfe311e3..489fd86e7dc 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -385,7 +385,6 @@ impl CanDisconnect for ProtocolProxy { &mut self, _reason: DisconnectReason, ) -> Pin>::Error>> + Send + '_>> { - // TODO handle disconnects Box::pin(async move { Ok(()) }) } } From 08fc0a918d5756fb64a5a45094118a3733c35480 Mon Sep 17 00:00:00 2001 From: Yash <72552910+kumaryash90@users.noreply.github.com> Date: Fri, 24 Oct 2025 13:46:21 +0530 Subject: [PATCH 186/371] feat: eth_fillTransaction (#19199) Co-authored-by: Arsenii Kulikov Co-authored-by: jxom <7336481+jxom@users.noreply.github.com> --- .../src/testsuite/actions/engine_api.rs | 10 +- 
.../src/testsuite/actions/fork.rs | 15 +- .../src/testsuite/actions/node_ops.rs | 29 +- .../src/testsuite/actions/produce_blocks.rs | 5 + .../tests/e2e-testsuite/main.rs | 1 + .../builder/src/launch/invalid_block_hook.rs | 15 +- crates/rpc/rpc-builder/src/lib.rs | 5 +- crates/rpc/rpc-builder/tests/it/http.rs | 141 ++++++---- crates/rpc/rpc-builder/tests/it/middleware.rs | 3 +- crates/rpc/rpc-eth-api/src/core.rs | 28 +- .../rpc-eth-api/src/helpers/transaction.rs | 81 +++++- crates/rpc/rpc-eth-types/src/lib.rs | 2 +- crates/rpc/rpc-eth-types/src/transaction.rs | 12 +- crates/rpc/rpc-testing-util/src/debug.rs | 4 +- crates/rpc/rpc-testing-util/tests/it/trace.rs | 4 +- crates/rpc/rpc/src/engine.rs | 2 + crates/rpc/rpc/src/eth/core.rs | 8 +- crates/rpc/rpc/src/eth/helpers/transaction.rs | 256 +++++++++++++++++- crates/rpc/rpc/src/otterscan.rs | 2 + 19 files changed, 518 insertions(+), 105 deletions(-) diff --git a/crates/e2e-test-utils/src/testsuite/actions/engine_api.rs b/crates/e2e-test-utils/src/testsuite/actions/engine_api.rs index 6548fc951c6..d4053228d9c 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/engine_api.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/engine_api.rs @@ -8,6 +8,7 @@ use alloy_rpc_types_engine::{ use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest}; use eyre::Result; use futures_util::future::BoxFuture; +use reth_ethereum_primitives::TransactionSigned; use reth_node_api::{EngineTypes, PayloadTypes}; use reth_rpc_api::clients::{EngineApiClient, EthApiClient}; use std::marker::PhantomData; @@ -85,7 +86,14 @@ where const MAX_RETRIES: u32 = 5; while retries < MAX_RETRIES { - match EthApiClient::::block_by_number( + match EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::block_by_number( source_rpc, alloy_eips::BlockNumberOrTag::Number(self.block_number), true, // include transactions diff --git a/crates/e2e-test-utils/src/testsuite/actions/fork.rs b/crates/e2e-test-utils/src/testsuite/actions/fork.rs index 1511d90fa59..154b695adde 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/fork.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/fork.rs @@ -8,6 +8,7 @@ use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes}; use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest}; use eyre::Result; use futures_util::future::BoxFuture; +use reth_ethereum_primitives::TransactionSigned; use reth_node_api::{EngineTypes, PayloadTypes}; use reth_rpc_api::clients::EthApiClient; use std::marker::PhantomData; @@ -136,6 +137,7 @@ where Block, Receipt, Header, + TransactionSigned, >::block_by_number( rpc_client, alloy_eips::BlockNumberOrTag::Number(self.fork_base_block), @@ -248,11 +250,14 @@ where // walk backwards through the chain until we reach the fork base while current_number > self.fork_base_number { - let block = EthApiClient::::block_by_hash( - rpc_client, - current_hash, - false, - ) + let block = EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::block_by_hash(rpc_client, current_hash, false) .await? 
.ok_or_else(|| { eyre::eyre!("Block with hash {} not found during fork validation", current_hash) diff --git a/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs b/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs index a00ab5e8675..da1cf98e617 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs @@ -4,6 +4,7 @@ use crate::testsuite::{Action, Environment}; use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest}; use eyre::Result; use futures_util::future::BoxFuture; +use reth_ethereum_primitives::TransactionSigned; use reth_node_api::EngineTypes; use reth_rpc_api::clients::EthApiClient; use std::time::Duration; @@ -74,18 +75,28 @@ where let node_b_client = &env.node_clients[self.node_b]; // Get latest block from each node - let block_a = EthApiClient::::block_by_number( - &node_a_client.rpc, - alloy_eips::BlockNumberOrTag::Latest, - false, + let block_a = EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::block_by_number( + &node_a_client.rpc, alloy_eips::BlockNumberOrTag::Latest, false ) .await? .ok_or_else(|| eyre::eyre!("Failed to get latest block from node {}", self.node_a))?; - let block_b = EthApiClient::::block_by_number( - &node_b_client.rpc, - alloy_eips::BlockNumberOrTag::Latest, - false, + let block_b = EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::block_by_number( + &node_b_client.rpc, alloy_eips::BlockNumberOrTag::Latest, false ) .await? .ok_or_else(|| eyre::eyre!("Failed to get latest block from node {}", self.node_b))?; @@ -278,6 +289,7 @@ where Block, Receipt, Header, + TransactionSigned, >::block_by_number( &node_a_client.rpc, alloy_eips::BlockNumberOrTag::Latest, @@ -294,6 +306,7 @@ where Block, Receipt, Header, + TransactionSigned, >::block_by_number( &node_b_client.rpc, alloy_eips::BlockNumberOrTag::Latest, diff --git a/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs index 74a5e2ba1d5..92bbba93b89 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs @@ -11,6 +11,7 @@ use alloy_rpc_types_engine::{ use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest}; use eyre::Result; use futures_util::future::BoxFuture; +use reth_ethereum_primitives::TransactionSigned; use reth_node_api::{EngineTypes, PayloadTypes}; use reth_rpc_api::clients::{EngineApiClient, EthApiClient}; use std::{collections::HashSet, marker::PhantomData, time::Duration}; @@ -79,6 +80,7 @@ where Block, Receipt, Header, + TransactionSigned, >::block_by_number( rpc_client, alloy_eips::BlockNumberOrTag::Latest, false ) @@ -348,6 +350,7 @@ where Block, Receipt, Header, + TransactionSigned, >::block_by_number( rpc_client, alloy_eips::BlockNumberOrTag::Latest, false ) @@ -421,6 +424,7 @@ where Block, Receipt, Header, + TransactionSigned, >::block_by_number( rpc_client, alloy_eips::BlockNumberOrTag::Latest, false ) @@ -531,6 +535,7 @@ where Block, Receipt, Header, + TransactionSigned, >::header_by_number( rpc_client, alloy_eips::BlockNumberOrTag::Latest ) diff --git a/crates/e2e-test-utils/tests/e2e-testsuite/main.rs b/crates/e2e-test-utils/tests/e2e-testsuite/main.rs index 04422ba34ad..4a2ac77ec65 100644 --- a/crates/e2e-test-utils/tests/e2e-testsuite/main.rs +++ 
b/crates/e2e-test-utils/tests/e2e-testsuite/main.rs @@ -84,6 +84,7 @@ async fn test_apply_with_import() -> Result<()> { alloy_rpc_types_eth::Block, alloy_rpc_types_eth::Receipt, alloy_rpc_types_eth::Header, + reth_ethereum_primitives::TransactionSigned, >::block_by_number( &client.rpc, alloy_eips::BlockNumberOrTag::Number(10), diff --git a/crates/node/builder/src/launch/invalid_block_hook.rs b/crates/node/builder/src/launch/invalid_block_hook.rs index 7221077847a..3c1848dceb4 100644 --- a/crates/node/builder/src/launch/invalid_block_hook.rs +++ b/crates/node/builder/src/launch/invalid_block_hook.rs @@ -1,6 +1,7 @@ //! Invalid block hook helpers for the node builder. use crate::AddOnsContext; +use alloy_consensus::TxEnvelope; use alloy_rpc_types::{Block, Header, Receipt, Transaction, TransactionRequest}; use eyre::OptionExt; use reth_chainspec::EthChainSpec; @@ -128,10 +129,16 @@ where let client = jsonrpsee::http_client::HttpClientBuilder::default().build(url)?; // Verify that the healthy node is running the same chain as the current node. - let healthy_chain_id = - EthApiClient::::chain_id(&client) - .await? - .ok_or_eyre("healthy node rpc client didn't return a chain id")?; + let healthy_chain_id = EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TxEnvelope, + >::chain_id(&client) + .await? + .ok_or_eyre("healthy node rpc client didn't return a chain id")?; if healthy_chain_id.to::() != chain_id { eyre::bail!("Invalid chain ID. Expected {}, got {}", chain_id, healthy_chain_id); diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ed8114e7e91..06c3af69a9f 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -34,7 +34,7 @@ use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_consensus::{ConsensusError, FullConsensus}; use reth_evm::ConfigureEvm; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; -use reth_primitives_traits::NodePrimitives; +use reth_primitives_traits::{NodePrimitives, TxTy}; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthApi, EthApiBuilder, EthBundle, MinerApi, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, TxPoolApi, ValidationApiConfig, Web3Api, @@ -670,6 +670,7 @@ where RpcBlock, RpcReceipt, RpcHeader, + TxTy, > + EthApiTypes, EvmConfig: ConfigureEvm + 'static, { @@ -691,7 +692,7 @@ where /// If called outside of the tokio runtime. 
See also [`Self::eth_api`] pub fn register_ots(&mut self) -> &mut Self where - EthApi: TraceExt + EthTransactions, + EthApi: TraceExt + EthTransactions, { let otterscan_api = self.otterscan_api(); self.modules.insert(RethRpcModule::Ots, otterscan_api.into_rpc().into()); diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index a790253d266..601fd789608 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -18,7 +18,7 @@ use jsonrpsee::{ rpc_params, types::error::ErrorCode, }; -use reth_ethereum_primitives::Receipt; +use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_network_peers::NodeRecord; use reth_rpc_api::{ clients::{AdminApiClient, EthApiClient}, @@ -176,38 +176,38 @@ where .unwrap(); // Implemented - EthApiClient::::protocol_version( + EthApiClient::::protocol_version( client, ) .await .unwrap(); - EthApiClient::::chain_id(client) + EthApiClient::::chain_id(client) .await .unwrap(); - EthApiClient::::accounts(client) + EthApiClient::::accounts(client) .await .unwrap(); - EthApiClient::::get_account( + EthApiClient::::get_account( client, address, block_number.into(), ) .await .unwrap(); - EthApiClient::::block_number(client) + EthApiClient::::block_number(client) .await .unwrap(); - EthApiClient::::get_code( + EthApiClient::::get_code( client, address, None, ) .await .unwrap(); - EthApiClient::::send_raw_transaction( + EthApiClient::::send_raw_transaction( client, tx, ) .await .unwrap(); - EthApiClient::::fee_history( + EthApiClient::::fee_history( client, U64::from(0), block_number, @@ -215,17 +215,17 @@ where ) .await .unwrap(); - EthApiClient::::balance( + EthApiClient::::balance( client, address, None, ) .await .unwrap(); - EthApiClient::::transaction_count( + EthApiClient::::transaction_count( client, address, None, ) .await .unwrap(); - EthApiClient::::storage_at( + EthApiClient::::storage_at( client, address, U256::default().into(), @@ -233,80 +233,80 @@ where ) .await .unwrap(); - EthApiClient::::block_by_hash( + EthApiClient::::block_by_hash( client, hash, false, ) .await .unwrap(); - EthApiClient::::block_by_number( + EthApiClient::::block_by_number( client, block_number, false, ) .await .unwrap(); - EthApiClient::::block_transaction_count_by_number( + EthApiClient::::block_transaction_count_by_number( client, block_number, ) .await .unwrap(); - EthApiClient::::block_transaction_count_by_hash( + EthApiClient::::block_transaction_count_by_hash( client, hash, ) .await .unwrap(); - EthApiClient::::block_uncles_count_by_hash(client, hash) + EthApiClient::::block_uncles_count_by_hash(client, hash) .await .unwrap(); - EthApiClient::::block_uncles_count_by_number( + EthApiClient::::block_uncles_count_by_number( client, block_number, ) .await .unwrap(); - EthApiClient::::uncle_by_block_hash_and_index( + EthApiClient::::uncle_by_block_hash_and_index( client, hash, index, ) .await .unwrap(); - EthApiClient::::uncle_by_block_number_and_index( + EthApiClient::::uncle_by_block_number_and_index( client, block_number, index, ) .await .unwrap(); - EthApiClient::::sign( + EthApiClient::::sign( client, address, bytes.clone(), ) .await .unwrap_err(); - EthApiClient::::sign_typed_data( + EthApiClient::::sign_typed_data( client, address, typed_data, ) .await .unwrap_err(); - EthApiClient::::transaction_by_hash( + EthApiClient::::transaction_by_hash( client, tx_hash, ) .await .unwrap(); - EthApiClient::::transaction_by_block_hash_and_index( + 
EthApiClient::::transaction_by_block_hash_and_index( client, hash, index, ) .await .unwrap(); - EthApiClient::::transaction_by_block_number_and_index( + EthApiClient::::transaction_by_block_number_and_index( client, block_number, index, ) .await .unwrap(); - EthApiClient::::create_access_list( + EthApiClient::::create_access_list( client, call_request.clone(), Some(block_number.into()), @@ -314,7 +314,7 @@ where ) .await .unwrap_err(); - EthApiClient::::estimate_gas( + EthApiClient::::estimate_gas( client, call_request.clone(), Some(block_number.into()), @@ -322,7 +322,7 @@ where ) .await .unwrap_err(); - EthApiClient::::call( + EthApiClient::::call( client, call_request.clone(), Some(block_number.into()), @@ -331,38 +331,38 @@ where ) .await .unwrap_err(); - EthApiClient::::syncing(client) + EthApiClient::::syncing(client) .await .unwrap(); - EthApiClient::::send_transaction( + EthApiClient::::send_transaction( client, transaction_request.clone(), ) .await .unwrap_err(); - EthApiClient::::sign_transaction( + EthApiClient::::sign_transaction( client, transaction_request, ) .await .unwrap_err(); - EthApiClient::::hashrate(client) + EthApiClient::::hashrate(client) .await .unwrap(); - EthApiClient::::submit_hashrate( + EthApiClient::::submit_hashrate( client, U256::default(), B256::default(), ) .await .unwrap(); - EthApiClient::::gas_price(client) + EthApiClient::::gas_price(client) .await .unwrap_err(); - EthApiClient::::max_priority_fee_per_gas(client) + EthApiClient::::max_priority_fee_per_gas(client) .await .unwrap_err(); - EthApiClient::::get_proof( + EthApiClient::::get_proof( client, address, vec![], @@ -372,35 +372,66 @@ where .unwrap(); // Unimplemented - assert!(is_unimplemented( - EthApiClient::::author(client) + assert!( + is_unimplemented( + EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::author(client) .await .err() .unwrap() - )); - assert!(is_unimplemented( - EthApiClient::::is_mining(client) + ) + ); + assert!( + is_unimplemented( + EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::is_mining(client) .await .err() .unwrap() - )); - assert!(is_unimplemented( - EthApiClient::::get_work(client) + ) + ); + assert!( + is_unimplemented( + EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::get_work(client) .await .err() .unwrap() - )); - assert!(is_unimplemented( - EthApiClient::::submit_work( - client, - B64::default(), - B256::default(), - B256::default() ) - .await - .err() - .unwrap() - )); + ); + assert!( + is_unimplemented( + EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::submit_work(client, B64::default(), B256::default(), B256::default()) + .await + .err() + .unwrap() + ) + ); EthCallBundleApiClient::call_bundle(client, Default::default()).await.unwrap_err(); } diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs index 60541a57c39..9a70356bcac 100644 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -5,6 +5,7 @@ use jsonrpsee::{ server::middleware::rpc::RpcServiceT, types::Request, }; +use reth_ethereum_primitives::TransactionSigned; use reth_rpc_builder::{RpcServerConfig, TransportRpcModuleConfig}; use reth_rpc_eth_api::EthApiClient; use reth_rpc_server_types::RpcModuleSelection; @@ -85,7 +86,7 @@ async fn test_rpc_middleware() { 
.unwrap(); let client = handle.http_client().unwrap(); - EthApiClient::::protocol_version( + EthApiClient::::protocol_version( &client, ) .await diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index ed05f9d373b..40f19c86227 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -16,7 +16,9 @@ use alloy_rpc_types_eth::{ }; use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_primitives_traits::TxTy; use reth_rpc_convert::RpcTxReq; +use reth_rpc_eth_types::FillTransactionResult; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use tracing::trace; @@ -29,6 +31,7 @@ pub trait FullEthApiServer: RpcBlock, RpcReceipt, RpcHeader, + TxTy, > + FullEthApi + Clone { @@ -41,6 +44,7 @@ impl FullEthApiServer for T where RpcBlock, RpcReceipt, RpcHeader, + TxTy, > + FullEthApi + Clone { @@ -49,7 +53,15 @@ impl FullEthApiServer for T where /// Eth rpc interface: #[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] -pub trait EthApi { +pub trait EthApi< + TxReq: RpcObject, + T: RpcObject, + B: RpcObject, + R: RpcObject, + H: RpcObject, + RawTx: RpcObject, +> +{ /// Returns the protocol version encoded as a string. #[method(name = "protocolVersion")] async fn protocol_version(&self) -> RpcResult; @@ -228,6 +240,10 @@ pub trait EthApi>, ) -> RpcResult; + /// Fills the defaults on a given unsigned transaction. + #[method(name = "fillTransaction")] + async fn fill_transaction(&self, request: TxReq) -> RpcResult>; + /// Simulate arbitrary number of transactions at an arbitrary blockchain index, with the /// optionality of state overrides #[method(name = "callMany")] @@ -388,6 +404,7 @@ impl RpcBlock, RpcReceipt, RpcHeader, + TxTy, > for T where T: FullEthApi, @@ -682,6 +699,15 @@ where .await?) } + /// Handler for: `eth_fillTransaction` + async fn fill_transaction( + &self, + request: RpcTxReq, + ) -> RpcResult>> { + trace!(target: "rpc::eth", ?request, "Serving eth_fillTransaction"); + Ok(EthTransactions::fill_transaction(self, request).await?) + } + /// Handler for: `eth_callMany` async fn call_many( &self, diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 2cbf1aff14e..d2e0b5f943a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -1,7 +1,7 @@ //! Database access for `eth_` transaction RPC methods. Loads transaction and receipt data w.r.t. //! network. 
-use super::{EthApiSpec, EthSigner, LoadBlock, LoadReceipt, LoadState, SpawnBlocking}; +use super::{EthApiSpec, EthSigner, LoadBlock, LoadFee, LoadReceipt, LoadState, SpawnBlocking}; use crate::{ helpers::{estimate::EstimateCall, spec::SignersForRpc}, FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, RpcReceipt, @@ -13,17 +13,17 @@ use alloy_consensus::{ }; use alloy_dyn_abi::TypedData; use alloy_eips::{eip2718::Encodable2718, BlockId}; -use alloy_network::TransactionBuilder; -use alloy_primitives::{Address, Bytes, TxHash, B256}; +use alloy_network::{TransactionBuilder, TransactionBuilder4844}; +use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionInfo}; use futures::{Future, StreamExt}; use reth_chain_state::CanonStateSubscriptions; use reth_node_api::BlockBody; -use reth_primitives_traits::{RecoveredBlock, SignedTransaction}; +use reth_primitives_traits::{RecoveredBlock, SignedTransaction, TxTy}; use reth_rpc_convert::{transaction::RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ - utils::binary_search, EthApiError, EthApiError::TransactionConfirmationTimeout, SignError, - TransactionSource, + utils::binary_search, EthApiError, EthApiError::TransactionConfirmationTimeout, + FillTransactionResult, SignError, TransactionSource, }; use reth_storage_api::{ BlockNumReader, BlockReaderIdExt, ProviderBlock, ProviderReceipt, ProviderTx, ReceiptProvider, @@ -436,6 +436,75 @@ pub trait EthTransactions: LoadTransaction { } } + /// Fills the defaults on a given unsigned transaction. + fn fill_transaction( + &self, + mut request: RpcTxReq, + ) -> impl Future>, Self::Error>> + Send + where + Self: EthApiSpec + LoadBlock + EstimateCall + LoadFee, + { + async move { + let from = match request.as_ref().from() { + Some(from) => from, + None => return Err(SignError::NoAccount.into_eth_err()), + }; + + if request.as_ref().value().is_none() { + request.as_mut().set_value(U256::ZERO); + } + + if request.as_ref().nonce().is_none() { + let nonce = self.next_available_nonce(from).await?; + request.as_mut().set_nonce(nonce); + } + + let chain_id = self.chain_id(); + request.as_mut().set_chain_id(chain_id.to()); + + if request.as_ref().has_eip4844_fields() && + request.as_ref().max_fee_per_blob_gas().is_none() + { + let blob_fee = self.blob_base_fee().await?; + request.as_mut().set_max_fee_per_blob_gas(blob_fee.to()); + } + + if request.as_ref().blob_sidecar().is_some() && + request.as_ref().blob_versioned_hashes.is_none() + { + request.as_mut().populate_blob_hashes(); + } + + if request.as_ref().gas_limit().is_none() { + let estimated_gas = + self.estimate_gas_at(request.clone(), BlockId::pending(), None).await?; + request.as_mut().set_gas_limit(estimated_gas.to()); + } + + if request.as_ref().gas_price().is_none() { + let tip = if let Some(tip) = request.as_ref().max_priority_fee_per_gas() { + tip + } else { + let tip = self.suggested_priority_fee().await?.to::(); + request.as_mut().set_max_priority_fee_per_gas(tip); + tip + }; + if request.as_ref().max_fee_per_gas().is_none() { + let header = + self.provider().latest_header().map_err(Self::Error::from_eth_err)?; + let base_fee = header.and_then(|h| h.base_fee_per_gas()).unwrap_or_default(); + request.as_mut().set_max_fee_per_gas(base_fee as u128 + tip); + } + } + + let tx = self.tx_resp_builder().build_simulate_v1_transaction(request)?; + + let raw = tx.encoded_2718().into(); + + Ok(FillTransactionResult { raw, tx }) + } + } + /// Signs a transaction, with configured 
signers. fn sign_request( &self, diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs index 9c603e4864e..7378ad99629 100644 --- a/crates/rpc/rpc-eth-types/src/lib.rs +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -35,5 +35,5 @@ pub use gas_oracle::{ }; pub use id_provider::EthSubscriptionIdProvider; pub use pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; -pub use transaction::TransactionSource; +pub use transaction::{FillTransactionResult, TransactionSource}; pub use tx_forward::ForwardConfig; diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index de3323d61e6..3d099f01188 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -2,11 +2,21 @@ //! //! Transaction wrapper that labels transaction with its origin. -use alloy_primitives::B256; +use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_eth::TransactionInfo; use reth_ethereum_primitives::TransactionSigned; use reth_primitives_traits::{NodePrimitives, Recovered, SignedTransaction}; use reth_rpc_convert::{RpcConvert, RpcTransaction}; +use serde::{Deserialize, Serialize}; + +/// Response type for `eth_fillTransaction` RPC method. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FillTransactionResult { + /// RLP-encoded transaction bytes + pub raw: Bytes, + /// Filled transaction object + pub tx: T, +} /// Represents from where a transaction was fetched. #[derive(Debug, Clone, Eq, PartialEq)] diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index 4f91e7e63c0..65fc3e86e02 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -15,7 +15,7 @@ use alloy_rpc_types_trace::{ }; use futures::{Stream, StreamExt}; use jsonrpsee::core::client::Error as RpcError; -use reth_ethereum_primitives::Receipt; +use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_rpc_api::{clients::DebugApiClient, EthApiClient}; const NOOP_TRACER: &str = include_str!("../assets/noop-tracer.js"); @@ -77,7 +77,7 @@ pub trait DebugApiExt { impl DebugApiExt for T where - T: EthApiClient + T: EthApiClient + DebugApiClient + Sync, { diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs index 301d65a820b..19e0b202dc6 100644 --- a/crates/rpc/rpc-testing-util/tests/it/trace.rs +++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs @@ -8,7 +8,7 @@ use alloy_rpc_types_trace::{ use futures::StreamExt; use jsonrpsee::http_client::HttpClientBuilder; use jsonrpsee_http_client::HttpClient; -use reth_ethereum_primitives::Receipt; +use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_rpc_api_testing_util::{debug::DebugApiExt, trace::TraceApiExt, utils::parse_env_url}; use reth_rpc_eth_api::EthApiClient; use std::time::Instant; @@ -118,6 +118,7 @@ async fn debug_trace_block_entire_chain() { Block, Receipt, Header, + TransactionSigned, >>::block_number(&client) .await .unwrap() @@ -152,6 +153,7 @@ async fn debug_trace_block_opcodes_entire_chain() { Block, Receipt, Header, + TransactionSigned, >>::block_number(&client) .await .unwrap() diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index 7865659ece7..b7e62fadb75 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -5,6 +5,7 @@ use alloy_rpc_types_eth::{ }; use alloy_serde::JsonStorageKey; use jsonrpsee::core::RpcResult as Result; +use 
reth_primitives_traits::TxTy; use reth_rpc_api::{EngineEthApiServer, EthApiServer}; use reth_rpc_convert::RpcTxReq; /// Re-export for convenience @@ -49,6 +50,7 @@ where RpcBlock, RpcReceipt, RpcHeader, + TxTy, > + FullEthApiTypes, EthFilter: EngineEthFilter, { diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index e3850a67f54..d2e5cf124ec 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -711,7 +711,7 @@ mod tests { /// Invalid block range #[tokio::test] async fn test_fee_history_empty() { - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _, _>>::fee_history( &build_test_eth_api(NoopProvider::default()), U64::from(1), BlockNumberOrTag::Latest, @@ -733,7 +733,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _, _>>::fee_history( ð_api, U64::from(newest_block + 1), newest_block.into(), @@ -756,7 +756,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _, _>>::fee_history( ð_api, U64::from(1), (newest_block + 1000).into(), @@ -779,7 +779,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _, _>>::fee_history( ð_api, U64::from(0), newest_block.into(), diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 39758f68d77..7889dd1f54c 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -134,25 +134,54 @@ where #[cfg(test)] mod tests { use super::*; - use alloy_primitives::{hex_literal::hex, Bytes}; - use reth_chainspec::ChainSpecProvider; + use crate::eth::helpers::types::EthRpcConverter; + use alloy_consensus::{Block, Header, SidecarBuilder, SimpleCoder, Transaction}; + use alloy_primitives::{Address, U256}; + use alloy_rpc_types_eth::request::TransactionRequest; + use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_provider::test_utils::NoopProvider; - use reth_rpc_eth_api::helpers::EthTransactions; - use reth_transaction_pool::{test_utils::testing_pool, TransactionPool}; + use reth_provider::{ + test_utils::{ExtendedAccount, MockEthProvider}, + ChainSpecProvider, + }; + use reth_rpc_eth_api::node::RpcNodeCoreAdapter; + use reth_transaction_pool::test_utils::{testing_pool, TestPool}; + use std::collections::HashMap; - #[tokio::test] - async fn send_raw_transaction() { - let noop_provider = NoopProvider::default(); - let noop_network_provider = NoopNetwork::default(); + fn mock_eth_api( + accounts: HashMap, + ) -> EthApi< + RpcNodeCoreAdapter, + EthRpcConverter, + > { + let mock_provider = MockEthProvider::default() + .with_chain_spec(ChainSpecBuilder::mainnet().cancun_activated().build()); + mock_provider.extend_accounts(accounts); + let evm_config = EthEvmConfig::new(mock_provider.chain_spec()); let pool = testing_pool(); - let evm_config = EthEvmConfig::new(noop_provider.chain_spec()); - let eth_api = - 
EthApi::builder(noop_provider.clone(), pool.clone(), noop_network_provider, evm_config) - .build(); + let genesis_header = Header { + number: 0, + gas_limit: 30_000_000, + timestamp: 1, + excess_blob_gas: Some(0), + base_fee_per_gas: Some(1000000000), + blob_gas_used: Some(0), + ..Default::default() + }; + + let genesis_hash = B256::ZERO; + mock_provider.add_block(genesis_hash, Block::new(genesis_header, Default::default())); + + EthApi::builder(mock_provider, pool, NoopNetwork::default(), evm_config).build() + } + + #[tokio::test] + async fn send_raw_transaction() { + let eth_api = mock_eth_api(Default::default()); + let pool = eth_api.pool(); // https://etherscan.io/tx/0xa694b71e6c128a2ed8e2e0f6770bddbe52e3bb8f10e8472f9a79ab81497a8b5d let tx_1 = Bytes::from(hex!( @@ -183,4 +212,205 @@ mod tests { assert!(pool.get(&tx_1_result).is_some(), "tx1 not found in the pool"); assert!(pool.get(&tx_2_result).is_some(), "tx2 not found in the pool"); } + + #[tokio::test] + async fn test_fill_transaction_fills_chain_id() { + let address = Address::random(); + let accounts = HashMap::from([( + address, + ExtendedAccount::new(0, U256::from(10_000_000_000_000_000_000u64)), // 10 ETH + )]); + + let eth_api = mock_eth_api(accounts); + + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + gas: Some(21_000), + ..Default::default() + }; + + let filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + // Should fill with the chain id from provider + assert!(filled.tx.chain_id().is_some()); + } + + #[tokio::test] + async fn test_fill_transaction_fills_nonce() { + let address = Address::random(); + let nonce = 42u64; + + let accounts = HashMap::from([( + address, + ExtendedAccount::new(nonce, U256::from(1_000_000_000_000_000_000u64)), // 1 ETH + )]); + + let eth_api = mock_eth_api(accounts); + + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + value: Some(U256::from(1000)), + gas: Some(21_000), + ..Default::default() + }; + + let filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + assert_eq!(filled.tx.nonce(), nonce); + } + + #[tokio::test] + async fn test_fill_transaction_preserves_provided_fields() { + let address = Address::random(); + let provided_nonce = 100u64; + let provided_gas_limit = 50_000u64; + + let accounts = HashMap::from([( + address, + ExtendedAccount::new(42, U256::from(10_000_000_000_000_000_000u64)), + )]); + + let eth_api = mock_eth_api(accounts); + + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + value: Some(U256::from(1000)), + nonce: Some(provided_nonce), + gas: Some(provided_gas_limit), + ..Default::default() + }; + + let filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + // Should preserve the provided nonce and gas limit + assert_eq!(filled.tx.nonce(), provided_nonce); + assert_eq!(filled.tx.gas_limit(), provided_gas_limit); + } + + #[tokio::test] + async fn test_fill_transaction_fills_all_missing_fields() { + let address = Address::random(); + + let balance = U256::from(100u128) * U256::from(1_000_000_000_000_000_000u128); + let accounts = HashMap::from([(address, ExtendedAccount::new(5, balance))]); + + let eth_api = mock_eth_api(accounts); + + // Create a simple transfer transaction + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + ..Default::default() + }; + + let 
filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + assert!(filled.tx.is_eip1559()); + } + + #[tokio::test] + async fn test_fill_transaction_eip4844_blob_fee() { + let address = Address::random(); + let accounts = HashMap::from([( + address, + ExtendedAccount::new(0, U256::from(10_000_000_000_000_000_000u64)), + )]); + + let eth_api = mock_eth_api(accounts); + + let mut builder = SidecarBuilder::::new(); + builder.ingest(b"dummy blob"); + + // EIP-4844 blob transaction with versioned hashes but no blob fee + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + sidecar: Some(builder.build().unwrap()), + ..Default::default() + }; + + let filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + // Blob transaction should have max_fee_per_blob_gas filled + assert!( + filled.tx.max_fee_per_blob_gas().is_some(), + "max_fee_per_blob_gas should be filled for blob tx" + ); + assert!( + filled.tx.blob_versioned_hashes().is_some(), + "blob_versioned_hashes should be preserved" + ); + } + + #[tokio::test] + async fn test_fill_transaction_eip4844_preserves_blob_fee() { + let address = Address::random(); + let accounts = HashMap::from([( + address, + ExtendedAccount::new(0, U256::from(10_000_000_000_000_000_000u64)), + )]); + + let eth_api = mock_eth_api(accounts); + + let provided_blob_fee = 5000000u128; + + let mut builder = SidecarBuilder::::new(); + builder.ingest(b"dummy blob"); + + // EIP-4844 blob transaction with blob fee already set + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + transaction_type: Some(3), // EIP-4844 + sidecar: Some(builder.build().unwrap()), + max_fee_per_blob_gas: Some(provided_blob_fee), // Already set + ..Default::default() + }; + + let filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + // Should preserve the provided blob fee + assert_eq!( + filled.tx.max_fee_per_blob_gas(), + Some(provided_blob_fee), + "should preserve provided max_fee_per_blob_gas" + ); + } + + #[tokio::test] + async fn test_fill_transaction_non_blob_tx_no_blob_fee() { + let address = Address::random(); + let accounts = HashMap::from([( + address, + ExtendedAccount::new(0, U256::from(10_000_000_000_000_000_000u64)), + )]); + + let eth_api = mock_eth_api(accounts); + + // EIP-1559 transaction without blob fields + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + transaction_type: Some(2), // EIP-1559 + ..Default::default() + }; + + let filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + // Non-blob transaction should NOT have blob fee filled + assert!( + filled.tx.max_fee_per_blob_gas().is_none(), + "max_fee_per_blob_gas should not be set for non-blob tx" + ); + } } diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 92698e6eca2..334e8d7dea4 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -12,6 +12,7 @@ use alloy_rpc_types_trace::{ }; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, types::ErrorObjectOwned}; +use reth_primitives_traits::TxTy; use reth_rpc_api::{EthApiServer, OtterscanServer}; use reth_rpc_convert::RpcTxReq; use reth_rpc_eth_api::{ @@ -73,6 +74,7 @@ where RpcBlock, RpcReceipt, RpcHeader, + TxTy, > + EthTransactions + TraceExt + 'static, From 51fbd5a519215985781c8c1c0acbb1f46f7ceda0 Mon Sep 17 
00:00:00 2001 From: strmfos <155266597+strmfos@users.noreply.github.com> Date: Fri, 24 Oct 2025 10:36:30 +0200 Subject: [PATCH 187/371] fix: no_std compatibility in reth-optimism-chainspec (#19271) --- crates/optimism/chainspec/src/basefee.rs | 2 +- crates/optimism/chainspec/src/lib.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/optimism/chainspec/src/basefee.rs b/crates/optimism/chainspec/src/basefee.rs index 394de296f23..3c0dcdfd88d 100644 --- a/crates/optimism/chainspec/src/basefee.rs +++ b/crates/optimism/chainspec/src/basefee.rs @@ -76,7 +76,7 @@ where #[cfg(test)] mod tests { - use std::sync::Arc; + use alloc::sync::Arc; use op_alloy_consensus::encode_jovian_extra_data; use reth_chainspec::{ChainSpec, ForkCondition, Hardfork}; diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 2a78039dcf9..30d90e64c9a 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -517,7 +517,7 @@ pub fn make_op_genesis_header(genesis: &Genesis, hardforks: &ChainHardforks) -> #[cfg(test)] mod tests { - use alloc::string::String; + use alloc::string::{String, ToString}; use alloy_genesis::{ChainConfig, Genesis}; use alloy_primitives::b256; use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind}; @@ -529,7 +529,7 @@ mod tests { #[test] fn test_storage_root_consistency() { use alloy_primitives::{B256, U256}; - use std::str::FromStr; + use core::str::FromStr; let k1 = B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000001") From ddcfc8a4402a05b8db14f90af33eb6316d699fa7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?jos=C3=A9=20v?= <52646071+Peponks9@users.noreply.github.com> Date: Fri, 24 Oct 2025 03:31:22 -0600 Subject: [PATCH 188/371] chore: add `add_or_replace_if_module_configured` method (#19266) --- crates/rpc/rpc-builder/src/lib.rs | 71 +++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 06c3af69a9f..6bd4223f60f 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1966,6 +1966,25 @@ impl TransportRpcModules { self.add_or_replace_ipc(other)?; Ok(()) } + /// Adds or replaces the given [`Methods`] in the transport modules where the specified + /// [`RethRpcModule`] is configured. + pub fn add_or_replace_if_module_configured( + &mut self, + module: RethRpcModule, + other: impl Into, + ) -> Result<(), RegisterMethodError> { + let other = other.into(); + if self.module_config().contains_http(&module) { + self.add_or_replace_http(other.clone())?; + } + if self.module_config().contains_ws(&module) { + self.add_or_replace_ws(other.clone())?; + } + if self.module_config().contains_ipc(&module) { + self.add_or_replace_ipc(other)?; + } + Ok(()) + } } /// Returns the methods installed in the given module that match the given filter. 
@@ -2522,4 +2541,56 @@ mod tests { assert!(modules.ipc.as_ref().unwrap().method("anything").is_some()); assert!(modules.ws.as_ref().unwrap().method("anything").is_some()); } + + #[test] + fn test_add_or_replace_if_module_configured() { + // Create a config that enables RethRpcModule::Eth for HTTP and WS, but NOT IPC + let config = TransportRpcModuleConfig::default() + .with_http([RethRpcModule::Eth]) + .with_ws([RethRpcModule::Eth]); + + // Create HTTP module with an existing method (to test "replace") + let mut http_module = RpcModule::new(()); + http_module.register_method("eth_existing", |_, _, _| "original").unwrap(); + + // Create WS module with the same existing method + let mut ws_module = RpcModule::new(()); + ws_module.register_method("eth_existing", |_, _, _| "original").unwrap(); + + // Create IPC module (empty, to ensure no changes) + let ipc_module = RpcModule::new(()); + + // Set up TransportRpcModules with the config and modules + let mut modules = TransportRpcModules { + config, + http: Some(http_module), + ws: Some(ws_module), + ipc: Some(ipc_module), + }; + + // Create new methods: one to replace an existing method, one to add a new one + let mut new_module = RpcModule::new(()); + new_module.register_method("eth_existing", |_, _, _| "replaced").unwrap(); // Replace + new_module.register_method("eth_new", |_, _, _| "added").unwrap(); // Add + let new_methods: Methods = new_module.into(); + + // Call the function for RethRpcModule::Eth + let result = modules.add_or_replace_if_module_configured(RethRpcModule::Eth, new_methods); + assert!(result.is_ok(), "Function should succeed"); + + // Verify HTTP: existing method still exists (replaced), new method added + let http = modules.http.as_ref().unwrap(); + assert!(http.method("eth_existing").is_some()); + assert!(http.method("eth_new").is_some()); + + // Verify WS: existing method still exists (replaced), new method added + let ws = modules.ws.as_ref().unwrap(); + assert!(ws.method("eth_existing").is_some()); + assert!(ws.method("eth_new").is_some()); + + // Verify IPC: no changes (Eth not configured for IPC) + let ipc = modules.ipc.as_ref().unwrap(); + assert!(ipc.method("eth_existing").is_none()); + assert!(ipc.method("eth_new").is_none()); + } } From 4a24cb3b499b997f93de9b5a01bfd0ecfe8339c1 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 24 Oct 2025 05:32:55 -0400 Subject: [PATCH 189/371] fix(engine): re-insert storage cache and use arc (#18879) --- crates/engine/tree/src/tree/cached_state.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index bc543d067a0..fd9999b9eba 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -302,7 +302,7 @@ pub(crate) struct ExecutionCache { /// Per-account storage cache: outer cache keyed by Address, inner cache tracks that account’s /// storage slots. - storage_cache: Cache, + storage_cache: Cache>, /// Cache for basic account information (nonce, balance, code hash). 
account_cache: Cache>, @@ -340,15 +340,15 @@ impl ExecutionCache { where I: IntoIterator)>, { - let account_cache = self.storage_cache.get(&address).unwrap_or_else(|| { - let account_cache = AccountStorageCache::default(); - self.storage_cache.insert(address, account_cache.clone()); - account_cache - }); + let account_cache = self.storage_cache.get(&address).unwrap_or_default(); for (key, value) in storage_entries { account_cache.insert_storage(key, value); } + + // Insert to the cache so that moka picks up on the changed size, even though the actual + // value (the Arc) is the same + self.storage_cache.insert(address, account_cache); } /// Invalidate storage for specific account @@ -465,7 +465,7 @@ impl ExecutionCacheBuilder { const TIME_TO_IDLE: Duration = Duration::from_secs(3600); // 1 hour let storage_cache = CacheBuilder::new(self.storage_cache_entries) - .weigher(|_key: &Address, value: &AccountStorageCache| -> u32 { + .weigher(|_key: &Address, value: &Arc| -> u32 { // values based on results from measure_storage_cache_overhead test let base_weight = 39_000; let slots_weight = value.len() * 218; From a767fe3b14f1604fc7db11ba853708fb20637c81 Mon Sep 17 00:00:00 2001 From: 0xeabz Date: Fri, 24 Oct 2025 05:25:14 -0600 Subject: [PATCH 190/371] feat: allow using SafeNoSync for MDBX (#18945) Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- crates/node/core/src/args/database.rs | 46 ++++++++++++++++++- .../storage/db/src/implementation/mdbx/mod.rs | 28 ++++++++++- crates/storage/libmdbx-rs/src/flags.rs | 19 +++++++- docs/vocs/docs/pages/cli/reth/db.mdx | 3 ++ docs/vocs/docs/pages/cli/reth/db/diff.mdx | 3 ++ docs/vocs/docs/pages/cli/reth/download.mdx | 3 ++ docs/vocs/docs/pages/cli/reth/export-era.mdx | 3 ++ docs/vocs/docs/pages/cli/reth/import-era.mdx | 3 ++ docs/vocs/docs/pages/cli/reth/import.mdx | 3 ++ docs/vocs/docs/pages/cli/reth/init-state.mdx | 3 ++ docs/vocs/docs/pages/cli/reth/init.mdx | 3 ++ docs/vocs/docs/pages/cli/reth/node.mdx | 3 ++ docs/vocs/docs/pages/cli/reth/prune.mdx | 3 ++ docs/vocs/docs/pages/cli/reth/re-execute.mdx | 3 ++ docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 3 ++ docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 3 ++ docs/vocs/docs/pages/cli/reth/stage/run.mdx | 3 ++ .../vocs/docs/pages/cli/reth/stage/unwind.mdx | 3 ++ 18 files changed, 134 insertions(+), 4 deletions(-) diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index 09b8f15ef68..6f1d3bfc711 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -6,9 +6,12 @@ use crate::version::default_client_version; use clap::{ builder::{PossibleValue, TypedValueParser}, error::ErrorKind, - Arg, Args, Command, Error, + value_parser, Arg, Args, Command, Error, +}; +use reth_db::{ + mdbx::{MaxReadTransactionDuration, SyncMode}, + ClientVersion, }; -use reth_db::{mdbx::MaxReadTransactionDuration, ClientVersion}; use reth_storage_errors::db::LogLevel; /// Parameters for database configuration @@ -34,6 +37,12 @@ pub struct DatabaseArgs { /// Maximum number of readers allowed to access the database concurrently. #[arg(long = "db.max-readers")] pub max_readers: Option, + /// Controls how aggressively the database synchronizes data to disk. 
+ #[arg(
+ long = "db.sync-mode",
+ value_parser = value_parser!(SyncMode),
+ )]
+ pub sync_mode: Option<SyncMode>,
 }

 impl DatabaseArgs {
@@ -61,6 +70,7 @@ impl DatabaseArgs {
 .with_geometry_max_size(self.max_size)
 .with_growth_step(self.growth_step)
 .with_max_readers(self.max_readers)
+ .with_sync_mode(self.sync_mode)
 }
 }

@@ -340,4 +350,36 @@ mod tests {
 let cmd = CommandParser::<DatabaseArgs>::try_parse_from(["reth"]).unwrap();
 assert_eq!(cmd.args.log_level, None);
 }
+
+ #[test]
+ fn test_command_parser_with_valid_default_sync_mode() {
+ let cmd = CommandParser::<DatabaseArgs>::try_parse_from(["reth"]).unwrap();
+ assert!(cmd.args.sync_mode.is_none());
+ }
+
+ #[test]
+ fn test_command_parser_with_valid_sync_mode_durable() {
+ let cmd =
+ CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.sync-mode", "durable"])
+ .unwrap();
+ assert!(matches!(cmd.args.sync_mode, Some(SyncMode::Durable)));
+ }
+
+ #[test]
+ fn test_command_parser_with_valid_sync_mode_safe_no_sync() {
+ let cmd = CommandParser::<DatabaseArgs>::try_parse_from([
+ "reth",
+ "--db.sync-mode",
+ "safe-no-sync",
+ ])
+ .unwrap();
+ assert!(matches!(cmd.args.sync_mode, Some(SyncMode::SafeNoSync)));
+ }
+
+ #[test]
+ fn test_command_parser_with_invalid_sync_mode() {
+ let result =
+ CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.sync-mode", "ultra-fast"]);
+ assert!(result.is_err());
+ }
 }
diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs
index def7c90ca42..b00bfd3c9a5 100644
--- a/crates/storage/db/src/implementation/mdbx/mod.rs
+++ b/crates/storage/db/src/implementation/mdbx/mod.rs
@@ -103,6 +103,22 @@ pub struct DatabaseArguments {
 /// MDBX allows up to 32767 readers (`MDBX_READERS_LIMIT`). This arg is to configure the max
 /// readers.
 max_readers: Option,
+ /// Defines the synchronization strategy used by the MDBX database when writing data to disk.
+ ///
+ /// This determines how aggressively MDBX ensures data durability versus prioritizing
+ /// performance. The available modes are:
+ ///
+ /// - [`SyncMode::Durable`]: Ensures all transactions are fully flushed to disk before they are
+ /// considered committed. This provides the highest level of durability and crash safety
+ /// but may have a performance cost.
+ /// - [`SyncMode::SafeNoSync`]: Skips certain fsync operations to improve write performance.
+ /// This mode still maintains database integrity but may lose the most recent transactions if
+ /// the system crashes unexpectedly.
+ ///
+ /// Choose `Durable` if consistency and crash safety are critical (e.g., production
+ /// environments). Choose `SafeNoSync` if performance is more important and occasional data
+ /// loss is acceptable (e.g., testing or ephemeral data).
+ sync_mode: SyncMode,
 }

 impl Default for DatabaseArguments {
@@ -126,6 +142,7 @@ impl DatabaseArguments {
 max_read_transaction_duration: None,
 exclusive: None,
 max_readers: None,
+ sync_mode: SyncMode::Durable,
 }
 }

@@ -137,6 +154,15 @@ impl DatabaseArguments {
 self
 }

+ /// Sets the database sync mode.
+ pub const fn with_sync_mode(mut self, sync_mode: Option<SyncMode>) -> Self {
+ if let Some(sync_mode) = sync_mode {
+ self.sync_mode = sync_mode;
+ }
+
+ self
+ }
+
 /// Configures the database growth step in bytes.
pub const fn with_growth_step(mut self, growth_step: Option) -> Self { if let Some(growth_step) = growth_step { @@ -329,7 +355,7 @@ impl DatabaseEnv { DatabaseEnvKind::RW => { // enable writemap mode in RW mode inner_env.write_map(); - Mode::ReadWrite { sync_mode: SyncMode::Durable } + Mode::ReadWrite { sync_mode: args.sync_mode } } }; diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs index 71bd77b55d2..6aefab57b19 100644 --- a/crates/storage/libmdbx-rs/src/flags.rs +++ b/crates/storage/libmdbx-rs/src/flags.rs @@ -1,8 +1,10 @@ +use std::str::FromStr; + use bitflags::bitflags; use ffi::*; /// MDBX sync mode -#[derive(Clone, Copy, Debug, Default)] +#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)] pub enum SyncMode { /// Default robust and durable sync mode. /// Metadata is written and flushed to disk after a data is written and flushed, which @@ -119,6 +121,21 @@ impl From for EnvironmentFlags { } } +impl FromStr for SyncMode { + type Err = String; + + fn from_str(s: &str) -> Result { + let val = s.trim().to_ascii_lowercase(); + match val.as_str() { + "durable" => Ok(Self::Durable), + "safe-no-sync" | "safenosync" | "safe_no_sync" => Ok(Self::SafeNoSync), + _ => Err(format!( + "invalid value '{s}' for sync mode. valid values: durable, safe-no-sync" + )), + } + } +} + #[derive(Clone, Copy, Debug, Default)] pub struct EnvironmentFlags { pub no_sub_dir: bool, diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index a7bda7c3da7..feb902d4938 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -83,6 +83,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index fe7dd7d0bae..27cb2198aaf 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -46,6 +46,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --table The table name to diff. If not specified, all tables are diffed. diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index f8f1c199de5..e7e3b6c0df6 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -70,6 +70,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + -u, --url Specify a snapshot URL or let the command propose a default one. diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index da732cda33b..a873781d9c3 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -70,6 +70,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --first-block-number Optional first block number to export from the db. It is by default 0. 
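A quick check of how the new flag is meant to behave end to end, based on the `FromStr` impl and CLI tests in this patch: the value is parsed case-insensitively, the `safe-no-sync`/`safenosync`/`safe_no_sync` spellings all map to `SyncMode::SafeNoSync`, and anything else is rejected. The sketch below assumes only the `reth_db::mdbx::SyncMode` re-export already imported in `database.rs` above; it is an illustration, not part of the patch.

```rust
use reth_db::mdbx::SyncMode;
use std::str::FromStr;

fn main() {
    // Durable remains the default and the only other accepted value.
    assert_eq!(SyncMode::from_str("durable"), Ok(SyncMode::Durable));
    // All spellings of the relaxed mode parse to SafeNoSync; parsing trims
    // and lowercases the input, so "SafeNoSync" works as well.
    for s in ["safe-no-sync", "safenosync", "safe_no_sync", "SafeNoSync"] {
        assert_eq!(SyncMode::from_str(s), Ok(SyncMode::SafeNoSync));
    }
    // Unknown values produce a descriptive error (see the invalid-sync-mode CLI test).
    assert!(SyncMode::from_str("ultra-fast").is_err());
}
```

In practice, `reth node --db.sync-mode safe-no-sync` trades the final fsync for write throughput: the database stays consistent after a crash, but the most recent transactions may be lost, which is the trade-off the `DatabaseArguments` doc comment above spells out.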
diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 77afcd5a6b3..77e7883e1bd 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -70,6 +70,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --path The path to a directory for import. diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 405009c6071..39762051649 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -70,6 +70,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --no-state Disables stages that require state. diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 2ef6fdbe838..7e97d087165 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -70,6 +70,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --without-evm Specifies whether to initialize the state without relying on EVM historical data. diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index 51dc401d567..bf9dd671db6 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -70,6 +70,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 48b1c75c591..db25b9e80c0 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -715,6 +715,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + Dev testnet: --dev Start the node in dev mode diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 8dfd3003816..2d586edd5c3 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -70,6 +70,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index b7371fa4cf6..e07b3f542c3 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -70,6 +70,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --from The height to start at diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx 
index 19e813bec22..c14db19c58c 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -70,6 +70,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + Possible values: - headers: The headers stage within the pipeline diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 20cf8660bf1..c29547401be 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -77,6 +77,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index ae57239c9d3..f3e4ccc0e0c 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -70,6 +70,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --metrics Enable Prometheus metrics. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index a7581b22b3f..8bb44279f8d 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -75,6 +75,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --offline If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound From b88b46ac1f7fbbb119b3c1d404a016287af14f94 Mon Sep 17 00:00:00 2001 From: Galoretka Date: Fri, 24 Oct 2025 14:48:29 +0300 Subject: [PATCH 191/371] fix(optimism): guard follow-up inserts by payload_id to prevent mixed sequences (#19264) --- crates/optimism/flashblocks/src/sequence.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/crates/optimism/flashblocks/src/sequence.rs b/crates/optimism/flashblocks/src/sequence.rs index 59d4cfecbcd..fff4bd84a45 100644 --- a/crates/optimism/flashblocks/src/sequence.rs +++ b/crates/optimism/flashblocks/src/sequence.rs @@ -1,6 +1,7 @@ use crate::{ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequenceRx}; use alloy_eips::eip2718::WithEncoded; use alloy_primitives::B256; +use alloy_rpc_types_engine::PayloadId; use core::mem; use eyre::{bail, OptionExt}; use reth_primitives_traits::{Recovered, SignedTransaction}; @@ -82,8 +83,12 @@ where return Ok(()) } - // only insert if we previously received the same block, assume we received index 0 - if self.block_number() == Some(flashblock.metadata.block_number) { + // only insert if we previously received the same block and payload, assume we received + // index 0 + let same_block = self.block_number() == Some(flashblock.metadata.block_number); + let same_payload = self.payload_id() == Some(flashblock.payload_id); + + if same_block && same_payload { trace!(number=%flashblock.block_number(), index = %flashblock.index, block_count = self.inner.len() ,"Received followup flashblock"); self.inner.insert(flashblock.index, 
PreparedFlashBlock::new(flashblock)?); } else { @@ -139,6 +144,10 @@ where pub fn index(&self) -> Option { Some(self.inner.values().last()?.block().index) } + /// Returns the payload id of the first tracked flashblock in the current sequence. + pub fn payload_id(&self) -> Option { + Some(self.inner.values().next()?.block().payload_id) + } } impl Default for FlashBlockPendingSequence From f29f4caf0e8ad19093967a446d80c023e3b0028b Mon Sep 17 00:00:00 2001 From: YK Date: Fri, 24 Oct 2025 19:56:57 +0800 Subject: [PATCH 192/371] perf: Eliminate spawn_blocking in multiproof manager (#19203) --- crates/engine/tree/Cargo.toml | 1 + .../tree/src/tree/payload_processor/mod.rs | 10 +- .../src/tree/payload_processor/multiproof.rs | 758 ++++++++++-------- .../src/tree/payload_processor/prewarm.rs | 5 +- crates/trie/parallel/src/proof.rs | 60 +- crates/trie/parallel/src/proof_task.rs | 313 ++++++-- 6 files changed, 709 insertions(+), 438 deletions(-) diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 8fd87a22bd1..503b5af2630 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -65,6 +65,7 @@ rayon.workspace = true tracing.workspace = true derive_more.workspace = true parking_lot.workspace = true +crossbeam-channel.workspace = true # optional deps for test-utils reth-prune-types = { workspace = true, optional = true } diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index bf3d7268ea5..ac16c60dd67 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -15,6 +15,7 @@ use crate::tree::{ }; use alloy_evm::{block::StateChangeSource, ToTxEnv}; use alloy_primitives::B256; +use crossbeam_channel::Sender as CrossbeamSender; use executor::WorkloadExecutor; use multiproof::{SparseTrieUpdate, *}; use parking_lot::RwLock; @@ -43,7 +44,7 @@ use reth_trie_sparse_parallel::{ParallelSparseTrie, ParallelismThresholds}; use std::{ sync::{ atomic::AtomicBool, - mpsc::{self, channel, Sender}, + mpsc::{self, channel}, Arc, }, time::Instant, @@ -243,7 +244,6 @@ where let multi_proof_task = MultiProofTask::new( state_root_config, - self.executor.clone(), proof_handle.clone(), to_sparse_trie, config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()), @@ -345,7 +345,7 @@ where mut transactions: mpsc::Receiver + Clone + Send + 'static>, transaction_count_hint: usize, provider_builder: StateProviderBuilder, - to_multi_proof: Option>, + to_multi_proof: Option>, ) -> CacheTaskHandle where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, @@ -483,7 +483,7 @@ where #[derive(Debug)] pub struct PayloadHandle { /// Channel for evm state updates - to_multi_proof: Option>, + to_multi_proof: Option>, // must include the receiver of the state root wired to the sparse trie prewarm_handle: CacheTaskHandle, /// Receiver for the state root @@ -561,7 +561,7 @@ pub(crate) struct CacheTaskHandle { /// Metrics for the caches cache_metrics: CachedStateMetrics, /// Channel to the spawned prewarm task if any - to_prewarm_task: Option>, + to_prewarm_task: Option>, } impl CacheTaskHandle { diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 9f136a48125..755f7a7d0d7 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -1,16 +1,15 @@ //! 
Multiproof task related functionality. -use crate::tree::payload_processor::executor::WorkloadExecutor; use alloy_evm::block::StateChangeSource; use alloy_primitives::{ keccak256, map::{B256Set, HashSet}, B256, }; +use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; use dashmap::DashMap; use derive_more::derive::Deref; use metrics::Histogram; -use reth_errors::ProviderError; use reth_metrics::Metrics; use reth_revm::state::EvmState; use reth_trie::{ @@ -20,18 +19,9 @@ use reth_trie::{ }; use reth_trie_parallel::{ proof::ParallelProof, - proof_task::{AccountMultiproofInput, ProofWorkerHandle}, - root::ParallelStateRootError, -}; -use std::{ - collections::BTreeMap, - ops::DerefMut, - sync::{ - mpsc::{channel, Receiver, Sender}, - Arc, - }, - time::{Duration, Instant}, + proof_task::{AccountMultiproofInput, ProofResultMessage, ProofWorkerHandle}, }; +use std::{collections::BTreeMap, ops::DerefMut, sync::Arc, time::Instant}; use tracing::{debug, error, instrument, trace}; /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the @@ -109,10 +99,6 @@ pub(super) enum MultiProofMessage { /// The state update that was used to calculate the proof state: HashedPostState, }, - /// Proof calculation completed for a specific state update - ProofCalculated(Box), - /// Error during proof calculation - ProofCalculationError(ProviderError), /// Signals state update stream end. /// /// This is triggered by block execution, indicating that no additional state updates are @@ -120,17 +106,6 @@ pub(super) enum MultiProofMessage { FinishedStateUpdates, } -/// Message about completion of proof calculation for a specific state update -#[derive(Debug)] -pub(super) struct ProofCalculated { - /// The index of this proof in the sequence of state updates - sequence_number: u64, - /// Sparse trie update - update: SparseTrieUpdate, - /// The time taken to calculate the proof. - elapsed: Duration, -} - /// Handle to track proof calculation ordering. #[derive(Debug, Default)] struct ProofSequencer { @@ -193,10 +168,10 @@ impl ProofSequencer { /// This should trigger once the block has been executed (after) the last state update has been /// sent. This triggers the exit condition of the multi proof task. #[derive(Deref, Debug)] -pub(super) struct StateHookSender(Sender); +pub(super) struct StateHookSender(CrossbeamSender); impl StateHookSender { - pub(crate) const fn new(inner: Sender) -> Self { + pub(crate) const fn new(inner: CrossbeamSender) -> Self { Self(inner) } } @@ -287,16 +262,14 @@ impl From for PendingMultiproofTask { } } -/// Input parameters for spawning a dedicated storage multiproof calculation. +/// Input parameters for dispatching a dedicated storage multiproof calculation. #[derive(Debug)] struct StorageMultiproofInput { - config: MultiProofConfig, - source: Option, hashed_state_update: HashedPostState, hashed_address: B256, proof_targets: B256Set, proof_sequence_number: u64, - state_root_message_sender: Sender, + state_root_message_sender: CrossbeamSender, multi_added_removed_keys: Arc, } @@ -310,7 +283,7 @@ impl StorageMultiproofInput { } } -/// Input parameters for spawning a multiproof calculation. +/// Input parameters for dispatching a multiproof calculation. 
#[derive(Debug)] struct MultiproofInput { config: MultiProofConfig, @@ -318,7 +291,7 @@ struct MultiproofInput { hashed_state_update: HashedPostState, proof_targets: MultiProofTargets, proof_sequence_number: u64, - state_root_message_sender: Sender, + state_root_message_sender: CrossbeamSender, multi_added_removed_keys: Option>, } @@ -332,13 +305,20 @@ impl MultiproofInput { } } -/// Manages concurrent multiproof calculations. +/// Coordinates multiproof dispatch between `MultiProofTask` and the parallel trie workers. +/// +/// # Flow +/// 1. `MultiProofTask` asks the manager to dispatch either storage or account proof work. +/// 2. The manager builds the request, clones `proof_result_tx`, and hands everything to +/// [`ProofWorkerHandle`]. +/// 3. A worker finishes the proof and sends a [`ProofResultMessage`] through the channel included +/// in the job. +/// 4. `MultiProofTask` consumes the message from the same channel and sequences it with +/// `ProofSequencer`. #[derive(Debug)] pub struct MultiproofManager { /// Currently running calculations. inflight: usize, - /// Executor for tasks - executor: WorkloadExecutor, /// Handle to the proof worker pools (storage and account). proof_worker_handle: ProofWorkerHandle, /// Cached storage proof roots for missed leaves; this maps @@ -353,6 +333,9 @@ pub struct MultiproofManager { /// a big account change into different chunks, which may repeatedly /// revisit missed leaves. missed_leaves_storage_roots: Arc>, + /// Channel sender cloned into each dispatched job so workers can send back the + /// `ProofResultMessage`. + proof_result_tx: CrossbeamSender, /// Metrics metrics: MultiProofTaskMetrics, } @@ -360,21 +343,21 @@ pub struct MultiproofManager { impl MultiproofManager { /// Creates a new [`MultiproofManager`]. fn new( - executor: WorkloadExecutor, metrics: MultiProofTaskMetrics, proof_worker_handle: ProofWorkerHandle, + proof_result_tx: CrossbeamSender, ) -> Self { Self { inflight: 0, - executor, metrics, proof_worker_handle, missed_leaves_storage_roots: Default::default(), + proof_result_tx, } } - /// Spawns a new multiproof calculation. - fn spawn(&mut self, input: PendingMultiproofTask) { + /// Dispatches a new multiproof calculation to worker pools. + fn dispatch(&mut self, input: PendingMultiproofTask) { // If there are no proof targets, we can just send an empty multiproof back immediately if input.proof_targets_is_empty() { debug!( @@ -385,91 +368,67 @@ impl MultiproofManager { return } - self.spawn_multiproof_task(input); - } - - /// Spawns a multiproof task, dispatching to `spawn_storage_proof` if the input is a storage - /// multiproof, and dispatching to `spawn_multiproof` otherwise. - fn spawn_multiproof_task(&mut self, input: PendingMultiproofTask) { match input { PendingMultiproofTask::Storage(storage_input) => { - self.spawn_storage_proof(storage_input); + self.dispatch_storage_proof(storage_input); } PendingMultiproofTask::Regular(multiproof_input) => { - self.spawn_multiproof(multiproof_input); + self.dispatch_multiproof(multiproof_input); } } } - /// Spawns a single storage proof calculation task. - fn spawn_storage_proof(&mut self, storage_multiproof_input: StorageMultiproofInput) { + /// Dispatches a single storage proof calculation to worker pool. 
+ fn dispatch_storage_proof(&mut self, storage_multiproof_input: StorageMultiproofInput) { let StorageMultiproofInput { - config, - source, hashed_state_update, hashed_address, proof_targets, proof_sequence_number, - state_root_message_sender, multi_added_removed_keys, + state_root_message_sender: _, } = storage_multiproof_input; - let storage_proof_worker_handle = self.proof_worker_handle.clone(); - let missed_leaves_storage_roots = self.missed_leaves_storage_roots.clone(); + let storage_targets = proof_targets.len(); - self.executor.spawn_blocking(move || { - let storage_targets = proof_targets.len(); + trace!( + target: "engine::tree::payload_processor::multiproof", + proof_sequence_number, + ?proof_targets, + storage_targets, + "Dispatching storage proof to workers" + ); - trace!( - target: "engine::tree::payload_processor::multiproof", - proof_sequence_number, - ?proof_targets, - storage_targets, - "Starting dedicated storage proof calculation", - ); - let start = Instant::now(); - let proof_result = ParallelProof::new( - config.nodes_sorted, - config.state_sorted, - config.prefix_sets, - missed_leaves_storage_roots, - storage_proof_worker_handle, - ) - .with_branch_node_masks(true) - .with_multi_added_removed_keys(Some(multi_added_removed_keys)) - .storage_proof(hashed_address, proof_targets); - let elapsed = start.elapsed(); - trace!( - target: "engine::tree::payload_processor::multiproof", - proof_sequence_number, - ?elapsed, - ?source, - storage_targets, - "Storage multiproofs calculated", - ); + let start = Instant::now(); - match proof_result { - Ok(proof) => { - let _ = state_root_message_sender.send(MultiProofMessage::ProofCalculated( - Box::new(ProofCalculated { - sequence_number: proof_sequence_number, - update: SparseTrieUpdate { - state: hashed_state_update, - multiproof: DecodedMultiProof::from_storage_proof( - hashed_address, - proof, - ), - }, - elapsed, - }), - )); - } - Err(error) => { - let _ = state_root_message_sender - .send(MultiProofMessage::ProofCalculationError(error.into())); - } - } - }); + // Create prefix set from targets + let prefix_set = reth_trie::prefix_set::PrefixSetMut::from( + proof_targets.iter().map(reth_trie::Nibbles::unpack), + ); + let prefix_set = prefix_set.freeze(); + + // Build computation input (data only) + let input = reth_trie_parallel::proof_task::StorageProofInput::new( + hashed_address, + prefix_set, + proof_targets, + true, // with_branch_node_masks + Some(multi_added_removed_keys), + ); + + // Dispatch to storage worker + if let Err(e) = self.proof_worker_handle.dispatch_storage_proof( + input, + reth_trie_parallel::proof_task::ProofResultContext::new( + self.proof_result_tx.clone(), + proof_sequence_number, + hashed_state_update, + start, + ), + ) { + error!(target: "engine::tree::payload_processor::multiproof", ?e, "Failed to dispatch storage proof"); + return; + } self.inflight += 1; self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); @@ -493,91 +452,58 @@ impl MultiproofManager { .record(self.proof_worker_handle.pending_account_tasks() as f64); } - /// Spawns a single multiproof calculation task. - fn spawn_multiproof(&mut self, multiproof_input: MultiproofInput) { + /// Dispatches a single multiproof calculation to worker pool. 
+ fn dispatch_multiproof(&mut self, multiproof_input: MultiproofInput) { let MultiproofInput { config, source, hashed_state_update, proof_targets, proof_sequence_number, - state_root_message_sender, + state_root_message_sender: _, multi_added_removed_keys, } = multiproof_input; - let account_proof_worker_handle = self.proof_worker_handle.clone(); - let missed_leaves_storage_roots = self.missed_leaves_storage_roots.clone(); - self.executor.spawn_blocking(move || { - let account_targets = proof_targets.len(); - let storage_targets = proof_targets.values().map(|slots| slots.len()).sum::(); + let missed_leaves_storage_roots = self.missed_leaves_storage_roots.clone(); + let account_targets = proof_targets.len(); + let storage_targets = proof_targets.values().map(|slots| slots.len()).sum::(); - trace!( - target: "engine::tree::payload_processor::multiproof", - proof_sequence_number, - ?proof_targets, - account_targets, - storage_targets, - ?source, - "Starting multiproof calculation", - ); + trace!( + target: "engine::tree::payload_processor::multiproof", + proof_sequence_number, + ?proof_targets, + account_targets, + storage_targets, + ?source, + "Dispatching multiproof to workers" + ); - let start = Instant::now(); + let start = Instant::now(); - // Extend prefix sets with targets - let frozen_prefix_sets = - ParallelProof::extend_prefix_sets_with_targets(&config.prefix_sets, &proof_targets); + // Extend prefix sets with targets + let frozen_prefix_sets = + ParallelProof::extend_prefix_sets_with_targets(&config.prefix_sets, &proof_targets); - // Queue account multiproof to worker pool - let input = AccountMultiproofInput { - targets: proof_targets, - prefix_sets: frozen_prefix_sets, - collect_branch_node_masks: true, - multi_added_removed_keys, - missed_leaves_storage_roots, - }; - - let proof_result: Result = (|| { - let receiver = account_proof_worker_handle - .dispatch_account_multiproof(input) - .map_err(|e| ParallelStateRootError::Other(e.to_string()))?; - - receiver - .recv() - .map_err(|_| { - ParallelStateRootError::Other("Account multiproof channel closed".into()) - })? 
- .map(|(proof, _stats)| proof) - })(); - let elapsed = start.elapsed(); - trace!( - target: "engine::tree::payload_processor::multiproof", + // Dispatch account multiproof to worker pool with result sender + let input = AccountMultiproofInput { + targets: proof_targets, + prefix_sets: frozen_prefix_sets, + collect_branch_node_masks: true, + multi_added_removed_keys, + missed_leaves_storage_roots, + // Workers will send ProofResultMessage directly to proof_result_rx + proof_result_sender: reth_trie_parallel::proof_task::ProofResultContext::new( + self.proof_result_tx.clone(), proof_sequence_number, - ?elapsed, - ?source, - account_targets, - storage_targets, - "Multiproof calculated", - ); + hashed_state_update, + start, + ), + }; - match proof_result { - Ok(proof) => { - let _ = state_root_message_sender.send(MultiProofMessage::ProofCalculated( - Box::new(ProofCalculated { - sequence_number: proof_sequence_number, - update: SparseTrieUpdate { - state: hashed_state_update, - multiproof: proof, - }, - elapsed, - }), - )); - } - Err(error) => { - let _ = state_root_message_sender - .send(MultiProofMessage::ProofCalculationError(error.into())); - } - } - }); + if let Err(e) = self.proof_worker_handle.dispatch_account_multiproof(input) { + error!(target: "engine::tree::payload_processor::multiproof", ?e, "Failed to dispatch account multiproof"); + return; + } self.inflight += 1; self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); @@ -639,12 +565,104 @@ pub(crate) struct MultiProofTaskMetrics { /// Standalone task that receives a transaction state stream and updates relevant /// data structures to calculate state root. /// -/// It is responsible of initializing a blinded sparse trie and subscribe to -/// transaction state stream. As it receives transaction execution results, it -/// fetches the proofs for relevant accounts from the database and reveal them -/// to the tree. -/// Then it updates relevant leaves according to the result of the transaction. -/// This feeds updates to the sparse trie task. +/// ## Architecture: Dual-Channel Multiproof System +/// +/// This task orchestrates parallel proof computation using a dual-channel architecture that +/// separates control messages from proof computation results: +/// +/// ```text +/// ┌─────────────────────────────────────────────────────────────────┐ +/// │ MultiProofTask │ +/// │ Event Loop (crossbeam::select!) 
│ +/// └──┬──────────────────────────────────────────────────────────▲───┘ +/// │ │ +/// │ (1) Send proof request │ +/// │ via tx (control channel) │ +/// │ │ +/// ▼ │ +/// ┌──────────────────────────────────────────────────────────────┐ │ +/// │ MultiproofManager │ │ +/// │ - Tracks inflight calculations │ │ +/// │ - Deduplicates against fetched_proof_targets │ │ +/// │ - Routes to appropriate worker pool │ │ +/// └──┬───────────────────────────────────────────────────────────┘ │ +/// │ │ +/// │ (2) Dispatch to workers │ +/// │ OR send EmptyProof (fast path) │ +/// ▼ │ +/// ┌──────────────────────────────────────────────────────────────┐ │ +/// │ ProofWorkerHandle │ │ +/// │ ┌─────────────────────┐ ┌────────────────────────┐ │ │ +/// │ │ Storage Worker Pool │ │ Account Worker Pool │ │ │ +/// │ │ (spawn_blocking) │ │ (spawn_blocking) │ │ │ +/// │ └─────────────────────┘ └────────────────────────┘ │ │ +/// └──┬───────────────────────────────────────────────────────────┘ │ +/// │ │ +/// │ (3) Compute proofs in parallel │ +/// │ Send results back │ +/// │ │ +/// ▼ │ +/// ┌──────────────────────────────────────────────────────────────┐ │ +/// │ proof_result_tx (crossbeam unbounded channel) │ │ +/// │ → ProofResultMessage { multiproof, sequence_number, ... } │ │ +/// └──────────────────────────────────────────────────────────────┘ │ +/// │ +/// (4) Receive via crossbeam::select! on two channels: ───────────┘ +/// - rx: Control messages (PrefetchProofs, StateUpdate, +/// EmptyProof, FinishedStateUpdates) +/// - proof_result_rx: Computed proof results from workers +/// ``` +/// +/// ## Component Responsibilities +/// +/// - **[`MultiProofTask`]**: Event loop coordinator +/// - Receives state updates from transaction execution +/// - Deduplicates proof targets against already-fetched proofs +/// - Sequences proofs to maintain transaction ordering +/// - Feeds sequenced updates to sparse trie task +/// +/// - **[`MultiproofManager`]**: Calculation orchestrator +/// - Decides between fast path ([`EmptyProof`]) and worker dispatch +/// - Tracks inflight calculations +/// - Routes storage-only vs full multiproofs to appropriate workers +/// - Records metrics for monitoring +/// +/// - **[`ProofWorkerHandle`]**: Worker pool manager +/// - Maintains separate pools for storage and account proofs +/// - Dispatches work to blocking threads (CPU-intensive) +/// - Sends results directly via `proof_result_tx` (bypasses control channel) +/// +/// [`EmptyProof`]: MultiProofMessage::EmptyProof +/// [`ProofWorkerHandle`]: reth_trie_parallel::proof_task::ProofWorkerHandle +/// +/// ## Dual-Channel Design Rationale +/// +/// The system uses two separate crossbeam channels: +/// +/// 1. **Control Channel (`tx`/`rx`)**: For orchestration messages +/// - `PrefetchProofs`: Pre-fetch proofs before execution +/// - `StateUpdate`: New transaction execution results +/// - `EmptyProof`: Fast path when all targets already fetched +/// - `FinishedStateUpdates`: Signal to drain pending work +/// +/// 2. 
**Proof Result Channel (`proof_result_tx`/`proof_result_rx`)**: For worker results +/// - `ProofResultMessage`: Computed multiproofs from worker pools +/// - Direct path from workers to event loop (no intermediate hops) +/// - Keeps control messages separate from high-throughput proof data +/// +/// This separation enables: +/// - **Non-blocking control**: Control messages never wait behind large proof data +/// - **Backpressure management**: Each channel can apply different policies +/// - **Clear ownership**: Workers only need proof result sender, not control channel +/// +/// ## Initialization and Lifecycle +/// +/// The task initializes a blinded sparse trie and subscribes to transaction state streams. +/// As it receives transaction execution results, it fetches proofs for relevant accounts +/// from the database and reveals them to the tree, then updates relevant leaves according +/// to transaction results. This feeds updates to the sparse trie task. +/// +/// See the `run()` method documentation for detailed lifecycle flow. #[derive(Debug)] pub(super) struct MultiProofTask { /// The size of proof targets chunk to spawn in one calculation. @@ -652,12 +670,14 @@ pub(super) struct MultiProofTask { chunk_size: Option, /// Task configuration. config: MultiProofConfig, - /// Receiver for state root related messages. - rx: Receiver, + /// Receiver for state root related messages (prefetch, state updates, finish signal). + rx: CrossbeamReceiver, /// Sender for state root related messages. - tx: Sender, + tx: CrossbeamSender, + /// Receiver for proof results directly from workers. + proof_result_rx: CrossbeamReceiver, /// Sender for state updates emitted by this type. - to_sparse_trie: Sender, + to_sparse_trie: std::sync::mpsc::Sender, /// Proof targets that have been already fetched. fetched_proof_targets: MultiProofTargets, /// Tracks keys which have been added and removed throughout the entire block. @@ -674,12 +694,12 @@ impl MultiProofTask { /// Creates a new multi proof task with the unified message channel pub(super) fn new( config: MultiProofConfig, - executor: WorkloadExecutor, proof_worker_handle: ProofWorkerHandle, - to_sparse_trie: Sender, + to_sparse_trie: std::sync::mpsc::Sender, chunk_size: Option, ) -> Self { - let (tx, rx) = channel(); + let (tx, rx) = unbounded(); + let (proof_result_tx, proof_result_rx) = unbounded(); let metrics = MultiProofTaskMetrics::default(); Self { @@ -687,21 +707,22 @@ impl MultiProofTask { config, rx, tx, + proof_result_rx, to_sparse_trie, fetched_proof_targets: Default::default(), multi_added_removed_keys: MultiAddedRemovedKeys::new(), proof_sequencer: ProofSequencer::default(), multiproof_manager: MultiproofManager::new( - executor, metrics.clone(), proof_worker_handle, + proof_result_tx, ), metrics, } } - /// Returns a [`Sender`] that can be used to send arbitrary [`MultiProofMessage`]s to this task. - pub(super) fn state_root_message_sender(&self) -> Sender { + /// Returns a sender that can be used to send arbitrary [`MultiProofMessage`]s to this task. + pub(super) fn state_root_message_sender(&self) -> CrossbeamSender { self.tx.clone() } @@ -718,7 +739,7 @@ impl MultiProofTask { // we still want to optimistically fetch extension children for the leaf addition case. 
self.multi_added_removed_keys.touch_accounts(proof_targets.keys().copied()); - // Clone+Arc MultiAddedRemovedKeys for sharing with the spawned multiproof tasks + // Clone+Arc MultiAddedRemovedKeys for sharing with the dispatched multiproof tasks let multi_added_removed_keys = Arc::new(self.multi_added_removed_keys.clone()); self.metrics.prefetch_proof_targets_accounts_histogram.record(proof_targets.len() as f64); @@ -734,8 +755,8 @@ impl MultiProofTask { self.multiproof_manager.proof_worker_handle.has_available_account_workers() || self.multiproof_manager.proof_worker_handle.has_available_storage_workers(); - let mut spawn = |proof_targets| { - self.multiproof_manager.spawn( + let mut dispatch = |proof_targets| { + self.multiproof_manager.dispatch( MultiproofInput { config: self.config.clone(), source: None, @@ -752,10 +773,10 @@ impl MultiProofTask { if should_chunk && let Some(chunk_size) = self.chunk_size { for proof_targets_chunk in proof_targets.chunks(chunk_size) { - spawn(proof_targets_chunk); + dispatch(proof_targets_chunk); } } else { - spawn(proof_targets); + dispatch(proof_targets); } self.metrics.prefetch_proof_chunks_histogram.record(chunks as f64); @@ -853,7 +874,7 @@ impl MultiProofTask { let mut state_updates = 0; // If there are any accounts or storage slots that we already fetched the proofs for, - // send them immediately, as they don't require spawning any additional multiproofs. + // send them immediately, as they don't require dispatching any additional multiproofs. if !fetched_state_update.is_empty() { let _ = self.tx.send(MultiProofMessage::EmptyProof { sequence_number: self.proof_sequencer.next_sequence(), @@ -862,7 +883,7 @@ impl MultiProofTask { state_updates += 1; } - // Clone+Arc MultiAddedRemovedKeys for sharing with the spawned multiproof tasks + // Clone+Arc MultiAddedRemovedKeys for sharing with the dispatched multiproof tasks let multi_added_removed_keys = Arc::new(self.multi_added_removed_keys.clone()); // Process state updates in chunks. @@ -875,7 +896,7 @@ impl MultiProofTask { self.multiproof_manager.proof_worker_handle.has_available_account_workers() || self.multiproof_manager.proof_worker_handle.has_available_storage_workers(); - let mut spawn = |hashed_state_update| { + let mut dispatch = |hashed_state_update| { let proof_targets = get_proof_targets( &hashed_state_update, &self.fetched_proof_targets, @@ -883,7 +904,7 @@ impl MultiProofTask { ); spawned_proof_targets.extend_ref(&proof_targets); - self.multiproof_manager.spawn( + self.multiproof_manager.dispatch( MultiproofInput { config: self.config.clone(), source: Some(source), @@ -901,10 +922,10 @@ impl MultiProofTask { if should_chunk && let Some(chunk_size) = self.chunk_size { for chunk in not_fetched_state_update.chunks(chunk_size) { - spawn(chunk); + dispatch(chunk); } } else { - spawn(not_fetched_state_update); + dispatch(not_fetched_state_update); } self.metrics @@ -952,15 +973,14 @@ impl MultiProofTask { /// so that the proofs for accounts and storage slots that were already fetched are not /// requested again. /// 2. Using the proof targets, a new multiproof is calculated using - /// [`MultiproofManager::spawn`]. + /// [`MultiproofManager::dispatch`]. /// * If the list of proof targets is empty, the [`MultiProofMessage::EmptyProof`] message is /// sent back to this task along with the original state update. 
- /// * Otherwise, the multiproof is calculated and the [`MultiProofMessage::ProofCalculated`] - /// message is sent back to this task along with the resulting multiproof, proof targets - /// and original state update. - /// 3. Either [`MultiProofMessage::EmptyProof`] or [`MultiProofMessage::ProofCalculated`] is - /// received. - /// * The multiproof is added to the (proof sequencer)[`ProofSequencer`]. + /// * Otherwise, the multiproof is dispatched to worker pools and results are sent directly + /// to this task via the `proof_result_rx` channel as [`ProofResultMessage`]. + /// 3. Either [`MultiProofMessage::EmptyProof`] (via control channel) or [`ProofResultMessage`] + /// (via proof result channel) is received. + /// * The multiproof is added to the [`ProofSequencer`]. /// * If the proof sequencer has a contiguous sequence of multiproofs in the same order as /// state updates arrived (i.e. transaction order), such sequence is returned. /// 4. Once there's a sequence of contiguous multiproofs along with the proof targets and state @@ -969,9 +989,8 @@ impl MultiProofTask { /// 5. Steps above are repeated until this task receives a /// [`MultiProofMessage::FinishedStateUpdates`]. /// * Once this message is received, on every [`MultiProofMessage::EmptyProof`] and - /// [`MultiProofMessage::ProofCalculated`] message, we check if there are any proofs are - /// currently being calculated, or if there are any pending proofs in the proof sequencer - /// left to be revealed by checking the pending tasks. + /// [`ProofResultMessage`], we check if all proofs have been processed and if there are any + /// pending proofs in the proof sequencer left to be revealed. /// 6. This task exits after all pending proofs are processed. #[instrument( level = "debug", @@ -997,147 +1016,164 @@ impl MultiProofTask { loop { trace!(target: "engine::tree::payload_processor::multiproof", "entering main channel receiving loop"); - match self.rx.recv() { - Ok(message) => match message { - MultiProofMessage::PrefetchProofs(targets) => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::PrefetchProofs"); - if first_update_time.is_none() { - // record the wait time - self.metrics - .first_update_wait_time_histogram - .record(start.elapsed().as_secs_f64()); - first_update_time = Some(Instant::now()); - debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); - } - let account_targets = targets.len(); - let storage_targets = - targets.values().map(|slots| slots.len()).sum::(); - prefetch_proofs_requested += self.on_prefetch_proof(targets); - trace!( - target: "engine::tree::payload_processor::multiproof", - account_targets, - storage_targets, - prefetch_proofs_requested, - "Prefetching proofs" - ); - } - MultiProofMessage::StateUpdate(source, update) => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::StateUpdate"); - if first_update_time.is_none() { - // record the wait time - self.metrics - .first_update_wait_time_histogram - .record(start.elapsed().as_secs_f64()); - first_update_time = Some(Instant::now()); - debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); + crossbeam_channel::select! 
{ + recv(self.rx) -> message => { + match message { + Ok(msg) => match msg { + MultiProofMessage::PrefetchProofs(targets) => { + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::PrefetchProofs"); + + if first_update_time.is_none() { + // record the wait time + self.metrics + .first_update_wait_time_histogram + .record(start.elapsed().as_secs_f64()); + first_update_time = Some(Instant::now()); + debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); + } + + let account_targets = targets.len(); + let storage_targets = + targets.values().map(|slots| slots.len()).sum::(); + prefetch_proofs_requested += self.on_prefetch_proof(targets); + debug!( + target: "engine::tree::payload_processor::multiproof", + account_targets, + storage_targets, + prefetch_proofs_requested, + "Prefetching proofs" + ); + } + MultiProofMessage::StateUpdate(source, update) => { + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::StateUpdate"); + + if first_update_time.is_none() { + // record the wait time + self.metrics + .first_update_wait_time_histogram + .record(start.elapsed().as_secs_f64()); + first_update_time = Some(Instant::now()); + debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); + } + + let len = update.len(); + state_update_proofs_requested += self.on_state_update(source, update); + debug!( + target: "engine::tree::payload_processor::multiproof", + ?source, + len, + ?state_update_proofs_requested, + "Received new state update" + ); + } + MultiProofMessage::FinishedStateUpdates => { + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::FinishedStateUpdates"); + + updates_finished = true; + updates_finished_time = Some(Instant::now()); + + if self.is_done( + proofs_processed, + state_update_proofs_requested, + prefetch_proofs_requested, + updates_finished, + ) { + debug!( + target: "engine::tree::payload_processor::multiproof", + "State updates finished and all proofs processed, ending calculation" + ); + break + } + } + MultiProofMessage::EmptyProof { sequence_number, state } => { + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::EmptyProof"); + + proofs_processed += 1; + + if let Some(combined_update) = self.on_proof( + sequence_number, + SparseTrieUpdate { state, multiproof: Default::default() }, + ) { + let _ = self.to_sparse_trie.send(combined_update); + } + + if self.is_done( + proofs_processed, + state_update_proofs_requested, + prefetch_proofs_requested, + updates_finished, + ) { + debug!( + target: "engine::tree::payload_processor::multiproof", + "State updates finished and all proofs processed, ending calculation" + ); + break + } + } + }, + Err(_) => { + error!(target: "engine::tree::payload_processor::multiproof", "State root related message channel closed unexpectedly"); + return } - - let len = update.len(); - state_update_proofs_requested += self.on_state_update(source, update); - trace!( - target: "engine::tree::payload_processor::multiproof", - ?source, - len, - ?state_update_proofs_requested, - "Received new state update" - ); } - MultiProofMessage::FinishedStateUpdates => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::FinishedStateUpdates"); - updates_finished = true; - updates_finished_time = Some(Instant::now()); - if self.is_done( - proofs_processed, - state_update_proofs_requested, 
- prefetch_proofs_requested, - updates_finished, - ) { - debug!( - target: "engine::tree::payload_processor::multiproof", - "State updates finished and all proofs processed, ending calculation" - ); - break - } - } - MultiProofMessage::EmptyProof { sequence_number, state } => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::EmptyProof"); - - proofs_processed += 1; + }, + recv(self.proof_result_rx) -> proof_msg => { + match proof_msg { + Ok(proof_result) => { + proofs_processed += 1; - if let Some(combined_update) = self.on_proof( - sequence_number, - SparseTrieUpdate { state, multiproof: Default::default() }, - ) { - let _ = self.to_sparse_trie.send(combined_update); + self.metrics + .proof_calculation_duration_histogram + .record(proof_result.elapsed); + + self.multiproof_manager.on_calculation_complete(); + + // Convert ProofResultMessage to SparseTrieUpdate + match proof_result.result { + Ok((multiproof, _stats)) => { + debug!( + target: "engine::tree::payload_processor::multiproof", + sequence = proof_result.sequence_number, + total_proofs = proofs_processed, + "Processing calculated proof from worker" + ); + + let update = SparseTrieUpdate { + state: proof_result.state, + multiproof, + }; + + if let Some(combined_update) = + self.on_proof(proof_result.sequence_number, update) + { + let _ = self.to_sparse_trie.send(combined_update); + } + } + Err(error) => { + error!(target: "engine::tree::payload_processor::multiproof", ?error, "proof calculation error from worker"); + return + } + } + + if self.is_done( + proofs_processed, + state_update_proofs_requested, + prefetch_proofs_requested, + updates_finished, + ) { + debug!( + target: "engine::tree::payload_processor::multiproof", + "State updates finished and all proofs processed, ending calculation" + ); + break + } } - - if self.is_done( - proofs_processed, - state_update_proofs_requested, - prefetch_proofs_requested, - updates_finished, - ) { - debug!( - target: "engine::tree::payload_processor::multiproof", - "State updates finished and all proofs processed, ending calculation" - ); - break + Err(_) => { + error!(target: "engine::tree::payload_processor::multiproof", "Proof result channel closed unexpectedly"); + return } } - MultiProofMessage::ProofCalculated(proof_calculated) => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing - MultiProofMessage::ProofCalculated"); - - // we increment proofs_processed for both state updates and prefetches, - // because both are used for the root termination condition. 
- proofs_processed += 1; - - self.metrics - .proof_calculation_duration_histogram - .record(proof_calculated.elapsed); - - trace!( - target: "engine::tree::payload_processor::multiproof", - sequence = proof_calculated.sequence_number, - total_proofs = proofs_processed, - "Processing calculated proof" - ); - - self.multiproof_manager.on_calculation_complete(); - - if let Some(combined_update) = - self.on_proof(proof_calculated.sequence_number, proof_calculated.update) - { - let _ = self.to_sparse_trie.send(combined_update); - } - - if self.is_done( - proofs_processed, - state_update_proofs_requested, - prefetch_proofs_requested, - updates_finished, - ) { - debug!( - target: "engine::tree::payload_processor::multiproof", - "State updates finished and all proofs processed, ending calculation"); - break - } - } - MultiProofMessage::ProofCalculationError(err) => { - error!( - target: "engine::tree::payload_processor::multiproof", - ?err, - "proof calculation error" - ); - return - } - }, - Err(_) => { - // this means our internal message channel is closed, which shouldn't happen - // in normal operation since we hold both ends - error!(target: "engine::tree::payload_processor::multiproof", "Internal message channel closed unexpectedly"); - return } } } @@ -1220,12 +1256,25 @@ mod tests { use reth_trie::{MultiProof, TrieInput}; use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofWorkerHandle}; use revm_primitives::{B256, U256}; + use std::sync::OnceLock; + use tokio::runtime::{Handle, Runtime}; + + /// Get a handle to the test runtime, creating it if necessary + fn get_test_runtime_handle() -> Handle { + static TEST_RT: OnceLock = OnceLock::new(); + TEST_RT + .get_or_init(|| { + tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap() + }) + .handle() + .clone() + } fn create_test_state_root_task(factory: F) -> MultiProofTask where F: DatabaseProviderFactory + Clone + 'static, { - let executor = WorkloadExecutor::default(); + let rt_handle = get_test_runtime_handle(); let (_trie_input, config) = MultiProofConfig::from_input(TrieInput::default()); let task_ctx = ProofTaskCtx::new( config.nodes_sorted.clone(), @@ -1233,11 +1282,10 @@ mod tests { config.prefix_sets.clone(), ); let consistent_view = ConsistentDbView::new(factory, None); - let proof_handle = - ProofWorkerHandle::new(executor.handle().clone(), consistent_view, task_ctx, 1, 1); - let channel = channel(); + let proof_handle = ProofWorkerHandle::new(rt_handle, consistent_view, task_ctx, 1, 1); + let (to_sparse_trie, _receiver) = std::sync::mpsc::channel(); - MultiProofTask::new(config, executor, proof_handle, channel.0, Some(1)) + MultiProofTask::new(config, proof_handle, to_sparse_trie, Some(1)) } #[test] diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index abc3bd58351..9815ea81228 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -24,6 +24,7 @@ use alloy_consensus::transaction::TxHashRef; use alloy_eips::Typed2718; use alloy_evm::Database; use alloy_primitives::{keccak256, map::B256Set, B256}; +use crossbeam_channel::Sender as CrossbeamSender; use metrics::{Counter, Gauge, Histogram}; use reth_evm::{execute::ExecutableTxFor, ConfigureEvm, Evm, EvmFor, SpecFor}; use reth_metrics::Metrics; @@ -83,7 +84,7 @@ where /// The number of transactions to be processed transaction_count_hint: usize, /// Sender to emit evm state outcome messages, if any. 
- to_multi_proof: Option>, + to_multi_proof: Option>, /// Receiver for events produced by tx execution actions_rx: Receiver, } @@ -99,7 +100,7 @@ where executor: WorkloadExecutor, execution_cache: PayloadExecutionCache, ctx: PrewarmContext, - to_multi_proof: Option>, + to_multi_proof: Option>, transaction_count_hint: usize, max_concurrency: usize, ) -> (Self, Sender) { diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 3ea5994488a..e8b39f38ec6 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -1,20 +1,25 @@ use crate::{ metrics::ParallelTrieMetrics, - proof_task::{AccountMultiproofInput, ProofWorkerHandle, StorageProofInput}, + proof_task::{ + AccountMultiproofInput, ProofResultContext, ProofResultMessage, ProofWorkerHandle, + StorageProofInput, + }, root::ParallelStateRootError, StorageRootTargets, }; use alloy_primitives::{map::B256Set, B256}; +use crossbeam_channel::{unbounded as crossbeam_unbounded, Receiver as CrossbeamReceiver}; use dashmap::DashMap; use reth_execution_errors::StorageRootError; use reth_storage_errors::db::DatabaseError; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets, TriePrefixSetsMut}, updates::TrieUpdatesSorted, - DecodedMultiProof, DecodedStorageMultiProof, HashedPostStateSorted, MultiProofTargets, Nibbles, + DecodedMultiProof, DecodedStorageMultiProof, HashedPostState, HashedPostStateSorted, + MultiProofTargets, Nibbles, }; use reth_trie_common::added_removed_keys::MultiAddedRemovedKeys; -use std::sync::{mpsc::Receiver, Arc}; +use std::{sync::Arc, time::Instant}; use tracing::trace; /// Parallel proof calculator. @@ -88,10 +93,10 @@ impl ParallelProof { hashed_address: B256, prefix_set: PrefixSet, target_slots: B256Set, - ) -> Result< - Receiver>, - ParallelStateRootError, - > { + ) -> Result, ParallelStateRootError> { + let (result_tx, result_rx) = crossbeam_channel::unbounded(); + let start = Instant::now(); + let input = StorageProofInput::new( hashed_address, prefix_set, @@ -101,8 +106,13 @@ impl ParallelProof { ); self.proof_worker_handle - .dispatch_storage_proof(input) - .map_err(|e| ParallelStateRootError::Other(e.to_string())) + .dispatch_storage_proof( + input, + ProofResultContext::new(result_tx, 0, HashedPostState::default(), start), + ) + .map_err(|e| ParallelStateRootError::Other(e.to_string()))?; + + Ok(result_rx) } /// Generate a storage multiproof according to the specified targets and hashed address. @@ -123,12 +133,22 @@ impl ParallelProof { ); let receiver = self.queue_storage_proof(hashed_address, prefix_set, target_slots)?; - let proof_result = receiver.recv().map_err(|_| { + let proof_msg = receiver.recv().map_err(|_| { ParallelStateRootError::StorageRoot(StorageRootError::Database(DatabaseError::Other( format!("channel closed for {hashed_address}"), ))) })?; + // Extract the multiproof from the result + let (mut multiproof, _stats) = proof_msg.result?; + + // Extract storage proof from the multiproof + let storage_proof = multiproof.storages.remove(&hashed_address).ok_or_else(|| { + ParallelStateRootError::StorageRoot(StorageRootError::Database(DatabaseError::Other( + format!("storage proof not found in multiproof for {hashed_address}"), + ))) + })?; + trace!( target: "trie::parallel_proof", total_targets, @@ -136,7 +156,7 @@ impl ParallelProof { "Storage proof generation completed" ); - proof_result + Ok(storage_proof) } /// Extends prefix sets with the given multiproof targets and returns the frozen result. 
@@ -182,6 +202,9 @@ impl ParallelProof { ); // Queue account multiproof request to account worker pool + // Create channel for receiving ProofResultMessage + let (result_tx, result_rx) = crossbeam_unbounded(); + let account_multiproof_start_time = Instant::now(); let input = AccountMultiproofInput { targets, @@ -189,19 +212,26 @@ impl ParallelProof { collect_branch_node_masks: self.collect_branch_node_masks, multi_added_removed_keys: self.multi_added_removed_keys.clone(), missed_leaves_storage_roots: self.missed_leaves_storage_roots.clone(), + proof_result_sender: ProofResultContext::new( + result_tx, + 0, + HashedPostState::default(), + account_multiproof_start_time, + ), }; - let receiver = self - .proof_worker_handle + self.proof_worker_handle .dispatch_account_multiproof(input) .map_err(|e| ParallelStateRootError::Other(e.to_string()))?; // Wait for account multiproof result from worker - let (multiproof, stats) = receiver.recv().map_err(|_| { + let proof_result_msg = result_rx.recv().map_err(|_| { ParallelStateRootError::Other( "Account multiproof channel dropped: worker died or pool shutdown".to_string(), ) - })??; + })?; + + let (multiproof, stats) = proof_result_msg.result?; #[cfg(feature = "metrics")] self.metrics.record(stats); diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 18e93dc26a4..c24e2ce8347 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -10,6 +10,25 @@ //! access to worker channels, eliminating routing overhead //! - **Automatic Shutdown**: Workers terminate gracefully when all handles are dropped //! +//! # Message Flow +//! +//! 1. `MultiProofTask` prepares a storage or account job and hands it to [`ProofWorkerHandle`]. The +//! job carries a [`ProofResultContext`] so the worker knows how to send the result back. +//! 2. A worker receives the job, runs the proof, and sends a [`ProofResultMessage`] through the +//! provided [`ProofResultSender`]. +//! 3. `MultiProofTask` receives the message, uses `sequence_number` to keep proofs in order, and +//! proceeds with its state-root logic. +//! +//! Each job gets its own direct channel so results go straight back to `MultiProofTask`. That keeps +//! ordering decisions in one place and lets workers run independently. +//! +//! ```text +//! MultiProofTask -> MultiproofManager -> ProofWorkerHandle -> Storage/Account Worker +//! ^ | +//! | v +//! ProofResultMessage <-------- ProofResultSender --- +//! ``` +//! //! Individual [`ProofTaskTx`] instances manage a dedicated [`InMemoryTrieCursorFactory`] and //! [`HashedPostStateCursorFactory`], which are each backed by a database transaction. 
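Reduced to its essentials, the message flow documented above is a per-job result channel: the dispatcher creates a crossbeam channel, hands the sender (together with a sequence number and a start timestamp) to the worker as part of the job, and the worker reports back through it when done; the dispatcher then orders results by sequence number. The sketch below illustrates that pattern with simplified stand-in types; `ResultMsg` and `Ctx` are not the actual `ProofResultMessage`/`ProofResultContext` definitions from this patch, and the proof computation is replaced by trivial arithmetic.

    use crossbeam_channel::{unbounded, Sender};
    use std::{thread, time::Instant};

    struct ResultMsg {
        sequence_number: u64,
        result: Result<u64, String>,
        elapsed_us: u128,
    }

    struct Ctx {
        sender: Sender<ResultMsg>,
        sequence_number: u64,
        start_time: Instant,
    }

    fn main() {
        // One receiver held by the dispatcher; each job carries a clone of the sender.
        let (result_tx, result_rx) = unbounded::<ResultMsg>();

        // Dispatch two "jobs" to worker threads, tagging each with a sequence number.
        for seq in 0..2u64 {
            let ctx = Ctx {
                sender: result_tx.clone(),
                sequence_number: seq,
                start_time: Instant::now(),
            };
            thread::spawn(move || {
                // Worker computes something and reports back through its context.
                let value = seq * 10; // stand-in for the proof computation
                let _ = ctx.sender.send(ResultMsg {
                    sequence_number: ctx.sequence_number,
                    result: Ok(value),
                    elapsed_us: ctx.start_time.elapsed().as_micros(),
                });
            });
        }
        drop(result_tx); // dispatcher keeps only the receiver

        // The event loop uses sequence_number to keep results in order.
        let mut msgs: Vec<ResultMsg> = result_rx.iter().collect();
        msgs.sort_by_key(|m| m.sequence_number);
        for m in msgs {
            println!("job {} -> {:?} ({} us)", m.sequence_number, m.result, m.elapsed_us);
        }
    }
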
@@ -39,8 +58,8 @@ use reth_trie::{ trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdatesSorted, walker::TrieWalker, - DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostStateSorted, - MultiProofTargets, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, + DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostState, + HashedPostStateSorted, MultiProofTargets, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_common::{ added_removed_keys::MultiAddedRemovedKeys, @@ -55,10 +74,10 @@ use std::{ mpsc::{channel, Receiver, Sender}, Arc, }, - time::Instant, + time::{Duration, Instant}, }; use tokio::runtime::Handle; -use tracing::{debug_span, trace}; +use tracing::{debug_span, error, trace}; #[cfg(feature = "metrics")] use crate::proof_task_metrics::ProofTaskTrieMetrics; @@ -68,6 +87,56 @@ type TrieNodeProviderResult = Result, SparseTrieError>; type AccountMultiproofResult = Result<(DecodedMultiProof, ParallelTrieStats), ParallelStateRootError>; +/// Channel used by worker threads to deliver `ProofResultMessage` items back to +/// `MultiProofTask`. +/// +/// Workers use this sender to deliver proof results directly to `MultiProofTask`. +pub type ProofResultSender = CrossbeamSender; + +/// Message containing a completed proof result with metadata for direct delivery to +/// `MultiProofTask`. +/// +/// This type enables workers to send proof results directly to the `MultiProofTask` event loop. +#[derive(Debug)] +pub struct ProofResultMessage { + /// Sequence number for ordering proofs + pub sequence_number: u64, + /// The proof calculation result + pub result: AccountMultiproofResult, + /// Time taken for the entire proof calculation (from dispatch to completion) + pub elapsed: Duration, + /// Original state update that triggered this proof + pub state: HashedPostState, +} + +/// Context for sending proof calculation results back to `MultiProofTask`. +/// +/// This struct contains all context needed to send and track proof calculation results. +/// Workers use this to deliver completed proofs back to the main event loop. +#[derive(Debug, Clone)] +pub struct ProofResultContext { + /// Channel sender for result delivery + pub sender: ProofResultSender, + /// Sequence number for proof ordering + pub sequence_number: u64, + /// Original state update that triggered this proof + pub state: HashedPostState, + /// Calculation start time for measuring elapsed duration + pub start_time: Instant, +} + +impl ProofResultContext { + /// Creates a new proof result context. + pub const fn new( + sender: ProofResultSender, + sequence_number: u64, + state: HashedPostState, + start_time: Instant, + ) -> Self { + Self { sender, sequence_number, state, start_time } + } +} + /// Internal message for storage workers. #[derive(Debug)] enum StorageWorkerJob { @@ -75,8 +144,8 @@ enum StorageWorkerJob { StorageProof { /// Storage proof input parameters input: StorageProofInput, - /// Channel to send result back to original caller - result_sender: Sender, + /// Context for sending the proof result. 
+ proof_result_sender: ProofResultContext, }, /// Blinded storage node retrieval request BlindedStorageNode { @@ -154,19 +223,22 @@ fn storage_worker_loop( available_workers.fetch_sub(1, Ordering::Relaxed); match job { - StorageWorkerJob::StorageProof { input, result_sender } => { + StorageWorkerJob::StorageProof { input, proof_result_sender } => { let hashed_address = input.hashed_address; + let ProofResultContext { sender, sequence_number: seq, state, start_time } = + proof_result_sender; trace!( target: "trie::proof_task", worker_id, hashed_address = ?hashed_address, prefix_set_len = input.prefix_set.len(), - target_slots = input.target_slots.len(), + target_slots_len = input.target_slots.len(), "Processing storage proof" ); let proof_start = Instant::now(); + let result = proof_tx.compute_storage_proof( input, trie_cursor_factory.clone(), @@ -176,13 +248,34 @@ fn storage_worker_loop( let proof_elapsed = proof_start.elapsed(); storage_proofs_processed += 1; - if result_sender.send(result).is_err() { + // Convert storage proof to account multiproof format + let result_msg = match result { + Ok(storage_proof) => { + let multiproof = reth_trie::DecodedMultiProof::from_storage_proof( + hashed_address, + storage_proof, + ); + let stats = crate::stats::ParallelTrieTracker::default().finish(); + Ok((multiproof, stats)) + } + Err(e) => Err(e), + }; + + if sender + .send(ProofResultMessage { + sequence_number: seq, + result: result_msg, + elapsed: start_time.elapsed(), + state, + }) + .is_err() + { tracing::debug!( target: "trie::proof_task", worker_id, hashed_address = ?hashed_address, storage_proofs_processed, - "Storage proof receiver dropped, discarding result" + "Proof result receiver dropped, discarding result" ); } @@ -259,7 +352,7 @@ fn storage_worker_loop( /// # Lifecycle /// /// Each worker initializes its providers, advertises availability, then loops: -/// receive an account job, mark busy, process the work, respond, and mark available again. +/// take a job, mark busy, compute the proof, send the result, and mark available again. /// The loop ends gracefully once the channel closes. 
/// /// # Transaction Reuse @@ -318,11 +411,26 @@ fn account_worker_loop( available_workers.fetch_sub(1, Ordering::Relaxed); match job { - AccountWorkerJob::AccountMultiproof { mut input, result_sender } => { + AccountWorkerJob::AccountMultiproof { input } => { + let AccountMultiproofInput { + targets, + mut prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys, + missed_leaves_storage_roots, + proof_result_sender: + ProofResultContext { + sender: result_tx, + sequence_number: seq, + state, + start_time: start, + }, + } = *input; + let span = tracing::debug_span!( target: "trie::proof_task", "Account multiproof calculation", - targets = input.targets.len(), + targets = targets.len(), worker_id, ); let _span_guard = span.enter(); @@ -333,43 +441,49 @@ fn account_worker_loop( ); let proof_start = Instant::now(); + let mut tracker = ParallelTrieTracker::default(); - let mut storage_prefix_sets = - std::mem::take(&mut input.prefix_sets.storage_prefix_sets); + let mut storage_prefix_sets = std::mem::take(&mut prefix_sets.storage_prefix_sets); let storage_root_targets_len = StorageRootTargets::count( - &input.prefix_sets.account_prefix_set, + &prefix_sets.account_prefix_set, &storage_prefix_sets, ); + tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); let storage_proof_receivers = match dispatch_storage_proofs( &storage_work_tx, - &input.targets, + &targets, &mut storage_prefix_sets, - input.collect_branch_node_masks, - input.multi_added_removed_keys.as_ref(), + collect_branch_node_masks, + multi_added_removed_keys.as_ref(), ) { Ok(receivers) => receivers, Err(error) => { - let _ = result_sender.send(Err(error)); + // Send error through result channel + error!(target: "trie::proof_task", "Failed to dispatch storage proofs: {error}"); + let _ = result_tx.send(ProofResultMessage { + sequence_number: seq, + result: Err(error), + elapsed: start.elapsed(), + state, + }); continue; } }; // Use the missed leaves cache passed from the multiproof manager - let missed_leaves_storage_roots = &input.missed_leaves_storage_roots; - - let account_prefix_set = std::mem::take(&mut input.prefix_sets.account_prefix_set); + let account_prefix_set = std::mem::take(&mut prefix_sets.account_prefix_set); let ctx = AccountMultiproofParams { - targets: &input.targets, + targets: &targets, prefix_set: account_prefix_set, - collect_branch_node_masks: input.collect_branch_node_masks, - multi_added_removed_keys: input.multi_added_removed_keys.as_ref(), + collect_branch_node_masks, + multi_added_removed_keys: multi_added_removed_keys.as_ref(), storage_proof_receivers, - missed_leaves_storage_roots, + missed_leaves_storage_roots: missed_leaves_storage_roots.as_ref(), }; let result = build_account_multiproof_with_storage_roots( @@ -380,11 +494,21 @@ fn account_worker_loop( ); let proof_elapsed = proof_start.elapsed(); + let total_elapsed = start.elapsed(); let stats = tracker.finish(); let result = result.map(|proof| (proof, stats)); account_proofs_processed += 1; - if result_sender.send(result).is_err() { + // Send result to MultiProofTask + if result_tx + .send(ProofResultMessage { + sequence_number: seq, + result, + elapsed: total_elapsed, + state, + }) + .is_err() + { tracing::debug!( target: "trie::proof_task", worker_id, @@ -396,6 +520,7 @@ fn account_worker_loop( trace!( target: "trie::proof_task", proof_time_us = proof_elapsed.as_micros(), + total_elapsed_us = total_elapsed.as_micros(), total_processed = account_proofs_processed, "Account multiproof completed" ); @@ -522,7 +647,7 @@ 
where Some(receiver) => { // Block on this specific storage proof receiver - enables interleaved // parallelism - let proof = receiver.recv().map_err(|_| { + let proof_msg = receiver.recv().map_err(|_| { ParallelStateRootError::StorageRoot( reth_execution_errors::StorageRootError::Database( DatabaseError::Other(format!( @@ -530,7 +655,17 @@ where )), ), ) - })??; + })?; + + // Extract storage proof from the multiproof wrapper + let (mut multiproof, _stats) = proof_msg.result?; + let proof = + multiproof.storages.remove(&hashed_address).ok_or_else(|| { + ParallelStateRootError::Other(format!( + "storage proof not found in multiproof for {hashed_address}" + )) + })?; + let root = proof.root; collected_decoded_storages.insert(hashed_address, proof); root @@ -580,8 +715,13 @@ where // Consume remaining storage proof receivers for accounts not encountered during trie walk. for (hashed_address, receiver) in storage_proof_receivers { - if let Ok(Ok(proof)) = receiver.recv() { - collected_decoded_storages.insert(hashed_address, proof); + if let Ok(proof_msg) = receiver.recv() { + // Extract storage proof from the multiproof wrapper + if let Ok((mut multiproof, _stats)) = proof_msg.result && + let Some(proof) = multiproof.storages.remove(&hashed_address) + { + collected_decoded_storages.insert(hashed_address, proof); + } } } @@ -621,16 +761,19 @@ fn dispatch_storage_proofs( storage_prefix_sets: &mut B256Map, with_branch_node_masks: bool, multi_added_removed_keys: Option<&Arc>, -) -> Result>, ParallelStateRootError> { +) -> Result>, ParallelStateRootError> { let mut storage_proof_receivers = B256Map::with_capacity_and_hasher(targets.len(), Default::default()); - // Queue all storage proofs to worker pool + // Dispatch all storage proofs to worker pool for (hashed_address, target_slots) in targets.iter() { let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); - // Always queue a storage proof so we obtain the storage root even when no slots are - // requested. + // Create channel for receiving ProofResultMessage + let (result_tx, result_rx) = crossbeam_channel::unbounded(); + let start = Instant::now(); + + // Create computation input (data only, no communication channel) let input = StorageProofInput::new( *hashed_address, prefix_set, @@ -639,11 +782,18 @@ fn dispatch_storage_proofs( multi_added_removed_keys.cloned(), ); - let (sender, receiver) = channel(); - - // If queuing fails, propagate error up (no fallback) + // Always dispatch a storage proof so we obtain the storage root even when no slots are + // requested. storage_work_tx - .send(StorageWorkerJob::StorageProof { input, result_sender: sender }) + .send(StorageWorkerJob::StorageProof { + input, + proof_result_sender: ProofResultContext::new( + result_tx, + 0, + HashedPostState::default(), + start, + ), + }) .map_err(|_| { ParallelStateRootError::Other(format!( "Failed to queue storage proof for {}: storage worker pool unavailable", @@ -651,7 +801,7 @@ fn dispatch_storage_proofs( )) })?; - storage_proof_receivers.insert(*hashed_address, receiver); + storage_proof_receivers.insert(*hashed_address, result_rx); } Ok(storage_proof_receivers) @@ -770,7 +920,7 @@ where } } -/// This represents an input for a storage proof. +/// Input parameters for storage proof computation. #[derive(Debug)] pub struct StorageProofInput { /// The hashed address for which the proof is calculated. 
@@ -818,6 +968,8 @@ pub struct AccountMultiproofInput { pub multi_added_removed_keys: Option>, /// Cached storage proof roots for missed leaves encountered during account trie walk. pub missed_leaves_storage_roots: Arc>, + /// Context for sending the proof result. + pub proof_result_sender: ProofResultContext, } /// Parameters for building an account multiproof with pre-computed storage roots. @@ -831,7 +983,7 @@ struct AccountMultiproofParams<'a> { /// Provided by the user to give the necessary context to retain extra proofs. multi_added_removed_keys: Option<&'a Arc>, /// Receivers for storage proofs being computed in parallel. - storage_proof_receivers: B256Map>, + storage_proof_receivers: B256Map>, /// Cached storage proof roots for missed leaves encountered during account trie walk. missed_leaves_storage_roots: &'a DashMap, } @@ -842,9 +994,7 @@ enum AccountWorkerJob { /// Account multiproof computation request AccountMultiproof { /// Account multiproof input parameters - input: AccountMultiproofInput, - /// Channel to send result back to original caller - result_sender: Sender, + input: Box, }, /// Blinded account node retrieval request BlindedAccountNode { @@ -1058,33 +1208,74 @@ impl ProofWorkerHandle { } /// Dispatch a storage proof computation to storage worker pool + /// + /// The result will be sent via the `proof_result_sender` channel. pub fn dispatch_storage_proof( &self, input: StorageProofInput, - ) -> Result, ProviderError> { - let (tx, rx) = channel(); + proof_result_sender: ProofResultContext, + ) -> Result<(), ProviderError> { self.storage_work_tx - .send(StorageWorkerJob::StorageProof { input, result_sender: tx }) - .map_err(|_| { - ProviderError::other(std::io::Error::other("storage workers unavailable")) - })?; + .send(StorageWorkerJob::StorageProof { input, proof_result_sender }) + .map_err(|err| { + let error = + ProviderError::other(std::io::Error::other("storage workers unavailable")); + + if let StorageWorkerJob::StorageProof { proof_result_sender, .. } = err.0 { + let ProofResultContext { + sender: result_tx, + sequence_number: seq, + state, + start_time: start, + } = proof_result_sender; + + let _ = result_tx.send(ProofResultMessage { + sequence_number: seq, + result: Err(ParallelStateRootError::Provider(error.clone())), + elapsed: start.elapsed(), + state, + }); + } - Ok(rx) + error + }) } - /// Queue an account multiproof computation + /// Dispatch an account multiproof computation + /// + /// The result will be sent via the `result_sender` channel included in the input. pub fn dispatch_account_multiproof( &self, input: AccountMultiproofInput, - ) -> Result, ProviderError> { - let (tx, rx) = channel(); + ) -> Result<(), ProviderError> { self.account_work_tx - .send(AccountWorkerJob::AccountMultiproof { input, result_sender: tx }) - .map_err(|_| { - ProviderError::other(std::io::Error::other("account workers unavailable")) - })?; + .send(AccountWorkerJob::AccountMultiproof { input: Box::new(input) }) + .map_err(|err| { + let error = + ProviderError::other(std::io::Error::other("account workers unavailable")); + + if let AccountWorkerJob::AccountMultiproof { input } = err.0 { + let AccountMultiproofInput { + proof_result_sender: + ProofResultContext { + sender: result_tx, + sequence_number: seq, + state, + start_time: start, + }, + .. 
+ } = *input; + + let _ = result_tx.send(ProofResultMessage { + sequence_number: seq, + result: Err(ParallelStateRootError::Provider(error.clone())), + elapsed: start.elapsed(), + state, + }); + } - Ok(rx) + error + }) } /// Dispatch blinded storage node request to storage worker pool From a69bbb3d7bc4f087b4362fc38e564a8da281e7cc Mon Sep 17 00:00:00 2001 From: Jennifer Date: Fri, 24 Oct 2025 14:50:53 +0100 Subject: [PATCH 193/371] fix: hive tests consume test suite (#19240) Co-authored-by: Federico Gimenez --- .github/assets/hive/build_simulators.sh | 7 ++++--- .github/assets/hive/expected_failures.yaml | 7 ------- .github/assets/hive/load_images.sh | 4 ++-- .github/workflows/hive.yml | 2 +- 4 files changed, 7 insertions(+), 13 deletions(-) diff --git a/.github/assets/hive/build_simulators.sh b/.github/assets/hive/build_simulators.sh index 709ecc51e01..d65e609e700 100755 --- a/.github/assets/hive/build_simulators.sh +++ b/.github/assets/hive/build_simulators.sh @@ -11,7 +11,8 @@ go build . # Run each hive command in the background for each simulator and wait echo "Building images" -./hive -client reth --sim "ethereum/eest" --sim.buildarg fixtures=https://github.com/ethereum/execution-spec-tests/releases/download/v5.3.0/fixtures_develop.tar.gz --sim.buildarg branch=v5.3.0 -sim.timelimit 1s || true & +# TODO: test code has been moved from https://github.com/ethereum/execution-spec-tests to https://github.com/ethereum/execution-specs we need to pin eels branch with `--sim.buildarg branch=` once we have the fusaka release tagged on the new repo +./hive -client reth --sim "ethereum/eels" --sim.buildarg fixtures=https://github.com/ethereum/execution-spec-tests/releases/download/v5.3.0/fixtures_develop.tar.gz -sim.timelimit 1s || true & ./hive -client reth --sim "ethereum/engine" -sim.timelimit 1s || true & ./hive -client reth --sim "devp2p" -sim.timelimit 1s || true & ./hive -client reth --sim "ethereum/rpc-compat" -sim.timelimit 1s || true & @@ -27,8 +28,8 @@ docker save hive/hiveproxy:latest -o ../hive_assets/hiveproxy.tar & saving_pids+ docker save hive/simulators/devp2p:latest -o ../hive_assets/devp2p.tar & saving_pids+=( $! ) docker save hive/simulators/ethereum/engine:latest -o ../hive_assets/engine.tar & saving_pids+=( $! ) docker save hive/simulators/ethereum/rpc-compat:latest -o ../hive_assets/rpc_compat.tar & saving_pids+=( $! ) -docker save hive/simulators/ethereum/eest/consume-engine:latest -o ../hive_assets/eest_engine.tar & saving_pids+=( $! ) -docker save hive/simulators/ethereum/eest/consume-rlp:latest -o ../hive_assets/eest_rlp.tar & saving_pids+=( $! ) +docker save hive/simulators/ethereum/eels/consume-engine:latest -o ../hive_assets/eels_engine.tar & saving_pids+=( $! ) +docker save hive/simulators/ethereum/eels/consume-rlp:latest -o ../hive_assets/eels_rlp.tar & saving_pids+=( $! ) docker save hive/simulators/smoke/genesis:latest -o ../hive_assets/smoke_genesis.tar & saving_pids+=( $! ) docker save hive/simulators/smoke/network:latest -o ../hive_assets/smoke_network.tar & saving_pids+=( $! ) docker save hive/simulators/ethereum/sync:latest -o ../hive_assets/ethereum_sync.tar & saving_pids+=( $! 
) diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index 2650d9a2d90..f4f20ae832e 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -49,7 +49,6 @@ engine-auth: [] # realistic on mainnet # 7251 related tests - modified contract, not necessarily practical on mainnet, # 7594: https://github.com/paradigmxyz/reth/issues/18975 -# 4844: reth unwinds from block 2 to genesis but tests expect to unwind to block 1 if chain.rlp has an invalid block # 7610: tests are related to empty account that has storage, close to impossible to trigger # worth re-visiting when more of these related tests are passing eest/consume-engine: @@ -132,12 +131,6 @@ eest/consume-engine: - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth - - tests/cancun/eip4844_blobs/test_blob_txs.py::test_invalid_tx_max_fee_per_blob_gas[fork_Cancun-insufficient_max_fee_per_blob_gas-blockchain_test_engine-account_balance_modifier_1000000000]-reth - - tests/cancun/eip4844_blobs/test_blob_txs.py::test_invalid_tx_max_fee_per_blob_gas[fork_Cancun-invalid_max_fee_per_blob_gas-blockchain_test_engine-account_balance_modifier_1000000000]-reth - - tests/cancun/eip4844_blobs/test_blob_txs.py::test_invalid_tx_max_fee_per_blob_gas[fork_Osaka-insufficient_max_fee_per_blob_gas-blockchain_test_engine-account_balance_modifier_1000000000]-reth - - tests/cancun/eip4844_blobs/test_blob_txs.py::test_invalid_tx_max_fee_per_blob_gas[fork_Osaka-invalid_max_fee_per_blob_gas-blockchain_test_engine-account_balance_modifier_1000000000]-reth - - tests/cancun/eip4844_blobs/test_blob_txs.py::test_invalid_tx_max_fee_per_blob_gas[fork_Prague-insufficient_max_fee_per_blob_gas-blockchain_test_engine-account_balance_modifier_1000000000]-reth - - tests/cancun/eip4844_blobs/test_blob_txs.py::test_invalid_tx_max_fee_per_blob_gas[fork_Prague-invalid_max_fee_per_blob_gas-blockchain_test_engine-account_balance_modifier_1000000000]-reth eest/consume-rlp: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test-zero_nonce]-reth - tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py::test_system_contract_errors[fork_Prague-blockchain_test_engine-system_contract_reaches_gas_limit-system_contract_0x0000bbddc7ce488642fb579f8b00f3a590007251]-reth diff --git a/.github/assets/hive/load_images.sh b/.github/assets/hive/load_images.sh index 37a2f82de54..e7dd7c99f4a 100755 --- a/.github/assets/hive/load_images.sh +++ b/.github/assets/hive/load_images.sh @@ -11,8 +11,8 @@ IMAGES=( "/tmp/smoke_genesis.tar" "/tmp/smoke_network.tar" "/tmp/ethereum_sync.tar" - "/tmp/eest_engine.tar" - "/tmp/eest_rlp.tar" + "/tmp/eels_engine.tar" + "/tmp/eels_rlp.tar" "/tmp/reth_image.tar" ) diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index d606ddab7ab..ae147977580 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -161,7 +161,7 @@ jobs: limit: 
.*tests/osaka.* - sim: ethereum/eels/consume-rlp limit: .*tests/prague.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/cancun.* - sim: ethereum/eels/consume-rlp limit: .*tests/shanghai.* From dc781126c249f020d7abdac197fdd4bcb848ac28 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Fri, 24 Oct 2025 15:04:01 +0100 Subject: [PATCH 194/371] feat(trie): proof task tracing improvements (#19276) --- .../tree/src/tree/payload_processor/mod.rs | 7 +- .../src/tree/payload_processor/multiproof.rs | 2 +- crates/trie/parallel/src/proof.rs | 2 +- crates/trie/parallel/src/proof_task.rs | 96 ++++++------------- 4 files changed, 38 insertions(+), 69 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index ac16c60dd67..42f523700fd 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -49,7 +49,7 @@ use std::{ }, time::Instant, }; -use tracing::{debug, debug_span, instrument, warn}; +use tracing::{debug, debug_span, instrument, span::EnteredSpan, warn}; mod configured_sparse_trie; pub mod executor; @@ -234,7 +234,7 @@ where ); let storage_worker_count = config.storage_worker_count(); let account_worker_count = config.account_worker_count(); - let proof_handle = ProofWorkerHandle::new( + let (proof_handle, proof_workers_span) = ProofWorkerHandle::new( self.executor.handle().clone(), consistent_view, task_ctx, @@ -280,6 +280,7 @@ where prewarm_handle, state_root: Some(state_root_rx), transactions: execution_rx, + _proof_workers_span: Some(proof_workers_span), }) } @@ -304,6 +305,7 @@ where prewarm_handle, state_root: None, transactions: execution_rx, + _proof_workers_span: None, } } @@ -490,6 +492,7 @@ pub struct PayloadHandle { state_root: Option>>, /// Stream of block transactions transactions: mpsc::Receiver>, + _proof_workers_span: Option, } impl PayloadHandle { diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 755f7a7d0d7..26315551f9e 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -1282,7 +1282,7 @@ mod tests { config.prefix_sets.clone(), ); let consistent_view = ConsistentDbView::new(factory, None); - let proof_handle = ProofWorkerHandle::new(rt_handle, consistent_view, task_ctx, 1, 1); + let (proof_handle, _) = ProofWorkerHandle::new(rt_handle, consistent_view, task_ctx, 1, 1); let (to_sparse_trie, _receiver) = std::sync::mpsc::channel(); MultiProofTask::new(config, proof_handle, to_sparse_trie, Some(1)) diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index e8b39f38ec6..69cf55c93b2 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -336,7 +336,7 @@ mod tests { let task_ctx = ProofTaskCtx::new(Default::default(), Default::default(), Default::default()); - let proof_worker_handle = + let (proof_worker_handle, _) = ProofWorkerHandle::new(rt.handle().clone(), consistent_view, task_ctx, 1, 1); let parallel_result = ParallelProof::new( diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index c24e2ce8347..2e1e820ec3d 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -77,7 +77,7 @@ use std::{ time::{Duration, Instant}, }; 
use tokio::runtime::Handle; -use tracing::{debug_span, error, trace}; +use tracing::{debug, debug_span, error, span::EnteredSpan, trace}; #[cfg(feature = "metrics")] use crate::proof_task_metrics::ProofTaskTrieMetrics; @@ -196,7 +196,7 @@ fn storage_worker_loop( view.provider_ro().expect("Storage worker failed to initialize: database unavailable"); let proof_tx = ProofTaskTx::new(provider.into_tx(), task_ctx, worker_id); - tracing::debug!( + trace!( target: "trie::proof_task", worker_id, "Storage worker started" @@ -270,7 +270,7 @@ fn storage_worker_loop( }) .is_err() { - tracing::debug!( + trace!( target: "trie::proof_task", worker_id, hashed_address = ?hashed_address, @@ -309,7 +309,7 @@ fn storage_worker_loop( storage_nodes_processed += 1; if result_sender.send(result).is_err() { - tracing::debug!( + trace!( target: "trie::proof_task", worker_id, ?account, @@ -335,7 +335,7 @@ fn storage_worker_loop( } } - tracing::debug!( + trace!( target: "trie::proof_task", worker_id, storage_proofs_processed, @@ -384,7 +384,7 @@ fn account_worker_loop( view.provider_ro().expect("Account worker failed to initialize: database unavailable"); let proof_tx = ProofTaskTx::new(provider.into_tx(), task_ctx, worker_id); - tracing::debug!( + trace!( target: "trie::proof_task", worker_id, "Account worker started" @@ -427,7 +427,7 @@ fn account_worker_loop( }, } = *input; - let span = tracing::debug_span!( + let span = debug_span!( target: "trie::proof_task", "Account multiproof calculation", targets = targets.len(), @@ -509,7 +509,7 @@ fn account_worker_loop( }) .is_err() { - tracing::debug!( + trace!( target: "trie::proof_task", worker_id, account_proofs_processed, @@ -531,7 +531,7 @@ fn account_worker_loop( } AccountWorkerJob::BlindedAccountNode { path, result_sender } => { - let span = tracing::debug_span!( + let span = debug_span!( target: "trie::proof_task", "Blinded account node calculation", ?path, @@ -551,7 +551,7 @@ fn account_worker_loop( account_nodes_processed += 1; if result_sender.send(result).is_err() { - tracing::debug!( + trace!( target: "trie::proof_task", worker_id, ?path, @@ -574,7 +574,7 @@ fn account_worker_loop( } } - tracing::debug!( + trace!( target: "trie::proof_task", worker_id, account_proofs_processed, @@ -879,7 +879,7 @@ where multi_added_removed_keys.unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); let added_removed_keys = multi_added_removed_keys.get_storage(&hashed_address); - let span = tracing::debug_span!( + let span = debug_span!( target: "trie::proof_task", "Storage proof calculation", hashed_address = ?hashed_address, @@ -1067,7 +1067,7 @@ impl ProofWorkerHandle { task_ctx: ProofTaskCtx, storage_worker_count: usize, account_worker_count: usize, - ) -> Self + ) -> (Self, EnteredSpan) where Factory: DatabaseProviderFactory + Clone + 'static, { @@ -1079,20 +1079,20 @@ impl ProofWorkerHandle { let storage_available_workers = Arc::new(AtomicUsize::new(0)); let account_available_workers = Arc::new(AtomicUsize::new(0)); - tracing::debug!( + let parent_span = + debug_span!(target: "trie::proof_task", "proof workers", ?storage_worker_count) + .entered(); + + debug!( target: "trie::proof_task", storage_worker_count, account_worker_count, "Spawning proof worker pools" ); - let storage_worker_parent = - debug_span!(target: "trie::proof_task", "Storage worker tasks", ?storage_worker_count); - let _guard = storage_worker_parent.enter(); - // Spawn storage workers for worker_id in 0..storage_worker_count { - let parent_span = debug_span!(target: "trie::proof_task", "Storage 
worker", ?worker_id); + let span = debug_span!(target: "trie::proof_task", "Storage worker", ?worker_id); let view_clone = view.clone(); let task_ctx_clone = task_ctx.clone(); let work_rx_clone = storage_work_rx.clone(); @@ -1102,7 +1102,7 @@ impl ProofWorkerHandle { #[cfg(feature = "metrics")] let metrics = ProofTaskTrieMetrics::default(); - let _guard = parent_span.enter(); + let _guard = span.enter(); storage_worker_loop( view_clone, task_ctx_clone, @@ -1113,23 +1113,11 @@ impl ProofWorkerHandle { metrics, ) }); - - tracing::debug!( - target: "trie::proof_task", - worker_id, - "Storage worker spawned successfully" - ); } - drop(_guard); - - let account_worker_parent = - debug_span!(target: "trie::proof_task", "Account worker tasks", ?account_worker_count); - let _guard = account_worker_parent.enter(); - // Spawn account workers for worker_id in 0..account_worker_count { - let parent_span = debug_span!(target: "trie::proof_task", "Account worker", ?worker_id); + let span = debug_span!(target: "trie::proof_task", "Account worker", ?worker_id); let view_clone = view.clone(); let task_ctx_clone = task_ctx.clone(); let work_rx_clone = account_work_rx.clone(); @@ -1140,7 +1128,7 @@ impl ProofWorkerHandle { #[cfg(feature = "metrics")] let metrics = ProofTaskTrieMetrics::default(); - let _guard = parent_span.enter(); + let _guard = span.enter(); account_worker_loop( view_clone, task_ctx_clone, @@ -1152,41 +1140,19 @@ impl ProofWorkerHandle { metrics, ) }); - - tracing::debug!( - target: "trie::proof_task", - worker_id, - "Account worker spawned successfully" - ); } - drop(_guard); - - Self::new_handle( - storage_work_tx, - account_work_tx, - storage_available_workers, - account_available_workers, + ( + Self { + storage_work_tx, + account_work_tx, + storage_available_workers, + account_available_workers, + }, + parent_span, ) } - /// Creates a new [`ProofWorkerHandle`] with direct access to worker pools. - /// - /// This is an internal constructor used for creating handles. - const fn new_handle( - storage_work_tx: CrossbeamSender, - account_work_tx: CrossbeamSender, - storage_available_workers: Arc, - account_available_workers: Arc, - ) -> Self { - Self { - storage_work_tx, - account_work_tx, - storage_available_workers, - account_available_workers, - } - } - /// Returns true if there are available storage workers to process tasks. pub fn has_available_storage_workers(&self) -> bool { self.storage_available_workers.load(Ordering::Relaxed) > 0 @@ -1392,7 +1358,7 @@ mod tests { let view = ConsistentDbView::new(factory, None); let ctx = test_ctx(); - let proof_handle = ProofWorkerHandle::new(handle.clone(), view, ctx, 5, 3); + let (proof_handle, _) = ProofWorkerHandle::new(handle.clone(), view, ctx, 5, 3); // Verify handle can be cloned let _cloned_handle = proof_handle.clone(); From f177103937935b422d148adbfb94619d18e1ba2c Mon Sep 17 00:00:00 2001 From: AJStonewee Date: Fri, 24 Oct 2025 11:13:12 -0400 Subject: [PATCH 195/371] fix(trie): correct comment in sparse_trie_reveal_node_1 test (#19193) --- crates/trie/sparse/src/trie.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 8500ea400b5..ab0506b9364 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -3203,7 +3203,7 @@ mod tests { } /// We have three leaves that share the same prefix: 0x00, 0x01 and 0x02. Hash builder trie has - /// only nodes 0x00 and 0x01, and we have proofs for them. 
Node B is new and inserted in the + /// only nodes 0x00 and 0x02, and we have proofs for them. Node 0x01 is new and inserted in the /// sparse trie first. /// /// 1. Reveal the hash builder proof to leaf 0x00 in the sparse trie. From 25f0d896d92acab6e4a6c0de7a1c0c3adb954ed2 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Fri, 24 Oct 2025 16:57:51 +0100 Subject: [PATCH 196/371] chore(trie): do not create a parent span for proof worker handle (#19281) --- .../tree/src/tree/payload_processor/mod.rs | 7 +--- .../src/tree/payload_processor/multiproof.rs | 2 +- crates/trie/parallel/src/proof.rs | 2 +- crates/trie/parallel/src/proof_task.rs | 37 ++++++++++--------- 4 files changed, 23 insertions(+), 25 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 42f523700fd..ac16c60dd67 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -49,7 +49,7 @@ use std::{ }, time::Instant, }; -use tracing::{debug, debug_span, instrument, span::EnteredSpan, warn}; +use tracing::{debug, debug_span, instrument, warn}; mod configured_sparse_trie; pub mod executor; @@ -234,7 +234,7 @@ where ); let storage_worker_count = config.storage_worker_count(); let account_worker_count = config.account_worker_count(); - let (proof_handle, proof_workers_span) = ProofWorkerHandle::new( + let proof_handle = ProofWorkerHandle::new( self.executor.handle().clone(), consistent_view, task_ctx, @@ -280,7 +280,6 @@ where prewarm_handle, state_root: Some(state_root_rx), transactions: execution_rx, - _proof_workers_span: Some(proof_workers_span), }) } @@ -305,7 +304,6 @@ where prewarm_handle, state_root: None, transactions: execution_rx, - _proof_workers_span: None, } } @@ -492,7 +490,6 @@ pub struct PayloadHandle { state_root: Option>>, /// Stream of block transactions transactions: mpsc::Receiver>, - _proof_workers_span: Option, } impl PayloadHandle { diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 26315551f9e..755f7a7d0d7 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -1282,7 +1282,7 @@ mod tests { config.prefix_sets.clone(), ); let consistent_view = ConsistentDbView::new(factory, None); - let (proof_handle, _) = ProofWorkerHandle::new(rt_handle, consistent_view, task_ctx, 1, 1); + let proof_handle = ProofWorkerHandle::new(rt_handle, consistent_view, task_ctx, 1, 1); let (to_sparse_trie, _receiver) = std::sync::mpsc::channel(); MultiProofTask::new(config, proof_handle, to_sparse_trie, Some(1)) diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 69cf55c93b2..e8b39f38ec6 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -336,7 +336,7 @@ mod tests { let task_ctx = ProofTaskCtx::new(Default::default(), Default::default(), Default::default()); - let (proof_worker_handle, _) = + let proof_worker_handle = ProofWorkerHandle::new(rt.handle().clone(), consistent_view, task_ctx, 1, 1); let parallel_result = ParallelProof::new( diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 2e1e820ec3d..c05f2ad7286 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -77,7 +77,7 @@ use std::{ 
time::{Duration, Instant}, }; use tokio::runtime::Handle; -use tracing::{debug, debug_span, error, span::EnteredSpan, trace}; +use tracing::{debug, debug_span, error, trace}; #[cfg(feature = "metrics")] use crate::proof_task_metrics::ProofTaskTrieMetrics; @@ -1067,7 +1067,7 @@ impl ProofWorkerHandle { task_ctx: ProofTaskCtx, storage_worker_count: usize, account_worker_count: usize, - ) -> (Self, EnteredSpan) + ) -> Self where Factory: DatabaseProviderFactory + Clone + 'static, { @@ -1079,10 +1079,6 @@ impl ProofWorkerHandle { let storage_available_workers = Arc::new(AtomicUsize::new(0)); let account_available_workers = Arc::new(AtomicUsize::new(0)); - let parent_span = - debug_span!(target: "trie::proof_task", "proof workers", ?storage_worker_count) - .entered(); - debug!( target: "trie::proof_task", storage_worker_count, @@ -1090,9 +1086,12 @@ impl ProofWorkerHandle { "Spawning proof worker pools" ); + let parent_span = + debug_span!(target: "trie::proof_task", "storage proof workers", ?storage_worker_count) + .entered(); // Spawn storage workers for worker_id in 0..storage_worker_count { - let span = debug_span!(target: "trie::proof_task", "Storage worker", ?worker_id); + let span = debug_span!(target: "trie::proof_task", "storage worker", ?worker_id); let view_clone = view.clone(); let task_ctx_clone = task_ctx.clone(); let work_rx_clone = storage_work_rx.clone(); @@ -1114,10 +1113,14 @@ impl ProofWorkerHandle { ) }); } + drop(parent_span); + let parent_span = + debug_span!(target: "trie::proof_task", "account proof workers", ?storage_worker_count) + .entered(); // Spawn account workers for worker_id in 0..account_worker_count { - let span = debug_span!(target: "trie::proof_task", "Account worker", ?worker_id); + let span = debug_span!(target: "trie::proof_task", "account worker", ?worker_id); let view_clone = view.clone(); let task_ctx_clone = task_ctx.clone(); let work_rx_clone = account_work_rx.clone(); @@ -1141,16 +1144,14 @@ impl ProofWorkerHandle { ) }); } + drop(parent_span); - ( - Self { - storage_work_tx, - account_work_tx, - storage_available_workers, - account_available_workers, - }, - parent_span, - ) + Self { + storage_work_tx, + account_work_tx, + storage_available_workers, + account_available_workers, + } } /// Returns true if there are available storage workers to process tasks. @@ -1358,7 +1359,7 @@ mod tests { let view = ConsistentDbView::new(factory, None); let ctx = test_ctx(); - let (proof_handle, _) = ProofWorkerHandle::new(handle.clone(), view, ctx, 5, 3); + let proof_handle = ProofWorkerHandle::new(handle.clone(), view, ctx, 5, 3); // Verify handle can be cloned let _cloned_handle = proof_handle.clone(); From 0c8417288b46a782091ecea04bdc0e5bd133ea73 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Fri, 24 Oct 2025 19:39:08 +0100 Subject: [PATCH 197/371] feat(tracing): set default OTLP log level to WARN (#19283) --- crates/tracing/src/layers.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 660d40ae464..33f8c90ada5 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -21,18 +21,19 @@ pub type FileWorkerGuard = tracing_appender::non_blocking::WorkerGuard; /// A boxed tracing [Layer]. 
pub(crate) type BoxedLayer = Box + Send + Sync>; -/// Default [directives](Directive) for [`EnvFilter`] which disable high-frequency debug logs from -/// dependencies such as `hyper`, `hickory-resolver`, `hickory_proto`, `discv5`, `jsonrpsee-server`, -/// the `opentelemetry_*` crates, and `hyper_util::client::legacy::pool`. +/// Default [directives](Directive) for [`EnvFilter`] which: +/// 1. Disable high-frequency debug logs from dependencies such as `hyper`, `hickory-resolver`, +/// `hickory_proto`, `discv5`, `jsonrpsee-server`, and `hyper_util::client::legacy::pool`. +/// 2. Set `opentelemetry_*` crates log level to `WARN`, as `DEBUG` is too noisy. const DEFAULT_ENV_FILTER_DIRECTIVES: [&str; 9] = [ "hyper::proto::h1=off", "hickory_resolver=off", "hickory_proto=off", "discv5=off", "jsonrpsee-server=off", - "opentelemetry-otlp=off", - "opentelemetry_sdk=off", - "opentelemetry-http=off", + "opentelemetry-otlp=warn", + "opentelemetry_sdk=warn", + "opentelemetry-http=warn", "hyper_util::client::legacy::pool=off", ]; From e22a51176473e5f4eee80d776f1905f46e74b308 Mon Sep 17 00:00:00 2001 From: phrwlk Date: Sat, 25 Oct 2025 09:37:22 +0300 Subject: [PATCH 198/371] fix(node): classify connect_async failures as WebSocket and use Url parse error (#19286) --- crates/node/ethstats/src/ethstats.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/crates/node/ethstats/src/ethstats.rs b/crates/node/ethstats/src/ethstats.rs index b9fe5e47272..7592e93ae9d 100644 --- a/crates/node/ethstats/src/ethstats.rs +++ b/crates/node/ethstats/src/ethstats.rs @@ -109,10 +109,9 @@ where "Attempting to connect to EthStats server at {}", self.credentials.host ); let full_url = format!("ws://{}/api", self.credentials.host); - let url = Url::parse(&full_url) - .map_err(|e| EthStatsError::InvalidUrl(format!("Invalid URL: {full_url} - {e}")))?; + let url = Url::parse(&full_url).map_err(EthStatsError::Url)?; - match timeout(CONNECT_TIMEOUT, connect_async(url.to_string())).await { + match timeout(CONNECT_TIMEOUT, connect_async(url.as_str())).await { Ok(Ok((ws_stream, _))) => { debug!( target: "ethstats", @@ -123,7 +122,7 @@ where self.login().await?; Ok(()) } - Ok(Err(e)) => Err(EthStatsError::InvalidUrl(e.to_string())), + Ok(Err(e)) => Err(EthStatsError::WebSocket(e)), Err(_) => { debug!(target: "ethstats", "Connection to EthStats server timed out"); Err(EthStatsError::Timeout) From 159ff01cd266172eec0a79d48aa2f85b69ffb1c4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 26 Oct 2025 17:37:00 +0100 Subject: [PATCH 199/371] chore(deps): weekly `cargo update` (#19300) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 325 +++++++++++++++++++++++++++-------------------------- 1 file changed, 164 insertions(+), 161 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dbfc2f99a6f..b8c0da68164 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -88,9 +88,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf01dd83a1ca5e4807d0ca0223c9615e211ce5db0a9fd1443c2778cacf89b546" +checksum = "0bbb778f50ecb0cebfb5c05580948501927508da7bd628833a8c4bd8545e23e2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -253,9 +253,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.22.4" +version = "0.22.5" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83ce19ea6140497670b1b7e721f9a9ce88022fe475a5e4e6a68a403499cca209" +checksum = "28bd79e109f2b3ff81ed1a93ed3d07cf175ca627fd4fad176df721041cc40dcc" dependencies = [ "alloy-consensus", "alloy-eips", @@ -290,9 +290,9 @@ dependencies = [ [[package]] name = "alloy-hardforks" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b16ee6b2c7d39da592d30a5f9607a83f50ee5ec2a2c301746cc81e91891f4ca" +checksum = "cd78f8e1c274581c663d7949c863b10c8b015e48f2774a4b8e8efc82d43ea95c" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -370,9 +370,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.22.4" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d7aeaf6051f53880a65b547c43e3b05ee42f68236b1f43f013abfe4eadc47bb" +checksum = "35db78840a29b14fec51f3399a6dc82ecc815a5766eb80b32e69a0c92adddc14" dependencies = [ "alloy-consensus", "alloy-eips", @@ -388,9 +388,9 @@ dependencies = [ [[package]] name = "alloy-op-hardforks" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af8bb236fc008fd3b83b2792e30ae79617a99ffc4c3f584f0c9b4ce0a2da52de" +checksum = "777759314eaa14fb125c1deba5cbc06eee953bbe77bc7cc60b4e8685bd03479e" dependencies = [ "alloy-chains", "alloy-hardforks", @@ -515,7 +515,7 @@ checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -765,7 +765,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -781,7 +781,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "syn-solidity", "tiny-keccak", ] @@ -798,7 +798,7 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "syn-solidity", ] @@ -931,7 +931,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1016,7 +1016,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1158,7 +1158,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1196,7 +1196,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1285,7 +1285,7 @@ checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1402,7 +1402,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1413,7 +1413,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1451,7 +1451,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1569,7 +1569,7 @@ version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "cexpr", "clang-sys", "itertools 0.13.0", @@ -1578,7 +1578,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1587,7 +1587,7 @@ version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "cexpr", "clang-sys", "itertools 0.13.0", @@ -1596,7 +1596,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -1638,12 +1638,12 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.4" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" dependencies = [ "arbitrary", - "serde", + "serde_core", ] [[package]] @@ -1695,7 +1695,7 @@ version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c340fe0f0b267787095cbe35240c6786ff19da63ec7b69367ba338eace8169b" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "boa_interner", "boa_macros", "boa_string", @@ -1711,7 +1711,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f620c3f06f51e65c0504ddf04978be1b814ac6586f0b45f6019801ab5efd37f9" dependencies = [ "arrayvec", - "bitflags 2.9.4", + "bitflags 2.10.0", "boa_ast", "boa_gc", "boa_interner", @@ -1786,7 +1786,7 @@ checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "synstructure", ] @@ -1796,7 +1796,7 @@ version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cc142dac798cdc6e2dbccfddeb50f36d2523bb977a976e19bdb3ae19b740804" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "boa_ast", "boa_interner", "boa_macros", @@ -1914,7 +1914,7 @@ checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2116,9 +2116,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.49" +version = "4.5.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4512b90fa68d3a9932cea5184017c5d200f5921df706d45e853537dea51508f" +checksum = "0c2cfd7bf8a6017ddaa4e32ffe7403d547790db06bd171c1c53926faab501623" dependencies = [ "clap_builder", "clap_derive", @@ -2126,9 +2126,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.49" +version = "4.5.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0025e98baa12e766c67ba13ff4695a887a1eba19569aad00a472546795bd6730" +checksum = "0a4c05b9e80c5ccd3a7ef080ad7b6ba7d6fc00a985b8b157197075677c82c7a0" dependencies = [ "anstream", "anstyle", @@ -2145,7 +2145,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2537,7 +2537,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "crossterm_winapi", "mio", "parking_lot", @@ -2553,7 +2553,7 @@ 
version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8b9f2e4c67f833b660cdb0a3523065869fb35570177239812ed4c905aeff87b" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "crossterm_winapi", "document-features", "parking_lot", @@ -2653,7 +2653,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2687,7 +2687,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2702,7 +2702,7 @@ dependencies = [ "quote", "serde", "strsim", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2713,7 +2713,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2724,7 +2724,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2777,7 +2777,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2809,9 +2809,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41953f86f8a05768a6cda24def994fd2f424b04ec5c719cf89989779f199071" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" dependencies = [ "powerfmt", "serde_core", @@ -2836,7 +2836,7 @@ checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2847,7 +2847,7 @@ checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2868,7 +2868,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2878,7 +2878,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -2899,7 +2899,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "unicode-xid", ] @@ -3013,7 +3013,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -3024,9 +3024,9 @@ checksum = "aac81fa3e28d21450aa4d2ac065992ba96a1d7303efbce51a95f4fd175b67562" [[package]] name = "document-features" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95249b50c6c185bee49034bcb378a49dc2b5dff0be90ff6616d31d64febab05d" +checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61" dependencies = [ "litrs", ] @@ -3092,7 +3092,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -3200,7 +3200,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -3220,7 +3220,7 @@ checksum = 
"0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -3296,7 +3296,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -3823,9 +3823,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc5a4e564e38c699f2880d3fda590bedc2e69f3f84cd48b457bd892ce61d0aa9" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" dependencies = [ "crc32fast", "miniz_oxide", @@ -3944,7 +3944,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -4072,7 +4072,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2deb07a133b1520dc1a5690e9bd08950108873d7ed5de38dcc74d3b5ebffa110" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "libc", "libgit2-sys", "log", @@ -4740,7 +4740,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -4797,7 +4797,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -4851,9 +4851,12 @@ dependencies = [ [[package]] name = "indoc" -version = "2.0.6" +version = "2.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd" +checksum = "79cf5c93f93228cf8efb3ba362535fb11199ac548a09ce117c9b1adc3030d706" +dependencies = [ + "rustversion", +] [[package]] name = "infer" @@ -4867,7 +4870,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f37dccff2791ab604f9babef0ba14fbe0be30bd368dc541e2b08d07c8aa908f3" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "inotify-sys", "libc", ] @@ -4901,7 +4904,7 @@ dependencies = [ "indoc", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -4967,20 +4970,20 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "is_terminal_polyfill" -version = "1.70.1" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" [[package]] name = "itertools" @@ -5161,7 +5164,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -5374,7 +5377,7 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "libc", "redox_syscall", ] @@ -5433,9 +5436,9 @@ checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] 
name = "litrs" -version = "0.4.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5e54036fe321fd421e10d732f155734c4e4afd610dd556d9a82833ab3ee0bed" +checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092" [[package]] name = "lock_api" @@ -5532,7 +5535,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -5563,9 +5566,9 @@ checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "memmap2" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843a98750cd611cc2965a8213b53b43e715f13c37a9e096c6408e69990961db7" +checksum = "744133e4a0e0a658e1374cf3bf8e415c4052a15a111acd372764c55b4177d490" dependencies = [ "libc", ] @@ -5598,7 +5601,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -5831,7 +5834,7 @@ version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d3d07927151ff8575b7087f245456e549fea62edf0ec4e565a5ee50c8402bc3" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "fsevent-sys", "inotify", "kqueue", @@ -5960,9 +5963,9 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" +checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" dependencies = [ "num_enum_derive", "rustversion", @@ -5970,14 +5973,14 @@ dependencies = [ [[package]] name = "num_enum_derive" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6016,9 +6019,9 @@ dependencies = [ [[package]] name = "once_cell_polyfill" -version = "1.70.1" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" [[package]] name = "oorandom" @@ -6305,7 +6308,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6440,7 +6443,7 @@ dependencies = [ "phf_shared 0.11.3", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6453,7 +6456,7 @@ dependencies = [ "phf_shared 0.13.1", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6491,7 +6494,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6624,7 +6627,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6675,14 +6678,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] name = "proc-macro2" -version = "1.0.101" +version = "1.0.103" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" dependencies = [ "unicode-ident", ] @@ -6693,7 +6696,7 @@ version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc5b72d8145275d844d4b5f6d4e1eef00c8cd889edb6035c21675d1bb1f45c9f" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "chrono", "flate2", "hex", @@ -6707,7 +6710,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25485360a54d6861439d60facef26de713b1e126bf015ec8f98239467a2b82f7" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "procfs-core 0.18.0", "rustix 1.1.2", ] @@ -6718,7 +6721,7 @@ version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "239df02d8349b06fc07398a3a1697b06418223b1c7725085e801e7c0fc6a12ec" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "chrono", "hex", ] @@ -6729,7 +6732,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6401bf7b6af22f78b563665d15a22e9aef27775b79b149a66ca022468a4e405" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "hex", ] @@ -6741,7 +6744,7 @@ checksum = "2bb0be07becd10686a0bb407298fb425360a5c44a663774406340c59a22de4ce" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.9.4", + "bitflags 2.10.0", "lazy_static", "num-traits", "rand 0.9.2", @@ -6771,7 +6774,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6782,7 +6785,7 @@ checksum = "095a99f75c69734802359b682be8daaf8980296731f6470434ea2c652af1dd30" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6805,7 +6808,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -6814,7 +6817,7 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "memchr", "unicase", ] @@ -7052,7 +7055,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabd94c2f37801c20583fc49dd5cd6b0ba68c716787c2dd6ed18571e1e63117b" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "cassowary", "compact_str", "crossterm 0.28.1", @@ -7073,7 +7076,7 @@ version = "11.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", ] [[package]] @@ -7108,7 +7111,7 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", ] [[package]] @@ -7150,7 +7153,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -7563,7 +7566,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -8677,7 +8680,7 @@ dependencies = [ name = "reth-libmdbx" version = "1.8.2" dependencies = [ - "bitflags 2.9.4", + 
"bitflags 2.10.0", "byteorder", "codspeed-criterion-compat", "dashmap 6.1.0", @@ -10623,7 +10626,7 @@ dependencies = [ "aquamarine", "assert_matches", "auto_impl", - "bitflags 2.9.4", + "bitflags 2.10.0", "codspeed-criterion-compat", "futures", "futures-util", @@ -11068,7 +11071,7 @@ version = "8.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e6bd5e669b02007872a8ca2643a14e308fe1739ee4475d74122587c3388a06a" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "revm-bytecode", "revm-primitives", "serde", @@ -11205,7 +11208,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.107", + "syn 2.0.108", "unicode-ident", ] @@ -11301,7 +11304,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "errno", "libc", "linux-raw-sys 0.4.15", @@ -11314,7 +11317,7 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "errno", "libc", "linux-raw-sys 0.11.0", @@ -11323,9 +11326,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.33" +version = "0.23.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "751e04a496ca00bb97a5e043158d23d66b5aabf2e1d5aa2a0aaebb1aafe6f82c" +checksum = "6a9586e9ee2b4f8fab52a0048ca7334d7024eef48e2cb9407e3497bb7cab7fa7" dependencies = [ "log", "once_cell", @@ -11553,7 +11556,7 @@ version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "core-foundation", "core-foundation-sys", "libc", @@ -11646,7 +11649,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -11697,9 +11700,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.15.0" +version = "3.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6093cd8c01b25262b84927e0f7151692158fab02d961e04c979d3903eba7ecc5" +checksum = "aa66c845eee442168b2c8134fec70ac50dc20e760769c8ba0ad1319ca1959b04" dependencies = [ "base64 0.22.1", "chrono", @@ -11716,14 +11719,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.15.0" +version = "3.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7e6c180db0816026a61afa1cff5344fb7ebded7e4d3062772179f2501481c27" +checksum = "b91a903660542fced4e99881aa481bdbaec1634568ee02e0b8bd57c64cb38955" dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12047,7 +12050,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12059,7 +12062,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12081,9 +12084,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.107" +version = "2.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a26dbd934e5451d21ef060c018dae56fc073894c5a7896f882928a76e6d081b" +checksum = "da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917" dependencies = [ 
"proc-macro2", "quote", @@ -12099,7 +12102,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12119,7 +12122,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12164,7 +12167,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac9ee8b664c9f1740cd813fea422116f8ba29997bb7c878d1940424889802897" dependencies = [ - "bitflags 2.9.4", + "bitflags 2.10.0", "log", "num-traits", ] @@ -12200,7 +12203,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12211,7 +12214,7 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "test-case-core", ] @@ -12251,7 +12254,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12299,7 +12302,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12310,7 +12313,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12475,7 +12478,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12668,7 +12671,7 @@ checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ "async-compression", "base64 0.22.1", - "bitflags 2.9.4", + "bitflags 2.10.0", "bytes", "futures-core", "futures-util", @@ -12735,7 +12738,7 @@ checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12895,7 +12898,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -12989,9 +12992,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" +checksum = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" [[package]] name = "unicode-segmentation" @@ -13176,7 +13179,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -13257,7 +13260,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "wasm-bindgen-shared", ] @@ -13292,7 +13295,7 @@ checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -13547,7 +13550,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -13558,7 +13561,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] 
[[package]] @@ -13569,7 +13572,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -13580,7 +13583,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -14094,7 +14097,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "synstructure", ] @@ -14106,7 +14109,7 @@ checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "synstructure", ] @@ -14127,7 +14130,7 @@ checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -14147,7 +14150,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", "synstructure", ] @@ -14168,7 +14171,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -14212,7 +14215,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] @@ -14223,7 +14226,7 @@ checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.107", + "syn 2.0.108", ] [[package]] From 53119fd5a1b5345d05ed0675cd287a9b69e17031 Mon Sep 17 00:00:00 2001 From: YK Date: Mon, 27 Oct 2025 17:49:04 +0800 Subject: [PATCH 200/371] refactor(trie): rename queue_storage_proof to send_storage_proof (#19310) --- crates/trie/parallel/src/proof.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index e8b39f38ec6..63d26993d50 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -88,7 +88,7 @@ impl ParallelProof { self } /// Queues a storage proof task and returns a receiver for the result. 
- fn queue_storage_proof( + fn send_storage_proof( &self, hashed_address: B256, prefix_set: PrefixSet, @@ -132,7 +132,7 @@ impl ParallelProof { "Starting storage proof generation" ); - let receiver = self.queue_storage_proof(hashed_address, prefix_set, target_slots)?; + let receiver = self.send_storage_proof(hashed_address, prefix_set, target_slots)?; let proof_msg = receiver.recv().map_err(|_| { ParallelStateRootError::StorageRoot(StorageRootError::Database(DatabaseError::Other( format!("channel closed for {hashed_address}"), From ded9d3ce337d5e16ed42e87addac6c07bf9652b8 Mon Sep 17 00:00:00 2001 From: guha-rahul <52607971+guha-rahul@users.noreply.github.com> Date: Mon, 27 Oct 2025 15:19:39 +0530 Subject: [PATCH 201/371] refactor: add more Snap response types (#19303) Co-authored-by: suhas-sensei --- crates/net/p2p/src/snap/client.rs | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/crates/net/p2p/src/snap/client.rs b/crates/net/p2p/src/snap/client.rs index 667824e448c..c8003c38f8e 100644 --- a/crates/net/p2p/src/snap/client.rs +++ b/crates/net/p2p/src/snap/client.rs @@ -1,15 +1,28 @@ use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; use futures::Future; use reth_eth_wire_types::snap::{ - AccountRangeMessage, GetAccountRangeMessage, GetByteCodesMessage, GetStorageRangesMessage, - GetTrieNodesMessage, + AccountRangeMessage, ByteCodesMessage, GetAccountRangeMessage, GetByteCodesMessage, + GetStorageRangesMessage, GetTrieNodesMessage, StorageRangesMessage, TrieNodesMessage, }; +/// Response types for snap sync requests +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum SnapResponse { + /// Response containing account range data + AccountRange(AccountRangeMessage), + /// Response containing storage ranges data + StorageRanges(StorageRangesMessage), + /// Response containing bytecode data + ByteCodes(ByteCodesMessage), + /// Response containing trie node data + TrieNodes(TrieNodesMessage), +} + /// The snap sync downloader client #[auto_impl::auto_impl(&, Arc, Box)] pub trait SnapClient: DownloadClient { - /// The output future type for account range requests - type Output: Future> + Send + Sync + Unpin; + /// The output future type for snap requests + type Output: Future> + Send + Sync + Unpin; /// Sends the account range request to the p2p network and returns the account range /// response received from a peer. From 74cc561917642e8024e8aeab16a85da554a7db4e Mon Sep 17 00:00:00 2001 From: Galoretka Date: Mon, 27 Oct 2025 12:16:16 +0200 Subject: [PATCH 202/371] chore(ethereum): remove redundant std::default::Default import (#19299) --- crates/ethereum/node/src/node.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 74740643a41..fa81d70e61f 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -57,7 +57,7 @@ use reth_transaction_pool::{ TransactionPool, TransactionValidationTaskExecutor, }; use revm::context::TxEnv; -use std::{default::Default, marker::PhantomData, sync::Arc, time::SystemTime}; +use std::{marker::PhantomData, sync::Arc, time::SystemTime}; /// Type configuration for a regular Ethereum node. 
#[derive(Debug, Default, Clone, Copy)] From 4f660dac855b9a739887ce16c81696010e2e6e8b Mon Sep 17 00:00:00 2001 From: Maximilian Hubert <64627729+gap-editor@users.noreply.github.com> Date: Mon, 27 Oct 2025 11:17:29 +0100 Subject: [PATCH 203/371] =?UTF-8?q?fix(fs):=20correct=20ReadLink=20error?= =?UTF-8?q?=20message=20and=20add=20missing=20read=5Flink=20wra=E2=80=A6?= =?UTF-8?q?=20(#19287)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crates/fs-util/src/lib.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index 08817aecfa3..54a22875d94 100644 --- a/crates/fs-util/src/lib.rs +++ b/crates/fs-util/src/lib.rs @@ -39,7 +39,7 @@ pub enum FsPathError { }, /// Error variant for failed read link operation with additional path context. - #[error("failed to read from {path:?}: {source}")] + #[error("failed to read link {path:?}: {source}")] ReadLink { /// The source `io::Error`. source: io::Error, @@ -230,6 +230,12 @@ pub fn read(path: impl AsRef) -> Result> { fs::read(path).map_err(|err| FsPathError::read(err, path)) } +/// Wrapper for `std::fs::read_link` +pub fn read_link(path: impl AsRef) -> Result { + let path = path.as_ref(); + fs::read_link(path).map_err(|err| FsPathError::read_link(err, path)) +} + /// Wrapper for `std::fs::write` pub fn write(path: impl AsRef, contents: impl AsRef<[u8]>) -> Result<()> { let path = path.as_ref(); From 094594142f04c976565ec389591c2423d05c0f84 Mon Sep 17 00:00:00 2001 From: VolodymyrBg Date: Mon, 27 Oct 2025 12:18:10 +0200 Subject: [PATCH 204/371] fix(engine): module doc to reflect schnellru::LruMap backend (#19296) --- crates/engine/tree/src/tree/precompile_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/engine/tree/src/tree/precompile_cache.rs b/crates/engine/tree/src/tree/precompile_cache.rs index c88cb4bc720..753922f66b3 100644 --- a/crates/engine/tree/src/tree/precompile_cache.rs +++ b/crates/engine/tree/src/tree/precompile_cache.rs @@ -1,4 +1,4 @@ -//! Contains a precompile cache that is backed by a moka cache. +//! Contains a precompile cache backed by `schnellru::LruMap` (LRU by length). 
use alloy_primitives::Bytes; use parking_lot::Mutex; From 763bf350be40ec8223daa35d102af534ee3bc288 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Mon, 27 Oct 2025 10:02:52 +0000 Subject: [PATCH 205/371] chore(net): upgrade some noisy spans to TRACE (#19312) --- crates/net/ecies/src/codec.rs | 4 ++-- crates/net/ecies/src/stream.rs | 2 +- crates/net/network/src/session/mod.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/net/ecies/src/codec.rs b/crates/net/ecies/src/codec.rs index 938e44d9385..73c3469cd2f 100644 --- a/crates/net/ecies/src/codec.rs +++ b/crates/net/ecies/src/codec.rs @@ -58,7 +58,7 @@ impl Decoder for ECIESCodec { type Item = IngressECIESValue; type Error = ECIESError; - #[instrument(level = "trace", skip_all, fields(peer=?self.ecies.remote_id, state=?self.state))] + #[instrument(level = "trace", target = "net::ecies", skip_all, fields(peer=?self.ecies.remote_id, state=?self.state))] fn decode(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { loop { match self.state { @@ -150,7 +150,7 @@ impl Decoder for ECIESCodec { impl Encoder for ECIESCodec { type Error = io::Error; - #[instrument(level = "trace", skip(self, buf), fields(peer=?self.ecies.remote_id, state=?self.state))] + #[instrument(level = "trace", target = "net::ecies", skip(self, buf), fields(peer=?self.ecies.remote_id, state=?self.state))] fn encode(&mut self, item: EgressECIESValue, buf: &mut BytesMut) -> Result<(), Self::Error> { match item { EgressECIESValue::Auth => { diff --git a/crates/net/ecies/src/stream.rs b/crates/net/ecies/src/stream.rs index 830f3f5ddef..d99422f512f 100644 --- a/crates/net/ecies/src/stream.rs +++ b/crates/net/ecies/src/stream.rs @@ -40,7 +40,7 @@ where Io: AsyncRead + AsyncWrite + Unpin, { /// Connect to an `ECIES` server - #[instrument(level = "trace", skip(transport, secret_key))] + #[instrument(level = "trace", target = "net::ecies", skip(transport, secret_key))] pub async fn connect( transport: Io, secret_key: SecretKey, diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 9c01fc6f410..17528e2fcfa 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -905,7 +905,7 @@ pub(crate) async fn start_pending_incoming_session( } /// Starts the authentication process for a connection initiated by a remote peer. 
-#[instrument(skip_all, fields(%remote_addr, peer_id), target = "net")] +#[instrument(level = "trace", target = "net::network", skip_all, fields(%remote_addr, peer_id))] #[expect(clippy::too_many_arguments)] async fn start_pending_outbound_session( handshake: Arc, From 6b3534d407b553864e325d95b31909834aaed554 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Mon, 27 Oct 2025 10:32:37 +0000 Subject: [PATCH 206/371] ci: pin Bun to v1.2.23 (#19315) --- .github/workflows/book.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 9e4cf965eda..c4262cbb3ad 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -20,6 +20,8 @@ jobs: - name: Install bun uses: oven-sh/setup-bun@v2 + with: + bun-version: v1.2.23 - name: Install Playwright browsers # Required for rehype-mermaid to render Mermaid diagrams during build From be73e4a246717ea51b5f0e8bef62ea73027cd17d Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Mon, 27 Oct 2025 11:48:17 +0100 Subject: [PATCH 207/371] fix(trie): Fix trie_reverts not returning sorted nodes (#19280) --- .../src/providers/database/provider.rs | 51 ++++++++++--------- crates/trie/common/src/updates.rs | 27 +++++++++- crates/trie/trie/src/trie_cursor/in_memory.rs | 3 +- 3 files changed, 53 insertions(+), 28 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index d5e49d822b2..93baa4309d2 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -2077,7 +2077,7 @@ impl TrieWriter for DatabaseProvider let mut account_trie_cursor = tx.cursor_write::()?; // Process sorted account nodes - for (key, updated_node) in &trie_updates.account_nodes { + for (key, updated_node) in trie_updates.account_nodes_ref() { let nibbles = StoredNibbles(*key); match updated_node { Some(node) => { @@ -2144,7 +2144,7 @@ impl TrieWriter for DatabaseProvider )?; } - let mut storage_updates = trie_updates.storage_tries.iter().collect::>(); + let mut storage_updates = trie_updates.storage_tries_ref().iter().collect::>(); storage_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); num_entries += self.write_storage_trie_changesets( @@ -2194,7 +2194,7 @@ impl TrieReader for DatabaseProvider { let tx = self.tx_ref(); // Read account trie changes directly into a Vec - data is already sorted by nibbles - // within each block, and we want the oldest (first) version of each node + // within each block, and we want the oldest (first) version of each node sorted by path. let mut account_nodes = Vec::new(); let mut seen_account_keys = HashSet::new(); let mut accounts_cursor = tx.cursor_dup_read::()?; @@ -2207,8 +2207,11 @@ impl TrieReader for DatabaseProvider { } } + account_nodes.sort_by_key(|(path, _)| *path); + // Read storage trie changes - data is sorted by (block, hashed_address, nibbles) - // Keep track of seen (address, nibbles) pairs to only keep the oldest version + // Keep track of seen (address, nibbles) pairs to only keep the oldest version per address, + // sorted by path. 
let mut storage_tries = B256Map::>::default(); let mut seen_storage_keys = HashSet::new(); let mut storages_cursor = tx.cursor_dup_read::()?; @@ -2231,12 +2234,13 @@ impl TrieReader for DatabaseProvider { // Convert to StorageTrieUpdatesSorted let storage_tries = storage_tries .into_iter() - .map(|(address, nodes)| { + .map(|(address, mut nodes)| { + nodes.sort_by_key(|(path, _)| *path); (address, StorageTrieUpdatesSorted { storage_nodes: nodes, is_deleted: false }) }) .collect(); - Ok(TrieUpdatesSorted { account_nodes, storage_tries }) + Ok(TrieUpdatesSorted::new(account_nodes, storage_tries)) } fn get_block_trie_updates( @@ -2254,7 +2258,7 @@ impl TrieReader for DatabaseProvider { let cursor_factory = InMemoryTrieCursorFactory::new(db_cursor_factory, &reverts); // Step 3: Collect all account trie nodes that changed in the target block - let mut trie_updates = TrieUpdatesSorted::default(); + let mut account_nodes = Vec::new(); // Walk through all account trie changes for this block let mut accounts_trie_cursor = tx.cursor_dup_read::()?; @@ -2264,10 +2268,11 @@ impl TrieReader for DatabaseProvider { let (_, TrieChangeSetsEntry { nibbles, .. }) = entry?; // Look up the current value of this trie node using the overlay cursor let node_value = account_cursor.seek_exact(nibbles.0)?.map(|(_, node)| node); - trie_updates.account_nodes.push((nibbles.0, node_value)); + account_nodes.push((nibbles.0, node_value)); } // Step 4: Collect all storage trie nodes that changed in the target block + let mut storage_tries = B256Map::default(); let mut storages_trie_cursor = tx.cursor_dup_read::()?; let storage_range_start = BlockNumberHashedAddress((block_number, B256::ZERO)); let storage_range_end = BlockNumberHashedAddress((block_number + 1, B256::ZERO)); @@ -2291,8 +2296,7 @@ impl TrieReader for DatabaseProvider { let cursor = storage_cursor.as_mut().expect("storage_cursor was just initialized above"); let node_value = cursor.seek_exact(nibbles.0)?.map(|(_, node)| node); - trie_updates - .storage_tries + storage_tries .entry(hashed_address) .or_insert_with(|| StorageTrieUpdatesSorted { storage_nodes: Vec::new(), @@ -2302,7 +2306,7 @@ impl TrieReader for DatabaseProvider { .push((nibbles.0, node_value)); } - Ok(trie_updates) + Ok(TrieUpdatesSorted::new(account_nodes, storage_tries)) } } @@ -2379,7 +2383,7 @@ impl StorageTrieWriter for DatabaseP // Get the overlay updates for this storage trie, or use an empty array let overlay_updates = updates_overlay - .and_then(|overlay| overlay.storage_tries.get(hashed_address)) + .and_then(|overlay| overlay.storage_tries_ref().get(hashed_address)) .map(|updates| updates.storage_nodes_ref()) .unwrap_or(&EMPTY_UPDATES); @@ -3463,7 +3467,7 @@ mod tests { storage_tries.insert(storage_address1, storage_trie1); storage_tries.insert(storage_address2, storage_trie2); - let trie_updates = TrieUpdatesSorted { account_nodes, storage_tries }; + let trie_updates = TrieUpdatesSorted::new(account_nodes, storage_tries); // Write the changesets let num_written = @@ -3679,10 +3683,7 @@ mod tests { overlay_storage_tries.insert(storage_address1, overlay_storage_trie1); overlay_storage_tries.insert(storage_address2, overlay_storage_trie2); - let overlay = TrieUpdatesSorted { - account_nodes: overlay_account_nodes, - storage_tries: overlay_storage_tries, - }; + let overlay = TrieUpdatesSorted::new(overlay_account_nodes, overlay_storage_tries); // Normal storage trie: one Some (update) and one None (new) let storage_trie1 = StorageTrieUpdatesSorted { @@ -3709,7 +3710,7 @@ mod tests { 
storage_tries.insert(storage_address1, storage_trie1); storage_tries.insert(storage_address2, storage_trie2); - let trie_updates = TrieUpdatesSorted { account_nodes, storage_tries }; + let trie_updates = TrieUpdatesSorted::new(account_nodes, storage_tries); // Write the changesets WITH OVERLAY let num_written = @@ -4275,7 +4276,7 @@ mod tests { storage_tries.insert(storage_address1, storage_trie1); storage_tries.insert(storage_address2, storage_trie2); - let trie_updates = TrieUpdatesSorted { account_nodes, storage_tries }; + let trie_updates = TrieUpdatesSorted::new(account_nodes, storage_tries); // Write the sorted trie updates let num_entries = provider_rw.write_trie_updates_sorted(&trie_updates).unwrap(); @@ -4539,11 +4540,11 @@ mod tests { let result = provider.get_block_trie_updates(target_block).unwrap(); // Verify account trie updates - assert_eq!(result.account_nodes.len(), 2, "Should have 2 account trie updates"); + assert_eq!(result.account_nodes_ref().len(), 2, "Should have 2 account trie updates"); // Check nibbles1 - should have the current value (node1) let nibbles1_update = result - .account_nodes + .account_nodes_ref() .iter() .find(|(n, _)| n == &account_nibbles1) .expect("Should find nibbles1"); @@ -4556,7 +4557,7 @@ mod tests { // Check nibbles2 - should have the current value (node2) let nibbles2_update = result - .account_nodes + .account_nodes_ref() .iter() .find(|(n, _)| n == &account_nibbles2) .expect("Should find nibbles2"); @@ -4569,14 +4570,14 @@ mod tests { // nibbles3 should NOT be in the result (it was changed in next_block, not target_block) assert!( - !result.account_nodes.iter().any(|(n, _)| n == &account_nibbles3), + !result.account_nodes_ref().iter().any(|(n, _)| n == &account_nibbles3), "nibbles3 should not be in target_block updates" ); // Verify storage trie updates - assert_eq!(result.storage_tries.len(), 1, "Should have 1 storage trie"); + assert_eq!(result.storage_tries_ref().len(), 1, "Should have 1 storage trie"); let storage_updates = result - .storage_tries + .storage_tries_ref() .get(&storage_address1) .expect("Should have storage updates for address1"); diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index e3e098ac8e5..b0d178cd1d0 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -432,12 +432,35 @@ pub struct TrieUpdatesSortedRef<'a> { pub struct TrieUpdatesSorted { /// Sorted collection of updated state nodes with corresponding paths. None indicates that a /// node was removed. - pub account_nodes: Vec<(Nibbles, Option)>, + account_nodes: Vec<(Nibbles, Option)>, /// Storage tries stored by hashed address of the account the trie belongs to. - pub storage_tries: B256Map, + storage_tries: B256Map, } impl TrieUpdatesSorted { + /// Creates a new `TrieUpdatesSorted` with the given account nodes and storage tries. + /// + /// # Panics + /// + /// In debug mode, panics if `account_nodes` is not sorted by the `Nibbles` key, + /// or if any storage trie's `storage_nodes` is not sorted by its `Nibbles` key. 
+ pub fn new( + account_nodes: Vec<(Nibbles, Option)>, + storage_tries: B256Map, + ) -> Self { + debug_assert!( + account_nodes.is_sorted_by_key(|item| &item.0), + "account_nodes must be sorted by Nibbles key" + ); + debug_assert!( + storage_tries.values().all(|storage_trie| { + storage_trie.storage_nodes.is_sorted_by_key(|item| &item.0) + }), + "all storage_nodes in storage_tries must be sorted by Nibbles key" + ); + Self { account_nodes, storage_tries } + } + /// Returns `true` if the updates are empty. pub fn is_empty(&self) -> bool { self.account_nodes.is_empty() && self.storage_tries.is_empty() diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index 1c7f179ad0a..e76bf7b2be3 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -47,7 +47,8 @@ where // if the storage trie has no updates then we use this as the in-memory overlay. static EMPTY_UPDATES: Vec<(Nibbles, Option)> = Vec::new(); - let storage_trie_updates = self.trie_updates.as_ref().storage_tries.get(&hashed_address); + let storage_trie_updates = + self.trie_updates.as_ref().storage_tries_ref().get(&hashed_address); let (storage_nodes, cleared) = storage_trie_updates .map(|u| (u.storage_nodes_ref(), u.is_deleted())) .unwrap_or((&EMPTY_UPDATES, false)); From 19f5d51d862f9c4892138b6afcd07f31edeb1062 Mon Sep 17 00:00:00 2001 From: sashass1315 Date: Mon, 27 Oct 2025 13:41:48 +0200 Subject: [PATCH 208/371] chore: remove redundant PhantomData from NodeHooks (#19316) --- crates/node/builder/src/hooks.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/node/builder/src/hooks.rs b/crates/node/builder/src/hooks.rs index dda976599ed..71f0f3b4d2c 100644 --- a/crates/node/builder/src/hooks.rs +++ b/crates/node/builder/src/hooks.rs @@ -10,7 +10,6 @@ pub struct NodeHooks> { pub on_component_initialized: Box>, /// Hook to run once the node is started. pub on_node_started: Box>, - _marker: std::marker::PhantomData, } impl NodeHooks @@ -23,7 +22,6 @@ where Self { on_component_initialized: Box::<()>::default(), on_node_started: Box::<()>::default(), - _marker: Default::default(), } } From bb73d794fd6943e1e39531a461bcad61c7904802 Mon Sep 17 00:00:00 2001 From: Gengar Date: Mon, 27 Oct 2025 13:57:27 +0200 Subject: [PATCH 209/371] docs: populate modify-node section with node-custom-rpc implementation guide (#18672) --- .../docs/pages/sdk/examples/modify-node.mdx | 71 ++++++++++++++++++- 1 file changed, 70 insertions(+), 1 deletion(-) diff --git a/docs/vocs/docs/pages/sdk/examples/modify-node.mdx b/docs/vocs/docs/pages/sdk/examples/modify-node.mdx index b8f21a06bbf..b5297504f3a 100644 --- a/docs/vocs/docs/pages/sdk/examples/modify-node.mdx +++ b/docs/vocs/docs/pages/sdk/examples/modify-node.mdx @@ -4,13 +4,82 @@ This guide demonstrates how to extend a Reth node with custom functionality, inc ## Adding Custom RPC Endpoints -One of the most common modifications is adding custom RPC methods to expose additional functionality. +One of the most common modifications is adding custom RPC methods to expose additional functionality. This allows you to extend the standard Ethereum RPC API with your own methods while maintaining compatibility with existing tools and clients. ### Basic Custom RPC Module +The following example shows how to add a custom RPC namespace called `txpoolExt` that provides additional transaction pool functionality. This example is based on the `node-custom-rpc` example in the Reth repository. 
+ +#### Project Structure + +First, create a new binary crate with the following dependencies in your `Cargo.toml`: + +```toml +[package] +name = "node-custom-rpc" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "node-custom-rpc" +path = "src/main.rs" + +[dependencies] +clap = { version = "4.0", features = ["derive"] } +jsonrpsee = { version = "0.22", features = ["macros", "server", "http-server", "ws-server"] } +reth-ethereum = { path = "../../crates/ethereum" } +tokio = { version = "1.0", features = ["full"] } +``` + +#### Implementation + +The complete implementation can be found in the [node-custom-rpc example](https://github.com/paradigmxyz/reth/tree/main/examples/node-custom-rpc). Here's a summary of the key components: + +1. **RPC Interface**: Define your custom RPC methods using `jsonrpsee` proc macros with a custom namespace +2. **RPC Handler**: Implement the trait with access to node components like the transaction pool +3. **CLI Extension**: Add custom CLI arguments to control your extensions +4. **Node Integration**: Use `extend_rpc_modules` to integrate your custom functionality + +#### Running the Custom Node + +Build and run your custom node with the extension enabled: + +```bash +cargo run -p node-custom-rpc -- node --http --ws --enable-ext +``` + +This will start a Reth node with your custom RPC methods available on both HTTP and WebSocket transports. + +#### Testing the Custom RPC Methods + +You can test your custom RPC methods using tools like `cast` from the Foundry suite: + +```bash +# Get transaction count +cast rpc txpoolExt_transactionCount + +# Clear the transaction pool +cast rpc txpoolExt_clearTxpool + +# Subscribe to transaction count updates (WebSocket only) +cast rpc txpoolExt_subscribeTransactionCount +``` + +### Key Concepts + +1. **RPC Namespaces**: Use the `namespace` parameter in the `rpc` macro to create a custom namespace for your methods. + +2. **Node Context**: Access node components like the transaction pool through the `ctx` parameter in `extend_rpc_modules`. + +3. **Transport Integration**: Your custom RPC methods are automatically available on all configured transports (HTTP, WebSocket, IPC). + +4. **CLI Integration**: Extend the default Reth CLI with your own arguments to control custom functionality. + +5. **Error Handling**: Use `RpcResult` for methods that can fail and handle errors appropriately. 
## Next Steps - Explore [Standalone Components](/sdk/examples/standalone-components) for direct blockchain interaction - Learn about [Custom Node Building](/sdk/custom-node/prerequisites) for production deployments - Review [Type System](/sdk/typesystem/block) for working with blockchain data +- Check out the [node-custom-rpc example](https://github.com/paradigmxyz/reth/tree/main/examples/node-custom-rpc) for the complete implementation From 106ffefc0fbe0ba15deb6a72df2fea8b56895135 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 27 Oct 2025 12:57:44 +0100 Subject: [PATCH 210/371] chore: use hex bytes type (#19317) --- crates/transaction-pool/src/maintain.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index aa0366341a6..0e30a2473b2 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -10,8 +10,8 @@ use crate::{ }; use alloy_consensus::{transaction::TxHashRef, BlockHeader, Typed2718}; use alloy_eips::{BlockNumberOrTag, Decodable2718, Encodable2718}; -use alloy_primitives::{Address, BlockHash, BlockNumber}; -use alloy_rlp::{Bytes, Encodable}; +use alloy_primitives::{Address, BlockHash, BlockNumber, Bytes}; +use alloy_rlp::Encodable; use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, From f088ec09cb9d481b6dbb0a090bed9dba9d3021f2 Mon Sep 17 00:00:00 2001 From: radik878 Date: Mon, 27 Oct 2025 13:58:55 +0200 Subject: [PATCH 211/371] docs(eth-wire): update docs to reflect eth-wire-types, alloy_rlp, version-aware decoding, and RLPx multiplexing (#19319) --- docs/crates/eth-wire.md | 147 ++++++++++++++++++++++++++-------------- 1 file changed, 96 insertions(+), 51 deletions(-) diff --git a/docs/crates/eth-wire.md b/docs/crates/eth-wire.md index cf0c2cc5377..cf62ab143e8 100644 --- a/docs/crates/eth-wire.md +++ b/docs/crates/eth-wire.md @@ -9,48 +9,70 @@ This crate can be thought of as having 2 components: 2. Abstractions over Tokio Streams that operate on these types. (Note that ECIES is implemented in a separate `reth-ecies` crate.) +Additionally, this crate focuses on stream implementations (P2P and Eth), handshakes, and multiplexing. The protocol +message types and RLP encoding/decoding live in the separate `eth-wire-types` crate and are re-exported by `eth-wire` +for convenience. ## Types The most basic Eth-wire type is a `ProtocolMessage`. It describes all messages that reth can send/receive. -[File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/message.rs) +[File: crates/net/eth-wire-types/src/message.rs](../../crates/net/eth-wire-types/src/message.rs) ```rust, ignore /// An `eth` protocol message, containing a message ID and payload. 
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct ProtocolMessage { +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ProtocolMessage { pub message_type: EthMessageID, - pub message: EthMessage, + pub message: EthMessage, } -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub enum EthMessage { - Status(Status), +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum EthMessage { + Status(StatusMessage), NewBlockHashes(NewBlockHashes), - Transactions(Transactions), - NewPooledTransactionHashes(NewPooledTransactionHashes), + NewBlock(Box), + Transactions(Transactions), + NewPooledTransactionHashes66(NewPooledTransactionHashes66), + NewPooledTransactionHashes68(NewPooledTransactionHashes68), GetBlockHeaders(RequestPair), - // ... + BlockHeaders(RequestPair>), + GetBlockBodies(RequestPair), + BlockBodies(RequestPair>), + GetPooledTransactions(RequestPair), + PooledTransactions(RequestPair>), + GetNodeData(RequestPair), + NodeData(RequestPair), GetReceipts(RequestPair), - Receipts(RequestPair), + Receipts(RequestPair>), + Receipts69(RequestPair>), + BlockRangeUpdate(BlockRangeUpdate), } /// Represents message IDs for eth protocol messages. #[repr(u8)] -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum EthMessageID { Status = 0x00, NewBlockHashes = 0x01, Transactions = 0x02, - // ... + GetBlockHeaders = 0x03, + BlockHeaders = 0x04, + GetBlockBodies = 0x05, + BlockBodies = 0x06, + NewBlock = 0x07, + NewPooledTransactionHashes = 0x08, + GetPooledTransactions = 0x09, + PooledTransactions = 0x0a, + GetNodeData = 0x0d, NodeData = 0x0e, GetReceipts = 0x0f, Receipts = 0x10, + BlockRangeUpdate = 0x11, } ``` Messages can either be broadcast to the network, or can be a request/response message to a single peer. This 2nd type of message is described using a `RequestPair` struct, which is simply a concatenation of the underlying message with a request id. -[File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/message.rs) +[File: crates/net/eth-wire-types/src/message.rs](../../crates/net/eth-wire-types/src/message.rs) ```rust, ignore #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct RequestPair { @@ -59,10 +81,8 @@ pub struct RequestPair { } ``` -Every `Ethmessage` has a corresponding rust struct that implements the `Encodable` and `Decodable` traits. -These traits are defined as follows: - -[Crate: crates/rlp](https://github.com/paradigmxyz/reth/tree/1563506aea09049a85e5cc72c2894f3f7a371581/crates/rlp) +Every `EthMessage` has a corresponding Rust struct that implements `alloy_rlp::Encodable` and `alloy_rlp::Decodable` +(often via derive macros like `RlpEncodable`/`RlpDecodable`). These traits are defined in `alloy_rlp`: ```rust, ignore pub trait Decodable: Sized { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result; @@ -72,10 +92,11 @@ pub trait Encodable { fn length(&self) -> usize; } ``` -These traits describe how the `Ethmessage` should be serialized/deserialized into raw bytes using the RLP format. -In reth all [RLP](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp/) encode/decode operations are handled by the `common/rlp` and `common/rlp-derive` crates. +These traits describe how the `EthMessage` should be serialized/deserialized into raw bytes using the RLP format. 
+In reth all [RLP](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp/) encode/decode operations are handled by `alloy_rlp` and the derive macros used in `eth-wire-types`. -Note that the `ProtocolMessage` itself implements these traits, so any stream of bytes can be converted into it by calling `ProtocolMessage::decode()` and vice versa with `ProtocolMessage::encode()`. The message type is determined by the first byte of the byte stream. +Note: `ProtocolMessage` implements `Encodable`, while decoding is performed via +`ProtocolMessage::decode_message(version, &mut bytes)` because decoding must respect the negotiated `EthVersion`. ### Example: The Transactions message Let's understand how an `EthMessage` is implemented by taking a look at the `Transactions` Message. The eth specification describes a Transaction message as a list of RLP-encoded transactions: @@ -93,17 +114,17 @@ The items in the list are transactions in the format described in the main Ether In reth, this is represented as: -[File: crates/net/eth-wire/src/types/broadcast.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/broadcast.rs) +[File: crates/net/eth-wire-types/src/broadcast.rs](../../crates/net/eth-wire-types/src/broadcast.rs) ```rust,ignore -pub struct Transactions( +pub struct Transactions( /// New transactions for the peer to include in its mempool. - pub Vec, + pub Vec, ); ``` -And the corresponding trait implementations are present in the primitives crate. +And the corresponding transaction type is defined here: -[File: crates/primitives/src/transaction/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/primitives/src/transaction/mod.rs) +[File: crates/ethereum/primitives/src/transaction.rs](../../crates/ethereum/primitives/src/transaction.rs) ```rust, ignore #[reth_codec] #[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Default, Serialize, Deserialize)] @@ -146,7 +167,7 @@ The lowest level stream to communicate with other peers is the P2P stream. It ta Decompression/Compression of bytes is done with snappy algorithm ([EIP 706](https://eips.ethereum.org/EIPS/eip-706)) using the external `snap` crate. -[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](../../crates/net/eth-wire/src/p2pstream.rs) ```rust,ignore #[pin_project] pub struct P2PStream { @@ -155,23 +176,29 @@ pub struct P2PStream { encoder: snap::raw::Encoder, decoder: snap::raw::Decoder, pinger: Pinger, - shared_capability: SharedCapability, + /// Negotiated shared capabilities + shared_capabilities: SharedCapabilities, + /// Outgoing messages buffered for sending to the underlying stream. outgoing_messages: VecDeque, + /// Maximum number of messages that can be buffered before yielding backpressure. + outgoing_message_buffer_capacity: usize, + /// Whether this stream is currently in the process of gracefully disconnecting. disconnecting: bool, } ``` ### Pinger -To manage pinging, an instance of the `Pinger` struct is used. This is a state machine that keeps track of how many pings -we have sent/received and the timeouts associated with them. +To manage pinging, an instance of the `Pinger` struct is used. This is a state machine that keeps track of pings +we have sent/received and the timeout associated with them. 
-[File: crates/net/eth-wire/src/pinger.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/pinger.rs) +[File: crates/net/eth-wire/src/pinger.rs](../../crates/net/eth-wire/src/pinger.rs) ```rust,ignore #[derive(Debug)] pub(crate) struct Pinger { /// The timer used for the next ping. ping_interval: Interval, - /// The timer used for the next ping. + /// The timer used to detect a ping timeout. timeout_timer: Pin>, + /// The timeout duration for each ping. timeout: Duration, state: PingState, } @@ -205,7 +232,7 @@ pub(crate) fn poll_ping( } } PingState::WaitingForPong => { - if self.timeout_timer.is_elapsed() { + if self.timeout_timer.as_mut().poll(cx).is_ready() { self.state = PingState::TimedOut; return Poll::Ready(Ok(PingerEvent::Timeout)) } @@ -223,7 +250,7 @@ To send and receive data, the P2PStream itself is a future that implements the ` For the `Stream` trait, the `inner` stream is polled, decompressed and returned. Most of the code is just error handling and is omitted here for clarity. -[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](../../crates/net/eth-wire/src/p2pstream.rs) ```rust,ignore impl Stream for P2PStream { @@ -240,7 +267,8 @@ impl Stream for P2PStream { let mut decompress_buf = BytesMut::zeroed(decompressed_len + 1); this.decoder.decompress(&bytes[1..], &mut decompress_buf[1..])?; // ... Omitted Error handling - decompress_buf[0] = bytes[0] - this.shared_capability.offset(); + // Normalize IDs: reserved p2p range is 0x00..=0x0f; subprotocols start at 0x10 + decompress_buf[0] = bytes[0] - MAX_RESERVED_MESSAGE_ID - 1; return Poll::Ready(Some(Ok(decompress_buf))) } } @@ -250,7 +278,7 @@ impl Stream for P2PStream { Similarly, for the `Sink` trait, we do the reverse, compressing and sending data out to the `inner` stream. The important functions in this trait are shown below. -[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](../../crates/net/eth-wire/src/p2pstream.rs) ```rust, ignore impl Sink for P2PStream { fn start_send(self: Pin<&mut Self>, item: Bytes) -> Result<(), Self::Error> { @@ -258,7 +286,8 @@ impl Sink for P2PStream { let mut compressed = BytesMut::zeroed(1 + snap::raw::max_compress_len(item.len() - 1)); let compressed_size = this.encoder.compress(&item[1..], &mut compressed[1..])?; compressed.truncate(compressed_size + 1); - compressed[0] = item[0] + this.shared_capability.offset(); + // Mask subprotocol IDs into global space above reserved p2p IDs + compressed[0] = item[0] + MAX_RESERVED_MESSAGE_ID + 1; this.outgoing_messages.push_back(compressed.freeze()); Ok(()) } @@ -285,9 +314,9 @@ impl Sink for P2PStream { ## EthStream -The EthStream is very simple, it does not keep track of any state, it simply wraps the P2Pstream. +The EthStream wraps a stream and handles eth message (RLP) encoding/decoding with respect to the negotiated `EthVersion`. 
-[File: crates/net/eth-wire/src/ethstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs) +[File: crates/net/eth-wire/src/ethstream.rs](../../crates/net/eth-wire/src/ethstream.rs) ```rust,ignore #[pin_project] pub struct EthStream { @@ -295,10 +324,10 @@ pub struct EthStream { inner: S, } ``` -EthStream's only job is to perform the RLP decoding/encoding, using the `ProtocolMessage::decode()` and `ProtocolMessage::encode()` -functions we looked at earlier. +EthStream performs RLP decoding/encoding using `ProtocolMessage::decode_message(version, &mut bytes)` +and `ProtocolMessage::encode()`, and enforces protocol rules (e.g., prohibiting `Status` after handshake). -[File: crates/net/eth-wire/src/ethstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs) +[File: crates/net/eth-wire/src/ethstream.rs](../../crates/net/eth-wire/src/ethstream.rs) ```rust,ignore impl Stream for EthStream { // ... @@ -306,7 +335,7 @@ impl Stream for EthStream { let this = self.project(); let bytes = ready!(this.inner.poll_next(cx)).unwrap(); // ... - let msg = match ProtocolMessage::decode(&mut bytes.as_ref()) { + let msg = match ProtocolMessage::decode_message(self.version(), &mut bytes.as_ref()) { Ok(m) => m, Err(err) => { return Poll::Ready(Some(Err(err.into()))) @@ -319,10 +348,12 @@ impl Stream for EthStream { impl Sink for EthStream { // ... fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { - // ... + if matches!(item, EthMessage::Status(_)) { + let _ = self.project().inner.disconnect(DisconnectReason::ProtocolBreach); + return Err(EthStreamError::EthHandshakeError(EthHandshakeError::StatusNotInHandshake)) + } let mut bytes = BytesMut::new(); ProtocolMessage::from(item).encode(&mut bytes); - let bytes = bytes.freeze(); self.project().inner.start_send(bytes)?; Ok(()) @@ -339,9 +370,9 @@ For a session to be established, peers in the Ethereum network must first exchan To perform these, reth has special `Unauthed` versions of streams described above. -The `UnauthedP2Pstream` does the `Hello` handshake and returns a `P2PStream`. +The `UnauthedP2PStream` does the `Hello` handshake and returns a `P2PStream`. -[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](../../crates/net/eth-wire/src/p2pstream.rs) ```rust, ignore #[pin_project] pub struct UnauthedP2PStream { @@ -351,8 +382,8 @@ pub struct UnauthedP2PStream { impl UnauthedP2PStream { // ... - pub async fn handshake(mut self, hello: HelloMessage) -> Result<(P2PStream, HelloMessage), Error> { - self.inner.send(alloy_rlp::encode(P2PMessage::Hello(hello.clone())).into()).await?; + pub async fn handshake(mut self, hello: HelloMessageWithProtocols) -> Result<(P2PStream, HelloMessage), Error> { + self.inner.send(alloy_rlp::encode(P2PMessage::Hello(hello.message())).into()).await?; let first_message_bytes = tokio::time::timeout(HANDSHAKE_TIMEOUT, self.inner.next()).await; let their_hello = match P2PMessage::decode(&mut &first_message_bytes[..]) { @@ -360,11 +391,25 @@ impl UnauthedP2PStream { // ... 
} }?; - let stream = P2PStream::new(self.inner, capability); + let stream = P2PStream::new(self.inner, shared_capabilities); Ok((stream, their_hello)) } } ``` -Similarly, UnauthedEthStream does the `Status` handshake and returns an `EthStream`. The code is [here](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs) +Similarly, `UnauthedEthStream` does the `Status` handshake and returns an `EthStream`. It accepts a `UnifiedStatus` +and a `ForkFilter`, and provides a timeout wrapper. The code is [here](../../crates/net/eth-wire/src/ethstream.rs) + +### Multiplexing and satellites + +`eth-wire` also provides `RlpxProtocolMultiplexer`/`RlpxSatelliteStream` to run the primary `eth` protocol alongside +additional "satellite" protocols (e.g. `snap`) using negotiated `SharedCapabilities`. + +## Message variants and versions + +- `NewPooledTransactionHashes` differs between ETH66 (`NewPooledTransactionHashes66`) and ETH68 (`NewPooledTransactionHashes68`). +- Starting with ETH67, `GetNodeData` and `NodeData` are removed (decoding them for >=67 yields an error). +- Starting with ETH69: + - `BlockRangeUpdate (0x11)` announces the historical block range served. + - Receipts omit bloom: encoded as `Receipts69` instead of `Receipts`. From fa1f86cb916be979666017439746c65e2cd90d99 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Mon, 27 Oct 2025 14:12:22 +0100 Subject: [PATCH 212/371] fix(prune): Add unused variants back to PruneSegment enum (#19318) --- crates/prune/types/src/segment.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/crates/prune/types/src/segment.rs b/crates/prune/types/src/segment.rs index 542d3042049..cfc812a1a0e 100644 --- a/crates/prune/types/src/segment.rs +++ b/crates/prune/types/src/segment.rs @@ -1,10 +1,13 @@ +#![allow(deprecated)] // necessary to all defining deprecated `PruneSegment` variants + use crate::MINIMUM_PRUNING_DISTANCE; use derive_more::Display; use thiserror::Error; /// Segment of the data that can be pruned. /// -/// NOTE new variants must be added to the end of this enum. The variant index is encoded directly +/// VERY IMPORTANT NOTE: new variants must be added to the end of this enum, and old variants which +/// are no longer used must not be removed from this enum. The variant index is encoded directly /// when writing to the `PruneCheckpoint` table, so changing the order here will corrupt the table. #[derive(Debug, Display, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] #[cfg_attr(test, derive(arbitrary::Arbitrary))] @@ -24,6 +27,12 @@ pub enum PruneSegment { AccountHistory, /// Prune segment responsible for the `StorageChangeSets` and `StoragesHistory` tables. StorageHistory, + #[deprecated = "Variant indexes cannot be changed"] + /// Prune segment responsible for the `CanonicalHeaders`, `Headers` tables. + Headers, + #[deprecated = "Variant indexes cannot be changed"] + /// Prune segment responsible for the `Transactions` table. + Transactions, /// Prune segment responsible for all rows in `AccountsTrieChangeSets` and /// `StoragesTrieChangeSets` table. 
MerkleChangeSets, @@ -48,6 +57,9 @@ impl PruneSegment { Self::StorageHistory | Self::MerkleChangeSets | Self::Receipts => MINIMUM_PRUNING_DISTANCE, + #[expect(deprecated)] + #[expect(clippy::match_same_arms)] + Self::Headers | Self::Transactions => 0, } } From eed0d9686c6ea120ee94653a319abc73111e69db Mon Sep 17 00:00:00 2001 From: YK Date: Mon, 27 Oct 2025 21:58:28 +0800 Subject: [PATCH 213/371] refactor(trie): Unify proof return types (#19311) --- .../src/tree/payload_processor/multiproof.rs | 15 ++-- crates/trie/parallel/src/proof.rs | 30 ++++--- crates/trie/parallel/src/proof_task.rs | 86 ++++++++++++------- 3 files changed, 86 insertions(+), 45 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 755f7a7d0d7..321de725bec 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -19,7 +19,10 @@ use reth_trie::{ }; use reth_trie_parallel::{ proof::ParallelProof, - proof_task::{AccountMultiproofInput, ProofResultMessage, ProofWorkerHandle}, + proof_task::{ + AccountMultiproofInput, ProofResultContext, ProofResultMessage, ProofWorkerHandle, + StorageProofInput, + }, }; use std::{collections::BTreeMap, ops::DerefMut, sync::Arc, time::Instant}; use tracing::{debug, error, instrument, trace}; @@ -408,7 +411,7 @@ impl MultiproofManager { let prefix_set = prefix_set.freeze(); // Build computation input (data only) - let input = reth_trie_parallel::proof_task::StorageProofInput::new( + let input = StorageProofInput::new( hashed_address, prefix_set, proof_targets, @@ -419,7 +422,7 @@ impl MultiproofManager { // Dispatch to storage worker if let Err(e) = self.proof_worker_handle.dispatch_storage_proof( input, - reth_trie_parallel::proof_task::ProofResultContext::new( + ProofResultContext::new( self.proof_result_tx.clone(), proof_sequence_number, hashed_state_update, @@ -492,7 +495,7 @@ impl MultiproofManager { multi_added_removed_keys, missed_leaves_storage_roots, // Workers will send ProofResultMessage directly to proof_result_rx - proof_result_sender: reth_trie_parallel::proof_task::ProofResultContext::new( + proof_result_sender: ProofResultContext::new( self.proof_result_tx.clone(), proof_sequence_number, hashed_state_update, @@ -1131,7 +1134,7 @@ impl MultiProofTask { // Convert ProofResultMessage to SparseTrieUpdate match proof_result.result { - Ok((multiproof, _stats)) => { + Ok(proof_result_data) => { debug!( target: "engine::tree::payload_processor::multiproof", sequence = proof_result.sequence_number, @@ -1141,7 +1144,7 @@ impl MultiProofTask { let update = SparseTrieUpdate { state: proof_result.state, - multiproof, + multiproof: proof_result_data.into_multiproof(), }; if let Some(combined_update) = diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 63d26993d50..4d54359d1bf 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -139,15 +139,20 @@ impl ParallelProof { ))) })?; - // Extract the multiproof from the result - let (mut multiproof, _stats) = proof_msg.result?; - - // Extract storage proof from the multiproof - let storage_proof = multiproof.storages.remove(&hashed_address).ok_or_else(|| { - ParallelStateRootError::StorageRoot(StorageRootError::Database(DatabaseError::Other( - format!("storage proof not found in multiproof for {hashed_address}"), - ))) - })?; + // Extract storage proof directly from the result + let 
storage_proof = match proof_msg.result? { + crate::proof_task::ProofResult::StorageProof { hashed_address: addr, proof } => { + debug_assert_eq!( + addr, + hashed_address, + "storage worker must return same address: expected {hashed_address}, got {addr}" + ); + proof + } + crate::proof_task::ProofResult::AccountMultiproof { .. } => { + unreachable!("storage worker only sends StorageProof variant") + } + }; trace!( target: "trie::parallel_proof", @@ -231,7 +236,12 @@ impl ParallelProof { ) })?; - let (multiproof, stats) = proof_result_msg.result?; + let (multiproof, stats) = match proof_result_msg.result? { + crate::proof_task::ProofResult::AccountMultiproof { proof, stats } => (proof, stats), + crate::proof_task::ProofResult::StorageProof { .. } => { + unreachable!("account worker only sends AccountMultiproof variant") + } + }; #[cfg(feature = "metrics")] self.metrics.record(stats); diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index c05f2ad7286..1b50dbe73ef 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -84,8 +84,40 @@ use crate::proof_task_metrics::ProofTaskTrieMetrics; type StorageProofResult = Result; type TrieNodeProviderResult = Result, SparseTrieError>; -type AccountMultiproofResult = - Result<(DecodedMultiProof, ParallelTrieStats), ParallelStateRootError>; + +/// Result of a proof calculation, which can be either an account multiproof or a storage proof. +#[derive(Debug)] +pub enum ProofResult { + /// Account multiproof with statistics + AccountMultiproof { + /// The account multiproof + proof: DecodedMultiProof, + /// Statistics collected during proof computation + stats: ParallelTrieStats, + }, + /// Storage proof for a specific account + StorageProof { + /// The hashed address this storage proof belongs to + hashed_address: B256, + /// The storage multiproof + proof: DecodedStorageMultiProof, + }, +} + +impl ProofResult { + /// Convert this proof result into a `DecodedMultiProof`. + /// + /// For account multiproofs, returns the multiproof directly (discarding stats). + /// For storage proofs, wraps the storage proof into a minimal multiproof. + pub fn into_multiproof(self) -> DecodedMultiProof { + match self { + Self::AccountMultiproof { proof, stats: _ } => proof, + Self::StorageProof { hashed_address, proof } => { + DecodedMultiProof::from_storage_proof(hashed_address, proof) + } + } + } +} /// Channel used by worker threads to deliver `ProofResultMessage` items back to /// `MultiProofTask`. 
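Both worker kinds now deliver the same enum over a single channel, and the receiving side decides whether to flatten it into an account-level multiproof. A minimal standalone sketch of that pattern follows; `Multiproof` and `StorageProof` are placeholder stand-ins for the decoded proof types, not the actual reth types:

```rust
// Placeholder stand-ins for the decoded proof types.
#[derive(Debug)]
struct Multiproof;
#[derive(Debug)]
struct StorageProof;

// Unified result enum: one variant per worker kind.
#[derive(Debug)]
enum ProofResult {
    Account { proof: Multiproof },
    Storage { address: [u8; 32], proof: StorageProof },
}

impl ProofResult {
    // Every variant can be flattened into the account-level representation.
    // The real type wraps a storage proof into a one-account multiproof; the
    // placeholder here just returns an empty one.
    fn into_multiproof(self) -> Multiproof {
        match self {
            Self::Account { proof } => proof,
            Self::Storage { .. } => Multiproof,
        }
    }
}

fn main() {
    // Workers send results over one channel; the consumer flattens them.
    let (tx, rx) = std::sync::mpsc::channel::<ProofResult>();
    tx.send(ProofResult::Storage { address: [0u8; 32], proof: StorageProof }).unwrap();
    drop(tx);
    for result in rx {
        let _multiproof = result.into_multiproof();
    }
}
```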
@@ -101,8 +133,8 @@ pub type ProofResultSender = CrossbeamSender; pub struct ProofResultMessage { /// Sequence number for ordering proofs pub sequence_number: u64, - /// The proof calculation result - pub result: AccountMultiproofResult, + /// The proof calculation result (either account multiproof or storage proof) + pub result: Result, /// Time taken for the entire proof calculation (from dispatch to completion) pub elapsed: Duration, /// Original state update that triggered this proof @@ -248,18 +280,10 @@ fn storage_worker_loop( let proof_elapsed = proof_start.elapsed(); storage_proofs_processed += 1; - // Convert storage proof to account multiproof format - let result_msg = match result { - Ok(storage_proof) => { - let multiproof = reth_trie::DecodedMultiProof::from_storage_proof( - hashed_address, - storage_proof, - ); - let stats = crate::stats::ParallelTrieTracker::default().finish(); - Ok((multiproof, stats)) - } - Err(e) => Err(e), - }; + let result_msg = result.map(|storage_proof| ProofResult::StorageProof { + hashed_address, + proof: storage_proof, + }); if sender .send(ProofResultMessage { @@ -496,7 +520,7 @@ fn account_worker_loop( let proof_elapsed = proof_start.elapsed(); let total_elapsed = start.elapsed(); let stats = tracker.finish(); - let result = result.map(|proof| (proof, stats)); + let result = result.map(|proof| ProofResult::AccountMultiproof { proof, stats }); account_proofs_processed += 1; // Send result to MultiProofTask @@ -657,14 +681,20 @@ where ) })?; - // Extract storage proof from the multiproof wrapper - let (mut multiproof, _stats) = proof_msg.result?; - let proof = - multiproof.storages.remove(&hashed_address).ok_or_else(|| { - ParallelStateRootError::Other(format!( - "storage proof not found in multiproof for {hashed_address}" - )) - })?; + // Extract storage proof from the result + let proof = match proof_msg.result? { + ProofResult::StorageProof { hashed_address: addr, proof } => { + debug_assert_eq!( + addr, + hashed_address, + "storage worker must return same address: expected {hashed_address}, got {addr}" + ); + proof + } + ProofResult::AccountMultiproof { .. } => { + unreachable!("storage worker only sends StorageProof variant") + } + }; let root = proof.root; collected_decoded_storages.insert(hashed_address, proof); @@ -716,10 +746,8 @@ where // Consume remaining storage proof receivers for accounts not encountered during trie walk. for (hashed_address, receiver) in storage_proof_receivers { if let Ok(proof_msg) = receiver.recv() { - // Extract storage proof from the multiproof wrapper - if let Ok((mut multiproof, _stats)) = proof_msg.result && - let Some(proof) = multiproof.storages.remove(&hashed_address) - { + // Extract storage proof from the result + if let Ok(ProofResult::StorageProof { proof, .. }) = proof_msg.result { collected_decoded_storages.insert(hashed_address, proof); } } From a6fe713a6caa1a4f8e0e6f4fd2457a630ec85163 Mon Sep 17 00:00:00 2001 From: phrwlk Date: Mon, 27 Oct 2025 16:42:55 +0200 Subject: [PATCH 214/371] chore: remove dead OpL1BlockInfo.number field and writes (#19325) --- crates/optimism/txpool/src/validator.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/crates/optimism/txpool/src/validator.rs b/crates/optimism/txpool/src/validator.rs index 0cec4482a32..fd4710b8a4e 100644 --- a/crates/optimism/txpool/src/validator.rs +++ b/crates/optimism/txpool/src/validator.rs @@ -28,8 +28,6 @@ pub struct OpL1BlockInfo { l1_block_info: RwLock, /// Current block timestamp. timestamp: AtomicU64, - /// Current block number. 
- number: AtomicU64, } impl OpL1BlockInfo { @@ -103,7 +101,6 @@ where // so that we will accept txs into the pool before the first block if block.header().number() == 0 { this.block_info.timestamp.store(block.header().timestamp(), Ordering::Relaxed); - this.block_info.number.store(block.header().number(), Ordering::Relaxed); } else { this.update_l1_block_info(block.header(), block.body().transactions().first()); } @@ -141,7 +138,6 @@ where T: Transaction, { self.block_info.timestamp.store(header.timestamp(), Ordering::Relaxed); - self.block_info.number.store(header.number(), Ordering::Relaxed); if let Some(Ok(l1_block_info)) = tx.map(reth_optimism_evm::extract_l1_info_from_tx) { *self.block_info.l1_block_info.write() = l1_block_info; From 080cf72464191ef6fada5a64930570d096763801 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Mon, 27 Oct 2025 15:16:56 +0000 Subject: [PATCH 215/371] chore(trie): reduce sparse trie tracing (#19321) --- crates/trie/sparse-parallel/src/trie.rs | 12 +++++++----- crates/trie/sparse/src/state.rs | 2 +- crates/trie/sparse/src/trie.rs | 16 +++++++--------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 34c1ff2a963..c6a99e21071 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -688,6 +688,7 @@ impl SparseTrieInterface for ParallelSparseTrie { Ok(()) } + #[instrument(level = "trace", target = "trie::sparse::parallel", skip(self))] fn root(&mut self) -> B256 { trace!(target: "trie::parallel_sparse", "Calculating trie root hash"); @@ -703,6 +704,7 @@ impl SparseTrieInterface for ParallelSparseTrie { root_rlp.as_hash().unwrap_or(EMPTY_ROOT_HASH) } + #[instrument(level = "trace", target = "trie::sparse::parallel", skip(self))] fn update_subtrie_hashes(&mut self) { trace!(target: "trie::parallel_sparse", "Updating subtrie hashes"); @@ -1339,7 +1341,7 @@ impl ParallelSparseTrie { /// Drains any [`SparseTrieUpdatesAction`]s from the given subtrie, and applies each action to /// the given `updates` set. If the given set is None then this is a no-op. - #[instrument(target = "trie::parallel_sparse", skip_all)] + #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all)] fn apply_subtrie_update_actions( &mut self, update_actions: impl Iterator, @@ -1363,7 +1365,7 @@ impl ParallelSparseTrie { } /// Updates hashes for the upper subtrie, using nodes from both upper and lower subtries. - #[instrument(target = "trie::parallel_sparse", skip_all, ret(level = "trace"))] + #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, ret)] fn update_upper_subtrie_hashes(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { trace!(target: "trie::parallel_sparse", "Updating upper subtrie hashes"); @@ -1441,7 +1443,7 @@ impl ParallelSparseTrie { /// /// IMPORTANT: The method removes the subtries from `lower_subtries`, and the caller is /// responsible for returning them back into the array. - #[instrument(target = "trie::parallel_sparse", skip_all, fields(prefix_set_len = prefix_set.len()))] + #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, fields(prefix_set_len = prefix_set.len()))] fn take_changed_lower_subtries( &mut self, prefix_set: &mut PrefixSet, @@ -1598,7 +1600,7 @@ impl ParallelSparseTrie { /// Return updated subtries back to the trie after executing any actions required on the /// top-level `SparseTrieUpdates`. 
- #[instrument(target = "trie::parallel_sparse", skip_all)] + #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all)] fn insert_changed_subtries( &mut self, changed_subtries: impl IntoIterator, @@ -2086,7 +2088,7 @@ impl SparseSubtrie { /// # Panics /// /// If the node at the root path does not exist. - #[instrument(target = "trie::parallel_sparse", skip_all, fields(root = ?self.path), ret(level = "trace"))] + #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, fields(root = ?self.path), ret)] fn update_hashes( &mut self, prefix_set: &mut PrefixSet, diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index a202ebc8b2b..e45a1e13fc8 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -731,7 +731,7 @@ where /// /// Returns false if the new account info and storage trie are empty, indicating the account /// leaf should be removed. - #[instrument(target = "trie::sparse", skip_all)] + #[instrument(level = "trace", target = "trie::sparse", skip_all)] pub fn update_account( &mut self, address: B256, diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index ab0506b9364..87df9cab2f6 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -175,7 +175,6 @@ impl SparseTrie { /// and resetting the trie to only contain an empty root node. /// /// Note: This method will error if the trie is blinded. - #[instrument(target = "trie::sparse", skip_all)] pub fn wipe(&mut self) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; revealed.wipe(); @@ -192,7 +191,6 @@ impl SparseTrie { /// /// - `Some(B256)` with the calculated root hash if the trie is revealed. /// - `None` if the trie is still blind. - #[instrument(target = "trie::sparse", skip_all)] pub fn root(&mut self) -> Option { Some(self.as_revealed_mut()?.root()) } @@ -232,7 +230,7 @@ impl SparseTrie { /// # Errors /// /// Returns an error if the trie is still blind, or if the update fails. - #[instrument(target = "trie::sparse", skip_all)] + #[instrument(level = "trace", target = "trie::sparse", skip_all)] pub fn update_leaf( &mut self, path: Nibbles, @@ -249,7 +247,7 @@ impl SparseTrie { /// # Errors /// /// Returns an error if the trie is still blind, or if the leaf cannot be removed - #[instrument(target = "trie::sparse", skip_all)] + #[instrument(level = "trace", target = "trie::sparse", skip_all)] pub fn remove_leaf( &mut self, path: &Nibbles, @@ -615,7 +613,7 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } - #[instrument(target = "trie::sparse::serial", skip(self, provider))] + #[instrument(level = "trace", target = "trie::sparse::serial", skip(self, provider))] fn update_leaf( &mut self, full_path: Nibbles, @@ -753,7 +751,7 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } - #[instrument(target = "trie::sparse::serial", skip(self, provider))] + #[instrument(level = "trace", target = "trie::sparse::serial", skip(self, provider))] fn remove_leaf( &mut self, full_path: &Nibbles, @@ -1385,7 +1383,7 @@ impl SerialSparseTrie { /// /// This function identifies all nodes that have changed (based on the prefix set) at the given /// depth and recalculates their RLP representation. 
- #[instrument(target = "trie::sparse::serial", skip(self))] + #[instrument(level = "trace", target = "trie::sparse::serial", skip(self))] pub fn update_rlp_node_level(&mut self, depth: usize) { // Take the current prefix set let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); @@ -1431,7 +1429,7 @@ impl SerialSparseTrie { /// specified depth. /// - A `PrefixSetMut` containing paths shallower than the specified depth that still need to be /// tracked for future updates. - #[instrument(target = "trie::sparse::serial", skip(self))] + #[instrument(level = "trace", target = "trie::sparse::serial", skip(self))] fn get_changed_nodes_at_depth( &self, prefix_set: &mut PrefixSet, @@ -1518,7 +1516,7 @@ impl SerialSparseTrie { /// # Panics /// /// If the node at provided path does not exist. - #[instrument(target = "trie::sparse::serial", skip_all, ret(level = "trace"))] + #[instrument(level = "trace", target = "trie::sparse::serial", skip_all, ret(level = "trace"))] pub fn rlp_node( &mut self, prefix_set: &mut PrefixSet, From 7e59141c4b99ca84a71edcd6a21b75bdeede1f86 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Mon, 27 Oct 2025 16:18:48 +0100 Subject: [PATCH 216/371] fix(trie): Rewrite InMemoryTrieOverlay (with proptests!) (#19277) --- crates/trie/trie/src/forward_cursor.rs | 16 +- crates/trie/trie/src/trie_cursor/in_memory.rs | 410 +++++++++++++++--- 2 files changed, 369 insertions(+), 57 deletions(-) diff --git a/crates/trie/trie/src/forward_cursor.rs b/crates/trie/trie/src/forward_cursor.rs index b1b6c041289..c99b0d049ee 100644 --- a/crates/trie/trie/src/forward_cursor.rs +++ b/crates/trie/trie/src/forward_cursor.rs @@ -23,8 +23,9 @@ impl<'a, K, V> ForwardInMemoryCursor<'a, K, V> { self.is_empty } + /// Returns the current entry pointed to be the cursor, or `None` if no entries are left. #[inline] - fn peek(&self) -> Option<&(K, V)> { + pub fn current(&self) -> Option<&(K, V)> { self.entries.clone().next() } @@ -59,7 +60,7 @@ where fn advance_while(&mut self, predicate: impl Fn(&K) -> bool) -> Option<(K, V)> { let mut entry; loop { - entry = self.peek(); + entry = self.current(); if entry.is_some_and(|(k, _)| predicate(k)) { self.next(); } else { @@ -77,20 +78,21 @@ mod tests { #[test] fn test_cursor() { let mut cursor = ForwardInMemoryCursor::new(&[(1, ()), (2, ()), (3, ()), (4, ()), (5, ())]); + assert_eq!(cursor.current(), Some(&(1, ()))); assert_eq!(cursor.seek(&0), Some((1, ()))); - assert_eq!(cursor.peek(), Some(&(1, ()))); + assert_eq!(cursor.current(), Some(&(1, ()))); assert_eq!(cursor.seek(&3), Some((3, ()))); - assert_eq!(cursor.peek(), Some(&(3, ()))); + assert_eq!(cursor.current(), Some(&(3, ()))); assert_eq!(cursor.seek(&3), Some((3, ()))); - assert_eq!(cursor.peek(), Some(&(3, ()))); + assert_eq!(cursor.current(), Some(&(3, ()))); assert_eq!(cursor.seek(&4), Some((4, ()))); - assert_eq!(cursor.peek(), Some(&(4, ()))); + assert_eq!(cursor.current(), Some(&(4, ()))); assert_eq!(cursor.seek(&6), None); - assert_eq!(cursor.peek(), None); + assert_eq!(cursor.current(), None); } } diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index e76bf7b2be3..d9658150f3a 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -69,10 +69,15 @@ where pub struct InMemoryTrieCursor<'a, C> { /// The underlying cursor. If None then it is assumed there is no DB data. cursor: Option, + /// Entry that `cursor` is currently pointing to. 
+ cursor_entry: Option<(Nibbles, BranchNodeCompact)>, /// Forward-only in-memory cursor over storage trie nodes. in_memory_cursor: ForwardInMemoryCursor<'a, Nibbles, Option>, - /// Last key returned by the cursor. + /// The key most recently returned from the Cursor. last_key: Option, + #[cfg(debug_assertions)] + /// Whether an initial seek was called. + seeked: bool, } impl<'a, C: TrieCursor> InMemoryTrieCursor<'a, C> { @@ -83,47 +88,84 @@ impl<'a, C: TrieCursor> InMemoryTrieCursor<'a, C> { trie_updates: &'a [(Nibbles, Option)], ) -> Self { let in_memory_cursor = ForwardInMemoryCursor::new(trie_updates); - Self { cursor, in_memory_cursor, last_key: None } + Self { + cursor, + cursor_entry: None, + in_memory_cursor, + last_key: None, + #[cfg(debug_assertions)] + seeked: false, + } } - fn seek_inner( - &mut self, - key: Nibbles, - exact: bool, - ) -> Result, DatabaseError> { - let mut mem_entry = self.in_memory_cursor.seek(&key); - let mut db_entry = self.cursor.as_mut().map(|c| c.seek(key)).transpose()?.flatten(); - - // exact matching is easy, if overlay has a value then return that (updated or removed), or - // if db has a value then return that. - if exact { - return Ok(match (mem_entry, db_entry) { - (Some((mem_key, entry_inner)), _) if mem_key == key => { - entry_inner.map(|node| (key, node)) - } - (_, Some((db_key, node))) if db_key == key => Some((key, node)), - _ => None, - }) + /// Asserts that the next entry to be returned from the cursor is not previous to the last entry + /// returned. + fn set_last_key(&mut self, next_entry: &Option<(Nibbles, BranchNodeCompact)>) { + let next_key = next_entry.as_ref().map(|e| e.0); + debug_assert!( + self.last_key.is_none_or(|last| next_key.is_none_or(|next| next >= last)), + "Cannot return entry {:?} previous to the last returned entry at {:?}", + next_key, + self.last_key, + ); + self.last_key = next_key; + } + + /// Seeks the `cursor_entry` field of the struct using the cursor. + fn cursor_seek(&mut self, key: Nibbles) -> Result<(), DatabaseError> { + if let Some(entry) = self.cursor_entry.as_ref() && + entry.0 >= key + { + // If already seeked to the given key then don't do anything. Also if we're seeked past + // the given key then don't anything, because `TrieCursor` is specifically a + // forward-only cursor. + } else { + self.cursor_entry = self.cursor.as_mut().map(|c| c.seek(key)).transpose()?.flatten(); + } + + Ok(()) + } + + /// Seeks the `cursor_entry` field of the struct to the subsequent entry using the cursor. + fn cursor_next(&mut self) -> Result<(), DatabaseError> { + #[cfg(debug_assertions)] + { + debug_assert!(self.seeked); + } + + // If the previous entry is `None`, and we've done a seek previously, then the cursor is + // exhausted and we shouldn't call `next` again. + if self.cursor_entry.is_some() { + self.cursor_entry = self.cursor.as_mut().map(|c| c.next()).transpose()?.flatten(); } + Ok(()) + } + + /// Compares the current in-memory entry with the current entry of the cursor, and applies the + /// in-memory entry to the cursor entry as an overlay. + // + /// This may consume and move forward the current entries when the overlay indicates a removed + /// node. 
+ fn choose_next_entry(&mut self) -> Result, DatabaseError> { loop { - match (mem_entry, &db_entry) { + match (self.in_memory_cursor.current().cloned(), &self.cursor_entry) { (Some((mem_key, None)), _) - if db_entry.as_ref().is_none_or(|(db_key, _)| &mem_key < db_key) => + if self.cursor_entry.as_ref().is_none_or(|(db_key, _)| &mem_key < db_key) => { // If overlay has a removed node but DB cursor is exhausted or ahead of the // in-memory cursor then move ahead in-memory, as there might be further // non-removed overlay nodes. - mem_entry = self.in_memory_cursor.first_after(&mem_key); + self.in_memory_cursor.first_after(&mem_key); } (Some((mem_key, None)), Some((db_key, _))) if &mem_key == db_key => { // If overlay has a removed node which is returned from DB then move both // cursors ahead to the next key. - mem_entry = self.in_memory_cursor.first_after(&mem_key); - db_entry = self.cursor.as_mut().map(|c| c.next()).transpose()?.flatten(); + self.in_memory_cursor.first_after(&mem_key); + self.cursor_next()?; } (Some((mem_key, Some(node))), _) - if db_entry.as_ref().is_none_or(|(db_key, _)| &mem_key <= db_key) => + if self.cursor_entry.as_ref().is_none_or(|(db_key, _)| &mem_key <= db_key) => { // If overlay returns a node prior to the DB's node, or the DB is exhausted, // then we return the overlay's node. @@ -133,18 +175,10 @@ impl<'a, C: TrieCursor> InMemoryTrieCursor<'a, C> { // - mem_key > db_key // - overlay is exhausted // Return the db_entry. If DB is also exhausted then this returns None. - _ => return Ok(db_entry), + _ => return Ok(self.cursor_entry.clone()), } } } - - fn next_inner( - &mut self, - last: Nibbles, - ) -> Result, DatabaseError> { - let Some(key) = last.increment() else { return Ok(None) }; - self.seek_inner(key, false) - } } impl TrieCursor for InMemoryTrieCursor<'_, C> { @@ -152,8 +186,23 @@ impl TrieCursor for InMemoryTrieCursor<'_, C> { &mut self, key: Nibbles, ) -> Result, DatabaseError> { - let entry = self.seek_inner(key, true)?; - self.last_key = entry.as_ref().map(|(nibbles, _)| *nibbles); + self.cursor_seek(key)?; + let mem_entry = self.in_memory_cursor.seek(&key); + + #[cfg(debug_assertions)] + { + self.seeked = true; + } + + let entry = match (mem_entry, &self.cursor_entry) { + (Some((mem_key, entry_inner)), _) if mem_key == key => { + entry_inner.map(|node| (key, node)) + } + (_, Some((db_key, node))) if db_key == &key => Some((key, node.clone())), + _ => None, + }; + + self.set_last_key(&entry); Ok(entry) } @@ -161,22 +210,47 @@ impl TrieCursor for InMemoryTrieCursor<'_, C> { &mut self, key: Nibbles, ) -> Result, DatabaseError> { - let entry = self.seek_inner(key, false)?; - self.last_key = entry.as_ref().map(|(nibbles, _)| *nibbles); + self.cursor_seek(key)?; + self.in_memory_cursor.seek(&key); + + #[cfg(debug_assertions)] + { + self.seeked = true; + } + + let entry = self.choose_next_entry()?; + self.set_last_key(&entry); Ok(entry) } fn next(&mut self) -> Result, DatabaseError> { - let next = match &self.last_key { - Some(last) => { - let entry = self.next_inner(*last)?; - self.last_key = entry.as_ref().map(|entry| entry.0); - entry - } - // no previous entry was found - None => None, + #[cfg(debug_assertions)] + { + debug_assert!(self.seeked, "Cursor must be seek'd before next is called"); + } + + // A `last_key` of `None` indicates that the cursor is exhausted. 
+ let Some(last_key) = self.last_key else { + return Ok(None); }; - Ok(next) + + // If either cursor is currently pointing to the last entry which was returned then consume + // that entry so that `choose_next_entry` is looking at the subsequent one. + if let Some((key, _)) = self.in_memory_cursor.current() && + key == &last_key + { + self.in_memory_cursor.first_after(&last_key); + } + + if let Some((key, _)) = &self.cursor_entry && + key == &last_key + { + self.cursor_next()?; + } + + let entry = self.choose_next_entry()?; + self.set_last_key(&entry); + Ok(entry) } fn current(&mut self) -> Result, DatabaseError> { @@ -218,8 +292,10 @@ mod tests { results.push(entry); } - while let Ok(Some(entry)) = cursor.next() { - results.push(entry); + if !test_case.expected_results.is_empty() { + while let Ok(Some(entry)) = cursor.next() { + results.push(entry); + } } assert_eq!( @@ -501,4 +577,238 @@ mod tests { cursor.next().unwrap(); assert_eq!(cursor.current().unwrap(), Some(Nibbles::from_nibbles([0x3]))); } + + mod proptest_tests { + use super::*; + use itertools::Itertools; + use proptest::prelude::*; + + /// Merge `db_nodes` with `in_memory_nodes`, applying the in-memory overlay. + /// This properly handles deletions (None values in `in_memory_nodes`). + fn merge_with_overlay( + db_nodes: Vec<(Nibbles, BranchNodeCompact)>, + in_memory_nodes: Vec<(Nibbles, Option)>, + ) -> Vec<(Nibbles, BranchNodeCompact)> { + db_nodes + .into_iter() + .merge_join_by(in_memory_nodes, |db_entry, mem_entry| db_entry.0.cmp(&mem_entry.0)) + .filter_map(|entry| match entry { + // Only in db: keep it + itertools::EitherOrBoth::Left((key, node)) => Some((key, node)), + // Only in memory: keep if not a deletion + itertools::EitherOrBoth::Right((key, node_opt)) => { + node_opt.map(|node| (key, node)) + } + // In both: memory takes precedence (keep if not a deletion) + itertools::EitherOrBoth::Both(_, (key, node_opt)) => { + node_opt.map(|node| (key, node)) + } + }) + .collect() + } + + /// Generate a strategy for a `BranchNodeCompact` with simplified parameters. + /// The constraints are: + /// - `tree_mask` must be a subset of `state_mask` + /// - `hash_mask` must be a subset of `state_mask` + /// - `hash_mask.count_ones()` must equal `hashes.len()` + /// + /// To keep it simple, we use an empty hashes vec and `hash_mask` of 0. 
+ fn branch_node_strategy() -> impl Strategy { + any::() + .prop_flat_map(|state_mask| { + let tree_mask_strategy = any::().prop_map(move |tree| tree & state_mask); + (Just(state_mask), tree_mask_strategy) + }) + .prop_map(|(state_mask, tree_mask)| { + BranchNodeCompact::new(state_mask, tree_mask, 0, vec![], None) + }) + } + + /// Generate a sorted vector of (Nibbles, `BranchNodeCompact`) entries + fn sorted_db_nodes_strategy() -> impl Strategy> { + prop::collection::vec( + (prop::collection::vec(any::(), 0..3), branch_node_strategy()), + 0..20, + ) + .prop_map(|entries| { + // Convert Vec to Nibbles and sort + let mut result: Vec<(Nibbles, BranchNodeCompact)> = entries + .into_iter() + .map(|(bytes, node)| (Nibbles::from_nibbles_unchecked(bytes), node)) + .collect(); + result.sort_by(|a, b| a.0.cmp(&b.0)); + result.dedup_by(|a, b| a.0 == b.0); + result + }) + } + + /// Generate a sorted vector of (Nibbles, Option) entries + fn sorted_in_memory_nodes_strategy( + ) -> impl Strategy)>> { + prop::collection::vec( + ( + prop::collection::vec(any::(), 0..3), + prop::option::of(branch_node_strategy()), + ), + 0..20, + ) + .prop_map(|entries| { + // Convert Vec to Nibbles and sort + let mut result: Vec<(Nibbles, Option)> = entries + .into_iter() + .map(|(bytes, node)| (Nibbles::from_nibbles_unchecked(bytes), node)) + .collect(); + result.sort_by(|a, b| a.0.cmp(&b.0)); + result.dedup_by(|a, b| a.0 == b.0); + result + }) + } + + proptest! { + #![proptest_config(ProptestConfig::with_cases(1000))] + + #[test] + fn proptest_in_memory_trie_cursor( + db_nodes in sorted_db_nodes_strategy(), + in_memory_nodes in sorted_in_memory_nodes_strategy(), + op_choices in prop::collection::vec(any::(), 10..500), + ) { + reth_tracing::init_test_tracing(); + use tracing::debug; + + debug!("Starting proptest!"); + + // Create the expected results by merging the two sorted vectors, + // properly handling deletions (None values in in_memory_nodes) + let expected_combined = merge_with_overlay(db_nodes.clone(), in_memory_nodes.clone()); + + // Collect all keys for operation generation + let all_keys: Vec = expected_combined.iter().map(|(k, _)| *k).collect(); + + // Create a control cursor using the combined result with a mock cursor + let control_db_map: BTreeMap = + expected_combined.into_iter().collect(); + let control_db_arc = Arc::new(control_db_map); + let control_visited_keys = Arc::new(Mutex::new(Vec::new())); + let mut control_cursor = MockTrieCursor::new(control_db_arc, control_visited_keys); + + // Create the InMemoryTrieCursor being tested + let db_nodes_map: BTreeMap = + db_nodes.into_iter().collect(); + let db_nodes_arc = Arc::new(db_nodes_map); + let visited_keys = Arc::new(Mutex::new(Vec::new())); + let mock_cursor = MockTrieCursor::new(db_nodes_arc, visited_keys); + let mut test_cursor = InMemoryTrieCursor::new(Some(mock_cursor), &in_memory_nodes); + + // Test: seek to the beginning first + let control_first = control_cursor.seek(Nibbles::default()).unwrap(); + let test_first = test_cursor.seek(Nibbles::default()).unwrap(); + debug!( + control=?control_first.as_ref().map(|(k, _)| k), + test=?test_first.as_ref().map(|(k, _)| k), + "Initial seek returned", + ); + assert_eq!(control_first, test_first, "Initial seek mismatch"); + + // If both cursors returned None, nothing to test + if control_first.is_none() && test_first.is_none() { + return Ok(()); + } + + // Track the last key returned from the cursor + let mut last_returned_key = control_first.as_ref().map(|(k, _)| *k); + + // Execute a sequence of random 
operations + for choice in op_choices { + let op_type = choice % 3; + + match op_type { + 0 => { + // Next operation + let control_result = control_cursor.next().unwrap(); + let test_result = test_cursor.next().unwrap(); + debug!( + control=?control_result.as_ref().map(|(k, _)| k), + test=?test_result.as_ref().map(|(k, _)| k), + "Next returned", + ); + assert_eq!(control_result, test_result, "Next operation mismatch"); + + last_returned_key = control_result.as_ref().map(|(k, _)| *k); + + // Stop if both cursors are exhausted + if control_result.is_none() && test_result.is_none() { + break; + } + } + 1 => { + // Seek operation - choose a key >= last_returned_key + if all_keys.is_empty() { + continue; + } + + let valid_keys: Vec<_> = all_keys + .iter() + .filter(|k| last_returned_key.is_none_or(|last| **k >= last)) + .collect(); + + if valid_keys.is_empty() { + continue; + } + + let key = *valid_keys[(choice as usize / 3) % valid_keys.len()]; + + let control_result = control_cursor.seek(key).unwrap(); + let test_result = test_cursor.seek(key).unwrap(); + debug!( + control=?control_result.as_ref().map(|(k, _)| k), + test=?test_result.as_ref().map(|(k, _)| k), + ?key, + "Seek returned", + ); + assert_eq!(control_result, test_result, "Seek operation mismatch for key {:?}", key); + + last_returned_key = control_result.as_ref().map(|(k, _)| *k); + + // Stop if both cursors are exhausted + if control_result.is_none() && test_result.is_none() { + break; + } + } + _ => { + // SeekExact operation - choose a key >= last_returned_key + if all_keys.is_empty() { + continue; + } + + let valid_keys: Vec<_> = all_keys + .iter() + .filter(|k| last_returned_key.is_none_or(|last| **k >= last)) + .collect(); + + if valid_keys.is_empty() { + continue; + } + + let key = *valid_keys[(choice as usize / 3) % valid_keys.len()]; + + let control_result = control_cursor.seek_exact(key).unwrap(); + let test_result = test_cursor.seek_exact(key).unwrap(); + debug!( + control=?control_result.as_ref().map(|(k, _)| k), + test=?test_result.as_ref().map(|(k, _)| k), + ?key, + "SeekExact returned", + ); + assert_eq!(control_result, test_result, "SeekExact operation mismatch for key {:?}", key); + + // seek_exact updates the last_key internally but only if it found something + last_returned_key = control_result.as_ref().map(|(k, _)| *k); + } + } + } + } + } + } } From f9c89a9bc9dd05cd3b3f13e20f3fa81656a04122 Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Mon, 27 Oct 2025 11:59:04 -0400 Subject: [PATCH 217/371] feat(jovian/block-validation): fix block validation for jovian (#19304) --- crates/optimism/consensus/src/lib.rs | 267 +++++++++++++++++- .../optimism/consensus/src/validation/mod.rs | 69 ++++- 2 files changed, 333 insertions(+), 3 deletions(-) diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 93768dcc696..25e11be9ace 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -65,7 +65,7 @@ where block: &RecoveredBlock, result: &BlockExecutionResult, ) -> Result<(), ConsensusError> { - validate_block_post_execution(block.header(), &self.chain_spec, &result.receipts) + validate_block_post_execution(block.header(), &self.chain_spec, result) } } @@ -111,7 +111,13 @@ where return Ok(()) } - if self.chain_spec.is_ecotone_active_at_timestamp(block.timestamp()) { + // Blob gas used validation + // In Jovian, the blob gas used computation has changed. 
We are moving the blob base fee + // validation to post-execution since the DA footprint calculation is stateful. + // Pre-execution we only validate that the blob gas used is present in the header. + if self.chain_spec.is_jovian_active_at_timestamp(block.timestamp()) { + block.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + } else if self.chain_spec.is_ecotone_active_at_timestamp(block.timestamp()) { validate_cancun_gas(block)?; } @@ -190,3 +196,260 @@ where Ok(()) } } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use alloy_consensus::{BlockBody, Eip658Value, Header, Receipt, TxEip7702, TxReceipt}; + use alloy_eips::{eip4895::Withdrawals, eip7685::Requests}; + use alloy_primitives::{Address, Bytes, Signature, U256}; + use op_alloy_consensus::OpTypedTransaction; + use reth_consensus::{Consensus, ConsensusError, FullConsensus}; + use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder, OP_MAINNET}; + use reth_optimism_primitives::{OpPrimitives, OpReceipt, OpTransactionSigned}; + use reth_primitives_traits::{proofs, GotExpected, RecoveredBlock, SealedBlock}; + use reth_provider::BlockExecutionResult; + + use crate::OpBeaconConsensus; + + fn mock_tx(nonce: u64) -> OpTransactionSigned { + let tx = TxEip7702 { + chain_id: 1u64, + nonce, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + input: Bytes::from(vec![1, 2]), + access_list: Default::default(), + authorization_list: Default::default(), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature) + } + + #[test] + fn test_block_blob_gas_used_validation_isthmus() { + let chain_spec = OpChainSpecBuilder::default() + .isthmus_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let header = Header { + base_fee_per_gas: Some(1337), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + timestamp: u64::MAX, + ..Default::default() + }; + let body = BlockBody { + transactions: vec![transaction], + ommers: vec![], + withdrawals: Some(Withdrawals::default()), + }; + + let block = SealedBlock::seal_slow(alloy_consensus::Block { header, body }); + + // validate blob, it should pass blob gas used validation + let pre_execution = beacon_consensus.validate_block_pre_execution(&block); + + assert!(pre_execution.is_ok()); + } + + #[test] + fn test_block_blob_gas_used_validation_failure_isthmus() { + let chain_spec = OpChainSpecBuilder::default() + .isthmus_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let header = Header { + base_fee_per_gas: Some(1337), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(10), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + timestamp: u64::MAX, + ..Default::default() + }; + let body = BlockBody { + transactions: vec![transaction], + ommers: vec![], + withdrawals: Some(Withdrawals::default()), + }; + + let block = 
SealedBlock::seal_slow(alloy_consensus::Block { header, body }); + + // validate blob, it should fail blob gas used validation + let pre_execution = beacon_consensus.validate_block_pre_execution(&block); + + assert!(pre_execution.is_err()); + assert_eq!( + pre_execution.unwrap_err(), + ConsensusError::BlobGasUsedDiff(GotExpected { got: 10, expected: 0 }) + ); + } + + #[test] + fn test_block_blob_gas_used_validation_jovian() { + const BLOB_GAS_USED: u64 = 1000; + const GAS_USED: u64 = 10; + + let chain_spec = OpChainSpecBuilder::default() + .jovian_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt { + status: Eip658Value::success(), + cumulative_gas_used: GAS_USED, + logs: vec![], + }); + + let header = Header { + base_fee_per_gas: Some(1337), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(BLOB_GAS_USED), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + timestamp: u64::MAX, + gas_used: GAS_USED, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref( + &receipt.with_bloom_ref(), + )), + logs_bloom: receipt.bloom(), + ..Default::default() + }; + let body = BlockBody { + transactions: vec![transaction], + ommers: vec![], + withdrawals: Some(Withdrawals::default()), + }; + + let block = SealedBlock::seal_slow(alloy_consensus::Block { header, body }); + + let result = BlockExecutionResult:: { + blob_gas_used: BLOB_GAS_USED, + receipts: vec![receipt], + requests: Requests::default(), + gas_used: GAS_USED, + }; + + // validate blob, it should pass blob gas used validation + let pre_execution = beacon_consensus.validate_block_pre_execution(&block); + + assert!(pre_execution.is_ok()); + + let block = RecoveredBlock::new_sealed(block, vec![Address::default()]); + + let post_execution = as FullConsensus>::validate_block_post_execution( + &beacon_consensus, + &block, + &result + ); + + // validate blob, it should pass blob gas used validation + assert!(post_execution.is_ok()); + } + + #[test] + fn test_block_blob_gas_used_validation_failure_jovian() { + const BLOB_GAS_USED: u64 = 1000; + const GAS_USED: u64 = 10; + + let chain_spec = OpChainSpecBuilder::default() + .jovian_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt { + status: Eip658Value::success(), + cumulative_gas_used: GAS_USED, + logs: vec![], + }); + + let header = Header { + base_fee_per_gas: Some(1337), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(BLOB_GAS_USED), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: GAS_USED, + timestamp: u64::MAX, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + ..Default::default() + }; + let body = BlockBody { + transactions: vec![transaction], + ommers: vec![], + withdrawals: Some(Withdrawals::default()), + }; + + let block = SealedBlock::seal_slow(alloy_consensus::Block { header, body }); + + let result = BlockExecutionResult:: { + blob_gas_used: BLOB_GAS_USED + 1, + receipts: vec![receipt], + requests: 
Requests::default(), + gas_used: GAS_USED, + }; + + // validate blob, it should pass blob gas used validation + let pre_execution = beacon_consensus.validate_block_pre_execution(&block); + + assert!(pre_execution.is_ok()); + + let block = RecoveredBlock::new_sealed(block, vec![Address::default()]); + + let post_execution = as FullConsensus>::validate_block_post_execution( + &beacon_consensus, + &block, + &result + ); + + // validate blob, it should fail blob gas used validation post execution. + assert!(post_execution.is_err()); + assert_eq!( + post_execution.unwrap_err(), + ConsensusError::BlobGasUsedDiff(GotExpected { + got: BLOB_GAS_USED + 1, + expected: BLOB_GAS_USED, + }) + ); + } +} diff --git a/crates/optimism/consensus/src/validation/mod.rs b/crates/optimism/consensus/src/validation/mod.rs index 2dd4cea0904..8509a97e7a4 100644 --- a/crates/optimism/consensus/src/validation/mod.rs +++ b/crates/optimism/consensus/src/validation/mod.rs @@ -4,6 +4,7 @@ pub mod canyon; pub mod isthmus; // Re-export the decode_holocene_base_fee function for compatibility +use reth_execution_types::BlockExecutionResult; pub use reth_optimism_chainspec::decode_holocene_base_fee; use crate::proof::calculate_receipt_root_optimism; @@ -87,8 +88,24 @@ where pub fn validate_block_post_execution( header: impl BlockHeader, chain_spec: impl OpHardforks, - receipts: &[R], + result: &BlockExecutionResult, ) -> Result<(), ConsensusError> { + // Validate that the blob gas used is present and correctly computed if Jovian is active. + if chain_spec.is_jovian_active_at_timestamp(header.timestamp()) { + let computed_blob_gas_used = result.blob_gas_used; + let header_blob_gas_used = + header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + + if computed_blob_gas_used != header_blob_gas_used { + return Err(ConsensusError::BlobGasUsedDiff(GotExpected { + got: computed_blob_gas_used, + expected: header_blob_gas_used, + })); + } + } + + let receipts = &result.receipts; + // Before Byzantium, receipts contained state root that would mean that expensive // operation as hashing that is required for state root got calculated in every // transaction This was replaced with is_success flag. 
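The Jovian check added above reduces to comparing the header's `blob_gas_used` field against the value accumulated during execution, erroring if the field is missing or the two disagree. A minimal standalone sketch of that comparison, with simplified stand-in types and a stubbed hardfork lookup (none of these are the actual reth types):

```rust
// Simplified stand-ins for the header, execution result, and error type.
struct Header { timestamp: u64, blob_gas_used: Option<u64> }
struct ExecutionResult { blob_gas_used: u64 }

#[derive(Debug, PartialEq)]
enum ConsensusError {
    BlobGasUsedMissing,
    BlobGasUsedDiff { got: u64, expected: u64 },
}

// Stub for the hardfork schedule lookup; the threshold is illustrative only.
fn is_jovian_active(timestamp: u64) -> bool {
    timestamp >= 1_900_000_000
}

fn validate_blob_gas_used(header: &Header, result: &ExecutionResult) -> Result<(), ConsensusError> {
    if is_jovian_active(header.timestamp) {
        // Header must carry the field, and it must match the computed value.
        let expected = header.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)?;
        if result.blob_gas_used != expected {
            return Err(ConsensusError::BlobGasUsedDiff { got: result.blob_gas_used, expected });
        }
    }
    Ok(())
}

fn main() {
    let header = Header { timestamp: 1_900_000_000, blob_gas_used: Some(1000) };
    assert!(validate_blob_gas_used(&header, &ExecutionResult { blob_gas_used: 1000 }).is_ok());
    assert_eq!(
        validate_blob_gas_used(&header, &ExecutionResult { blob_gas_used: 1001 }),
        Err(ConsensusError::BlobGasUsedDiff { got: 1001, expected: 1000 })
    );
}
```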
@@ -176,11 +193,13 @@ fn compare_receipts_root_and_logs_bloom( mod tests { use super::*; use alloy_consensus::Header; + use alloy_eips::eip7685::Requests; use alloy_primitives::{b256, hex, Bytes, U256}; use op_alloy_consensus::OpTxEnvelope; use reth_chainspec::{BaseFeeParams, ChainSpec, EthChainSpec, ForkCondition, Hardfork}; use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA}; use reth_optimism_forks::{OpHardfork, BASE_SEPOLIA_HARDFORKS}; + use reth_optimism_primitives::OpReceipt; use std::sync::Arc; const JOVIAN_TIMESTAMP: u64 = 1900000000; @@ -502,4 +521,52 @@ mod tests { body.withdrawals.take(); validate_body_against_header_op(&chainspec, &body, &header).unwrap_err(); } + + #[test] + fn test_jovian_blob_gas_used_validation() { + const BLOB_GAS_USED: u64 = 1000; + const GAS_USED: u64 = 5000; + + let chainspec = jovian_chainspec(); + let header = Header { + timestamp: JOVIAN_TIMESTAMP, + blob_gas_used: Some(BLOB_GAS_USED), + ..Default::default() + }; + + let result = BlockExecutionResult:: { + blob_gas_used: BLOB_GAS_USED, + receipts: vec![], + requests: Requests::default(), + gas_used: GAS_USED, + }; + validate_block_post_execution(&header, &chainspec, &result).unwrap(); + } + + #[test] + fn test_jovian_blob_gas_used_validation_mismatched() { + const BLOB_GAS_USED: u64 = 1000; + const GAS_USED: u64 = 5000; + + let chainspec = jovian_chainspec(); + let header = Header { + timestamp: JOVIAN_TIMESTAMP, + blob_gas_used: Some(BLOB_GAS_USED + 1), + ..Default::default() + }; + + let result = BlockExecutionResult:: { + blob_gas_used: BLOB_GAS_USED, + receipts: vec![], + requests: Requests::default(), + gas_used: GAS_USED, + }; + assert_eq!( + validate_block_post_execution(&header, &chainspec, &result), + Err(ConsensusError::BlobGasUsedDiff(GotExpected { + got: BLOB_GAS_USED, + expected: BLOB_GAS_USED + 1, + })) + ); + } } From 0569e884c4be872ff70b250fff802e5bb257f042 Mon Sep 17 00:00:00 2001 From: Gengar Date: Mon, 27 Oct 2025 17:59:48 +0200 Subject: [PATCH 218/371] docs: improve documentation for mock database and transactions (#19302) --- crates/storage/db-api/src/mock.rs | 135 ++++++++++++++++++++++++++++-- 1 file changed, 129 insertions(+), 6 deletions(-) diff --git a/crates/storage/db-api/src/mock.rs b/crates/storage/db-api/src/mock.rs index 4a8440cb950..60f69ae8f0d 100644 --- a/crates/storage/db-api/src/mock.rs +++ b/crates/storage/db-api/src/mock.rs @@ -1,4 +1,7 @@ -//! Mock database +//! Mock database implementation for testing and development. +//! +//! Provides lightweight mock implementations of database traits. All operations +//! are no-ops that return default values without persisting data. use crate::{ common::{IterPairResult, PairResult, ValueOnlyResult}, @@ -15,20 +18,35 @@ use crate::{ use core::ops::Bound; use std::{collections::BTreeMap, ops::RangeBounds}; -/// Mock database used for testing with inner `BTreeMap` structure +/// Mock database implementation for testing and development. +/// +/// Provides a lightweight implementation of the [`Database`] trait suitable +/// for testing scenarios where actual database operations are not required. #[derive(Clone, Debug, Default)] pub struct DatabaseMock { - /// Main data. TODO (Make it table aware) + /// Internal data storage using a `BTreeMap`. + /// + /// TODO: Make the mock database table-aware by properly utilizing + /// this data structure to simulate realistic database behavior during testing. 
pub data: BTreeMap, Vec>, } impl Database for DatabaseMock { type TX = TxMock; type TXMut = TxMock; + + /// Creates a new read-only transaction. + /// + /// This always succeeds and returns a default [`TxMock`] instance. + /// The mock transaction doesn't actually perform any database operations. fn tx(&self) -> Result { Ok(TxMock::default()) } + /// Creates a new read-write transaction. + /// + /// This always succeeds and returns a default [`TxMock`] instance. + /// The mock transaction doesn't actually perform any database operations. fn tx_mut(&self) -> Result { Ok(TxMock::default()) } @@ -36,10 +54,14 @@ impl Database for DatabaseMock { impl DatabaseMetrics for DatabaseMock {} -/// Mock read only tx +/// Mock transaction implementation for testing and development. +/// +/// Implements both [`DbTx`] and [`DbTxMut`] traits. All operations are no-ops +/// that return success or default values, suitable for testing database operations +/// without side effects. #[derive(Debug, Clone, Default)] pub struct TxMock { - /// Table representation + /// Internal table representation (currently unused). _table: BTreeMap, Vec>, } @@ -47,10 +69,20 @@ impl DbTx for TxMock { type Cursor = CursorMock; type DupCursor = CursorMock; + /// Retrieves a value by key from the specified table. + /// + /// **Mock behavior**: Always returns `None` regardless of the key. + /// This simulates a table with no data, which is typical for testing + /// scenarios where you want to verify that read operations are called + /// correctly without actually storing data. fn get(&self, _key: T::Key) -> Result, DatabaseError> { Ok(None) } + /// Retrieves a value by encoded key from the specified table. + /// + /// **Mock behavior**: Always returns `None` regardless of the encoded key. + /// This is equivalent to [`Self::get`] but works with pre-encoded keys. fn get_by_encoded_key( &self, _key: &::Encoded, @@ -58,24 +90,48 @@ impl DbTx for TxMock { Ok(None) } + /// Commits the transaction. + /// + /// **Mock behavior**: Always returns `Ok(true)`, indicating successful commit. + /// No actual data is persisted since this is a mock implementation. fn commit(self) -> Result { Ok(true) } + /// Aborts the transaction. + /// + /// **Mock behavior**: No-op. Since no data is actually stored in the mock, + /// there's nothing to rollback. fn abort(self) {} + /// Creates a read-only cursor for the specified table. + /// + /// **Mock behavior**: Returns a default [`CursorMock`] that will not + /// iterate over any data (all cursor operations return `None`). fn cursor_read(&self) -> Result, DatabaseError> { Ok(CursorMock { _cursor: 0 }) } + /// Creates a read-only duplicate cursor for the specified duplicate sort table. + /// + /// **Mock behavior**: Returns a default [`CursorMock`] that will not + /// iterate over any data (all cursor operations return `None`). fn cursor_dup_read(&self) -> Result, DatabaseError> { Ok(CursorMock { _cursor: 0 }) } + /// Returns the number of entries in the specified table. + /// + /// **Mock behavior**: Returns the length of the internal `_table` `BTreeMap`, + /// which is typically 0 since no data is actually stored. fn entries(&self) -> Result { Ok(self._table.len()) } + /// Disables long read transaction safety checks. + /// + /// **Mock behavior**: No-op. This is a performance optimization that + /// doesn't apply to the mock implementation. 
fn disable_long_read_transaction_safety(&mut self) {} } @@ -83,10 +139,19 @@ impl DbTxMut for TxMock { type CursorMut = CursorMock; type DupCursorMut = CursorMock; + /// Inserts or updates a key-value pair in the specified table. + /// + /// **Mock behavior**: Always returns `Ok(())` without actually storing + /// the data. This allows tests to verify that write operations are called + /// correctly without side effects. fn put(&self, _key: T::Key, _value: T::Value) -> Result<(), DatabaseError> { Ok(()) } + /// Deletes a key-value pair from the specified table. + /// + /// **Mock behavior**: Always returns `Ok(true)`, indicating successful + /// deletion, without actually removing any data. fn delete( &self, _key: T::Key, @@ -95,14 +160,26 @@ impl DbTxMut for TxMock { Ok(true) } + /// Clears all entries from the specified table. + /// + /// **Mock behavior**: Always returns `Ok(())` without actually clearing + /// any data. This simulates successful table clearing for testing purposes. fn clear(&self) -> Result<(), DatabaseError> { Ok(()) } + /// Creates a write cursor for the specified table. + /// + /// **Mock behavior**: Returns a default [`CursorMock`] that will not + /// iterate over any data and all write operations will be no-ops. fn cursor_write(&self) -> Result, DatabaseError> { Ok(CursorMock { _cursor: 0 }) } + /// Creates a write duplicate cursor for the specified duplicate sort table. + /// + /// **Mock behavior**: Returns a default [`CursorMock`] that will not + /// iterate over any data and all write operations will be no-ops. fn cursor_dup_write(&self) -> Result, DatabaseError> { Ok(CursorMock { _cursor: 0 }) } @@ -110,41 +187,61 @@ impl DbTxMut for TxMock { impl TableImporter for TxMock {} -/// Cursor that iterates over table +/// Mock cursor implementation for testing and development. +/// +/// Implements all cursor traits. All operations are no-ops that return empty +/// results, suitable for testing cursor operations without side effects. #[derive(Debug)] pub struct CursorMock { + /// Internal cursor position (currently unused). _cursor: u32, } impl DbCursorRO for CursorMock { + /// Moves to the first entry in the table. + /// **Mock behavior**: Always returns `None`. fn first(&mut self) -> PairResult { Ok(None) } + /// Seeks to an exact key match. + /// **Mock behavior**: Always returns `None`. fn seek_exact(&mut self, _key: T::Key) -> PairResult { Ok(None) } + /// Seeks to the first key greater than or equal to the given key. + /// **Mock behavior**: Always returns `None`. fn seek(&mut self, _key: T::Key) -> PairResult { Ok(None) } + /// Moves to the next entry. + /// **Mock behavior**: Always returns `None`. fn next(&mut self) -> PairResult { Ok(None) } + /// Moves to the previous entry. + /// **Mock behavior**: Always returns `None`. fn prev(&mut self) -> PairResult { Ok(None) } + /// Moves to the last entry in the table. + /// **Mock behavior**: Always returns `None`. fn last(&mut self) -> PairResult { Ok(None) } + /// Returns the current entry without moving the cursor. + /// **Mock behavior**: Always returns `None`. fn current(&mut self) -> PairResult { Ok(None) } + /// Creates a forward walker starting from the given key. + /// **Mock behavior**: Returns an empty walker that won't iterate over any data. 
fn walk(&mut self, start_key: Option) -> Result, DatabaseError> { let start: IterPairResult = match start_key { Some(key) => >::seek(self, key).transpose(), @@ -154,6 +251,8 @@ impl DbCursorRO for CursorMock { Ok(Walker::new(self, start)) } + /// Creates a range walker for the specified key range. + /// **Mock behavior**: Returns an empty walker that won't iterate over any data. fn walk_range( &mut self, range: impl RangeBounds, @@ -176,6 +275,8 @@ impl DbCursorRO for CursorMock { Ok(RangeWalker::new(self, start, end_key)) } + /// Creates a backward walker starting from the given key. + /// **Mock behavior**: Returns an empty walker that won't iterate over any data. fn walk_back( &mut self, start_key: Option, @@ -189,18 +290,26 @@ impl DbCursorRO for CursorMock { } impl DbDupCursorRO for CursorMock { + /// Moves to the next duplicate entry. + /// **Mock behavior**: Always returns `None`. fn next_dup(&mut self) -> PairResult { Ok(None) } + /// Moves to the next entry with a different key. + /// **Mock behavior**: Always returns `None`. fn next_no_dup(&mut self) -> PairResult { Ok(None) } + /// Moves to the next duplicate value. + /// **Mock behavior**: Always returns `None`. fn next_dup_val(&mut self) -> ValueOnlyResult { Ok(None) } + /// Seeks to a specific key-subkey combination. + /// **Mock behavior**: Always returns `None`. fn seek_by_key_subkey( &mut self, _key: ::Key, @@ -209,6 +318,8 @@ impl DbDupCursorRO for CursorMock { Ok(None) } + /// Creates a duplicate walker for the specified key and subkey. + /// **Mock behavior**: Returns an empty walker that won't iterate over any data. fn walk_dup( &mut self, _key: Option<::Key>, @@ -219,6 +330,8 @@ impl DbDupCursorRO for CursorMock { } impl DbCursorRW for CursorMock { + /// Inserts or updates a key-value pair at the current cursor position. + /// **Mock behavior**: Always succeeds without modifying any data. fn upsert( &mut self, _key: ::Key, @@ -227,6 +340,8 @@ impl DbCursorRW for CursorMock { Ok(()) } + /// Inserts a key-value pair at the current cursor position. + /// **Mock behavior**: Always succeeds without modifying any data. fn insert( &mut self, _key: ::Key, @@ -235,6 +350,8 @@ impl DbCursorRW for CursorMock { Ok(()) } + /// Appends a key-value pair at the end of the table. + /// **Mock behavior**: Always succeeds without modifying any data. fn append( &mut self, _key: ::Key, @@ -243,16 +360,22 @@ impl DbCursorRW for CursorMock { Ok(()) } + /// Deletes the entry at the current cursor position. + /// **Mock behavior**: Always succeeds without modifying any data. fn delete_current(&mut self) -> Result<(), DatabaseError> { Ok(()) } } impl DbDupCursorRW for CursorMock { + /// Deletes all duplicate entries at the current cursor position. + /// **Mock behavior**: Always succeeds without modifying any data. fn delete_current_duplicates(&mut self) -> Result<(), DatabaseError> { Ok(()) } + /// Appends a duplicate key-value pair. + /// **Mock behavior**: Always succeeds without modifying any data. 
fn append_dup(&mut self, _key: ::Key, _value: ::Value) -> Result<(), DatabaseError> { Ok(()) } From b1dfbc7e88ac0ce4e72c6d684f44891d8a5850f5 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 27 Oct 2025 13:07:37 -0400 Subject: [PATCH 219/371] chore: remove trie capacity metrics (#19327) --- .../configured_sparse_trie.rs | 15 ---- crates/trie/sparse-parallel/src/lower.rs | 16 ---- crates/trie/sparse-parallel/src/trie.rs | 25 ------- crates/trie/sparse/src/metrics.rs | 37 ++-------- crates/trie/sparse/src/state.rs | 74 +------------------ crates/trie/sparse/src/traits.rs | 6 -- crates/trie/sparse/src/trie.rs | 24 ------ 7 files changed, 10 insertions(+), 187 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs index 9e8f787823a..b587a721398 100644 --- a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs @@ -172,21 +172,6 @@ impl SparseTrieInterface for ConfiguredSparseTrie { Self::Parallel(trie) => trie.updates_ref(), } } - - fn node_capacity(&self) -> usize { - match self { - Self::Serial(trie) => trie.node_capacity(), - Self::Parallel(trie) => trie.node_capacity(), - } - } - - fn value_capacity(&self) -> usize { - match self { - Self::Serial(trie) => trie.value_capacity(), - Self::Parallel(trie) => trie.value_capacity(), - } - } - fn shrink_nodes_to(&mut self, size: usize) { match self { Self::Serial(trie) => trie.shrink_nodes_to(size), diff --git a/crates/trie/sparse-parallel/src/lower.rs b/crates/trie/sparse-parallel/src/lower.rs index bc8ae006074..b7eceb133b8 100644 --- a/crates/trie/sparse-parallel/src/lower.rs +++ b/crates/trie/sparse-parallel/src/lower.rs @@ -107,22 +107,6 @@ impl LowerSparseSubtrie { } } - /// Returns the capacity of any maps containing trie nodes - pub(crate) fn node_capacity(&self) -> usize { - match self { - Self::Revealed(trie) | Self::Blind(Some(trie)) => trie.node_capacity(), - Self::Blind(None) => 0, - } - } - - /// Returns the capacity of any maps containing trie values - pub(crate) fn value_capacity(&self) -> usize { - match self { - Self::Revealed(trie) | Self::Blind(Some(trie)) => trie.value_capacity(), - Self::Blind(None) => 0, - } - } - /// Shrinks the capacity of the subtrie's node storage. /// Works for both revealed and blind tries with allocated storage. pub(crate) fn shrink_nodes_to(&mut self, size: usize) { diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index c6a99e21071..133cdfece4c 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -876,16 +876,6 @@ impl SparseTrieInterface for ParallelSparseTrie { } } - fn node_capacity(&self) -> usize { - self.upper_subtrie.node_capacity() + - self.lower_subtries.iter().map(|trie| trie.node_capacity()).sum::() - } - - fn value_capacity(&self) -> usize { - self.upper_subtrie.value_capacity() + - self.lower_subtries.iter().map(|trie| trie.value_capacity()).sum::() - } - fn shrink_nodes_to(&mut self, size: usize) { // Distribute the capacity across upper and lower subtries // @@ -2140,16 +2130,6 @@ impl SparseSubtrie { self.inner.clear(); } - /// Returns the capacity of the map containing trie nodes. - pub(crate) fn node_capacity(&self) -> usize { - self.nodes.capacity() - } - - /// Returns the capacity of the map containing trie values. 
- pub(crate) fn value_capacity(&self) -> usize { - self.inner.value_capacity() - } - /// Shrinks the capacity of the subtrie's node storage. pub(crate) fn shrink_nodes_to(&mut self, size: usize) { self.nodes.shrink_to(size); @@ -2492,11 +2472,6 @@ impl SparseSubtrieInner { self.values.clear(); self.buffers.clear(); } - - /// Returns the capacity of the map storing leaf values - fn value_capacity(&self) -> usize { - self.values.capacity() - } } /// Represents the outcome of processing a node during leaf insertion diff --git a/crates/trie/sparse/src/metrics.rs b/crates/trie/sparse/src/metrics.rs index 3f39e6df6f9..8dc64ddc599 100644 --- a/crates/trie/sparse/src/metrics.rs +++ b/crates/trie/sparse/src/metrics.rs @@ -1,6 +1,5 @@ //! Metrics for the sparse state trie -use metrics::Gauge; use reth_metrics::{metrics::Histogram, Metrics}; /// Metrics for the sparse state trie @@ -16,24 +15,24 @@ pub(crate) struct SparseStateTrieMetrics { pub(crate) multiproof_skipped_storage_nodes: u64, /// Number of total storage nodes, including those that were skipped. pub(crate) multiproof_total_storage_nodes: u64, - /// The actual metrics we will record - pub(crate) inner_metrics: SparseStateTrieInnerMetrics, + /// The actual metrics we will record into the histogram + pub(crate) histograms: SparseStateTrieInnerMetrics, } impl SparseStateTrieMetrics { /// Record the metrics into the histograms pub(crate) fn record(&mut self) { use core::mem::take; - self.inner_metrics + self.histograms .multiproof_skipped_account_nodes .record(take(&mut self.multiproof_skipped_account_nodes) as f64); - self.inner_metrics + self.histograms .multiproof_total_account_nodes .record(take(&mut self.multiproof_total_account_nodes) as f64); - self.inner_metrics + self.histograms .multiproof_skipped_storage_nodes .record(take(&mut self.multiproof_skipped_storage_nodes) as f64); - self.inner_metrics + self.histograms .multiproof_total_storage_nodes .record(take(&mut self.multiproof_total_storage_nodes) as f64); } @@ -57,22 +56,6 @@ impl SparseStateTrieMetrics { pub(crate) const fn increment_total_storage_nodes(&mut self, count: u64) { self.multiproof_total_storage_nodes += count; } - - /// Set the value capacity for the sparse state trie - pub(crate) fn set_value_capacity(&self, capacity: usize) { - self.inner_metrics.value_capacity.set(capacity as f64); - } - - /// Set the node capacity for the sparse state trie - pub(crate) fn set_node_capacity(&self, capacity: usize) { - self.inner_metrics.node_capacity.set(capacity as f64); - } - - /// Set the number of cleared and active storage tries - pub(crate) fn set_storage_trie_metrics(&self, cleared: usize, active: usize) { - self.inner_metrics.cleared_storage_tries.set(cleared as f64); - self.inner_metrics.active_storage_tries.set(active as f64); - } } /// Metrics for the sparse state trie @@ -89,12 +72,4 @@ pub(crate) struct SparseStateTrieInnerMetrics { pub(crate) multiproof_skipped_storage_nodes: Histogram, /// Histogram of total storage nodes, including those that were skipped. pub(crate) multiproof_total_storage_nodes: Histogram, - /// Gauge for the trie's node capacity - pub(crate) node_capacity: Gauge, - /// Gauge for the trie's value capacity - pub(crate) value_capacity: Gauge, - /// The current number of cleared storage tries. 
- pub(crate) cleared_storage_tries: Gauge, - /// The number of currently active storage tries, i.e., not cleared - pub(crate) active_storage_tries: Gauge, } diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index e45a1e13fc8..f142385c3cd 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -611,17 +611,9 @@ where &mut self, provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult { - // record revealed node metrics and capacity metrics + // record revealed node metrics #[cfg(feature = "metrics")] - { - self.metrics.record(); - self.metrics.set_node_capacity(self.node_capacity()); - self.metrics.set_value_capacity(self.value_capacity()); - self.metrics.set_storage_trie_metrics( - self.storage.cleared_tries.len(), - self.storage.tries.len(), - ); - } + self.metrics.record(); Ok(self.revealed_trie_mut(provider_factory)?.root()) } @@ -632,17 +624,9 @@ where &mut self, provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<(B256, TrieUpdates)> { - // record revealed node metrics and capacity metrics + // record revealed node metrics #[cfg(feature = "metrics")] - { - self.metrics.record(); - self.metrics.set_node_capacity(self.node_capacity()); - self.metrics.set_value_capacity(self.value_capacity()); - self.metrics.set_storage_trie_metrics( - self.storage.cleared_tries.len(), - self.storage.tries.len(), - ); - } + self.metrics.record(); let storage_tries = self.storage_trie_updates(); let revealed = self.revealed_trie_mut(provider_factory)?; @@ -847,16 +831,6 @@ where storage_trie.remove_leaf(slot, provider)?; Ok(()) } - - /// The sum of the account trie's node capacity and the storage tries' node capacity - pub fn node_capacity(&self) -> usize { - self.state.node_capacity() + self.storage.total_node_capacity() - } - - /// The sum of the account trie's value capacity and the storage tries' value capacity - pub fn value_capacity(&self) -> usize { - self.state.value_capacity() + self.storage.total_value_capacity() - } } /// The fields of [`SparseStateTrie`] related to storage tries. This is kept separate from the rest @@ -957,46 +931,6 @@ impl StorageTries { .remove(account) .unwrap_or_else(|| self.cleared_revealed_paths.pop().unwrap_or_default()) } - - /// Sums the total node capacity in `cleared_tries` - fn total_cleared_tries_node_capacity(&self) -> usize { - self.cleared_tries.iter().map(|trie| trie.node_capacity()).sum() - } - - /// Sums the total value capacity in `cleared_tries` - fn total_cleared_tries_value_capacity(&self) -> usize { - self.cleared_tries.iter().map(|trie| trie.value_capacity()).sum() - } - - /// Calculates the sum of the active storage trie node capacity, ie the tries in `tries` - fn total_active_tries_node_capacity(&self) -> usize { - self.tries.values().map(|trie| trie.node_capacity()).sum() - } - - /// Calculates the sum of the active storage trie value capacity, ie the tries in `tries` - fn total_active_tries_value_capacity(&self) -> usize { - self.tries.values().map(|trie| trie.value_capacity()).sum() - } - - /// Calculates the sum of active and cleared storage trie node capacity, i.e. 
the sum of - /// * [`StorageTries::total_active_tries_node_capacity`], and - /// * [`StorageTries::total_cleared_tries_node_capacity`] - /// * the default trie's node capacity - fn total_node_capacity(&self) -> usize { - self.total_active_tries_node_capacity() + - self.total_cleared_tries_node_capacity() + - self.default_trie.node_capacity() - } - - /// Calculates the sum of active and cleared storage trie value capacity, i.e. the sum of - /// * [`StorageTries::total_active_tries_value_capacity`], and - /// * [`StorageTries::total_cleared_tries_value_capacity`], and - /// * the default trie's value capacity - fn total_value_capacity(&self) -> usize { - self.total_active_tries_value_capacity() + - self.total_cleared_tries_value_capacity() + - self.default_trie.value_capacity() - } } #[derive(Debug, PartialEq, Eq, Default)] diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 5b7b6193f96..308695ec0fd 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -223,12 +223,6 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { /// This is useful for reusing the trie without needing to reallocate memory. fn clear(&mut self); - /// This returns the capacity of any inner data structures which store nodes. - fn node_capacity(&self) -> usize; - - /// This returns the capacity of any inner data structures which store leaf values. - fn value_capacity(&self) -> usize; - /// Shrink the capacity of the sparse trie's node storage to the given size. /// This will reduce memory usage if the current capacity is higher than the given size. fn shrink_nodes_to(&mut self, size: usize); diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 87df9cab2f6..891b718693a 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -258,22 +258,6 @@ impl SparseTrie { Ok(()) } - /// Returns the allocated capacity for sparse trie nodes. - pub fn node_capacity(&self) -> usize { - match self { - Self::Blind(Some(trie)) | Self::Revealed(trie) => trie.node_capacity(), - _ => 0, - } - } - - /// Returns the allocated capacity for sparse trie values. - pub fn value_capacity(&self) -> usize { - match self { - Self::Blind(Some(trie)) | Self::Revealed(trie) => trie.value_capacity(), - _ => 0, - } - } - /// Shrinks the capacity of the sparse trie's node storage. /// Works for both revealed and blind tries with allocated storage. 
pub fn shrink_nodes_to(&mut self, size: usize) { @@ -1101,14 +1085,6 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(LeafLookup::NonExistent) } - fn node_capacity(&self) -> usize { - self.nodes.capacity() - } - - fn value_capacity(&self) -> usize { - self.values.capacity() - } - fn shrink_nodes_to(&mut self, size: usize) { self.nodes.shrink_to(size); self.branch_node_tree_masks.shrink_to(size); From a264ccbbc27bc2c5bdf83b7dd9a1fbb789482f67 Mon Sep 17 00:00:00 2001 From: Mablr <59505383+mablr@users.noreply.github.com> Date: Mon, 27 Oct 2025 18:11:23 +0100 Subject: [PATCH 220/371] feat(metrics): add push gateway support for Prometheus metrics (#19243) --- crates/node/builder/src/launch/common.rs | 2 +- crates/node/core/src/args/metric.rs | 26 ++++++- crates/node/core/src/node_config.rs | 2 +- crates/node/metrics/Cargo.toml | 1 + crates/node/metrics/src/server.rs | 92 ++++++++++++++++++++++-- docs/vocs/docs/pages/cli/reth/node.mdx | 12 ++++ 6 files changed, 125 insertions(+), 10 deletions(-) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 190cfdc8817..92e3a7aa811 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -610,7 +610,7 @@ where } }) .build(), - ); + ).with_push_gateway(self.node_config().metrics.push_gateway_url.clone(), self.node_config().metrics.push_gateway_interval); MetricServer::new(config).serve().await?; } diff --git a/crates/node/core/src/args/metric.rs b/crates/node/core/src/args/metric.rs index d46018b8e77..5ef18787a81 100644 --- a/crates/node/core/src/args/metric.rs +++ b/crates/node/core/src/args/metric.rs @@ -1,6 +1,6 @@ use clap::Parser; -use reth_cli_util::parse_socket_address; -use std::net::SocketAddr; +use reth_cli_util::{parse_duration_from_secs, parse_socket_address}; +use std::{net::SocketAddr, time::Duration}; /// Metrics configuration. #[derive(Debug, Clone, Default, Parser)] @@ -10,4 +10,26 @@ pub struct MetricArgs { /// The metrics will be served at the given interface and port. #[arg(long="metrics", alias = "metrics.prometheus", value_name = "PROMETHEUS", value_parser = parse_socket_address, help_heading = "Metrics")] pub prometheus: Option, + + /// URL for pushing Prometheus metrics to a push gateway. + /// + /// If set, the node will periodically push metrics to the specified push gateway URL. + #[arg( + long = "metrics.prometheus.push.url", + value_name = "PUSH_GATEWAY_URL", + help_heading = "Metrics" + )] + pub push_gateway_url: Option, + + /// Interval in seconds for pushing metrics to push gateway. 
+ /// + /// Default: 5 seconds + #[arg( + long = "metrics.prometheus.push.interval", + default_value = "5", + value_parser = parse_duration_from_secs, + value_name = "SECONDS", + help_heading = "Metrics" + )] + pub push_gateway_interval: Duration, } diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 61eb29db38b..c69593adf07 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -234,7 +234,7 @@ impl NodeConfig { } /// Set the metrics address for the node - pub const fn with_metrics(mut self, metrics: MetricArgs) -> Self { + pub fn with_metrics(mut self, metrics: MetricArgs) -> Self { self.metrics = metrics; self } diff --git a/crates/node/metrics/Cargo.toml b/crates/node/metrics/Cargo.toml index 39884fa73ef..9687c9c20ac 100644 --- a/crates/node/metrics/Cargo.toml +++ b/crates/node/metrics/Cargo.toml @@ -21,6 +21,7 @@ tokio.workspace = true jsonrpsee-server.workspace = true http.workspace = true tower.workspace = true +reqwest.workspace = true tracing.workspace = true eyre.workspace = true diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index c029b773718..d7beb6c3a1d 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -8,9 +8,10 @@ use eyre::WrapErr; use http::{header::CONTENT_TYPE, HeaderValue, Response}; use metrics::describe_gauge; use metrics_process::Collector; +use reqwest::Client; use reth_metrics::metrics::Unit; use reth_tasks::TaskExecutor; -use std::{convert::Infallible, net::SocketAddr, sync::Arc}; +use std::{convert::Infallible, net::SocketAddr, sync::Arc, time::Duration}; /// Configuration for the [`MetricServer`] #[derive(Debug)] @@ -20,6 +21,8 @@ pub struct MetricServerConfig { chain_spec_info: ChainSpecInfo, task_executor: TaskExecutor, hooks: Hooks, + push_gateway_url: Option, + push_gateway_interval: Duration, } impl MetricServerConfig { @@ -31,7 +34,22 @@ impl MetricServerConfig { task_executor: TaskExecutor, hooks: Hooks, ) -> Self { - Self { listen_addr, hooks, task_executor, version_info, chain_spec_info } + Self { + listen_addr, + hooks, + task_executor, + version_info, + chain_spec_info, + push_gateway_url: None, + push_gateway_interval: Duration::from_secs(5), + } + } + + /// Set the gateway URL and interval for pushing metrics + pub fn with_push_gateway(mut self, url: Option, interval: Duration) -> Self { + self.push_gateway_url = url; + self.push_gateway_interval = interval; + self } } @@ -49,18 +67,35 @@ impl MetricServer { /// Spawns the metrics server pub async fn serve(&self) -> eyre::Result<()> { - let MetricServerConfig { listen_addr, hooks, task_executor, version_info, chain_spec_info } = - &self.config; + let MetricServerConfig { + listen_addr, + hooks, + task_executor, + version_info, + chain_spec_info, + push_gateway_url, + push_gateway_interval, + } = &self.config; - let hooks = hooks.clone(); + let hooks_for_endpoint = hooks.clone(); self.start_endpoint( *listen_addr, - Arc::new(move || hooks.iter().for_each(|hook| hook())), + Arc::new(move || hooks_for_endpoint.iter().for_each(|hook| hook())), task_executor.clone(), ) .await .wrap_err_with(|| format!("Could not start Prometheus endpoint at {listen_addr}"))?; + // Start push-gateway task if configured + if let Some(url) = push_gateway_url { + self.start_push_gateway_task( + url.clone(), + *push_gateway_interval, + hooks.clone(), + task_executor.clone(), + )?; + } + // Describe metrics after recorder installation describe_db_metrics(); 
describe_static_file_metrics(); @@ -128,6 +163,51 @@ impl MetricServer { Ok(()) } + + /// Starts a background task to push metrics to a metrics gateway + fn start_push_gateway_task( + &self, + url: String, + interval: Duration, + hooks: Hooks, + task_executor: TaskExecutor, + ) -> eyre::Result<()> { + let client = Client::builder() + .build() + .wrap_err("Could not create HTTP client to push metrics to gateway")?; + task_executor.spawn_with_graceful_shutdown_signal(move |mut signal| { + Box::pin(async move { + tracing::info!(url = %url, interval = ?interval, "Starting task to push metrics to gateway"); + let handle = install_prometheus_recorder(); + loop { + tokio::select! { + _ = &mut signal => { + tracing::info!("Shutting down task to push metrics to gateway"); + break; + } + _ = tokio::time::sleep(interval) => { + hooks.iter().for_each(|hook| hook()); + let metrics = handle.handle().render(); + match client.put(&url).header("Content-Type", "text/plain").body(metrics).send().await { + Ok(response) => { + if !response.status().is_success() { + tracing::warn!( + status = %response.status(), + "Failed to push metrics to gateway" + ); + } + } + Err(err) => { + tracing::warn!(%err, "Failed to push metrics to gateway"); + } + } + } + } + } + }) + }); + Ok(()) + } } fn describe_db_metrics() { diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index db25b9e80c0..6f8b6ae88a7 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -44,6 +44,18 @@ Metrics: The metrics will be served at the given interface and port. + --metrics.prometheus.push.url + URL for pushing Prometheus metrics to a push gateway. + + If set, the node will periodically push metrics to the specified push gateway URL. + + --metrics.prometheus.push.interval + Interval in seconds for pushing metrics to push gateway. + + Default: 5 seconds + + [default: 5] + Datadir: --datadir The path to the data dir for all reth files and subdirectories. 
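As a usage illustration for the push-gateway support added in this patch, the sketch below mirrors the `with_push_gateway` call wired into `crates/node/builder/src/launch/common.rs` above. This is a minimal sketch, not part of the patch itself: the gateway URL (including the conventional `/metrics/job/<name>` path) and the 15-second interval are placeholder values, and `config` is assumed to be a `MetricServerConfig` built as shown in that file.

```rust
use std::time::Duration;

// Assumed: `config` is a `MetricServerConfig` constructed as in the launch code above.
// Passing `None` as the URL leaves push-gateway support disabled; `Some(url)` makes
// `serve()` spawn the periodic push task in addition to the HTTP endpoint.
let config = config.with_push_gateway(
    Some("http://pushgateway:9091/metrics/job/reth".to_string()), // placeholder URL
    Duration::from_secs(15),                                      // placeholder interval
);

MetricServer::new(config).serve().await?;
```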
From ffeaa4772d01313062b0b127eb4dcc980550aec0 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Mon, 27 Oct 2025 20:09:21 +0100 Subject: [PATCH 221/371] chore(engine): Remove ConsistentDbView (#19188) Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- Cargo.lock | 2 - crates/engine/tree/Cargo.toml | 2 - crates/engine/tree/benches/state_root_task.rs | 6 +- crates/engine/tree/src/persistence.rs | 8 +- crates/engine/tree/src/tree/mod.rs | 5 +- .../tree/src/tree/payload_processor/mod.rs | 60 ++-- .../src/tree/payload_processor/multiproof.rs | 42 ++- .../engine/tree/src/tree/payload_validator.rs | 284 ++++++------------ crates/engine/tree/src/tree/state.rs | 3 +- .../provider/src/providers/state/overlay.rs | 36 ++- .../storage/provider/src/test_utils/mock.rs | 23 +- .../storage-api/src/database_provider.rs | 23 ++ crates/trie/parallel/Cargo.toml | 4 +- crates/trie/parallel/benches/root.rs | 11 +- crates/trie/parallel/src/proof.rs | 44 +-- crates/trie/parallel/src/proof_task.rs | 269 ++++++----------- crates/trie/parallel/src/root.rs | 100 +++--- crates/trie/trie/src/proof/trie_node.rs | 23 +- 18 files changed, 385 insertions(+), 560 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b8c0da68164..7c5012b4b53 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8084,7 +8084,6 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "reth-trie", - "reth-trie-db", "reth-trie-parallel", "reth-trie-sparse", "reth-trie-sparse-parallel", @@ -10770,7 +10769,6 @@ dependencies = [ "proptest-arbitrary-interop", "rand 0.9.2", "rayon", - "reth-db-api", "reth-execution-errors", "reth-metrics", "reth-primitives-traits", diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 503b5af2630..ba99898a842 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -30,7 +30,6 @@ reth-prune.workspace = true reth-revm.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true -reth-trie-db.workspace = true reth-trie-parallel.workspace = true reth-trie-sparse = { workspace = true, features = ["std", "metrics"] } reth-trie-sparse-parallel = { workspace = true, features = ["std"] } @@ -134,7 +133,6 @@ test-utils = [ "reth-trie/test-utils", "reth-trie-sparse/test-utils", "reth-prune-types?/test-utils", - "reth-trie-db/test-utils", "reth-trie-parallel/test-utils", "reth-ethereum-primitives/test-utils", "reth-node-ethereum/test-utils", diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs index 70d9e037e9d..e13ad26bc6b 100644 --- a/crates/engine/tree/benches/state_root_task.rs +++ b/crates/engine/tree/benches/state_root_task.rs @@ -20,11 +20,10 @@ use reth_evm::OnStateHook; use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::{Account as RethAccount, Recovered, StorageEntry}; use reth_provider::{ - providers::{BlockchainProvider, ConsistentDbView}, + providers::{BlockchainProvider, OverlayStateProviderFactory}, test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, AccountReader, ChainSpecProvider, HashingWriter, ProviderFactory, }; -use reth_trie::TrieInput; use revm_primitives::{HashMap, U256}; use revm_state::{Account as RevmAccount, AccountInfo, AccountStatus, EvmState, EvmStorageSlot}; use std::{hint::black_box, sync::Arc}; @@ -238,8 +237,7 @@ fn bench_state_root(c: &mut Criterion) { >, >(), StateProviderBuilder::new(provider.clone(), genesis_hash, None), - ConsistentDbView::new_with_latest_tip(provider).unwrap(), - 
TrieInput::default(), + OverlayStateProviderFactory::new(provider), &TreeConfig::default(), ) .map_err(|(err, ..)| err) diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 751356fc399..12482b1a162 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -142,7 +142,10 @@ where &self, blocks: Vec>, ) -> Result, PersistenceError> { - debug!(target: "engine::persistence", first=?blocks.first().map(|b| b.recovered_block.num_hash()), last=?blocks.last().map(|b| b.recovered_block.num_hash()), "Saving range of blocks"); + let first_block_hash = blocks.first().map(|b| b.recovered_block.num_hash()); + let last_block_hash = blocks.last().map(|b| b.recovered_block.num_hash()); + debug!(target: "engine::persistence", first=?first_block_hash, last=?last_block_hash, "Saving range of blocks"); + let start_time = Instant::now(); let last_block_hash_num = blocks.last().map(|block| BlockNumHash { hash: block.recovered_block().hash(), @@ -155,6 +158,9 @@ where provider_rw.save_blocks(blocks)?; provider_rw.commit()?; } + + debug!(target: "engine::persistence", first=?first_block_hash, last=?last_block_hash, "Saved range of blocks"); + self.metrics.save_blocks_duration_seconds.record(start_time.elapsed()); Ok(last_block_hash_num) } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index a189b643f98..324e3375d2c 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -29,9 +29,8 @@ use reth_payload_primitives::{ }; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; use reth_provider::{ - providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, HashedPostStateProvider, - ProviderError, StateProviderBox, StateProviderFactory, StateReader, TransactionVariant, - TrieReader, + BlockReader, DatabaseProviderFactory, HashedPostStateProvider, ProviderError, StateProviderBox, + StateProviderFactory, StateReader, TransactionVariant, TrieReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index ac16c60dd67..7e54d8a38e2 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -26,12 +26,12 @@ use reth_evm::{ ConfigureEvm, EvmEnvFor, OnStateHook, SpecFor, TxEnvFor, }; use reth_primitives_traits::NodePrimitives; -use reth_provider::{ - providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, StateProviderFactory, - StateReader, -}; +use reth_provider::{BlockReader, DatabaseProviderROFactory, StateProviderFactory, StateReader}; use reth_revm::{db::BundleState, state::EvmState}; -use reth_trie::TrieInput; +use reth_trie::{ + hashed_cursor::HashedCursorFactory, prefix_set::TriePrefixSetsMut, + trie_cursor::TrieCursorFactory, +}; use reth_trie_parallel::{ proof_task::{ProofTaskCtx, ProofWorkerHandle}, root::ParallelStateRootError, @@ -121,8 +121,6 @@ where >, /// Whether to disable the parallel sparse trie. disable_parallel_sparse_trie: bool, - /// A cleared trie input, kept around to be reused so allocations can be minimized. - trie_input: Option, /// Maximum concurrency for prewarm task. 
prewarm_max_concurrency: usize, } @@ -149,7 +147,6 @@ where precompile_cache_disabled: config.precompile_cache_disabled(), precompile_cache_map, sparse_state_trie: Arc::default(), - trie_input: None, disable_parallel_sparse_trie: config.disable_parallel_sparse_trie(), prewarm_max_concurrency: config.prewarm_max_concurrency(), } @@ -200,50 +197,45 @@ where name = "payload processor", skip_all )] - pub fn spawn>( + pub fn spawn>( &mut self, env: ExecutionEnv, transactions: I, provider_builder: StateProviderBuilder, - consistent_view: ConsistentDbView

, - trie_input: TrieInput, + multiproof_provider_factory: F, config: &TreeConfig, ) -> Result< PayloadHandle, I::Tx>, I::Error>, (ParallelStateRootError, I, ExecutionEnv, StateProviderBuilder), > where - P: DatabaseProviderFactory - + BlockReader - + StateProviderFactory - + StateReader + P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, + F: DatabaseProviderROFactory + Clone + + Send + 'static, { let span = tracing::Span::current(); let (to_sparse_trie, sparse_trie_rx) = channel(); - // spawn multiproof task, save the trie input - let (trie_input, state_root_config) = MultiProofConfig::from_input(trie_input); - self.trie_input = Some(trie_input); + + // We rely on the cursor factory to provide whatever DB overlay is necessary to see a + // consistent view of the database, including the trie tables. Because of this there is no + // need for an overarching prefix set to invalidate any section of the trie tables, and so + // we use an empty prefix set. + let prefix_sets = Arc::new(TriePrefixSetsMut::default()); // Create and spawn the storage proof task - let task_ctx = ProofTaskCtx::new( - state_root_config.nodes_sorted.clone(), - state_root_config.state_sorted.clone(), - state_root_config.prefix_sets.clone(), - ); + let task_ctx = ProofTaskCtx::new(multiproof_provider_factory, prefix_sets); let storage_worker_count = config.storage_worker_count(); let account_worker_count = config.account_worker_count(); let proof_handle = ProofWorkerHandle::new( self.executor.handle().clone(), - consistent_view, task_ctx, storage_worker_count, account_worker_count, ); let multi_proof_task = MultiProofTask::new( - state_root_config, proof_handle.clone(), to_sparse_trie, config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()), @@ -393,11 +385,6 @@ where CacheTaskHandle { cache, to_prewarm_task: Some(to_prewarm_task), cache_metrics } } - /// Takes the trie input from the inner payload processor, if it exists. - pub const fn take_trie_input(&mut self) -> Option { - self.trie_input.take() - } - /// Returns the cache for the given parent hash. 
/// /// If the given hash is different then what is recently cached, then this will create a new @@ -718,12 +705,12 @@ mod tests { use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::{Account, Recovered, StorageEntry}; use reth_provider::{ - providers::{BlockchainProvider, ConsistentDbView}, + providers::{BlockchainProvider, OverlayStateProviderFactory}, test_utils::create_test_provider_factory_with_chain_spec, ChainSpecProvider, HashingWriter, }; use reth_testing_utils::generators; - use reth_trie::{test_utils::state_root, HashedPostState, TrieInput}; + use reth_trie::{test_utils::state_root, HashedPostState}; use revm_primitives::{Address, HashMap, B256, KECCAK_EMPTY, U256}; use revm_state::{AccountInfo, AccountStatus, EvmState, EvmStorageSlot}; use std::sync::Arc; @@ -905,7 +892,9 @@ mod tests { &TreeConfig::default(), PrecompileCacheMap::default(), ); - let provider = BlockchainProvider::new(factory).unwrap(); + + let provider_factory = BlockchainProvider::new(factory).unwrap(); + let mut handle = payload_processor .spawn( @@ -913,9 +902,8 @@ mod tests { core::iter::empty::< Result, core::convert::Infallible>, >(), - StateProviderBuilder::new(provider.clone(), genesis_hash, None), - ConsistentDbView::new_with_latest_tip(provider).unwrap(), - TrieInput::from_state(hashed_state), + StateProviderBuilder::new(provider_factory.clone(), genesis_hash, None), + OverlayStateProviderFactory::new(provider_factory), &TreeConfig::default(), ) .map_err(|(err, ..)| err) diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 321de725bec..a000e7a5adf 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -57,8 +57,8 @@ impl SparseTrieUpdate { } /// Common configuration for multi proof tasks -#[derive(Debug, Clone)] -pub(super) struct MultiProofConfig { +#[derive(Debug, Clone, Default)] +pub(crate) struct MultiProofConfig { /// The sorted collection of cached in-memory intermediate trie nodes that /// can be reused for computation. pub nodes_sorted: Arc, @@ -75,7 +75,7 @@ impl MultiProofConfig { /// /// This returns a cleared [`TrieInput`] so that we can reuse any allocated space in the /// [`TrieInput`]. - pub(super) fn from_input(mut input: TrieInput) -> (TrieInput, Self) { + pub(crate) fn from_input(mut input: TrieInput) -> (TrieInput, Self) { let config = Self { nodes_sorted: Arc::new(input.nodes.drain_into_sorted()), state_sorted: Arc::new(input.state.drain_into_sorted()), @@ -289,7 +289,6 @@ impl StorageMultiproofInput { /// Input parameters for dispatching a multiproof calculation. #[derive(Debug)] struct MultiproofInput { - config: MultiProofConfig, source: Option, hashed_state_update: HashedPostState, proof_targets: MultiProofTargets, @@ -458,7 +457,6 @@ impl MultiproofManager { /// Dispatches a single multiproof calculation to worker pool. 
fn dispatch_multiproof(&mut self, multiproof_input: MultiproofInput) { let MultiproofInput { - config, source, hashed_state_update, proof_targets, @@ -485,7 +483,7 @@ impl MultiproofManager { // Extend prefix sets with targets let frozen_prefix_sets = - ParallelProof::extend_prefix_sets_with_targets(&config.prefix_sets, &proof_targets); + ParallelProof::extend_prefix_sets_with_targets(&Default::default(), &proof_targets); // Dispatch account multiproof to worker pool with result sender let input = AccountMultiproofInput { @@ -671,8 +669,6 @@ pub(super) struct MultiProofTask { /// The size of proof targets chunk to spawn in one calculation. /// If None, chunking is disabled and all targets are processed in a single proof. chunk_size: Option, - /// Task configuration. - config: MultiProofConfig, /// Receiver for state root related messages (prefetch, state updates, finish signal). rx: CrossbeamReceiver, /// Sender for state root related messages. @@ -696,7 +692,6 @@ pub(super) struct MultiProofTask { impl MultiProofTask { /// Creates a new multi proof task with the unified message channel pub(super) fn new( - config: MultiProofConfig, proof_worker_handle: ProofWorkerHandle, to_sparse_trie: std::sync::mpsc::Sender, chunk_size: Option, @@ -707,7 +702,6 @@ impl MultiProofTask { Self { chunk_size, - config, rx, tx, proof_result_rx, @@ -761,7 +755,6 @@ impl MultiProofTask { let mut dispatch = |proof_targets| { self.multiproof_manager.dispatch( MultiproofInput { - config: self.config.clone(), source: None, hashed_state_update: Default::default(), proof_targets, @@ -909,7 +902,6 @@ impl MultiProofTask { self.multiproof_manager.dispatch( MultiproofInput { - config: self.config.clone(), source: Some(source), hashed_state_update, proof_targets, @@ -1253,10 +1245,11 @@ mod tests { use super::*; use alloy_primitives::map::B256Set; use reth_provider::{ - providers::ConsistentDbView, test_utils::create_test_provider_factory, BlockReader, - DatabaseProviderFactory, + providers::OverlayStateProviderFactory, test_utils::create_test_provider_factory, + BlockReader, DatabaseProviderFactory, PruneCheckpointReader, StageCheckpointReader, + TrieReader, }; - use reth_trie::{MultiProof, TrieInput}; + use reth_trie::MultiProof; use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofWorkerHandle}; use revm_primitives::{B256, U256}; use std::sync::OnceLock; @@ -1275,20 +1268,19 @@ mod tests { fn create_test_state_root_task(factory: F) -> MultiProofTask where - F: DatabaseProviderFactory + Clone + 'static, + F: DatabaseProviderFactory< + Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, + > + Clone + + Send + + 'static, { let rt_handle = get_test_runtime_handle(); - let (_trie_input, config) = MultiProofConfig::from_input(TrieInput::default()); - let task_ctx = ProofTaskCtx::new( - config.nodes_sorted.clone(), - config.state_sorted.clone(), - config.prefix_sets.clone(), - ); - let consistent_view = ConsistentDbView::new(factory, None); - let proof_handle = ProofWorkerHandle::new(rt_handle, consistent_view, task_ctx, 1, 1); + let overlay_factory = OverlayStateProviderFactory::new(factory); + let task_ctx = ProofTaskCtx::new(overlay_factory, Default::default()); + let proof_handle = ProofWorkerHandle::new(rt_handle, task_ctx, 1, 1); let (to_sparse_trie, _receiver) = std::sync::mpsc::channel(); - MultiProofTask::new(config, proof_handle, to_sparse_trie, Some(1)) + MultiProofTask::new(proof_handle, to_sparse_trie, Some(1)) } #[test] diff --git 
a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 2770d9a3f9d..ecc475dd53a 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -5,17 +5,17 @@ use crate::tree::{ error::{InsertBlockError, InsertBlockErrorKind, InsertPayloadError}, executor::WorkloadExecutor, instrumented_state::InstrumentedStateProvider, - payload_processor::PayloadProcessor, + payload_processor::{multiproof::MultiProofConfig, PayloadProcessor}, persistence_state::CurrentPersistenceAction, precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap}, sparse_trie::StateRootComputeOutcome, - ConsistentDbView, EngineApiMetrics, EngineApiTreeState, ExecutionEnv, PayloadHandle, - PersistenceState, PersistingKind, StateProviderBuilder, StateProviderDatabase, TreeConfig, + EngineApiMetrics, EngineApiTreeState, ExecutionEnv, PayloadHandle, PersistenceState, + PersistingKind, StateProviderBuilder, StateProviderDatabase, TreeConfig, }; use alloy_consensus::transaction::Either; use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_evm::Evm; -use alloy_primitives::B256; +use alloy_primitives::{BlockNumber, B256}; use reth_chain_state::{CanonicalInMemoryState, ExecutedBlock}; use reth_consensus::{ConsensusError, FullConsensus}; use reth_engine_primitives::{ @@ -33,16 +33,13 @@ use reth_primitives_traits::{ AlloyBlockHeader, BlockTy, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, }; use reth_provider::{ - BlockExecutionOutput, BlockNumReader, BlockReader, DBProvider, DatabaseProviderFactory, - ExecutionOutcome, HashedPostStateProvider, ProviderError, StateProvider, StateProviderFactory, - StateReader, StateRootProvider, TrieReader, + providers::OverlayStateProviderFactory, BlockExecutionOutput, BlockNumReader, BlockReader, + DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashedPostStateProvider, ProviderError, + PruneCheckpointReader, StageCheckpointReader, StateProvider, StateProviderFactory, StateReader, + StateRootProvider, TrieReader, }; use reth_revm::db::State; -use reth_trie::{ - updates::{TrieUpdates, TrieUpdatesSorted}, - HashedPostState, KeccakKeyHasher, TrieInput, -}; -use reth_trie_db::DatabaseHashedPostState; +use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; use std::{collections::HashMap, sync::Arc, time::Instant}; use tracing::{debug, debug_span, error, info, instrument, trace, warn}; @@ -162,13 +159,16 @@ where metrics: EngineApiMetrics, /// Validator for the payload. validator: V, + /// A cleared trie input, kept around to be reused so allocations can be minimized. + trie_input: Option, } impl BasicEngineValidator where N: NodePrimitives, - P: DatabaseProviderFactory - + BlockReader

+ P: DatabaseProviderFactory< + Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, + > + BlockReader
+ StateProviderFactory + StateReader + HashedPostStateProvider @@ -204,6 +204,7 @@ where invalid_block_hook, metrics: EngineApiMetrics::default(), validator, + trie_input: Default::default(), } } @@ -407,8 +408,7 @@ where let env = ExecutionEnv { evm_env, hash: input.hash(), parent_hash: input.parent_hash() }; // Plan the strategy used for state root computation. - let state_root_plan = self.plan_state_root_computation(&input, &ctx); - let persisting_kind = state_root_plan.persisting_kind; + let state_root_plan = self.plan_state_root_computation(); let strategy = state_root_plan.strategy; debug!( @@ -425,7 +425,6 @@ where env.clone(), txs, provider_builder, - persisting_kind, parent_hash, ctx.state(), strategy, @@ -495,7 +494,6 @@ where StateRootStrategy::Parallel => { debug!(target: "engine::tree::payload_validator", "Using parallel state root algorithm"); match self.compute_state_root_parallel( - persisting_kind, block.parent_hash(), &hashed_state, ctx.state(), @@ -530,7 +528,7 @@ where if self.config.state_root_fallback() { debug!(target: "engine::tree::payload_validator", "Using state root fallback for testing"); } else { - warn!(target: "engine::tree::payload_validator", ?persisting_kind, "Failed to compute state root in parallel"); + warn!(target: "engine::tree::payload_validator", "Failed to compute state root in parallel"); self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); } @@ -678,24 +676,35 @@ where #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn compute_state_root_parallel( &self, - persisting_kind: PersistingKind, parent_hash: B256, hashed_state: &HashedPostState, state: &EngineApiTreeState, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { - let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; + let provider = self.provider.database_provider_ro()?; + + let (mut input, block_number) = + self.compute_trie_input(provider, parent_hash, state, None)?; - let mut input = self.compute_trie_input( - persisting_kind, - consistent_view.provider_ro()?, - parent_hash, - state, - None, - )?; // Extend with block we are validating root for. input.append_ref(hashed_state); - ParallelStateRoot::new(consistent_view, input).incremental_root_with_updates() + // Convert the TrieInput into a MultProofConfig, since everything uses the sorted + // forms of the state/trie fields. + let (_, multiproof_config) = MultiProofConfig::from_input(input); + + let factory = OverlayStateProviderFactory::new(self.provider.clone()) + .with_block_number(Some(block_number)) + .with_trie_overlay(Some(multiproof_config.nodes_sorted)) + .with_hashed_state_overlay(Some(multiproof_config.state_sorted)); + + // The `hashed_state` argument is already taken into account as part of the overlay, but we + // need to use the prefix sets which were generated from it to indicate to the + // ParallelStateRoot which parts of the trie need to be recomputed. + let prefix_sets = Arc::into_inner(multiproof_config.prefix_sets) + .expect("MultiProofConfig was never cloned") + .freeze(); + + ParallelStateRoot::new(factory, prefix_sets).incremental_root_with_updates() } /// Validates the block after execution. 
@@ -777,7 +786,6 @@ where env: ExecutionEnv, txs: T, provider_builder: StateProviderBuilder, - persisting_kind: PersistingKind, parent_hash: B256, state: &EngineApiTreeState, strategy: StateRootStrategy, @@ -793,17 +801,13 @@ where > { match strategy { StateRootStrategy::StateRootTask => { - // use background tasks for state root calc - let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; - // get allocated trie input if it exists - let allocated_trie_input = self.payload_processor.take_trie_input(); + let allocated_trie_input = self.trie_input.take(); // Compute trie input let trie_input_start = Instant::now(); - let trie_input = self.compute_trie_input( - persisting_kind, - consistent_view.provider_ro()?, + let (trie_input, block_number) = self.compute_trie_input( + self.provider.database_provider_ro()?, parent_hash, state, allocated_trie_input, @@ -814,50 +818,49 @@ where .trie_input_duration .record(trie_input_start.elapsed().as_secs_f64()); + // Convert the TrieInput into a MultProofConfig, since everything uses the sorted + // forms of the state/trie fields. + let (trie_input, multiproof_config) = MultiProofConfig::from_input(trie_input); + self.trie_input.replace(trie_input); + + // Create OverlayStateProviderFactory with the multiproof config, for use with + // multiproofs. + let multiproof_provider_factory = + OverlayStateProviderFactory::new(self.provider.clone()) + .with_block_number(Some(block_number)) + .with_trie_overlay(Some(multiproof_config.nodes_sorted)) + .with_hashed_state_overlay(Some(multiproof_config.state_sorted)); + // Use state root task only if prefix sets are empty, otherwise proof generation is // too expensive because it requires walking all paths in every proof. let spawn_start = Instant::now(); - let (handle, strategy) = if trie_input.prefix_sets.is_empty() { - match self.payload_processor.spawn( - env, - txs, - provider_builder, - consistent_view, - trie_input, - &self.config, - ) { - Ok(handle) => { - // Successfully spawned with state root task support - (handle, StateRootStrategy::StateRootTask) - } - Err((error, txs, env, provider_builder)) => { - // Failed to spawn proof workers, fallback to parallel state root - error!( - target: "engine::tree::payload_validator", - ?error, - "Failed to spawn proof workers, falling back to parallel state root" - ); - ( - self.payload_processor.spawn_cache_exclusive( - env, - txs, - provider_builder, - ), - StateRootStrategy::Parallel, - ) - } + let (handle, strategy) = match self.payload_processor.spawn( + env, + txs, + provider_builder, + multiproof_provider_factory, + &self.config, + ) { + Ok(handle) => { + // Successfully spawned with state root task support + (handle, StateRootStrategy::StateRootTask) + } + Err((error, txs, env, provider_builder)) => { + // Failed to spawn proof workers, fallback to parallel state root + error!( + target: "engine::tree::payload_validator", + ?error, + "Failed to spawn proof workers, falling back to parallel state root" + ); + ( + self.payload_processor.spawn_cache_exclusive( + env, + txs, + provider_builder, + ), + StateRootStrategy::Parallel, + ) } - // if prefix sets are not empty, we spawn a task that exclusively handles cache - // prewarming for transaction execution - } else { - debug!( - target: "engine::tree::payload_validator", - "Disabling state root task due to non-empty prefix sets" - ); - ( - self.payload_processor.spawn_cache_exclusive(env, txs, provider_builder), - StateRootStrategy::Parallel, - ) }; // record prewarming initialization 
duration @@ -915,48 +918,24 @@ where Ok(None) } - /// Determines the state root computation strategy based on persistence state and configuration. + /// Determines the state root computation strategy based on configuration. #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] - fn plan_state_root_computation>>( - &self, - input: &BlockOrPayload, - ctx: &TreeCtx<'_, N>, - ) -> StateRootPlan { - // We only run the parallel state root if we are not currently persisting any blocks or - // persisting blocks that are all ancestors of the one we are executing. - // - // If we're committing ancestor blocks, then: any trie updates being committed are a subset - // of the in-memory trie updates collected before fetching reverts. So any diff in - // reverts (pre vs post commit) is already covered by the in-memory trie updates we - // collect in `compute_state_root_parallel`. - // - // See https://github.com/paradigmxyz/reth/issues/12688 for more details - let persisting_kind = ctx.persisting_kind_for(input.block_with_parent()); - let can_run_parallel = - persisting_kind.can_run_parallel_state_root() && !self.config.state_root_fallback(); - - // Decide on the strategy. - // Use state root task only if: - // 1. No persistence is in progress - // 2. Config allows it - let strategy = if can_run_parallel { - if self.config.use_state_root_task() { - StateRootStrategy::StateRootTask - } else { - StateRootStrategy::Parallel - } - } else { + fn plan_state_root_computation(&self) -> StateRootPlan { + let strategy = if self.config.state_root_fallback() { StateRootStrategy::Synchronous + } else if self.config.use_state_root_task() { + StateRootStrategy::StateRootTask + } else { + StateRootStrategy::Parallel }; debug!( target: "engine::tree::payload_validator", - block=?input.num_hash(), ?strategy, "Planned state root computation strategy" ); - StateRootPlan { strategy, persisting_kind } + StateRootPlan { strategy } } /// Called when an invalid block is encountered during validation. @@ -975,7 +954,8 @@ where self.invalid_block_hook.on_invalid_block(parent_header, block, output, trie_updates); } - /// Computes the trie input at the provided parent hash. + /// Computes the trie input at the provided parent hash, as well as the block number of the + /// highest persisted ancestor. /// /// The goal of this function is to take in-memory blocks and generate a [`TrieInput`] that /// serves as an overlay to the database blocks. @@ -994,105 +974,40 @@ where level = "debug", target = "engine::tree::payload_validator", skip_all, - fields(persisting_kind, parent_hash) + fields(parent_hash) )] fn compute_trie_input( &self, - persisting_kind: PersistingKind, provider: TP, parent_hash: B256, state: &EngineApiTreeState, allocated_trie_input: Option, - ) -> ProviderResult { + ) -> ProviderResult<(TrieInput, BlockNumber)> { // get allocated trie input or use a default trie input let mut input = allocated_trie_input.unwrap_or_default(); - let best_block_number = provider.best_block_number()?; - - let (mut historical, mut blocks) = state + let (historical, blocks) = state .tree_state .blocks_by_hash(parent_hash) .map_or_else(|| (parent_hash.into(), vec![]), |(hash, blocks)| (hash.into(), blocks)); - // If the current block is a descendant of the currently persisting blocks, then we need to - // filter in-memory blocks, so that none of them are already persisted in the database. 
- let _enter = - debug_span!(target: "engine::tree::payload_validator", "filter in-memory blocks", len = blocks.len()) - .entered(); - if persisting_kind.is_descendant() { - // Iterate over the blocks from oldest to newest. - while let Some(block) = blocks.last() { - let recovered_block = block.recovered_block(); - if recovered_block.number() <= best_block_number { - // Remove those blocks that lower than or equal to the highest database - // block. - blocks.pop(); - } else { - // If the block is higher than the best block number, stop filtering, as it's - // the first block that's not in the database. - break - } - } - - historical = if let Some(block) = blocks.last() { - // If there are any in-memory blocks left after filtering, set the anchor to the - // parent of the oldest block. - (block.recovered_block().number() - 1).into() - } else { - // Otherwise, set the anchor to the original provided parent hash. - parent_hash.into() - }; - } - drop(_enter); - - let blocks_empty = blocks.is_empty(); - if blocks_empty { + if blocks.is_empty() { debug!(target: "engine::tree::payload_validator", "Parent found on disk"); } else { debug!(target: "engine::tree::payload_validator", %historical, blocks = blocks.len(), "Parent found in memory"); } - // Convert the historical block to the block number. + // Convert the historical block to the block number let block_number = provider .convert_hash_or_number(historical)? .ok_or_else(|| ProviderError::BlockHashNotFound(historical.as_hash().unwrap()))?; - let _enter = - debug_span!(target: "engine::tree::payload_validator", "revert state", blocks_empty) - .entered(); - // Retrieve revert state for historical block. - let (revert_state, revert_trie) = if block_number == best_block_number { - // We do not check against the `last_block_number` here because - // `HashedPostState::from_reverts` / `trie_reverts` only use the database tables, and - // not static files. - debug!(target: "engine::tree::payload_validator", block_number, best_block_number, "Empty revert state"); - (HashedPostState::default(), TrieUpdatesSorted::default()) - } else { - let revert_state = HashedPostState::from_reverts::( - provider.tx_ref(), - block_number + 1.., - ) - .map_err(ProviderError::from)?; - let revert_trie = provider.trie_reverts(block_number + 1)?; - debug!( - target: "engine::tree::payload_validator", - block_number, - best_block_number, - accounts = revert_state.accounts.len(), - storages = revert_state.storages.len(), - "Non-empty revert state" - ); - (revert_state, revert_trie) - }; - - input.append_cached(revert_trie.into(), revert_state); - // Extend with contents of parent in-memory blocks. input.extend_with_blocks( blocks.iter().rev().map(|block| (block.hashed_state(), block.trie_updates())), ); - Ok(input) + Ok((input, block_number)) } } @@ -1114,8 +1029,6 @@ enum StateRootStrategy { struct StateRootPlan { /// Strategy that should be attempted for computing the state root. strategy: StateRootStrategy, - /// The persisting kind for this block. - persisting_kind: PersistingKind, } /// Type that validates the payloads processed by the engine. @@ -1171,8 +1084,9 @@ pub trait EngineValidator< impl EngineValidator for BasicEngineValidator where - P: DatabaseProviderFactory - + BlockReader
+ P: DatabaseProviderFactory< + Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, + > + BlockReader
+ StateProviderFactory + StateReader + HashedPostStateProvider diff --git a/crates/engine/tree/src/tree/state.rs b/crates/engine/tree/src/tree/state.rs index f38faf6524c..a10d26e3f27 100644 --- a/crates/engine/tree/src/tree/state.rs +++ b/crates/engine/tree/src/tree/state.rs @@ -76,7 +76,8 @@ impl TreeState { } /// Returns all available blocks for the given hash that lead back to the canonical chain, from - /// newest to oldest. And the parent hash of the oldest block that is missing from the buffer. + /// newest to oldest, and the parent hash of the oldest returned block. This parent hash is the + /// highest persisted block connected to this chain. /// /// Returns `None` if the block for the given hash is not found. pub(crate) fn blocks_by_hash(&self, hash: B256) -> Option<(B256, Vec>)> { diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 98bd17aa4f9..28f04f9f767 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -4,7 +4,8 @@ use reth_errors::ProviderError; use reth_prune_types::PruneSegment; use reth_stages_types::StageId; use reth_storage_api::{ - DBProvider, DatabaseProviderFactory, PruneCheckpointReader, StageCheckpointReader, TrieReader, + DBProvider, DatabaseProviderFactory, DatabaseProviderROFactory, PruneCheckpointReader, + StageCheckpointReader, TrieReader, }; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, @@ -34,11 +35,7 @@ pub struct OverlayStateProviderFactory { hashed_state_overlay: Option>, } -impl OverlayStateProviderFactory -where - F: DatabaseProviderFactory, - F::Provider: Clone + TrieReader + StageCheckpointReader + PruneCheckpointReader, -{ +impl OverlayStateProviderFactory { /// Create a new overlay state provider factory pub const fn new(factory: F) -> Self { Self { factory, block_number: None, trie_overlay: None, hashed_state_overlay: None } @@ -69,7 +66,13 @@ where self.hashed_state_overlay = hashed_state_overlay; self } +} +impl OverlayStateProviderFactory +where + F: DatabaseProviderFactory, + F::Provider: TrieReader + StageCheckpointReader + PruneCheckpointReader, +{ /// Validates that there are sufficient changesets to revert to the requested block number. /// /// Returns an error if the `MerkleChangeSets` checkpoint doesn't cover the requested block. @@ -104,13 +107,8 @@ where let prune_lower_bound = prune_checkpoint.and_then(|chk| chk.block_number.map(|block| block + 1)); - // Use the higher of the two lower bounds (or error if neither is available) - let Some(lower_bound) = stage_lower_bound.max(prune_lower_bound) else { - return Err(ProviderError::InsufficientChangesets { - requested: requested_block, - available: 0..=upper_bound, - }) - }; + // Use the higher of the two lower bounds. If neither is available assume unbounded. + let lower_bound = stage_lower_bound.max(prune_lower_bound).unwrap_or(0); let available_range = lower_bound..=upper_bound; @@ -124,9 +122,17 @@ where Ok(()) } +} + +impl DatabaseProviderROFactory for OverlayStateProviderFactory +where + F: DatabaseProviderFactory, + F::Provider: TrieReader + StageCheckpointReader + PruneCheckpointReader, +{ + type Provider = OverlayStateProvider; /// Create a read-only [`OverlayStateProvider`]. 
- pub fn provider_ro(&self) -> Result, ProviderError> { + fn database_provider_ro(&self) -> Result, ProviderError> { // Get a read-only provider let provider = self.factory.database_provider_ro()?; @@ -184,7 +190,7 @@ where /// This provider uses in-memory trie updates and hashed post state as an overlay /// on top of a database provider, implementing [`TrieCursorFactory`] and [`HashedCursorFactory`] /// using the in-memory overlay factories. -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct OverlayStateProvider { provider: Provider, trie_updates: Arc, diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 4b3829cf8ed..16388de91ae 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,9 +1,9 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - ChainSpecProvider, ChangeSetReader, HeaderProvider, ReceiptProviderIdExt, StateProvider, - StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, - TransactionsProvider, + ChainSpecProvider, ChangeSetReader, HeaderProvider, PruneCheckpointReader, + ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, StateReader, + StateRootProvider, TransactionVariant, TransactionsProvider, }; use alloy_consensus::{ constants::EMPTY_ROOT_HASH, @@ -29,7 +29,7 @@ use reth_primitives_traits::{ Account, Block, BlockBody, Bytecode, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, SignerRecoverable, }; -use reth_prune_types::PruneModes; +use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, BytecodeReader, DBProvider, DatabaseProviderFactory, @@ -756,6 +756,21 @@ impl StageCheckpointReader } } +impl PruneCheckpointReader + for MockEthProvider +{ + fn get_prune_checkpoint( + &self, + _segment: PruneSegment, + ) -> ProviderResult> { + Ok(None) + } + + fn get_prune_checkpoints(&self) -> ProviderResult> { + Ok(vec![]) + } +} + impl StateRootProvider for MockEthProvider where T: NodePrimitives, diff --git a/crates/storage/storage-api/src/database_provider.rs b/crates/storage/storage-api/src/database_provider.rs index c0e94a044bf..8b5d8281f42 100644 --- a/crates/storage/storage-api/src/database_provider.rs +++ b/crates/storage/storage-api/src/database_provider.rs @@ -160,6 +160,29 @@ pub trait DatabaseProviderFactory: Send + Sync { /// Helper type alias to get the associated transaction type from a [`DatabaseProviderFactory`]. pub type FactoryTx = <::DB as Database>::TX; +/// A trait which can be used to describe any factory-like type which returns a read-only provider. +pub trait DatabaseProviderROFactory { + /// Provider type returned by this factory. + /// + /// This type is intentionally left unconstrained; constraints can be added as-needed when this + /// is used. + type Provider; + + /// Creates and returns a Provider. 
+ fn database_provider_ro(&self) -> ProviderResult; +} + +impl DatabaseProviderROFactory for T +where + T: DatabaseProviderFactory, +{ + type Provider = T::Provider; + + fn database_provider_ro(&self) -> ProviderResult { + ::database_provider_ro(self) + } +} + fn range_size_hint(range: &impl RangeBounds) -> Option { let start = match range.start_bound().cloned() { Bound::Included(start) => start, diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index b4463d9ede3..9fb882b44a5 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -13,12 +13,10 @@ workspace = true [dependencies] # reth -reth-db-api.workspace = true reth-execution-errors.workspace = true reth-provider.workspace = true reth-storage-errors.workspace = true reth-trie-common.workspace = true -reth-trie-db.workspace = true reth-trie-sparse = { workspace = true, features = ["std"] } reth-trie.workspace = true @@ -46,6 +44,7 @@ metrics = { workspace = true, optional = true } # reth reth-primitives-traits.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } +reth-trie-db.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } # misc @@ -59,7 +58,6 @@ tokio = { workspace = true, features = ["rt", "rt-multi-thread", "macros"] } default = ["metrics"] metrics = ["reth-metrics", "dep:metrics", "reth-trie/metrics", "reth-trie-sparse/metrics"] test-utils = [ - "reth-db-api/test-utils", "reth-primitives-traits/test-utils", "reth-provider/test-utils", "reth-trie-common/test-utils", diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index 48657cc8a70..53719892748 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -5,7 +5,8 @@ use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use proptest_arbitrary_interop::arb; use reth_primitives_traits::Account; use reth_provider::{ - providers::ConsistentDbView, test_utils::create_test_provider_factory, StateWriter, TrieWriter, + providers::OverlayStateProviderFactory, test_utils::create_test_provider_factory, StateWriter, + TrieWriter, }; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StateRoot, @@ -37,7 +38,7 @@ pub fn calculate_state_root(c: &mut Criterion) { provider_rw.commit().unwrap(); } - let view = ConsistentDbView::new(provider_factory.clone(), None); + let factory = OverlayStateProviderFactory::new(provider_factory.clone()); // state root group.bench_function(BenchmarkId::new("sync root", size), |b| { @@ -65,10 +66,8 @@ pub fn calculate_state_root(c: &mut Criterion) { group.bench_function(BenchmarkId::new("parallel root", size), |b| { b.iter_with_setup( || { - ParallelStateRoot::new( - view.clone(), - TrieInput::from_state(updated_state.clone()), - ) + let trie_input = TrieInput::from_state(updated_state.clone()); + ParallelStateRoot::new(factory.clone(), trie_input.prefix_sets.freeze()) }, |calculator| calculator.incremental_root(), ); diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 4d54359d1bf..09f5e56e771 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -14,9 +14,7 @@ use reth_execution_errors::StorageRootError; use reth_storage_errors::db::DatabaseError; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets, TriePrefixSetsMut}, - updates::TrieUpdatesSorted, - DecodedMultiProof, DecodedStorageMultiProof, HashedPostState, 
HashedPostStateSorted, - MultiProofTargets, Nibbles, + DecodedMultiProof, DecodedStorageMultiProof, HashedPostState, MultiProofTargets, Nibbles, }; use reth_trie_common::added_removed_keys::MultiAddedRemovedKeys; use std::{sync::Arc, time::Instant}; @@ -28,14 +26,7 @@ use tracing::trace; /// that has proof targets. #[derive(Debug)] pub struct ParallelProof { - /// The sorted collection of cached in-memory intermediate trie nodes that - /// can be reused for computation. - pub nodes_sorted: Arc, - /// The sorted in-memory overlay hashed state. - pub state_sorted: Arc, - /// The collection of prefix sets for the computation. Since the prefix sets _always_ - /// invalidate the in-memory nodes, not all keys from `state_sorted` might be present here, - /// if we have cached nodes for them. + /// The collection of prefix sets for the computation. pub prefix_sets: Arc, /// Flag indicating whether to include branch node masks in the proof. collect_branch_node_masks: bool, @@ -53,15 +44,11 @@ pub struct ParallelProof { impl ParallelProof { /// Create new state proof generator. pub fn new( - nodes_sorted: Arc, - state_sorted: Arc, prefix_sets: Arc, missed_leaves_storage_roots: Arc>, proof_worker_handle: ProofWorkerHandle, ) -> Self { Self { - nodes_sorted, - state_sorted, prefix_sets, missed_leaves_storage_roots, collect_branch_node_masks: false, @@ -272,9 +259,7 @@ mod tests { }; use rand::Rng; use reth_primitives_traits::{Account, StorageEntry}; - use reth_provider::{ - providers::ConsistentDbView, test_utils::create_test_provider_factory, HashingWriter, - }; + use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; use reth_trie::proof::Proof; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use tokio::runtime::Runtime; @@ -282,7 +267,6 @@ mod tests { #[test] fn random_parallel_proof() { let factory = create_test_provider_factory(); - let consistent_view = ConsistentDbView::new(factory.clone(), None); let mut rng = rand::rng(); let state = (0..100) @@ -344,20 +328,14 @@ mod tests { let rt = Runtime::new().unwrap(); - let task_ctx = - ProofTaskCtx::new(Default::default(), Default::default(), Default::default()); - let proof_worker_handle = - ProofWorkerHandle::new(rt.handle().clone(), consistent_view, task_ctx, 1, 1); - - let parallel_result = ParallelProof::new( - Default::default(), - Default::default(), - Default::default(), - Default::default(), - proof_worker_handle.clone(), - ) - .decoded_multiproof(targets.clone()) - .unwrap(); + let factory = reth_provider::providers::OverlayStateProviderFactory::new(factory); + let task_ctx = ProofTaskCtx::new(factory, Default::default()); + let proof_worker_handle = ProofWorkerHandle::new(rt.handle().clone(), task_ctx, 1, 1); + + let parallel_result = + ParallelProof::new(Default::default(), Default::default(), proof_worker_handle.clone()) + .decoded_multiproof(targets.clone()) + .unwrap(); let sequential_result_raw = Proof::new(trie_cursor_factory, hashed_cursor_factory) .multiproof(targets.clone()) diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 1b50dbe73ef..7e453cbc7c3 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -28,9 +28,6 @@ //! | v //! ProofResultMessage <-------- ProofResultSender --- //! ``` -//! -//! Individual [`ProofTaskTx`] instances manage a dedicated [`InMemoryTrieCursorFactory`] and -//! [`HashedPostStateCursorFactory`], which are each backed by a database transaction. 
use crate::{ root::ParallelStateRootError, @@ -44,29 +41,24 @@ use alloy_primitives::{ use alloy_rlp::{BufMut, Encodable}; use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; use dashmap::DashMap; -use reth_db_api::transaction::DbTx; use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; -use reth_provider::{ - providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, -}; +use reth_provider::{DatabaseProviderROFactory, ProviderError}; use reth_storage_errors::db::DatabaseError; use reth_trie::{ - hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, + hashed_cursor::HashedCursorFactory, node_iter::{TrieElement, TrieNodeIter}, prefix_set::{TriePrefixSets, TriePrefixSetsMut}, - proof::{ProofTrieNodeProviderFactory, StorageProof}, - trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, - updates::TrieUpdatesSorted, + proof::{ProofBlindedAccountProvider, ProofBlindedStorageProvider, StorageProof}, + trie_cursor::TrieCursorFactory, walker::TrieWalker, - DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostState, - HashedPostStateSorted, MultiProofTargets, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, + DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostState, MultiProofTargets, + Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_common::{ added_removed_keys::MultiAddedRemovedKeys, prefix_set::{PrefixSet, PrefixSetMut}, proof::{DecodedProofNodes, ProofRetainer}, }; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}; use std::{ sync::{ @@ -214,19 +206,20 @@ enum StorageWorkerJob { /// /// Worker shuts down when the crossbeam channel closes (all senders dropped). fn storage_worker_loop( - view: ConsistentDbView, - task_ctx: ProofTaskCtx, + task_ctx: ProofTaskCtx, work_rx: CrossbeamReceiver, worker_id: usize, available_workers: Arc, #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, ) where - Factory: DatabaseProviderFactory, + Factory: DatabaseProviderROFactory, { - // Create db transaction before entering work loop - let provider = - view.provider_ro().expect("Storage worker failed to initialize: database unavailable"); - let proof_tx = ProofTaskTx::new(provider.into_tx(), task_ctx, worker_id); + // Create provider from factory + let provider = task_ctx + .factory + .database_provider_ro() + .expect("Storage worker failed to initialize: unable to create provider"); + let proof_tx = ProofTaskTx::new(provider, task_ctx.prefix_sets, worker_id); trace!( target: "trie::proof_task", @@ -234,16 +227,6 @@ fn storage_worker_loop( "Storage worker started" ); - // Create factories once at worker startup to avoid recreation overhead. 
- let (trie_cursor_factory, hashed_cursor_factory) = proof_tx.create_factories(); - - // Create blinded provider factory once for all blinded node requests - let blinded_provider_factory = ProofTrieNodeProviderFactory::new( - trie_cursor_factory.clone(), - hashed_cursor_factory.clone(), - proof_tx.task_ctx.prefix_sets.clone(), - ); - let mut storage_proofs_processed = 0u64; let mut storage_nodes_processed = 0u64; @@ -270,12 +253,7 @@ fn storage_worker_loop( ); let proof_start = Instant::now(); - - let result = proof_tx.compute_storage_proof( - input, - trie_cursor_factory.clone(), - hashed_cursor_factory.clone(), - ); + let result = proof_tx.compute_storage_proof(input); let proof_elapsed = proof_start.elapsed(); storage_proofs_processed += 1; @@ -325,9 +303,15 @@ fn storage_worker_loop( "Processing blinded storage node" ); + let storage_node_provider = ProofBlindedStorageProvider::new( + &proof_tx.provider, + &proof_tx.provider, + proof_tx.prefix_sets.clone(), + account, + ); + let start = Instant::now(); - let result = - blinded_provider_factory.storage_node_provider(account).trie_node(&path); + let result = storage_node_provider.trie_node(&path); let elapsed = start.elapsed(); storage_nodes_processed += 1; @@ -393,20 +377,21 @@ fn storage_worker_loop( /// /// Worker shuts down when the crossbeam channel closes (all senders dropped). fn account_worker_loop( - view: ConsistentDbView, - task_ctx: ProofTaskCtx, + task_ctx: ProofTaskCtx, work_rx: CrossbeamReceiver, storage_work_tx: CrossbeamSender, worker_id: usize, available_workers: Arc, #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, ) where - Factory: DatabaseProviderFactory, + Factory: DatabaseProviderROFactory, { - // Create db transaction before entering work loop - let provider = - view.provider_ro().expect("Account worker failed to initialize: database unavailable"); - let proof_tx = ProofTaskTx::new(provider.into_tx(), task_ctx, worker_id); + // Create provider from factory + let provider = task_ctx + .factory + .database_provider_ro() + .expect("Account worker failed to initialize: unable to create provider"); + let proof_tx = ProofTaskTx::new(provider, task_ctx.prefix_sets, worker_id); trace!( target: "trie::proof_task", @@ -414,16 +399,6 @@ fn account_worker_loop( "Account worker started" ); - // Create factories once at worker startup to avoid recreation overhead. 
- let (trie_cursor_factory, hashed_cursor_factory) = proof_tx.create_factories(); - - // Create blinded provider factory once for all blinded node requests - let blinded_provider_factory = ProofTrieNodeProviderFactory::new( - trie_cursor_factory.clone(), - hashed_cursor_factory.clone(), - proof_tx.task_ctx.prefix_sets.clone(), - ); - let mut account_proofs_processed = 0u64; let mut account_nodes_processed = 0u64; @@ -511,8 +486,7 @@ fn account_worker_loop( }; let result = build_account_multiproof_with_storage_roots( - trie_cursor_factory.clone(), - hashed_cursor_factory.clone(), + &proof_tx.provider, ctx, &mut tracker, ); @@ -568,8 +542,14 @@ fn account_worker_loop( "Processing blinded account node" ); + let account_node_provider = ProofBlindedAccountProvider::new( + &proof_tx.provider, + &proof_tx.provider, + proof_tx.prefix_sets.clone(), + ); + let start = Instant::now(); - let result = blinded_provider_factory.account_node_provider().trie_node(&path); + let result = account_node_provider.trie_node(&path); let elapsed = start.elapsed(); account_nodes_processed += 1; @@ -617,22 +597,20 @@ fn account_worker_loop( /// enabling interleaved parallelism between account trie traversal and storage proof computation. /// /// Returns a `DecodedMultiProof` containing the account subtree and storage proofs. -fn build_account_multiproof_with_storage_roots( - trie_cursor_factory: C, - hashed_cursor_factory: H, +fn build_account_multiproof_with_storage_roots
<P>
( + provider: &P, ctx: AccountMultiproofParams<'_>, tracker: &mut ParallelTrieTracker, ) -> Result where - C: TrieCursorFactory + Clone, - H: HashedCursorFactory + Clone, + P: TrieCursorFactory + HashedCursorFactory, { let accounts_added_removed_keys = ctx.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); // Create the walker. let walker = TrieWalker::<_>::state_trie( - trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, + provider.account_trie_cursor().map_err(ProviderError::Database)?, ctx.prefix_set, ) .with_added_removed_keys(accounts_added_removed_keys) @@ -656,7 +634,7 @@ where let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_node_iter = TrieNodeIter::state_trie( walker, - hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, + provider.hashed_account_cursor().map_err(ProviderError::Database)?, ); let mut storage_proof_receivers = ctx.storage_proof_receivers; @@ -708,23 +686,23 @@ where match ctx.missed_leaves_storage_roots.entry(hashed_address) { dashmap::Entry::Occupied(occ) => *occ.get(), dashmap::Entry::Vacant(vac) => { - let root = StorageProof::new_hashed( - trie_cursor_factory.clone(), - hashed_cursor_factory.clone(), - hashed_address, - ) - .with_prefix_set_mut(Default::default()) - .storage_multiproof( - ctx.targets.get(&hashed_address).cloned().unwrap_or_default(), - ) - .map_err(|e| { - ParallelStateRootError::StorageRoot( - reth_execution_errors::StorageRootError::Database( - DatabaseError::Other(e.to_string()), - ), - ) - })? - .root; + let root = + StorageProof::new_hashed(provider, provider, hashed_address) + .with_prefix_set_mut(Default::default()) + .storage_multiproof( + ctx.targets + .get(&hashed_address) + .cloned() + .unwrap_or_default(), + ) + .map_err(|e| { + ParallelStateRootError::StorageRoot( + reth_execution_errors::StorageRootError::Database( + DatabaseError::Other(e.to_string()), + ), + ) + })? + .root; vac.insert(root); root @@ -835,64 +813,35 @@ fn dispatch_storage_proofs( Ok(storage_proof_receivers) } -/// Type alias for the factory tuple returned by `create_factories` -type ProofFactories<'a, Tx> = ( - InMemoryTrieCursorFactory, &'a TrieUpdatesSorted>, - HashedPostStateCursorFactory, &'a HashedPostStateSorted>, -); - /// This contains all information shared between all storage proof instances. #[derive(Debug)] -pub struct ProofTaskTx { - /// The tx that is reused for proof calculations. - tx: Tx, +pub struct ProofTaskTx { + /// The provider that implements `TrieCursorFactory` and `HashedCursorFactory`. + provider: Provider, - /// Trie updates, prefix sets, and state updates - task_ctx: ProofTaskCtx, + /// The prefix sets for the computation. + prefix_sets: Arc, /// Identifier for the worker within the worker pool, used only for tracing. id: usize, } -impl ProofTaskTx { - /// Initializes a [`ProofTaskTx`] using the given transaction and a [`ProofTaskCtx`]. The id is - /// used only for tracing. - const fn new(tx: Tx, task_ctx: ProofTaskCtx, id: usize) -> Self { - Self { tx, task_ctx, id } +impl ProofTaskTx { + /// Initializes a [`ProofTaskTx`] with the given provider, prefix sets, and ID. 
+ const fn new(provider: Provider, prefix_sets: Arc, id: usize) -> Self { + Self { provider, prefix_sets, id } } } -impl ProofTaskTx +impl ProofTaskTx where - Tx: DbTx, + Provider: TrieCursorFactory + HashedCursorFactory, { - #[inline] - fn create_factories(&self) -> ProofFactories<'_, Tx> { - let trie_cursor_factory = InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(&self.tx), - self.task_ctx.nodes_sorted.as_ref(), - ); - - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(&self.tx), - self.task_ctx.state_sorted.as_ref(), - ); - - (trie_cursor_factory, hashed_cursor_factory) - } - - /// Compute storage proof with pre-created factories. + /// Compute storage proof. /// - /// Accepts cursor factories as parameters to allow reuse across multiple proofs. - /// Used by storage workers in the worker pool to avoid factory recreation - /// overhead on each proof computation. + /// Used by storage workers in the worker pool to compute storage proofs. #[inline] - fn compute_storage_proof( - &self, - input: StorageProofInput, - trie_cursor_factory: impl TrieCursorFactory, - hashed_cursor_factory: impl HashedCursorFactory, - ) -> StorageProofResult { + fn compute_storage_proof(&self, input: StorageProofInput) -> StorageProofResult { // Consume the input so we can move large collections (e.g. target slots) without cloning. let StorageProofInput { hashed_address, @@ -919,7 +868,7 @@ where // Compute raw storage multiproof let raw_proof_result = - StorageProof::new_hashed(trie_cursor_factory, hashed_cursor_factory, hashed_address) + StorageProof::new_hashed(&self.provider, &self.provider, hashed_address) .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().copied())) .with_branch_node_masks(with_branch_node_masks) .with_added_removed_keys(added_removed_keys) @@ -1034,27 +983,20 @@ enum AccountWorkerJob { } /// Data used for initializing cursor factories that is shared across all storage proof instances. -#[derive(Debug, Clone)] -pub struct ProofTaskCtx { - /// The sorted collection of cached in-memory intermediate trie nodes that can be reused for - /// computation. - nodes_sorted: Arc, - /// The sorted in-memory overlay hashed state. - state_sorted: Arc, +#[derive(Clone, Debug)] +pub struct ProofTaskCtx { + /// The factory for creating state providers. + factory: Factory, /// The collection of prefix sets for the computation. Since the prefix sets _always_ /// invalidate the in-memory nodes, not all keys from `state_sorted` might be present here, /// if we have cached nodes for them. prefix_sets: Arc, } -impl ProofTaskCtx { - /// Creates a new [`ProofTaskCtx`] with the given sorted nodes and state. - pub const fn new( - nodes_sorted: Arc, - state_sorted: Arc, - prefix_sets: Arc, - ) -> Self { - Self { nodes_sorted, state_sorted, prefix_sets } +impl ProofTaskCtx { + /// Creates a new [`ProofTaskCtx`] with the given factory and prefix sets. 
+ pub const fn new(factory: Factory, prefix_sets: Arc) -> Self { + Self { factory, prefix_sets } } } @@ -1085,19 +1027,20 @@ impl ProofWorkerHandle { /// /// # Parameters /// - `executor`: Tokio runtime handle for spawning blocking tasks - /// - `view`: Consistent database view for creating transactions - /// - `task_ctx`: Shared context with trie updates and prefix sets + /// - `task_ctx`: Shared context with database view and prefix sets /// - `storage_worker_count`: Number of storage workers to spawn /// - `account_worker_count`: Number of account workers to spawn pub fn new( executor: Handle, - view: ConsistentDbView, - task_ctx: ProofTaskCtx, + task_ctx: ProofTaskCtx, storage_worker_count: usize, account_worker_count: usize, ) -> Self where - Factory: DatabaseProviderFactory + Clone + 'static, + Factory: DatabaseProviderROFactory + + Clone + + Send + + 'static, { let (storage_work_tx, storage_work_rx) = unbounded::(); let (account_work_tx, account_work_rx) = unbounded::(); @@ -1120,7 +1063,6 @@ impl ProofWorkerHandle { // Spawn storage workers for worker_id in 0..storage_worker_count { let span = debug_span!(target: "trie::proof_task", "storage worker", ?worker_id); - let view_clone = view.clone(); let task_ctx_clone = task_ctx.clone(); let work_rx_clone = storage_work_rx.clone(); let storage_available_workers_clone = storage_available_workers.clone(); @@ -1131,7 +1073,6 @@ impl ProofWorkerHandle { let _guard = span.enter(); storage_worker_loop( - view_clone, task_ctx_clone, work_rx_clone, worker_id, @@ -1149,7 +1090,6 @@ impl ProofWorkerHandle { // Spawn account workers for worker_id in 0..account_worker_count { let span = debug_span!(target: "trie::proof_task", "account worker", ?worker_id); - let view_clone = view.clone(); let task_ctx_clone = task_ctx.clone(); let work_rx_clone = account_work_rx.clone(); let storage_work_tx_clone = storage_work_tx.clone(); @@ -1161,7 +1101,6 @@ impl ProofWorkerHandle { let _guard = span.enter(); account_worker_loop( - view_clone, task_ctx_clone, work_rx_clone, storage_work_tx_clone, @@ -1357,24 +1296,13 @@ impl TrieNodeProvider for ProofTaskTrieNodeProvider { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::map::B256Map; - use reth_provider::{providers::ConsistentDbView, test_utils::create_test_provider_factory}; - use reth_trie_common::{ - prefix_set::TriePrefixSetsMut, updates::TrieUpdatesSorted, HashedAccountsSorted, - HashedPostStateSorted, - }; + use reth_provider::test_utils::create_test_provider_factory; + use reth_trie_common::prefix_set::TriePrefixSetsMut; use std::sync::Arc; use tokio::{runtime::Builder, task}; - fn test_ctx() -> ProofTaskCtx { - ProofTaskCtx::new( - Arc::new(TrieUpdatesSorted::default()), - Arc::new(HashedPostStateSorted::new( - HashedAccountsSorted::default(), - B256Map::default(), - )), - Arc::new(TriePrefixSetsMut::default()), - ) + fn test_ctx(factory: Factory) -> ProofTaskCtx { + ProofTaskCtx::new(factory, Arc::new(TriePrefixSetsMut::default())) } /// Ensures `ProofWorkerHandle::new` spawns workers correctly. 
@@ -1383,11 +1311,12 @@ mod tests { let runtime = Builder::new_multi_thread().worker_threads(1).enable_all().build().unwrap(); runtime.block_on(async { let handle = tokio::runtime::Handle::current(); - let factory = create_test_provider_factory(); - let view = ConsistentDbView::new(factory, None); - let ctx = test_ctx(); + let provider_factory = create_test_provider_factory(); + let factory = + reth_provider::providers::OverlayStateProviderFactory::new(provider_factory); + let ctx = test_ctx(factory); - let proof_handle = ProofWorkerHandle::new(handle.clone(), view, ctx, 5, 3); + let proof_handle = ProofWorkerHandle::new(handle.clone(), ctx, 5, 3); // Verify handle can be cloned let _cloned_handle = proof_handle.clone(); diff --git a/crates/trie/parallel/src/root.rs b/crates/trie/parallel/src/root.rs index 61d8f69a1d2..5c9294e8f92 100644 --- a/crates/trie/parallel/src/root.rs +++ b/crates/trie/parallel/src/root.rs @@ -5,22 +5,20 @@ use alloy_primitives::B256; use alloy_rlp::{BufMut, Encodable}; use itertools::Itertools; use reth_execution_errors::StorageRootError; -use reth_provider::{ - providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, -}; +use reth_provider::{DatabaseProviderROFactory, ProviderError}; use reth_storage_errors::db::DatabaseError; use reth_trie::{ - hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, + hashed_cursor::HashedCursorFactory, node_iter::{TrieElement, TrieNodeIter}, - trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, + prefix_set::TriePrefixSets, + trie_cursor::TrieCursorFactory, updates::TrieUpdates, walker::TrieWalker, - HashBuilder, Nibbles, StorageRoot, TrieInput, TRIE_ACCOUNT_RLP_MAX_SIZE, + HashBuilder, Nibbles, StorageRoot, TRIE_ACCOUNT_RLP_MAX_SIZE, }; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use std::{ collections::HashMap, - sync::{mpsc, Arc, OnceLock}, + sync::{mpsc, OnceLock}, time::Duration, }; use thiserror::Error; @@ -34,20 +32,15 @@ use tracing::*; /// nodes in the process. Upon encountering a leaf node, it will poll the storage root /// task for the corresponding hashed address. /// -/// Internally, the calculator uses [`ConsistentDbView`] since -/// it needs to rely on database state saying the same until -/// the last transaction is open. -/// See docs of using [`ConsistentDbView`] for caveats. -/// /// Note: This implementation only serves as a fallback for the sparse trie-based /// state root calculation. The sparse trie approach is more efficient as it avoids traversing /// the entire trie, only operating on the modified parts. #[derive(Debug)] pub struct ParallelStateRoot { - /// Consistent view of the database. - view: ConsistentDbView, - /// Trie input. - input: TrieInput, + /// Factory for creating state providers. + factory: Factory, + // Prefix sets indicating which portions of the trie need to be recomputed. + prefix_sets: TriePrefixSets, /// Parallel state root metrics. #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics, @@ -55,10 +48,10 @@ pub struct ParallelStateRoot { impl ParallelStateRoot { /// Create new parallel state root calculator. 
- pub fn new(view: ConsistentDbView, input: TrieInput) -> Self { + pub fn new(factory: Factory, prefix_sets: TriePrefixSets) -> Self { Self { - view, - input, + factory, + prefix_sets, #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics::default(), } @@ -67,7 +60,10 @@ impl ParallelStateRoot { impl ParallelStateRoot where - Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, + Factory: DatabaseProviderROFactory + + Clone + + Send + + 'static, { /// Calculate incremental state root in parallel. pub fn incremental_root(self) -> Result { @@ -88,12 +84,12 @@ where retain_updates: bool, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { let mut tracker = ParallelTrieTracker::default(); - let trie_nodes_sorted = Arc::new(self.input.nodes.into_sorted()); - let hashed_state_sorted = Arc::new(self.input.state.into_sorted()); - let prefix_sets = self.input.prefix_sets.freeze(); let storage_root_targets = StorageRootTargets::new( - prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())), - prefix_sets.storage_prefix_sets, + self.prefix_sets + .account_prefix_set + .iter() + .map(|nibbles| B256::from_slice(&nibbles.pack())), + self.prefix_sets.storage_prefix_sets, ); // Pre-calculate storage roots in parallel for accounts which were changed. @@ -107,9 +103,7 @@ where for (hashed_address, prefix_set) in storage_root_targets.into_iter().sorted_unstable_by_key(|(address, _)| *address) { - let view = self.view.clone(); - let hashed_state_sorted = hashed_state_sorted.clone(); - let trie_nodes_sorted = trie_nodes_sorted.clone(); + let factory = self.factory.clone(); #[cfg(feature = "metrics")] let metrics = self.metrics.storage_trie.clone(); @@ -118,18 +112,10 @@ where // Spawn a blocking task to calculate account's storage root from database I/O drop(handle.spawn_blocking(move || { let result = (|| -> Result<_, ParallelStateRootError> { - let provider_ro = view.provider_ro()?; - let trie_cursor_factory = InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), - &trie_nodes_sorted, - ); - let hashed_state = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - &hashed_state_sorted, - ); + let provider = factory.database_provider_ro()?; Ok(StorageRoot::new_hashed( - trie_cursor_factory, - hashed_state, + &provider, + &provider, hashed_address, prefix_set, #[cfg(feature = "metrics")] @@ -145,24 +131,16 @@ where trace!(target: "trie::parallel_state_root", "calculating state root"); let mut trie_updates = TrieUpdates::default(); - let provider_ro = self.view.provider_ro()?; - let trie_cursor_factory = InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), - &trie_nodes_sorted, - ); - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - &hashed_state_sorted, - ); + let provider = self.factory.database_provider_ro()?; let walker = TrieWalker::<_>::state_trie( - trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, - prefix_sets.account_prefix_set, + provider.account_trie_cursor().map_err(ProviderError::Database)?, + self.prefix_sets.account_prefix_set, ) .with_deletions_retained(retain_updates); let mut account_node_iter = TrieNodeIter::state_trie( walker, - hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, + provider.hashed_account_cursor().map_err(ProviderError::Database)?, ); let mut hash_builder = 
HashBuilder::default().with_updates(retain_updates); @@ -186,8 +164,8 @@ where None => { tracker.inc_missed_leaves(); StorageRoot::new_hashed( - trie_cursor_factory.clone(), - hashed_cursor_factory.clone(), + &provider, + &provider, hashed_address, Default::default(), #[cfg(feature = "metrics")] @@ -223,7 +201,7 @@ where let root = hash_builder.root(); let removed_keys = account_node_iter.walker.take_removed_keys(); - trie_updates.finalize(hash_builder, removed_keys, prefix_sets.destroyed_accounts); + trie_updates.finalize(hash_builder, removed_keys, self.prefix_sets.destroyed_accounts); let stats = tracker.finish(); @@ -306,11 +284,13 @@ mod tests { use reth_primitives_traits::{Account, StorageEntry}; use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; use reth_trie::{test_utils, HashedPostState, HashedStorage}; + use std::sync::Arc; #[tokio::test] async fn random_parallel_root() { let factory = create_test_provider_factory(); - let consistent_view = ConsistentDbView::new(factory.clone(), None); + let mut overlay_factory = + reth_provider::providers::OverlayStateProviderFactory::new(factory.clone()); let mut rng = rand::rng(); let mut state = (0..100) @@ -353,7 +333,7 @@ mod tests { } assert_eq!( - ParallelStateRoot::new(consistent_view.clone(), Default::default()) + ParallelStateRoot::new(overlay_factory.clone(), Default::default()) .incremental_root() .unwrap(), test_utils::state_root(state.clone()) @@ -384,8 +364,12 @@ mod tests { } } + let prefix_sets = hashed_state.construct_prefix_sets(); + overlay_factory = + overlay_factory.with_hashed_state_overlay(Some(Arc::new(hashed_state.into_sorted()))); + assert_eq!( - ParallelStateRoot::new(consistent_view, TrieInput::from_state(hashed_state)) + ParallelStateRoot::new(overlay_factory, prefix_sets.freeze()) .incremental_root() .unwrap(), test_utils::state_root(state) diff --git a/crates/trie/trie/src/proof/trie_node.rs b/crates/trie/trie/src/proof/trie_node.rs index 3d964cf5e8b..3e197072d49 100644 --- a/crates/trie/trie/src/proof/trie_node.rs +++ b/crates/trie/trie/src/proof/trie_node.rs @@ -81,19 +81,18 @@ impl ProofBlindedAccountProvider { impl TrieNodeProvider for ProofBlindedAccountProvider where - T: TrieCursorFactory + Clone + Send + Sync, - H: HashedCursorFactory + Clone + Send + Sync, + T: TrieCursorFactory, + H: HashedCursorFactory, { fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { let start = enabled!(target: "trie::proof::blinded", Level::TRACE).then(Instant::now); let targets = MultiProofTargets::from_iter([(pad_path_to_key(path), HashSet::default())]); - let mut proof = - Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) - .with_prefix_sets_mut(self.prefix_sets.as_ref().clone()) - .with_branch_node_masks(true) - .multiproof(targets) - .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; + let mut proof = Proof::new(&self.trie_cursor_factory, &self.hashed_cursor_factory) + .with_prefix_sets_mut(self.prefix_sets.as_ref().clone()) + .with_branch_node_masks(true) + .multiproof(targets) + .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; let node = proof.account_subtree.into_inner().remove(path); let tree_mask = proof.branch_node_tree_masks.remove(path); let hash_mask = proof.branch_node_hash_masks.remove(path); @@ -138,8 +137,8 @@ impl ProofBlindedStorageProvider { impl TrieNodeProvider for ProofBlindedStorageProvider where - T: TrieCursorFactory + Clone + Send + Sync, - H: HashedCursorFactory + Clone + Send + Sync, + T: 
TrieCursorFactory, + H: HashedCursorFactory, { fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { let start = enabled!(target: "trie::proof::blinded", Level::TRACE).then(Instant::now); @@ -148,8 +147,8 @@ where let storage_prefix_set = self.prefix_sets.storage_prefix_sets.get(&self.account).cloned().unwrap_or_default(); let mut proof = StorageProof::new_hashed( - self.trie_cursor_factory.clone(), - self.hashed_cursor_factory.clone(), + &self.trie_cursor_factory, + &self.hashed_cursor_factory, self.account, ) .with_prefix_set_mut(storage_prefix_set) From 1581aaa615e0f30f7893210d5f7b62e8edb1ccfd Mon Sep 17 00:00:00 2001 From: Jennifer Date: Mon, 27 Oct 2025 22:46:29 +0100 Subject: [PATCH 222/371] fix: update section name in expected failures, add more concise comments (#19328) --- .github/assets/hive/expected_failures.yaml | 35 +++++++++++++++------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index f4f20ae832e..df111f97beb 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -43,15 +43,30 @@ sync: [] engine-auth: [] -# 7702 test - no fix: it’s too expensive to check whether the storage is empty on each creation -# 6110 related tests - may start passing when fixtures improve -# 7002 related tests - post-fork test, should fix for spec compliance but not -# realistic on mainnet -# 7251 related tests - modified contract, not necessarily practical on mainnet, -# 7594: https://github.com/paradigmxyz/reth/issues/18975 -# 7610: tests are related to empty account that has storage, close to impossible to trigger -# worth re-visiting when more of these related tests are passing -eest/consume-engine: +# tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage +# no fix: it's too expensive to check whether the storage is empty on each creation (? - need more context on WHY) +# +# tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment +# modified consolidation contract, not necessarily practical on mainnet (? - need more context) +# +# tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout and test_invalid_log_length +# system contract is already fixed and deployed; tests cover scenarios where contract is +# malformed which can't happen retroactively. No point in adding checks. +# +# tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment +# post-fork test contract deployment, should fix for spec compliance but not realistic on mainnet (? - need more context) +# +# tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py::test_max_blobs_per_tx_fork_transition +# reth enforces 6 blob limit from EIP-7594, but EIP-7892 raises it to 9. +# Needs constant update in alloy. https://github.com/paradigmxyz/reth/issues/18975 +# +# tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_* +# status (27th June 2024): was discussed in ACDT meeting, need to be raised in ACDE. +# tests require hash collision on already deployed accounts with storage - mathematically +# impossible to trigger on mainnet. ~20-30 such accounts exist from before the state-clear +# EIP, but creating new accounts targeting these requires hash collision. 
+# ref: https://github.com/ethereum/go-ethereum/pull/28666#issuecomment-1891997143 +eels/consume-engine: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test_engine-zero_nonce]-reth - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-nonzero_balance]-reth - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-zero_balance]-reth @@ -131,7 +146,7 @@ eest/consume-engine: - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth -eest/consume-rlp: +eels/consume-rlp: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test-zero_nonce]-reth - tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py::test_system_contract_errors[fork_Prague-blockchain_test_engine-system_contract_reaches_gas_limit-system_contract_0x0000bbddc7ce488642fb579f8b00f3a590007251]-reth - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-nonzero_balance]-reth From 50e88c29be4294f24e1bb403ac94992db4b02e74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?jos=C3=A9=20v?= <52646071+Peponks9@users.noreply.github.com> Date: Mon, 27 Oct 2025 16:00:58 -0600 Subject: [PATCH 223/371] chore: replace `CacheDB` with `State` in RPC crate (#19330) Co-authored-by: Arsenii Kulikov --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 20 +++++++++---------- .../rpc/rpc-eth-api/src/helpers/estimate.rs | 11 +++++----- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 14 +++++++------ crates/rpc/rpc-eth-types/src/cache/db.rs | 6 +++--- crates/rpc/rpc/src/debug.rs | 15 +++++++------- crates/rpc/rpc/src/eth/bundle.rs | 4 ++-- crates/rpc/rpc/src/eth/sim_bundle.rs | 5 +++-- crates/rpc/rpc/src/trace.rs | 5 +++-- 8 files changed, 41 insertions(+), 39 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 221fef3680f..7eb10c10534 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -25,10 +25,7 @@ use reth_evm::{ }; use reth_node_api::BlockBody; use reth_primitives_traits::Recovered; -use reth_revm::{ - database::StateProviderDatabase, - db::{CacheDB, State}, -}; +use reth_revm::{database::StateProviderDatabase, db::State}; use reth_rpc_convert::{RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, @@ -286,7 +283,8 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let this = self.clone(); self.spawn_with_state_at_block(at.into(), move |state| { let mut all_results = Vec::with_capacity(bundles.len()); - let mut db = 
CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); if replay_block_txs { // only need to replay the transactions in the block if not all transactions are @@ -399,7 +397,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA { self.spawn_blocking_io_fut(move |this| async move { let state = this.state_at_block_id(at).await?; - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = State::builder().with_database(StateProviderDatabase::new(state)).build(); if let Some(state_overrides) = state_override { apply_state_overrides(state_overrides, &mut db) @@ -629,8 +627,9 @@ pub trait Call: let this = self.clone(); self.spawn_blocking_io_fut(move |_| async move { let state = this.state_at_block_id(at).await?; - let mut db = - CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); + let mut db = State::builder() + .with_database(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))) + .build(); let (evm_env, tx_env) = this.prepare_call_env(evm_env, request, &mut db, overrides)?; @@ -681,7 +680,8 @@ pub trait Call: let this = self.clone(); self.spawn_with_state_at_block(parent_block.into(), move |state| { - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); let block_txs = block.transactions_recovered(); // replay all transactions prior to the targeted transaction @@ -700,7 +700,7 @@ pub trait Call: /// Replays all the transactions until the target transaction is found. /// /// All transactions before the target transaction are executed and their changes are written to - /// the _runtime_ db ([`CacheDB`]). + /// the _runtime_ db ([`State`]). /// /// Note: This assumes the target transaction is in the given iterator. /// Returns the index of the target transaction in the given iterator. diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index cd2518345ce..6c14f96049c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -10,7 +10,7 @@ use futures::Future; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_errors::ProviderError; use reth_evm::{ConfigureEvm, Database, Evm, EvmEnvFor, EvmFor, TransactionEnv, TxEnvFor}; -use reth_revm::{database::StateProviderDatabase, db::CacheDB}; +use reth_revm::{database::StateProviderDatabase, db::State}; use reth_rpc_convert::{RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ error::{api::FromEvmHalt, FromEvmError}, @@ -81,7 +81,7 @@ pub trait EstimateCall: Call { .unwrap_or(max_gas_limit); // Configure the evm env - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = State::builder().with_database(StateProviderDatabase::new(state)).build(); // Apply any state overrides if specified. 
if let Some(state_override) = state_override { @@ -93,7 +93,7 @@ pub trait EstimateCall: Call { // Check if this is a basic transfer (no input data to account with no code) let is_basic_transfer = if tx_env.input().is_empty() && let TxKind::Call(to) = tx_env.kind() && - let Ok(code) = db.db.account_code(&to) + let Ok(code) = db.database.account_code(&to) { code.map(|code| code.is_empty()).unwrap_or(true) } else { @@ -234,9 +234,8 @@ pub trait EstimateCall: Call { // An estimation error is allowed once the current gas limit range used in the binary // search is small enough (less than 1.5% of the highest gas limit) // > { + 'static, { self.with_state_at_block(at, move |this, state| { - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = State::builder().with_database(StateProviderDatabase::new(state)).build(); let mut inspector = TracingInspector::new(config); let res = this.inspect(&mut db, evm_env, tx_env, &mut inspector)?; f(inspector, res) @@ -103,7 +103,7 @@ pub trait Trace: LoadState> { { let this = self.clone(); self.spawn_with_state_at_block(at, move |state| { - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = State::builder().with_database(StateProviderDatabase::new(state)).build(); let mut inspector = TracingInspector::new(config); let res = this.inspect(&mut db, evm_env, tx_env, &mut inspector)?; f(inspector, res, db) @@ -184,7 +184,8 @@ pub trait Trace: LoadState> { let this = self.clone(); self.spawn_with_state_at_block(parent_block.into(), move |state| { - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); let block_txs = block.transactions_recovered(); this.apply_pre_execution_changes(&block, &mut db, &evm_env)?; @@ -306,8 +307,9 @@ pub trait Trace: LoadState> { // now get the state let state = this.state_at_block_id(state_at.into()).await?; - let mut db = - CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); + let mut db = State::builder() + .with_database(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))) + .build(); this.apply_pre_execution_changes(&block, &mut db, &evm_env)?; diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index abb8983485a..8209af0fa53 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -8,14 +8,14 @@ use reth_revm::{database::StateProviderDatabase, DatabaseRef}; use reth_storage_api::{BytecodeReader, HashedPostStateProvider, StateProvider}; use reth_trie::{HashedStorage, MultiProofTargets}; use revm::{ - database::{BundleState, CacheDB}, + database::{BundleState, State}, primitives::HashMap, state::{AccountInfo, Bytecode}, Database, DatabaseCommit, }; -/// Helper alias type for the state's [`CacheDB`] -pub type StateCacheDb<'a> = CacheDB>>; +/// Helper alias type for the state's [`State`] +pub type StateCacheDb<'a> = State>>; /// Hack to get around 'higher-ranked lifetime error', see /// diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 066f7180c85..99b37a09d9d 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -22,11 +22,7 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_errors::RethError; use reth_evm::{execute::Executor, ConfigureEvm, EvmEnvFor, TxEnvFor}; use reth_primitives_traits::{Block as _, BlockBody, ReceiptWithBloom, RecoveredBlock}; -use reth_revm::{ - 
database::StateProviderDatabase, - db::{CacheDB, State}, - witness::ExecutionWitnessRecord, -}; +use reth_revm::{database::StateProviderDatabase, db::State, witness::ExecutionWitnessRecord}; use reth_rpc_api::DebugApiServer; use reth_rpc_convert::RpcTxReq; use reth_rpc_eth_api::{ @@ -100,7 +96,8 @@ where self.eth_api() .spawn_with_state_at_block(block.parent_hash().into(), move |state| { let mut results = Vec::with_capacity(block.body().transactions().len()); - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); this.eth_api().apply_pre_execution_changes(&block, &mut db, &evm_env)?; @@ -230,7 +227,8 @@ where // configure env for the target transaction let tx = transaction.into_recovered(); - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); this.eth_api().apply_pre_execution_changes(&block, &mut db, &evm_env)?; @@ -535,7 +533,8 @@ where .spawn_with_state_at_block(at.into(), move |state| { // the outer vec for the bundles let mut all_bundles = Vec::with_capacity(bundles.len()); - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); if replay_block_txs { // only need to replay the transactions in the block if not all transactions are diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 0797c2f1f8c..d49b5486d3d 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -8,7 +8,7 @@ use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTra use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ConfigureEvm, Evm}; -use reth_revm::{database::StateProviderDatabase, db::CacheDB}; +use reth_revm::{database::StateProviderDatabase, State}; use reth_rpc_eth_api::{ helpers::{Call, EthTransactions, LoadPendingBlock}, EthCallBundleApiServer, FromEthApiError, FromEvmError, @@ -150,7 +150,7 @@ where .spawn_with_state_at_block(at, move |state| { let coinbase = evm_env.block_env.beneficiary(); let basefee = evm_env.block_env.basefee(); - let db = CacheDB::new(StateProviderDatabase::new(state)); + let db = State::builder().with_database(StateProviderDatabase::new(state)).build(); let initial_coinbase = db .basic_ref(coinbase) diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 328ea29193f..fa3fd46e45c 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -12,7 +12,7 @@ use alloy_rpc_types_mev::{ use jsonrpsee::core::RpcResult; use reth_evm::{ConfigureEvm, Evm}; use reth_primitives_traits::Recovered; -use reth_revm::{database::StateProviderDatabase, db::CacheDB}; +use reth_revm::{database::StateProviderDatabase, State}; use reth_rpc_api::MevSimApiServer; use reth_rpc_eth_api::{ helpers::{block::LoadBlock, Call, EthTransactions}, @@ -246,7 +246,8 @@ where let current_block_number = current_block.number(); let coinbase = evm_env.block_env.beneficiary(); let basefee = evm_env.block_env.basefee(); - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); // apply overrides apply_block_overrides(block_overrides, &mut db, evm_env.block_env.inner_mut()); diff --git a/crates/rpc/rpc/src/trace.rs 
b/crates/rpc/rpc/src/trace.rs index 4ed42bc721d..6e4205eead4 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -20,7 +20,7 @@ use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardfork, MAINNET, SEPOLIA}; use reth_evm::ConfigureEvm; use reth_primitives_traits::{BlockBody, BlockHeader}; -use reth_revm::{database::StateProviderDatabase, db::CacheDB}; +use reth_revm::{database::StateProviderDatabase, State}; use reth_rpc_api::TraceApiServer; use reth_rpc_convert::RpcTxReq; use reth_rpc_eth_api::{ @@ -158,7 +158,8 @@ where self.eth_api() .spawn_with_state_at_block(at, move |state| { let mut results = Vec::with_capacity(calls.len()); - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); let mut calls = calls.into_iter().peekable(); From e2b5c7367c63568f8ff411b27dceaa5d3700317d Mon Sep 17 00:00:00 2001 From: YK Date: Tue, 28 Oct 2025 14:44:19 +0800 Subject: [PATCH 224/371] chore: update Grafana dashboard with split pending multiproof metrics (#19339) --- etc/grafana/dashboards/overview.json | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 46a465ca4a4..7cd11369646 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -4239,11 +4239,23 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_tree_root_pending_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "instant": false, - "legendFormat": "{{quantile}} percentile", + "legendFormat": "storage {{quantile}} percentile", "range": true, - "refId": "Branch Nodes" + "refId": "Storage" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "account {{quantile}} percentile", + "range": true, + "refId": "Account" } ], "title": "Pending MultiProof requests", From 7e6f676d16b9f13b785dda5cfb724c1a7157a2a4 Mon Sep 17 00:00:00 2001 From: YK Date: Tue, 28 Oct 2025 17:00:22 +0800 Subject: [PATCH 225/371] feat(metrics): improve multiproof worker metrics (#19337) --- .../src/tree/payload_processor/multiproof.rs | 53 ++++++++----- crates/trie/parallel/src/proof_task.rs | 32 ++++++++ etc/grafana/dashboards/overview.json | 78 +++++++++++++++++-- 3 files changed, 140 insertions(+), 23 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index a000e7a5adf..ca3bd380d4d 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -9,7 +9,7 @@ use alloy_primitives::{ use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; use dashmap::DashMap; use derive_more::derive::Deref; -use metrics::Histogram; +use metrics::{Gauge, Histogram}; use reth_metrics::Metrics; use reth_revm::state::EvmState; use reth_trie::{ @@ -319,8 +319,6 @@ impl MultiproofInput { /// `ProofSequencer`. 
#[derive(Debug)] pub struct MultiproofManager { - /// Currently running calculations. - inflight: usize, /// Handle to the proof worker pools (storage and account). proof_worker_handle: ProofWorkerHandle, /// Cached storage proof roots for missed leaves; this maps @@ -349,8 +347,11 @@ impl MultiproofManager { proof_worker_handle: ProofWorkerHandle, proof_result_tx: CrossbeamSender, ) -> Self { + // Initialize the max worker gauges with the worker pool sizes + metrics.max_storage_workers.set(proof_worker_handle.total_storage_workers() as f64); + metrics.max_account_workers.set(proof_worker_handle.total_account_workers() as f64); + Self { - inflight: 0, metrics, proof_worker_handle, missed_leaves_storage_roots: Default::default(), @@ -359,7 +360,7 @@ impl MultiproofManager { } /// Dispatches a new multiproof calculation to worker pools. - fn dispatch(&mut self, input: PendingMultiproofTask) { + fn dispatch(&self, input: PendingMultiproofTask) { // If there are no proof targets, we can just send an empty multiproof back immediately if input.proof_targets_is_empty() { debug!( @@ -381,7 +382,7 @@ impl MultiproofManager { } /// Dispatches a single storage proof calculation to worker pool. - fn dispatch_storage_proof(&mut self, storage_multiproof_input: StorageMultiproofInput) { + fn dispatch_storage_proof(&self, storage_multiproof_input: StorageMultiproofInput) { let StorageMultiproofInput { hashed_state_update, hashed_address, @@ -432,8 +433,12 @@ impl MultiproofManager { return; } - self.inflight += 1; - self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + self.metrics + .active_storage_workers_histogram + .record(self.proof_worker_handle.active_storage_workers() as f64); + self.metrics + .active_account_workers_histogram + .record(self.proof_worker_handle.active_account_workers() as f64); self.metrics .pending_storage_multiproofs_histogram .record(self.proof_worker_handle.pending_storage_tasks() as f64); @@ -443,9 +448,13 @@ impl MultiproofManager { } /// Signals that a multiproof calculation has finished. - fn on_calculation_complete(&mut self) { - self.inflight = self.inflight.saturating_sub(1); - self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + fn on_calculation_complete(&self) { + self.metrics + .active_storage_workers_histogram + .record(self.proof_worker_handle.active_storage_workers() as f64); + self.metrics + .active_account_workers_histogram + .record(self.proof_worker_handle.active_account_workers() as f64); self.metrics .pending_storage_multiproofs_histogram .record(self.proof_worker_handle.pending_storage_tasks() as f64); @@ -455,7 +464,7 @@ impl MultiproofManager { } /// Dispatches a single multiproof calculation to worker pool. 
- fn dispatch_multiproof(&mut self, multiproof_input: MultiproofInput) { + fn dispatch_multiproof(&self, multiproof_input: MultiproofInput) { let MultiproofInput { source, hashed_state_update, @@ -506,8 +515,12 @@ impl MultiproofManager { return; } - self.inflight += 1; - self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + self.metrics + .active_storage_workers_histogram + .record(self.proof_worker_handle.active_storage_workers() as f64); + self.metrics + .active_account_workers_histogram + .record(self.proof_worker_handle.active_account_workers() as f64); self.metrics .pending_storage_multiproofs_histogram .record(self.proof_worker_handle.pending_storage_tasks() as f64); @@ -520,8 +533,14 @@ impl MultiproofManager { #[derive(Metrics, Clone)] #[metrics(scope = "tree.root")] pub(crate) struct MultiProofTaskMetrics { - /// Histogram of inflight multiproofs. - pub inflight_multiproofs_histogram: Histogram, + /// Histogram of active storage workers processing proofs. + pub active_storage_workers_histogram: Histogram, + /// Histogram of active account workers processing proofs. + pub active_account_workers_histogram: Histogram, + /// Gauge for the maximum number of storage workers in the pool. + pub max_storage_workers: Gauge, + /// Gauge for the maximum number of account workers in the pool. + pub max_account_workers: Gauge, /// Histogram of pending storage multiproofs in the queue. pub pending_storage_multiproofs_histogram: Histogram, /// Histogram of pending account multiproofs in the queue. @@ -583,7 +602,6 @@ pub(crate) struct MultiProofTaskMetrics { /// ▼ │ /// ┌──────────────────────────────────────────────────────────────┐ │ /// │ MultiproofManager │ │ -/// │ - Tracks inflight calculations │ │ /// │ - Deduplicates against fetched_proof_targets │ │ /// │ - Routes to appropriate worker pool │ │ /// └──┬───────────────────────────────────────────────────────────┘ │ @@ -624,7 +642,6 @@ pub(crate) struct MultiProofTaskMetrics { /// /// - **[`MultiproofManager`]**: Calculation orchestrator /// - Decides between fast path ([`EmptyProof`]) and worker dispatch -/// - Tracks inflight calculations /// - Routes storage-only vs full multiproofs to appropriate workers /// - Records metrics for monitoring /// diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 7e453cbc7c3..caca8687534 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -1017,6 +1017,10 @@ pub struct ProofWorkerHandle { /// Counter tracking available account workers. Workers decrement when starting work, /// increment when finishing. Used to determine whether to chunk multiproofs. account_available_workers: Arc, + /// Total number of storage workers spawned + storage_worker_count: usize, + /// Total number of account workers spawned + account_worker_count: usize, } impl ProofWorkerHandle { @@ -1118,6 +1122,8 @@ impl ProofWorkerHandle { account_work_tx, storage_available_workers, account_available_workers, + storage_worker_count, + account_worker_count, } } @@ -1141,6 +1147,32 @@ impl ProofWorkerHandle { self.account_work_tx.len() } + /// Returns the total number of storage workers in the pool. + pub const fn total_storage_workers(&self) -> usize { + self.storage_worker_count + } + + /// Returns the total number of account workers in the pool. + pub const fn total_account_workers(&self) -> usize { + self.account_worker_count + } + + /// Returns the number of storage workers currently processing tasks. 
+ /// + /// This is calculated as total workers minus available workers. + pub fn active_storage_workers(&self) -> usize { + self.storage_worker_count + .saturating_sub(self.storage_available_workers.load(Ordering::Relaxed)) + } + + /// Returns the number of account workers currently processing tasks. + /// + /// This is calculated as total workers minus available workers. + pub fn active_account_workers(&self) -> usize { + self.account_worker_count + .saturating_sub(self.account_available_workers.load(Ordering::Relaxed)) + } + /// Dispatch a storage proof computation to storage worker pool /// /// The result will be sent via the `proof_result_sender` channel. diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 7cd11369646..7337b2b886b 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -4320,7 +4320,38 @@ }, "unit": "none" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Max storage workers" + }, + "properties": [ + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Max account workers" + }, + "properties": [ + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + } + ] + } + ] }, "gridPos": { "h": 8, @@ -4328,6 +4359,7 @@ "x": 12, "y": 104 }, + "description": "The max metrics (Max storage workers and Max account workers) are displayed as dotted lines to highlight the configured upper limits.", "id": 256, "options": { "legend": { @@ -4350,14 +4382,50 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_tree_root_inflight_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "instant": false, - "legendFormat": "{{quantile}} percentile", + "legendFormat": "Storage workers {{quantile}} percentile", "range": true, - "refId": "Branch Nodes" + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "Account workers {{quantile}} percentile", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_tree_root_max_storage_workers{$instance_label=\"$instance\"}", + "instant": false, + "legendFormat": "Max storage workers", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_tree_root_max_account_workers{$instance_label=\"$instance\"}", + "instant": false, + "legendFormat": "Max account workers", + "range": true, + "refId": "D" } ], - "title": "In-flight MultiProof requests", + "title": "Active MultiProof Workers", "type": "timeseries" }, { From e547c027f3694c9b3cb0aba03c2f03aea12b663d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Oct 2025 10:22:33 +0100 Subject: [PATCH 226/371] chore(deps): bump actions/upload-artifact from 4 to 5 (#19335) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/hive.yml | 
2 +- .github/workflows/prepare-reth.yml | 2 +- .github/workflows/release.yml | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index ae147977580..215209f4b9f 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -68,7 +68,7 @@ jobs: chmod +x hive - name: Upload hive assets - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: hive_assets path: ./hive_assets diff --git a/.github/workflows/prepare-reth.yml b/.github/workflows/prepare-reth.yml index 37a9445af72..17be3767dce 100644 --- a/.github/workflows/prepare-reth.yml +++ b/.github/workflows/prepare-reth.yml @@ -51,7 +51,7 @@ jobs: - name: Upload reth image id: upload - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: artifacts path: ./artifacts diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f871b163a2d..4aa192e1a45 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -153,28 +153,28 @@ jobs: - name: Upload artifact if: ${{ github.event.inputs.dry_run != 'true' }} - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz - name: Upload signature if: ${{ github.event.inputs.dry_run != 'true' }} - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc - name: Upload deb package if: ${{ github.event.inputs.dry_run != 'true' && matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb - name: Upload deb package signature if: ${{ github.event.inputs.dry_run != 'true' && matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb.asc path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb.asc From 0da38b9732396473a4a6c6500101a97b62baa593 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Oct 2025 10:23:53 +0100 Subject: [PATCH 227/371] chore(deps): bump actions/download-artifact from 5 to 6 (#19336) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/hive.yml | 4 ++-- .github/workflows/kurtosis-op.yml | 2 +- .github/workflows/kurtosis.yml | 2 +- .github/workflows/release.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/hive.yml 
b/.github/workflows/hive.yml index 215209f4b9f..7d0ac65bee7 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -189,13 +189,13 @@ jobs: fetch-depth: 0 - name: Download hive assets - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 with: name: hive_assets path: /tmp - name: Download reth image - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 with: name: artifacts path: /tmp diff --git a/.github/workflows/kurtosis-op.yml b/.github/workflows/kurtosis-op.yml index 0e08d1641de..7477e759209 100644 --- a/.github/workflows/kurtosis-op.yml +++ b/.github/workflows/kurtosis-op.yml @@ -42,7 +42,7 @@ jobs: fetch-depth: 0 - name: Download reth image - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 with: name: artifacts path: /tmp diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml index f78fc81235a..b45e997ef73 100644 --- a/.github/workflows/kurtosis.yml +++ b/.github/workflows/kurtosis.yml @@ -40,7 +40,7 @@ jobs: fetch-depth: 0 - name: Download reth image - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 with: name: artifacts path: /tmp diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4aa192e1a45..70960d2fe00 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -196,7 +196,7 @@ jobs: with: fetch-depth: 0 - name: Download artifacts - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 - name: Generate full changelog id: changelog run: | From 5e2ed163f3e8d4cf776afdfd4f6c1553de8a77f1 Mon Sep 17 00:00:00 2001 From: Francis Li Date: Tue, 28 Oct 2025 02:27:33 -0700 Subject: [PATCH 228/371] fix(engine): Eliminates spurious warning logs in prewarm task (#19133) Co-authored-by: Matthias Seitz --- .../src/tree/payload_processor/prewarm.rs | 83 +++++++------------ 1 file changed, 32 insertions(+), 51 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 9815ea81228..de831d1858b 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -146,7 +146,6 @@ where let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", parent: span, "spawn_all").entered(); let (done_tx, done_rx) = mpsc::channel(); - let mut executing = 0usize; // When transaction_count_hint is 0, it means the count is unknown. 
In this case, spawn // max workers to handle potentially many transactions in parallel rather @@ -165,62 +164,44 @@ where handles.push(ctx.spawn_worker(i, &executor, actions_tx.clone(), done_tx.clone())); } + // Distribute transactions to workers let mut tx_index = 0usize; + while let Ok(tx) = pending.recv() { + // Stop distributing if termination was requested + if ctx.terminate_execution.load(Ordering::Relaxed) { + trace!( + target: "engine::tree::payload_processor::prewarm", + "Termination requested, stopping transaction distribution" + ); + break; + } + + let indexed_tx = IndexedTransaction { index: tx_index, tx }; + let is_system_tx = indexed_tx.tx.tx().ty() > MAX_STANDARD_TX_TYPE; - // Handle first transaction - special case for system transactions - if let Ok(first_tx) = pending.recv() { - // Move the transaction into the indexed wrapper to avoid an extra clone - let indexed_tx = IndexedTransaction { index: tx_index, tx: first_tx }; - // Compute metadata from the moved value - let tx_ref = indexed_tx.tx.tx(); - let is_system_tx = tx_ref.ty() > MAX_STANDARD_TX_TYPE; - let first_tx_hash = tx_ref.tx_hash(); - - // Check if this is a system transaction (type > 4) - // System transactions in the first position typically set critical metadata - // that affects all subsequent transactions (e.g., L1 block info, fees on L2s). - if is_system_tx { - // Broadcast system transaction to all workers to ensure they have the - // critical state. This is particularly important for L2s like Optimism - // where the first deposit transaction contains essential block metadata. + // System transactions (type > 4) in the first position set critical metadata + // that affects all subsequent transactions (e.g., L1 block info on L2s). + // Broadcast the first system transaction to all workers to ensure they have + // the critical state. This is particularly important for L2s like Optimism + // where the first deposit transaction (type 126) contains essential block metadata. + if tx_index == 0 && is_system_tx { for handle in &handles { - if let Err(err) = handle.send(indexed_tx.clone()) { - warn!( - target: "engine::tree::payload_processor::prewarm", - tx_hash = %first_tx_hash, - error = %err, - "Failed to send deposit transaction to worker" - ); - } + // Ignore send errors: workers listen to terminate_execution and may + // exit early when signaled. Sending to a disconnected worker is + // possible and harmless and should happen at most once due to + // the terminate_execution check above. + let _ = handle.send(indexed_tx.clone()); } } else { - // Not a deposit, send to first worker via round-robin - if let Err(err) = handles[0].send(indexed_tx) { - warn!( - target: "engine::tree::payload_processor::prewarm", - task_idx = 0, - error = %err, - "Failed to send transaction to worker" - ); - } + // Round-robin distribution for all other transactions + let worker_idx = tx_index % workers_needed; + // Ignore send errors: workers listen to terminate_execution and may + // exit early when signaled. Sending to a disconnected worker is + // possible and harmless and should happen at most once due to + // the terminate_execution check above. 
+ let _ = handles[worker_idx].send(indexed_tx); } - executing += 1; - tx_index += 1; - } - // Process remaining transactions with round-robin distribution - while let Ok(executable) = pending.recv() { - let indexed_tx = IndexedTransaction { index: tx_index, tx: executable }; - let task_idx = executing % workers_needed; - if let Err(err) = handles[task_idx].send(indexed_tx) { - warn!( - target: "engine::tree::payload_processor::prewarm", - task_idx, - error = %err, - "Failed to send transaction to worker" - ); - } - executing += 1; tx_index += 1; } @@ -230,7 +211,7 @@ where while done_rx.recv().is_ok() {} let _ = actions_tx - .send(PrewarmTaskEvent::FinishedTxExecution { executed_transactions: executing }); + .send(PrewarmTaskEvent::FinishedTxExecution { executed_transactions: tx_index }); }); } From 0f3e0eee637b432a34332fc690dbe0c857f96c34 Mon Sep 17 00:00:00 2001 From: Avory Date: Tue, 28 Oct 2025 12:14:07 +0200 Subject: [PATCH 229/371] refactor: make DatabaseProof trait stateful (#18753) --- .../src/providers/state/historical.rs | 6 +- .../provider/src/providers/state/latest.rs | 6 +- crates/trie/db/src/proof.rs | 59 ++++++++----------- crates/trie/db/tests/proof.rs | 15 +++-- crates/trie/db/tests/witness.rs | 9 ++- crates/trie/trie/src/proof/mod.rs | 10 ++++ 6 files changed, 60 insertions(+), 45 deletions(-) diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index f3e69bf7d91..666138fae7b 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -368,7 +368,8 @@ impl StateProofProvider slots: &[B256], ) -> ProviderResult { input.prepend(self.revert_state()?); - Proof::overlay_account_proof(self.tx(), input, address, slots).map_err(ProviderError::from) + let proof = as DatabaseProof>::from_tx(self.tx()); + proof.overlay_account_proof(input, address, slots).map_err(ProviderError::from) } fn multiproof( @@ -377,7 +378,8 @@ impl StateProofProvider targets: MultiProofTargets, ) -> ProviderResult { input.prepend(self.revert_state()?); - Proof::overlay_multiproof(self.tx(), input, targets).map_err(ProviderError::from) + let proof = as DatabaseProof>::from_tx(self.tx()); + proof.overlay_multiproof(input, targets).map_err(ProviderError::from) } fn witness(&self, mut input: TrieInput, target: HashedPostState) -> ProviderResult> { diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index de8eef2cc9c..092feb37c43 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -124,7 +124,8 @@ impl StateProofProvider for LatestStateProviderRef< address: Address, slots: &[B256], ) -> ProviderResult { - Proof::overlay_account_proof(self.tx(), input, address, slots).map_err(ProviderError::from) + let proof = as DatabaseProof>::from_tx(self.tx()); + proof.overlay_account_proof(input, address, slots).map_err(ProviderError::from) } fn multiproof( @@ -132,7 +133,8 @@ impl StateProofProvider for LatestStateProviderRef< input: TrieInput, targets: MultiProofTargets, ) -> ProviderResult { - Proof::overlay_multiproof(self.tx(), input, targets).map_err(ProviderError::from) + let proof = as DatabaseProof>::from_tx(self.tx()); + proof.overlay_multiproof(input, targets).map_err(ProviderError::from) } fn witness(&self, input: TrieInput, target: HashedPostState) -> ProviderResult> { diff --git a/crates/trie/db/src/proof.rs 
b/crates/trie/db/src/proof.rs index 8b338001fae..8f79c21c156 100644 --- a/crates/trie/db/src/proof.rs +++ b/crates/trie/db/src/proof.rs @@ -11,13 +11,16 @@ use reth_trie::{ }; /// Extends [`Proof`] with operations specific for working with a database transaction. -pub trait DatabaseProof<'a, TX> { - /// Create a new [Proof] from database transaction. - fn from_tx(tx: &'a TX) -> Self; +pub trait DatabaseProof<'a> { + /// Associated type for the database transaction. + type Tx; + + /// Create a new [`Proof`] instance from database transaction. + fn from_tx(tx: &'a Self::Tx) -> Self; /// Generates the state proof for target account based on [`TrieInput`]. fn overlay_account_proof( - tx: &'a TX, + &self, input: TrieInput, address: Address, slots: &[B256], @@ -25,59 +28,49 @@ pub trait DatabaseProof<'a, TX> { /// Generates the state [`MultiProof`] for target hashed account and storage keys. fn overlay_multiproof( - tx: &'a TX, + &self, input: TrieInput, targets: MultiProofTargets, ) -> Result; } -impl<'a, TX: DbTx> DatabaseProof<'a, TX> +impl<'a, TX: DbTx> DatabaseProof<'a> for Proof, DatabaseHashedCursorFactory<&'a TX>> { - /// Create a new [Proof] instance from database transaction. - fn from_tx(tx: &'a TX) -> Self { + type Tx = TX; + + fn from_tx(tx: &'a Self::Tx) -> Self { Self::new(DatabaseTrieCursorFactory::new(tx), DatabaseHashedCursorFactory::new(tx)) } - fn overlay_account_proof( - tx: &'a TX, + &self, input: TrieInput, address: Address, slots: &[B256], ) -> Result { let nodes_sorted = input.nodes.into_sorted(); let state_sorted = input.state.into_sorted(); - Self::from_tx(tx) - .with_trie_cursor_factory(InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(tx), - &nodes_sorted, - )) - .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(tx), - &state_sorted, - )) - .with_prefix_sets_mut(input.prefix_sets) - .account_proof(address, slots) + Proof::new( + InMemoryTrieCursorFactory::new(self.trie_cursor_factory().clone(), &nodes_sorted), + HashedPostStateCursorFactory::new(self.hashed_cursor_factory().clone(), &state_sorted), + ) + .with_prefix_sets_mut(input.prefix_sets) + .account_proof(address, slots) } fn overlay_multiproof( - tx: &'a TX, + &self, input: TrieInput, targets: MultiProofTargets, ) -> Result { let nodes_sorted = input.nodes.into_sorted(); let state_sorted = input.state.into_sorted(); - Self::from_tx(tx) - .with_trie_cursor_factory(InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(tx), - &nodes_sorted, - )) - .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(tx), - &state_sorted, - )) - .with_prefix_sets_mut(input.prefix_sets) - .multiproof(targets) + Proof::new( + InMemoryTrieCursorFactory::new(self.trie_cursor_factory().clone(), &nodes_sorted), + HashedPostStateCursorFactory::new(self.hashed_cursor_factory().clone(), &state_sorted), + ) + .with_prefix_sets_mut(input.prefix_sets) + .multiproof(targets) } } diff --git a/crates/trie/db/tests/proof.rs b/crates/trie/db/tests/proof.rs index 401ba07b22d..402f0cabff3 100644 --- a/crates/trie/db/tests/proof.rs +++ b/crates/trie/db/tests/proof.rs @@ -86,7 +86,8 @@ fn testspec_proofs() { let provider = factory.provider().unwrap(); for (target, expected_proof) in data { let target = Address::from_str(target).unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = 
proof.account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!( account_proof.proof, expected_proof, @@ -106,7 +107,8 @@ fn testspec_empty_storage_proof() { let slots = Vec::from([B256::with_last_byte(1), B256::with_last_byte(3)]); let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &slots).unwrap(); assert_eq!(account_proof.storage_root, EMPTY_ROOT_HASH, "expected empty storage root"); assert_eq!(slots.len(), account_proof.storage_proofs.len()); @@ -141,7 +143,8 @@ fn mainnet_genesis_account_proof() { ]); let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); assert_eq!(account_proof.verify(root), Ok(())); } @@ -164,7 +167,8 @@ fn mainnet_genesis_account_proof_nonexistent() { ]); let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); assert_eq!(account_proof.verify(root), Ok(())); } @@ -259,7 +263,8 @@ fn holesky_deposit_contract_proof() { }; let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &slots).unwrap(); similar_asserts::assert_eq!(account_proof, expected); assert_eq!(account_proof.verify(root), Ok(())); } diff --git a/crates/trie/db/tests/witness.rs b/crates/trie/db/tests/witness.rs index 5dfa1c3e4ae..14457fccc6e 100644 --- a/crates/trie/db/tests/witness.rs +++ b/crates/trie/db/tests/witness.rs @@ -41,7 +41,8 @@ fn includes_empty_node_preimage() { provider.insert_account_for_hashing([(address, Some(Account::default()))]).unwrap(); let state_root = StateRoot::from_tx(provider.tx_ref()).root().unwrap(); - let multiproof = Proof::from_tx(provider.tx_ref()) + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let multiproof = proof .multiproof(MultiProofTargets::from_iter([( hashed_address, HashSet::from_iter([hashed_slot]), @@ -82,7 +83,8 @@ fn includes_nodes_for_destroyed_storage_nodes() { .unwrap(); let state_root = StateRoot::from_tx(provider.tx_ref()).root().unwrap(); - let multiproof = Proof::from_tx(provider.tx_ref()) + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let multiproof = proof .multiproof(MultiProofTargets::from_iter([( hashed_address, HashSet::from_iter([hashed_slot]), @@ -130,7 +132,8 @@ fn correctly_decodes_branch_node_values() { .unwrap(); let state_root = StateRoot::from_tx(provider.tx_ref()).root().unwrap(); - let multiproof = Proof::from_tx(provider.tx_ref()) + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let multiproof = proof .multiproof(MultiProofTargets::from_iter([( hashed_address, HashSet::from_iter([hashed_slot1, hashed_slot2]), diff --git a/crates/trie/trie/src/proof/mod.rs b/crates/trie/trie/src/proof/mod.rs index 348cdb430a2..efd958e5743 100644 --- 
a/crates/trie/trie/src/proof/mod.rs +++ b/crates/trie/trie/src/proof/mod.rs @@ -80,6 +80,16 @@ impl Proof { self.collect_branch_node_masks = branch_node_masks; self } + + /// Get a reference to the trie cursor factory. + pub const fn trie_cursor_factory(&self) -> &T { + &self.trie_cursor_factory + } + + /// Get a reference to the hashed cursor factory. + pub const fn hashed_cursor_factory(&self) -> &H { + &self.hashed_cursor_factory + } } impl Proof From 5091482dec7ccd7df03b349030eb1f0382a8f1d5 Mon Sep 17 00:00:00 2001 From: YK Date: Tue, 28 Oct 2025 19:14:08 +0800 Subject: [PATCH 230/371] refactor(trie): reorder proof_task.rs for better code organization (#19342) --- crates/trie/parallel/src/proof_task.rs | 2152 ++++++++++++------------ 1 file changed, 1070 insertions(+), 1082 deletions(-) diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index caca8687534..93eb03bde91 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -77,291 +77,542 @@ use crate::proof_task_metrics::ProofTaskTrieMetrics; type StorageProofResult = Result; type TrieNodeProviderResult = Result, SparseTrieError>; -/// Result of a proof calculation, which can be either an account multiproof or a storage proof. -#[derive(Debug)] -pub enum ProofResult { - /// Account multiproof with statistics - AccountMultiproof { - /// The account multiproof - proof: DecodedMultiProof, - /// Statistics collected during proof computation - stats: ParallelTrieStats, - }, - /// Storage proof for a specific account - StorageProof { - /// The hashed address this storage proof belongs to - hashed_address: B256, - /// The storage multiproof - proof: DecodedStorageMultiProof, - }, +/// A handle that provides type-safe access to proof worker pools. +/// +/// The handle stores direct senders to both storage and account worker pools, +/// eliminating the need for a routing thread. All handles share reference-counted +/// channels, and workers shut down gracefully when all handles are dropped. +#[derive(Debug, Clone)] +pub struct ProofWorkerHandle { + /// Direct sender to storage worker pool + storage_work_tx: CrossbeamSender, + /// Direct sender to account worker pool + account_work_tx: CrossbeamSender, + /// Counter tracking available storage workers. Workers decrement when starting work, + /// increment when finishing. Used to determine whether to chunk multiproofs. + storage_available_workers: Arc, + /// Counter tracking available account workers. Workers decrement when starting work, + /// increment when finishing. Used to determine whether to chunk multiproofs. + account_available_workers: Arc, + /// Total number of storage workers spawned + storage_worker_count: usize, + /// Total number of account workers spawned + account_worker_count: usize, } -impl ProofResult { - /// Convert this proof result into a `DecodedMultiProof`. +impl ProofWorkerHandle { + /// Spawns storage and account worker pools with dedicated database transactions. /// - /// For account multiproofs, returns the multiproof directly (discarding stats). - /// For storage proofs, wraps the storage proof into a minimal multiproof. - pub fn into_multiproof(self) -> DecodedMultiProof { - match self { - Self::AccountMultiproof { proof, stats: _ } => proof, - Self::StorageProof { hashed_address, proof } => { - DecodedMultiProof::from_storage_proof(hashed_address, proof) - } + /// Returns a handle for submitting proof tasks to the worker pools. + /// Workers run until the last handle is dropped. 
+ /// + /// # Parameters + /// - `executor`: Tokio runtime handle for spawning blocking tasks + /// - `task_ctx`: Shared context with database view and prefix sets + /// - `storage_worker_count`: Number of storage workers to spawn + /// - `account_worker_count`: Number of account workers to spawn + pub fn new( + executor: Handle, + task_ctx: ProofTaskCtx, + storage_worker_count: usize, + account_worker_count: usize, + ) -> Self + where + Factory: DatabaseProviderROFactory + + Clone + + Send + + 'static, + { + let (storage_work_tx, storage_work_rx) = unbounded::(); + let (account_work_tx, account_work_rx) = unbounded::(); + + // Initialize availability counters at zero. Each worker will increment when it + // successfully initializes, ensuring only healthy workers are counted. + let storage_available_workers = Arc::new(AtomicUsize::new(0)); + let account_available_workers = Arc::new(AtomicUsize::new(0)); + + debug!( + target: "trie::proof_task", + storage_worker_count, + account_worker_count, + "Spawning proof worker pools" + ); + + let parent_span = + debug_span!(target: "trie::proof_task", "storage proof workers", ?storage_worker_count) + .entered(); + // Spawn storage workers + for worker_id in 0..storage_worker_count { + let span = debug_span!(target: "trie::proof_task", "storage worker", ?worker_id); + let task_ctx_clone = task_ctx.clone(); + let work_rx_clone = storage_work_rx.clone(); + let storage_available_workers_clone = storage_available_workers.clone(); + + executor.spawn_blocking(move || { + #[cfg(feature = "metrics")] + let metrics = ProofTaskTrieMetrics::default(); + + let _guard = span.enter(); + storage_worker_loop( + task_ctx_clone, + work_rx_clone, + worker_id, + storage_available_workers_clone, + #[cfg(feature = "metrics")] + metrics, + ) + }); + } + drop(parent_span); + + let parent_span = + debug_span!(target: "trie::proof_task", "account proof workers", ?storage_worker_count) + .entered(); + // Spawn account workers + for worker_id in 0..account_worker_count { + let span = debug_span!(target: "trie::proof_task", "account worker", ?worker_id); + let task_ctx_clone = task_ctx.clone(); + let work_rx_clone = account_work_rx.clone(); + let storage_work_tx_clone = storage_work_tx.clone(); + let account_available_workers_clone = account_available_workers.clone(); + + executor.spawn_blocking(move || { + #[cfg(feature = "metrics")] + let metrics = ProofTaskTrieMetrics::default(); + + let _guard = span.enter(); + account_worker_loop( + task_ctx_clone, + work_rx_clone, + storage_work_tx_clone, + worker_id, + account_available_workers_clone, + #[cfg(feature = "metrics")] + metrics, + ) + }); + } + drop(parent_span); + + Self { + storage_work_tx, + account_work_tx, + storage_available_workers, + account_available_workers, + storage_worker_count, + account_worker_count, } } -} -/// Channel used by worker threads to deliver `ProofResultMessage` items back to -/// `MultiProofTask`. -/// -/// Workers use this sender to deliver proof results directly to `MultiProofTask`. -pub type ProofResultSender = CrossbeamSender; + /// Returns true if there are available storage workers to process tasks. + pub fn has_available_storage_workers(&self) -> bool { + self.storage_available_workers.load(Ordering::Relaxed) > 0 + } -/// Message containing a completed proof result with metadata for direct delivery to -/// `MultiProofTask`. -/// -/// This type enables workers to send proof results directly to the `MultiProofTask` event loop. 
-#[derive(Debug)] -pub struct ProofResultMessage { - /// Sequence number for ordering proofs - pub sequence_number: u64, - /// The proof calculation result (either account multiproof or storage proof) - pub result: Result, - /// Time taken for the entire proof calculation (from dispatch to completion) - pub elapsed: Duration, - /// Original state update that triggered this proof - pub state: HashedPostState, -} + /// Returns true if there are available account workers to process tasks. + pub fn has_available_account_workers(&self) -> bool { + self.account_available_workers.load(Ordering::Relaxed) > 0 + } -/// Context for sending proof calculation results back to `MultiProofTask`. -/// -/// This struct contains all context needed to send and track proof calculation results. -/// Workers use this to deliver completed proofs back to the main event loop. -#[derive(Debug, Clone)] -pub struct ProofResultContext { - /// Channel sender for result delivery - pub sender: ProofResultSender, - /// Sequence number for proof ordering - pub sequence_number: u64, - /// Original state update that triggered this proof - pub state: HashedPostState, - /// Calculation start time for measuring elapsed duration - pub start_time: Instant, -} + /// Returns the number of pending storage tasks in the queue. + pub fn pending_storage_tasks(&self) -> usize { + self.storage_work_tx.len() + } -impl ProofResultContext { - /// Creates a new proof result context. - pub const fn new( - sender: ProofResultSender, - sequence_number: u64, - state: HashedPostState, - start_time: Instant, - ) -> Self { - Self { sender, sequence_number, state, start_time } + /// Returns the number of pending account tasks in the queue. + pub fn pending_account_tasks(&self) -> usize { + self.account_work_tx.len() } -} -/// Internal message for storage workers. -#[derive(Debug)] -enum StorageWorkerJob { - /// Storage proof computation request - StorageProof { - /// Storage proof input parameters + /// Returns the total number of storage workers in the pool. + pub const fn total_storage_workers(&self) -> usize { + self.storage_worker_count + } + + /// Returns the total number of account workers in the pool. + pub const fn total_account_workers(&self) -> usize { + self.account_worker_count + } + + /// Returns the number of storage workers currently processing tasks. + /// + /// This is calculated as total workers minus available workers. + pub fn active_storage_workers(&self) -> usize { + self.storage_worker_count + .saturating_sub(self.storage_available_workers.load(Ordering::Relaxed)) + } + + /// Returns the number of account workers currently processing tasks. + /// + /// This is calculated as total workers minus available workers. + pub fn active_account_workers(&self) -> usize { + self.account_worker_count + .saturating_sub(self.account_available_workers.load(Ordering::Relaxed)) + } + + /// Dispatch a storage proof computation to storage worker pool + /// + /// The result will be sent via the `proof_result_sender` channel. + pub fn dispatch_storage_proof( + &self, input: StorageProofInput, - /// Context for sending the proof result. 
proof_result_sender: ProofResultContext, - }, - /// Blinded storage node retrieval request - BlindedStorageNode { - /// Target account - account: B256, - /// Path to the storage node - path: Nibbles, - /// Channel to send result back to original caller - result_sender: Sender, - }, -} + ) -> Result<(), ProviderError> { + self.storage_work_tx + .send(StorageWorkerJob::StorageProof { input, proof_result_sender }) + .map_err(|err| { + let error = + ProviderError::other(std::io::Error::other("storage workers unavailable")); -/// Worker loop for storage trie operations. -/// -/// # Lifecycle -/// -/// Each worker: -/// 1. Receives `StorageWorkerJob` from crossbeam unbounded channel -/// 2. Computes result using its dedicated long-lived transaction -/// 3. Sends result directly to original caller via `std::mpsc` -/// 4. Repeats until channel closes (graceful shutdown) -/// -/// # Transaction Reuse -/// -/// Reuses the same transaction and cursor factories across multiple operations -/// to avoid transaction creation and cursor factory setup overhead. -/// -/// # Panic Safety -/// -/// If this function panics, the worker thread terminates but other workers -/// continue operating and the system degrades gracefully. -/// -/// # Shutdown -/// -/// Worker shuts down when the crossbeam channel closes (all senders dropped). -fn storage_worker_loop( - task_ctx: ProofTaskCtx, - work_rx: CrossbeamReceiver, - worker_id: usize, - available_workers: Arc, - #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, -) where - Factory: DatabaseProviderROFactory, -{ - // Create provider from factory - let provider = task_ctx - .factory - .database_provider_ro() - .expect("Storage worker failed to initialize: unable to create provider"); - let proof_tx = ProofTaskTx::new(provider, task_ctx.prefix_sets, worker_id); + if let StorageWorkerJob::StorageProof { proof_result_sender, .. } = err.0 { + let ProofResultContext { + sender: result_tx, + sequence_number: seq, + state, + start_time: start, + } = proof_result_sender; + + let _ = result_tx.send(ProofResultMessage { + sequence_number: seq, + result: Err(ParallelStateRootError::Provider(error.clone())), + elapsed: start.elapsed(), + state, + }); + } + + error + }) + } + + /// Dispatch an account multiproof computation + /// + /// The result will be sent via the `result_sender` channel included in the input. + pub fn dispatch_account_multiproof( + &self, + input: AccountMultiproofInput, + ) -> Result<(), ProviderError> { + self.account_work_tx + .send(AccountWorkerJob::AccountMultiproof { input: Box::new(input) }) + .map_err(|err| { + let error = + ProviderError::other(std::io::Error::other("account workers unavailable")); + + if let AccountWorkerJob::AccountMultiproof { input } = err.0 { + let AccountMultiproofInput { + proof_result_sender: + ProofResultContext { + sender: result_tx, + sequence_number: seq, + state, + start_time: start, + }, + .. 
+ } = *input; + + let _ = result_tx.send(ProofResultMessage { + sequence_number: seq, + result: Err(ParallelStateRootError::Provider(error.clone())), + elapsed: start.elapsed(), + state, + }); + } + + error + }) + } - trace!( - target: "trie::proof_task", - worker_id, - "Storage worker started" - ); + /// Dispatch blinded storage node request to storage worker pool + pub(crate) fn dispatch_blinded_storage_node( + &self, + account: B256, + path: Nibbles, + ) -> Result, ProviderError> { + let (tx, rx) = channel(); + self.storage_work_tx + .send(StorageWorkerJob::BlindedStorageNode { account, path, result_sender: tx }) + .map_err(|_| { + ProviderError::other(std::io::Error::other("storage workers unavailable")) + })?; - let mut storage_proofs_processed = 0u64; - let mut storage_nodes_processed = 0u64; + Ok(rx) + } - // Initially mark this worker as available. - available_workers.fetch_add(1, Ordering::Relaxed); + /// Dispatch blinded account node request to account worker pool + pub(crate) fn dispatch_blinded_account_node( + &self, + path: Nibbles, + ) -> Result, ProviderError> { + let (tx, rx) = channel(); + self.account_work_tx + .send(AccountWorkerJob::BlindedAccountNode { path, result_sender: tx }) + .map_err(|_| { + ProviderError::other(std::io::Error::other("account workers unavailable")) + })?; - while let Ok(job) = work_rx.recv() { - // Mark worker as busy. - available_workers.fetch_sub(1, Ordering::Relaxed); + Ok(rx) + } +} - match job { - StorageWorkerJob::StorageProof { input, proof_result_sender } => { - let hashed_address = input.hashed_address; - let ProofResultContext { sender, sequence_number: seq, state, start_time } = - proof_result_sender; +/// Data used for initializing cursor factories that is shared across all storage proof instances. +#[derive(Clone, Debug)] +pub struct ProofTaskCtx { + /// The factory for creating state providers. + factory: Factory, + /// The collection of prefix sets for the computation. Since the prefix sets _always_ + /// invalidate the in-memory nodes, not all keys from `state_sorted` might be present here, + /// if we have cached nodes for them. + prefix_sets: Arc, +} - trace!( - target: "trie::proof_task", - worker_id, - hashed_address = ?hashed_address, - prefix_set_len = input.prefix_set.len(), - target_slots_len = input.target_slots.len(), - "Processing storage proof" - ); +impl ProofTaskCtx { + /// Creates a new [`ProofTaskCtx`] with the given factory and prefix sets. + pub const fn new(factory: Factory, prefix_sets: Arc) -> Self { + Self { factory, prefix_sets } + } +} - let proof_start = Instant::now(); - let result = proof_tx.compute_storage_proof(input); +/// This contains all information shared between all storage proof instances. +#[derive(Debug)] +pub struct ProofTaskTx { + /// The provider that implements `TrieCursorFactory` and `HashedCursorFactory`. + provider: Provider, - let proof_elapsed = proof_start.elapsed(); - storage_proofs_processed += 1; + /// The prefix sets for the computation. + prefix_sets: Arc, - let result_msg = result.map(|storage_proof| ProofResult::StorageProof { - hashed_address, - proof: storage_proof, - }); + /// Identifier for the worker within the worker pool, used only for tracing. 
+ id: usize, +} - if sender - .send(ProofResultMessage { - sequence_number: seq, - result: result_msg, - elapsed: start_time.elapsed(), - state, - }) - .is_err() - { - trace!( - target: "trie::proof_task", - worker_id, - hashed_address = ?hashed_address, - storage_proofs_processed, - "Proof result receiver dropped, discarding result" - ); - } +impl ProofTaskTx { + /// Initializes a [`ProofTaskTx`] with the given provider, prefix sets, and ID. + const fn new(provider: Provider, prefix_sets: Arc, id: usize) -> Self { + Self { provider, prefix_sets, id } + } +} - trace!( - target: "trie::proof_task", - worker_id, - hashed_address = ?hashed_address, - proof_time_us = proof_elapsed.as_micros(), - total_processed = storage_proofs_processed, - "Storage proof completed" - ); +impl ProofTaskTx +where + Provider: TrieCursorFactory + HashedCursorFactory, +{ + /// Compute storage proof. + /// + /// Used by storage workers in the worker pool to compute storage proofs. + #[inline] + fn compute_storage_proof(&self, input: StorageProofInput) -> StorageProofResult { + // Consume the input so we can move large collections (e.g. target slots) without cloning. + let StorageProofInput { + hashed_address, + prefix_set, + target_slots, + with_branch_node_masks, + multi_added_removed_keys, + } = input; - // Mark worker as available again. - available_workers.fetch_add(1, Ordering::Relaxed); - } + // Get or create added/removed keys context + let multi_added_removed_keys = + multi_added_removed_keys.unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); + let added_removed_keys = multi_added_removed_keys.get_storage(&hashed_address); - StorageWorkerJob::BlindedStorageNode { account, path, result_sender } => { - trace!( - target: "trie::proof_task", - worker_id, - ?account, - ?path, - "Processing blinded storage node" - ); + let span = debug_span!( + target: "trie::proof_task", + "Storage proof calculation", + hashed_address = ?hashed_address, + worker_id = self.id, + ); + let _span_guard = span.enter(); - let storage_node_provider = ProofBlindedStorageProvider::new( - &proof_tx.provider, - &proof_tx.provider, - proof_tx.prefix_sets.clone(), - account, - ); + let proof_start = Instant::now(); - let start = Instant::now(); - let result = storage_node_provider.trie_node(&path); - let elapsed = start.elapsed(); + // Compute raw storage multiproof + let raw_proof_result = + StorageProof::new_hashed(&self.provider, &self.provider, hashed_address) + .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().copied())) + .with_branch_node_masks(with_branch_node_masks) + .with_added_removed_keys(added_removed_keys) + .storage_multiproof(target_slots) + .map_err(|e| ParallelStateRootError::Other(e.to_string())); - storage_nodes_processed += 1; + // Decode proof into DecodedStorageMultiProof + let decoded_result = raw_proof_result.and_then(|raw_proof| { + raw_proof.try_into().map_err(|e: alloy_rlp::Error| { + ParallelStateRootError::Other(format!( + "Failed to decode storage proof for {}: {}", + hashed_address, e + )) + }) + }); - if result_sender.send(result).is_err() { - trace!( - target: "trie::proof_task", - worker_id, - ?account, - ?path, - storage_nodes_processed, - "Blinded storage node receiver dropped, discarding result" - ); - } + trace!( + target: "trie::proof_task", + hashed_address = ?hashed_address, + proof_time_us = proof_start.elapsed().as_micros(), + worker_id = self.id, + "Completed storage proof calculation" + ); - trace!( - target: "trie::proof_task", - worker_id, - ?account, - ?path, - elapsed_us = 
elapsed.as_micros(), - total_processed = storage_nodes_processed, - "Blinded storage node completed" - ); + decoded_result + } +} +impl TrieNodeProviderFactory for ProofWorkerHandle { + type AccountNodeProvider = ProofTaskTrieNodeProvider; + type StorageNodeProvider = ProofTaskTrieNodeProvider; + + fn account_node_provider(&self) -> Self::AccountNodeProvider { + ProofTaskTrieNodeProvider::AccountNode { handle: self.clone() } + } + + fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { + ProofTaskTrieNodeProvider::StorageNode { account, handle: self.clone() } + } +} + +/// Trie node provider for retrieving trie nodes by path. +#[derive(Debug)] +pub enum ProofTaskTrieNodeProvider { + /// Blinded account trie node provider. + AccountNode { + /// Handle to the proof worker pools. + handle: ProofWorkerHandle, + }, + /// Blinded storage trie node provider. + StorageNode { + /// Target account. + account: B256, + /// Handle to the proof worker pools. + handle: ProofWorkerHandle, + }, +} + +impl TrieNodeProvider for ProofTaskTrieNodeProvider { + fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { + match self { + Self::AccountNode { handle } => { + let rx = handle + .dispatch_blinded_account_node(*path) + .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; + rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? + } + Self::StorageNode { handle, account } => { + let rx = handle + .dispatch_blinded_storage_node(*account, *path) + .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; + rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? + } + } + } +} +/// Result of a proof calculation, which can be either an account multiproof or a storage proof. +#[derive(Debug)] +pub enum ProofResult { + /// Account multiproof with statistics + AccountMultiproof { + /// The account multiproof + proof: DecodedMultiProof, + /// Statistics collected during proof computation + stats: ParallelTrieStats, + }, + /// Storage proof for a specific account + StorageProof { + /// The hashed address this storage proof belongs to + hashed_address: B256, + /// The storage multiproof + proof: DecodedStorageMultiProof, + }, +} - // Mark worker as available again. - available_workers.fetch_add(1, Ordering::Relaxed); +impl ProofResult { + /// Convert this proof result into a `DecodedMultiProof`. + /// + /// For account multiproofs, returns the multiproof directly (discarding stats). + /// For storage proofs, wraps the storage proof into a minimal multiproof. + pub fn into_multiproof(self) -> DecodedMultiProof { + match self { + Self::AccountMultiproof { proof, stats: _ } => proof, + Self::StorageProof { hashed_address, proof } => { + DecodedMultiProof::from_storage_proof(hashed_address, proof) } } } +} +/// Channel used by worker threads to deliver `ProofResultMessage` items back to +/// `MultiProofTask`. +/// +/// Workers use this sender to deliver proof results directly to `MultiProofTask`. +pub type ProofResultSender = CrossbeamSender; - trace!( - target: "trie::proof_task", - worker_id, - storage_proofs_processed, - storage_nodes_processed, - "Storage worker shutting down" - ); +/// Message containing a completed proof result with metadata for direct delivery to +/// `MultiProofTask`. +/// +/// This type enables workers to send proof results directly to the `MultiProofTask` event loop. 
+#[derive(Debug)] +pub struct ProofResultMessage { + /// Sequence number for ordering proofs + pub sequence_number: u64, + /// The proof calculation result (either account multiproof or storage proof) + pub result: Result, + /// Time taken for the entire proof calculation (from dispatch to completion) + pub elapsed: Duration, + /// Original state update that triggered this proof + pub state: HashedPostState, +} - #[cfg(feature = "metrics")] - metrics.record_storage_nodes(storage_nodes_processed as usize); +/// Context for sending proof calculation results back to `MultiProofTask`. +/// +/// This struct contains all context needed to send and track proof calculation results. +/// Workers use this to deliver completed proofs back to the main event loop. +#[derive(Debug, Clone)] +pub struct ProofResultContext { + /// Channel sender for result delivery + pub sender: ProofResultSender, + /// Sequence number for proof ordering + pub sequence_number: u64, + /// Original state update that triggered this proof + pub state: HashedPostState, + /// Calculation start time for measuring elapsed duration + pub start_time: Instant, } -/// Worker loop for account trie operations. +impl ProofResultContext { + /// Creates a new proof result context. + pub const fn new( + sender: ProofResultSender, + sequence_number: u64, + state: HashedPostState, + start_time: Instant, + ) -> Self { + Self { sender, sequence_number, state, start_time } + } +} +/// Internal message for storage workers. +#[derive(Debug)] +enum StorageWorkerJob { + /// Storage proof computation request + StorageProof { + /// Storage proof input parameters + input: StorageProofInput, + /// Context for sending the proof result. + proof_result_sender: ProofResultContext, + }, + /// Blinded storage node retrieval request + BlindedStorageNode { + /// Target account + account: B256, + /// Path to the storage node + path: Nibbles, + /// Channel to send result back to original caller + result_sender: Sender, + }, +} +/// Worker loop for storage trie operations. /// /// # Lifecycle /// -/// Each worker initializes its providers, advertises availability, then loops: -/// take a job, mark busy, compute the proof, send the result, and mark available again. -/// The loop ends gracefully once the channel closes. +/// Each worker: +/// 1. Receives `StorageWorkerJob` from crossbeam unbounded channel +/// 2. Computes result using its dedicated long-lived transaction +/// 3. Sends result directly to original caller via `std::mpsc` +/// 4. Repeats until channel closes (graceful shutdown) /// /// # Transaction Reuse /// @@ -376,10 +627,9 @@ fn storage_worker_loop( /// # Shutdown /// /// Worker shuts down when the crossbeam channel closes (all senders dropped). 
-fn account_worker_loop( +fn storage_worker_loop( task_ctx: ProofTaskCtx, - work_rx: CrossbeamReceiver, - storage_work_tx: CrossbeamSender, + work_rx: CrossbeamReceiver, worker_id: usize, available_workers: Arc, #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, @@ -390,941 +640,679 @@ fn account_worker_loop( let provider = task_ctx .factory .database_provider_ro() - .expect("Account worker failed to initialize: unable to create provider"); + .expect("Storage worker failed to initialize: unable to create provider"); let proof_tx = ProofTaskTx::new(provider, task_ctx.prefix_sets, worker_id); trace!( target: "trie::proof_task", worker_id, - "Account worker started" - ); - - let mut account_proofs_processed = 0u64; - let mut account_nodes_processed = 0u64; - - // Count this worker as available only after successful initialization. - available_workers.fetch_add(1, Ordering::Relaxed); - - while let Ok(job) = work_rx.recv() { - // Mark worker as busy. - available_workers.fetch_sub(1, Ordering::Relaxed); - - match job { - AccountWorkerJob::AccountMultiproof { input } => { - let AccountMultiproofInput { - targets, - mut prefix_sets, - collect_branch_node_masks, - multi_added_removed_keys, - missed_leaves_storage_roots, - proof_result_sender: - ProofResultContext { - sender: result_tx, - sequence_number: seq, - state, - start_time: start, - }, - } = *input; - - let span = debug_span!( - target: "trie::proof_task", - "Account multiproof calculation", - targets = targets.len(), - worker_id, - ); - let _span_guard = span.enter(); - - trace!( - target: "trie::proof_task", - "Processing account multiproof" - ); - - let proof_start = Instant::now(); - - let mut tracker = ParallelTrieTracker::default(); - - let mut storage_prefix_sets = std::mem::take(&mut prefix_sets.storage_prefix_sets); - - let storage_root_targets_len = StorageRootTargets::count( - &prefix_sets.account_prefix_set, - &storage_prefix_sets, - ); - - tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); - - let storage_proof_receivers = match dispatch_storage_proofs( - &storage_work_tx, - &targets, - &mut storage_prefix_sets, - collect_branch_node_masks, - multi_added_removed_keys.as_ref(), - ) { - Ok(receivers) => receivers, - Err(error) => { - // Send error through result channel - error!(target: "trie::proof_task", "Failed to dispatch storage proofs: {error}"); - let _ = result_tx.send(ProofResultMessage { - sequence_number: seq, - result: Err(error), - elapsed: start.elapsed(), - state, - }); - continue; - } - }; - - // Use the missed leaves cache passed from the multiproof manager - let account_prefix_set = std::mem::take(&mut prefix_sets.account_prefix_set); - - let ctx = AccountMultiproofParams { - targets: &targets, - prefix_set: account_prefix_set, - collect_branch_node_masks, - multi_added_removed_keys: multi_added_removed_keys.as_ref(), - storage_proof_receivers, - missed_leaves_storage_roots: missed_leaves_storage_roots.as_ref(), - }; - - let result = build_account_multiproof_with_storage_roots( - &proof_tx.provider, - ctx, - &mut tracker, - ); - - let proof_elapsed = proof_start.elapsed(); - let total_elapsed = start.elapsed(); - let stats = tracker.finish(); - let result = result.map(|proof| ProofResult::AccountMultiproof { proof, stats }); - account_proofs_processed += 1; - - // Send result to MultiProofTask - if result_tx - .send(ProofResultMessage { - sequence_number: seq, - result, - elapsed: total_elapsed, - state, - }) - .is_err() - { - trace!( - target: "trie::proof_task", - worker_id, - 
account_proofs_processed, - "Account multiproof receiver dropped, discarding result" - ); - } - - trace!( - target: "trie::proof_task", - proof_time_us = proof_elapsed.as_micros(), - total_elapsed_us = total_elapsed.as_micros(), - total_processed = account_proofs_processed, - "Account multiproof completed" - ); - drop(_span_guard); + "Storage worker started" + ); - // Mark worker as available again. - available_workers.fetch_add(1, Ordering::Relaxed); - } + let mut storage_proofs_processed = 0u64; + let mut storage_nodes_processed = 0u64; - AccountWorkerJob::BlindedAccountNode { path, result_sender } => { - let span = debug_span!( - target: "trie::proof_task", - "Blinded account node calculation", - ?path, - worker_id, - ); - let _span_guard = span.enter(); + // Initially mark this worker as available. + available_workers.fetch_add(1, Ordering::Relaxed); + + while let Ok(job) = work_rx.recv() { + // Mark worker as busy. + available_workers.fetch_sub(1, Ordering::Relaxed); + + match job { + StorageWorkerJob::StorageProof { input, proof_result_sender } => { + let hashed_address = input.hashed_address; + let ProofResultContext { sender, sequence_number: seq, state, start_time } = + proof_result_sender; trace!( target: "trie::proof_task", - "Processing blinded account node" + worker_id, + hashed_address = ?hashed_address, + prefix_set_len = input.prefix_set.len(), + target_slots_len = input.target_slots.len(), + "Processing storage proof" ); - let account_node_provider = ProofBlindedAccountProvider::new( - &proof_tx.provider, - &proof_tx.provider, - proof_tx.prefix_sets.clone(), - ); + let proof_start = Instant::now(); + let result = proof_tx.compute_storage_proof(input); - let start = Instant::now(); - let result = account_node_provider.trie_node(&path); - let elapsed = start.elapsed(); + let proof_elapsed = proof_start.elapsed(); + storage_proofs_processed += 1; - account_nodes_processed += 1; + let result_msg = result.map(|storage_proof| ProofResult::StorageProof { + hashed_address, + proof: storage_proof, + }); - if result_sender.send(result).is_err() { + if sender + .send(ProofResultMessage { + sequence_number: seq, + result: result_msg, + elapsed: start_time.elapsed(), + state, + }) + .is_err() + { trace!( target: "trie::proof_task", worker_id, - ?path, - account_nodes_processed, - "Blinded account node receiver dropped, discarding result" + hashed_address = ?hashed_address, + storage_proofs_processed, + "Proof result receiver dropped, discarding result" ); } trace!( target: "trie::proof_task", - node_time_us = elapsed.as_micros(), - total_processed = account_nodes_processed, - "Blinded account node completed" - ); - drop(_span_guard); - - // Mark worker as available again. - available_workers.fetch_add(1, Ordering::Relaxed); - } - } - } - - trace!( - target: "trie::proof_task", - worker_id, - account_proofs_processed, - account_nodes_processed, - "Account worker shutting down" - ); - - #[cfg(feature = "metrics")] - metrics.record_account_nodes(account_nodes_processed as usize); -} - -/// Builds an account multiproof by consuming storage proof receivers lazily during trie walk. -/// -/// This is a helper function used by account workers to build the account subtree proof -/// while storage proofs are still being computed. Receivers are consumed only when needed, -/// enabling interleaved parallelism between account trie traversal and storage proof computation. -/// -/// Returns a `DecodedMultiProof` containing the account subtree and storage proofs. 
-fn build_account_multiproof_with_storage_roots

( - provider: &P, - ctx: AccountMultiproofParams<'_>, - tracker: &mut ParallelTrieTracker, -) -> Result -where - P: TrieCursorFactory + HashedCursorFactory, -{ - let accounts_added_removed_keys = - ctx.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); - - // Create the walker. - let walker = TrieWalker::<_>::state_trie( - provider.account_trie_cursor().map_err(ProviderError::Database)?, - ctx.prefix_set, - ) - .with_added_removed_keys(accounts_added_removed_keys) - .with_deletions_retained(true); - - // Create a hash builder to rebuild the root node since it is not available in the database. - let retainer = ctx - .targets - .keys() - .map(Nibbles::unpack) - .collect::() - .with_added_removed_keys(accounts_added_removed_keys); - let mut hash_builder = HashBuilder::default() - .with_proof_retainer(retainer) - .with_updates(ctx.collect_branch_node_masks); - - // Initialize storage multiproofs map with pre-allocated capacity. - // Proofs will be inserted as they're consumed from receivers during trie walk. - let mut collected_decoded_storages: B256Map = - B256Map::with_capacity_and_hasher(ctx.targets.len(), Default::default()); - let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); - let mut account_node_iter = TrieNodeIter::state_trie( - walker, - provider.hashed_account_cursor().map_err(ProviderError::Database)?, - ); - - let mut storage_proof_receivers = ctx.storage_proof_receivers; - - while let Some(account_node) = account_node_iter.try_next().map_err(ProviderError::Database)? { - match account_node { - TrieElement::Branch(node) => { - hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); - } - TrieElement::Leaf(hashed_address, account) => { - let root = match storage_proof_receivers.remove(&hashed_address) { - Some(receiver) => { - // Block on this specific storage proof receiver - enables interleaved - // parallelism - let proof_msg = receiver.recv().map_err(|_| { - ParallelStateRootError::StorageRoot( - reth_execution_errors::StorageRootError::Database( - DatabaseError::Other(format!( - "Storage proof channel closed for {hashed_address}" - )), - ), - ) - })?; - - // Extract storage proof from the result - let proof = match proof_msg.result? { - ProofResult::StorageProof { hashed_address: addr, proof } => { - debug_assert_eq!( - addr, - hashed_address, - "storage worker must return same address: expected {hashed_address}, got {addr}" - ); - proof - } - ProofResult::AccountMultiproof { .. } => { - unreachable!("storage worker only sends StorageProof variant") - } - }; - - let root = proof.root; - collected_decoded_storages.insert(hashed_address, proof); - root - } - // Since we do not store all intermediate nodes in the database, there might - // be a possibility of re-adding a non-modified leaf to the hash builder. - None => { - tracker.inc_missed_leaves(); - - match ctx.missed_leaves_storage_roots.entry(hashed_address) { - dashmap::Entry::Occupied(occ) => *occ.get(), - dashmap::Entry::Vacant(vac) => { - let root = - StorageProof::new_hashed(provider, provider, hashed_address) - .with_prefix_set_mut(Default::default()) - .storage_multiproof( - ctx.targets - .get(&hashed_address) - .cloned() - .unwrap_or_default(), - ) - .map_err(|e| { - ParallelStateRootError::StorageRoot( - reth_execution_errors::StorageRootError::Database( - DatabaseError::Other(e.to_string()), - ), - ) - })? 
- .root; - - vac.insert(root); - root - } - } - } - }; - - // Encode account - account_rlp.clear(); - let account = account.into_trie_account(root); - account.encode(&mut account_rlp as &mut dyn BufMut); - - hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - } - } - } - - // Consume remaining storage proof receivers for accounts not encountered during trie walk. - for (hashed_address, receiver) in storage_proof_receivers { - if let Ok(proof_msg) = receiver.recv() { - // Extract storage proof from the result - if let Ok(ProofResult::StorageProof { proof, .. }) = proof_msg.result { - collected_decoded_storages.insert(hashed_address, proof); - } - } - } - - let _ = hash_builder.root(); - - let account_subtree_raw_nodes = hash_builder.take_proof_nodes(); - let decoded_account_subtree = DecodedProofNodes::try_from(account_subtree_raw_nodes)?; - - let (branch_node_hash_masks, branch_node_tree_masks) = if ctx.collect_branch_node_masks { - let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); - ( - updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), - updated_branch_nodes.into_iter().map(|(path, node)| (path, node.tree_mask)).collect(), - ) - } else { - (Default::default(), Default::default()) - }; - - Ok(DecodedMultiProof { - account_subtree: decoded_account_subtree, - branch_node_hash_masks, - branch_node_tree_masks, - storages: collected_decoded_storages, - }) -} + worker_id, + hashed_address = ?hashed_address, + proof_time_us = proof_elapsed.as_micros(), + total_processed = storage_proofs_processed, + "Storage proof completed" + ); -/// Queues storage proofs for all accounts in the targets and returns receivers. -/// -/// This function queues all storage proof tasks to the worker pool but returns immediately -/// with receivers, allowing the account trie walk to proceed in parallel with storage proof -/// computation. This enables interleaved parallelism for better performance. -/// -/// Propagates errors up if queuing fails. Receivers must be consumed by the caller. -fn dispatch_storage_proofs( - storage_work_tx: &CrossbeamSender, - targets: &MultiProofTargets, - storage_prefix_sets: &mut B256Map, - with_branch_node_masks: bool, - multi_added_removed_keys: Option<&Arc>, -) -> Result>, ParallelStateRootError> { - let mut storage_proof_receivers = - B256Map::with_capacity_and_hasher(targets.len(), Default::default()); + // Mark worker as available again. 
+ available_workers.fetch_add(1, Ordering::Relaxed); + } - // Dispatch all storage proofs to worker pool - for (hashed_address, target_slots) in targets.iter() { - let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); + StorageWorkerJob::BlindedStorageNode { account, path, result_sender } => { + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + "Processing blinded storage node" + ); - // Create channel for receiving ProofResultMessage - let (result_tx, result_rx) = crossbeam_channel::unbounded(); - let start = Instant::now(); + let storage_node_provider = ProofBlindedStorageProvider::new( + &proof_tx.provider, + &proof_tx.provider, + proof_tx.prefix_sets.clone(), + account, + ); - // Create computation input (data only, no communication channel) - let input = StorageProofInput::new( - *hashed_address, - prefix_set, - target_slots.clone(), - with_branch_node_masks, - multi_added_removed_keys.cloned(), - ); + let start = Instant::now(); + let result = storage_node_provider.trie_node(&path); + let elapsed = start.elapsed(); - // Always dispatch a storage proof so we obtain the storage root even when no slots are - // requested. - storage_work_tx - .send(StorageWorkerJob::StorageProof { - input, - proof_result_sender: ProofResultContext::new( - result_tx, - 0, - HashedPostState::default(), - start, - ), - }) - .map_err(|_| { - ParallelStateRootError::Other(format!( - "Failed to queue storage proof for {}: storage worker pool unavailable", - hashed_address - )) - })?; + storage_nodes_processed += 1; - storage_proof_receivers.insert(*hashed_address, result_rx); - } + if result_sender.send(result).is_err() { + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + storage_nodes_processed, + "Blinded storage node receiver dropped, discarding result" + ); + } - Ok(storage_proof_receivers) -} + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + elapsed_us = elapsed.as_micros(), + total_processed = storage_nodes_processed, + "Blinded storage node completed" + ); -/// This contains all information shared between all storage proof instances. -#[derive(Debug)] -pub struct ProofTaskTx { - /// The provider that implements `TrieCursorFactory` and `HashedCursorFactory`. - provider: Provider, + // Mark worker as available again. + available_workers.fetch_add(1, Ordering::Relaxed); + } + } + } - /// The prefix sets for the computation. - prefix_sets: Arc, + trace!( + target: "trie::proof_task", + worker_id, + storage_proofs_processed, + storage_nodes_processed, + "Storage worker shutting down" + ); - /// Identifier for the worker within the worker pool, used only for tracing. - id: usize, + #[cfg(feature = "metrics")] + metrics.record_storage_nodes(storage_nodes_processed as usize); } +/// Worker loop for account trie operations. +/// +/// # Lifecycle +/// +/// Each worker initializes its providers, advertises availability, then loops: +/// take a job, mark busy, compute the proof, send the result, and mark available again. +/// The loop ends gracefully once the channel closes. +/// +/// # Transaction Reuse +/// +/// Reuses the same transaction and cursor factories across multiple operations +/// to avoid transaction creation and cursor factory setup overhead. +/// +/// # Panic Safety +/// +/// If this function panics, the worker thread terminates but other workers +/// continue operating and the system degrades gracefully. 
+/// +/// # Shutdown +/// +/// Worker shuts down when the crossbeam channel closes (all senders dropped). +fn account_worker_loop( + task_ctx: ProofTaskCtx, + work_rx: CrossbeamReceiver, + storage_work_tx: CrossbeamSender, + worker_id: usize, + available_workers: Arc, + #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, +) where + Factory: DatabaseProviderROFactory, +{ + // Create provider from factory + let provider = task_ctx + .factory + .database_provider_ro() + .expect("Account worker failed to initialize: unable to create provider"); + let proof_tx = ProofTaskTx::new(provider, task_ctx.prefix_sets, worker_id); -impl ProofTaskTx { - /// Initializes a [`ProofTaskTx`] with the given provider, prefix sets, and ID. - const fn new(provider: Provider, prefix_sets: Arc, id: usize) -> Self { - Self { provider, prefix_sets, id } - } -} + trace!( + target: "trie::proof_task", + worker_id, + "Account worker started" + ); -impl ProofTaskTx -where - Provider: TrieCursorFactory + HashedCursorFactory, -{ - /// Compute storage proof. - /// - /// Used by storage workers in the worker pool to compute storage proofs. - #[inline] - fn compute_storage_proof(&self, input: StorageProofInput) -> StorageProofResult { - // Consume the input so we can move large collections (e.g. target slots) without cloning. - let StorageProofInput { - hashed_address, - prefix_set, - target_slots, - with_branch_node_masks, - multi_added_removed_keys, - } = input; + let mut account_proofs_processed = 0u64; + let mut account_nodes_processed = 0u64; - // Get or create added/removed keys context - let multi_added_removed_keys = - multi_added_removed_keys.unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); - let added_removed_keys = multi_added_removed_keys.get_storage(&hashed_address); + // Count this worker as available only after successful initialization. + available_workers.fetch_add(1, Ordering::Relaxed); - let span = debug_span!( - target: "trie::proof_task", - "Storage proof calculation", - hashed_address = ?hashed_address, - worker_id = self.id, - ); - let _span_guard = span.enter(); + while let Ok(job) = work_rx.recv() { + // Mark worker as busy. 
+ available_workers.fetch_sub(1, Ordering::Relaxed); - let proof_start = Instant::now(); + match job { + AccountWorkerJob::AccountMultiproof { input } => { + let AccountMultiproofInput { + targets, + mut prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys, + missed_leaves_storage_roots, + proof_result_sender: + ProofResultContext { + sender: result_tx, + sequence_number: seq, + state, + start_time: start, + }, + } = *input; - // Compute raw storage multiproof - let raw_proof_result = - StorageProof::new_hashed(&self.provider, &self.provider, hashed_address) - .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().copied())) - .with_branch_node_masks(with_branch_node_masks) - .with_added_removed_keys(added_removed_keys) - .storage_multiproof(target_slots) - .map_err(|e| ParallelStateRootError::Other(e.to_string())); + let span = debug_span!( + target: "trie::proof_task", + "Account multiproof calculation", + targets = targets.len(), + worker_id, + ); + let _span_guard = span.enter(); - // Decode proof into DecodedStorageMultiProof - let decoded_result = raw_proof_result.and_then(|raw_proof| { - raw_proof.try_into().map_err(|e: alloy_rlp::Error| { - ParallelStateRootError::Other(format!( - "Failed to decode storage proof for {}: {}", - hashed_address, e - )) - }) - }); + trace!( + target: "trie::proof_task", + "Processing account multiproof" + ); - trace!( - target: "trie::proof_task", - hashed_address = ?hashed_address, - proof_time_us = proof_start.elapsed().as_micros(), - worker_id = self.id, - "Completed storage proof calculation" - ); + let proof_start = Instant::now(); - decoded_result - } -} + let mut tracker = ParallelTrieTracker::default(); -/// Input parameters for storage proof computation. -#[derive(Debug)] -pub struct StorageProofInput { - /// The hashed address for which the proof is calculated. - hashed_address: B256, - /// The prefix set for the proof calculation. - prefix_set: PrefixSet, - /// The target slots for the proof calculation. - target_slots: B256Set, - /// Whether or not to collect branch node masks - with_branch_node_masks: bool, - /// Provided by the user to give the necessary context to retain extra proofs. - multi_added_removed_keys: Option>, -} + let mut storage_prefix_sets = std::mem::take(&mut prefix_sets.storage_prefix_sets); -impl StorageProofInput { - /// Creates a new [`StorageProofInput`] with the given hashed address, prefix set, and target - /// slots. - pub const fn new( - hashed_address: B256, - prefix_set: PrefixSet, - target_slots: B256Set, - with_branch_node_masks: bool, - multi_added_removed_keys: Option>, - ) -> Self { - Self { - hashed_address, - prefix_set, - target_slots, - with_branch_node_masks, - multi_added_removed_keys, - } - } -} + let storage_root_targets_len = StorageRootTargets::count( + &prefix_sets.account_prefix_set, + &storage_prefix_sets, + ); -/// Input parameters for account multiproof computation. -#[derive(Debug, Clone)] -pub struct AccountMultiproofInput { - /// The targets for which to compute the multiproof. - pub targets: MultiProofTargets, - /// The prefix sets for the proof calculation. - pub prefix_sets: TriePrefixSets, - /// Whether or not to collect branch node masks. - pub collect_branch_node_masks: bool, - /// Provided by the user to give the necessary context to retain extra proofs. - pub multi_added_removed_keys: Option>, - /// Cached storage proof roots for missed leaves encountered during account trie walk. 
- pub missed_leaves_storage_roots: Arc>, - /// Context for sending the proof result. - pub proof_result_sender: ProofResultContext, -} + tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); -/// Parameters for building an account multiproof with pre-computed storage roots. -struct AccountMultiproofParams<'a> { - /// The targets for which to compute the multiproof. - targets: &'a MultiProofTargets, - /// The prefix set for the account trie walk. - prefix_set: PrefixSet, - /// Whether or not to collect branch node masks. - collect_branch_node_masks: bool, - /// Provided by the user to give the necessary context to retain extra proofs. - multi_added_removed_keys: Option<&'a Arc>, - /// Receivers for storage proofs being computed in parallel. - storage_proof_receivers: B256Map>, - /// Cached storage proof roots for missed leaves encountered during account trie walk. - missed_leaves_storage_roots: &'a DashMap, -} + let storage_proof_receivers = match dispatch_storage_proofs( + &storage_work_tx, + &targets, + &mut storage_prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys.as_ref(), + ) { + Ok(receivers) => receivers, + Err(error) => { + // Send error through result channel + error!(target: "trie::proof_task", "Failed to dispatch storage proofs: {error}"); + let _ = result_tx.send(ProofResultMessage { + sequence_number: seq, + result: Err(error), + elapsed: start.elapsed(), + state, + }); + continue; + } + }; -/// Internal message for account workers. -#[derive(Debug)] -enum AccountWorkerJob { - /// Account multiproof computation request - AccountMultiproof { - /// Account multiproof input parameters - input: Box, - }, - /// Blinded account node retrieval request - BlindedAccountNode { - /// Path to the account node - path: Nibbles, - /// Channel to send result back to original caller - result_sender: Sender, - }, -} + // Use the missed leaves cache passed from the multiproof manager + let account_prefix_set = std::mem::take(&mut prefix_sets.account_prefix_set); -/// Data used for initializing cursor factories that is shared across all storage proof instances. -#[derive(Clone, Debug)] -pub struct ProofTaskCtx { - /// The factory for creating state providers. - factory: Factory, - /// The collection of prefix sets for the computation. Since the prefix sets _always_ - /// invalidate the in-memory nodes, not all keys from `state_sorted` might be present here, - /// if we have cached nodes for them. - prefix_sets: Arc, -} + let ctx = AccountMultiproofParams { + targets: &targets, + prefix_set: account_prefix_set, + collect_branch_node_masks, + multi_added_removed_keys: multi_added_removed_keys.as_ref(), + storage_proof_receivers, + missed_leaves_storage_roots: missed_leaves_storage_roots.as_ref(), + }; -impl ProofTaskCtx { - /// Creates a new [`ProofTaskCtx`] with the given factory and prefix sets. - pub const fn new(factory: Factory, prefix_sets: Arc) -> Self { - Self { factory, prefix_sets } - } -} + let result = build_account_multiproof_with_storage_roots( + &proof_tx.provider, + ctx, + &mut tracker, + ); -/// A handle that provides type-safe access to proof worker pools. -/// -/// The handle stores direct senders to both storage and account worker pools, -/// eliminating the need for a routing thread. All handles share reference-counted -/// channels, and workers shut down gracefully when all handles are dropped. 
-#[derive(Debug, Clone)] -pub struct ProofWorkerHandle { - /// Direct sender to storage worker pool - storage_work_tx: CrossbeamSender, - /// Direct sender to account worker pool - account_work_tx: CrossbeamSender, - /// Counter tracking available storage workers. Workers decrement when starting work, - /// increment when finishing. Used to determine whether to chunk multiproofs. - storage_available_workers: Arc, - /// Counter tracking available account workers. Workers decrement when starting work, - /// increment when finishing. Used to determine whether to chunk multiproofs. - account_available_workers: Arc, - /// Total number of storage workers spawned - storage_worker_count: usize, - /// Total number of account workers spawned - account_worker_count: usize, -} + let proof_elapsed = proof_start.elapsed(); + let total_elapsed = start.elapsed(); + let stats = tracker.finish(); + let result = result.map(|proof| ProofResult::AccountMultiproof { proof, stats }); + account_proofs_processed += 1; -impl ProofWorkerHandle { - /// Spawns storage and account worker pools with dedicated database transactions. - /// - /// Returns a handle for submitting proof tasks to the worker pools. - /// Workers run until the last handle is dropped. - /// - /// # Parameters - /// - `executor`: Tokio runtime handle for spawning blocking tasks - /// - `task_ctx`: Shared context with database view and prefix sets - /// - `storage_worker_count`: Number of storage workers to spawn - /// - `account_worker_count`: Number of account workers to spawn - pub fn new( - executor: Handle, - task_ctx: ProofTaskCtx, - storage_worker_count: usize, - account_worker_count: usize, - ) -> Self - where - Factory: DatabaseProviderROFactory - + Clone - + Send - + 'static, - { - let (storage_work_tx, storage_work_rx) = unbounded::(); - let (account_work_tx, account_work_rx) = unbounded::(); + // Send result to MultiProofTask + if result_tx + .send(ProofResultMessage { + sequence_number: seq, + result, + elapsed: total_elapsed, + state, + }) + .is_err() + { + trace!( + target: "trie::proof_task", + worker_id, + account_proofs_processed, + "Account multiproof receiver dropped, discarding result" + ); + } - // Initialize availability counters at zero. Each worker will increment when it - // successfully initializes, ensuring only healthy workers are counted. - let storage_available_workers = Arc::new(AtomicUsize::new(0)); - let account_available_workers = Arc::new(AtomicUsize::new(0)); + trace!( + target: "trie::proof_task", + proof_time_us = proof_elapsed.as_micros(), + total_elapsed_us = total_elapsed.as_micros(), + total_processed = account_proofs_processed, + "Account multiproof completed" + ); + drop(_span_guard); - debug!( - target: "trie::proof_task", - storage_worker_count, - account_worker_count, - "Spawning proof worker pools" - ); + // Mark worker as available again. 
+ available_workers.fetch_add(1, Ordering::Relaxed); + } - let parent_span = - debug_span!(target: "trie::proof_task", "storage proof workers", ?storage_worker_count) - .entered(); - // Spawn storage workers - for worker_id in 0..storage_worker_count { - let span = debug_span!(target: "trie::proof_task", "storage worker", ?worker_id); - let task_ctx_clone = task_ctx.clone(); - let work_rx_clone = storage_work_rx.clone(); - let storage_available_workers_clone = storage_available_workers.clone(); + AccountWorkerJob::BlindedAccountNode { path, result_sender } => { + let span = debug_span!( + target: "trie::proof_task", + "Blinded account node calculation", + ?path, + worker_id, + ); + let _span_guard = span.enter(); - executor.spawn_blocking(move || { - #[cfg(feature = "metrics")] - let metrics = ProofTaskTrieMetrics::default(); + trace!( + target: "trie::proof_task", + "Processing blinded account node" + ); + + let account_node_provider = ProofBlindedAccountProvider::new( + &proof_tx.provider, + &proof_tx.provider, + proof_tx.prefix_sets.clone(), + ); - let _guard = span.enter(); - storage_worker_loop( - task_ctx_clone, - work_rx_clone, - worker_id, - storage_available_workers_clone, - #[cfg(feature = "metrics")] - metrics, - ) - }); - } - drop(parent_span); + let start = Instant::now(); + let result = account_node_provider.trie_node(&path); + let elapsed = start.elapsed(); - let parent_span = - debug_span!(target: "trie::proof_task", "account proof workers", ?storage_worker_count) - .entered(); - // Spawn account workers - for worker_id in 0..account_worker_count { - let span = debug_span!(target: "trie::proof_task", "account worker", ?worker_id); - let task_ctx_clone = task_ctx.clone(); - let work_rx_clone = account_work_rx.clone(); - let storage_work_tx_clone = storage_work_tx.clone(); - let account_available_workers_clone = account_available_workers.clone(); + account_nodes_processed += 1; - executor.spawn_blocking(move || { - #[cfg(feature = "metrics")] - let metrics = ProofTaskTrieMetrics::default(); + if result_sender.send(result).is_err() { + trace!( + target: "trie::proof_task", + worker_id, + ?path, + account_nodes_processed, + "Blinded account node receiver dropped, discarding result" + ); + } - let _guard = span.enter(); - account_worker_loop( - task_ctx_clone, - work_rx_clone, - storage_work_tx_clone, - worker_id, - account_available_workers_clone, - #[cfg(feature = "metrics")] - metrics, - ) - }); - } - drop(parent_span); + trace!( + target: "trie::proof_task", + node_time_us = elapsed.as_micros(), + total_processed = account_nodes_processed, + "Blinded account node completed" + ); + drop(_span_guard); - Self { - storage_work_tx, - account_work_tx, - storage_available_workers, - account_available_workers, - storage_worker_count, - account_worker_count, + // Mark worker as available again. + available_workers.fetch_add(1, Ordering::Relaxed); + } } } - /// Returns true if there are available storage workers to process tasks. - pub fn has_available_storage_workers(&self) -> bool { - self.storage_available_workers.load(Ordering::Relaxed) > 0 - } + trace!( + target: "trie::proof_task", + worker_id, + account_proofs_processed, + account_nodes_processed, + "Account worker shutting down" + ); - /// Returns true if there are available account workers to process tasks. 
- pub fn has_available_account_workers(&self) -> bool { - self.account_available_workers.load(Ordering::Relaxed) > 0 - } + #[cfg(feature = "metrics")] + metrics.record_account_nodes(account_nodes_processed as usize); +} +/// Builds an account multiproof by consuming storage proof receivers lazily during trie walk. +/// +/// This is a helper function used by account workers to build the account subtree proof +/// while storage proofs are still being computed. Receivers are consumed only when needed, +/// enabling interleaved parallelism between account trie traversal and storage proof computation. +/// +/// Returns a `DecodedMultiProof` containing the account subtree and storage proofs. +fn build_account_multiproof_with_storage_roots
<P>
( + provider: &P, + ctx: AccountMultiproofParams<'_>, + tracker: &mut ParallelTrieTracker, +) -> Result +where + P: TrieCursorFactory + HashedCursorFactory, +{ + let accounts_added_removed_keys = + ctx.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); - /// Returns the number of pending storage tasks in the queue. - pub fn pending_storage_tasks(&self) -> usize { - self.storage_work_tx.len() - } + // Create the walker. + let walker = TrieWalker::<_>::state_trie( + provider.account_trie_cursor().map_err(ProviderError::Database)?, + ctx.prefix_set, + ) + .with_added_removed_keys(accounts_added_removed_keys) + .with_deletions_retained(true); - /// Returns the number of pending account tasks in the queue. - pub fn pending_account_tasks(&self) -> usize { - self.account_work_tx.len() - } + // Create a hash builder to rebuild the root node since it is not available in the database. + let retainer = ctx + .targets + .keys() + .map(Nibbles::unpack) + .collect::() + .with_added_removed_keys(accounts_added_removed_keys); + let mut hash_builder = HashBuilder::default() + .with_proof_retainer(retainer) + .with_updates(ctx.collect_branch_node_masks); - /// Returns the total number of storage workers in the pool. - pub const fn total_storage_workers(&self) -> usize { - self.storage_worker_count - } + // Initialize storage multiproofs map with pre-allocated capacity. + // Proofs will be inserted as they're consumed from receivers during trie walk. + let mut collected_decoded_storages: B256Map = + B256Map::with_capacity_and_hasher(ctx.targets.len(), Default::default()); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); + let mut account_node_iter = TrieNodeIter::state_trie( + walker, + provider.hashed_account_cursor().map_err(ProviderError::Database)?, + ); - /// Returns the total number of account workers in the pool. - pub const fn total_account_workers(&self) -> usize { - self.account_worker_count - } + let mut storage_proof_receivers = ctx.storage_proof_receivers; - /// Returns the number of storage workers currently processing tasks. - /// - /// This is calculated as total workers minus available workers. - pub fn active_storage_workers(&self) -> usize { - self.storage_worker_count - .saturating_sub(self.storage_available_workers.load(Ordering::Relaxed)) - } + while let Some(account_node) = account_node_iter.try_next().map_err(ProviderError::Database)? { + match account_node { + TrieElement::Branch(node) => { + hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); + } + TrieElement::Leaf(hashed_address, account) => { + let root = match storage_proof_receivers.remove(&hashed_address) { + Some(receiver) => { + // Block on this specific storage proof receiver - enables interleaved + // parallelism + let proof_msg = receiver.recv().map_err(|_| { + ParallelStateRootError::StorageRoot( + reth_execution_errors::StorageRootError::Database( + DatabaseError::Other(format!( + "Storage proof channel closed for {hashed_address}" + )), + ), + ) + })?; - /// Returns the number of account workers currently processing tasks. - /// - /// This is calculated as total workers minus available workers. - pub fn active_account_workers(&self) -> usize { - self.account_worker_count - .saturating_sub(self.account_available_workers.load(Ordering::Relaxed)) - } + // Extract storage proof from the result + let proof = match proof_msg.result? 
{ + ProofResult::StorageProof { hashed_address: addr, proof } => { + debug_assert_eq!( + addr, + hashed_address, + "storage worker must return same address: expected {hashed_address}, got {addr}" + ); + proof + } + ProofResult::AccountMultiproof { .. } => { + unreachable!("storage worker only sends StorageProof variant") + } + }; - /// Dispatch a storage proof computation to storage worker pool - /// - /// The result will be sent via the `proof_result_sender` channel. - pub fn dispatch_storage_proof( - &self, - input: StorageProofInput, - proof_result_sender: ProofResultContext, - ) -> Result<(), ProviderError> { - self.storage_work_tx - .send(StorageWorkerJob::StorageProof { input, proof_result_sender }) - .map_err(|err| { - let error = - ProviderError::other(std::io::Error::other("storage workers unavailable")); + let root = proof.root; + collected_decoded_storages.insert(hashed_address, proof); + root + } + // Since we do not store all intermediate nodes in the database, there might + // be a possibility of re-adding a non-modified leaf to the hash builder. + None => { + tracker.inc_missed_leaves(); - if let StorageWorkerJob::StorageProof { proof_result_sender, .. } = err.0 { - let ProofResultContext { - sender: result_tx, - sequence_number: seq, - state, - start_time: start, - } = proof_result_sender; + match ctx.missed_leaves_storage_roots.entry(hashed_address) { + dashmap::Entry::Occupied(occ) => *occ.get(), + dashmap::Entry::Vacant(vac) => { + let root = + StorageProof::new_hashed(provider, provider, hashed_address) + .with_prefix_set_mut(Default::default()) + .storage_multiproof( + ctx.targets + .get(&hashed_address) + .cloned() + .unwrap_or_default(), + ) + .map_err(|e| { + ParallelStateRootError::StorageRoot( + reth_execution_errors::StorageRootError::Database( + DatabaseError::Other(e.to_string()), + ), + ) + })? + .root; - let _ = result_tx.send(ProofResultMessage { - sequence_number: seq, - result: Err(ParallelStateRootError::Provider(error.clone())), - elapsed: start.elapsed(), - state, - }); - } + vac.insert(root); + root + } + } + } + }; + + // Encode account + account_rlp.clear(); + let account = account.into_trie_account(root); + account.encode(&mut account_rlp as &mut dyn BufMut); - error - }) + hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); + } + } } - /// Dispatch an account multiproof computation - /// - /// The result will be sent via the `result_sender` channel included in the input. - pub fn dispatch_account_multiproof( - &self, - input: AccountMultiproofInput, - ) -> Result<(), ProviderError> { - self.account_work_tx - .send(AccountWorkerJob::AccountMultiproof { input: Box::new(input) }) - .map_err(|err| { - let error = - ProviderError::other(std::io::Error::other("account workers unavailable")); + // Consume remaining storage proof receivers for accounts not encountered during trie walk. + for (hashed_address, receiver) in storage_proof_receivers { + if let Ok(proof_msg) = receiver.recv() { + // Extract storage proof from the result + if let Ok(ProofResult::StorageProof { proof, .. }) = proof_msg.result { + collected_decoded_storages.insert(hashed_address, proof); + } + } + } - if let AccountWorkerJob::AccountMultiproof { input } = err.0 { - let AccountMultiproofInput { - proof_result_sender: - ProofResultContext { - sender: result_tx, - sequence_number: seq, - state, - start_time: start, - }, - .. 
- } = *input; + let _ = hash_builder.root(); - let _ = result_tx.send(ProofResultMessage { - sequence_number: seq, - result: Err(ParallelStateRootError::Provider(error.clone())), - elapsed: start.elapsed(), - state, - }); - } + let account_subtree_raw_nodes = hash_builder.take_proof_nodes(); + let decoded_account_subtree = DecodedProofNodes::try_from(account_subtree_raw_nodes)?; - error - }) - } + let (branch_node_hash_masks, branch_node_tree_masks) = if ctx.collect_branch_node_masks { + let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); + ( + updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), + updated_branch_nodes.into_iter().map(|(path, node)| (path, node.tree_mask)).collect(), + ) + } else { + (Default::default(), Default::default()) + }; - /// Dispatch blinded storage node request to storage worker pool - pub(crate) fn dispatch_blinded_storage_node( - &self, - account: B256, - path: Nibbles, - ) -> Result, ProviderError> { - let (tx, rx) = channel(); - self.storage_work_tx - .send(StorageWorkerJob::BlindedStorageNode { account, path, result_sender: tx }) - .map_err(|_| { - ProviderError::other(std::io::Error::other("storage workers unavailable")) - })?; + Ok(DecodedMultiProof { + account_subtree: decoded_account_subtree, + branch_node_hash_masks, + branch_node_tree_masks, + storages: collected_decoded_storages, + }) +} +/// Queues storage proofs for all accounts in the targets and returns receivers. +/// +/// This function queues all storage proof tasks to the worker pool but returns immediately +/// with receivers, allowing the account trie walk to proceed in parallel with storage proof +/// computation. This enables interleaved parallelism for better performance. +/// +/// Propagates errors up if queuing fails. Receivers must be consumed by the caller. +fn dispatch_storage_proofs( + storage_work_tx: &CrossbeamSender, + targets: &MultiProofTargets, + storage_prefix_sets: &mut B256Map, + with_branch_node_masks: bool, + multi_added_removed_keys: Option<&Arc>, +) -> Result>, ParallelStateRootError> { + let mut storage_proof_receivers = + B256Map::with_capacity_and_hasher(targets.len(), Default::default()); - Ok(rx) - } + // Dispatch all storage proofs to worker pool + for (hashed_address, target_slots) in targets.iter() { + let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); - /// Dispatch blinded account node request to account worker pool - pub(crate) fn dispatch_blinded_account_node( - &self, - path: Nibbles, - ) -> Result, ProviderError> { - let (tx, rx) = channel(); - self.account_work_tx - .send(AccountWorkerJob::BlindedAccountNode { path, result_sender: tx }) + // Create channel for receiving ProofResultMessage + let (result_tx, result_rx) = crossbeam_channel::unbounded(); + let start = Instant::now(); + + // Create computation input (data only, no communication channel) + let input = StorageProofInput::new( + *hashed_address, + prefix_set, + target_slots.clone(), + with_branch_node_masks, + multi_added_removed_keys.cloned(), + ); + + // Always dispatch a storage proof so we obtain the storage root even when no slots are + // requested. 
+ storage_work_tx + .send(StorageWorkerJob::StorageProof { + input, + proof_result_sender: ProofResultContext::new( + result_tx, + 0, + HashedPostState::default(), + start, + ), + }) .map_err(|_| { - ProviderError::other(std::io::Error::other("account workers unavailable")) + ParallelStateRootError::Other(format!( + "Failed to queue storage proof for {}: storage worker pool unavailable", + hashed_address + )) })?; - Ok(rx) + storage_proof_receivers.insert(*hashed_address, result_rx); } -} -impl TrieNodeProviderFactory for ProofWorkerHandle { - type AccountNodeProvider = ProofTaskTrieNodeProvider; - type StorageNodeProvider = ProofTaskTrieNodeProvider; + Ok(storage_proof_receivers) +} +/// Input parameters for storage proof computation. +#[derive(Debug)] +pub struct StorageProofInput { + /// The hashed address for which the proof is calculated. + hashed_address: B256, + /// The prefix set for the proof calculation. + prefix_set: PrefixSet, + /// The target slots for the proof calculation. + target_slots: B256Set, + /// Whether or not to collect branch node masks + with_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + multi_added_removed_keys: Option>, +} - fn account_node_provider(&self) -> Self::AccountNodeProvider { - ProofTaskTrieNodeProvider::AccountNode { handle: self.clone() } +impl StorageProofInput { + /// Creates a new [`StorageProofInput`] with the given hashed address, prefix set, and target + /// slots. + pub const fn new( + hashed_address: B256, + prefix_set: PrefixSet, + target_slots: B256Set, + with_branch_node_masks: bool, + multi_added_removed_keys: Option>, + ) -> Self { + Self { + hashed_address, + prefix_set, + target_slots, + with_branch_node_masks, + multi_added_removed_keys, + } } +} +/// Input parameters for account multiproof computation. +#[derive(Debug, Clone)] +pub struct AccountMultiproofInput { + /// The targets for which to compute the multiproof. + pub targets: MultiProofTargets, + /// The prefix sets for the proof calculation. + pub prefix_sets: TriePrefixSets, + /// Whether or not to collect branch node masks. + pub collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + pub multi_added_removed_keys: Option>, + /// Cached storage proof roots for missed leaves encountered during account trie walk. + pub missed_leaves_storage_roots: Arc>, + /// Context for sending the proof result. + pub proof_result_sender: ProofResultContext, +} - fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { - ProofTaskTrieNodeProvider::StorageNode { account, handle: self.clone() } - } +struct AccountMultiproofParams<'a> { + /// The targets for which to compute the multiproof. + targets: &'a MultiProofTargets, + /// The prefix set for the account trie walk. + prefix_set: PrefixSet, + /// Whether or not to collect branch node masks. + collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + multi_added_removed_keys: Option<&'a Arc>, + /// Receivers for storage proofs being computed in parallel. + storage_proof_receivers: B256Map>, + /// Cached storage proof roots for missed leaves encountered during account trie walk. + missed_leaves_storage_roots: &'a DashMap, } -/// Trie node provider for retrieving trie nodes by path. #[derive(Debug)] -pub enum ProofTaskTrieNodeProvider { - /// Blinded account trie node provider. - AccountNode { - /// Handle to the proof worker pools. 
- handle: ProofWorkerHandle, +enum AccountWorkerJob { + /// Account multiproof computation request + AccountMultiproof { + /// Account multiproof input parameters + input: Box, }, - /// Blinded storage trie node provider. - StorageNode { - /// Target account. - account: B256, - /// Handle to the proof worker pools. - handle: ProofWorkerHandle, + /// Blinded account node retrieval request + BlindedAccountNode { + /// Path to the account node + path: Nibbles, + /// Channel to send result back to original caller + result_sender: Sender, }, } -impl TrieNodeProvider for ProofTaskTrieNodeProvider { - fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { - match self { - Self::AccountNode { handle } => { - let rx = handle - .dispatch_blinded_account_node(*path) - .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; - rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? - } - Self::StorageNode { handle, account } => { - let rx = handle - .dispatch_blinded_storage_node(*account, *path) - .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; - rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? - } - } - } -} - #[cfg(test)] mod tests { use super::*; From 020eb6ad7e8825754a2c3e4ade1c0ebf9fa98566 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 28 Oct 2025 15:02:19 +0000 Subject: [PATCH 231/371] fix(pipeline): ensure we dont pass an outdated target to header stage (#19351) --- crates/stages/stages/src/sets.rs | 10 ++++++++-- crates/stages/stages/src/stages/era.rs | 23 +++++++++++++++++------ 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index 015be507336..48a2a995809 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -270,8 +270,14 @@ where Stage, { fn builder(self) -> StageSetBuilder { - StageSetBuilder::default() - .add_stage(EraStage::new(self.era_import_source, self.stages_config.etl.clone())) + let mut builder = StageSetBuilder::default(); + + if self.era_import_source.is_some() { + builder = builder + .add_stage(EraStage::new(self.era_import_source, self.stages_config.etl.clone())); + } + + builder .add_stage(HeaderStage::new( self.provider, self.header_downloader, diff --git a/crates/stages/stages/src/stages/era.rs b/crates/stages/stages/src/stages/era.rs index 10598f90112..6fa10a297c7 100644 --- a/crates/stages/stages/src/stages/era.rs +++ b/crates/stages/stages/src/stages/era.rs @@ -204,13 +204,24 @@ where height } else { - // It's possible for a pipeline sync to be executed with a None target, e.g. after a - // stage was manually dropped, and `reth node` is then called without a `--debug.tip`. + // No era files to process. Return the highest block we're aware of to avoid + // limiting subsequent stages with an outdated checkpoint. // - // In this case we don't want to simply default to zero, as that would overwrite the - // previously stored checkpoint block number. Instead we default to that previous - // checkpoint. - input.target.unwrap_or_else(|| input.checkpoint().block_number) + // This can happen when: + // 1. Era import is complete (all pre-merge blocks imported) + // 2. No era import source was configured + // + // We return max(checkpoint, highest_header, target) to ensure we don't return + // a stale checkpoint that could limit subsequent stages like Headers. 
+ let highest_header = provider + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::Headers) + .unwrap_or_default(); + + let checkpoint = input.checkpoint().block_number; + let from_target = input.target.unwrap_or(checkpoint); + + checkpoint.max(highest_header).max(from_target) }; Ok(ExecOutput { checkpoint: StageCheckpoint::new(height), done: height >= input.target() }) From 3ce6e87ab9d4439d8a0fa8065c6ff479cc3c5b95 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 Oct 2025 17:07:39 +0100 Subject: [PATCH 232/371] chore: update docs for expected test failure (#19343) --- .github/assets/hive/expected_failures.yaml | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index df111f97beb..7443ec5ee9a 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -30,7 +30,7 @@ engine-withdrawals: - Corrupted Block Hash Payload (INVALID) (Paris) (reth) - Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org (Paris) (reth) -engine-api: [] +engine-api: [ ] # no fix due to https://github.com/paradigmxyz/reth/issues/8732 engine-cancun: @@ -39,9 +39,9 @@ engine-cancun: # in hive or its dependencies - Blob Transaction Ordering, Multiple Clients (Cancun) (reth) -sync: [] +sync: [ ] -engine-auth: [] +engine-auth: [ ] # tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage # no fix: it's too expensive to check whether the storage is empty on each creation (? - need more context on WHY) @@ -56,10 +56,6 @@ engine-auth: [] # tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment # post-fork test contract deployment, should fix for spec compliance but not realistic on mainnet (? - need more context) # -# tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py::test_max_blobs_per_tx_fork_transition -# reth enforces 6 blob limit from EIP-7594, but EIP-7892 raises it to 9. -# Needs constant update in alloy. https://github.com/paradigmxyz/reth/issues/18975 -# # tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_* # status (27th June 2024): was discussed in ACDT meeting, need to be raised in ACDE. # tests require hash collision on already deployed accounts with storage - mathematically @@ -146,6 +142,11 @@ eels/consume-engine: - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + +# tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py::test_max_blobs_per_tx_fork_transition[fork_PragueToOsakaAtTime15k-blob_count_7-blockchain_test] +# this test inserts a chain via chain.rlp where the last block is invalid, but expects import to stop there, this doesn't work properly with our pipeline import approach hence the import fails when the invalid block is detected. +#. In other words, if this test fails, this means we're correctly rejecting the block. +#. 
The same test exists in the consume-engine simulator where it is passing as expected eels/consume-rlp: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test-zero_nonce]-reth - tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py::test_system_contract_errors[fork_Prague-blockchain_test_engine-system_contract_reaches_gas_limit-system_contract_0x0000bbddc7ce488642fb579f8b00f3a590007251]-reth From ac4f80ded34a718eaf1300ea5c12cb0476814da8 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 28 Oct 2025 17:22:20 +0000 Subject: [PATCH 233/371] chore: dont write receipts to both storages on archive node (#19361) --- crates/storage/provider/src/providers/database/provider.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 93baa4309d2..ece6ef56c85 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1650,9 +1650,9 @@ impl StateWriter if let Some(writer) = &mut receipts_static_writer { writer.append_receipt(receipt_idx, receipt)?; + } else { + receipts_cursor.append(receipt_idx, receipt)?; } - - receipts_cursor.append(receipt_idx, receipt)?; } } From 6651ae78521730740cc4af59b41c7c4d3cb1d1e7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 Oct 2025 20:36:23 +0100 Subject: [PATCH 234/371] chore: add ChainHardforks::extend (#19332) --- .../ethereum/hardforks/src/hardforks/mod.rs | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/crates/ethereum/hardforks/src/hardforks/mod.rs b/crates/ethereum/hardforks/src/hardforks/mod.rs index 1c67c380d96..32db1381acd 100644 --- a/crates/ethereum/hardforks/src/hardforks/mod.rs +++ b/crates/ethereum/hardforks/src/hardforks/mod.rs @@ -133,6 +133,14 @@ impl ChainHardforks { } } + /// Extends the list with multiple forks, updating existing entries with new + /// [`ForkCondition`]s if they already exist. + pub fn extend(&mut self, forks: impl IntoIterator) { + for (fork, condition) in forks { + self.insert(fork, condition); + } + } + /// Removes `fork` from list. 
pub fn remove(&mut self, fork: H) { self.forks.retain(|(inner_fork, _)| inner_fork.name() != fork.name()); @@ -157,3 +165,22 @@ impl From<[(T, ForkCondition); N]> for ChainHardfor ) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_hardforks::hardfork; + + hardfork!(AHardfork { A1 }); + hardfork!(BHardfork { B1 }); + + #[test] + fn add_hardforks() { + let mut forks = ChainHardforks::default(); + forks.insert(AHardfork::A1, ForkCondition::Block(1)); + forks.insert(BHardfork::B1, ForkCondition::Block(1)); + assert_eq!(forks.len(), 2); + forks.is_fork_active_at_block(AHardfork::A1, 1); + forks.is_fork_active_at_block(BHardfork::B1, 1); + } +} From adb4f48471d0947a693cfbdafe79549922da0022 Mon Sep 17 00:00:00 2001 From: Mablr <59505383+mablr@users.noreply.github.com> Date: Tue, 28 Oct 2025 21:13:44 +0100 Subject: [PATCH 235/371] feat(reth-optimism-node): Add OP E2E mineblock test with isthmus activated at genesis (#19305) --- .../src/testsuite/actions/produce_blocks.rs | 69 ++++++++++++++----- .../node/tests/e2e-testsuite/testsuite.rs | 47 ++++++++++++- 2 files changed, 98 insertions(+), 18 deletions(-) diff --git a/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs index 92bbba93b89..fe9e9133aec 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs @@ -98,31 +98,66 @@ where finalized_block_hash: parent_hash, }; - let fcu_result = EngineApiClient::::fork_choice_updated_v2( + // Try v2 first for backwards compatibility, fall back to v3 on error. + match EngineApiClient::::fork_choice_updated_v2( &engine_client, fork_choice_state, Some(self.payload_attributes.clone()), ) - .await?; - - debug!("FCU result: {:?}", fcu_result); - - // check if we got a valid payload ID - match fcu_result.payload_status.status { - PayloadStatusEnum::Valid => { - if let Some(payload_id) = fcu_result.payload_id { - debug!("Got payload ID: {payload_id}"); + .await + { + Ok(fcu_result) => { + debug!(?fcu_result, "FCU v2 result"); + match fcu_result.payload_status.status { + PayloadStatusEnum::Valid => { + if let Some(payload_id) = fcu_result.payload_id { + debug!(id=%payload_id, "Got payload"); + let _engine_payload = EngineApiClient::::get_payload_v2( + &engine_client, + payload_id, + ) + .await?; + Ok(()) + } else { + Err(eyre::eyre!("No payload ID returned from forkchoiceUpdated")) + } + } + _ => Err(eyre::eyre!( + "Payload status not valid: {:?}", + fcu_result.payload_status + ))?, + } + } + Err(_) => { + // If v2 fails due to unsupported fork/missing fields, try v3 + let fcu_result = EngineApiClient::::fork_choice_updated_v3( + &engine_client, + fork_choice_state, + Some(self.payload_attributes.clone()), + ) + .await?; - // get the payload that was built - let _engine_payload = - EngineApiClient::::get_payload_v2(&engine_client, payload_id) + debug!(?fcu_result, "FCU v3 result"); + match fcu_result.payload_status.status { + PayloadStatusEnum::Valid => { + if let Some(payload_id) = fcu_result.payload_id { + debug!(id=%payload_id, "Got payload"); + let _engine_payload = EngineApiClient::::get_payload_v3( + &engine_client, + payload_id, + ) .await?; - Ok(()) - } else { - Err(eyre::eyre!("No payload ID returned from forkchoiceUpdated")) + Ok(()) + } else { + Err(eyre::eyre!("No payload ID returned from forkchoiceUpdated")) + } + } + _ => Err(eyre::eyre!( + "Payload status not valid: {:?}", + fcu_result.payload_status + )), } } - _ => 
Err(eyre::eyre!("Payload status not valid: {:?}", fcu_result.payload_status)), } }) } diff --git a/crates/optimism/node/tests/e2e-testsuite/testsuite.rs b/crates/optimism/node/tests/e2e-testsuite/testsuite.rs index 75dff49c141..b031b3a8266 100644 --- a/crates/optimism/node/tests/e2e-testsuite/testsuite.rs +++ b/crates/optimism/node/tests/e2e-testsuite/testsuite.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{Address, B256}; +use alloy_primitives::{Address, B256, B64}; use eyre::Result; use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_e2e_test_utils::testsuite::{ @@ -53,3 +53,48 @@ async fn test_testsuite_op_assert_mine_block() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn test_testsuite_op_assert_mine_block_isthmus_activated() -> Result<()> { + reth_tracing::init_test_tracing(); + + let setup = Setup::default() + .with_chain_spec(Arc::new( + OpChainSpecBuilder::default() + .chain(OP_MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .isthmus_activated() + .build() + .into(), + )) + .with_network(NetworkSetup::single_node()); + + let test = + TestBuilder::new().with_setup(setup).with_action(AssertMineBlock::::new( + 0, + vec![], + Some(B256::ZERO), + // TODO: refactor once we have actions to generate payload attributes. + OpPayloadAttributes { + payload_attributes: alloy_rpc_types_engine::PayloadAttributes { + timestamp: std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(), + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }, + transactions: None, + no_tx_pool: None, + eip_1559_params: Some(B64::ZERO), + min_base_fee: None, + gas_limit: Some(30_000_000), + }, + )); + + test.run::().await?; + + Ok(()) +} From ff46daddb60e6938661e3efad1bb67daabfdb48b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 28 Oct 2025 21:29:26 +0100 Subject: [PATCH 236/371] feat: insert at timestamp (#19365) --- crates/chainspec/src/spec.rs | 2 +- .../ethereum/hardforks/src/hardforks/mod.rs | 182 +++++++++++++++--- 2 files changed, 160 insertions(+), 24 deletions(-) diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index e8d16886aac..22ddddbc719 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -883,7 +883,7 @@ impl ChainSpecBuilder { /// Remove the given fork from the spec. pub fn without_fork(mut self, fork: H) -> Self { - self.hardforks.remove(fork); + self.hardforks.remove(&fork); self } diff --git a/crates/ethereum/hardforks/src/hardforks/mod.rs b/crates/ethereum/hardforks/src/hardforks/mod.rs index 32db1381acd..dad175e8f66 100644 --- a/crates/ethereum/hardforks/src/hardforks/mod.rs +++ b/crates/ethereum/hardforks/src/hardforks/mod.rs @@ -4,11 +4,7 @@ pub use dev::DEV_HARDFORKS; use crate::{ForkCondition, ForkFilter, ForkId, Hardfork, Head}; #[cfg(feature = "std")] use rustc_hash::FxHashMap; -#[cfg(feature = "std")] -use std::collections::hash_map::Entry; -#[cfg(not(feature = "std"))] -use alloc::collections::btree_map::Entry; use alloc::{boxed::Box, vec::Vec}; /// Generic trait over a set of ordered hardforks @@ -115,34 +111,74 @@ impl ChainHardforks { self.fork(fork).active_at_block(block_number) } - /// Inserts `fork` into list, updating with a new [`ForkCondition`] if it already exists. + /// Inserts a fork with the given [`ForkCondition`], maintaining forks in ascending order + /// based on the `Ord` implementation of [`ForkCondition`]. 
+ /// + /// If the fork already exists (regardless of its current condition type), it will be removed + /// and re-inserted at the appropriate position based on the new condition. + /// + /// # Ordering Behavior + /// + /// Forks are ordered according to [`ForkCondition`]'s `Ord` implementation: + /// - [`ForkCondition::Never`] comes first + /// - [`ForkCondition::Block`] ordered by block number + /// - [`ForkCondition::Timestamp`] ordered by timestamp value + /// - [`ForkCondition::TTD`] ordered by total difficulty + /// + /// # Example + /// + /// ```ignore + /// let mut forks = ChainHardforks::default(); + /// forks.insert(Fork::Frontier, ForkCondition::Block(0)); + /// forks.insert(Fork::Homestead, ForkCondition::Block(1_150_000)); + /// forks.insert(Fork::Cancun, ForkCondition::Timestamp(1710338135)); + /// + /// // Forks are ordered: Frontier (Block 0), Homestead (Block 1150000), Cancun (Timestamp) + /// ``` pub fn insert(&mut self, fork: H, condition: ForkCondition) { - match self.map.entry(fork.name()) { - Entry::Occupied(mut entry) => { - *entry.get_mut() = condition; - if let Some((_, inner)) = - self.forks.iter_mut().find(|(inner, _)| inner.name() == fork.name()) - { - *inner = condition; - } - } - Entry::Vacant(entry) => { - entry.insert(condition); - self.forks.push((Box::new(fork), condition)); - } - } + // Remove existing fork if it exists + self.remove(&fork); + + // Find the correct position based on ForkCondition's Ord implementation + let pos = self + .forks + .iter() + .position(|(_, existing_condition)| *existing_condition > condition) + .unwrap_or(self.forks.len()); + + self.map.insert(fork.name(), condition); + self.forks.insert(pos, (Box::new(fork), condition)); } /// Extends the list with multiple forks, updating existing entries with new /// [`ForkCondition`]s if they already exist. - pub fn extend(&mut self, forks: impl IntoIterator) { + /// + /// Each fork is inserted using [`Self::insert`], maintaining proper ordering based on + /// [`ForkCondition`]'s `Ord` implementation. + /// + /// # Example + /// + /// ```ignore + /// let mut forks = ChainHardforks::default(); + /// forks.extend([ + /// (Fork::Homestead, ForkCondition::Block(1_150_000)), + /// (Fork::Frontier, ForkCondition::Block(0)), + /// (Fork::Cancun, ForkCondition::Timestamp(1710338135)), + /// ]); + /// + /// // Forks will be automatically ordered: Frontier, Homestead, Cancun + /// ``` + pub fn extend( + &mut self, + forks: impl IntoIterator, + ) { for (fork, condition) in forks { self.insert(fork, condition); } } /// Removes `fork` from list. 
- pub fn remove(&mut self, fork: H) { + pub fn remove(&mut self, fork: &H) { self.forks.retain(|(inner_fork, _)| inner_fork.name() != fork.name()); self.map.remove(fork.name()); } @@ -171,8 +207,8 @@ mod tests { use super::*; use alloy_hardforks::hardfork; - hardfork!(AHardfork { A1 }); - hardfork!(BHardfork { B1 }); + hardfork!(AHardfork { A1, A2, A3 }); + hardfork!(BHardfork { B1, B2 }); #[test] fn add_hardforks() { @@ -183,4 +219,104 @@ mod tests { forks.is_fork_active_at_block(AHardfork::A1, 1); forks.is_fork_active_at_block(BHardfork::B1, 1); } + + #[test] + fn insert_maintains_fork_order() { + let mut forks = ChainHardforks::default(); + + // Insert forks in random order + forks.insert(BHardfork::B1, ForkCondition::Timestamp(2000)); + forks.insert(AHardfork::A1, ForkCondition::Block(100)); + forks.insert(AHardfork::A2, ForkCondition::Block(50)); + forks.insert(BHardfork::B2, ForkCondition::Timestamp(1000)); + + assert_eq!(forks.len(), 4); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + // Verify ordering: Block conditions come before Timestamp conditions + // and within each type, they're ordered by value + assert_eq!(fork_list[0].0.name(), "A2"); + assert_eq!(fork_list[0].1, ForkCondition::Block(50)); + assert_eq!(fork_list[1].0.name(), "A1"); + assert_eq!(fork_list[1].1, ForkCondition::Block(100)); + assert_eq!(fork_list[2].0.name(), "B2"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(1000)); + assert_eq!(fork_list[3].0.name(), "B1"); + assert_eq!(fork_list[3].1, ForkCondition::Timestamp(2000)); + } + + #[test] + fn insert_replaces_and_reorders_existing_fork() { + let mut forks = ChainHardforks::default(); + + // Insert initial forks + forks.insert(AHardfork::A1, ForkCondition::Block(100)); + forks.insert(BHardfork::B1, ForkCondition::Block(200)); + forks.insert(AHardfork::A2, ForkCondition::Timestamp(1000)); + + assert_eq!(forks.len(), 3); + + // Update A1 from Block to Timestamp - should move it after B1 + forks.insert(AHardfork::A1, ForkCondition::Timestamp(500)); + assert_eq!(forks.len(), 3); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + // Verify new ordering + assert_eq!(fork_list[0].0.name(), "B1"); + assert_eq!(fork_list[0].1, ForkCondition::Block(200)); + assert_eq!(fork_list[1].0.name(), "A1"); + assert_eq!(fork_list[1].1, ForkCondition::Timestamp(500)); + assert_eq!(fork_list[2].0.name(), "A2"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(1000)); + + // Update A1 timestamp to move it after A2 + forks.insert(AHardfork::A1, ForkCondition::Timestamp(2000)); + assert_eq!(forks.len(), 3); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + assert_eq!(fork_list[0].0.name(), "B1"); + assert_eq!(fork_list[0].1, ForkCondition::Block(200)); + assert_eq!(fork_list[1].0.name(), "A2"); + assert_eq!(fork_list[1].1, ForkCondition::Timestamp(1000)); + assert_eq!(fork_list[2].0.name(), "A1"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(2000)); + } + + #[test] + fn extend_maintains_order() { + let mut forks = ChainHardforks::default(); + + // Use extend to insert multiple forks at once in random order + forks.extend([ + (AHardfork::A1, ForkCondition::Block(100)), + (AHardfork::A2, ForkCondition::Timestamp(1000)), + ]); + forks.extend([(BHardfork::B1, ForkCondition::Timestamp(2000))]); + + assert_eq!(forks.len(), 3); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + // Verify ordering is maintained + assert_eq!(fork_list[0].0.name(), "A1"); + assert_eq!(fork_list[0].1, ForkCondition::Block(100)); + 
assert_eq!(fork_list[1].0.name(), "A2"); + assert_eq!(fork_list[1].1, ForkCondition::Timestamp(1000)); + assert_eq!(fork_list[2].0.name(), "B1"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(2000)); + + // Extend again with an update to A2 + forks.extend([(AHardfork::A2, ForkCondition::Timestamp(3000))]); + assert_eq!(forks.len(), 3); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + assert_eq!(fork_list[0].0.name(), "A1"); + assert_eq!(fork_list[1].0.name(), "B1"); + assert_eq!(fork_list[2].0.name(), "A2"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(3000)); + } } From 77ef028aca842e53b5a96a78a4ad9451ae205886 Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Wed, 29 Oct 2025 03:39:29 -0400 Subject: [PATCH 237/371] fix(op-reth/consensus): fixes header validation for jovian. decouple excess blob gas and blob gas used (#19338) --- crates/optimism/consensus/src/lib.rs | 325 ++++++++++++++++++++++++++- 1 file changed, 316 insertions(+), 9 deletions(-) diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 25e11be9ace..34a003bad32 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -18,9 +18,9 @@ use core::fmt::Debug; use reth_chainspec::EthChainSpec; use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; use reth_consensus_common::validation::{ - validate_against_parent_4844, validate_against_parent_eip1559_base_fee, - validate_against_parent_hash_number, validate_against_parent_timestamp, validate_cancun_gas, - validate_header_base_fee, validate_header_extra_data, validate_header_gas, + validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, + validate_against_parent_timestamp, validate_cancun_gas, validate_header_base_fee, + validate_header_extra_data, validate_header_gas, }; use reth_execution_types::BlockExecutionResult; use reth_optimism_forks::OpHardforks; @@ -188,9 +188,32 @@ where &self.chain_spec, )?; - // ensure that the blob gas fields for this block - if let Some(blob_params) = self.chain_spec.blob_params_at_timestamp(header.timestamp()) { - validate_against_parent_4844(header.header(), parent.header(), blob_params)?; + // Ensure that the blob gas fields for this block are correctly set. + // In the op-stack, the excess blob gas is always 0 for all blocks after ecotone. + // The blob gas used and the excess blob gas should both be set after ecotone. + // After Jovian, the blob gas used contains the current DA footprint. + if self.chain_spec.is_ecotone_active_at_timestamp(header.timestamp()) { + let blob_gas_used = header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + + // Before Jovian and after ecotone, the blob gas used should be 0. 
+ if !self.chain_spec.is_jovian_active_at_timestamp(header.timestamp()) && + blob_gas_used != 0 + { + return Err(ConsensusError::BlobGasUsedDiff(GotExpected { + got: blob_gas_used, + expected: 0, + })); + } + + let excess_blob_gas = + header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?; + if excess_blob_gas != 0 { + return Err(ConsensusError::ExcessBlobGasDiff { + diff: GotExpected { got: excess_blob_gas, expected: 0 }, + parent_excess_blob_gas: parent.excess_blob_gas().unwrap_or(0), + parent_blob_gas_used: parent.blob_gas_used().unwrap_or(0), + }) + } } Ok(()) @@ -204,11 +227,14 @@ mod tests { use alloy_consensus::{BlockBody, Eip658Value, Header, Receipt, TxEip7702, TxReceipt}; use alloy_eips::{eip4895::Withdrawals, eip7685::Requests}; use alloy_primitives::{Address, Bytes, Signature, U256}; - use op_alloy_consensus::OpTypedTransaction; - use reth_consensus::{Consensus, ConsensusError, FullConsensus}; + use op_alloy_consensus::{ + encode_holocene_extra_data, encode_jovian_extra_data, OpTypedTransaction, + }; + use reth_chainspec::BaseFeeParams; + use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder, OP_MAINNET}; use reth_optimism_primitives::{OpPrimitives, OpReceipt, OpTransactionSigned}; - use reth_primitives_traits::{proofs, GotExpected, RecoveredBlock, SealedBlock}; + use reth_primitives_traits::{proofs, GotExpected, RecoveredBlock, SealedBlock, SealedHeader}; use reth_provider::BlockExecutionResult; use crate::OpBeaconConsensus; @@ -452,4 +478,285 @@ mod tests { }) ); } + + #[test] + fn test_header_min_base_fee_validation() { + const MIN_BASE_FEE: u64 = 1000; + + let chain_spec = OpChainSpecBuilder::default() + .jovian_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt { + status: Eip658Value::success(), + cumulative_gas_used: 0, + logs: vec![], + }); + + let parent = Header { + number: 0, + base_fee_per_gas: Some(MIN_BASE_FEE / 10), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX - 1, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + extra_data: encode_jovian_extra_data( + Default::default(), + BaseFeeParams::optimism(), + MIN_BASE_FEE, + ) + .unwrap(), + ..Default::default() + }; + let parent = SealedHeader::seal_slow(parent); + + let header = Header { + number: 1, + base_fee_per_gas: Some(MIN_BASE_FEE), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + parent_hash: parent.hash(), + ..Default::default() + }; + let header = SealedHeader::seal_slow(header); + + let result = beacon_consensus.validate_header_against_parent(&header, &parent); + + assert!(result.is_ok()); + } + + #[test] + fn test_header_min_base_fee_validation_failure() { + const MIN_BASE_FEE: u64 = 1000; + + let chain_spec = 
OpChainSpecBuilder::default() + .jovian_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt { + status: Eip658Value::success(), + cumulative_gas_used: 0, + logs: vec![], + }); + + let parent = Header { + number: 0, + base_fee_per_gas: Some(MIN_BASE_FEE / 10), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX - 1, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + extra_data: encode_jovian_extra_data( + Default::default(), + BaseFeeParams::optimism(), + MIN_BASE_FEE, + ) + .unwrap(), + ..Default::default() + }; + let parent = SealedHeader::seal_slow(parent); + + let header = Header { + number: 1, + base_fee_per_gas: Some(MIN_BASE_FEE - 1), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + parent_hash: parent.hash(), + ..Default::default() + }; + let header = SealedHeader::seal_slow(header); + + let result = beacon_consensus.validate_header_against_parent(&header, &parent); + + assert!(result.is_err()); + assert_eq!( + result.unwrap_err(), + ConsensusError::BaseFeeDiff(GotExpected { + got: MIN_BASE_FEE - 1, + expected: MIN_BASE_FEE, + }) + ); + } + + #[test] + fn test_header_da_footprint_validation() { + const MIN_BASE_FEE: u64 = 100_000; + const DA_FOOTPRINT: u64 = GAS_LIMIT - 1; + const GAS_LIMIT: u64 = 100_000_000; + + let chain_spec = OpChainSpecBuilder::default() + .jovian_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt { + status: Eip658Value::success(), + cumulative_gas_used: 0, + logs: vec![], + }); + + let parent = Header { + number: 0, + base_fee_per_gas: Some(MIN_BASE_FEE), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(DA_FOOTPRINT), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX - 1, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + extra_data: encode_jovian_extra_data( + Default::default(), + BaseFeeParams::optimism(), + MIN_BASE_FEE, + ) + .unwrap(), + gas_limit: GAS_LIMIT, + ..Default::default() + }; + let parent = SealedHeader::seal_slow(parent); + + let header = Header { + number: 1, + base_fee_per_gas: Some(MIN_BASE_FEE + MIN_BASE_FEE / 10), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(DA_FOOTPRINT), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX, + receipts_root: 
proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + parent_hash: parent.hash(), + ..Default::default() + }; + let header = SealedHeader::seal_slow(header); + + let result = beacon_consensus.validate_header_against_parent(&header, &parent); + + assert!(result.is_ok()); + } + + #[test] + fn test_header_isthmus_validation() { + const MIN_BASE_FEE: u64 = 100_000; + const DA_FOOTPRINT: u64 = GAS_LIMIT - 1; + const GAS_LIMIT: u64 = 100_000_000; + + let chain_spec = OpChainSpecBuilder::default() + .isthmus_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt { + status: Eip658Value::success(), + cumulative_gas_used: 0, + logs: vec![], + }); + + let parent = Header { + number: 0, + base_fee_per_gas: Some(MIN_BASE_FEE), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(DA_FOOTPRINT), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX - 1, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + extra_data: encode_holocene_extra_data(Default::default(), BaseFeeParams::optimism()) + .unwrap(), + gas_limit: GAS_LIMIT, + ..Default::default() + }; + let parent = SealedHeader::seal_slow(parent); + + let header = Header { + number: 1, + base_fee_per_gas: Some(MIN_BASE_FEE - 2 * MIN_BASE_FEE / 100), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(DA_FOOTPRINT), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + parent_hash: parent.hash(), + ..Default::default() + }; + let header = SealedHeader::seal_slow(header); + + let result = beacon_consensus.validate_header_against_parent(&header, &parent); + + assert!(result.is_err()); + assert_eq!( + result.unwrap_err(), + ConsensusError::BlobGasUsedDiff(GotExpected { got: DA_FOOTPRINT, expected: 0 }) + ); + } } From 10d9a7e3c65703495e370925b1dcd9740543fbaf Mon Sep 17 00:00:00 2001 From: YK Date: Wed, 29 Oct 2025 16:09:39 +0800 Subject: [PATCH 238/371] refactor(trie): restructure proof task workers into structs (#19344) --- crates/trie/parallel/src/proof_task.rs | 857 +++++++++++++++---------- 1 file changed, 509 insertions(+), 348 deletions(-) diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 93eb03bde91..06ac673dd4e 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -153,14 +153,15 @@ impl ProofWorkerHandle { let metrics = ProofTaskTrieMetrics::default(); let _guard = span.enter(); - storage_worker_loop( + let worker = StorageProofWorker::new( task_ctx_clone, work_rx_clone, worker_id, storage_available_workers_clone, #[cfg(feature = "metrics")] metrics, - ) + ); + worker.run() }); } drop(parent_span); @@ -181,15 +182,16 @@ impl ProofWorkerHandle { let metrics = ProofTaskTrieMetrics::default(); let _guard = span.enter(); - account_worker_loop( + let worker = AccountProofWorker::new( task_ctx_clone, work_rx_clone, - storage_work_tx_clone, worker_id, + 
storage_work_tx_clone, account_available_workers_clone, #[cfg(feature = "metrics")] metrics, - ) + ); + worker.run() }); } drop(parent_span); @@ -453,6 +455,35 @@ where decoded_result } + + /// Process a blinded storage node request. + /// + /// Used by storage workers to retrieve blinded storage trie nodes for proof construction. + fn process_blinded_storage_node( + &self, + account: B256, + path: &Nibbles, + ) -> TrieNodeProviderResult { + let storage_node_provider = ProofBlindedStorageProvider::new( + &self.provider, + &self.provider, + self.prefix_sets.clone(), + account, + ); + storage_node_provider.trie_node(path) + } + + /// Process a blinded account node request. + /// + /// Used by account workers to retrieve blinded account trie nodes for proof construction. + fn process_blinded_account_node(&self, path: &Nibbles) -> TrieNodeProviderResult { + let account_node_provider = ProofBlindedAccountProvider::new( + &self.provider, + &self.provider, + self.prefix_sets.clone(), + ); + account_node_provider.trie_node(path) + } } impl TrieNodeProviderFactory for ProofWorkerHandle { type AccountNodeProvider = ProofTaskTrieNodeProvider; @@ -604,412 +635,540 @@ enum StorageWorkerJob { result_sender: Sender, }, } -/// Worker loop for storage trie operations. -/// -/// # Lifecycle -/// -/// Each worker: -/// 1. Receives `StorageWorkerJob` from crossbeam unbounded channel -/// 2. Computes result using its dedicated long-lived transaction -/// 3. Sends result directly to original caller via `std::mpsc` -/// 4. Repeats until channel closes (graceful shutdown) -/// -/// # Transaction Reuse -/// -/// Reuses the same transaction and cursor factories across multiple operations -/// to avoid transaction creation and cursor factory setup overhead. -/// -/// # Panic Safety -/// -/// If this function panics, the worker thread terminates but other workers -/// continue operating and the system degrades gracefully. -/// -/// # Shutdown + +/// Worker for storage trie operations. /// -/// Worker shuts down when the crossbeam channel closes (all senders dropped). -fn storage_worker_loop( +/// Each worker maintains a dedicated database transaction and processes +/// storage proof requests and blinded node lookups. +struct StorageProofWorker { + /// Shared task context with database factory and prefix sets task_ctx: ProofTaskCtx, + /// Channel for receiving work work_rx: CrossbeamReceiver, + /// Unique identifier for this worker (used for tracing) worker_id: usize, + /// Counter tracking worker availability available_workers: Arc, - #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, -) where + /// Metrics collector for this worker + #[cfg(feature = "metrics")] + metrics: ProofTaskTrieMetrics, +} + +impl StorageProofWorker +where Factory: DatabaseProviderROFactory, { - // Create provider from factory - let provider = task_ctx - .factory - .database_provider_ro() - .expect("Storage worker failed to initialize: unable to create provider"); - let proof_tx = ProofTaskTx::new(provider, task_ctx.prefix_sets, worker_id); - - trace!( - target: "trie::proof_task", - worker_id, - "Storage worker started" - ); - - let mut storage_proofs_processed = 0u64; - let mut storage_nodes_processed = 0u64; - - // Initially mark this worker as available. - available_workers.fetch_add(1, Ordering::Relaxed); + /// Creates a new storage proof worker. 
+ const fn new( + task_ctx: ProofTaskCtx, + work_rx: CrossbeamReceiver, + worker_id: usize, + available_workers: Arc, + #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, + ) -> Self { + Self { + task_ctx, + work_rx, + worker_id, + available_workers, + #[cfg(feature = "metrics")] + metrics, + } + } - while let Ok(job) = work_rx.recv() { - // Mark worker as busy. - available_workers.fetch_sub(1, Ordering::Relaxed); + /// Runs the worker loop, processing jobs until the channel closes. + /// + /// # Lifecycle + /// + /// 1. Initializes database provider and transaction + /// 2. Advertises availability + /// 3. Processes jobs in a loop: + /// - Receives job from channel + /// - Marks worker as busy + /// - Processes the job + /// - Marks worker as available + /// 4. Shuts down when channel closes + /// + /// # Panic Safety + /// + /// If this function panics, the worker thread terminates but other workers + /// continue operating and the system degrades gracefully. + fn run(self) { + let Self { + task_ctx, + work_rx, + worker_id, + available_workers, + #[cfg(feature = "metrics")] + metrics, + } = self; + + // Create provider from factory + let provider = task_ctx + .factory + .database_provider_ro() + .expect("Storage worker failed to initialize: unable to create provider"); + let proof_tx = ProofTaskTx::new(provider, task_ctx.prefix_sets, worker_id); - match job { - StorageWorkerJob::StorageProof { input, proof_result_sender } => { - let hashed_address = input.hashed_address; - let ProofResultContext { sender, sequence_number: seq, state, start_time } = - proof_result_sender; + trace!( + target: "trie::proof_task", + worker_id, + "Storage worker started" + ); - trace!( - target: "trie::proof_task", - worker_id, - hashed_address = ?hashed_address, - prefix_set_len = input.prefix_set.len(), - target_slots_len = input.target_slots.len(), - "Processing storage proof" - ); + let mut storage_proofs_processed = 0u64; + let mut storage_nodes_processed = 0u64; - let proof_start = Instant::now(); - let result = proof_tx.compute_storage_proof(input); + // Initially mark this worker as available. + available_workers.fetch_add(1, Ordering::Relaxed); - let proof_elapsed = proof_start.elapsed(); - storage_proofs_processed += 1; + while let Ok(job) = work_rx.recv() { + // Mark worker as busy. + available_workers.fetch_sub(1, Ordering::Relaxed); - let result_msg = result.map(|storage_proof| ProofResult::StorageProof { - hashed_address, - proof: storage_proof, - }); + match job { + StorageWorkerJob::StorageProof { input, proof_result_sender } => { + Self::process_storage_proof( + worker_id, + &proof_tx, + input, + proof_result_sender, + &mut storage_proofs_processed, + ); + } - if sender - .send(ProofResultMessage { - sequence_number: seq, - result: result_msg, - elapsed: start_time.elapsed(), - state, - }) - .is_err() - { - trace!( - target: "trie::proof_task", + StorageWorkerJob::BlindedStorageNode { account, path, result_sender } => { + Self::process_blinded_node( worker_id, - hashed_address = ?hashed_address, - storage_proofs_processed, - "Proof result receiver dropped, discarding result" + &proof_tx, + account, + path, + result_sender, + &mut storage_nodes_processed, ); } + } - trace!( - target: "trie::proof_task", - worker_id, - hashed_address = ?hashed_address, - proof_time_us = proof_elapsed.as_micros(), - total_processed = storage_proofs_processed, - "Storage proof completed" - ); + // Mark worker as available again. 
+ available_workers.fetch_add(1, Ordering::Relaxed); + } - // Mark worker as available again. - available_workers.fetch_add(1, Ordering::Relaxed); - } + trace!( + target: "trie::proof_task", + worker_id, + storage_proofs_processed, + storage_nodes_processed, + "Storage worker shutting down" + ); - StorageWorkerJob::BlindedStorageNode { account, path, result_sender } => { - trace!( - target: "trie::proof_task", - worker_id, - ?account, - ?path, - "Processing blinded storage node" - ); + #[cfg(feature = "metrics")] + metrics.record_storage_nodes(storage_nodes_processed as usize); + } - let storage_node_provider = ProofBlindedStorageProvider::new( - &proof_tx.provider, - &proof_tx.provider, - proof_tx.prefix_sets.clone(), - account, - ); + /// Processes a storage proof request. + fn process_storage_proof( + worker_id: usize, + proof_tx: &ProofTaskTx, + input: StorageProofInput, + proof_result_sender: ProofResultContext, + storage_proofs_processed: &mut u64, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { + let hashed_address = input.hashed_address; + let ProofResultContext { sender, sequence_number: seq, state, start_time } = + proof_result_sender; - let start = Instant::now(); - let result = storage_node_provider.trie_node(&path); - let elapsed = start.elapsed(); + trace!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + prefix_set_len = input.prefix_set.len(), + target_slots_len = input.target_slots.len(), + "Processing storage proof" + ); - storage_nodes_processed += 1; + let proof_start = Instant::now(); + let result = proof_tx.compute_storage_proof(input); - if result_sender.send(result).is_err() { - trace!( - target: "trie::proof_task", - worker_id, - ?account, - ?path, - storage_nodes_processed, - "Blinded storage node receiver dropped, discarding result" - ); - } + let proof_elapsed = proof_start.elapsed(); + *storage_proofs_processed += 1; - trace!( - target: "trie::proof_task", - worker_id, - ?account, - ?path, - elapsed_us = elapsed.as_micros(), - total_processed = storage_nodes_processed, - "Blinded storage node completed" - ); + let result_msg = result.map(|storage_proof| ProofResult::StorageProof { + hashed_address, + proof: storage_proof, + }); - // Mark worker as available again. - available_workers.fetch_add(1, Ordering::Relaxed); - } + if sender + .send(ProofResultMessage { + sequence_number: seq, + result: result_msg, + elapsed: start_time.elapsed(), + state, + }) + .is_err() + { + trace!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + storage_proofs_processed, + "Proof result receiver dropped, discarding result" + ); } + + trace!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + proof_time_us = proof_elapsed.as_micros(), + total_processed = storage_proofs_processed, + "Storage proof completed" + ); } - trace!( - target: "trie::proof_task", - worker_id, - storage_proofs_processed, - storage_nodes_processed, - "Storage worker shutting down" - ); + /// Processes a blinded storage node lookup request. 
+ fn process_blinded_node( + worker_id: usize, + proof_tx: &ProofTaskTx, + account: B256, + path: Nibbles, + result_sender: Sender, + storage_nodes_processed: &mut u64, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + "Processing blinded storage node" + ); - #[cfg(feature = "metrics")] - metrics.record_storage_nodes(storage_nodes_processed as usize); + let start = Instant::now(); + let result = proof_tx.process_blinded_storage_node(account, &path); + let elapsed = start.elapsed(); + + *storage_nodes_processed += 1; + + if result_sender.send(result).is_err() { + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + storage_nodes_processed, + "Blinded storage node receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + elapsed_us = elapsed.as_micros(), + total_processed = storage_nodes_processed, + "Blinded storage node completed" + ); + } } -/// Worker loop for account trie operations. -/// -/// # Lifecycle -/// -/// Each worker initializes its providers, advertises availability, then loops: -/// take a job, mark busy, compute the proof, send the result, and mark available again. -/// The loop ends gracefully once the channel closes. -/// -/// # Transaction Reuse -/// -/// Reuses the same transaction and cursor factories across multiple operations -/// to avoid transaction creation and cursor factory setup overhead. -/// -/// # Panic Safety -/// -/// If this function panics, the worker thread terminates but other workers -/// continue operating and the system degrades gracefully. -/// -/// # Shutdown + +/// Worker for account trie operations. /// -/// Worker shuts down when the crossbeam channel closes (all senders dropped). -fn account_worker_loop( +/// Each worker maintains a dedicated database transaction and processes +/// account multiproof requests and blinded node lookups. +struct AccountProofWorker { + /// Shared task context with database factory and prefix sets task_ctx: ProofTaskCtx, + /// Channel for receiving work work_rx: CrossbeamReceiver, - storage_work_tx: CrossbeamSender, + /// Unique identifier for this worker (used for tracing) worker_id: usize, + /// Channel for dispatching storage proof work + storage_work_tx: CrossbeamSender, + /// Counter tracking worker availability available_workers: Arc, - #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, -) where + /// Metrics collector for this worker + #[cfg(feature = "metrics")] + metrics: ProofTaskTrieMetrics, +} + +impl AccountProofWorker +where Factory: DatabaseProviderROFactory, { - // Create provider from factory - let provider = task_ctx - .factory - .database_provider_ro() - .expect("Account worker failed to initialize: unable to create provider"); - let proof_tx = ProofTaskTx::new(provider, task_ctx.prefix_sets, worker_id); - - trace!( - target: "trie::proof_task", - worker_id, - "Account worker started" - ); - - let mut account_proofs_processed = 0u64; - let mut account_nodes_processed = 0u64; - - // Count this worker as available only after successful initialization. - available_workers.fetch_add(1, Ordering::Relaxed); - - while let Ok(job) = work_rx.recv() { - // Mark worker as busy. 
- available_workers.fetch_sub(1, Ordering::Relaxed); - - match job { - AccountWorkerJob::AccountMultiproof { input } => { - let AccountMultiproofInput { - targets, - mut prefix_sets, - collect_branch_node_masks, - multi_added_removed_keys, - missed_leaves_storage_roots, - proof_result_sender: - ProofResultContext { - sender: result_tx, - sequence_number: seq, - state, - start_time: start, - }, - } = *input; - - let span = debug_span!( - target: "trie::proof_task", - "Account multiproof calculation", - targets = targets.len(), - worker_id, - ); - let _span_guard = span.enter(); - - trace!( - target: "trie::proof_task", - "Processing account multiproof" - ); - - let proof_start = Instant::now(); - - let mut tracker = ParallelTrieTracker::default(); - - let mut storage_prefix_sets = std::mem::take(&mut prefix_sets.storage_prefix_sets); - - let storage_root_targets_len = StorageRootTargets::count( - &prefix_sets.account_prefix_set, - &storage_prefix_sets, - ); + /// Creates a new account proof worker. + const fn new( + task_ctx: ProofTaskCtx, + work_rx: CrossbeamReceiver, + worker_id: usize, + storage_work_tx: CrossbeamSender, + available_workers: Arc, + #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, + ) -> Self { + Self { + task_ctx, + work_rx, + worker_id, + storage_work_tx, + available_workers, + #[cfg(feature = "metrics")] + metrics, + } + } - tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); - - let storage_proof_receivers = match dispatch_storage_proofs( - &storage_work_tx, - &targets, - &mut storage_prefix_sets, - collect_branch_node_masks, - multi_added_removed_keys.as_ref(), - ) { - Ok(receivers) => receivers, - Err(error) => { - // Send error through result channel - error!(target: "trie::proof_task", "Failed to dispatch storage proofs: {error}"); - let _ = result_tx.send(ProofResultMessage { - sequence_number: seq, - result: Err(error), - elapsed: start.elapsed(), - state, - }); - continue; - } - }; + /// Runs the worker loop, processing jobs until the channel closes. + /// + /// # Lifecycle + /// + /// 1. Initializes database provider and transaction + /// 2. Advertises availability + /// 3. Processes jobs in a loop: + /// - Receives job from channel + /// - Marks worker as busy + /// - Processes the job + /// - Marks worker as available + /// 4. Shuts down when channel closes + /// + /// # Panic Safety + /// + /// If this function panics, the worker thread terminates but other workers + /// continue operating and the system degrades gracefully. 
+ fn run(self) { + let Self { + task_ctx, + work_rx, + worker_id, + storage_work_tx, + available_workers, + #[cfg(feature = "metrics")] + metrics, + } = self; + + // Create provider from factory + let provider = task_ctx + .factory + .database_provider_ro() + .expect("Account worker failed to initialize: unable to create provider"); + let proof_tx = ProofTaskTx::new(provider, task_ctx.prefix_sets, worker_id); - // Use the missed leaves cache passed from the multiproof manager - let account_prefix_set = std::mem::take(&mut prefix_sets.account_prefix_set); + trace!( + target: "trie::proof_task", + worker_id, + "Account worker started" + ); - let ctx = AccountMultiproofParams { - targets: &targets, - prefix_set: account_prefix_set, - collect_branch_node_masks, - multi_added_removed_keys: multi_added_removed_keys.as_ref(), - storage_proof_receivers, - missed_leaves_storage_roots: missed_leaves_storage_roots.as_ref(), - }; + let mut account_proofs_processed = 0u64; + let mut account_nodes_processed = 0u64; - let result = build_account_multiproof_with_storage_roots( - &proof_tx.provider, - ctx, - &mut tracker, - ); + // Count this worker as available only after successful initialization. + available_workers.fetch_add(1, Ordering::Relaxed); - let proof_elapsed = proof_start.elapsed(); - let total_elapsed = start.elapsed(); - let stats = tracker.finish(); - let result = result.map(|proof| ProofResult::AccountMultiproof { proof, stats }); - account_proofs_processed += 1; + while let Ok(job) = work_rx.recv() { + // Mark worker as busy. + available_workers.fetch_sub(1, Ordering::Relaxed); - // Send result to MultiProofTask - if result_tx - .send(ProofResultMessage { - sequence_number: seq, - result, - elapsed: total_elapsed, - state, - }) - .is_err() - { - trace!( - target: "trie::proof_task", + match job { + AccountWorkerJob::AccountMultiproof { input } => { + Self::process_account_multiproof( worker_id, - account_proofs_processed, - "Account multiproof receiver dropped, discarding result" + &proof_tx, + storage_work_tx.clone(), + *input, + &mut account_proofs_processed, ); } - trace!( - target: "trie::proof_task", - proof_time_us = proof_elapsed.as_micros(), - total_elapsed_us = total_elapsed.as_micros(), - total_processed = account_proofs_processed, - "Account multiproof completed" - ); - drop(_span_guard); - - // Mark worker as available again. - available_workers.fetch_add(1, Ordering::Relaxed); + AccountWorkerJob::BlindedAccountNode { path, result_sender } => { + Self::process_blinded_node( + worker_id, + &proof_tx, + path, + result_sender, + &mut account_nodes_processed, + ); + } } - AccountWorkerJob::BlindedAccountNode { path, result_sender } => { - let span = debug_span!( - target: "trie::proof_task", - "Blinded account node calculation", - ?path, - worker_id, - ); - let _span_guard = span.enter(); + // Mark worker as available again. 
+ available_workers.fetch_add(1, Ordering::Relaxed); + } - trace!( - target: "trie::proof_task", - "Processing blinded account node" - ); + trace!( + target: "trie::proof_task", + worker_id, + account_proofs_processed, + account_nodes_processed, + "Account worker shutting down" + ); - let account_node_provider = ProofBlindedAccountProvider::new( - &proof_tx.provider, - &proof_tx.provider, - proof_tx.prefix_sets.clone(), - ); + #[cfg(feature = "metrics")] + metrics.record_account_nodes(account_nodes_processed as usize); + } - let start = Instant::now(); - let result = account_node_provider.trie_node(&path); - let elapsed = start.elapsed(); + /// Processes an account multiproof request. + fn process_account_multiproof( + worker_id: usize, + proof_tx: &ProofTaskTx, + storage_work_tx: CrossbeamSender, + input: AccountMultiproofInput, + account_proofs_processed: &mut u64, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { + let AccountMultiproofInput { + targets, + mut prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys, + missed_leaves_storage_roots, + proof_result_sender: + ProofResultContext { sender: result_tx, sequence_number: seq, state, start_time: start }, + } = input; - account_nodes_processed += 1; + let span = debug_span!( + target: "trie::proof_task", + "Account multiproof calculation", + targets = targets.len(), + worker_id, + ); + let _span_guard = span.enter(); - if result_sender.send(result).is_err() { - trace!( - target: "trie::proof_task", - worker_id, - ?path, - account_nodes_processed, - "Blinded account node receiver dropped, discarding result" - ); - } + trace!( + target: "trie::proof_task", + "Processing account multiproof" + ); - trace!( - target: "trie::proof_task", - node_time_us = elapsed.as_micros(), - total_processed = account_nodes_processed, - "Blinded account node completed" - ); - drop(_span_guard); + let proof_start = Instant::now(); - // Mark worker as available again. 
- available_workers.fetch_add(1, Ordering::Relaxed); + let mut tracker = ParallelTrieTracker::default(); + + let mut storage_prefix_sets = std::mem::take(&mut prefix_sets.storage_prefix_sets); + + let storage_root_targets_len = + StorageRootTargets::count(&prefix_sets.account_prefix_set, &storage_prefix_sets); + + tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); + + let storage_proof_receivers = match dispatch_storage_proofs( + &storage_work_tx, + &targets, + &mut storage_prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys.as_ref(), + ) { + Ok(receivers) => receivers, + Err(error) => { + // Send error through result channel + error!(target: "trie::proof_task", "Failed to dispatch storage proofs: {error}"); + let _ = result_tx.send(ProofResultMessage { + sequence_number: seq, + result: Err(error), + elapsed: start.elapsed(), + state, + }); + return; } + }; + + // Use the missed leaves cache passed from the multiproof manager + let account_prefix_set = std::mem::take(&mut prefix_sets.account_prefix_set); + + let ctx = AccountMultiproofParams { + targets: &targets, + prefix_set: account_prefix_set, + collect_branch_node_masks, + multi_added_removed_keys: multi_added_removed_keys.as_ref(), + storage_proof_receivers, + missed_leaves_storage_roots: missed_leaves_storage_roots.as_ref(), + }; + + let result = + build_account_multiproof_with_storage_roots(&proof_tx.provider, ctx, &mut tracker); + + let proof_elapsed = proof_start.elapsed(); + let total_elapsed = start.elapsed(); + let stats = tracker.finish(); + let result = result.map(|proof| ProofResult::AccountMultiproof { proof, stats }); + *account_proofs_processed += 1; + + // Send result to MultiProofTask + if result_tx + .send(ProofResultMessage { + sequence_number: seq, + result, + elapsed: total_elapsed, + state, + }) + .is_err() + { + trace!( + target: "trie::proof_task", + worker_id, + account_proofs_processed, + "Account multiproof receiver dropped, discarding result" + ); } + + trace!( + target: "trie::proof_task", + proof_time_us = proof_elapsed.as_micros(), + total_elapsed_us = total_elapsed.as_micros(), + total_processed = account_proofs_processed, + "Account multiproof completed" + ); } - trace!( - target: "trie::proof_task", - worker_id, - account_proofs_processed, - account_nodes_processed, - "Account worker shutting down" - ); + /// Processes a blinded account node lookup request. 
+ fn process_blinded_node( + worker_id: usize, + proof_tx: &ProofTaskTx, + path: Nibbles, + result_sender: Sender, + account_nodes_processed: &mut u64, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { + let span = debug_span!( + target: "trie::proof_task", + "Blinded account node calculation", + ?path, + worker_id, + ); + let _span_guard = span.enter(); - #[cfg(feature = "metrics")] - metrics.record_account_nodes(account_nodes_processed as usize); + trace!( + target: "trie::proof_task", + "Processing blinded account node" + ); + + let start = Instant::now(); + let result = proof_tx.process_blinded_account_node(&path); + let elapsed = start.elapsed(); + + *account_nodes_processed += 1; + + if result_sender.send(result).is_err() { + trace!( + target: "trie::proof_task", + worker_id, + ?path, + account_nodes_processed, + "Blinded account node receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + node_time_us = elapsed.as_micros(), + total_processed = account_nodes_processed, + "Blinded account node completed" + ); + } } + /// Builds an account multiproof by consuming storage proof receivers lazily during trie walk. /// /// This is a helper function used by account workers to build the account subtree proof @@ -1282,6 +1441,7 @@ pub struct AccountMultiproofInput { pub proof_result_sender: ProofResultContext, } +/// Parameters for building an account multiproof with pre-computed storage roots. struct AccountMultiproofParams<'a> { /// The targets for which to compute the multiproof. targets: &'a MultiProofTargets, @@ -1297,6 +1457,7 @@ struct AccountMultiproofParams<'a> { missed_leaves_storage_roots: &'a DashMap, } +/// Internal message for account workers. #[derive(Debug)] enum AccountWorkerJob { /// Account multiproof computation request From 3827e5cb1db1c9074035129dff6bda6e71418e64 Mon Sep 17 00:00:00 2001 From: Karl Yu <43113774+0xKarl98@users.noreply.github.com> Date: Wed, 29 Oct 2025 16:30:29 +0800 Subject: [PATCH 239/371] perf: wrap tx with Arc to avoid deep cloning (#19350) --- crates/engine/tree/src/tree/payload_processor/mod.rs | 2 +- crates/engine/tree/src/tree/payload_processor/prewarm.rs | 2 +- crates/evm/evm/src/engine.rs | 4 ++-- crates/evm/evm/src/execute.rs | 8 ++++---- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 7e54d8a38e2..42587737298 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -318,7 +318,7 @@ where let (execute_tx, execute_rx) = mpsc::channel(); self.executor.spawn_blocking(move || { for tx in transactions { - let tx = tx.map(|tx| WithTxEnv { tx_env: tx.to_tx_env(), tx }); + let tx = tx.map(|tx| WithTxEnv { tx_env: tx.to_tx_env(), tx: Arc::new(tx) }); // only send Ok(_) variants to prewarming task if let Ok(tx) = &tx { let _ = prewarm_tx.send(tx.clone()); diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index de831d1858b..ddbfc0715a1 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -521,7 +521,7 @@ where done_tx: Sender<()>, ) -> mpsc::Sender> where - Tx: ExecutableTxFor + Clone + Send + 'static, + Tx: ExecutableTxFor + Send + 'static, { let (tx, rx) = mpsc::channel(); let ctx = self.clone(); diff --git a/crates/evm/evm/src/engine.rs 
b/crates/evm/evm/src/engine.rs index 5b46a086170..e8316426079 100644 --- a/crates/evm/evm/src/engine.rs +++ b/crates/evm/evm/src/engine.rs @@ -23,14 +23,14 @@ pub trait ExecutableTxIterator: Iterator> + Send + 'static { /// The executable transaction type iterator yields. - type Tx: ExecutableTxFor + Clone + Send + 'static; + type Tx: ExecutableTxFor + Clone + Send + Sync + 'static; /// Errors that may occur while recovering or decoding transactions. type Error: core::error::Error + Send + Sync + 'static; } impl ExecutableTxIterator for T where - Tx: ExecutableTxFor + Clone + Send + 'static, + Tx: ExecutableTxFor + Clone + Send + Sync + 'static, Err: core::error::Error + Send + Sync + 'static, T: Iterator> + Send + 'static, { diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 76a9b078394..fca8f6241d5 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -1,7 +1,7 @@ //! Traits for execution. use crate::{ConfigureEvm, Database, OnStateHook, TxEnvFor}; -use alloc::{boxed::Box, vec::Vec}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::{BlockHeader, Header}; use alloy_eips::eip2718::WithEncoded; pub use alloy_evm::block::{BlockExecutor, BlockExecutorFactory}; @@ -447,7 +447,7 @@ impl ExecutorTx for Recovered ExecutorTx for WithTxEnv<<::Evm as Evm>::Tx, T> where - T: ExecutorTx, + T: ExecutorTx + Clone, Executor: BlockExecutor, <::Evm as Evm>::Tx: Clone, Self: RecoveredTx, @@ -457,7 +457,7 @@ where } fn into_recovered(self) -> Recovered { - self.tx.into_recovered() + Arc::unwrap_or_clone(self.tx).into_recovered() } } @@ -641,7 +641,7 @@ pub struct WithTxEnv { /// The transaction environment for EVM. pub tx_env: TxEnv, /// The recovered transaction. - pub tx: T, + pub tx: Arc, } impl> RecoveredTx for WithTxEnv { From 17a984929bdca0d908cf51e8fd061be4b1e88e06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90=E1=BA=A1t=20Nguy=E1=BB=85n?= Date: Wed, 29 Oct 2025 17:00:37 +0700 Subject: [PATCH 240/371] feat: impl a function to create new instance of TransactionEvents (#19375) Co-authored-by: Neo Krypt Co-authored-by: Matthias Seitz --- crates/transaction-pool/src/pool/listener.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/transaction-pool/src/pool/listener.rs b/crates/transaction-pool/src/pool/listener.rs index 280fb4ad10c..64eb756f38a 100644 --- a/crates/transaction-pool/src/pool/listener.rs +++ b/crates/transaction-pool/src/pool/listener.rs @@ -29,6 +29,11 @@ pub struct TransactionEvents { } impl TransactionEvents { + /// Create a new instance of this stream. 
+ pub const fn new(hash: TxHash, events: UnboundedReceiver) -> Self { + Self { hash, events } + } + /// The hash for this transaction pub const fn hash(&self) -> TxHash { self.hash From 527c24df6d666c0199720c2b2546dc13a7dbc22d Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Wed, 29 Oct 2025 11:34:51 +0100 Subject: [PATCH 241/371] fix(trie): use block hash in OverlayStateProviderFactory (#19353) --- .../engine/tree/src/tree/payload_validator.rs | 43 ++++++------------- .../provider/src/providers/state/overlay.rs | 34 +++++++++------ 2 files changed, 34 insertions(+), 43 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index ecc475dd53a..b30eae1d1cb 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -15,7 +15,7 @@ use crate::tree::{ use alloy_consensus::transaction::Either; use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_evm::Evm; -use alloy_primitives::{BlockNumber, B256}; +use alloy_primitives::B256; use reth_chain_state::{CanonicalInMemoryState, ExecutedBlock}; use reth_consensus::{ConsensusError, FullConsensus}; use reth_engine_primitives::{ @@ -33,8 +33,8 @@ use reth_primitives_traits::{ AlloyBlockHeader, BlockTy, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, }; use reth_provider::{ - providers::OverlayStateProviderFactory, BlockExecutionOutput, BlockNumReader, BlockReader, - DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashedPostStateProvider, ProviderError, + providers::OverlayStateProviderFactory, BlockExecutionOutput, BlockReader, + DatabaseProviderFactory, ExecutionOutcome, HashedPostStateProvider, ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProvider, StateProviderFactory, StateReader, StateRootProvider, TrieReader, }; @@ -680,10 +680,7 @@ where hashed_state: &HashedPostState, state: &EngineApiTreeState, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { - let provider = self.provider.database_provider_ro()?; - - let (mut input, block_number) = - self.compute_trie_input(provider, parent_hash, state, None)?; + let (mut input, block_hash) = self.compute_trie_input(parent_hash, state, None)?; // Extend with block we are validating root for. input.append_ref(hashed_state); @@ -693,7 +690,7 @@ where let (_, multiproof_config) = MultiProofConfig::from_input(input); let factory = OverlayStateProviderFactory::new(self.provider.clone()) - .with_block_number(Some(block_number)) + .with_block_hash(Some(block_hash)) .with_trie_overlay(Some(multiproof_config.nodes_sorted)) .with_hashed_state_overlay(Some(multiproof_config.state_sorted)); @@ -806,12 +803,8 @@ where // Compute trie input let trie_input_start = Instant::now(); - let (trie_input, block_number) = self.compute_trie_input( - self.provider.database_provider_ro()?, - parent_hash, - state, - allocated_trie_input, - )?; + let (trie_input, block_hash) = + self.compute_trie_input(parent_hash, state, allocated_trie_input)?; self.metrics .block_validation @@ -827,7 +820,7 @@ where // multiproofs. 
let multiproof_provider_factory = OverlayStateProviderFactory::new(self.provider.clone()) - .with_block_number(Some(block_number)) + .with_block_hash(Some(block_hash)) .with_trie_overlay(Some(multiproof_config.nodes_sorted)) .with_hashed_state_overlay(Some(multiproof_config.state_sorted)); @@ -976,38 +969,30 @@ where skip_all, fields(parent_hash) )] - fn compute_trie_input( + fn compute_trie_input( &self, - provider: TP, parent_hash: B256, state: &EngineApiTreeState, allocated_trie_input: Option, - ) -> ProviderResult<(TrieInput, BlockNumber)> { + ) -> ProviderResult<(TrieInput, B256)> { // get allocated trie input or use a default trie input let mut input = allocated_trie_input.unwrap_or_default(); - let (historical, blocks) = state - .tree_state - .blocks_by_hash(parent_hash) - .map_or_else(|| (parent_hash.into(), vec![]), |(hash, blocks)| (hash.into(), blocks)); + let (block_hash, blocks) = + state.tree_state.blocks_by_hash(parent_hash).unwrap_or_else(|| (parent_hash, vec![])); if blocks.is_empty() { debug!(target: "engine::tree::payload_validator", "Parent found on disk"); } else { - debug!(target: "engine::tree::payload_validator", %historical, blocks = blocks.len(), "Parent found in memory"); + debug!(target: "engine::tree::payload_validator", historical = ?block_hash, blocks = blocks.len(), "Parent found in memory"); } - // Convert the historical block to the block number - let block_number = provider - .convert_hash_or_number(historical)? - .ok_or_else(|| ProviderError::BlockHashNotFound(historical.as_hash().unwrap()))?; - // Extend with contents of parent in-memory blocks. input.extend_with_blocks( blocks.iter().rev().map(|block| (block.hashed_state(), block.trie_updates())), ); - Ok((input, block_number)) + Ok((input, block_hash)) } } diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 28f04f9f767..6a3ba7da124 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -4,8 +4,8 @@ use reth_errors::ProviderError; use reth_prune_types::PruneSegment; use reth_stages_types::StageId; use reth_storage_api::{ - DBProvider, DatabaseProviderFactory, DatabaseProviderROFactory, PruneCheckpointReader, - StageCheckpointReader, TrieReader, + BlockNumReader, DBProvider, DatabaseProviderFactory, DatabaseProviderROFactory, + PruneCheckpointReader, StageCheckpointReader, TrieReader, }; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, @@ -27,8 +27,8 @@ use tracing::debug; pub struct OverlayStateProviderFactory { /// The underlying database provider factory factory: F, - /// Optional block number for collecting reverts - block_number: Option, + /// Optional block hash for collecting reverts + block_hash: Option, /// Optional trie overlay trie_overlay: Option>, /// Optional hashed state overlay @@ -38,19 +38,19 @@ pub struct OverlayStateProviderFactory { impl OverlayStateProviderFactory { /// Create a new overlay state provider factory pub const fn new(factory: F) -> Self { - Self { factory, block_number: None, trie_overlay: None, hashed_state_overlay: None } + Self { factory, block_hash: None, trie_overlay: None, hashed_state_overlay: None } } - /// Set the block number for collecting reverts. All state will be reverted to the point + /// Set the block hash for collecting reverts. All state will be reverted to the point /// _after_ this block has been processed. 
- pub const fn with_block_number(mut self, block_number: Option) -> Self { - self.block_number = block_number; + pub const fn with_block_hash(mut self, block_hash: Option) -> Self { + self.block_hash = block_hash; self } /// Set the trie overlay. /// - /// This overlay will be applied on top of any reverts applied via `with_block_number`. + /// This overlay will be applied on top of any reverts applied via `with_block_hash`. pub fn with_trie_overlay(mut self, trie_overlay: Option>) -> Self { self.trie_overlay = trie_overlay; self @@ -58,7 +58,7 @@ impl OverlayStateProviderFactory { /// Set the hashed state overlay /// - /// This overlay will be applied on top of any reverts applied via `with_block_number`. + /// This overlay will be applied on top of any reverts applied via `with_block_hash`. pub fn with_hashed_state_overlay( mut self, hashed_state_overlay: Option>, @@ -127,7 +127,7 @@ where impl DatabaseProviderROFactory for OverlayStateProviderFactory where F: DatabaseProviderFactory, - F::Provider: TrieReader + StageCheckpointReader + PruneCheckpointReader, + F::Provider: TrieReader + StageCheckpointReader + PruneCheckpointReader + BlockNumReader, { type Provider = OverlayStateProvider; @@ -136,8 +136,13 @@ where // Get a read-only provider let provider = self.factory.database_provider_ro()?; - // If block_number is provided, collect reverts - let (trie_updates, hashed_state) = if let Some(from_block) = self.block_number { + // If block_hash is provided, collect reverts + let (trie_updates, hashed_state) = if let Some(block_hash) = self.block_hash { + // Convert block hash to block number + let from_block = provider + .convert_hash_or_number(block_hash.into())? + .ok_or_else(|| ProviderError::BlockHashNotFound(block_hash))?; + // Validate that we have sufficient changesets for the requested block self.validate_changesets_availability(&provider, from_block)?; @@ -162,6 +167,7 @@ where debug!( target: "providers::state::overlay", + ?block_hash, ?from_block, num_trie_updates = ?trie_updates_mut.total_len(), num_state_updates = ?hashed_state_mut.total_len(), @@ -170,7 +176,7 @@ where (Arc::new(trie_updates_mut), Arc::new(hashed_state_mut)) } else { - // If no block_number, use overlays directly or defaults + // If no block_hash, use overlays directly or defaults let trie_updates = self.trie_overlay.clone().unwrap_or_else(|| Arc::new(TrieUpdatesSorted::default())); let hashed_state = self From 644be056591aa003dd412ab969d6b71822d7dbef Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 29 Oct 2025 11:50:51 +0000 Subject: [PATCH 242/371] feat: add pruning of transactions from static-files (#19241) --- .github/assets/check_wasm.sh | 1 + crates/node/builder/src/launch/common.rs | 41 +--- crates/node/builder/src/launch/engine.rs | 3 - crates/node/core/src/args/pruning.rs | 6 +- crates/prune/prune/src/segments/mod.rs | 4 +- crates/prune/prune/src/segments/set.rs | 8 +- .../prune/prune/src/segments/user/bodies.rs | 210 ++++++++++++++++++ crates/prune/prune/src/segments/user/mod.rs | 2 + crates/prune/types/src/segment.rs | 3 + crates/static-file/types/src/segment.rs | 10 +- .../src/providers/static_file/manager.rs | 61 +++-- 11 files changed, 280 insertions(+), 69 deletions(-) create mode 100644 crates/prune/prune/src/segments/user/bodies.rs diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 3c72a8d189e..8a380837b10 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -68,6 +68,7 @@ exclude_crates=( 
reth-payload-builder # reth-metrics reth-provider # tokio reth-prune # tokio + reth-prune-static-files # reth-provider reth-stages-api # reth-provider, reth-prune reth-static-file # tokio reth-transaction-pool # c-kzg diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 92e3a7aa811..080945a76cc 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -34,12 +34,11 @@ use crate::{ hooks::OnComponentInitializedHook, BuilderContext, ExExLauncher, NodeAdapter, PrimitivesTy, }; -use alloy_consensus::BlockHeader as _; use alloy_eips::eip2124::Head; use alloy_primitives::{BlockNumber, B256}; use eyre::Context; use rayon::ThreadPoolBuilder; -use reth_chainspec::{Chain, EthChainSpec, EthereumHardfork, EthereumHardforks}; +use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; use reth_consensus::noop::NoopConsensus; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; @@ -67,8 +66,8 @@ use reth_node_metrics::{ }; use reth_provider::{ providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider}, - BlockHashReader, BlockNumReader, BlockReaderIdExt, ProviderError, ProviderFactory, - ProviderResult, StageCheckpointReader, StaticFileProviderFactory, + BlockHashReader, BlockNumReader, ProviderError, ProviderFactory, ProviderResult, + StageCheckpointReader, StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_builder::config::RethRpcServerConfig; @@ -945,40 +944,6 @@ where Ok(None) } - /// Expire the pre-merge transactions if the node is configured to do so and the chain has a - /// merge block. - /// - /// If the node is configured to prune pre-merge transactions and it has synced past the merge - /// block, it will delete the pre-merge transaction static files if they still exist. - pub fn expire_pre_merge_transactions(&self) -> eyre::Result<()> - where - T: FullNodeTypes, - { - if self.node_config().pruning.bodies_pre_merge && - let Some(merge_block) = self - .chain_spec() - .ethereum_fork_activation(EthereumHardfork::Paris) - .block_number() - { - // Ensure we only expire transactions after we synced past the merge block. - let Some(latest) = self.blockchain_db().latest_header()? else { return Ok(()) }; - if latest.number() > merge_block { - let provider = self.blockchain_db().static_file_provider(); - if provider - .get_lowest_transaction_static_file_block() - .is_some_and(|lowest| lowest < merge_block) - { - info!(target: "reth::cli", merge_block, "Expiring pre-merge transactions"); - provider.delete_transactions_below(merge_block)?; - } else { - debug!(target: "reth::cli", merge_block, "No pre-merge transactions to expire"); - } - } - } - - Ok(()) - } - /// Returns the metrics sender. pub fn sync_metrics_tx(&self) -> UnboundedSender { self.right().db_provider_container.metrics_sender.clone() diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 93309b65b19..9faf9fcfa95 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -117,9 +117,6 @@ impl EngineNodeLauncher { })? 
.with_components(components_builder, on_component_initialized).await?; - // Try to expire pre-merge transaction history if configured - ctx.expire_pre_merge_transactions()?; - // spawn exexs if any let maybe_exex_manager_handle = ctx.launch_exex(installed_exex).await?; diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index 2ff67446bbf..0bd72e207ea 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -131,8 +131,10 @@ impl PruningArgs { receipts: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - // TODO: set default to pre-merge block if available - bodies_history: None, + bodies_history: chain_spec + .ethereum_fork_activation(EthereumHardfork::Paris) + .block_number() + .map(PruneMode::Before), merkle_changesets: PruneMode::Distance(MINIMUM_PRUNING_DISTANCE), #[expect(deprecated)] receipts_log_filter: (), diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index 43be33a75d1..f4df3d2a0dd 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -10,8 +10,8 @@ pub use set::SegmentSet; use std::{fmt::Debug, ops::RangeInclusive}; use tracing::error; pub use user::{ - AccountHistory, MerkleChangeSets, Receipts as UserReceipts, SenderRecovery, StorageHistory, - TransactionLookup, + AccountHistory, Bodies, MerkleChangeSets, Receipts as UserReceipts, SenderRecovery, + StorageHistory, TransactionLookup, }; /// A segment represents a pruning of some portion of the data. diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 4538773d7d2..acd71f52e1b 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -1,6 +1,6 @@ use crate::segments::{ - AccountHistory, MerkleChangeSets, Segment, SenderRecovery, StorageHistory, TransactionLookup, - UserReceipts, + AccountHistory, Bodies, MerkleChangeSets, Segment, SenderRecovery, StorageHistory, + TransactionLookup, UserReceipts, }; use alloy_eips::eip2718::Encodable2718; use reth_db_api::{table::Value, transaction::DbTxMut}; @@ -66,12 +66,14 @@ where receipts, account_history, storage_history, - bodies_history: _, + bodies_history, merkle_changesets, receipts_log_filter: (), } = prune_modes; Self::default() + // Bodies - run first since file deletion is fast + .segment_opt(bodies_history.map(Bodies::new)) // Merkle changesets .segment(MerkleChangeSets::new(merkle_changesets)) // Account history diff --git a/crates/prune/prune/src/segments/user/bodies.rs b/crates/prune/prune/src/segments/user/bodies.rs new file mode 100644 index 00000000000..db050234d96 --- /dev/null +++ b/crates/prune/prune/src/segments/user/bodies.rs @@ -0,0 +1,210 @@ +use crate::{ + segments::{PruneInput, Segment}, + PrunerError, +}; +use reth_provider::{BlockReader, StaticFileProviderFactory}; +use reth_prune_types::{ + PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, +}; +use reth_static_file_types::StaticFileSegment; + +/// Segment responsible for pruning transactions in static files. +/// +/// This segment is controlled by the `bodies_history` configuration. +#[derive(Debug)] +pub struct Bodies { + mode: PruneMode, +} + +impl Bodies { + /// Creates a new [`Bodies`] segment with the given prune mode. 
+    pub const fn new(mode: PruneMode) -> Self {
+        Self { mode }
+    }
+}
+
+impl<Provider> Segment<Provider> for Bodies
+where
+    Provider: StaticFileProviderFactory + BlockReader,
+{
+    fn segment(&self) -> PruneSegment {
+        PruneSegment::Bodies
+    }
+
+    fn mode(&self) -> Option<PruneMode> {
+        Some(self.mode)
+    }
+
+    fn purpose(&self) -> PrunePurpose {
+        PrunePurpose::User
+    }
+
+    fn prune(&self, provider: &Provider, input: PruneInput) -> Result<SegmentOutput, PrunerError> {
+        let deleted_headers = provider
+            .static_file_provider()
+            .delete_segment_below_block(StaticFileSegment::Transactions, input.to_block + 1)?;
+
+        if deleted_headers.is_empty() {
+            return Ok(SegmentOutput::done())
+        }
+
+        let tx_ranges = deleted_headers.iter().filter_map(|header| header.tx_range());
+
+        let pruned = tx_ranges.clone().map(|range| range.len()).sum::<u64>() as usize;
+
+        Ok(SegmentOutput {
+            progress: PruneProgress::Finished,
+            pruned,
+            checkpoint: Some(SegmentOutputCheckpoint {
+                block_number: Some(input.to_block),
+                tx_number: tx_ranges.map(|range| range.end()).max(),
+            }),
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::Pruner;
+    use alloy_primitives::BlockNumber;
+    use reth_exex_types::FinishedExExHeight;
+    use reth_provider::{
+        test_utils::{create_test_provider_factory, MockNodeTypesWithDB},
+        ProviderFactory, StaticFileWriter,
+    };
+    use reth_prune_types::{PruneMode, PruneProgress, PruneSegment};
+    use reth_static_file_types::{
+        SegmentHeader, SegmentRangeInclusive, StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE,
+    };
+
+    /// Creates empty static file jars at 500k block intervals up to the tip block.
+    ///
+    /// Each jar contains sequential transaction ranges for testing deletion logic.
+    fn setup_static_file_jars<P: StaticFileProviderFactory>(provider: &P, tip_block: u64) {
+        let num_jars = (tip_block + 1) / DEFAULT_BLOCKS_PER_STATIC_FILE;
+        let txs_per_jar = 1000;
+        let static_file_provider = provider.static_file_provider();
+
+        let mut writer =
+            static_file_provider.latest_writer(StaticFileSegment::Transactions).unwrap();
+
+        for jar_idx in 0..num_jars {
+            let block_start = jar_idx * DEFAULT_BLOCKS_PER_STATIC_FILE;
+            let block_end = ((jar_idx + 1) * DEFAULT_BLOCKS_PER_STATIC_FILE - 1).min(tip_block);
+
+            let tx_start = jar_idx * txs_per_jar;
+            let tx_end = tx_start + txs_per_jar - 1;
+
+            *writer.user_header_mut() = SegmentHeader::new(
+                SegmentRangeInclusive::new(block_start, block_end),
+                Some(SegmentRangeInclusive::new(block_start, block_end)),
+                Some(SegmentRangeInclusive::new(tx_start, tx_end)),
+                StaticFileSegment::Transactions,
+            );
+
+            writer.inner().set_dirty();
+            writer.commit().expect("commit empty jar");
+
+            if jar_idx < num_jars - 1 {
+                writer.increment_block(block_end + 1).expect("increment block");
+            }
+        }
+
+        static_file_provider.initialize_index().expect("initialize index");
+    }
+
+    struct PruneTestCase {
+        prune_mode: PruneMode,
+        expected_pruned: usize,
+        expected_lowest_block: Option<BlockNumber>,
+    }
+
+    fn run_prune_test(
+        factory: &ProviderFactory<MockNodeTypesWithDB>,
+        finished_exex_height_rx: &tokio::sync::watch::Receiver<FinishedExExHeight>,
+        test_case: PruneTestCase,
+        tip: BlockNumber,
+    ) {
+        let bodies = Bodies::new(test_case.prune_mode);
+        let segments: Vec>> = vec![Box::new(bodies)];
+
+        let mut pruner = Pruner::new_with_factory(
+            factory.clone(),
+            segments,
+            5,
+            10000,
+            None,
+            finished_exex_height_rx.clone(),
+        );
+
+        let result = pruner.run(tip).expect("pruner run");
+
+        assert_eq!(result.progress, PruneProgress::Finished);
+        assert_eq!(result.segments.len(), 1);
+
+        let (segment, output) = &result.segments[0];
+        assert_eq!(*segment, PruneSegment::Bodies);
+
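+        // Only fully deleted jars contribute to `output.pruned`; the jar that still contains
+        // the target block is never deleted, so its transactions are not counted.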
assert_eq!(output.pruned, test_case.expected_pruned); + + let static_provider = factory.static_file_provider(); + assert_eq!( + static_provider.get_lowest_static_file_block(StaticFileSegment::Transactions), + test_case.expected_lowest_block + ); + assert_eq!( + static_provider.get_highest_static_file_block(StaticFileSegment::Transactions), + Some(tip) + ); + } + + #[test] + fn bodies_prune_through_pruner() { + let factory = create_test_provider_factory(); + let tip = 2_499_999; + setup_static_file_jars(&factory, tip); + + let (_, finished_exex_height_rx) = tokio::sync::watch::channel(FinishedExExHeight::NoExExs); + + let test_cases = vec![ + // Test 1: PruneMode::Before(750_000) → deletes jar 1 (0-499_999) + PruneTestCase { + prune_mode: PruneMode::Before(750_000), + expected_pruned: 1000, + expected_lowest_block: Some(999_999), + }, + // Test 2: PruneMode::Before(850_000) → no deletion (jar 2: 500_000-999_999 contains + // target) + PruneTestCase { + prune_mode: PruneMode::Before(850_000), + expected_pruned: 0, + expected_lowest_block: Some(999_999), + }, + // Test 3: PruneMode::Before(1_599_999) → deletes jar 2 (500_000-999_999) and jar 3 + // (1_000_000-1_499_999) + PruneTestCase { + prune_mode: PruneMode::Before(1_599_999), + expected_pruned: 2000, + expected_lowest_block: Some(1_999_999), + }, + // Test 4: PruneMode::Distance(500_000) with tip=2_499_999 → deletes jar 4 + // (1_500_000-1_999_999) + PruneTestCase { + prune_mode: PruneMode::Distance(500_000), + expected_pruned: 1000, + expected_lowest_block: Some(2_499_999), + }, + // Test 5: PruneMode::Before(2_300_000) → no deletion (jar 5: 2_000_000-2_499_999 + // contains target) + PruneTestCase { + prune_mode: PruneMode::Before(2_300_000), + expected_pruned: 0, + expected_lowest_block: Some(2_499_999), + }, + ]; + + for test_case in test_cases { + run_prune_test(&factory, &finished_exex_height_rx, test_case, tip); + } + } +} diff --git a/crates/prune/prune/src/segments/user/mod.rs b/crates/prune/prune/src/segments/user/mod.rs index bdbc27f22f0..ef7ae05a9d5 100644 --- a/crates/prune/prune/src/segments/user/mod.rs +++ b/crates/prune/prune/src/segments/user/mod.rs @@ -1,4 +1,5 @@ mod account_history; +mod bodies; mod history; mod merkle_change_sets; mod receipts; @@ -7,6 +8,7 @@ mod storage_history; mod transaction_lookup; pub use account_history::AccountHistory; +pub use bodies::Bodies; pub use merkle_change_sets::MerkleChangeSets; pub use receipts::Receipts; pub use sender_recovery::SenderRecovery; diff --git a/crates/prune/types/src/segment.rs b/crates/prune/types/src/segment.rs index cfc812a1a0e..faab12c70ad 100644 --- a/crates/prune/types/src/segment.rs +++ b/crates/prune/types/src/segment.rs @@ -36,6 +36,8 @@ pub enum PruneSegment { /// Prune segment responsible for all rows in `AccountsTrieChangeSets` and /// `StoragesTrieChangeSets` table. MerkleChangeSets, + /// Prune segment responsible for bodies (transactions in static files). + Bodies, } #[cfg(test)] @@ -56,6 +58,7 @@ impl PruneSegment { Self::AccountHistory | Self::StorageHistory | Self::MerkleChangeSets | + Self::Bodies | Self::Receipts => MINIMUM_PRUNING_DISTANCE, #[expect(deprecated)] #[expect(clippy::match_same_arms)] diff --git a/crates/static-file/types/src/segment.rs b/crates/static-file/types/src/segment.rs index 0458bea1678..ca7d9ef24d5 100644 --- a/crates/static-file/types/src/segment.rs +++ b/crates/static-file/types/src/segment.rs @@ -217,12 +217,12 @@ impl SegmentHeader { /// Number of transactions. 
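    ///
    /// The range is inclusive, so e.g. a transaction range of 10..=19 yields a count of 10.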
pub fn tx_len(&self) -> Option<u64> {
-        self.tx_range.as_ref().map(|r| (r.end() + 1) - r.start())
+        self.tx_range.as_ref().map(|r| r.len())
     }
 
     /// Number of blocks.
     pub fn block_len(&self) -> Option<u64> {
-        self.block_range.as_ref().map(|r| (r.end() + 1) - r.start())
+        self.block_range.as_ref().map(|r| r.len())
     }
 
     /// Increments block end range depending on segment
@@ -329,6 +329,12 @@ impl SegmentRangeInclusive {
     pub const fn end(&self) -> u64 {
         self.end
     }
+
+    /// Returns the length of the inclusive range.
+    #[allow(clippy::len_without_is_empty)]
+    pub const fn len(&self) -> u64 {
+        self.end.saturating_sub(self.start).saturating_add(1)
+    }
 }
 
 impl core::fmt::Display for SegmentRangeInclusive {
diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs
index ea7eec9e9d9..28d13cfbe29 100644
--- a/crates/storage/provider/src/providers/static_file/manager.rs
+++ b/crates/storage/provider/src/providers/static_file/manager.rs
@@ -242,7 +242,7 @@ pub struct StaticFileProviderInner {
     /// `expired_history_height + 1`.
     ///
     /// This is effectively the transaction range that has been expired:
-    /// [`StaticFileProvider::delete_transactions_below`] and mirrors
+    /// [`StaticFileProvider::delete_segment_below_block`] and mirrors
     /// `static_files_min_block[transactions] - blocks_per_file`.
     ///
     /// This additional tracker exists for more efficient lookups because the node must be aware of
@@ -443,43 +443,59 @@ impl StaticFileProvider {
         self.map.remove(&(fixed_block_range_end, segment));
     }
 
-    /// This handles history expiry by deleting all transaction static files below the given block.
+    /// This handles history expiry by deleting all static files for the given segment below the
+    /// given block.
     ///
     /// For example if block is 1M and the blocks per file are 500K this will delete all individual
     /// files below 1M, so 0-499K and 500K-999K.
     ///
     /// This will not delete the file that contains the block itself, because files can only be
     /// removed entirely.
-    pub fn delete_transactions_below(&self, block: BlockNumber) -> ProviderResult<()> {
+    ///
+    /// # Safety
+    ///
+    /// This method will never delete the highest static file for the segment, even if the
+    /// requested block is higher than the highest block in static files. This ensures we always
+    /// maintain at least one static file if any exist.
+    ///
+    /// Returns a list of `SegmentHeader`s from the deleted jars.
+    pub fn delete_segment_below_block(
+        &self,
+        segment: StaticFileSegment,
+        block: BlockNumber,
+    ) -> ProviderResult<Vec<SegmentHeader>> {
         // Nothing to delete if block is 0.
if block == 0 { - return Ok(()) + return Ok(Vec::new()) } + let highest_block = self.get_highest_static_file_block(segment); + let mut deleted_headers = Vec::new(); + loop { - let Some(block_height) = - self.get_lowest_static_file_block(StaticFileSegment::Transactions) - else { - return Ok(()) + let Some(block_height) = self.get_lowest_static_file_block(segment) else { + return Ok(deleted_headers) }; - if block_height >= block { - return Ok(()) + // Stop if we've reached the target block or the highest static file + if block_height >= block || Some(block_height) == highest_block { + return Ok(deleted_headers) } debug!( target: "provider::static_file", + ?segment, ?block_height, - "Deleting transaction static file below block" + "Deleting static file below block" ); // now we need to wipe the static file, this will take care of updating the index and - // advance the lowest tracked block height for the transactions segment. - self.delete_jar(StaticFileSegment::Transactions, block_height) - .inspect_err(|err| { - warn!( target: "provider::static_file", %block_height, ?err, "Failed to delete transaction static file below block") - }) - ?; + // advance the lowest tracked block height for the segment. + let header = self.delete_jar(segment, block_height).inspect_err(|err| { + warn!( target: "provider::static_file", ?segment, %block_height, ?err, "Failed to delete static file below block") + })?; + + deleted_headers.push(header); } } @@ -488,7 +504,13 @@ impl StaticFileProvider { /// CAUTION: destructive. Deletes files on disk. /// /// This will re-initialize the index after deletion, so all files are tracked. - pub fn delete_jar(&self, segment: StaticFileSegment, block: BlockNumber) -> ProviderResult<()> { + /// + /// Returns the `SegmentHeader` of the deleted jar. + pub fn delete_jar( + &self, + segment: StaticFileSegment, + block: BlockNumber, + ) -> ProviderResult { let fixed_block_range = self.find_fixed_range(block); let key = (fixed_block_range.end(), segment); let jar = if let Some((_, jar)) = self.map.remove(&key) { @@ -505,11 +527,12 @@ impl StaticFileProvider { NippyJar::::load(&file).map_err(ProviderError::other)? 
}; + let header = jar.user_header().clone(); jar.delete().map_err(ProviderError::other)?; self.initialize_index()?; - Ok(()) + Ok(header) } /// Given a segment and block range it returns a cached From caaedfadcbee8cd79d3ce3915d0a507e4b529021 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 Oct 2025 13:07:30 +0100 Subject: [PATCH 243/371] chore: bump 1.8.3 (#19379) --- Cargo.lock | 276 +++++++++++++++++++-------------------- Cargo.toml | 2 +- docs/vocs/vocs.config.ts | 2 +- 3 files changed, 140 insertions(+), 140 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7c5012b4b53..0f5f5b87758 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3097,7 +3097,7 @@ dependencies = [ [[package]] name = "ef-test-runner" -version = "1.8.2" +version = "1.8.3" dependencies = [ "clap", "ef-tests", @@ -3105,7 +3105,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3565,7 +3565,7 @@ dependencies = [ [[package]] name = "example-full-contract-state" -version = "1.8.2" +version = "1.8.3" dependencies = [ "eyre", "reth-ethereum", @@ -3704,7 +3704,7 @@ dependencies = [ [[package]] name = "exex-subscription" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "clap", @@ -6125,7 +6125,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.8.2" +version = "1.8.3" dependencies = [ "clap", "reth-cli-util", @@ -7252,7 +7252,7 @@ checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" [[package]] name = "reth" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7299,7 +7299,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7322,7 +7322,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -7361,7 +7361,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7393,7 +7393,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7413,7 +7413,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-genesis", "clap", @@ -7426,7 +7426,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7508,7 +7508,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.8.2" +version = "1.8.3" dependencies = [ "reth-tasks", "tokio", @@ -7517,7 +7517,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7537,7 +7537,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7561,7 +7561,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.8.2" +version = "1.8.3" dependencies = [ "proc-macro2", "quote", @@ -7571,7 +7571,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "eyre", @@ -7588,7 +7588,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.8.2" +version = "1.8.3" dependencies = [ 
"alloy-consensus", "alloy-primitives", @@ -7600,7 +7600,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7614,7 +7614,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7639,7 +7639,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7673,7 +7673,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7703,7 +7703,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7733,7 +7733,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7749,7 +7749,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7775,7 +7775,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7800,7 +7800,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7828,7 +7828,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7866,7 +7866,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7923,7 +7923,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.8.2" +version = "1.8.3" dependencies = [ "aes", "alloy-primitives", @@ -7953,7 +7953,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7976,7 +7976,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8000,7 +8000,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.8.2" +version = "1.8.3" dependencies = [ "futures", "pin-project", @@ -8029,7 +8029,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8100,7 +8100,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8127,7 +8127,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8149,7 +8149,7 @@ dependencies = [ [[package]] name = "reth-era-downloader" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "bytes", @@ -8166,7 +8166,7 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8192,7 +8192,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.8.2" +version = "1.8.3" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -8202,7 +8202,7 @@ 
dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8240,7 +8240,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8265,7 +8265,7 @@ dependencies = [ [[package]] name = "reth-ethereum" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -8305,7 +8305,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.8.2" +version = "1.8.3" dependencies = [ "clap", "eyre", @@ -8329,7 +8329,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8345,7 +8345,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8363,7 +8363,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8376,7 +8376,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8404,7 +8404,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8431,7 +8431,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "rayon", @@ -8441,7 +8441,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8465,7 +8465,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8489,7 +8489,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-evm", "alloy-primitives", @@ -8501,7 +8501,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8521,7 +8521,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8565,7 +8565,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "eyre", @@ -8596,7 +8596,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8613,7 +8613,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.8.2" +version = "1.8.3" dependencies = [ "serde", "serde_json", @@ -8622,7 +8622,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8655,7 +8655,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.8.2" +version = "1.8.3" dependencies = [ "bytes", "futures", @@ -8677,7 +8677,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.8.2" +version = "1.8.3" dependencies = [ "bitflags 2.10.0", "byteorder", @@ -8695,7 +8695,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.8.2" +version = "1.8.3" 
dependencies = [ "bindgen 0.71.1", "cc", @@ -8703,7 +8703,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.8.2" +version = "1.8.3" dependencies = [ "futures", "metrics", @@ -8714,14 +8714,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.8.2" +version = "1.8.3" dependencies = [ "futures-util", "if-addrs", @@ -8735,7 +8735,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8795,7 +8795,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8819,7 +8819,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8841,7 +8841,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8858,7 +8858,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -8871,7 +8871,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.8.2" +version = "1.8.3" dependencies = [ "anyhow", "bincode 1.3.3", @@ -8889,7 +8889,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -8912,7 +8912,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8983,7 +8983,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9036,7 +9036,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-contract", @@ -9089,7 +9089,7 @@ dependencies = [ [[package]] name = "reth-node-ethstats" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9112,7 +9112,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9135,7 +9135,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.8.2" +version = "1.8.3" dependencies = [ "eyre", "http", @@ -9157,7 +9157,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.8.2" +version = "1.8.3" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9168,7 +9168,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.8.2" +version = "1.8.3" dependencies = [ "reth-chainspec", "reth-cli-util", @@ -9208,7 +9208,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9235,7 +9235,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9286,7 +9286,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9317,7 +9317,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.8.2" +version = 
"1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9346,7 +9346,7 @@ dependencies = [ [[package]] name = "reth-optimism-flashblocks" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9385,7 +9385,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9395,7 +9395,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9453,7 +9453,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9492,7 +9492,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9519,7 +9519,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9581,7 +9581,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "reth-codecs", @@ -9593,7 +9593,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9630,7 +9630,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9650,7 +9650,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.8.2" +version = "1.8.3" dependencies = [ "pin-project", "reth-payload-primitives", @@ -9661,7 +9661,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9681,7 +9681,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9690,7 +9690,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -9699,7 +9699,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9721,7 +9721,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9758,7 +9758,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9806,7 +9806,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9836,11 +9836,11 @@ dependencies = [ [[package]] name = "reth-prune-db" -version = "1.8.2" +version = "1.8.3" [[package]] name = "reth-prune-types" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "arbitrary", @@ -9858,7 +9858,7 @@ dependencies = [ [[package]] name = "reth-ress-protocol" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9884,7 +9884,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", 
"alloy-primitives", @@ -9910,7 +9910,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9924,7 +9924,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10007,7 +10007,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-genesis", @@ -10034,7 +10034,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10053,7 +10053,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-network", @@ -10108,7 +10108,7 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-json-rpc", @@ -10135,7 +10135,7 @@ dependencies = [ [[package]] name = "reth-rpc-e2e-tests" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-genesis", "alloy-rpc-types-engine", @@ -10155,7 +10155,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10191,7 +10191,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10234,7 +10234,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10281,7 +10281,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10298,7 +10298,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10313,7 +10313,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10370,7 +10370,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10399,7 +10399,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "arbitrary", @@ -10415,7 +10415,7 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10442,7 +10442,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "assert_matches", @@ -10465,7 +10465,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "clap", @@ -10477,7 +10477,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10499,7 +10499,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10514,7 +10514,7 @@ dependencies = [ [[package]] name = "reth-storage-rpc-provider" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", 
"alloy-eips", @@ -10543,7 +10543,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.8.2" +version = "1.8.3" dependencies = [ "auto_impl", "dyn-clone", @@ -10560,7 +10560,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10575,7 +10575,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.8.2" +version = "1.8.3" dependencies = [ "tokio", "tokio-stream", @@ -10584,7 +10584,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.8.2" +version = "1.8.3" dependencies = [ "clap", "eyre", @@ -10600,7 +10600,7 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" -version = "1.8.2" +version = "1.8.3" dependencies = [ "clap", "eyre", @@ -10616,7 +10616,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10664,7 +10664,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10697,7 +10697,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10730,7 +10730,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10755,7 +10755,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10785,7 +10785,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10818,7 +10818,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse-parallel" -version = "1.8.2" +version = "1.8.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10847,7 +10847,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.8.2" +version = "1.8.3" dependencies = [ "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index 324135b2233..8cccc50dfd4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.8.2" +version = "1.8.3" edition = "2024" rust-version = "1.88" license = "MIT OR Apache-2.0" diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts index 92aee418311..e98af7701a2 100644 --- a/docs/vocs/vocs.config.ts +++ b/docs/vocs/vocs.config.ts @@ -21,7 +21,7 @@ export default defineConfig({ }, { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' }, { - text: 'v1.8.2', + text: 'v1.8.3', items: [ { text: 'Releases', From 6659080dc0b0fbf514b8fd6fda399e48aa665434 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Wed, 29 Oct 2025 14:18:26 +0100 Subject: [PATCH 244/371] fix: Don't always clone in-memory overlays in OverlayStateProviderFactory (#19383) --- .../provider/src/providers/state/overlay.rs | 56 ++++++++++++------- crates/trie/common/src/hashed_state.rs | 7 +++ 2 files changed, 43 insertions(+), 20 deletions(-) diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 6a3ba7da124..519fe56d73c 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -1,6 +1,6 @@ use alloy_primitives::{BlockNumber, B256}; use reth_db_api::DatabaseError; -use reth_errors::ProviderError; +use 
reth_errors::{ProviderError, ProviderResult};
 use reth_prune_types::PruneSegment;
 use reth_stages_types::StageId;
 use reth_storage_api::{
@@ -71,7 +71,7 @@ impl<F> OverlayStateProviderFactory<F> {
 impl<F> OverlayStateProviderFactory<F>
 where
     F: DatabaseProviderFactory,
-    F::Provider: TrieReader + StageCheckpointReader + PruneCheckpointReader,
+    F::Provider: TrieReader + StageCheckpointReader + PruneCheckpointReader + BlockNumReader,
 {
     /// Validates that there are sufficient changesets to revert to the requested block number.
     ///
@@ -82,7 +82,7 @@ where
         &self,
         provider: &F::Provider,
         requested_block: BlockNumber,
-    ) -> Result<(), ProviderError> {
+    ) -> ProviderResult<()> {
         // Get the MerkleChangeSets stage and prune checkpoints.
         let stage_checkpoint = provider.get_stage_checkpoint(StageId::MerkleChangeSets)?;
         let prune_checkpoint = provider.get_prune_checkpoint(PruneSegment::MerkleChangeSets)?;
@@ -132,7 +132,7 @@ where
     type Provider = OverlayStateProvider<F::Provider>;
 
     /// Create a read-only [`OverlayStateProvider`].
-    fn database_provider_ro(&self) -> Result<Self::Provider, ProviderError> {
+    fn database_provider_ro(&self) -> ProviderResult<Self::Provider> {
         // Get a read-only provider
         let provider = self.factory.database_provider_ro()?;
 
@@ -147,34 +147,50 @@ where
         self.validate_changesets_availability(&provider, from_block)?;
 
         // Collect trie reverts
-        let mut trie_updates_mut = provider.trie_reverts(from_block + 1)?;
+        let mut trie_reverts = provider.trie_reverts(from_block + 1)?;
 
-        // Collect state reverts using HashedPostState::from_reverts
-        let reverted_state = HashedPostState::from_reverts::<KeccakKeyHasher>(
+        // Collect state reverts
+        //
+        // TODO(mediocregopher) make from_reverts return sorted
+        // https://github.com/paradigmxyz/reth/issues/19382
+        let mut hashed_state_reverts = HashedPostState::from_reverts::<KeccakKeyHasher>(
             provider.tx_ref(),
             from_block + 1..,
-        )?;
-        let mut hashed_state_mut = reverted_state.into_sorted();
-
-        // Extend with overlays if provided
-        if let Some(trie_overlay) = &self.trie_overlay {
-            trie_updates_mut.extend_ref(trie_overlay);
-        }
+        )?
+        .into_sorted();
+
+        // Extend with overlays if provided. If the reverts are empty we should just use the
+        // overlays directly, because `extend_ref` will actually clone the overlay.
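+        //
+        // In other words: with no reverts the overlay `Arc` is simply shared (a cheap
+        // reference-count bump), otherwise the reverts are extended in place and wrapped in a
+        // new `Arc`.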
+ let trie_updates = match self.trie_overlay.as_ref() { + Some(trie_overlay) if trie_reverts.is_empty() => Arc::clone(trie_overlay), + Some(trie_overlay) => { + trie_reverts.extend_ref(trie_overlay); + Arc::new(trie_reverts) + } + None => Arc::new(trie_reverts), + }; - if let Some(hashed_state_overlay) = &self.hashed_state_overlay { - hashed_state_mut.extend_ref(hashed_state_overlay); - } + let hashed_state_updates = match self.hashed_state_overlay.as_ref() { + Some(hashed_state_overlay) if hashed_state_reverts.is_empty() => { + Arc::clone(hashed_state_overlay) + } + Some(hashed_state_overlay) => { + hashed_state_reverts.extend_ref(hashed_state_overlay); + Arc::new(hashed_state_reverts) + } + None => Arc::new(hashed_state_reverts), + }; debug!( target: "providers::state::overlay", ?block_hash, ?from_block, - num_trie_updates = ?trie_updates_mut.total_len(), - num_state_updates = ?hashed_state_mut.total_len(), + num_trie_updates = ?trie_updates.total_len(), + num_state_updates = ?hashed_state_updates.total_len(), "Reverted to target block", ); - (Arc::new(trie_updates_mut), Arc::new(hashed_state_mut)) + (trie_updates, hashed_state_updates) } else { // If no block_hash, use overlays directly or defaults let trie_updates = diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index 8fb994daddd..e693776c4e8 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -486,6 +486,13 @@ impl HashedPostStateSorted { &self.storages } + /// Returns `true` if there are no account or storage updates. + pub fn is_empty(&self) -> bool { + self.accounts.accounts.is_empty() && + self.accounts.destroyed_accounts.is_empty() && + self.storages.is_empty() + } + /// Returns the total number of updates including all accounts and storage updates. 
pub fn total_len(&self) -> usize { self.accounts.accounts.len() + From 5a4287aa6d17ec8464c3fe35f5b80ed18e0ffb22 Mon Sep 17 00:00:00 2001 From: emiliano-conduitxyz Date: Wed, 29 Oct 2025 14:35:42 +0100 Subject: [PATCH 245/371] fix(op-reth): use latest for runtime image (#19331) --- DockerfileOp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DockerfileOp b/DockerfileOp index d195ca21601..ff65dc276b1 100644 --- a/DockerfileOp +++ b/DockerfileOp @@ -31,7 +31,7 @@ RUN cargo build --profile $BUILD_PROFILE --features "$FEATURES" --bin op-reth -- RUN ls -la /app/target/$BUILD_PROFILE/op-reth RUN cp /app/target/$BUILD_PROFILE/op-reth /app/op-reth -FROM ubuntu:22.04 AS runtime +FROM ubuntu AS runtime RUN apt-get update && \ apt-get install -y ca-certificates libssl-dev pkg-config strace && \ From d5a7ecf45a2849eb0db499b777031dd0ecf0799c Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Wed, 29 Oct 2025 15:39:03 +0100 Subject: [PATCH 246/371] chore: Update nix flake (#19386) --- flake.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.lock b/flake.lock index fd2bf9ac61e..4efd90828f9 100644 --- a/flake.lock +++ b/flake.lock @@ -23,11 +23,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1760942671, - "narHash": "sha256-LyO+TwzM7C8TJJkgbqC+BMnPiJX8XHQJmssTWS2Ze9k=", + "lastModified": 1761720242, + "narHash": "sha256-Zi9nWw68oUDMVOhf/+Z97wVbNV2K7eEAGZugQKqU7xw=", "owner": "nix-community", "repo": "fenix", - "rev": "b5e669194d67dbd4c659c40bb67476d9285b9a13", + "rev": "8e4d32f4cc12b3f106af6e4515b36ac046a1ec91", "type": "github" }, "original": { @@ -63,11 +63,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1760898410, - "narHash": "sha256-bTMk3D0V+6t3qOjXUfWSwjztEuLoAsgtAtqp6/wwfOc=", + "lastModified": 1761686505, + "narHash": "sha256-jX6UrGS/hABDaM4jdx3+xgH3KCHP2zKHeTa8CD5myEo=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "c7e7eb9dc42df01016d795b0fd3a9ae87b7ada1c", + "rev": "d08d54f3c10dfa41033eb780c3bddb50e09d30fc", "type": "github" }, "original": { From 8a795e7d40535d7bd7c8e7b3493936a599a30377 Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Wed, 29 Oct 2025 11:28:17 -0400 Subject: [PATCH 247/371] feat(jovian/timestamps): add jovian timestamps to op-reth (#19290) --- Cargo.lock | 9 +- Cargo.toml | 4 +- crates/optimism/chainspec/Cargo.toml | 2 + .../chainspec/res/superchain-configs.tar | Bin 9708032 -> 9879040 bytes .../chainspec/res/superchain_registry_commit | 2 +- crates/optimism/chainspec/src/lib.rs | 81 +++++++--- .../chainspec/src/superchain/chain_specs.rs | 1 + .../chainspec/src/superchain/configs.rs | 147 +++++++++++++++++- .../optimism/consensus/src/validation/mod.rs | 19 ++- crates/optimism/hardforks/src/lib.rs | 19 +-- 10 files changed, 234 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0f5f5b87758..62ce53a3f98 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -290,9 +290,9 @@ dependencies = [ [[package]] name = "alloy-hardforks" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd78f8e1c274581c663d7949c863b10c8b015e48f2774a4b8e8efc82d43ea95c" +checksum = "52ffa71f397f89c72a27d9c7e3340eed7981a18df9a257dd16b835ef7f53aef6" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -388,9 +388,9 @@ dependencies = [ [[package]] name = "alloy-op-hardforks" -version = "0.4.1" +version = "0.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "777759314eaa14fb125c1deba5cbc06eee953bbe77bc7cc60b4e8685bd03479e" +checksum = "b43e1c305c2f0e4b8878b943fa2f75234803bfca5cd4a4dc0a0a772842a278ea" dependencies = [ "alloy-chains", "alloy-hardforks", @@ -9215,6 +9215,7 @@ dependencies = [ "alloy-eips", "alloy-genesis", "alloy-hardforks", + "alloy-op-hardforks", "alloy-primitives", "derive_more", "miniz_oxide", diff --git a/Cargo.toml b/Cargo.toml index 8cccc50dfd4..cfa4aa845ba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -493,7 +493,7 @@ alloy-sol-macro = "1.4.1" alloy-sol-types = { version = "1.4.1", default-features = false } alloy-trie = { version = "0.9.1", default-features = false } -alloy-hardforks = "0.4.0" +alloy-hardforks = "0.4.2" alloy-consensus = { version = "1.0.41", default-features = false } alloy-contract = { version = "1.0.41", default-features = false } @@ -525,7 +525,7 @@ alloy-transport-ws = { version = "1.0.41", default-features = false } # op alloy-op-evm = { version = "0.22.4", default-features = false } -alloy-op-hardforks = "0.4.0" +alloy-op-hardforks = "0.4.2" op-alloy-rpc-types = { version = "0.21.0", default-features = false } op-alloy-rpc-types-engine = { version = "0.21.0", default-features = false } op-alloy-network = { version = "0.21.0", default-features = false } diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index 55201164701..a4ef9263b1c 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -48,6 +48,7 @@ op-alloy-consensus.workspace = true [dev-dependencies] reth-chainspec = { workspace = true, features = ["test-utils"] } +alloy-op-hardforks.workspace = true [features] default = ["std"] @@ -85,4 +86,5 @@ serde = [ "reth-optimism-primitives/serde", "reth-primitives-traits/serde", "op-alloy-consensus/serde", + "alloy-op-hardforks/serde", ] diff --git a/crates/optimism/chainspec/res/superchain-configs.tar b/crates/optimism/chainspec/res/superchain-configs.tar index da035a32da56be379423fff207576fdff0a00b22..77bfe0cd472d71cf44d694f563f80c2a778a53f6 100644 GIT binary patch delta 50308 zcmcF}bzGEb_xH{S3I-TRmngzWO$RA0EhP=YFmyKqBC16dfSqVSIC?ylgnAk+X*{$=maskhEaXZb9&$kUi-f$-|!HL3Aeh z{OASy(MS+lI2=yc?hFzCKKW-i0{*@Gdv8d5r&3DkD0?+S9PgFCrwdJou2LA^i5>qR zX%Yl!I{rgdkZ=?RPKOc@q1jR%Q6K0J31E?EdXr0GVwTLDIPCuw00N7|fb#uYO^k<8 zX{9XKDJb86kQ8$9JnwAlVdLxUP6Clc;IK#n7KH>ZX+zRVZxJ*Y;#l^_@0a)G!XVLq zJ_Sct%mB;^GFf`~Wc9x~#zWbMA@0<#=)mbStU+S_feZ$Rq$f}SwgCYdHM+?3$801V z38y!^6lV3ywpc8Z9((}IYkOO#^GsYA#Ge2`B2Z}1R{s|EQkd7>9sdS$2frYOxWFG+ zAh2*8eOvMZNeT96;zT3dSv2ywuXDDkFxtC%-_5#L(LlfMskeQa#8l z4)cGRMPM*EddQc;S~~v~DU^0ChPZB!&p&UDg=6UpAP71Ze@Glvs@j|XFVIA8D~@|< zbiW}(+`C`7rmHxmuwe^kE;JJNCyucgx~2)hgrO7hY~v%qS8K>YrXLRK+i@tqRCGcM z@^7qa|J1|(II>w^E-d^{_Z4`<(sd)Hu(`f}fp=PI%9090+Kr_{io7> zo74XS>QpKKfoL*Nc0#Ujf%og5-~-K_o=mB*U63TGz&|>BI3ivWrb=B8hxS7#dm*6G z{N(`9Jm{LABXo+&&I0NC7i^?aZybP_Fi_Y*Hn8x&s3rpbPf$_!aYE+*#UbC-(u11D z15sk22ts};CJu?B=R~NmBarQz^eyD|g zm~BJB{6#krh<`pH36kD`D_@FM*sz5rbzJ>89m zh0Rbo3n6aXlKrdiLC{-Ag;60+|3b17Rgkjpc6HF%jpX4gVQ=eeE8%YI?BPN3jX!c+ zG1gvy8-x9Gunu~21U=`yZ5YhoHxpE`^#%6#LL@|dzJK4|TaxJME_E!(P(xc)*GNUt zNYO^o*yxmvk)D>SZb&?W4F+613E9v62MLnio^Eb_=Olk_5MP*S4X}noICi4&Z~~G5 zC^|#-{htEdsx2#P2I0;2|3vBx6_XjK#ePa-0fZQ%$k9!r3O`VMroLHMBvY`lI0F9Zu5 zXonavVNhr=AV8wxFI^Wi|cP2=*ukH1O>?s+ki5a%N(g>|1TqU=av(Q2gaeX5MBAy^5>d8rdxWAI)(#v+g3$sRt}G-T??vMw zt6~sH2P{DaZ>vhO$6<b?avtD)_c@MtU^r>10!Q^t_+c%lDZK5 zcu>T)D3q-#5{a`1{gDy|gU92Qk?=oO8+-WJI*^pyJc+J`&W_uVk-uXGN8ptd?Uhl= 
z?tQ~|eA@5zItAp_lW+I)LAKlpN%5Q_k1Y`9%L)EUZK<2MI@&RQAQ3uJ2rVhGhLW8C zU0^-n5ERmtvw`_p(_+^o|D71+1`)u2OiuJDG0Lr<6FJc}N`d0Po7&pfgm7jvG;r-z z_lkN;>gD?@>H~X^NMUwKBdY3slhJSMUpCcsPK$+WZF1=N_iPhWU-L;5MrgOAB$tk|5cmt)(q`C((dS?HFpSBto3t<)FQPaVlrW zRj$oM0@JtcUTLfIC1((1Q2=)B9L2vVc*P}eW=bFZ;2$y;o| zj&!vP(?d_DhnfwK%OZBG`H9c+{Im+RPkoK{Rj`_qKg8RaHP_If$<2iw63Qj@%w0HQx#)s z=X}wFq~?HSl?#^j34OB{Pu-Uyq(~-z)6S}E&3Y8^eWc&r5~_WVt#dB>obyzk=)3=1 z-w!nCXL{qO=Kp@*9qR$0(NS>FHLC552)3!xZ z%lm0e?j-so%!C%i*%~vomAF!q-X^9)0zsW_uu;YEf?YB7G-+v_&|Clm0BJBX-XvEijeY+?#0{a zYCM@)1;|@}249{b0fRJtd+Qm0{eGf4bvYkWCcjJfJ|8p|)T(VJBo;!cFldlk<3oE& zYrRiAMHayl5mH1r@7g+cfFz{yw`}hf`r!I+bFX-yUN~|YPvd}4i~ieNR&~z^L^;_YtYI&YLtF)=qs)nvIa}F3q$=+83m7u@cWH!1cMc0BNUx1cxXk{CJ>= zC5l*cEKE8{pkr9Z^j`P#2NTclU=6#CJXZP0oIxpJp#UmlWewtwJJ9( zU^%Qu=nh*4^W_D;6Q_zs+}j$aXbro^;pYrQ5=``0BTRk@NSgqm{M-aOCZ2 zyX|U}IX??LM09UywpG63d-a2y>h;dP#E_m z@BT~&LgmEQJB1?v4zI@GT$>|*xL3DyVa$;916w+@ltbA~yiZPd3vc3QFg$)^pIjBv zoBhbJKwa!o4>Qni0Q{1%OG=T%$ggCrKP;$0I`Sha?E zS{p=1m%q7(B0jDu+Ih{cYtjN@0habiAfNjFB*?GA+{`tv?Z)o9SxB~zEzp#5pr_Nw zLDT0T4gI_QMz6kGKTJ&i^}QG^mmj)^E`L+I_^o!)9;Wi+{Wp4*Jr9Se3yeEyNW*^e z)*ePl&flTWZ1$h;_TT;lGd@kaOTd2^lW^CVmqo!pTT0M#y|WF$npbAJcV z`+6u`;n4YaZ_)(e^DD6n3t0J9H6)G_L4zZ+dhw1&7=`Y3_i)k$|Q71u{e38zG1{R#dwO1Q*Dvk|cm zuCIjcD)F+6t6rC2V`r~Xy=UlN&Z9Nvgv1%dlAV{OCxA3d+#3~3FOk|u6fbnCTv&gC zuxH*CtZdwl$N|q6LJa7>n$aLfEXyT=1`3gix1;I{e&-Do3XH=jc9*1-Y%UHrk_M*M z(cPTd?$;^+EEw_tcIoN+PbGa?|{}!AX;s@KBB&QJ;A)Gxh*UJSF$>Q&l||p(TQ5 zkfUQqGd-dbgrosrsvxEhh~^3{1`Y9!6x_0nKPof9ta`UhnU0qMy@xE9IWR1UbpwuG!Wi`nP6e>N9*d>vK{>E|D&C36u5| z@xFZK(^vx5xGy6$#afE#7!~KX+0qc-C6*>aUJA^LvI66$#PT$XL@#AY8((fH{yY*j zmrsl+(#Wfxpkw+TG!Vgqk^9q9)rRuRs@Wvg>Utzey2r5Dk$s~`r_J8&` zK2`9A56;QyWH8rS3Z84=sYC5#XF=KK?B{g-p?T7rx?;)l>CsV|Q*Xb_?I+UHb83;5 zPmFSA^Q$9$}9D{O9 z;xp@7qWtb@9<#;L1C}sy&y^swH38$B&M_(nP(m1)#7kj5#|%t?m)BE{CV#iAp;X!5 zHFU9tQVocWZmX!qS5^_D8pBQZf{Zz>l8mVqXM&e07hc-w>}+mFMPdain6}jKxj|MOq{|8ll-l{+ge_exCG)XcP6pr^ZMqwAA} z75EmFxZX1WHRn`#a+A_`bp}-MLz!M?mw{A{A0Ah{SOv>xAH7(m=*5dwFi!ZB7pwHr z;qQ2{3Pv37mkpV086Gza@36`%_OQKzCp=A5FHd0}+a1&cSZ7oLZsRwAwWW|>E-j_~L=iL~&MnE%L)-aYCR?2TkrW60J^=!7DKk%)q#xOqPI_s zx4iAMhIk5E;l#57d%Dey$Nc6=r$IRk`|ZhDe&VS^@lz=_01HJ_Yo(>_Dyv54e+Y#kJ_O>}Jw-Q^Ix7BSc+Qe;vAX4qA&gUVG4 z8D*)S2eoQi=%da@3=3S9BU@Q5tEMexyeeN>HOF}C{rR&t-Ci}ccE3G;nEBF6m$8R4 zCs2V#+Woxi6K?CR&q%)`OV|xvN5Re|&aC!3q8vZVAl6LOwh3p=)?>~hJgY6NcA(f2 zqV$2VH_mzA8WXb2+Q;mTmTjew;d%S+xliNFTa~uwQWUplKSr@w(#8c7VO*;;zh|jQ zcls+ekEpO`%T+}5vKHg^(d3#%8`#UJWb}GJ-U_JU3HfXNsi@nto)bF- zwAF3A2DqY2Ym1fHZ7s8#f0Ib=!FLWE7cwKu#pXS4-x2UH+9!j_5aQd}ZQM}AFra%M z8AU0|x2^ww$DM-FnjXyteEj>T8=TtjmpGnmE`Ld$Z1xjU*lE!8DMAE)Yn0}g4X!#X zx~7WvjI?#K1&Ci^WT|kfXGlF*o&fiNaeIPww&z#TGYyqAK+6Pl1VJ3_f^F5H9cdXT znd;#3x;e%N9R$CNno8#|ZgK+J+v88wQ2Yr{QxD*F(VBJmEH&=nC_)Bdq$z^6kC_;D z7k@E4^cvfnm%xvB<>n>8(FCLu%#k1s3VVQ>ZjG6q7c-229}FJcV+C4wfdyeytcDn! 
z^=U1)dlfaK`JbA}qtESn4VrZgI&6uTV^w)Ja_s~gb;0$swUkk}5-fZX%XN-+pDh25 z-22JTI()15qTQd0v3@)BKh}Yb?jH z%!gu}b2+OQS>z!@eyu5cs+Uy9ha!I}AMdLTjY+>R`lAJ4#TeDRo!(o@UHKC$_~m%Y z1D6}EOR?Ye)(8Y~Y-xzg2=LB7hy8b*(St@it;M$3%whhkOOu@|6$^FX@`EcL^`y}^ z#N!Ah?7V-b8G_N$dkHc#suSNSK_--1+3z{<@u8k==MrQ>>3WnVdkHe3EaHFr;Pr^O z`qd@Kgo>8h67AlWLkTjf4zZLV6DqIkLoCwj;Syv}*bsGRad$6kcBm;XG-s!k3|Gsr zpr`jv>(-pP2-|mMyvsxg8o2soh3TJ|3qSXyoBU`J*}imyl}ysg&F2if?{PNCN1vcx zj68qtY{`DN4mX{b@9vSNTO2vr>)PfePc*zjk8|g1yun+?(Fbqob@}{KM+V*}{4c!D zpZh=`rYF6BwHc7#BO1n{bveYW9?(}#E z^$7{-nq{ah^$Z|3L{n#zt2R>4HsU|QRN-lSR*$RbnhKrh4oDM+8iDjETP=BhEZ75g zhJ^#fWS#nn^Nk8i1~kXP;M?z^j^$&vdtPX^-J_`w;~tb?wtwGTeBa_bmR+<#si`#O z>%?~_+88;v=|t}gB!U(H4Hs>KgKSqK>?nZqsoUimp6|y6PN7DhaDuZcP^46iknVTgGC}Y zDH_kM15W2AOrMw$Al19opNCs{*~T}QEz5@;0zV0tOv{H+8S|qPLWT5S@!ip(9<2Sq z5_;WdRz8#^WQ1`6BHgH5|nv$e-Y-_LG5m7N{HL9llRd3kV=0Mx?<#Z<=i41gf4 zYvQogO)Inrnx#_128#=)cL=qJo<*~1J)L%+h&R7f(}`U#Y_A7t0R;`V8Iwl)1QmVW zn6dXy>d}hJllrKGBnk<_>#=8XqOkCh#U?ObUW}bmrcIaqb5a?9;Y%IbY6~)jHtShB z5o6j{^7Er44x==BgrsIa=`*R%e>%;7;pQpm@$tM8>k91FN=90+&s#wmk`qo=PRT75nm6$BrjDi9#|_!WU}Sk47b2d4=dGo8ntx$9Hq z!mh~G-LWeQOApyM6`M|)EGBS{zl$)dSMjss_-wSsPf`lDdM5 z@~pQMRPxY{hp&4}5B+GOw^Qn|gMGpo7La?gAI{4xSaHAF>kH1ZP7Qd}FE&pA&9f#; z2Yj>v;!97_H$68N9YgT-v3N3~D5-Fr<-ax&<1A6`)9Rv3*`>`@!hzn?TZ^6=1Jc#r zkInUhOx6L!pj&I*dk;=P@JYr3=`%^UPeRc{E|p4|*UW;dpd&U_p`Dd&0D{aR)FPv5 zQxB;a&5|zY)9&;MPs_D#OIEX-V4I~lfuNe8J3w+fmo$_!LsVU7Z!$$_s=l zn#OT)uMV{3ilKc8ulqeT``nk!WRBngZkMiQjwF}Rl`|KY9a1F>0iKsCQYAe)XEdJw znkUi(Ko{yQvy5m{S-;!O+tv{CyQzdPGPp@T5OtZ_k_(pj#*(_?ZS=wC2 zZEW>4w$Bl;%AU?6&wASD&K&kcV$DDQ`saWCumAWT;r}a;4#WQUtIr-kl!kNTry3!v z@!5q^+992+v~A$4${^&NtufeSs`rq^^J-d;|2eIP`EZfe0}4%R86~Wko`eD=!gw~C zu0r0I*=1sE8VRC)ZE}@$MnJ%^?Q@kX(Bin9YAVsT)nD|N`AbV7MoLHpYNmjM3@LE? zLsnjq53@cI7wsXLgw;rpie}xeX)Js->yXRw&H12tU7r%!qfdGC=dyQJ9nybFdi%K| znRYI**mlU5w3>h~f4&T`QEPods3+I7Q3I(e$tna_TjSA0>L_;sw{cB@Ung!ohn{#X2 zoULpdRIQE9#27pVULe#(Dt7>R>=ex8RD2tb;= zfu?u;sU{Jd_8HPUprf@1f6mB&<{qX;lP_8RJ6r0s1Ji zD@gAGfHC5rUo8L_qkex#TU*R;2-yP1LqM>t(Kvq>j(ax!3+-|B2FvGgCj!R`%6&aX zX>nW`FxsKZ2%@57t;Y)$C0v^;nf_>E%Jv^$Ow5Ou z#FvX}jv^|msdw(s7|oN82gRgf&EHNp5a0UnX$4QeD2@b8nfeUJW7 zRj8*NPczLQQ4{v6CmVxvuab;MjOf&L0%_I6e$*0ig=X@pnvh_SWMH~*y6IKT04ll$ z6_boCwbnI@wjHcO$tISPic0!sqN}ES3a)=8_%BP)Wg_(uE*|&TTI2Sbae7qLBf-AS zKig_gZOL-IDjpy8rtmwPIydT3U9K20Jpn6+Q9ArsIZvg=K^irtVNp$wcDOx7*a;PZ zDPR~{2*Q=a-h>k!Sx89eWFHtXa1k!~E=m?h| z$4D8M$=HWKfA31ybDNrm3YZf9Co{w>)m4}ZYm$t*Ul)a`_)uLG)~v%*%H`yISnCD> zQe1SChi#v!Yuy)Ew*u1E9>627JR=wp4HrqL`(fF5M1eVt`c98K5v zae{?};O?+Ua0?P7c+dn0PO!z@ZE#DSQd$lajQpRIY|GfUVv$S!*_1dxrJrtQ9yZ*I+VTJ z4FpH)Uo)sjm?udkr8>128S7@b)5(h^ zIS`7XcA1m}k_yC%p5Rb)_!_r=X%vW6{k(Iv7*ieR!FX1}sw7X?d@hbRUZ|l8{{;Qp z{b`lK?Xu?SD_aG&4xS8C4Qrvp@Qh5Y5#UPln}Cki6T#QF$jLH#B8`|$nsM^;YN+*% zm{sHQHORlUNQ+!*OcBYqJ~t0Av=w z={Nf3L%EMvpt*!tY#Udwy$7FEf$s$;_e@xP?mNp>`wRrZqWHu&cocx$PHD}WiQ%1Q zo5g8!7xvO|_Wi{>jWqDCjXbvT0zqgk#dr~bfOd|b6;v6>t9Z2V(t@M@tu#}X#OnC4 zZk7`loR@pw>PbkEbzM>4IHs__7j}Oks3cwHO^}D)O`m=ft3Z;0{+tBwmB_O~kL%?P z45p@q*D-C96%?YC?v z9+ufM5xChSzn2ZgB4qcy2}^G#A2I~a!|@?Gx&E>sF&2{w)8jISUe6@tDFA|3_|bGB{-s= zzTUV7x#(NJua{xmNyiKSnFhq$$297~$o$z^x!N+$PRF==?Nu{ACjUYYIe^Mb7xn_p zwd2YknbX7FB|pH!JHk52Z+?Ofnyb%~Mo>+70RD#5%lbWJcRZH@%eoDH=@BVCuJd9X znflf5wQ2*UEM@2)sZIZG=fpsOQ~~dR_XfuYkhoh9{o;o2cj;p%hsJEW0m-=FKaRfo zv-@a0c!D*i*HUkV612H@hF@3LKUy*fDwm+xo_j*R8p&e&MII}-awCr<<*x97NlvxJ zLGe<+-2*&*XT@rS8- z_;tn+i;%>^!4glY_Kja@w1_ z_rZ<9{^ZR`3S2hmcOklY(+R|F!(xV&S}Y#Wk9s+Y^ce^ z$5Hx4tl)i0DVG<{RAGiWObX7Ucv3TQ^9PU&BZjY#t2!HH$j7e?_z=oG9%6`yD;O74 zO?L|}(5zYb-tfiud(n#N72gKAY=L|+nPwJ4qs*|XR9uu7*&3UC3v{NDE?tA%$#FuR 
zhwIPDb4gI4-cSUD7SXYjT!Hs<>gBTDqGEH|d>aTGbT`js(de|Rh2L|$@<)&iuB_@P zooO-G{iFX$lAYvazgyp$!G5h~Mt+bVf@aL{5^1>z5~Hn1%078Gw`aKZyo&UZN|ZM7hMdw`qfk+wVch@_BeZ%VcLFG<0Wt4SL!1qj$^d z-G}a~to9Jqynwc!So8^P=2lSmq25?eq~mZ2?;yJ6GN8;VCf_pPF=J*w#pKpEji;$q zNSxa8$?cWt)r&xzKhTpHTu7lCI0V#Mp@P#-Nv?^FyFOnqy&Q4lW0fvztYcR(ay!YW z=zft}PbpnD{))L8*SZO#kS6~th5lXniD#{_dij!6pZ>j#{%@gg(EZEhX|XOrDUVp? zYkI7X7u?&tok*NFBVs#aRBT%mUS^2=XlO~*pY2D;>_s22_8M%*8CG#uz?L9M8R9RS zyIVwDari%(w|Y%ZvL}Dvc(t`Iuv{K}dg{}$CQR^dKbZIGXxj7cIWB0GZ96NE4@Qhw zQkjps;&jQ#**Tm|>}lIqauWP?kG((~hMnC=iSZhxEmwPaT}=0*Wyr9UZ{*tEX`tW% z24T|;rjhGtjFuM5Be{5Y+Oa2u)dQ@-VR*(LK>pB|)lt;&rlnLnZza$b3Q+kk_UD?;&3&2 ziCZik2kg0u0Ssz94K9_%s;&Q!uD6;3Z77YaE$ASQ!pa3*`1PL}aUTLyRuWyi)Y(eK z8Rr$nzO7%-iG6DpH9V`oTiUrZ16iXU!l^dw%XHwo_NZ8JEzg&JMy)FbwtR8E>NF>( z$&k`E8xj#i3eU=$%<~$%si1`+SxVD%*Ej7yerk4k!>}H~P+kN_U1U|08BDZyOQNFd z!}_=4#F%fd1{L)5KKZpmuq+wg616g&40T0LKf#nmts3-9PE6~!p3mJtpu1t*5RY~n zQX<)=I<2JGp4bOoEb6ngY1g&*W+i6Ndq`2c4HbINmG0(J^AcpF-G&IU6|&QA!;YrB z4SRwaLP-DEkXwqb0?;;ZPX#&L&s_YVGw8?a`ko42`&y=aMzY1+NAC-hO*L>d_eXy_ zYO7bjl?wemkI9>1DtT(NU-s|< zkY38kNF+iJN!ari5-=-=5Aya^o0(Ak^aRw$Jsh<&i(cYhV|!j2X6@@XcIUEtgynv| z08Mnb^KBM3x?XL$$Z-BCypwi@tAsx?;r;@SSn>V`&Q~0Z{*Rp(pP9t^-Wb(3WrB{- zF*sW_cAiLNaQ0g>He;EO>^C9zO%L@`EfwB3S2!mvNcPPybMn<`RWSKYk0JwK#JF=o zATrhrRER1N0O`q<<|O13NgKCzPtL=e*<5Ohx`l`XMvxU0Bv>!?`bN!zg>K1pi_= zEcNCpTuFpl z`|J73(=>vk;d!?C!FtT*;Pr$?yI7d*>C0fzymLi;lDIoa9#SK&OmXETA*mbUT}Z^# z8y>>6Z6Sh3sTdH9@F3cvjZh5pKt6ef<=wiUV)2(_NU!7bpL$9>KSaw%g}J+bm@!aR zw=yg5m#-CuJUB9NYPR%T2`Ikvy|~8YvSAO5FbR2`V>|TK^q?*i!;&Vh#!QmpQ#a%j zMOJ6{9A5e0Et)0%>OM_(xP~m}`HOg_{=y@%50)}8LA@-Z%xHsI$Kz%$$LFn;AnRw( z>v3{U>i4`|2U=k;@2}S0v-6qP-r5qNjpj_Djcs7ko*zr}Vm#zz=4^-FQE>Xh9y`;@ zgm^bi!S$*UN~^>@_(){Smjhn8G3197y?C;4U?m5vQ2;hhgJ{g1EOQOq#I{-$SWsrC z(?rVmI!J4@-~Y-z`U-a!U0Vh}>AhBqkl@EWPONVNY)0<7Z`E5~V4Y)X`$TA%6;<$$ zeymP{yRB)CA0nAu8V<_?7rU9mUEM=ns~v%KC=*ZC=c=Te({EDa%3Rj zn(EKfySZlcFECugSW$7t-f})QHDHkz#b$ZDeMT4v8ZQ9yT%msqyfoOwU z2GEFx-IW1;jtV)=j0Jd?nyV^v|nkI|4m3G$X!6 z3mwLpzM{(9Yy(|hrP-vr{u;6$TXqa!oA<2^pD>QMApfF!`ZD5|tA3mqZi&8CtG@F_ z8kDa#N%K(|?*<7*nqyS>aSp-~0hUDcgV;uZ0eJ6oSBj}F7wo476KqdzSWb_s`dRAj(btZ$UywGK;5g(;xXFzjxee z4$en?Lih0odldBSacaWNM}Cjoii=D^RpNeUP)CON9H6MN(RRc0HS4l}V4 zRcw#j$pe33N42B4IYYEEGOgN=(d{w%^@Jdmt^Q-HC>ENi)9|1gbyu@67|Ah?nR3xI z=cOq2k$Ndnp}6qZg*PVeuHVrRqUVKD5YWibo0eu+_YDhJa<4;#8+jeVlQ4^r*$+Mqvfa0a;&+xn21H>VSfjZOkD-q zhd4?ydqpNEz4>razH#5!C!W$6P#wx-EKBzR;qy;hCOC1Pqu;*gTQLZt2Lr7T$WS4G z1UFp$Ve}S3m6z5eEk`Yop!$b(rAE4JlIY!pTV-8UP?wSaH(w2GF5$P>B;2y_;X%7n zEy#eqA_ZtF$c8Lc|K?1wv?m|KWRxK@(>1cZ^@qrL2T1nHbB^>tLt*Nsz8+!+=MMsT(U*b^dmC} zF9|lOnDm**ly_psNyGc0V5P*C>mAr@2`?X0yE4$!v{%xJlGC?X)txFl@G1bi!|U z`c0bm=|ju0sA}+RNJ{O_6@?9thL)7efKvB-KB;qVDen|L9Ral!MHr~#`Dr{;DE$4y z&1cXP%pP6|A9{kdKA#I=Om(vKt>KrXCY96A4vh+zMzKpvu~_ASKS1FZDtECTS9om@ zkR@)#`8KaXn)koh;sbwDl$MH$Me#EOzUHu_F*L5RY`R*MVCqjjIS}+J1RrO`6p8=t z9@kKm{$rY;G!Wt=+k|PxzGekb4*q#h0eaX+$kGjRVSXhRTtW!mJ>>CKbZD4Ao&?|H z@3GXmofh}mvg6BY;g&yKx7bt_F%J|1`$nO)CAWGjS=0;IENX?g;=+nrQEKfTQd&I} zNpf6HOS+klXZ59+kfuDb%K4rk-Q={P!9CrkSc~dwXKaO04WaV+@pnLrL%~071t0#-B67=4B2hKj730w zXlb}>VY#~Wk9m9g8#yDR4t;8BTg} zJ-yrgc>f)9wJ65TXtcYI6*%z1EBvFh-va9*?8#9G0#v>3sPIsjrBNyL@>GT?QUdSG0~_;eV^))g{+DfMajGA_hfkBI94UFu z?Qw_NW)XSv?7=B3Q~In{kf&>7^M9SARo1eOe^TR9LF5Y-Q4g=#QygWQdZ~CXw&}*> zI?L%Q_>(;wfU*#n0V$Onfny*I>+xK8|fnTyxg!Pwq-{4w+R`9 z5`m4)Fd&9~JQT>a=d2J=AJ0d`XvpC6=MO(a?|A~{I_`PYwczmN@*6VVvW9IR!|Rs$ zPZB`|GbZ|;=;dDUo!y^V^*b#sL-Gw5%9*w0Cq5lLm=V7~U{@5o=q!lnpuYg3EW~xt zpB)c(zWR&gQJ5&}M?de8NWP~VLr7UFn26K9tS&Y^NXb9lEm_3b4j2n zL7RQan`n4U1ukMG6KE^ozNE3;wQ?n8+p5g{Xk)vRk%Xi$-TEXlbS#(n@bHeSmu&2~ 
zEK@R})im!uLJkKrGhz{~vLXp+QgF+kxnhN}{r(mY?oZ&*-hJ9mApX5W<#_cyP#FYI zC1GteMXLcrnAx20N;mT(3VERok2 zp}~~z6xELfAmzjUAD@5yUZ#VoK}$QV(b2cdy>TuwPwAY$Y=uZGqVK=&e9$a@^jxGQ zp7?P|#hGt7r6cT|>J*QIRjOpX&Rr!p3F8-IDN(5+v*EZ;LaP#8FcWa1+!7I<%pdr* zXsj)mj~C_FVpWi04Ep^^Q2}?ZD7i%IXmS~XzF{qJr>^nzi$f!_=T1_)ACUvXi4o2t z)0~WZ_ky$i5fN`QN>fEPH$qSl8Md|=M$nl4FrrN~Vy5FfC{36ia;z8tCm)7=a@tMQ z5yeMue#9sXjs01)v4{JG_0AiDlL<})JvAAi!t^Re=YvT1Mh@2s#D+ZW(@>IhpO;g1 zjhru-j#peh9#H~m5OW^`WAE$dNX|cFT%q@Vx)Al;|Gaau*u>{T>TDF7{=0q)L^iOO z$a-Bga^e^>9dWx(xdwNA*r+rXrO7A|y7ObR7*rd!@B0P+O*^Z?HM21s| zNy;3xi?4e4n@-1+J&}8E&bVo^+_y7$kQ>F&dcr;xOp8cjRnMZXUV{v*O}yzE^FHTI zSH6kzv5lCQl>PaFZB{P|T=7G)qD=E^;C-JJLHCwysU`xT#ioYRBPezmIOt6DQwXuT z0aMM^74SAP0M8m)$!6=^=*aR4qCfjoQWi%t&+nLzJZ3njnenH9U8E~iDT`pn zr@`09f9|FY&((Clfc%v~kw5#;dH*a28@>!>^kb+PqG?(;A061-X@QQnc1f#gun{Lo z@ugmW|Irb!(;${Nj0kAp-B3_d8PnIl75VgeUlCYsmPo$VtWmiaz_l*Z>43|P(X{y~`brnk`bCAL^t9FKZ zxYeypr|*YxXI{}iJP2WVrx7@yN%DdIk!f|Ig;^HEEPz1B!PXcIETdJ( zmvqeRqjwI#GZwd@^sJeC@%2Y(1CFw=?L3jmBu!8(ho}6vm@sW6m_(4BNp`Z{aTP2I zn6?$uR9hw*_E4qkXUy`&?qgbXQjS6le@oK!nE53E$=7I~%TxjYVX3^TcL7c^c0(^p zn)HWZlfL6SkR@5sgK_M5llh(X7q)tA&9?T03y49Mu?XmX&P2fb!6NNaWVyr>_=?Y~ zD=l>=Gt7>4G0t1;frazvu8iM?;H(%&*XlXJb=#N7rKb}`{hUAOX}I&f|D@p@pH;sM zQMIe>St!=Ju*GWLu4}0{z3is}*Ftk}NDX02Z+#z(r27?F&m&#uY{$l0Ooe2stbYf1 zpCL~zo+;ht`>H4%=Em01!razl^}!drjnpygN+YOrh|AY{g<96w#hT}fChgxfPtL!b zico=xnn%iaediY@+Vm*&oe*Tv!WzPg3w>3&%4kSI?{`48ryjCMFe=-$d=hY4YFP(Dp z8GxypH=w6%i|_IyUUDcH*i(L9A5knDjB{Dln{6UQw`NNBQz{I0edzmQCt;f>syh?x zFY5&(S7w@_keXzB2{)Tbq)hYD+H$k?#ej_qpqPhthK4kH*+6Y z!pzb~2|D^5uk(pukUnytsz0vUq41FqU!^LCOX_6PN^0;k!uSE+d7&Ab3+Fe_3xK$Ig-?o&6dJZoD0$H>O(q4$@ z4j>M!OYA8HVl3KdsXg;C3^UWHiV9}@GU@|rdS8WSjTC?xjkB2M_9O#kT1&39C8k;Ya zzmTr3HX4b`=HWyZ@4xYTFOE{#m-YfR#L^-jluk67?Sc3!FZ+iS7jY8#PsA&+Prf3b zgsh2%$C#t^0$~q}vlZ^!c%_~3-E(wSVV2j7E-1uGh%ZyH^R))<^?}PGoj<1B*5keW zrAv<19#r15&(l5E!N9+OPyFz`nu#@xe_z%RHD*Mc{${C% zKqK@}dJDJV{hAMuc4+c~oP6~9l}!S zr)4Iy$S8$%5EtI~vL+{5*^;SlDKnhr!;h(td`zv8&Y#oK2ZZgcy^Y_pY!G%M2R_)8 z(Ti++R`7T=FUh7b2r>>~x06l_45GCyqV+FiA#gBn78O)xUDWm)MRHM2b#5N9OQ~K| zwiu+*ohZ#%x|kbgAGC%HTa4SV+@bWO`(1Wa>t?LC`E)?qW5Js&1X$MlNKhWa@yMYpz$KZ=K}!fm4YL&oyG{P&$+h0=%SKMB9b z-kl~>T{XmuOh}^5Fp6dlTkZN51)LM?U8&FPc#bN;CWd0YvSaDWaat7#XKx#*_-KBU z#&&0o70;4@wlS3Y#xohha=lXmq=rubkYpxQK~P!G7flo2PKv-3`9rnWsHD2lJz91V z{YnSH8NIXvf%`HsG*SLms*Lq7gh;nO*EASG6R*md>k25OGEjd(!Cy^l7hIze`g0Wa ze`y7Yx!0Hv{D6CCSM#IIY&FO4JWx4%#x}>#Y(5Ipnj1$eT)Y4*Wwt4L_gs~G?+yLT zAOR)Erl{bWI(C#KGwqhm*Fb}I&O4i|l^>yArEwi%Ib8NcT~^uy3w-CVQ$~KLRtqV=_iQ737JgZC~+7;X=fpu=d2C?qp-kXu=j!R^}qXfW` zI4~lW__Y|T0)(Vet|EO3A(L1ca+hVz4+^uW23w?i=>v%Le(b$KW%ljN(JuNKIWIh> z?vUhWZd;2u8j3BU{vC;ikOUZI*h*tid4>NFu!S#m^D>rj(XrWmFMEAgTNF>XlBprG z39EEwL!p1HgiBMtrCvydz;EB;A!gP+W2o3DiYv9MAYa}@!6*Mv7%Q^^d%xSG4^al9 z2tcJ|wI&EMEcvw5e3V9CkKbaZLU1xVO-OhB$>2`3sXYb8FB_}A0-j#!bFO2BBmq#V z5epzS090X+RpUd?_Fdw#lV)Dj=uFTJ9l;Q}>!ro28pXa~#)kf6Z@#6v{w)=g4Gx=C zp#Hj~9OBat249Z# zSPPc(mY2(5D9xiWvF)qgI!-PV?EPXlUks%&)tN0^KVUSe9m{=?7c8o^B8ZY^Z6iod zv}!Q5Odi%YZF-lfM#zqel?rz3X_L4;@8JME;k9nxZK)V`+9PFw9$kGHTYcc*5%|r? 
z=TPF4FYn)UkmtayhGCQ& zKVt&LC^rrH{Ev)ZfSc1l1bDgsmIDxgF#kdr%C_Y*R?k>NQMPR&vHy|s^9cOSxiG+m%`>)8 zz=fUJ_kR=u-28tJK#YADAjaVt$7h_Lael_-8P{jro^gN1;~CFqyq@uf0%Cm3nEv^8 zem-8_zl8UHle5+bidO6U-^nS!^EXKtz|8L%5ENkMZ|4245drSMMfQJ(e%(I|V?6+h zu^#x}L;rsbhzhwVVwkyL&@;hMbHNbi|H^4z&c8=2F(eF-7zza>hM9%^>mVP`-)0X4 z5M#d>hDsg|MJ11j_;-p52yp$~$UsCGU?36-7>F_h{$u2M0{m@KKgY}O|8<8q`k5H0 zIWL&m>mMCI-~Ta1E5J}dMXXuHe_C^M@$&pV*6gVBVaP^tP-LU{sLg*Rocw=tEDRu> z00l@Vnoa$uHP>^-{H=8&0C5Ij%EN1JDqv>H&uMCECd6YR$Ri}cZ~E1g-;|%yCy65Y!zwVve^YD6j&mXRZ+uMfAs%c0?lB`OSGN?%Es4lIEZmzAX-ZV&(7FO5Q ztyx=L+aWgfouO@lw^&omK}9-qac46f)iG+ww#)|IG&SCa_)sqrT{18d_Tlki{no17 zwNY=aA@hTdk7z7ZbfZX86hqZrTol*69Q3q%(1c9%iYy1kQM{LC@HQnqAnHlm@(Wb6|7B%UG9`YD#vPid{giYiVd}i_ zk)m<&>5YY&p=Y0?8<_5a$pm5*36tc=nHSxY4$rD}PMYh#tQ-)2+1~ujWer`ES>6)j zC*F_%N1Etn9Gsrn9*gv!%a;?h7Ej`)ao@`^9waS3j#$!F!}p7r2%N#ywd?- zM+N}dFy(;VyD)!}+#h#Ek|nImfrCwoO;PNu@-QYrzbue(e-`I{;S$a75vN>)A)Q8qNG zV|s5o#~e?-ccIcb=8}gDZsw^Ya^|vKPg&w28=4eAosI2+=~Pb6`)oc*NkjSV z_tE{#_!o_Zx~BC{C#smnEbXLtx6h$ySMuVQvz;qEY|2$B!(`X{yW&ffB*p*SO2G=} z2r07<>J&(0p{(Z?3}0v!{WfH9r)@8n3)2lTRO@JegT=WCn)qBL8&H z`d~Mi=A6~&-9k2Xd(<1a2c?2JK)k-7NE*KzL!{j%s+>(1!&I65B<1E9>2l+Yb&+FE zoZbF7HJO>J)B*4Gi&JKIon`EEc;l*F9FC=CB0kOMZ^;kRJC+wV%5xao?_0Ba6G(msM_ygvbMz zyKlk#ugF;&oukyRH$ilx)Yh9^nbj5M9bBdlrM2!QrVXbbIC&`{*q^V|7HYYTO_Ks- zdlXG!avU}l(l+O~u@rkrwtVsO?M7BaQ`|ldv1e$C+r@rR3@LzeB?E(E2DE#KXmq24 ziIjM>jgEuQ3_3E;?yt}J+#9A6K-lSydn1QeI4`hyb@vfO=h6np{fqEb8O?J%mm`L9 zwh&_oOdSK95jq?pJ`S(FLVw|8Vlizjl(ejNhy!(yNGzo-1kP5``P4??uS86U%V~YO z598M(f24-U3I_@)%d?Ch030Zq9ws8|=pASBf`#j;mKwdFY6CrKe}gXXtV2}PY*cVc z2@uUK^kgc%faxU;z700MIhitpWP1A5K?=y@g3`TuR$UT5p%5Vwqu}7o?Zv2NOTp0|)mHg(@ z-X0F=ZD%hNt7gYzXhWc8$Ja^@c^f5zsptu@uMnN(4+SAV(tOL?qw7rFaRa;;v4nfW zK)5%&d9k-oWsj^0v1!g-2$x4&eGfb;`+33jQyH~{N)d$05zXs7KRSE9x9{}Gie4Up z_Bcc@KhY3N?^7h5J%a%5dy-hCB_q*BL21e8m6?rp#yD;E6uoEV#YdwRKwiACWvS;- zX7>;a9ilynj>pum&umc)_EQare{On(Sg~bKp?w`<%f=S_z}Hn~zp_EW7|)M|yz}E) zew%S7X}l+{zCxQ-8;j7MhMJAzOEMWyBt_$}>JeB5VcfHJ6g;a6&QKLg?A^ z8|u4OxOEQN_aWRmw>85eoHf`g;O;A!XN3#OAY3rNwb|!SXAWZNI_aKom3A+H((Z-* z>V(~FZHI`(kQdQ@0K$-RufN#(&BWqelsICEv2wJRK#Wz?`{kScg6aCZT44!SUk2f7 z<&erm^!-+Qpf=|TSPueF5x zKL+9cPyCRm>_44sNInNzvF_01H1z*VC)~Hr3hi5u_Bw?2Z8$hIz1MDP+!#@N+BcwW zgi!k%p7!9oD0`&0W5hx)*@RY$R)@A3Z425~v~6hH(RQHiMB9Z{kJbR;lHIKnyx3H} zm|(stc7sr{$4_6KEeLL0bgvbr`yfpBxB8kpw{JQ*ZrE=L4UHf)G|?hRQ46u=mvg0uasBei mod tests { use alloc::string::{String, ToString}; use alloy_genesis::{ChainConfig, Genesis}; + use alloy_op_hardforks::{ + BASE_MAINNET_JOVIAN_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, OP_SEPOLIA_JOVIAN_TIMESTAMP, + }; use alloy_primitives::b256; use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind}; use reth_ethereum_forks::{EthereumHardfork, ForkCondition, ForkHash, ForkId, Head}; @@ -611,13 +614,20 @@ mod tests { // Isthmus ( Head { number: 0, timestamp: 1746806401, ..Default::default() }, - ForkId { hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ + ForkId { + hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), + next: BASE_MAINNET_JOVIAN_TIMESTAMP, + }, + ), + // Jovian + ( + Head { + number: 0, + timestamp: BASE_MAINNET_JOVIAN_TIMESTAMP, + ..Default::default() + }, + BASE_MAINNET.hardfork_fork_id(OpHardfork::Jovian).unwrap(), ), - // // Jovian - // ( - // Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO: - // update timestamp when Jovian is planned */ ForkId { hash: - // ForkHash([0xef, 0x0e, 0x58, 0x33]), next: 0 }, ), ], ); } @@ -670,13 +680,20 @@ mod tests { // Isthmus ( Head { number: 0, timestamp: 1744905600, 
..Default::default() }, - ForkId { hash: ForkHash([0x6c, 0x62, 0x5e, 0xe1]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ + ForkId { + hash: ForkHash([0x6c, 0x62, 0x5e, 0xe1]), + next: OP_SEPOLIA_JOVIAN_TIMESTAMP, + }, + ), + // Jovian + ( + Head { + number: 0, + timestamp: OP_SEPOLIA_JOVIAN_TIMESTAMP, + ..Default::default() + }, + OP_SEPOLIA.hardfork_fork_id(OpHardfork::Jovian).unwrap(), ), - // // Jovian - // ( - // Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO: - // update timestamp when Jovian is planned */ ForkId { hash: - // ForkHash([0x04, 0x2a, 0x5c, 0x14]), next: 0 }, ), ], ); } @@ -739,13 +756,20 @@ mod tests { // Isthmus ( Head { number: 105235063, timestamp: 1746806401, ..Default::default() }, - ForkId { hash: ForkHash([0x37, 0xbe, 0x75, 0x8f]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ + ForkId { + hash: ForkHash([0x37, 0xbe, 0x75, 0x8f]), + next: OP_MAINNET_JOVIAN_TIMESTAMP, + }, ), // Jovian - // ( - // Head { number: 105235063, timestamp: u64::MAX, ..Default::default() }, /* - // TODO: update timestamp when Jovian is planned */ ForkId { - // hash: ForkHash([0x26, 0xce, 0xa1, 0x75]), next: 0 }, ), + ( + Head { + number: 105235063, + timestamp: OP_MAINNET_JOVIAN_TIMESTAMP, + ..Default::default() + }, + OP_MAINNET.hardfork_fork_id(OpHardfork::Jovian).unwrap(), + ), ], ); } @@ -798,13 +822,20 @@ mod tests { // Isthmus ( Head { number: 0, timestamp: 1744905600, ..Default::default() }, - ForkId { hash: ForkHash([0x06, 0x0a, 0x4d, 0x1d]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ + ForkId { + hash: ForkHash([0x06, 0x0a, 0x4d, 0x1d]), + next: OP_SEPOLIA_JOVIAN_TIMESTAMP, + }, /* TODO: update timestamp when Jovian is planned */ + ), + // Jovian + ( + Head { + number: 0, + timestamp: OP_SEPOLIA_JOVIAN_TIMESTAMP, + ..Default::default() + }, + BASE_SEPOLIA.hardfork_fork_id(OpHardfork::Jovian).unwrap(), ), - // // Jovian - // ( - // Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO: - // update timestamp when Jovian is planned */ ForkId { hash: - // ForkHash([0xcd, 0xfd, 0x39, 0x99]), next: 0 }, ), ], ); } @@ -848,7 +879,7 @@ mod tests { #[test] fn latest_base_mainnet_fork_id() { assert_eq!( - ForkId { hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), next: 0 }, + ForkId { hash: ForkHash([0xfa, 0x71, 0x70, 0xef]), next: 0 }, BASE_MAINNET.latest_fork_id() ) } @@ -857,7 +888,7 @@ mod tests { fn latest_base_mainnet_fork_id_with_builder() { let base_mainnet = OpChainSpecBuilder::base_mainnet().build(); assert_eq!( - ForkId { hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), next: 0 }, + ForkId { hash: ForkHash([0xfa, 0x71, 0x70, 0xef]), next: 0 }, base_mainnet.latest_fork_id() ) } diff --git a/crates/optimism/chainspec/src/superchain/chain_specs.rs b/crates/optimism/chainspec/src/superchain/chain_specs.rs index 1547082eca3..8a794221ea6 100644 --- a/crates/optimism/chainspec/src/superchain/chain_specs.rs +++ b/crates/optimism/chainspec/src/superchain/chain_specs.rs @@ -45,6 +45,7 @@ create_superchain_specs!( ("settlus-sepolia", "sepolia"), ("shape", "mainnet"), ("shape", "sepolia"), + ("silent-data-mainnet", "mainnet"), ("snax", "mainnet"), ("soneium", "mainnet"), ("soneium-minato", "sepolia"), diff --git a/crates/optimism/chainspec/src/superchain/configs.rs b/crates/optimism/chainspec/src/superchain/configs.rs index 53b30a2f5d9..bb1929646a0 100644 --- a/crates/optimism/chainspec/src/superchain/configs.rs +++ b/crates/optimism/chainspec/src/superchain/configs.rs @@ -87,7 +87,17 @@ fn read_file( 
#[cfg(test)] mod tests { use super::*; - use crate::superchain::Superchain; + use crate::{generated_chain_value_parser, superchain::Superchain, SUPPORTED_CHAINS}; + use alloy_chains::NamedChain; + use alloy_op_hardforks::{ + OpHardfork, BASE_MAINNET_CANYON_TIMESTAMP, BASE_MAINNET_ECOTONE_TIMESTAMP, + BASE_MAINNET_ISTHMUS_TIMESTAMP, BASE_MAINNET_JOVIAN_TIMESTAMP, + BASE_SEPOLIA_CANYON_TIMESTAMP, BASE_SEPOLIA_ECOTONE_TIMESTAMP, + BASE_SEPOLIA_ISTHMUS_TIMESTAMP, BASE_SEPOLIA_JOVIAN_TIMESTAMP, OP_MAINNET_CANYON_TIMESTAMP, + OP_MAINNET_ECOTONE_TIMESTAMP, OP_MAINNET_ISTHMUS_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, + OP_SEPOLIA_CANYON_TIMESTAMP, OP_SEPOLIA_ECOTONE_TIMESTAMP, OP_SEPOLIA_ISTHMUS_TIMESTAMP, + OP_SEPOLIA_JOVIAN_TIMESTAMP, + }; use reth_optimism_primitives::ADDRESS_L2_TO_L1_MESSAGE_PASSER; use tar_no_std::TarArchiveRef; @@ -150,4 +160,139 @@ mod tests { ); } } + + #[test] + fn test_hardfork_timestamps() { + for &chain in SUPPORTED_CHAINS { + let metadata = generated_chain_value_parser(chain).unwrap(); + + match metadata.chain().named() { + Some(NamedChain::Optimism) => { + assert_eq!( + metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(), + OP_MAINNET_JOVIAN_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Isthmus) + .unwrap() + .as_timestamp() + .unwrap(), + OP_MAINNET_ISTHMUS_TIMESTAMP + ); + + assert_eq!( + metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(), + OP_MAINNET_CANYON_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Ecotone) + .unwrap() + .as_timestamp() + .unwrap(), + OP_MAINNET_ECOTONE_TIMESTAMP + ); + } + Some(NamedChain::OptimismSepolia) => { + assert_eq!( + metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(), + OP_SEPOLIA_JOVIAN_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Isthmus) + .unwrap() + .as_timestamp() + .unwrap(), + OP_SEPOLIA_ISTHMUS_TIMESTAMP + ); + + assert_eq!( + metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(), + OP_SEPOLIA_CANYON_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Ecotone) + .unwrap() + .as_timestamp() + .unwrap(), + OP_SEPOLIA_ECOTONE_TIMESTAMP + ); + } + Some(NamedChain::Base) => { + assert_eq!( + metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(), + BASE_MAINNET_JOVIAN_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Isthmus) + .unwrap() + .as_timestamp() + .unwrap(), + BASE_MAINNET_ISTHMUS_TIMESTAMP + ); + + assert_eq!( + metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(), + BASE_MAINNET_CANYON_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Ecotone) + .unwrap() + .as_timestamp() + .unwrap(), + BASE_MAINNET_ECOTONE_TIMESTAMP + ); + } + Some(NamedChain::BaseSepolia) => { + assert_eq!( + metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(), + BASE_SEPOLIA_JOVIAN_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Isthmus) + .unwrap() + .as_timestamp() + .unwrap(), + BASE_SEPOLIA_ISTHMUS_TIMESTAMP + ); + + assert_eq!( + metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(), + BASE_SEPOLIA_CANYON_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Ecotone) + .unwrap() + .as_timestamp() + .unwrap(), + BASE_SEPOLIA_ECOTONE_TIMESTAMP + ); + } + _ => {} + } + } + } } diff --git a/crates/optimism/consensus/src/validation/mod.rs 
b/crates/optimism/consensus/src/validation/mod.rs index 8509a97e7a4..c17e8429c81 100644 --- a/crates/optimism/consensus/src/validation/mod.rs +++ b/crates/optimism/consensus/src/validation/mod.rs @@ -202,12 +202,15 @@ mod tests { use reth_optimism_primitives::OpReceipt; use std::sync::Arc; - const JOVIAN_TIMESTAMP: u64 = 1900000000; + const HOLOCENE_TIMESTAMP: u64 = 1700000000; + const ISTHMUS_TIMESTAMP: u64 = 1750000000; + const JOVIAN_TIMESTAMP: u64 = 1800000000; const BLOCK_TIME_SECONDS: u64 = 2; fn holocene_chainspec() -> Arc { let mut hardforks = BASE_SEPOLIA_HARDFORKS.clone(); - hardforks.insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); + hardforks + .insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(HOLOCENE_TIMESTAMP)); Arc::new(OpChainSpec { inner: ChainSpec { chain: BASE_SEPOLIA.inner.chain, @@ -227,7 +230,7 @@ mod tests { chainspec .inner .hardforks - .insert(OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1800000000)); + .insert(OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(ISTHMUS_TIMESTAMP)); chainspec } @@ -236,7 +239,7 @@ mod tests { chainspec .inner .hardforks - .insert(OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(1900000000)); + .insert(OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(JOVIAN_TIMESTAMP)); chainspec } @@ -264,14 +267,14 @@ mod tests { base_fee_per_gas: Some(1), gas_used: 15763614, gas_limit: 144000000, - timestamp: 1800000003, + timestamp: HOLOCENE_TIMESTAMP + 3, extra_data: Bytes::from_static(&[0, 0, 0, 0, 0, 0, 0, 0, 0]), ..Default::default() }; let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee( &op_chain_spec, &parent, - 1800000005, + HOLOCENE_TIMESTAMP + 5, ); assert_eq!( base_fee.unwrap(), @@ -286,14 +289,14 @@ mod tests { gas_used: 15763614, gas_limit: 144000000, extra_data: Bytes::from_static(&[0, 0, 0, 0, 8, 0, 0, 0, 8]), - timestamp: 1800000003, + timestamp: HOLOCENE_TIMESTAMP + 3, ..Default::default() }; let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee( &holocene_chainspec(), &parent, - 1800000005, + HOLOCENE_TIMESTAMP + 5, ); assert_eq!( base_fee.unwrap(), diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index 85152c59743..202194c63a4 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -18,6 +18,10 @@ extern crate alloc; +use alloy_op_hardforks::{ + BASE_MAINNET_JOVIAN_TIMESTAMP, BASE_SEPOLIA_JOVIAN_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, + OP_SEPOLIA_JOVIAN_TIMESTAMP, +}; // Re-export alloy-op-hardforks types. 
pub use alloy_op_hardforks::{OpHardfork, OpHardforks}; @@ -28,6 +32,7 @@ use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardf /// Dev hardforks pub static DEV_HARDFORKS: LazyLock = LazyLock::new(|| { + const JOVIAN_TIMESTAMP: ForkCondition = ForkCondition::Timestamp(1761840000); ChainHardforks::new(vec![ (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), @@ -58,7 +63,7 @@ pub static DEV_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Granite.boxed(), ForkCondition::Timestamp(0)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(0)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(0)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(0)), + (OpHardfork::Jovian.boxed(), JOVIAN_TIMESTAMP), ]) }); @@ -97,8 +102,7 @@ pub static OP_MAINNET_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1736445601)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1746806401)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1746806401)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update - // timestamp when Jovian is planned */ + (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(OP_MAINNET_JOVIAN_TIMESTAMP)), ]) }); /// Optimism Sepolia list of hardforks. @@ -136,8 +140,7 @@ pub static OP_SEPOLIA_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1744905600)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1744905600)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update - // timestamp when Jovian is planned */ + (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(OP_SEPOLIA_JOVIAN_TIMESTAMP)), ]) }); @@ -176,8 +179,7 @@ pub static BASE_SEPOLIA_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1744905600)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1744905600)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update - // timestamp when Jovian is planned */ + (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(BASE_SEPOLIA_JOVIAN_TIMESTAMP)), ]) }); @@ -216,7 +218,6 @@ pub static BASE_MAINNET_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1736445601)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1746806401)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1746806401)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update - // timestamp when Jovian is planned */ + (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(BASE_MAINNET_JOVIAN_TIMESTAMP)), ]) }); From 30942597db7028b37a09c0311ffdb5cccd1a987c Mon Sep 17 00:00:00 2001 From: Jennifer Date: Wed, 29 Oct 2025 15:31:35 +0000 Subject: [PATCH 248/371] fix: add more context to expected hive failures (#19363) Co-authored-by: rakita --- .github/assets/hive/expected_failures.yaml | 31 +++++++++++++--------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index 7443ec5ee9a..db18aa9ceda 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ 
-43,25 +43,30 @@ sync: [ ] engine-auth: [ ] +# EIP-7610 related tests (Revert creation in case of non-empty storage): +# # tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage -# no fix: it's too expensive to check whether the storage is empty on each creation (? - need more context on WHY) +# The test artificially creates an empty account with storage, then tests EIP-7610's behavior. +# On mainnet, ~25 such accounts exist as contract addresses (derived from keccak(prefix, caller, +# nonce/salt), not from public keys). No private key exists for contract addresses. To trigger +# this with EIP-7702, you'd need to recover a private key from one of the already deployed contract addresses - mathematically impossible. # -# tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment -# modified consolidation contract, not necessarily practical on mainnet (? - need more context) +# tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_* +# Requires hash collision on create2 address to target already deployed accounts with storage. +# ~20-30 such accounts exist from before the state-clear EIP. Creating new accounts targeting +# these requires hash collision - mathematically impossible to trigger on mainnet. +# ref: https://github.com/ethereum/go-ethereum/pull/28666#issuecomment-1891997143 +# +# System contract tests (already fixed and deployed): # # tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout and test_invalid_log_length -# system contract is already fixed and deployed; tests cover scenarios where contract is +# System contract is already fixed and deployed; tests cover scenarios where contract is # malformed which can't happen retroactively. No point in adding checks. # # tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment -# post-fork test contract deployment, should fix for spec compliance but not realistic on mainnet (? - need more context) -# -# tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_* -# status (27th June 2024): was discussed in ACDT meeting, need to be raised in ACDE. -# tests require hash collision on already deployed accounts with storage - mathematically -# impossible to trigger on mainnet. ~20-30 such accounts exist from before the state-clear -# EIP, but creating new accounts targeting these requires hash collision. -# ref: https://github.com/ethereum/go-ethereum/pull/28666#issuecomment-1891997143 +# tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment +# Post-fork system contract deployment tests. Should fix for spec compliance but not realistic +# on mainnet as these contracts are already deployed at the correct addresses. 
eels/consume-engine: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test_engine-zero_nonce]-reth - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-nonzero_balance]-reth @@ -143,6 +148,8 @@ eels/consume-engine: - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth +# Blob limit tests: +# # tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py::test_max_blobs_per_tx_fork_transition[fork_PragueToOsakaAtTime15k-blob_count_7-blockchain_test] # this test inserts a chain via chain.rlp where the last block is invalid, but expects import to stop there, this doesn't work properly with our pipeline import approach hence the import fails when the invalid block is detected. #. In other words, if this test fails, this means we're correctly rejecting the block. From 66cfa9ed1a8c4bc2424aacf6fb2c1e67a78ee9a2 Mon Sep 17 00:00:00 2001 From: Mablr <59505383+mablr@users.noreply.github.com> Date: Wed, 29 Oct 2025 16:45:58 +0100 Subject: [PATCH 249/371] feat(rpc): implement `debug_dbGet` (#19369) --- crates/rpc/rpc-api/src/debug.rs | 2 +- crates/rpc/rpc-builder/tests/it/http.rs | 44 +++++++++++++++++++++++++ crates/rpc/rpc/src/debug.rs | 36 ++++++++++++++++++-- 3 files changed, 78 insertions(+), 4 deletions(-) diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 5dd7401782f..0fca5f18457 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -222,7 +222,7 @@ pub trait DebugApi { /// Returns the raw value of a key stored in the database. #[method(name = "dbGet")] - async fn debug_db_get(&self, key: String) -> RpcResult<()>; + async fn debug_db_get(&self, key: String) -> RpcResult>; /// Retrieves the state that corresponds to the block number and returns a list of accounts /// (including storage and code). 
diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 601fd789608..6be4d5d965d 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -1694,3 +1694,47 @@ async fn test_eth_fee_history_raw() { ) .await; } + +#[tokio::test(flavor = "multi_thread")] +async fn test_debug_db_get() { + reth_tracing::init_test_tracing(); + + let handle = launch_http(vec![RethRpcModule::Debug]).await; + let client = handle.http_client().unwrap(); + + let valid_test_cases = [ + "0x630000000000000000000000000000000000000000000000000000000000000000", + "c00000000000000000000000000000000", + ]; + + for key in valid_test_cases { + DebugApiClient::<()>::debug_db_get(&client, key.into()).await.unwrap(); + } + + // Invalid test cases + let test_cases = [ + ("0x0000", "Key must be 33 bytes, got 2"), + ("00", "Key must be 33 bytes, got 2"), + ( + "0x000000000000000000000000000000000000000000000000000000000000000000", + "Key prefix must be 0x63", + ), + ("000000000000000000000000000000000", "Key prefix must be 0x63"), + ("0xc0000000000000000000000000000000000000000000000000000000000000000", "Invalid hex key"), + ]; + + let match_error_msg = |err: jsonrpsee::core::client::Error, expected: String| -> bool { + match err { + jsonrpsee::core::client::Error::Call(error_obj) => { + error_obj.code() == ErrorCode::InvalidParams.code() && + error_obj.message() == expected + } + _ => false, + } + }; + + for (key, expected) in test_cases { + let err = DebugApiClient::<()>::debug_db_get(&client, key.into()).await.unwrap_err(); + assert!(match_error_msg(err, expected.into())); + } +} diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 99b37a09d9d..75d3b4ad7cc 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -5,7 +5,7 @@ use alloy_consensus::{ use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; use alloy_evm::env::BlockEnvironment; use alloy_genesis::ChainConfig; -use alloy_primitives::{uint, Address, Bytes, B256}; +use alloy_primitives::{hex::decode, uint, Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_eth::{ @@ -1143,8 +1143,38 @@ where Ok(()) } - async fn debug_db_get(&self, _key: String) -> RpcResult<()> { - Ok(()) + /// `debug_db_get` - database key lookup + /// + /// Currently supported: + /// * Contract bytecode associated with a code hash. The key format is: `<0x63>` + /// * Prefix byte: 0x63 (required) + /// * Code hash: 32 bytes + /// Must be provided as either: + /// * Hex string: "0x63..." (66 hex characters after 0x) + /// * Raw byte string: raw byte string (33 bytes) + /// See Geth impl: + async fn debug_db_get(&self, key: String) -> RpcResult> { + let key_bytes = if key.starts_with("0x") { + decode(&key).map_err(|_| EthApiError::InvalidParams("Invalid hex key".to_string()))? 
+ } else { + key.into_bytes() + }; + + if key_bytes.len() != 33 { + return Err(EthApiError::InvalidParams(format!( + "Key must be 33 bytes, got {}", + key_bytes.len() + )) + .into()); + } + if key_bytes[0] != 0x63 { + return Err(EthApiError::InvalidParams("Key prefix must be 0x63".to_string()).into()); + } + + let code_hash = B256::from_slice(&key_bytes[1..33]); + + // No block ID is provided, so it defaults to the latest block + self.debug_code_by_hash(code_hash, None).await.map_err(Into::into) } async fn debug_dump_block(&self, _number: BlockId) -> RpcResult<()> { From 1114a9c07ed0f1ae418e1a91b4f623ca99347c0a Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Wed, 29 Oct 2025 12:02:57 -0400 Subject: [PATCH 250/371] feat(precompiles/jovian): add jovian precompiles to op-reth (#19333) Co-authored-by: Matthias Seitz --- Cargo.lock | 32 +++++++++++++-------------- Cargo.toml | 37 +++++++++++++++++--------------- crates/optimism/node/src/node.rs | 2 +- crates/optimism/rpc/src/miner.rs | 4 ++++ 4 files changed, 41 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62ce53a3f98..a944843fc52 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -253,9 +253,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28bd79e109f2b3ff81ed1a93ed3d07cf175ca627fd4fad176df721041cc40dcc" +checksum = "08e9e656d58027542447c1ca5aa4ca96293f09e6920c4651953b7451a7c35e4e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -370,9 +370,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35db78840a29b14fec51f3399a6dc82ecc815a5766eb80b32e69a0c92adddc14" +checksum = "593ce78cea49e4700b4d9061fb16a5455265176541eeba91265f548659d33229" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6031,9 +6031,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "op-alloy-consensus" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1fc8aa0e2f5b136d101630be009e4e6dbdd1f17bc3ce670f431511600d2930" +checksum = "e42e9de945efe3c2fbd207e69720c9c1af2b8caa6872aee0e216450c25a3ca70" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6057,9 +6057,9 @@ checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc" [[package]] name = "op-alloy-network" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c5cca341184dbfcb49dbc124e5958e6a857499f04782907e5d969abb644e0b6" +checksum = "9c9da49a2812a0189dd05e81e4418c3ae13fd607a92654107f02ebad8e91ed9e" dependencies = [ "alloy-consensus", "alloy-network", @@ -6073,9 +6073,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190e9884a69012d4abc26d1c0bc60fe01d57899ab5417c8f38105ffaaab4149b" +checksum = "b62ceb771ab9323647093ea2e58dc7f25289a1b95cbef2faa2620f6ca2dee4d9" dependencies = [ "alloy-primitives", "jsonrpsee", @@ -6083,9 +6083,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "274972c3c5e911b6675f6794ea0476b05e0bc1ea7e464f99ec2dc01b76d2eeb6" +checksum = 
"9cd1eb7bddd2232856ba9d259320a094f9edf2b9061acfe5966e7960208393e6" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6103,9 +6103,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "860edb8d5a8d54bbcdabcbd8642c45b974351ce4e10ed528dd4508eee2a43833" +checksum = "5429622150d18d8e6847a701135082622413e2451b64d03f979415d764566bef" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6143,9 +6143,9 @@ dependencies = [ [[package]] name = "op-revm" -version = "11.2.0" +version = "11.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33ab6a7bbcfffcbf784de78f14593b6389003f5c69653fcffcc163459a37d69" +checksum = "3f68e30e34902f61fc053ea3094229d0bf7c78ed1d24e6d0d89306c2d2db1687" dependencies = [ "auto_impl", "revm", diff --git a/Cargo.toml b/Cargo.toml index cfa4aa845ba..e00c7a148e4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -469,24 +469,24 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { version = "30.1.1", default-features = false } +revm = { version = "30.2.0", default-features = false } revm-bytecode = { version = "7.0.2", default-features = false } -revm-database = { version = "9.0.0", default-features = false } -revm-state = { version = "8.0.0", default-features = false } -revm-primitives = { version = "21.0.0", default-features = false } -revm-interpreter = { version = "27.0.0", default-features = false } -revm-inspector = { version = "11.1.0", default-features = false } -revm-context = { version = "10.1.0", default-features = false } -revm-context-interface = { version = "11.1.0", default-features = false } -revm-database-interface = { version = "8.0.1", default-features = false } -op-revm = { version = "11.2.0", default-features = false } +revm-database = { version = "9.0.2", default-features = false } +revm-state = { version = "8.0.2", default-features = false } +revm-primitives = { version = "21.0.1", default-features = false } +revm-interpreter = { version = "27.0.2", default-features = false } +revm-inspector = { version = "11.1.2", default-features = false } +revm-context = { version = "10.1.2", default-features = false } +revm-context-interface = { version = "11.1.2", default-features = false } +revm-database-interface = { version = "8.0.3", default-features = false } +op-revm = { version = "11.3.0", default-features = false } revm-inspectors = "0.31.0" # eth alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.4.1" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.22.4", default-features = false } +alloy-evm = { version = "0.22.5", default-features = false } alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.4.1" @@ -524,13 +524,13 @@ alloy-transport-ipc = { version = "1.0.41", default-features = false } alloy-transport-ws = { version = "1.0.41", default-features = false } # op -alloy-op-evm = { version = "0.22.4", default-features = false } +alloy-op-evm = { version = "0.22.6", default-features = false } alloy-op-hardforks = "0.4.2" -op-alloy-rpc-types = { version = "0.21.0", default-features = false } -op-alloy-rpc-types-engine = { version = "0.21.0", default-features = false } -op-alloy-network = { version = 
"0.21.0", default-features = false } -op-alloy-consensus = { version = "0.21.0", default-features = false } -op-alloy-rpc-jsonrpsee = { version = "0.21.0", default-features = false } +op-alloy-rpc-types = { version = "0.22.0", default-features = false } +op-alloy-rpc-types-engine = { version = "0.22.0", default-features = false } +op-alloy-network = { version = "0.22.0", default-features = false } +op-alloy-consensus = { version = "0.22.0", default-features = false } +op-alloy-rpc-jsonrpsee = { version = "0.22.0", default-features = false } op-alloy-flz = { version = "0.13.1", default-features = false } # misc @@ -773,3 +773,6 @@ vergen-git2 = "1.0.5" # jsonrpsee-server = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" } # jsonrpsee-http-client = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" } # jsonrpsee-types = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" } + +# alloy-evm = { git = "https://github.com/alloy-rs/evm", rev = "a69f0b45a6b0286e16072cb8399e02ce6ceca353" } +# alloy-op-evm = { git = "https://github.com/alloy-rs/evm", rev = "a69f0b45a6b0286e16072cb8399e02ce6ceca353" } diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index ebad4e66999..17380056d13 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -559,7 +559,7 @@ where modules.merge_if_module_configured(RethRpcModule::Debug, debug_ext.into_rpc())?; // extend the miner namespace if configured in the regular http server - modules.merge_if_module_configured( + modules.add_or_replace_if_module_configured( RethRpcModule::Miner, miner_ext.clone().into_rpc(), )?; diff --git a/crates/optimism/rpc/src/miner.rs b/crates/optimism/rpc/src/miner.rs index a4de556ea13..b01b37b58b2 100644 --- a/crates/optimism/rpc/src/miner.rs +++ b/crates/optimism/rpc/src/miner.rs @@ -35,6 +35,10 @@ impl MinerApiExtServer for OpMinerExtApi { Ok(true) } + + async fn set_gas_limit(&self, _max_block_gas: U64) -> RpcResult { + Ok(true) + } } /// Optimism miner metrics From dbc93466cac43aa9f869e0d62b6a26393debe9ac Mon Sep 17 00:00:00 2001 From: phrwlk Date: Wed, 29 Oct 2025 17:55:35 +0200 Subject: [PATCH 251/371] fix(engine): align compute_trie_input docs with actual persistence behavior (#19385) Co-authored-by: Brian Picciano --- crates/engine/tree/src/tree/payload_validator.rs | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index b30eae1d1cb..4d42d889757 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -955,14 +955,11 @@ where /// /// It works as follows: /// 1. Collect in-memory blocks that are descendants of the provided parent hash using - /// [`crate::tree::TreeState::blocks_by_hash`]. - /// 2. If the persistence is in progress, and the block that we're computing the trie input for - /// is a descendant of the currently persisting blocks, we need to be sure that in-memory - /// blocks are not overlapping with the database blocks that may have been already persisted. - /// To do that, we're filtering out in-memory blocks that are lower than the highest database - /// block. - /// 3. Once in-memory blocks are collected and optionally filtered, we compute the - /// [`HashedPostState`] from them. + /// [`crate::tree::TreeState::blocks_by_hash`]. 
This returns the highest persisted ancestor + /// hash (`block_hash`) and the list of in-memory descendant blocks. + /// 2. Extend the `TrieInput` with the contents of these in-memory blocks (from oldest to + /// newest) to build the overlay state and trie updates that sit on top of the database view + /// anchored at `block_hash`. #[instrument( level = "debug", target = "engine::tree::payload_validator", From ea2b26f46a83c23f06b034050be8eb56ac17a535 Mon Sep 17 00:00:00 2001 From: Merkel Tranjes <140164174+rnkrtt@users.noreply.github.com> Date: Wed, 29 Oct 2025 17:32:43 +0100 Subject: [PATCH 252/371] fix: remove PersistenceState from TreeCtx (#19356) --- crates/engine/tree/src/tree/mod.rs | 30 +---------- .../engine/tree/src/tree/payload_validator.rs | 44 ++-------------- .../engine/tree/src/tree/persistence_state.rs | 1 + crates/engine/tree/src/tree/state.rs | 51 +++++++++++-------- crates/engine/tree/src/tree/tests.rs | 3 +- 5 files changed, 35 insertions(+), 94 deletions(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 324e3375d2c..5e2ed1c513c 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -2458,8 +2458,7 @@ where Ok(is_fork) => is_fork, }; - let ctx = - TreeCtx::new(&mut self.state, &self.persistence_state, &self.canonical_in_memory_state); + let ctx = TreeCtx::new(&mut self.state, &self.canonical_in_memory_state); let start = Instant::now(); @@ -2802,30 +2801,3 @@ pub enum InsertPayloadOk { /// The payload was valid and inserted into the tree. Inserted(BlockStatus), } - -/// Whether or not the blocks are currently persisting and the input block is a descendant. -#[derive(Debug, Clone, Copy)] -pub enum PersistingKind { - /// The blocks are not currently persisting. - NotPersisting, - /// The blocks are currently persisting but the input block is not a descendant. - PersistingNotDescendant, - /// The blocks are currently persisting and the input block is a descendant. - PersistingDescendant, -} - -impl PersistingKind { - /// Returns true if the parallel state root can be run. - /// - /// We only run the parallel state root if we are not currently persisting any blocks or - /// persisting blocks that are all ancestors of the one we are calculating the state root for. - pub const fn can_run_parallel_state_root(&self) -> bool { - matches!(self, Self::NotPersisting | Self::PersistingDescendant) - } - - /// Returns true if the blocks are currently being persisted and the input block is a - /// descendant. 
- pub const fn is_descendant(&self) -> bool { - matches!(self, Self::PersistingDescendant) - } -} diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 4d42d889757..fdd6b30a6e8 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -6,11 +6,10 @@ use crate::tree::{ executor::WorkloadExecutor, instrumented_state::InstrumentedStateProvider, payload_processor::{multiproof::MultiProofConfig, PayloadProcessor}, - persistence_state::CurrentPersistenceAction, precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap}, sparse_trie::StateRootComputeOutcome, - EngineApiMetrics, EngineApiTreeState, ExecutionEnv, PayloadHandle, PersistenceState, - PersistingKind, StateProviderBuilder, StateProviderDatabase, TreeConfig, + EngineApiMetrics, EngineApiTreeState, ExecutionEnv, PayloadHandle, StateProviderBuilder, + StateProviderDatabase, TreeConfig, }; use alloy_consensus::transaction::Either; use alloy_eips::{eip1898::BlockWithParent, NumHash}; @@ -51,8 +50,6 @@ use tracing::{debug, debug_span, error, info, instrument, trace, warn}; pub struct TreeCtx<'a, N: NodePrimitives> { /// The engine API tree state state: &'a mut EngineApiTreeState, - /// Information about the current persistence state - persistence: &'a PersistenceState, /// Reference to the canonical in-memory state canonical_in_memory_state: &'a CanonicalInMemoryState, } @@ -61,7 +58,6 @@ impl<'a, N: NodePrimitives> std::fmt::Debug for TreeCtx<'a, N> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("TreeCtx") .field("state", &"EngineApiTreeState") - .field("persistence_info", &self.persistence) .field("canonical_in_memory_state", &self.canonical_in_memory_state) .finish() } @@ -71,10 +67,9 @@ impl<'a, N: NodePrimitives> TreeCtx<'a, N> { /// Creates a new tree context pub const fn new( state: &'a mut EngineApiTreeState, - persistence: &'a PersistenceState, canonical_in_memory_state: &'a CanonicalInMemoryState, ) -> Self { - Self { state, persistence, canonical_in_memory_state } + Self { state, canonical_in_memory_state } } /// Returns a reference to the engine tree state @@ -87,43 +82,10 @@ impl<'a, N: NodePrimitives> TreeCtx<'a, N> { self.state } - /// Returns a reference to the persistence info - pub const fn persistence(&self) -> &PersistenceState { - self.persistence - } - /// Returns a reference to the canonical in-memory state pub const fn canonical_in_memory_state(&self) -> &'a CanonicalInMemoryState { self.canonical_in_memory_state } - - /// Determines the persisting kind for the given block based on persistence info. - /// - /// Based on the given header it returns whether any conflicting persistence operation is - /// currently in progress. - /// - /// This is adapted from the `persisting_kind_for` method in `EngineApiTreeHandler`. - pub fn persisting_kind_for(&self, block: BlockWithParent) -> PersistingKind { - // Check that we're currently persisting. - let Some(action) = self.persistence().current_action() else { - return PersistingKind::NotPersisting - }; - // Check that the persistince action is saving blocks, not removing them. - let CurrentPersistenceAction::SavingBlocks { highest } = action else { - return PersistingKind::PersistingNotDescendant - }; - - // The block being validated can only be a descendant if its number is higher than - // the highest block persisting. Otherwise, it's likely a fork of a lower block. 
- if block.block.number > highest.number && - self.state().tree_state.is_descendant(*highest, block) - { - return PersistingKind::PersistingDescendant - } - - // In all other cases, the block is not a descendant. - PersistingKind::PersistingNotDescendant - } } /// A helper type that provides reusable payload validation logic for network-specific validators. diff --git a/crates/engine/tree/src/tree/persistence_state.rs b/crates/engine/tree/src/tree/persistence_state.rs index bbb981a531a..82a8078447d 100644 --- a/crates/engine/tree/src/tree/persistence_state.rs +++ b/crates/engine/tree/src/tree/persistence_state.rs @@ -67,6 +67,7 @@ impl PersistenceState { /// Returns the current persistence action. If there is no persistence task in progress, then /// this returns `None`. + #[cfg(test)] pub(crate) fn current_action(&self) -> Option<&CurrentPersistenceAction> { self.rx.as_ref().map(|rx| &rx.2) } diff --git a/crates/engine/tree/src/tree/state.rs b/crates/engine/tree/src/tree/state.rs index a10d26e3f27..2f083a4d9e7 100644 --- a/crates/engine/tree/src/tree/state.rs +++ b/crates/engine/tree/src/tree/state.rs @@ -1,7 +1,7 @@ //! Functionality related to tree state. use crate::engine::EngineApiKind; -use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; +use alloy_eips::BlockNumHash; use alloy_primitives::{ map::{HashMap, HashSet}, BlockNumber, B256, @@ -294,10 +294,37 @@ impl TreeState { } } + /// Updates the canonical head to the given block. + pub(crate) const fn set_canonical_head(&mut self, new_head: BlockNumHash) { + self.current_canonical_head = new_head; + } + + /// Returns the tracked canonical head. + pub(crate) const fn canonical_head(&self) -> &BlockNumHash { + &self.current_canonical_head + } + + /// Returns the block hash of the canonical head. + pub(crate) const fn canonical_block_hash(&self) -> B256 { + self.canonical_head().hash + } + + /// Returns the block number of the canonical head. + pub(crate) const fn canonical_block_number(&self) -> BlockNumber { + self.canonical_head().number + } +} + +#[cfg(test)] +impl TreeState { /// Determines if the second block is a descendant of the first block. /// /// If the two blocks are the same, this returns `false`. - pub(crate) fn is_descendant(&self, first: BlockNumHash, second: BlockWithParent) -> bool { + pub(crate) fn is_descendant( + &self, + first: BlockNumHash, + second: alloy_eips::eip1898::BlockWithParent, + ) -> bool { // If the second block's parent is the first block's hash, then it is a direct child // and we can return early. if second.parent == first.hash { @@ -330,26 +357,6 @@ impl TreeState { // Now the block numbers should be equal, so we compare hashes. current_block.recovered_block().parent_hash() == first.hash } - - /// Updates the canonical head to the given block. - pub(crate) const fn set_canonical_head(&mut self, new_head: BlockNumHash) { - self.current_canonical_head = new_head; - } - - /// Returns the tracked canonical head. - pub(crate) const fn canonical_head(&self) -> &BlockNumHash { - &self.current_canonical_head - } - - /// Returns the block hash of the canonical head. - pub(crate) const fn canonical_block_hash(&self) -> B256 { - self.canonical_head().hash - } - - /// Returns the block number of the canonical head. 
- pub(crate) const fn canonical_block_number(&self) -> BlockNumber { - self.canonical_head().number - } } #[cfg(test)] diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index 7c40680c809..7fbae4cac5c 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -403,7 +403,7 @@ impl ValidatorTestHarness { Self { harness, validator, metrics: TestMetrics::default() } } - /// Configure `PersistenceState` for specific `PersistingKind` scenarios + /// Configure `PersistenceState` for specific persistence scenarios fn start_persistence_operation(&mut self, action: CurrentPersistenceAction) { use tokio::sync::oneshot; @@ -432,7 +432,6 @@ impl ValidatorTestHarness { ) -> ValidationOutcome { let ctx = TreeCtx::new( &mut self.harness.tree.state, - &self.harness.tree.persistence_state, &self.harness.tree.canonical_in_memory_state, ); let result = self.validator.validate_block(block, ctx); From 715369b819308df96a9b329c30f4ce88ac36a168 Mon Sep 17 00:00:00 2001 From: Avory Date: Wed, 29 Oct 2025 18:36:02 +0200 Subject: [PATCH 253/371] docs: improve RESS protocol module documentation (#19370) --- crates/ress/protocol/src/lib.rs | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/crates/ress/protocol/src/lib.rs b/crates/ress/protocol/src/lib.rs index 50db2a3191c..82820cc5a31 100644 --- a/crates/ress/protocol/src/lib.rs +++ b/crates/ress/protocol/src/lib.rs @@ -1,5 +1,30 @@ -//! `ress` protocol is an `RLPx` subprotocol for stateless nodes. -//! following [RLPx specs](https://github.com/ethereum/devp2p/blob/master/rlpx.md) +//! RESS protocol for stateless Ethereum nodes. +//! +//! Enables stateless nodes to fetch execution witnesses, bytecode, and block data from +//! stateful peers for minimal on-disk state with full execution capability. +//! +//! ## Node Types +//! +//! - **Stateless**: Minimal state, requests data on-demand +//! - **Stateful**: Full Ethereum nodes providing state data +//! +//! Valid connections: Stateless ↔ Stateless ✅, Stateless ↔ Stateful ✅, Stateful ↔ Stateful ❌ +//! +//! ## Messages +//! +//! - `NodeType (0x00)`: Handshake +//! - `GetHeaders/Headers (0x01/0x02)`: Block headers +//! - `GetBlockBodies/BlockBodies (0x03/0x04)`: Block bodies +//! - `GetBytecode/Bytecode (0x05/0x06)`: Contract bytecode +//! - `GetWitness/Witness (0x07/0x08)`: Execution witnesses +//! +//! ## Flow +//! +//! 1. Exchange `NodeType` for compatibility +//! 2. Download ancestor blocks via headers/bodies +//! 3. For new payloads: request witness → get missing bytecode → execute +//! +//! 
Protocol version: `ress/1` #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", From 7989c7094bc1cfa6fdb830bfbabc38f174ba713e Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 29 Oct 2025 16:09:43 -0400 Subject: [PATCH 254/371] docs: fix otlp flag in monioring docs (#19394) --- docs/vocs/docs/pages/run/monitoring.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/vocs/docs/pages/run/monitoring.mdx b/docs/vocs/docs/pages/run/monitoring.mdx index d6c73436098..1b463efdb7d 100644 --- a/docs/vocs/docs/pages/run/monitoring.mdx +++ b/docs/vocs/docs/pages/run/monitoring.mdx @@ -10,10 +10,10 @@ Reth exposes a number of metrics which can be enabled by adding the `--metrics` reth node --metrics 127.0.0.1:9001 ``` -Alternatively, you can export metrics to an OpenTelemetry collector using `--otlp-metrics`: +Additionally, you can export spans to an OpenTelemetry collector using `--tracing-otlp`: ```bash -reth node --otlp-metrics 127.0.0.1:4318 +reth node --tracing-otlp=http://localhost:4318/v1/traces ``` Now, as the node is running, you can `curl` the endpoint you provided to the `--metrics` flag to get a text dump of the metrics at that time: From 7dc07e82585b58c0d4eef9dabc58df368c242a9b Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Wed, 29 Oct 2025 16:22:54 -0400 Subject: [PATCH 255/371] feat(jovian/rpc): update receipts to transmit over RPC with Jovian compatible fields (#19368) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/optimism/rpc/Cargo.toml | 1 + crates/optimism/rpc/src/eth/receipt.rs | 170 ++++++++++++++++++++++++- 3 files changed, 168 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a944843fc52..8b64fa5ca3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9525,6 +9525,7 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-json-rpc", + "alloy-op-hardforks", "alloy-primitives", "alloy-rpc-client", "alloy-rpc-types-debug", diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index acbc491f648..38114ea9ff9 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -84,6 +84,7 @@ metrics.workspace = true [dev-dependencies] reth-optimism-chainspec.workspace = true +alloy-op-hardforks.workspace = true [features] client = [ diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 5d1e8e29794..c04a4d2c72d 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -6,6 +6,7 @@ use alloy_eips::eip2718::Encodable2718; use alloy_rpc_types_eth::{Log, TransactionReceipt}; use op_alloy_consensus::{OpReceiptEnvelope, OpTransaction}; use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; +use op_revm::estimate_tx_compressed_size; use reth_chainspec::ChainSpecProvider; use reth_node_api::NodePrimitives; use reth_optimism_evm::RethL1BlockInfo; @@ -287,7 +288,7 @@ impl OpReceiptBuilder { let timestamp = input.meta.timestamp; let block_number = input.meta.block_number; let tx_signed = *input.tx.inner(); - let core_receipt = build_receipt(input, None, |receipt, next_log_index, meta| { + let mut core_receipt = build_receipt(input, None, |receipt, next_log_index, meta| { let map_logs = move |receipt: alloy_consensus::Receipt| { let Receipt { status, cumulative_gas_used, logs } = receipt; let logs = Log::collect_for_receipt(next_log_index, meta, logs); 
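// A minimal, self-contained sketch of the Jovian DA-footprint arithmetic that the following
// hunk stores in the receipt's `blob_gas_used` field: the estimated compressed transaction
// size is divided by 1_000_000 and multiplied by the chain's DA footprint gas scalar, mirroring
// the `saturating_div` / `saturating_mul` chain in the patch. The concrete numbers here are
// hypothetical and only illustrate the arithmetic, not values taken from the PR.
fn da_footprint_example() {
    let estimated_compressed_size: u64 = 60_000_000; // hypothetical estimate of the compressed tx size
    let da_footprint_gas_scalar: u64 = 10; // hypothetical chain parameter
    let da_size = (estimated_compressed_size / 1_000_000) * da_footprint_gas_scalar;
    assert_eq!(da_size, 600);
}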
@@ -306,12 +307,28 @@ impl OpReceiptBuilder { OpReceipt::Eip7702(receipt) => { OpReceiptEnvelope::Eip7702(map_logs(receipt).into_with_bloom()) } + OpReceipt::Deposit(receipt) => { OpReceiptEnvelope::Deposit(receipt.map_inner(map_logs).into_with_bloom()) } } }); + // In jovian, we're using the blob gas used field to store the current da + // footprint's value. + // We're computing the jovian blob gas used before building the receipt since the inputs get + // consumed by the `build_receipt` function. + chain_spec.is_jovian_active_at_timestamp(timestamp).then(|| { + // Estimate the size of the transaction in bytes and multiply by the DA + // footprint gas scalar. + // Jovian specs: `https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/jovian/exec-engine.md#da-footprint-block-limit` + let da_size = estimate_tx_compressed_size(tx_signed.encoded_2718().as_slice()) + .saturating_div(1_000_000) + .saturating_mul(l1_block_info.da_footprint_gas_scalar.unwrap_or_default().into()); + + core_receipt.blob_gas_used = Some(da_size); + }); + let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp, block_number) .l1_block_info(chain_spec, tx_signed, l1_block_info)? .build(); @@ -333,11 +350,16 @@ impl OpReceiptBuilder { #[cfg(test)] mod test { use super::*; - use alloy_consensus::{Block, BlockBody}; - use alloy_primitives::{hex, U256}; + use alloy_consensus::{transaction::TransactionMeta, Block, BlockBody, Eip658Value, TxEip7702}; + use alloy_op_hardforks::{ + OpChainHardforks, OP_MAINNET_ISTHMUS_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, + }; + use alloy_primitives::{hex, Address, Bytes, Signature, U256}; + use op_alloy_consensus::OpTypedTransaction; use op_alloy_network::eip2718::Decodable2718; use reth_optimism_chainspec::{BASE_MAINNET, OP_MAINNET}; - use reth_optimism_primitives::OpTransactionSigned; + use reth_optimism_primitives::{OpPrimitives, OpTransactionSigned}; + use reth_primitives_traits::Recovered; /// OP Mainnet transaction at index 0 in block 124665056. 
/// @@ -567,4 +589,144 @@ mod test { assert_eq!(operator_fee_constant, None, "incorrect operator fee constant"); assert_eq!(da_footprint_gas_scalar, None, "incorrect da footprint gas scalar"); } + + #[test] + fn da_footprint_gas_scalar_included_in_receipt_post_jovian() { + const DA_FOOTPRINT_GAS_SCALAR: u16 = 10; + + let tx = TxEip7702 { + chain_id: 1u64, + nonce: 0, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + input: Bytes::from(vec![1, 2]), + access_list: Default::default(), + authorization_list: Default::default(), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + let tx = OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + + let mut l1_block_info = op_revm::L1BlockInfo { + da_footprint_gas_scalar: Some(DA_FOOTPRINT_GAS_SCALAR), + ..Default::default() + }; + + let op_hardforks = OpChainHardforks::op_mainnet(); + + let receipt = OpReceiptFieldsBuilder::new(OP_MAINNET_JOVIAN_TIMESTAMP, u64::MAX) + .l1_block_info(&op_hardforks, &tx, &mut l1_block_info) + .expect("should parse revm l1 info") + .build(); + + assert_eq!(receipt.l1_block_info.da_footprint_gas_scalar, Some(DA_FOOTPRINT_GAS_SCALAR)); + } + + #[test] + fn blob_gas_used_included_in_receipt_post_jovian() { + const DA_FOOTPRINT_GAS_SCALAR: u16 = 100; + let tx = TxEip7702 { + chain_id: 1u64, + nonce: 0, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + access_list: Default::default(), + authorization_list: Default::default(), + input: Bytes::from(vec![0; 1_000_000]), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + let tx = OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + + let mut l1_block_info = op_revm::L1BlockInfo { + da_footprint_gas_scalar: Some(DA_FOOTPRINT_GAS_SCALAR), + ..Default::default() + }; + + let op_hardforks = OpChainHardforks::op_mainnet(); + + let op_receipt = OpReceiptBuilder::new( + &op_hardforks, + ConvertReceiptInput:: { + tx: Recovered::new_unchecked(&tx, Address::default()), + receipt: OpReceipt::Eip7702(Receipt { + status: Eip658Value::Eip658(true), + cumulative_gas_used: 100, + logs: vec![], + }), + gas_used: 100, + next_log_index: 0, + meta: TransactionMeta { + timestamp: OP_MAINNET_JOVIAN_TIMESTAMP, + ..Default::default() + }, + }, + &mut l1_block_info, + ) + .unwrap(); + + let expected_blob_gas_used = estimate_tx_compressed_size(tx.encoded_2718().as_slice()) + .saturating_div(1_000_000) + .saturating_mul(DA_FOOTPRINT_GAS_SCALAR.into()); + + assert_eq!(op_receipt.core_receipt.blob_gas_used, Some(expected_blob_gas_used)); + } + + #[test] + fn blob_gas_used_not_included_in_receipt_post_isthmus() { + const DA_FOOTPRINT_GAS_SCALAR: u16 = 100; + let tx = TxEip7702 { + chain_id: 1u64, + nonce: 0, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + access_list: Default::default(), + authorization_list: Default::default(), + input: Bytes::from(vec![0; 1_000_000]), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + let tx = OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + + let mut l1_block_info = op_revm::L1BlockInfo { + da_footprint_gas_scalar: Some(DA_FOOTPRINT_GAS_SCALAR), + ..Default::default() + }; + + let op_hardforks = 
OpChainHardforks::op_mainnet(); + + let op_receipt = OpReceiptBuilder::new( + &op_hardforks, + ConvertReceiptInput:: { + tx: Recovered::new_unchecked(&tx, Address::default()), + receipt: OpReceipt::Eip7702(Receipt { + status: Eip658Value::Eip658(true), + cumulative_gas_used: 100, + logs: vec![], + }), + gas_used: 100, + next_log_index: 0, + meta: TransactionMeta { + timestamp: OP_MAINNET_ISTHMUS_TIMESTAMP, + ..Default::default() + }, + }, + &mut l1_block_info, + ) + .unwrap(); + + assert_eq!(op_receipt.core_receipt.blob_gas_used, None); + } } From 1ed41d515150b449ba1e690613215bb7d1822e5e Mon Sep 17 00:00:00 2001 From: Galoretka Date: Wed, 29 Oct 2025 22:24:40 +0200 Subject: [PATCH 256/371] chore(primitives-traits): gate test-only modules (#19393) --- crates/primitives-traits/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 67df9637fa4..5400f52a204 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -148,6 +148,7 @@ pub use block::{ Block, FullBlock, RecoveredBlock, SealedBlock, }; +#[cfg(test)] mod withdrawal; pub use alloy_eips::eip2718::WithEncoded; @@ -156,6 +157,7 @@ pub mod crypto; mod error; pub use error::{GotExpected, GotExpectedBoxed}; +#[cfg(test)] mod log; pub use alloy_primitives::{logs_bloom, Log, LogData}; From be50b284b34b8e317f1a1ae3f53ffb2f3a09c74e Mon Sep 17 00:00:00 2001 From: leniram159 Date: Wed, 29 Oct 2025 21:34:31 +0100 Subject: [PATCH 257/371] feat: display blob params alongside hardfork info (#19358) --- crates/chainspec/src/spec.rs | 27 ++++++++++++++++++++---- crates/ethereum/hardforks/src/display.rs | 27 +++++++++++++++++++++--- 2 files changed, 47 insertions(+), 7 deletions(-) diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 22ddddbc719..4c71b7a465f 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -10,7 +10,7 @@ use crate::{ sepolia::SEPOLIA_PARIS_BLOCK, EthChainSpec, }; -use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use alloc::{boxed::Box, format, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; use alloy_consensus::{ constants::{ @@ -440,7 +440,26 @@ impl ChainSpec { /// Returns the hardfork display helper. pub fn display_hardforks(&self) -> DisplayHardforks { - DisplayHardforks::new(self.hardforks.forks_iter()) + // Create an iterator with hardfork, condition, and optional blob metadata + let hardforks_with_meta = self.hardforks.forks_iter().map(|(fork, condition)| { + // Generate blob metadata for timestamp-based hardforks that have blob params + let metadata = match condition { + ForkCondition::Timestamp(timestamp) => { + // Try to get blob params for this timestamp + // This automatically handles all hardforks with blob support + EthChainSpec::blob_params_at_timestamp(self, timestamp).map(|params| { + format!( + "blob: (target: {}, max: {}, fraction: {})", + params.target_blob_count, params.max_blob_count, params.update_fraction + ) + }) + } + _ => None, + }; + (fork, condition, metadata) + }); + + DisplayHardforks::with_meta(hardforks_with_meta) } /// Get the fork id for the given hardfork. 
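// A small sketch of the metadata string attached to timestamp-based hardforks by the mapping
// above; with Cancun-style blob parameters (target 3, max 6, update fraction 3338477) it renders
// exactly the "blob: (...)" suffix that the updated test expectation below checks for.
fn blob_metadata_example() -> String {
    let (target_blob_count, max_blob_count, update_fraction) = (3u64, 6u64, 3_338_477u64);
    format!(
        "blob: (target: {}, max: {}, fraction: {})",
        target_blob_count, max_blob_count, update_fraction
    )
}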
@@ -1157,8 +1176,8 @@ Merge hard forks: - Paris @58750000000000000000000 (network is known to be merged) Post-merge hard forks (timestamp based): - Shanghai @1681338455 -- Cancun @1710338135 -- Prague @1746612311" +- Cancun @1710338135 blob: (target: 3, max: 6, fraction: 3338477) +- Prague @1746612311 blob: (target: 6, max: 9, fraction: 5007716)" ); } diff --git a/crates/ethereum/hardforks/src/display.rs b/crates/ethereum/hardforks/src/display.rs index e40a117d26a..b01c478df80 100644 --- a/crates/ethereum/hardforks/src/display.rs +++ b/crates/ethereum/hardforks/src/display.rs @@ -25,6 +25,8 @@ struct DisplayFork { activated_at: ForkCondition, /// An optional EIP (e.g. `EIP-1559`). eip: Option, + /// Optional metadata to display alongside the fork (e.g. blob parameters) + metadata: Option, } impl core::fmt::Display for DisplayFork { @@ -38,6 +40,9 @@ impl core::fmt::Display for DisplayFork { match self.activated_at { ForkCondition::Block(at) | ForkCondition::Timestamp(at) => { write!(f, "{name_with_eip:32} @{at}")?; + if let Some(metadata) = &self.metadata { + write!(f, " {metadata}")?; + } } ForkCondition::TTD { total_difficulty, .. } => { // All networks that have merged are finalized. @@ -45,6 +50,9 @@ impl core::fmt::Display for DisplayFork { f, "{name_with_eip:32} @{total_difficulty} (network is known to be merged)", )?; + if let Some(metadata) = &self.metadata { + write!(f, " {metadata}")?; + } } ForkCondition::Never => unreachable!(), } @@ -145,14 +153,27 @@ impl DisplayHardforks { pub fn new<'a, I>(hardforks: I) -> Self where I: IntoIterator, + { + // Delegate to with_meta by mapping the iterator to include None for metadata + Self::with_meta(hardforks.into_iter().map(|(fork, condition)| (fork, condition, None))) + } + + /// Creates a new [`DisplayHardforks`] from an iterator of hardforks with optional metadata. 
+ pub fn with_meta<'a, I>(hardforks: I) -> Self + where + I: IntoIterator)>, { let mut pre_merge = Vec::new(); let mut with_merge = Vec::new(); let mut post_merge = Vec::new(); - for (fork, condition) in hardforks { - let mut display_fork = - DisplayFork { name: fork.name().to_string(), activated_at: condition, eip: None }; + for (fork, condition, metadata) in hardforks { + let mut display_fork = DisplayFork { + name: fork.name().to_string(), + activated_at: condition, + eip: None, + metadata, + }; match condition { ForkCondition::Block(_) => { From e808b9ab8fe0976954ba3c92ee26ebb8e6f9ee81 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 Oct 2025 22:19:29 +0100 Subject: [PATCH 258/371] chore: fix unused dep (#19397) --- crates/net/downloaders/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 57094813eee..056d809d02f 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -51,7 +51,7 @@ thiserror.workspace = true tracing.workspace = true tempfile = { workspace = true, optional = true } -itertools.workspace = true +itertools = { workspace = true, optional = true } [dev-dependencies] async-compression = { workspace = true, features = ["gzip", "tokio"] } @@ -70,7 +70,7 @@ tempfile.workspace = true [features] default = [] -file-client = ["dep:async-compression", "dep:alloy-rlp"] +file-client = ["dep:async-compression", "dep:alloy-rlp", "dep:itertools"] test-utils = [ "tempfile", "reth-consensus/test-utils", From 752891b7cba21252039e0f6625586ec3b2fffb27 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 Oct 2025 22:19:42 +0100 Subject: [PATCH 259/371] chore: fix unused warning (#19395) --- crates/optimism/evm/Cargo.toml | 2 +- crates/optimism/evm/src/config.rs | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index f2dce0a9ba0..d7bbe29330f 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -76,4 +76,4 @@ std = [ "reth-storage-errors/std", ] portable = ["reth-revm/portable"] -rpc = ["reth-rpc-eth-api"] +rpc = ["reth-rpc-eth-api", "reth-optimism-primitives/serde", "reth-optimism-primitives/reth-codec"] diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index 47ed2853d0a..6ae2a91a6cb 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -1,8 +1,6 @@ pub use alloy_op_evm::{ spec as revm_spec, spec_by_timestamp_after_bedrock as revm_spec_by_timestamp_after_bedrock, }; - -use alloy_consensus::BlockHeader; use revm::primitives::{Address, Bytes, B256}; /// Context relevant for execution of a next block w.r.t OP. 
@@ -23,7 +21,7 @@ pub struct OpNextBlockEnvAttributes { } #[cfg(feature = "rpc")] -impl reth_rpc_eth_api::helpers::pending_block::BuildPendingEnv +impl reth_rpc_eth_api::helpers::pending_block::BuildPendingEnv for OpNextBlockEnvAttributes { fn build_pending_env(parent: &crate::SealedHeader) -> Self { From b15c28531029b06eb83180d980f210cf7a1e6284 Mon Sep 17 00:00:00 2001 From: strmfos <155266597+strmfos@users.noreply.github.com> Date: Wed, 29 Oct 2025 22:11:13 +0100 Subject: [PATCH 260/371] perf(codecs): avoid String allocation in proc macro type checking (#19354) --- crates/storage/codecs/derive/src/compact/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index 00f622be43e..f217134fa5b 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -176,7 +176,8 @@ fn should_use_alt_impl(ftype: &str, segment: &syn::PathSegment) -> bool { let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.args.last() && let (Some(path), 1) = (arg_path.path.segments.first(), arg_path.path.segments.len()) && ["B256", "Address", "Address", "Bloom", "TxHash", "BlockHash", "CompactPlaceholder"] - .contains(&path.ident.to_string().as_str()) + .iter() + .any(|&s| path.ident == s) { return true } From f303b28974192f75abb442f69c4a634a6c0e4841 Mon Sep 17 00:00:00 2001 From: Forostovec Date: Thu, 30 Oct 2025 00:02:30 +0200 Subject: [PATCH 261/371] chore: reuse gzip read buffer to avoid per-iteration allocation (#19398) --- crates/net/downloaders/src/file_client.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index de3d8f8f1f4..4d545aec178 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -481,18 +481,16 @@ impl FileReader { chunk: &mut Vec, chunk_byte_len: u64, ) -> Result { + let mut buffer = vec![0u8; 64 * 1024]; loop { if chunk.len() >= chunk_byte_len as usize { return Ok(true) } - let mut buffer = vec![0u8; 64 * 1024]; - match self.read(&mut buffer).await { Ok(0) => return Ok(!chunk.is_empty()), Ok(n) => { - buffer.truncate(n); - chunk.extend_from_slice(&buffer); + chunk.extend_from_slice(&buffer[..n]); } Err(e) => return Err(e.into()), } From 3fa10defd1c7d0c40695256d67f49a76019e1447 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 29 Oct 2025 23:06:03 +0100 Subject: [PATCH 262/371] chore: bump discv5 (#19400) --- Cargo.lock | 8 ++++---- Cargo.toml | 4 ++-- crates/net/discv5/src/lib.rs | 2 -- crates/net/network/src/discovery.rs | 1 - 4 files changed, 6 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8b64fa5ca3e..895db6e47a8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2974,9 +2974,9 @@ dependencies = [ [[package]] name = "discv5" -version = "0.9.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4b4e7798d2ff74e29cee344dc490af947ae657d6ab5273dde35d58ce06a4d71" +checksum = "f170f4f6ed0e1df52bf43b403899f0081917ecf1500bfe312505cc3b515a8899" dependencies = [ "aes", "aes-gcm", @@ -4772,9 +4772,9 @@ dependencies = [ [[package]] name = "if-addrs" -version = "0.13.4" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69b2eeee38fef3aa9b4cc5f1beea8a2444fc00e7377cafae396de3f5c2065e24" +checksum = 
"bf39cc0423ee66021dc5eccface85580e4a001e0c5288bae8bea7ecb69225e90" dependencies = [ "libc", "windows-sys 0.59.0", diff --git a/Cargo.toml b/Cargo.toml index e00c7a148e4..35c3c9614b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -623,8 +623,8 @@ tower = "0.5" tower-http = "0.6" # p2p -discv5 = "0.9" -if-addrs = "0.13" +discv5 = "0.10" +if-addrs = "0.14" # rpc jsonrpsee = "0.26.0" diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index ef2c69caedb..92c7c543a3a 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -83,7 +83,6 @@ impl Discv5 { //////////////////////////////////////////////////////////////////////////////////////////////// /// Adds the node to the table, if it is not already present. - #[expect(clippy::result_large_err)] pub fn add_node(&self, node_record: Enr) -> Result<(), Error> { let EnrCombinedKeyWrapper(enr) = node_record.into(); self.discv5.add_enr(enr).map_err(Error::AddNodeFailed) @@ -376,7 +375,6 @@ impl Discv5 { /// Returns the [`ForkId`] of the given [`Enr`](discv5::Enr) w.r.t. the local node's network /// stack, if field is set. - #[expect(clippy::result_large_err)] pub fn get_fork_id( &self, enr: &discv5::enr::Enr, diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index 6b95b1e3a63..9cc3a6249a8 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -200,7 +200,6 @@ impl Discovery { } /// Add a node to the discv4 table. - #[expect(clippy::result_large_err)] pub(crate) fn add_discv5_node(&self, enr: Enr) -> Result<(), NetworkError> { if let Some(discv5) = &self.discv5 { discv5.add_node(enr).map_err(NetworkError::Discv5Error)?; From bec4d7c436c49e22b128380cf1f474d345379d31 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 30 Oct 2025 00:50:39 +0100 Subject: [PATCH 263/371] perf: box ForkId in Peer struct to reduce size (#19402) --- crates/net/network-types/src/peers/mod.rs | 2 +- crates/net/network/src/peers.rs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/net/network-types/src/peers/mod.rs b/crates/net/network-types/src/peers/mod.rs index f3529875018..d41882d494c 100644 --- a/crates/net/network-types/src/peers/mod.rs +++ b/crates/net/network-types/src/peers/mod.rs @@ -25,7 +25,7 @@ pub struct Peer { /// The state of the connection, if any. pub state: PeerConnectionState, /// The [`ForkId`] that the peer announced via discovery. - pub fork_id: Option, + pub fork_id: Option>, /// Whether the entry should be removed after an existing session was terminated. 
pub remove_after_disconnect: bool, /// The kind of peer diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index d9ece3dd061..e89b1695d91 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -715,7 +715,7 @@ impl PeersManager { pub(crate) fn set_discovered_fork_id(&mut self, peer_id: PeerId, fork_id: ForkId) { if let Some(peer) = self.peers.get_mut(&peer_id) { trace!(target: "net::peers", ?peer_id, ?fork_id, "set discovered fork id"); - peer.fork_id = Some(fork_id); + peer.fork_id = Some(Box::new(fork_id)); } } @@ -757,7 +757,7 @@ impl PeersManager { Entry::Occupied(mut entry) => { let peer = entry.get_mut(); peer.kind = kind; - peer.fork_id = fork_id; + peer.fork_id = fork_id.map(Box::new); peer.addr = addr; if peer.state.is_incoming() { @@ -770,7 +770,7 @@ impl PeersManager { Entry::Vacant(entry) => { trace!(target: "net::peers", ?peer_id, addr=?addr.tcp(), "discovered new node"); let mut peer = Peer::with_kind(addr, kind); - peer.fork_id = fork_id; + peer.fork_id = fork_id.map(Box::new); entry.insert(peer); self.queued_actions.push_back(PeerAction::PeerAdded(peer_id)); } @@ -838,7 +838,7 @@ impl PeersManager { Entry::Occupied(mut entry) => { let peer = entry.get_mut(); peer.kind = kind; - peer.fork_id = fork_id; + peer.fork_id = fork_id.map(Box::new); peer.addr = addr; if peer.state == PeerConnectionState::Idle { @@ -853,7 +853,7 @@ impl PeersManager { trace!(target: "net::peers", ?peer_id, addr=?addr.tcp(), "connects new node"); let mut peer = Peer::with_kind(addr, kind); peer.state = PeerConnectionState::PendingOut; - peer.fork_id = fork_id; + peer.fork_id = fork_id.map(Box::new); entry.insert(peer); self.connection_info.inc_pending_out(); self.queued_actions From 7c007f7cdaecf36f1e808c454ff06fb4ba65fd37 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 30 Oct 2025 12:50:19 +0100 Subject: [PATCH 264/371] fix(cli): Metrics log when passed metrics port 0 (#19406) Co-authored-by: Varun Doshi --- crates/cli/commands/src/stage/run.rs | 1 - crates/node/builder/src/launch/common.rs | 1 - crates/node/metrics/src/server.rs | 2 ++ 3 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 010277480f5..f25338d30ef 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -116,7 +116,6 @@ impl let components = components(provider_factory.chain_spec()); if let Some(listen_addr) = self.metrics { - info!(target: "reth::cli", "Starting metrics endpoint at {}", listen_addr); let config = MetricServerConfig::new( listen_addr, VersionInfo { diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 080945a76cc..c049ddfbf21 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -582,7 +582,6 @@ where let listen_addr = self.node_config().metrics.prometheus; if let Some(addr) = listen_addr { - info!(target: "reth::cli", "Starting metrics endpoint at {}", addr); let config = MetricServerConfig::new( addr, VersionInfo { diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index d7beb6c3a1d..26e9a918faa 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -119,6 +119,8 @@ impl MetricServer { .await .wrap_err("Could not bind to address")?; + tracing::info!(target: "reth::cli", "Starting metrics endpoint at {}", listener.local_addr().unwrap()); + 
task_executor.spawn_with_graceful_shutdown_signal(|mut signal| { Box::pin(async move { loop { From be291144eee55d1a93473ebacc87d2cd695c40d0 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Thu, 30 Oct 2025 13:55:32 +0100 Subject: [PATCH 265/371] fix(engine): trigger live sync after backfill completes at finalized (#19390) --- crates/engine/tree/src/tree/mod.rs | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 5e2ed1c513c..a6e4a0d4cb1 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1574,6 +1574,32 @@ where return Ok(()) }; + // Check if there are more blocks to sync between current head and FCU target + if let Some(lowest_buffered) = + self.state.buffer.lowest_ancestor(&sync_target_state.head_block_hash) + { + let current_head_num = self.state.tree_state.current_canonical_head.number; + let target_head_num = lowest_buffered.number(); + + if let Some(distance) = self.distance_from_local_tip(current_head_num, target_head_num) + { + // There are blocks between current head and FCU target, download them + debug!( + target: "engine::tree", + %current_head_num, + %target_head_num, + %distance, + "Backfill complete, downloading remaining blocks to reach FCU target" + ); + + self.emit_event(EngineApiEvent::Download(DownloadRequest::BlockRange( + lowest_buffered.parent_hash(), + distance, + ))); + return Ok(()); + } + } + // try to close the gap by executing buffered blocks that are child blocks of the new head self.try_connect_buffered_blocks(self.state.tree_state.current_canonical_head) } From d87d0d1a1f592fe2f5cee025acf29d7f68ab4c86 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Thu, 30 Oct 2025 14:40:18 +0100 Subject: [PATCH 266/371] fix: Prune checkpoint fixes (#19407) --- crates/prune/types/src/lib.rs | 2 +- crates/prune/types/src/segment.rs | 12 +++ .../stages/src/stages/merkle_changesets.rs | 83 +++++++++++-------- crates/stages/stages/src/stages/prune.rs | 13 ++- .../src/providers/database/provider.rs | 14 ++-- .../provider/src/providers/state/overlay.rs | 23 ++--- 6 files changed, 95 insertions(+), 52 deletions(-) diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index a588693892a..b42574cde27 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -25,5 +25,5 @@ pub use pruner::{ PruneInterruptReason, PruneProgress, PrunedSegmentInfo, PrunerOutput, SegmentOutput, SegmentOutputCheckpoint, }; -pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError}; +pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError, PRUNE_SEGMENTS}; pub use target::{PruneModes, UnwindTargetPrunedError, MINIMUM_PRUNING_DISTANCE}; diff --git a/crates/prune/types/src/segment.rs b/crates/prune/types/src/segment.rs index faab12c70ad..aa0e893bb4a 100644 --- a/crates/prune/types/src/segment.rs +++ b/crates/prune/types/src/segment.rs @@ -40,6 +40,18 @@ pub enum PruneSegment { Bodies, } +/// Array of [`PruneSegment`]s actively in use. 
+pub const PRUNE_SEGMENTS: [PruneSegment; 8] = [ + PruneSegment::SenderRecovery, + PruneSegment::TransactionLookup, + PruneSegment::Receipts, + PruneSegment::ContractLogs, + PruneSegment::AccountHistory, + PruneSegment::StorageHistory, + PruneSegment::MerkleChangeSets, + PruneSegment::Bodies, +]; + #[cfg(test)] #[allow(clippy::derivable_impls)] impl Default for PruneSegment { diff --git a/crates/stages/stages/src/stages/merkle_changesets.rs b/crates/stages/stages/src/stages/merkle_changesets.rs index 7bf756c3dd3..9d33912041f 100644 --- a/crates/stages/stages/src/stages/merkle_changesets.rs +++ b/crates/stages/stages/src/stages/merkle_changesets.rs @@ -4,12 +4,13 @@ use alloy_primitives::BlockNumber; use reth_consensus::ConsensusError; use reth_primitives_traits::{GotExpected, SealedHeader}; use reth_provider::{ - ChainStateBlockReader, DBProvider, HeaderProvider, ProviderError, StageCheckpointReader, - TrieWriter, + ChainStateBlockReader, DBProvider, HeaderProvider, ProviderError, PruneCheckpointReader, + PruneCheckpointWriter, StageCheckpointReader, TrieWriter, }; +use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_api::{ - BlockErrorKind, CheckpointBlockRange, ExecInput, ExecOutput, MerkleChangeSetsCheckpoint, Stage, - StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, + BlockErrorKind, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, + UnwindInput, UnwindOutput, }; use reth_trie::{updates::TrieUpdates, HashedPostState, KeccakKeyHasher, StateRoot, TrieInput}; use reth_trie_db::{DatabaseHashedPostState, DatabaseStateRoot}; @@ -39,14 +40,28 @@ impl MerkleChangeSets { /// Returns the range of blocks which are already computed. Will return an empty range if none /// have been computed. - fn computed_range(checkpoint: Option) -> Range { + fn computed_range( + provider: &Provider, + checkpoint: Option, + ) -> Result, StageError> + where + Provider: PruneCheckpointReader, + { let to = checkpoint.map(|chk| chk.block_number).unwrap_or_default(); - let from = checkpoint - .map(|chk| chk.merkle_changesets_stage_checkpoint().unwrap_or_default()) - .unwrap_or_default() - .block_range - .to; - from..to + 1 + + // Get the prune checkpoint for MerkleChangeSets to use as the lower bound. If there's no + // prune checkpoint or if the pruned block number is None, return empty range + let Some(from) = provider + .get_prune_checkpoint(PruneSegment::MerkleChangeSets)? + .and_then(|chk| chk.block_number) + // prune checkpoint indicates the last block pruned, so the block after is the start of + // the computed data + .map(|block_number| block_number + 1) + else { + return Ok(0..0) + }; + + Ok(from..to + 1) } /// Determines the target range for changeset computation based on the checkpoint and provider @@ -269,8 +284,13 @@ impl Default for MerkleChangeSets { impl Stage for MerkleChangeSets where - Provider: - StageCheckpointReader + TrieWriter + DBProvider + HeaderProvider + ChainStateBlockReader, + Provider: StageCheckpointReader + + TrieWriter + + DBProvider + + HeaderProvider + + ChainStateBlockReader + + PruneCheckpointReader + + PruneCheckpointWriter, { fn id(&self) -> StageId { StageId::MerkleChangeSets @@ -291,7 +311,7 @@ where // Get the previously computed range. This will be updated to reflect the populating of the // target range. 
- let mut computed_range = Self::computed_range(input.checkpoint); + let mut computed_range = Self::computed_range(provider, input.checkpoint)?; // We want the target range to not include any data already computed previously, if // possible, so we start the target range from the end of the computed range if that is @@ -336,16 +356,19 @@ where // Populate the target range with changesets Self::populate_range(provider, target_range)?; - let checkpoint_block_range = CheckpointBlockRange { - from: computed_range.start, - // CheckpointBlockRange is inclusive - to: computed_range.end.saturating_sub(1), - }; + // Update the prune checkpoint to reflect that all data before `computed_range.start` + // is not available. + provider.save_prune_checkpoint( + PruneSegment::MerkleChangeSets, + PruneCheckpoint { + block_number: Some(computed_range.start.saturating_sub(1)), + tx_number: None, + prune_mode: PruneMode::Before(computed_range.start), + }, + )?; - let checkpoint = StageCheckpoint::new(checkpoint_block_range.to) - .with_merkle_changesets_stage_checkpoint(MerkleChangeSetsCheckpoint { - block_range: checkpoint_block_range, - }); + // `computed_range.end` is exclusive. + let checkpoint = StageCheckpoint::new(computed_range.end.saturating_sub(1)); Ok(ExecOutput::done(checkpoint)) } @@ -358,22 +381,14 @@ where // Unwinding is trivial; just clear everything after the target block. provider.clear_trie_changesets_from(input.unwind_to + 1)?; - let mut computed_range = Self::computed_range(Some(input.checkpoint)); + let mut computed_range = Self::computed_range(provider, Some(input.checkpoint))?; computed_range.end = input.unwind_to + 1; if computed_range.start > computed_range.end { computed_range.start = computed_range.end; } - let checkpoint_block_range = CheckpointBlockRange { - from: computed_range.start, - // computed_range.end is exclusive - to: computed_range.end.saturating_sub(1), - }; - - let checkpoint = StageCheckpoint::new(input.unwind_to) - .with_merkle_changesets_stage_checkpoint(MerkleChangeSetsCheckpoint { - block_range: checkpoint_block_range, - }); + // `computed_range.end` is exclusive + let checkpoint = StageCheckpoint::new(computed_range.end.saturating_sub(1)); Ok(UnwindOutput { checkpoint }) } diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index 3161d4b1412..f6fb7f90ae1 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -103,9 +103,18 @@ where // We cannot recover the data that was pruned in `execute`, so we just update the // checkpoints. let prune_checkpoints = provider.get_prune_checkpoints()?; + let unwind_to_last_tx = + provider.block_body_indices(input.unwind_to)?.map(|i| i.last_tx_num()); + for (segment, mut checkpoint) in prune_checkpoints { - checkpoint.block_number = Some(input.unwind_to); - provider.save_prune_checkpoint(segment, checkpoint)?; + // Only update the checkpoint if unwind_to is lower than the existing checkpoint. 
+ if let Some(block) = checkpoint.block_number && + input.unwind_to < block + { + checkpoint.block_number = Some(input.unwind_to); + checkpoint.tx_number = unwind_to_last_tx; + provider.save_prune_checkpoint(segment, checkpoint)?; + } } Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) }) } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index ece6ef56c85..9fa6500db12 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -52,7 +52,7 @@ use reth_primitives_traits::{ Account, Block as _, BlockBody as _, Bytecode, RecoveredBlock, SealedHeader, StorageEntry, }; use reth_prune_types::{ - PruneCheckpoint, PruneMode, PruneModes, PruneSegment, MINIMUM_PRUNING_DISTANCE, + PruneCheckpoint, PruneMode, PruneModes, PruneSegment, MINIMUM_PRUNING_DISTANCE, PRUNE_SEGMENTS, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -3024,10 +3024,14 @@ impl PruneCheckpointReader for DatabaseProvide } fn get_prune_checkpoints(&self) -> ProviderResult> { - Ok(self - .tx - .cursor_read::()? - .walk(None)? + Ok(PRUNE_SEGMENTS + .iter() + .filter_map(|segment| { + self.tx + .get::(*segment) + .transpose() + .map(|chk| chk.map(|chk| (*segment, chk))) + }) .collect::>()?) } } diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 519fe56d73c..5c086c273ba 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -96,19 +96,22 @@ where } })?; - // Extract a possible lower bound from stage checkpoint if available - let stage_lower_bound = stage_checkpoint.as_ref().and_then(|chk| { - chk.merkle_changesets_stage_checkpoint().map(|stage_chk| stage_chk.block_range.from) - }); + // If the requested block is the DB tip (determined by the MerkleChangeSets stage + // checkpoint) then there won't be any reverts necessary, and we can simply return Ok. + if upper_bound == requested_block { + return Ok(()) + } - // Extract a possible lower bound from prune checkpoint if available + // Extract the lower bound from prune checkpoint if available // The prune checkpoint's block_number is the highest pruned block, so data is available // starting from the next block - let prune_lower_bound = - prune_checkpoint.and_then(|chk| chk.block_number.map(|block| block + 1)); - - // Use the higher of the two lower bounds. If neither is available assume unbounded. 
- let lower_bound = stage_lower_bound.max(prune_lower_bound).unwrap_or(0); + let lower_bound = prune_checkpoint + .and_then(|chk| chk.block_number) + .map(|block_number| block_number + 1) + .ok_or_else(|| ProviderError::InsufficientChangesets { + requested: requested_block, + available: 0..=upper_bound, + })?; let available_range = lower_bound..=upper_bound; From 5f5dbb0121851bfb7a91142e849769612a739bec Mon Sep 17 00:00:00 2001 From: leniram159 Date: Thu, 30 Oct 2025 15:48:30 +0100 Subject: [PATCH 267/371] fix: accurate build features reporting in `reth --version` (#19124) --- Cargo.toml | 4 ++-- bin/reth/Cargo.toml | 4 +++- crates/ethereum/cli/Cargo.toml | 2 +- crates/optimism/bin/Cargo.toml | 4 +++- crates/optimism/cli/Cargo.toml | 2 +- 5 files changed, 10 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 35c3c9614b6..c3ae24ecea3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -372,7 +372,7 @@ reth-era-utils = { path = "crates/era-utils" } reth-errors = { path = "crates/errors" } reth-eth-wire = { path = "crates/net/eth-wire" } reth-eth-wire-types = { path = "crates/net/eth-wire-types" } -reth-ethereum-cli = { path = "crates/ethereum/cli" } +reth-ethereum-cli = { path = "crates/ethereum/cli", default-features = false } reth-ethereum-consensus = { path = "crates/ethereum/consensus", default-features = false } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives", default-features = false } reth-ethereum-forks = { path = "crates/ethereum/hardforks", default-features = false } @@ -413,7 +413,7 @@ reth-optimism-node = { path = "crates/optimism/node" } reth-node-types = { path = "crates/node/types" } reth-op = { path = "crates/optimism/reth", default-features = false } reth-optimism-chainspec = { path = "crates/optimism/chainspec", default-features = false } -reth-optimism-cli = { path = "crates/optimism/cli" } +reth-optimism-cli = { path = "crates/optimism/cli", default-features = false } reth-optimism-consensus = { path = "crates/optimism/consensus", default-features = false } reth-optimism-forks = { path = "crates/optimism/hardforks", default-features = false } reth-optimism-payload-builder = { path = "crates/optimism/payload" } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index d4e134bf48c..31bc630a8a9 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -81,7 +81,9 @@ backon.workspace = true tempfile.workspace = true [features] -default = ["jemalloc", "reth-revm/portable"] +default = ["jemalloc", "otlp", "reth-revm/portable"] + +otlp = ["reth-ethereum-cli/otlp"] dev = ["reth-ethereum-cli/dev"] diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index 5dbb8bf4cd3..15f987d2b00 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -37,7 +37,7 @@ tracing.workspace = true tempfile.workspace = true [features] -default = ["jemalloc", "otlp"] +default = [] otlp = ["reth-tracing/otlp", "reth-node-core/otlp"] diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 3733227a3aa..568ed8aabfe 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -27,7 +27,9 @@ tracing.workspace = true workspace = true [features] -default = ["jemalloc", "reth-optimism-evm/portable"] +default = ["jemalloc", "otlp", "reth-optimism-evm/portable"] + +otlp = ["reth-optimism-cli/otlp"] jemalloc = ["reth-cli-util/jemalloc", "reth-optimism-cli/jemalloc"] jemalloc-prof = ["reth-cli-util/jemalloc-prof"] diff --git a/crates/optimism/cli/Cargo.toml 
b/crates/optimism/cli/Cargo.toml index eb320045337..aee7566de22 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -76,7 +76,7 @@ reth-stages = { workspace = true, features = ["test-utils"] } reth-optimism-chainspec = { workspace = true, features = ["std", "superchain-configs"] } [features] -default = ["otlp"] +default = [] # Opentelemtry feature to activate metrics export otlp = ["reth-tracing/otlp", "reth-node-core/otlp"] From e9400527cd687a890166a5d949ff57fc2a58f448 Mon Sep 17 00:00:00 2001 From: sashass1315 Date: Thu, 30 Oct 2025 17:12:10 +0200 Subject: [PATCH 268/371] chore(net): avoid cloning GetBlockBodies request (#19404) --- crates/net/network/src/fetch/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 6c14e994008..55bde002b3e 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -29,7 +29,7 @@ use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; type InflightHeadersRequest = Request>>; -type InflightBodiesRequest = Request, PeerRequestResult>>; +type InflightBodiesRequest = Request<(), PeerRequestResult>>; /// Manages data fetching operations. /// @@ -237,7 +237,7 @@ impl StateFetcher { }) } DownloadRequest::GetBlockBodies { request, response, .. } => { - let inflight = Request { request: request.clone(), response }; + let inflight = Request { request: (), response }; self.inflight_bodies_requests.insert(peer_id, inflight); BlockRequest::GetBlockBodies(GetBlockBodies(request)) } From 59bf11779c435a48a5acfed7ca7f9a076ddfb76d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lancelot=20de=20Ferri=C3=A8re?= Date: Thu, 30 Oct 2025 16:50:29 +0100 Subject: [PATCH 269/371] feat: Output the block execution outputs after validating (reth-stateless) (#19360) --- crates/stateless/src/validation.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/crates/stateless/src/validation.rs b/crates/stateless/src/validation.rs index a0475b09939..db5f317ab22 100644 --- a/crates/stateless/src/validation.rs +++ b/crates/stateless/src/validation.rs @@ -17,8 +17,11 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::{Consensus, HeaderValidator}; use reth_errors::ConsensusError; use reth_ethereum_consensus::{validate_block_post_execution, EthBeaconConsensus}; -use reth_ethereum_primitives::{Block, EthPrimitives}; -use reth_evm::{execute::Executor, ConfigureEvm}; +use reth_ethereum_primitives::{Block, EthPrimitives, EthereumReceipt}; +use reth_evm::{ + execute::{BlockExecutionOutput, Executor}, + ConfigureEvm, +}; use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_trie_common::{HashedPostState, KeccakKeyHasher}; @@ -144,7 +147,7 @@ pub fn stateless_validation( witness: ExecutionWitness, chain_spec: Arc, evm_config: E, -) -> Result +) -> Result<(B256, BlockExecutionOutput), StatelessValidationError> where ChainSpec: Send + Sync + EthChainSpec

+ EthereumHardforks + Debug, E: ConfigureEvm + Clone + 'static, @@ -170,7 +173,7 @@ pub fn stateless_validation_with_trie( witness: ExecutionWitness, chain_spec: Arc, evm_config: E, -) -> Result +) -> Result<(B256, BlockExecutionOutput), StatelessValidationError> where T: StatelessTrie, ChainSpec: Send + Sync + EthChainSpec
+ EthereumHardforks + Debug, @@ -242,7 +245,7 @@ where } // Return block hash - Ok(current_block.hash_slow()) + Ok((current_block.hash_slow(), output)) } /// Performs consensus validation checks on a block without execution or state validation. From fccf76a19a31040f68522aff32000365ed04563d Mon Sep 17 00:00:00 2001 From: Skylar Ray <137945430+sky-coderay@users.noreply.github.com> Date: Thu, 30 Oct 2025 20:51:27 +0200 Subject: [PATCH 270/371] fix(engine): remove redundant parent_to_child cleanup in insert_executed (#19380) --- crates/engine/tree/src/tree/state.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/crates/engine/tree/src/tree/state.rs b/crates/engine/tree/src/tree/state.rs index 2f083a4d9e7..0a13207e660 100644 --- a/crates/engine/tree/src/tree/state.rs +++ b/crates/engine/tree/src/tree/state.rs @@ -107,10 +107,6 @@ impl TreeState { self.blocks_by_number.entry(block_number).or_default().push(executed); self.parent_to_child.entry(parent_hash).or_default().insert(hash); - - for children in self.parent_to_child.values_mut() { - children.retain(|child| self.blocks_by_hash.contains_key(child)); - } } /// Remove single executed block by its hash. From dc8efbf9b3e880c87ae4c6bba80157b786f5de32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojtek=20=C5=81opata?= Date: Thu, 30 Oct 2025 20:53:43 +0100 Subject: [PATCH 271/371] feat: add --rpc.evm-memory-limit flag (#19279) Co-authored-by: Matthias Seitz --- Cargo.lock | 21 ++++--------------- Cargo.toml | 2 +- crates/ethereum/node/Cargo.toml | 2 +- crates/node/builder/src/rpc.rs | 1 + crates/node/core/src/args/rpc_server.rs | 11 ++++++++++ crates/optimism/node/Cargo.toml | 2 +- crates/optimism/rpc/src/eth/call.rs | 5 +++++ crates/revm/Cargo.toml | 1 + crates/rpc/rpc-builder/src/config.rs | 1 + crates/rpc/rpc-eth-api/Cargo.toml | 2 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 5 +++++ .../rpc/rpc-eth-types/src/builder/config.rs | 9 ++++++++ crates/rpc/rpc-eth-types/src/error/mod.rs | 6 +++++- crates/rpc/rpc/Cargo.toml | 2 +- crates/rpc/rpc/src/eth/builder.rs | 16 ++++++++++++++ crates/rpc/rpc/src/eth/core.rs | 13 ++++++++++++ crates/rpc/rpc/src/eth/helpers/call.rs | 5 +++++ docs/vocs/docs/pages/cli/reth/node.mdx | 5 +++++ testing/ef-tests/Cargo.toml | 2 +- 19 files changed, 87 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 895db6e47a8..a4e0e1fd0a8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10650,7 +10650,7 @@ dependencies = [ "reth-storage-api", "reth-tasks", "reth-tracing", - "revm-interpreter 27.0.2", + "revm-interpreter", "revm-primitives", "rustc-hash", "schnellru", @@ -10867,7 +10867,7 @@ dependencies = [ "revm-database-interface", "revm-handler", "revm-inspector", - "revm-interpreter 28.0.0", + "revm-interpreter", "revm-precompile", "revm-primitives", "revm-state", @@ -10957,7 +10957,7 @@ dependencies = [ "revm-context", "revm-context-interface", "revm-database-interface", - "revm-interpreter 28.0.0", + "revm-interpreter", "revm-precompile", "revm-primitives", "revm-state", @@ -10975,7 +10975,7 @@ dependencies = [ "revm-context", "revm-database-interface", "revm-handler", - "revm-interpreter 28.0.0", + "revm-interpreter", "revm-primitives", "revm-state", "serde", @@ -11002,19 +11002,6 @@ dependencies = [ "thiserror 2.0.17", ] -[[package]] -name = "revm-interpreter" -version = "27.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0834fc25c020061f0f801d8de8bb53c88a63631cca5884a6c65b90c85e241138" -dependencies = [ - "revm-bytecode", - "revm-context-interface", - 
"revm-primitives", - "revm-state", - "serde", -] - [[package]] name = "revm-interpreter" version = "28.0.0" diff --git a/Cargo.toml b/Cargo.toml index c3ae24ecea3..c6a9abad754 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -474,7 +474,7 @@ revm-bytecode = { version = "7.0.2", default-features = false } revm-database = { version = "9.0.2", default-features = false } revm-state = { version = "8.0.2", default-features = false } revm-primitives = { version = "21.0.1", default-features = false } -revm-interpreter = { version = "27.0.2", default-features = false } +revm-interpreter = { version = "28.0.0", default-features = false } revm-inspector = { version = "11.1.2", default-features = false } revm-context = { version = "10.1.2", default-features = false } revm-context-interface = { version = "11.1.2", default-features = false } diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 3c0efdb0394..1594c6fad96 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -49,7 +49,7 @@ tokio.workspace = true # revm with required ethereum features # Note: this must be kept to ensure all features are properly enabled/forwarded -revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } +revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg", "memory_limit"] } # misc eyre.workspace = true diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index ed0a3fb64d4..a66d7b222e4 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -1156,6 +1156,7 @@ impl<'a, N: FullNodeComponents::new().range(1..)), + default_value_t = (1 << 32) - 1 + )] + pub rpc_evm_memory_limit: u64, + /// Maximum eth transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap) #[arg( long = "rpc.txfeecap", @@ -408,6 +418,7 @@ impl Default for RpcServerArgs { rpc_max_blocks_per_filter: constants::DEFAULT_MAX_BLOCKS_PER_FILTER.into(), rpc_max_logs_per_response: (constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64).into(), rpc_gas_cap: constants::gas_oracle::RPC_DEFAULT_GAS_CAP, + rpc_evm_memory_limit: (1 << 32) - 1, rpc_tx_fee_cap: constants::DEFAULT_TX_FEE_CAP_WEI, rpc_max_simulate_blocks: constants::DEFAULT_MAX_SIMULATE_BLOCKS, rpc_eth_proof_window: constants::DEFAULT_ETH_PROOF_WINDOW, diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 162700ac0ae..fdccffb869b 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -45,7 +45,7 @@ reth-optimism-primitives = { workspace = true, features = ["serde", "serde-binco # revm with required optimism features # Note: this must be kept to ensure all features are properly enabled/forwarded -revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } +revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg", "memory_limit"] } op-revm.workspace = true # ethereum diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index 4e853984ac9..db96bda83f3 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -35,4 +35,9 @@ where fn max_simulate_blocks(&self) -> u64 { self.inner.eth_api.max_simulate_blocks() } + + #[inline] + fn evm_memory_limit(&self) -> u64 { + self.inner.eth_api.evm_memory_limit() + } } diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 488a685b382..92036e39085 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -68,3 +68,4 @@ optional-checks = [ "optional-eip3607", 
"optional-no-base-fee", ] +memory_limit = ["revm/memory_limit"] diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index 011e24d468b..4d57bdec7d8 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -105,6 +105,7 @@ impl RethRpcServerConfig for RpcServerArgs { .proof_permits(self.rpc_proof_permits) .pending_block_kind(self.rpc_pending_block) .raw_tx_forwarder(self.rpc_forwarder.clone()) + .rpc_evm_memory_limit(self.rpc_evm_memory_limit) } fn flashbots_config(&self) -> ValidationApiConfig { diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 88a7f059323..830e8cf83ae 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee", "optional_fee_charge"] } +revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee", "optional_fee_charge", "memory_limit"] } reth-chain-state.workspace = true revm-inspectors.workspace = true reth-primitives-traits = { workspace = true, features = ["rpc-compat"] } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 7eb10c10534..05f0de87464 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -493,6 +493,9 @@ pub trait Call: /// Returns the maximum number of blocks accepted for `eth_simulateV1`. fn max_simulate_blocks(&self) -> u64; + /// Returns the maximum memory the EVM can allocate per RPC request. + fn evm_memory_limit(&self) -> u64; + /// Returns the max gas limit that the caller can afford given a transaction environment. fn caller_gas_allowance( &self, @@ -811,6 +814,8 @@ pub trait Call: // evm_env.cfg_env.disable_fee_charge = true; + evm_env.cfg_env.memory_limit = self.evm_memory_limit(); + // set nonce to None so that the correct nonce is chosen by the EVM request.as_mut().take_nonce(); diff --git a/crates/rpc/rpc-eth-types/src/builder/config.rs b/crates/rpc/rpc-eth-types/src/builder/config.rs index 47f15ae5ae7..ded50ab4a83 100644 --- a/crates/rpc/rpc-eth-types/src/builder/config.rs +++ b/crates/rpc/rpc-eth-types/src/builder/config.rs @@ -95,6 +95,8 @@ pub struct EthConfig { pub raw_tx_forwarder: ForwardConfig, /// Timeout duration for `send_raw_transaction_sync` RPC method. pub send_raw_transaction_sync_timeout: Duration, + /// Maximum memory the EVM can allocate per RPC request. + pub rpc_evm_memory_limit: u64, } impl EthConfig { @@ -126,6 +128,7 @@ impl Default for EthConfig { pending_block_kind: PendingBlockKind::Full, raw_tx_forwarder: ForwardConfig::default(), send_raw_transaction_sync_timeout: RPC_DEFAULT_SEND_RAW_TX_SYNC_TIMEOUT_SECS, + rpc_evm_memory_limit: (1 << 32) - 1, } } } @@ -216,6 +219,12 @@ impl EthConfig { self.send_raw_transaction_sync_timeout = timeout; self } + + /// Configures the maximum memory the EVM can allocate per RPC request. 
+ pub const fn rpc_evm_memory_limit(mut self, memory_limit: u64) -> Self { + self.rpc_evm_memory_limit = memory_limit; + self + } } /// Config for the filter diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index ef65e4ccc2b..b8814785478 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -619,6 +619,9 @@ pub enum RpcInvalidTransactionError { /// Contains the gas limit. #[error("out of gas: gas exhausted during memory expansion: {0}")] MemoryOutOfGas(u64), + /// Memory limit was exceeded during memory expansion. + #[error("out of memory: memory limit exceeded during memory expansion")] + MemoryLimitOutOfGas, /// Gas limit was exceeded during precompile execution. /// Contains the gas limit. #[error("out of gas: gas exhausted during precompiled contract execution: {0}")] @@ -723,7 +726,8 @@ impl RpcInvalidTransactionError { OutOfGasError::Basic | OutOfGasError::ReentrancySentry => { Self::BasicOutOfGas(gas_limit) } - OutOfGasError::Memory | OutOfGasError::MemoryLimit => Self::MemoryOutOfGas(gas_limit), + OutOfGasError::Memory => Self::MemoryOutOfGas(gas_limit), + OutOfGasError::MemoryLimit => Self::MemoryLimitOutOfGas, OutOfGasError::Precompile => Self::PrecompileOutOfGas(gas_limit), OutOfGasError::InvalidOperand => Self::InvalidOperandOutOfGas(gas_limit), } diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index e028e47448d..81df4bff44f 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -63,7 +63,7 @@ alloy-rpc-types-txpool.workspace = true alloy-rpc-types-admin.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["kzg"] } alloy-serde.workspace = true -revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee"] } +revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee", "memory_limit"] } revm-primitives = { workspace = true, features = ["serde"] } # rpc diff --git a/crates/rpc/rpc/src/eth/builder.rs b/crates/rpc/rpc/src/eth/builder.rs index c34d268d64a..ff01903736b 100644 --- a/crates/rpc/rpc/src/eth/builder.rs +++ b/crates/rpc/rpc/src/eth/builder.rs @@ -44,6 +44,7 @@ pub struct EthApiBuilder { pending_block_kind: PendingBlockKind, raw_tx_forwarder: ForwardConfig, send_raw_transaction_sync_timeout: Duration, + evm_memory_limit: u64, } impl @@ -94,6 +95,7 @@ impl EthApiBuilder { pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } = self; EthApiBuilder { components, @@ -114,6 +116,7 @@ impl EthApiBuilder { pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } } } @@ -145,6 +148,7 @@ where pending_block_kind: PendingBlockKind::Full, raw_tx_forwarder: ForwardConfig::default(), send_raw_transaction_sync_timeout: Duration::from_secs(30), + evm_memory_limit: (1 << 32) - 1, } } } @@ -183,6 +187,7 @@ where pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } = self; EthApiBuilder { components, @@ -203,6 +208,7 @@ where pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } } @@ -230,6 +236,7 @@ where pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } = self; EthApiBuilder { components, @@ -250,6 +257,7 @@ where pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } } @@ -477,6 +485,7 @@ 
where pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } = self; let provider = components.provider().clone(); @@ -517,6 +526,7 @@ where pending_block_kind, raw_tx_forwarder.forwarder_client(), send_raw_transaction_sync_timeout, + evm_memory_limit, ) } @@ -541,4 +551,10 @@ where self.send_raw_transaction_sync_timeout = timeout; self } + + /// Sets the maximum memory the EVM can allocate per RPC request. + pub const fn evm_memory_limit(mut self, memory_limit: u64) -> Self { + self.evm_memory_limit = memory_limit; + self + } } diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index d2e5cf124ec..4084168c4f6 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -155,6 +155,7 @@ where pending_block_kind: PendingBlockKind, raw_tx_forwarder: ForwardConfig, send_raw_transaction_sync_timeout: Duration, + evm_memory_limit: u64, ) -> Self { let inner = EthApiInner::new( components, @@ -173,6 +174,7 @@ where pending_block_kind, raw_tx_forwarder.forwarder_client(), send_raw_transaction_sync_timeout, + evm_memory_limit, ); Self { inner: Arc::new(inner) } @@ -318,6 +320,9 @@ pub struct EthApiInner { /// Blob sidecar converter blob_sidecar_converter: BlobSidecarConverter, + + /// Maximum memory the EVM can allocate per RPC request. + evm_memory_limit: u64, } impl EthApiInner @@ -344,6 +349,7 @@ where pending_block_kind: PendingBlockKind, raw_tx_forwarder: Option, send_raw_transaction_sync_timeout: Duration, + evm_memory_limit: u64, ) -> Self { let signers = parking_lot::RwLock::new(Default::default()); // get the block number of the latest block @@ -386,6 +392,7 @@ where pending_block_kind, send_raw_transaction_sync_timeout, blob_sidecar_converter: BlobSidecarConverter::new(), + evm_memory_limit, } } } @@ -563,6 +570,12 @@ where pub const fn blob_sidecar_converter(&self) -> &BlobSidecarConverter { &self.blob_sidecar_converter } + + /// Returns the EVM memory limit. 
+ #[inline] + pub const fn evm_memory_limit(&self) -> u64 { + self.evm_memory_limit + } } #[cfg(test)] diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index abe06cb55ec..ad9f020bd0c 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -31,6 +31,11 @@ where fn max_simulate_blocks(&self) -> u64 { self.inner.max_simulate_blocks() } + + #[inline] + fn evm_memory_limit(&self) -> u64 { + self.inner.evm_memory_limit() + } } impl EstimateCall for EthApi diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 6f8b6ae88a7..b53dda3fb1a 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -405,6 +405,11 @@ RPC: [default: 50000000] + --rpc.evm-memory-limit + Maximum memory the EVM can allocate per RPC request + + [default: 4294967295] + --rpc.txfeecap Maximum eth transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap) diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index 745172cd82c..e9cf465a98d 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -32,7 +32,7 @@ reth-stateless = { workspace = true, features = ["secp256k1"] } reth-tracing.workspace = true reth-trie.workspace = true reth-trie-db.workspace = true -revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } +revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg", "memory_limit"] } alloy-rlp.workspace = true alloy-primitives.workspace = true From 6fafff5f145f99bb8cccf3e1b88ea009955ab648 Mon Sep 17 00:00:00 2001 From: Forostovec Date: Thu, 30 Oct 2025 22:43:11 +0200 Subject: [PATCH 272/371] fix: highest_nonces update in PendingPool::remove_transaction (#19301) Co-authored-by: Matthias Seitz --- crates/transaction-pool/src/pool/pending.rs | 96 ++++++++++++++++++--- 1 file changed, 83 insertions(+), 13 deletions(-) diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 317066137da..dc675031ea6 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -278,14 +278,6 @@ impl PendingPool { } } - /// Returns the ancestor the given transaction, the transaction with `nonce - 1`. - /// - /// Note: for a transaction with nonce higher than the current on chain nonce this will always - /// return an ancestor since all transaction in this pool are gapless. - fn ancestor(&self, id: &TransactionId) -> Option<&PendingTransaction> { - self.get(&id.unchecked_ancestor()?) - } - /// Adds a new transactions to the pending queue. 
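Aside on the `--rpc.evm-memory-limit` flag introduced in the patch above: a minimal, self-contained sketch of the idea, assuming nothing beyond the Rust standard library. The names `RpcEvmConfig`, `CallEnv`, and `prepare_call` are illustrative only and are not reth types; the sketch just mirrors how a per-request cap defaulting to 2^32 - 1 bytes is carried from configuration into the per-call environment, the same pattern the diff expresses with `evm_env.cfg_env.memory_limit = self.evm_memory_limit()`.

// Hypothetical RPC-facing configuration carrying the memory cap.
#[derive(Clone, Copy, Debug)]
struct RpcEvmConfig {
    /// Maximum memory (in bytes) the EVM may allocate while serving one request.
    evm_memory_limit: u64,
}

impl Default for RpcEvmConfig {
    fn default() -> Self {
        // Same default as the `--rpc.evm-memory-limit` flag: 2^32 - 1 bytes.
        Self { evm_memory_limit: (1 << 32) - 1 }
    }
}

// Hypothetical per-call environment the limit is copied into before execution.
#[derive(Debug)]
struct CallEnv {
    memory_limit: u64,
}

fn prepare_call(cfg: &RpcEvmConfig) -> CallEnv {
    // The request-scoped environment inherits the configured cap.
    CallEnv { memory_limit: cfg.evm_memory_limit }
}

fn main() {
    let cfg = RpcEvmConfig::default();
    let env = prepare_call(&cfg);
    assert_eq!(env.memory_limit, u32::MAX as u64);
    println!("per-request EVM memory limit: {} bytes", env.memory_limit);
}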
/// /// # Panics @@ -342,14 +334,35 @@ impl PendingPool { let tx = self.by_id.remove(id)?; self.size_of -= tx.transaction.size(); - if let Some(highest) = self.highest_nonces.get(&id.sender) { - if highest.transaction.nonce() == id.nonce { - self.highest_nonces.remove(&id.sender); + match self.highest_nonces.entry(id.sender) { + Entry::Occupied(mut entry) => { + if entry.get().transaction.nonce() == id.nonce { + // we just removed the tx with the highest nonce for this sender, find the + // highest remaining tx from that sender + if let Some((_, new_highest)) = self + .by_id + .range(( + id.sender.start_bound(), + std::ops::Bound::Included(TransactionId::new(id.sender, u64::MAX)), + )) + .last() + { + // insert the new highest nonce for this sender + entry.insert(new_highest.clone()); + } else { + entry.remove(); + } + } } - if let Some(ancestor) = self.ancestor(id) { - self.highest_nonces.insert(id.sender, ancestor.clone()); + Entry::Vacant(_) => { + debug_assert!( + false, + "removed transaction without a tracked highest nonce {:?}", + id + ); } } + Some(tx.transaction) } @@ -1054,4 +1067,61 @@ mod tests { assert!(pool.get_txs_by_sender(sender_b).is_empty()); assert!(pool.get_txs_by_sender(sender_c).is_empty()); } + + #[test] + fn test_remove_non_highest_keeps_highest() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + let sender = address!("0x00000000000000000000000000000000000000aa"); + let txs = MockTransactionSet::dependent(sender, 0, 3, TxType::Eip1559).into_vec(); + for tx in txs { + pool.add_transaction(f.validated_arc(tx), 0); + } + pool.assert_invariants(); + let sender_id = f.ids.sender_id(&sender).unwrap(); + let mid_id = TransactionId::new(sender_id, 1); + let _ = pool.remove_transaction(&mid_id); + let highest = pool.highest_nonces.get(&sender_id).unwrap(); + assert_eq!(highest.transaction.nonce(), 2); + pool.assert_invariants(); + } + + #[test] + fn test_cascade_removal_recomputes_highest() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + let sender = address!("0x00000000000000000000000000000000000000bb"); + let txs = MockTransactionSet::dependent(sender, 0, 4, TxType::Eip1559).into_vec(); + for tx in txs { + pool.add_transaction(f.validated_arc(tx), 0); + } + pool.assert_invariants(); + let sender_id = f.ids.sender_id(&sender).unwrap(); + let id3 = TransactionId::new(sender_id, 3); + let _ = pool.remove_transaction(&id3); + let highest = pool.highest_nonces.get(&sender_id).unwrap(); + assert_eq!(highest.transaction.nonce(), 2); + let id2 = TransactionId::new(sender_id, 2); + let _ = pool.remove_transaction(&id2); + let highest = pool.highest_nonces.get(&sender_id).unwrap(); + assert_eq!(highest.transaction.nonce(), 1); + pool.assert_invariants(); + } + + #[test] + fn test_remove_only_tx_clears_highest() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + let sender = address!("0x00000000000000000000000000000000000000cc"); + let txs = MockTransactionSet::dependent(sender, 0, 1, TxType::Eip1559).into_vec(); + for tx in txs { + pool.add_transaction(f.validated_arc(tx), 0); + } + pool.assert_invariants(); + let sender_id = f.ids.sender_id(&sender).unwrap(); + let id0 = TransactionId::new(sender_id, 0); + let _ = pool.remove_transaction(&id0); + assert!(!pool.highest_nonces.contains_key(&sender_id)); + pool.assert_invariants(); + } } From cff942ed0e421ed3e914f39c70a61bfb6483fb3c Mon Sep 17 
00:00:00 2001 From: Yash <72552910+kumaryash90@users.noreply.github.com> Date: Fri, 31 Oct 2025 05:31:32 +0530 Subject: [PATCH 273/371] chore: add tracing features to node-core crate (#19415) Co-authored-by: Matthias Seitz --- .config/zepter.yaml | 2 +- bin/reth-bench/Cargo.toml | 25 ++++++++++++++++++++----- bin/reth/Cargo.toml | 10 +++++++++- crates/ethereum/cli/Cargo.toml | 25 ++++++++++++++++++++----- crates/node/core/Cargo.toml | 6 ++++++ 5 files changed, 56 insertions(+), 12 deletions(-) diff --git a/.config/zepter.yaml b/.config/zepter.yaml index b754d06a062..251c0892d4d 100644 --- a/.config/zepter.yaml +++ b/.config/zepter.yaml @@ -12,7 +12,7 @@ workflows: # Check that `A` activates the features of `B`. "propagate-feature", # These are the features to check: - "--features=std,op,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat", + "--features=std,op,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat,min-error-logs,min-warn-logs,min-info-logs,min-debug-logs,min-trace-logs,otlp", # Do not try to add a new section to `[features]` of `A` only because `B` exposes that feature. There are edge-cases where this is still needed, but we can add them manually. "--left-side-feature-missing=ignore", # Ignore the case that `A` it outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on. diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index 891fa4f9780..a07d0f5200e 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -81,11 +81,26 @@ jemalloc = [ jemalloc-prof = ["reth-cli-util/jemalloc-prof"] tracy-allocator = ["reth-cli-util/tracy-allocator"] -min-error-logs = ["tracing/release_max_level_error"] -min-warn-logs = ["tracing/release_max_level_warn"] -min-info-logs = ["tracing/release_max_level_info"] -min-debug-logs = ["tracing/release_max_level_debug"] -min-trace-logs = ["tracing/release_max_level_trace"] +min-error-logs = [ + "tracing/release_max_level_error", + "reth-node-core/min-error-logs", +] +min-warn-logs = [ + "tracing/release_max_level_warn", + "reth-node-core/min-warn-logs", +] +min-info-logs = [ + "tracing/release_max_level_info", + "reth-node-core/min-info-logs", +] +min-debug-logs = [ + "tracing/release_max_level_debug", + "reth-node-core/min-debug-logs", +] +min-trace-logs = [ + "tracing/release_max_level_trace", + "reth-node-core/min-trace-logs", +] # no-op feature flag for switching between the `optimism` and default functionality in CI matrices ethereum = [] diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 31bc630a8a9..850f082a462 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -83,7 +83,10 @@ tempfile.workspace = true [features] default = ["jemalloc", "otlp", "reth-revm/portable"] -otlp = ["reth-ethereum-cli/otlp"] +otlp = [ + "reth-ethereum-cli/otlp", + "reth-node-core/otlp", +] dev = ["reth-ethereum-cli/dev"] @@ -125,22 +128,27 @@ snmalloc-native = [ min-error-logs = [ "tracing/release_max_level_error", "reth-ethereum-cli/min-error-logs", + "reth-node-core/min-error-logs", ] min-warn-logs = [ "tracing/release_max_level_warn", "reth-ethereum-cli/min-warn-logs", + "reth-node-core/min-warn-logs", ] min-info-logs = [ "tracing/release_max_level_info", "reth-ethereum-cli/min-info-logs", + "reth-node-core/min-info-logs", ] min-debug-logs = [ "tracing/release_max_level_debug", "reth-ethereum-cli/min-debug-logs", + 
"reth-node-core/min-debug-logs", ] min-trace-logs = [ "tracing/release_max_level_trace", "reth-ethereum-cli/min-trace-logs", + "reth-node-core/min-trace-logs", ] [[bin]] diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index 15f987d2b00..728a97bfe3d 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -63,8 +63,23 @@ tracy-allocator = [] snmalloc = [] snmalloc-native = [] -min-error-logs = ["tracing/release_max_level_error"] -min-warn-logs = ["tracing/release_max_level_warn"] -min-info-logs = ["tracing/release_max_level_info"] -min-debug-logs = ["tracing/release_max_level_debug"] -min-trace-logs = ["tracing/release_max_level_trace"] +min-error-logs = [ + "tracing/release_max_level_error", + "reth-node-core/min-error-logs", +] +min-warn-logs = [ + "tracing/release_max_level_warn", + "reth-node-core/min-warn-logs", +] +min-info-logs = [ + "tracing/release_max_level_info", + "reth-node-core/min-info-logs", +] +min-debug-logs = [ + "tracing/release_max_level_debug", + "reth-node-core/min-debug-logs", +] +min-trace-logs = [ + "tracing/release_max_level_trace", + "reth-node-core/min-trace-logs", +] diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index b1a472bd9fd..e2852e01a81 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -80,6 +80,12 @@ asm-keccak = ["alloy-primitives/asm-keccak"] # Feature to enable opentelemetry export otlp = ["reth-tracing/otlp"] +min-error-logs = ["tracing/release_max_level_error"] +min-warn-logs = ["tracing/release_max_level_warn"] +min-info-logs = ["tracing/release_max_level_info"] +min-debug-logs = ["tracing/release_max_level_debug"] +min-trace-logs = ["tracing/release_max_level_trace"] + [build-dependencies] vergen = { workspace = true, features = ["build", "cargo", "emit_and_set"] } vergen-git2.workspace = true From d29370ebf8ab28b6978dfaa59606c4759850597d Mon Sep 17 00:00:00 2001 From: Eric Woolsey Date: Fri, 31 Oct 2025 02:09:29 -0700 Subject: [PATCH 274/371] chore: update superchain reg to c9881d543174ff00b8f3a9ad3f31bf4630b9743b (#19418) --- .../chainspec/res/superchain-configs.tar | Bin 9879040 -> 9879040 bytes .../chainspec/res/superchain_registry_commit | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/optimism/chainspec/res/superchain-configs.tar b/crates/optimism/chainspec/res/superchain-configs.tar index 77bfe0cd472d71cf44d694f563f80c2a778a53f6..86f2ab866ccf5e1a9d866a9ce10df4a60c4d1571 100644 GIT binary patch delta 12103 zcmb7KU5r)L73N%K#u4TYGk}5>6qN!Srp!5KpC1qf+9F&^fB2Ou9boRWdQEq$CZExlK1QrlIqQ2_X%w)wU9oQ43Acl!owtNxwZaFSGYLvrb8t z`oi+tYkg~dYprwl-~6a%KkRt@_LgYcC>MYHVDR7izwu|9>-&+6Jn8HI_5weOMokOL z?Q=>KckO(leFy$d=Z-BMb9eXl_U!E3bF6t{S>!(}j~i3W<}iw+J0IQ;zgeL<}4o`>ghh98tnmckjre3Q~nYgZ= zz(;!YUB+WoOfB3>cypu3po{N=PK>v*nEIdUowFX!5gw)Vz2PtR;@IOnR>d3=pA6&3 z@+Og9CVuMEYGc$v#xF#*3@Z@d4YFjUznWiMB0WmWqZfz#i;DUVpX)!06jA$zAIfmB~uZ=$LIito|zIjM2i_Q$yKqP#wX)= zc!G5{_>$WW$~>JynZTg5@5Ukb;;J~9f-f+#0emuKURxGzz;~3!=gZK1am*HRMSSR7 zsZMxvBS{&8g<*y+bCP~?82ZBj`d+Ae>WB%hh~=8or)f@yk--^6F@%bY=&~vl4KVVo zqKhTxtsLW`_E^p_SAq>K$_ zqgk_DtcpuC$iO!$&v!kx&s2oWAvMo8&ilQ%{cm6{FvHPqC4f$fp6&w z_8-RGAi^Cf2(dN|mnz+GxJ)aIUvyd`dvpl&_B5hQ4I(_0g3!ve@A+((RpLkr!oVC3 zk&!qX!Z%Y8dY;j7K<$u|J|#}3A#|Jvz2^CE#K>NbT7vkGA57$uonl z$P|R0G&+tZ4qtqlI1AGd`WCgZs4|&WVp$5pAT(kKgfXwpU;R@xug-0zdZOnMZI<1r zl-Q6f)KU#BFsBDi8a{^G5yN6y3fT6)r4+FTsCZjPwWO6*Eai$sB^9^o6F*n}~?klh6=zS%y~SeHRbiFfcC=KE5w-a<9aK+9z*I-s;Q2Myt2_YW}p_Y8KjgY{;s1&PhT7 z=de2bPi`r^Y`K+w?%XY^?p*!W)0STi*Sv9~Bqb-?iP&>1Y>X0n 
z#!_%2jEh$Hh}qM0Mf6Ss-1-JpSD)W{#ey3kbLNzx7ddVYO2oB1tWk+;;|d!rw zUbrB-F~ zd}+Ru6#K@*Ca7ZH#Bp7ubBhi5=Ef-<7P6MSlSH#`gWay8*^?F!&chI!j?u3{VJ^st zI5r73S&3tJ6xs;PRGedW!cm~w0w*bbc?axks`TZZ`5xOj_~sc`MI5Nv9Xb(hQ(#k- zXuFHxo0O0)vq@+QG9#2OkH06i@7|?q-@Uu%cQ#zjIg=dtbjo#!1b$)nASU+`oC||M zTBC*MvJ)SU-qGKZnvd^=-KT0kp1$pz4LQbj-V6n{O-{uVaq8=^8A_a*S=ej24Lw|E zj0S`+$RXD#D&j~l8qkbcFrn&Z%-;G7%eM@mx5mVZ%Ffi>kiE0BRqfe1J?AaI1Ep;h z2tK+ptVmFOuy3FW=N7t2x8@xPdc-z2-7)TWl8zg5VXdm;hTCWtHS|MTw?C|CJmyVa zA~yL-Y?6e2=ss^__SsNWgxY`*UzM;x)m{yI2>Ay!q*>D-99Mh^*bIwtwwzaZg9tM| zl9=PD>#|`UQt1A9NnQVZRo8$2R=W+!(6b@C;2g`0RST3@^+4gz7F=B*=7Bks^U6!G5HKYAdWFqJZpKJq=%-mqvdX4$*ZrEe0^X)AScAe;X#a^7G#|LCEiPyIz-PeDty032~lIY@#n0YMJ z^Cm}VD1T3()gl}gX#2Mf*{AsfJxR@hZLr5x&4J!+4VM3b(c6*}m;H?qM%Z51PcX)A zw?$f__B?yO;(y?g2(bgU6ZQmb7wk#cZrD#@dtgt&_QIZq{S3BG3Gqx}ru8y(7HRL^ z*vFYt9qKHrpHX$I_fwrk??6@y$RcLWvx#sHKrx>qcAx>qo6S4Z;NQALp9=Op2YX%> z_Z=KZGhSNGvyifF%7aeQb@3qVkm|aajJ1^#3pSB!m;LOD!RZCqVGK_%77{Xl=5K^_ zA#ts?0t#WPIP;>aJ@fM(`-nEMxS2zlhY6VBMS=|r_A)lDS14hRQKx3}_B{63r3WY- z;tQ|9j;O8+N5?)z#)9B-24z@rOo`JnFg-@i_I#C^f=sM*EA z#P8MrI&=QKd0r{Rmxeg#>T=!Uye`>3zr7^8q*v_1fZ!JAqe&Di&3l|L)Z%?TXPu#c L9{t0jEzbV{PCY+| delta 10992 zcma)CU2s*^73N&HNfeS8N+sAJF;bgtAU)^ov-bf3r76YBPc5|&i9aDEH$WhY{FS0b zDXpFue=6LT+ft{@;NXJ~f^(w1u^wTsp5*L-P}IP9;sqIK0bjC71Bl4bV=dRA5Cq z3}l3cC-YShyI{{&+8!Cl<2?(sAM>5Di?JuND=`jZ&a0w0;ax8^KG#JiOh#6yVUP|O z*GeLx4GN{Q-3r+TD+lN>9U!-Wv>$P)FNzD^XQkykJ} z8zf`97|~&0V9;M^gQC#s4#oC;NqkBhq-9!IKmEIBADXRVXTnzUntSsinAR1hPVOnr~XkcZF$ z>>vg+VXE7R0meD)lq5I^HysON6~eun3Ac6zt&UTs&lJFZ#v(UsF{T=$rX21eCft!t zjRGA6yhVo?i-N%Ee(iIhT@a_yByVs{;@fQtWaFyafQ-;>8-N3nIQO*qTHD3Yet`w)YdvlFy%q%7cK@__Pt~uL`xQlVojg*cmp!cW1 z%6S=jG+`yOXG#$8t)e)KLc1`5-h7}^inBCBQ=CXSF#_%}`7@wzWEljwU099*;5aqq zR#}_rn?k`!i3nwg@EDDvu(9LIHKq0yr-M_au{EWjlwAppNR>ia zX3>n~SS|~xGRJb&AMSNj%TR~5YTUV5EKbDPY9r3ptkS`f=dPZ(Dsm@JrT!EEdo|W;Eww5nLZZ~Z$Gna32m4etr4nxuG5x9u*`&*J7 zo(X9*+2L7(kGndlSjLj&1c|sj3v!1Mm%oyG(p87D39M5S0?Tf8Vg-fJgl)cmO=6OSCjnldrbcL*J__}l;Q#uXlL+8 zKG)8PnB58yM$Eo9_e)0|4$_hHN~aYIG(`XnD)GI0P4>OE!9P0sqO|**%8q5Fyrk_i zG!HW0v<sfyp-m#Lw?n>;?Z+ot8i*}z8$%xjRUt2ny`+DA00~Y1dO6J< zRw>N7HY(QfL|l}R$cT$7_m~sMG|kd(KhPkqRg9Rk(G$c^mQz#`-P)4gI3ap67!BBa8}z_JlT^ot;3Dsm0739r8z4C2dDno3^9hrceurKpZq#PMm<;K)z#yw}wE36j`0;-Z*BDQAOBz zC28nh1Np9L=zh3|;vpzsXPEF4DiQBL4EdfB@2}0RbHX^9YD--hL=}LwenmQzo7O=( zO}^<7f(l3J*B;iS4}G6KRvJ;e8@e;9b5b`ACDI@l6&(%6AN9m@6 z-m=2y53^hUo>UEOhdgbnhIVXohMq*bU_G`6X{A@}lf&qkmYtAYklm0ykncnGLVf_* z2YCkaEaZof{gCHOw&nT3dmNw8{LXhnC6h5peJ5n%2qn+Dpwf7{lN>67ftqqgxKU1 z4tq`tu}N^9HO7TnhmywDmrP^paMz1uTHKEt6#Gz`h?B$k0`X-kE$J3wQFar|ju@8cBnL$SQ5?`G$Tj4r$ocEJXGwR7Dy#s?XRFa!%MO{UWrw%D zszB`R_d(Dt&3QVu`=9@r$)5lDzyEaPx8%2_zWZ${ zaE{Z7?=L@Y&C6(0KJL$JKcDYkDEk)3g|W_W% Date: Fri, 31 Oct 2025 10:32:51 +0100 Subject: [PATCH 275/371] fix(compact): prevent bitflag overflow by using usize accumulator (#19408) --- .../storage/codecs/derive/src/compact/flags.rs | 18 +++++++++--------- .../storage/codecs/derive/src/compact/mod.rs | 4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/crates/storage/codecs/derive/src/compact/flags.rs b/crates/storage/codecs/derive/src/compact/flags.rs index b6bad462917..c3e0b988cf6 100644 --- a/crates/storage/codecs/derive/src/compact/flags.rs +++ b/crates/storage/codecs/derive/src/compact/flags.rs @@ -51,7 +51,7 @@ pub(crate) fn generate_flag_struct( quote! 
{ buf.get_u8(), }; - total_bytes.into() + total_bytes ]; let docs = format!( @@ -64,11 +64,11 @@ pub(crate) fn generate_flag_struct( impl<'a> #ident<'a> { #[doc = #bitflag_encoded_bytes] pub const fn bitflag_encoded_bytes() -> usize { - #total_bytes as usize + #total_bytes } #[doc = #bitflag_unused_bits] pub const fn bitflag_unused_bits() -> usize { - #unused_bits as usize + #unused_bits } } } @@ -77,11 +77,11 @@ pub(crate) fn generate_flag_struct( impl #ident { #[doc = #bitflag_encoded_bytes] pub const fn bitflag_encoded_bytes() -> usize { - #total_bytes as usize + #total_bytes } #[doc = #bitflag_unused_bits] pub const fn bitflag_unused_bits() -> usize { - #unused_bits as usize + #unused_bits } } } @@ -123,8 +123,8 @@ fn build_struct_field_flags( fields: Vec<&StructFieldDescriptor>, field_flags: &mut Vec, is_zstd: bool, -) -> u8 { - let mut total_bits = 0; +) -> usize { + let mut total_bits: usize = 0; // Find out the adequate bit size for the length of each field, if applicable. for field in fields { @@ -138,7 +138,7 @@ fn build_struct_field_flags( let name = format_ident!("{name}_len"); let bitsize = get_bit_size(ftype); let bsize = format_ident!("B{bitsize}"); - total_bits += bitsize; + total_bits += bitsize as usize; field_flags.push(quote! { pub #name: #bsize , @@ -170,7 +170,7 @@ fn build_struct_field_flags( /// skipped field. /// /// Returns the total number of bytes used by the flags struct and how many unused bits. -fn pad_flag_struct(total_bits: u8, field_flags: &mut Vec) -> (u8, u8) { +fn pad_flag_struct(total_bits: usize, field_flags: &mut Vec) -> (usize, usize) { let remaining = 8 - total_bits % 8; if remaining == 8 { (total_bits / 8, 0) diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index f217134fa5b..c1951233484 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -238,11 +238,11 @@ mod tests { impl TestStruct { #[doc = "Used bytes by [`TestStructFlags`]"] pub const fn bitflag_encoded_bytes() -> usize { - 2u8 as usize + 2usize } #[doc = "Unused bits for new fields by [`TestStructFlags`]"] pub const fn bitflag_unused_bits() -> usize { - 1u8 as usize + 1usize } } From 4d437c43bf0f6a8aca9e5a76be4985e77fcd7485 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Fri, 31 Oct 2025 11:56:37 +0100 Subject: [PATCH 276/371] fix: Properly set MerkleChangeSets checkpoint in stage's fast-path (#19421) --- crates/stages/stages/src/stages/merkle_changesets.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/stages/stages/src/stages/merkle_changesets.rs b/crates/stages/stages/src/stages/merkle_changesets.rs index 9d33912041f..dd4d8cf2017 100644 --- a/crates/stages/stages/src/stages/merkle_changesets.rs +++ b/crates/stages/stages/src/stages/merkle_changesets.rs @@ -312,6 +312,12 @@ where // Get the previously computed range. This will be updated to reflect the populating of the // target range. let mut computed_range = Self::computed_range(provider, input.checkpoint)?; + debug!( + target: "sync::stages::merkle_changesets", + ?computed_range, + ?target_range, + "Got computed and target ranges", + ); // We want the target range to not include any data already computed previously, if // possible, so we start the target range from the end of the computed range if that is @@ -335,9 +341,9 @@ where } // If target range is empty (target_start >= target_end), stage is already successfully - // executed + // executed. 
if target_range.start >= target_range.end { - return Ok(ExecOutput::done(input.checkpoint.unwrap_or_default())); + return Ok(ExecOutput::done(StageCheckpoint::new(target_range.end.saturating_sub(1)))); } // If our target range is a continuation of the already computed range then we can keep the From 8a72b519b2ed7dbf7457c9f28f9bd2497d0dfcba Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 31 Oct 2025 12:53:01 +0100 Subject: [PATCH 277/371] chore: add count field to trace (#19422) --- crates/engine/tree/src/tree/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index a6e4a0d4cb1..ca8a93df079 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1286,7 +1286,7 @@ where .map(|b| b.recovered_block().num_hash()) .expect("Checked non-empty persisting blocks"); - debug!(target: "engine::tree", blocks = ?blocks_to_persist.iter().map(|block| block.recovered_block().num_hash()).collect::>(), "Persisting blocks"); + debug!(target: "engine::tree", count=blocks_to_persist.len(), blocks = ?blocks_to_persist.iter().map(|block| block.recovered_block().num_hash()).collect::>(), "Persisting blocks"); let (tx, rx) = oneshot::channel(); let _ = self.persistence.save_blocks(blocks_to_persist, tx); From b6be053cbe76df3fffcbf716ec0a0f15e15e0dad Mon Sep 17 00:00:00 2001 From: Gengar Date: Fri, 31 Oct 2025 13:55:19 +0200 Subject: [PATCH 278/371] fix(codecs): return remaining slice in EIP-1559 from_compact (#19413) --- crates/storage/codecs/src/alloy/transaction/eip1559.rs | 5 +++-- crates/storage/codecs/src/alloy/transaction/eip2930.rs | 5 +++-- crates/storage/codecs/src/alloy/transaction/eip4844.rs | 5 +++-- crates/storage/codecs/src/alloy/transaction/eip7702.rs | 5 +++-- crates/storage/codecs/src/alloy/transaction/legacy.rs | 5 +++-- crates/storage/codecs/src/alloy/transaction/optimism.rs | 5 +++-- 6 files changed, 18 insertions(+), 12 deletions(-) diff --git a/crates/storage/codecs/src/alloy/transaction/eip1559.rs b/crates/storage/codecs/src/alloy/transaction/eip1559.rs index 6d910a6900c..f13422a2dea 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip1559.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip1559.rs @@ -53,7 +53,8 @@ impl Compact for AlloyTxEip1559 { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxEip1559::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. + let (tx, remaining) = TxEip1559::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, @@ -67,6 +68,6 @@ impl Compact for AlloyTxEip1559 { input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/eip2930.rs b/crates/storage/codecs/src/alloy/transaction/eip2930.rs index aeb08f361be..a5c25a84d4f 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip2930.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip2930.rs @@ -52,7 +52,8 @@ impl Compact for AlloyTxEip2930 { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxEip2930::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. 
+ let (tx, remaining) = TxEip2930::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, nonce: tx.nonce, @@ -63,6 +64,6 @@ impl Compact for AlloyTxEip2930 { access_list: tx.access_list, input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/eip4844.rs b/crates/storage/codecs/src/alloy/transaction/eip4844.rs index 6367f3e08e7..6ea1927f7d5 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip4844.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip4844.rs @@ -68,7 +68,8 @@ impl Compact for AlloyTxEip4844 { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxEip4844::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. + let (tx, remaining) = TxEip4844::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, nonce: tx.nonce, @@ -82,7 +83,7 @@ impl Compact for AlloyTxEip4844 { max_fee_per_blob_gas: tx.max_fee_per_blob_gas, input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/eip7702.rs b/crates/storage/codecs/src/alloy/transaction/eip7702.rs index eab10af0b66..95de81c3804 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip7702.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip7702.rs @@ -57,7 +57,8 @@ impl Compact for AlloyTxEip7702 { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxEip7702::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. + let (tx, remaining) = TxEip7702::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, nonce: tx.nonce, @@ -70,6 +71,6 @@ impl Compact for AlloyTxEip7702 { access_list: tx.access_list, authorization_list: tx.authorization_list, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/legacy.rs b/crates/storage/codecs/src/alloy/transaction/legacy.rs index 1667893dc33..c4caf97ac38 100644 --- a/crates/storage/codecs/src/alloy/transaction/legacy.rs +++ b/crates/storage/codecs/src/alloy/transaction/legacy.rs @@ -67,7 +67,8 @@ impl Compact for AlloyTxLegacy { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxLegacy::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. + let (tx, remaining) = TxLegacy::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, @@ -79,6 +80,6 @@ impl Compact for AlloyTxLegacy { input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/optimism.rs b/crates/storage/codecs/src/alloy/transaction/optimism.rs index 40333ce9889..7f9c318e6a1 100644 --- a/crates/storage/codecs/src/alloy/transaction/optimism.rs +++ b/crates/storage/codecs/src/alloy/transaction/optimism.rs @@ -66,7 +66,8 @@ impl Compact for AlloyTxDeposit { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxDeposit::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. 
+ let (tx, remaining) = TxDeposit::from_compact(buf, len); let alloy_tx = Self { source_hash: tx.source_hash, from: tx.from, @@ -77,7 +78,7 @@ impl Compact for AlloyTxDeposit { is_system_transaction: tx.is_system_transaction, input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } From 728e03706ce42092d7d2a48dc2de0132cf222cf7 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Fri, 31 Oct 2025 13:39:40 +0100 Subject: [PATCH 279/371] feat(reth-bench): Default --wait-time to 250ms (#19425) --- bin/reth-bench/src/bench/new_payload_fcu.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index ce094895ee3..1d1bf59b365 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -30,8 +30,8 @@ pub struct Command { rpc_url: String, /// How long to wait after a forkchoice update before sending the next payload. - #[arg(long, value_name = "WAIT_TIME", value_parser = parse_duration, verbatim_doc_comment)] - wait_time: Option, + #[arg(long, value_name = "WAIT_TIME", value_parser = parse_duration, default_value = "250ms", verbatim_doc_comment)] + wait_time: Duration, /// The size of the block buffer (channel capacity) for prefetching blocks from the RPC /// endpoint. @@ -170,10 +170,8 @@ impl Command { // convert gas used to gigagas, then compute gigagas per second info!(%combined_result); - // wait if we need to - if let Some(wait_time) = self.wait_time { - tokio::time::sleep(wait_time).await; - } + // wait before sending the next payload + tokio::time::sleep(self.wait_time).await; // record the current result let gas_row = TotalGasRow { block_number, gas_used, time: current_duration }; From 9f4f66dd8e01c9805f69bf90a9c91cb86f217499 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 31 Oct 2025 13:48:33 +0100 Subject: [PATCH 280/371] perf: bias towards proof results (#19426) --- .../src/tree/payload_processor/multiproof.rs | 116 +++++++++--------- 1 file changed, 58 insertions(+), 58 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index ca3bd380d4d..73dc6a90954 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -1029,7 +1029,64 @@ impl MultiProofTask { loop { trace!(target: "engine::tree::payload_processor::multiproof", "entering main channel receiving loop"); - crossbeam_channel::select! { + crossbeam_channel::select_biased! 
{ + recv(self.proof_result_rx) -> proof_msg => { + match proof_msg { + Ok(proof_result) => { + proofs_processed += 1; + + self.metrics + .proof_calculation_duration_histogram + .record(proof_result.elapsed); + + self.multiproof_manager.on_calculation_complete(); + + // Convert ProofResultMessage to SparseTrieUpdate + match proof_result.result { + Ok(proof_result_data) => { + debug!( + target: "engine::tree::payload_processor::multiproof", + sequence = proof_result.sequence_number, + total_proofs = proofs_processed, + "Processing calculated proof from worker" + ); + + let update = SparseTrieUpdate { + state: proof_result.state, + multiproof: proof_result_data.into_multiproof(), + }; + + if let Some(combined_update) = + self.on_proof(proof_result.sequence_number, update) + { + let _ = self.to_sparse_trie.send(combined_update); + } + } + Err(error) => { + error!(target: "engine::tree::payload_processor::multiproof", ?error, "proof calculation error from worker"); + return + } + } + + if self.is_done( + proofs_processed, + state_update_proofs_requested, + prefetch_proofs_requested, + updates_finished, + ) { + debug!( + target: "engine::tree::payload_processor::multiproof", + "State updates finished and all proofs processed, ending calculation" + ); + break + } + } + Err(_) => { + error!(target: "engine::tree::payload_processor::multiproof", "Proof result channel closed unexpectedly"); + return + } + } + }, recv(self.rx) -> message => { match message { Ok(msg) => match msg { @@ -1129,63 +1186,6 @@ impl MultiProofTask { return } } - }, - recv(self.proof_result_rx) -> proof_msg => { - match proof_msg { - Ok(proof_result) => { - proofs_processed += 1; - - self.metrics - .proof_calculation_duration_histogram - .record(proof_result.elapsed); - - self.multiproof_manager.on_calculation_complete(); - - // Convert ProofResultMessage to SparseTrieUpdate - match proof_result.result { - Ok(proof_result_data) => { - debug!( - target: "engine::tree::payload_processor::multiproof", - sequence = proof_result.sequence_number, - total_proofs = proofs_processed, - "Processing calculated proof from worker" - ); - - let update = SparseTrieUpdate { - state: proof_result.state, - multiproof: proof_result_data.into_multiproof(), - }; - - if let Some(combined_update) = - self.on_proof(proof_result.sequence_number, update) - { - let _ = self.to_sparse_trie.send(combined_update); - } - } - Err(error) => { - error!(target: "engine::tree::payload_processor::multiproof", ?error, "proof calculation error from worker"); - return - } - } - - if self.is_done( - proofs_processed, - state_update_proofs_requested, - prefetch_proofs_requested, - updates_finished, - ) { - debug!( - target: "engine::tree::payload_processor::multiproof", - "State updates finished and all proofs processed, ending calculation" - ); - break - } - } - Err(_) => { - error!(target: "engine::tree::payload_processor::multiproof", "Proof result channel closed unexpectedly"); - return - } - } } } } From 1f2f1d432f9420916cabe5e6d0623b8b92a18ea7 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Fri, 31 Oct 2025 14:16:27 +0000 Subject: [PATCH 281/371] feat(node): CLI argument for sync state idle when backfill is idle (#19429) --- crates/node/builder/src/launch/engine.rs | 7 +++++++ crates/node/core/src/args/debug.rs | 8 ++++++++ docs/vocs/docs/pages/cli/reth/node.mdx | 5 +++++ 3 files changed, 20 insertions(+) diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 
9faf9fcfa95..ffe07aaac88 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -261,12 +261,16 @@ impl EngineNodeLauncher { let provider = ctx.blockchain_db().clone(); let (exit, rx) = oneshot::channel(); let terminate_after_backfill = ctx.terminate_after_initial_backfill(); + let startup_sync_state_idle = ctx.node_config().debug.startup_sync_state_idle; info!(target: "reth::cli", "Starting consensus engine"); ctx.task_executor().spawn_critical("consensus engine", Box::pin(async move { if let Some(initial_target) = initial_target { debug!(target: "reth::cli", %initial_target, "start backfill sync"); + // network_handle's sync state is already initialized at Syncing engine_service.orchestrator_mut().start_backfill_sync(initial_target); + } else if startup_sync_state_idle { + network_handle.update_sync_state(SyncState::Idle); } let mut res = Ok(()); @@ -289,6 +293,9 @@ impl EngineNodeLauncher { debug!(target: "reth::cli", "Terminating after initial backfill"); break } + if startup_sync_state_idle { + network_handle.update_sync_state(SyncState::Idle); + } } ChainEvent::BackfillSyncStarted => { network_handle.update_sync_state(SyncState::Syncing); diff --git a/crates/node/core/src/args/debug.rs b/crates/node/core/src/args/debug.rs index 13d7685b055..b5d1fb3f7d8 100644 --- a/crates/node/core/src/args/debug.rs +++ b/crates/node/core/src/args/debug.rs @@ -101,6 +101,13 @@ pub struct DebugArgs { /// Example: `nodename:secret@host:port` #[arg(long = "ethstats", help_heading = "Debug")] pub ethstats: Option, + + /// Set the node to idle state when the backfill is not running. + /// + /// This makes the `eth_syncing` RPC return "Idle" when the node has just started or finished + /// the backfill, but did not yet receive any new blocks. + #[arg(long = "debug.startup-sync-state-idle", help_heading = "Debug")] + pub startup_sync_state_idle: bool, } impl Default for DebugArgs { @@ -119,6 +126,7 @@ impl Default for DebugArgs { invalid_block_hook: Some(InvalidBlockSelection::default()), healthy_node_rpc_url: None, ethstats: None, + startup_sync_state_idle: false, } } } diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index b53dda3fb1a..2326b40d7fc 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -701,6 +701,11 @@ Debug: --ethstats The URL of the ethstats server to connect to. Example: `nodename:secret@host:port` + --debug.startup-sync-state-idle + Set the node to idle state when the backfill is not running. + + This makes the `eth_syncing` RPC return "Idle" when the node has just started or finished the backfill, but did not yet receive any new blocks. + Database: --db.log-level Database logging level. 
Levels higher than "notice" require a debug build From af9b04c1a3a48c285ae50a46a87e56970c7cee7e Mon Sep 17 00:00:00 2001 From: Udoagwa Franklin <54338168+frankudoags@users.noreply.github.com> Date: Fri, 31 Oct 2025 16:02:51 +0100 Subject: [PATCH 282/371] feat(op-reth): implement miner_setGasLimit RPC (#19247) Co-authored-by: frankudoags Co-authored-by: Matthias Seitz --- crates/optimism/node/src/node.rs | 75 +++++++++++++++++++++++--- crates/optimism/payload/src/builder.rs | 25 +++++---- crates/optimism/payload/src/config.rs | 50 ++++++++++++++++- crates/optimism/rpc/src/miner.rs | 20 +++++-- 4 files changed, 145 insertions(+), 25 deletions(-) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 17380056d13..66156edefc9 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -39,7 +39,7 @@ use reth_optimism_evm::{OpEvmConfig, OpRethReceiptBuilder}; use reth_optimism_forks::OpHardforks; use reth_optimism_payload_builder::{ builder::OpPayloadTransactions, - config::{OpBuilderConfig, OpDAConfig}, + config::{OpBuilderConfig, OpDAConfig, OpGasLimitConfig}, OpAttributes, OpBuiltPayload, OpPayloadPrimitives, }; use reth_optimism_primitives::{DepositReceipt, OpPrimitives}; @@ -118,6 +118,10 @@ pub struct OpNode { /// /// By default no throttling is applied. pub da_config: OpDAConfig, + /// Gas limit configuration for the OP builder. + /// Used to control the gas limit of the blocks produced by the OP builder.(configured by the + /// batcher via the `miner_` api) + pub gas_limit_config: OpGasLimitConfig, } /// A [`ComponentsBuilder`] with its generic arguments set to a stack of Optimism specific builders. @@ -133,7 +137,11 @@ pub type OpNodeComponentBuilder = ComponentsBu impl OpNode { /// Creates a new instance of the Optimism node type. pub fn new(args: RollupArgs) -> Self { - Self { args, da_config: OpDAConfig::default() } + Self { + args, + da_config: OpDAConfig::default(), + gas_limit_config: OpGasLimitConfig::default(), + } } /// Configure the data availability configuration for the OP builder. @@ -142,6 +150,12 @@ impl OpNode { self } + /// Configure the gas limit configuration for the OP builder. + pub fn with_gas_limit_config(mut self, gas_limit_config: OpGasLimitConfig) -> Self { + self.gas_limit_config = gas_limit_config; + self + } + /// Returns the components for the given [`RollupArgs`]. pub fn components(&self) -> OpNodeComponentBuilder where @@ -161,7 +175,9 @@ impl OpNode { ) .executor(OpExecutorBuilder::default()) .payload(BasicPayloadServiceBuilder::new( - OpPayloadBuilder::new(compute_pending_block).with_da_config(self.da_config.clone()), + OpPayloadBuilder::new(compute_pending_block) + .with_da_config(self.da_config.clone()) + .with_gas_limit_config(self.gas_limit_config.clone()), )) .network(OpNetworkBuilder::new(disable_txpool_gossip, !discovery_v4)) .consensus(OpConsensusBuilder::default()) @@ -173,6 +189,7 @@ impl OpNode { .with_sequencer(self.args.sequencer.clone()) .with_sequencer_headers(self.args.sequencer_headers.clone()) .with_da_config(self.da_config.clone()) + .with_gas_limit_config(self.gas_limit_config.clone()) .with_enable_tx_conditional(self.args.enable_tx_conditional) .with_min_suggested_priority_fee(self.args.min_suggested_priority_fee) .with_historical_rpc(self.args.historical_rpc.clone()) @@ -286,6 +303,8 @@ pub struct OpAddOns< pub rpc_add_ons: RpcAddOns, /// Data availability configuration for the OP builder. pub da_config: OpDAConfig, + /// Gas limit configuration for the OP builder. 
+ pub gas_limit_config: OpGasLimitConfig, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP /// network. pub sequencer_url: Option, @@ -306,9 +325,11 @@ where EthB: EthApiBuilder, { /// Creates a new instance from components. + #[allow(clippy::too_many_arguments)] pub const fn new( rpc_add_ons: RpcAddOns, da_config: OpDAConfig, + gas_limit_config: OpGasLimitConfig, sequencer_url: Option, sequencer_headers: Vec, historical_rpc: Option, @@ -318,6 +339,7 @@ where Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -368,6 +390,7 @@ where let Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -378,6 +401,7 @@ where OpAddOns::new( rpc_add_ons.with_engine_api(engine_api_builder), da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -394,6 +418,7 @@ where let Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, enable_tx_conditional, @@ -404,6 +429,7 @@ where OpAddOns::new( rpc_add_ons.with_payload_validator(payload_validator_builder), da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -423,6 +449,7 @@ where let Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, enable_tx_conditional, @@ -433,6 +460,7 @@ where OpAddOns::new( rpc_add_ons.with_rpc_middleware(rpc_middleware), da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -496,6 +524,7 @@ where let Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, enable_tx_conditional, @@ -536,7 +565,7 @@ where Box::new(ctx.node.task_executor().clone()), builder, ); - let miner_ext = OpMinerExtApi::new(da_config); + let miner_ext = OpMinerExtApi::new(da_config, gas_limit_config); let sequencer_client = if let Some(url) = sequencer_url { Some(SequencerClient::new_with_headers(url, sequencer_headers).await?) @@ -652,6 +681,8 @@ pub struct OpAddOnsBuilder { historical_rpc: Option, /// Data availability configuration for the OP builder. da_config: Option, + /// Gas limit configuration for the OP builder. + gas_limit_config: Option, /// Enable transaction conditionals. enable_tx_conditional: bool, /// Marker for network types. @@ -673,6 +704,7 @@ impl Default for OpAddOnsBuilder { sequencer_headers: Vec::new(), historical_rpc: None, da_config: None, + gas_limit_config: None, enable_tx_conditional: false, min_suggested_priority_fee: 1_000_000, _nt: PhantomData, @@ -702,6 +734,12 @@ impl OpAddOnsBuilder { self } + /// Configure the gas limit configuration for the OP payload builder. + pub fn with_gas_limit_config(mut self, gas_limit_config: OpGasLimitConfig) -> Self { + self.gas_limit_config = Some(gas_limit_config); + self + } + /// Configure if transaction conditional should be enabled. 
pub const fn with_enable_tx_conditional(mut self, enable_tx_conditional: bool) -> Self { self.enable_tx_conditional = enable_tx_conditional; @@ -735,6 +773,7 @@ impl OpAddOnsBuilder { sequencer_headers, historical_rpc, da_config, + gas_limit_config, enable_tx_conditional, min_suggested_priority_fee, tokio_runtime, @@ -747,6 +786,7 @@ impl OpAddOnsBuilder { sequencer_headers, historical_rpc, da_config, + gas_limit_config, enable_tx_conditional, min_suggested_priority_fee, _nt, @@ -779,6 +819,7 @@ impl OpAddOnsBuilder { sequencer_url, sequencer_headers, da_config, + gas_limit_config, enable_tx_conditional, min_suggested_priority_fee, historical_rpc, @@ -802,6 +843,7 @@ impl OpAddOnsBuilder { ) .with_tokio_runtime(tokio_runtime), da_config.unwrap_or_default(), + gas_limit_config.unwrap_or_default(), sequencer_url, sequencer_headers, historical_rpc, @@ -1006,13 +1048,21 @@ pub struct OpPayloadBuilder { /// This data availability configuration specifies constraints for the payload builder /// when assembling payloads pub da_config: OpDAConfig, + /// Gas limit configuration for the OP builder. + /// This is used to configure gas limit related constraints for the payload builder. + pub gas_limit_config: OpGasLimitConfig, } impl OpPayloadBuilder { /// Create a new instance with the given `compute_pending_block` flag and data availability /// config. pub fn new(compute_pending_block: bool) -> Self { - Self { compute_pending_block, best_transactions: (), da_config: OpDAConfig::default() } + Self { + compute_pending_block, + best_transactions: (), + da_config: OpDAConfig::default(), + gas_limit_config: OpGasLimitConfig::default(), + } } /// Configure the data availability configuration for the OP payload builder. @@ -1020,14 +1070,20 @@ impl OpPayloadBuilder { self.da_config = da_config; self } + + /// Configure the gas limit configuration for the OP payload builder. + pub fn with_gas_limit_config(mut self, gas_limit_config: OpGasLimitConfig) -> Self { + self.gas_limit_config = gas_limit_config; + self + } } impl OpPayloadBuilder { /// Configures the type responsible for yielding the transactions that should be included in the /// payload. pub fn with_transactions(self, best_transactions: T) -> OpPayloadBuilder { - let Self { compute_pending_block, da_config, .. } = self; - OpPayloadBuilder { compute_pending_block, best_transactions, da_config } + let Self { compute_pending_block, da_config, gas_limit_config, .. } = self; + OpPayloadBuilder { compute_pending_block, best_transactions, da_config, gas_limit_config } } } @@ -1068,7 +1124,10 @@ where pool, ctx.provider().clone(), evm_config, - OpBuilderConfig { da_config: self.da_config.clone() }, + OpBuilderConfig { + da_config: self.da_config.clone(), + gas_limit_config: self.gas_limit_config.clone(), + }, ) .with_transactions(self.best_transactions.clone()) .set_compute_pending_block(self.compute_pending_block); diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 05f33d3b699..3d047c5f617 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -1,9 +1,7 @@ //! Optimism payload builder implementation. 
use crate::{ - config::{OpBuilderConfig, OpDAConfig}, - error::OpPayloadBuilderError, - payload::OpBuiltPayload, - OpAttributes, OpPayloadBuilderAttributes, OpPayloadPrimitives, + config::OpBuilderConfig, error::OpPayloadBuilderError, payload::OpBuiltPayload, OpAttributes, + OpPayloadBuilderAttributes, OpPayloadPrimitives, }; use alloy_consensus::{BlockHeader, Transaction, Typed2718}; use alloy_evm::Evm as AlloyEvm; @@ -187,7 +185,7 @@ where let ctx = OpPayloadBuilderCtx { evm_config: self.evm_config.clone(), - da_config: self.config.da_config.clone(), + builder_config: self.config.clone(), chain_spec: self.client.chain_spec(), config, cancel, @@ -223,7 +221,7 @@ where let config = PayloadConfig { parent_header: Arc::new(parent), attributes }; let ctx = OpPayloadBuilderCtx { evm_config: self.evm_config.clone(), - da_config: self.config.da_config.clone(), + builder_config: self.config.clone(), chain_spec: self.client.chain_spec(), config, cancel: Default::default(), @@ -550,8 +548,8 @@ pub struct OpPayloadBuilderCtx< > { /// The type that knows how to perform system calls and configure the evm. pub evm_config: Evm, - /// The DA config for the payload builder - pub da_config: OpDAConfig, + /// Additional config for the builder/sequencer, e.g. DA and gas limit + pub builder_config: OpBuilderConfig, /// The chainspec pub chain_spec: Arc, /// How to build the payload. @@ -684,9 +682,14 @@ where Builder: BlockBuilder, <::Evm as AlloyEvm>::DB: Database, { - let block_gas_limit = builder.evm_mut().block().gas_limit(); - let block_da_limit = self.da_config.max_da_block_size(); - let tx_da_limit = self.da_config.max_da_tx_size(); + let mut block_gas_limit = builder.evm_mut().block().gas_limit(); + if let Some(gas_limit_config) = self.builder_config.gas_limit_config.gas_limit() { + // If a gas limit is configured, use that limit as target if it's smaller, otherwise use + // the block's actual gas limit. + block_gas_limit = gas_limit_config.min(block_gas_limit); + }; + let block_da_limit = self.builder_config.da_config.max_da_block_size(); + let tx_da_limit = self.builder_config.da_config.max_da_tx_size(); let base_fee = builder.evm_mut().block().basefee(); while let Some(tx) = best_txs.next(()) { diff --git a/crates/optimism/payload/src/config.rs b/crates/optimism/payload/src/config.rs index 469bfc9fe31..c79ee0ece4b 100644 --- a/crates/optimism/payload/src/config.rs +++ b/crates/optimism/payload/src/config.rs @@ -7,12 +7,14 @@ use std::sync::{atomic::AtomicU64, Arc}; pub struct OpBuilderConfig { /// Data availability configuration for the OP builder. pub da_config: OpDAConfig, + /// Gas limit configuration for the OP builder. + pub gas_limit_config: OpGasLimitConfig, } impl OpBuilderConfig { /// Creates a new OP builder configuration with the given data availability configuration. - pub const fn new(da_config: OpDAConfig) -> Self { - Self { da_config } + pub const fn new(da_config: OpDAConfig, gas_limit_config: OpGasLimitConfig) -> Self { + Self { da_config, gas_limit_config } } /// Returns the Data Availability configuration for the OP builder, if it has configured @@ -100,6 +102,40 @@ struct OpDAConfigInner { max_da_block_size: AtomicU64, } +/// Contains the Gas Limit configuration for the OP builder. +/// +/// This type is shareable and can be used to update the Gas Limit configuration for the OP payload +/// builder. +#[derive(Debug, Clone, Default)] +pub struct OpGasLimitConfig { + /// Gas limit for a transaction + /// + /// 0 means use the default gas limit. 
+ gas_limit: Arc, +} + +impl OpGasLimitConfig { + /// Creates a new Gas Limit configuration with the given maximum gas limit. + pub fn new(max_gas_limit: u64) -> Self { + let this = Self::default(); + this.set_gas_limit(max_gas_limit); + this + } + /// Returns the gas limit for a transaction, if any. + pub fn gas_limit(&self) -> Option { + let val = self.gas_limit.load(std::sync::atomic::Ordering::Relaxed); + if val == 0 { + None + } else { + Some(val) + } + } + /// Sets the gas limit for a transaction. 0 means use the default gas limit. + pub fn set_gas_limit(&self, gas_limit: u64) { + self.gas_limit.store(gas_limit, std::sync::atomic::Ordering::Relaxed); + } +} + #[cfg(test)] mod tests { use super::*; @@ -122,4 +158,14 @@ mod tests { let config = OpBuilderConfig::default(); assert!(config.constrained_da_config().is_none()); } + + #[test] + fn test_gas_limit() { + let gas_limit = OpGasLimitConfig::default(); + assert_eq!(gas_limit.gas_limit(), None); + gas_limit.set_gas_limit(50000); + assert_eq!(gas_limit.gas_limit(), Some(50000)); + gas_limit.set_gas_limit(0); + assert_eq!(gas_limit.gas_limit(), None); + } } diff --git a/crates/optimism/rpc/src/miner.rs b/crates/optimism/rpc/src/miner.rs index b01b37b58b2..f8780f37e82 100644 --- a/crates/optimism/rpc/src/miner.rs +++ b/crates/optimism/rpc/src/miner.rs @@ -4,7 +4,7 @@ use alloy_primitives::U64; use jsonrpsee_core::{async_trait, RpcResult}; pub use op_alloy_rpc_jsonrpsee::traits::MinerApiExtServer; use reth_metrics::{metrics::Gauge, Metrics}; -use reth_optimism_payload_builder::config::OpDAConfig; +use reth_optimism_payload_builder::config::{OpDAConfig, OpGasLimitConfig}; use tracing::debug; /// Miner API extension for OP, exposes settings for the data availability configuration via the @@ -12,14 +12,15 @@ use tracing::debug; #[derive(Debug, Clone)] pub struct OpMinerExtApi { da_config: OpDAConfig, + gas_limit_config: OpGasLimitConfig, metrics: OpMinerMetrics, } impl OpMinerExtApi { /// Instantiate the miner API extension with the given, sharable data availability /// configuration. 
- pub fn new(da_config: OpDAConfig) -> Self { - Self { da_config, metrics: OpMinerMetrics::default() } + pub fn new(da_config: OpDAConfig, gas_limit_config: OpGasLimitConfig) -> Self { + Self { da_config, gas_limit_config, metrics: OpMinerMetrics::default() } } } @@ -36,7 +37,10 @@ impl MinerApiExtServer for OpMinerExtApi { Ok(true) } - async fn set_gas_limit(&self, _max_block_gas: U64) -> RpcResult { + async fn set_gas_limit(&self, gas_limit: U64) -> RpcResult { + debug!(target: "rpc", "Setting gas limit: {}", gas_limit); + self.gas_limit_config.set_gas_limit(gas_limit.to()); + self.metrics.set_gas_limit(gas_limit.to()); Ok(true) } } @@ -49,6 +53,8 @@ pub struct OpMinerMetrics { max_da_tx_size: Gauge, /// Max DA block size set on the miner max_da_block_size: Gauge, + /// Gas limit set on the miner + gas_limit: Gauge, } impl OpMinerMetrics { @@ -63,4 +69,10 @@ impl OpMinerMetrics { pub fn set_max_da_block_size(&self, size: u64) { self.max_da_block_size.set(size as f64); } + + /// Sets the gas limit gauge value + #[inline] + pub fn set_gas_limit(&self, gas_limit: u64) { + self.gas_limit.set(gas_limit as f64); + } } From ecd49aed11814b2da4c43cf4ab608a2f3d56b6bd Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 31 Oct 2025 16:03:10 +0100 Subject: [PATCH 283/371] perf: only chunk if more > 1 available (#19427) --- .../src/tree/payload_processor/multiproof.rs | 18 ++++++++++-------- crates/trie/parallel/src/proof_task.rs | 18 ++++++++---------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 73dc6a90954..18371b6dfaa 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -764,10 +764,11 @@ impl MultiProofTask { // Process proof targets in chunks. let mut chunks = 0; - // Only chunk if account or storage workers are available to take advantage of parallelism. - let should_chunk = - self.multiproof_manager.proof_worker_handle.has_available_account_workers() || - self.multiproof_manager.proof_worker_handle.has_available_storage_workers(); + // Only chunk if multiple account or storage workers are available to take advantage of + // parallelism. + let should_chunk = self.multiproof_manager.proof_worker_handle.available_account_workers() > + 1 || + self.multiproof_manager.proof_worker_handle.available_storage_workers() > 1; let mut dispatch = |proof_targets| { self.multiproof_manager.dispatch( @@ -904,10 +905,11 @@ impl MultiProofTask { let mut spawned_proof_targets = MultiProofTargets::default(); - // Only chunk if account or storage workers are available to take advantage of parallelism. - let should_chunk = - self.multiproof_manager.proof_worker_handle.has_available_account_workers() || - self.multiproof_manager.proof_worker_handle.has_available_storage_workers(); + // Only chunk if multiple account or storage workers are available to take advantage of + // parallelism. 
+ let should_chunk = self.multiproof_manager.proof_worker_handle.available_account_workers() > + 1 || + self.multiproof_manager.proof_worker_handle.available_storage_workers() > 1; let mut dispatch = |hashed_state_update| { let proof_targets = get_proof_targets( diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 06ac673dd4e..bc5c788e4e2 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -206,14 +206,14 @@ impl ProofWorkerHandle { } } - /// Returns true if there are available storage workers to process tasks. - pub fn has_available_storage_workers(&self) -> bool { - self.storage_available_workers.load(Ordering::Relaxed) > 0 + /// Returns how many storage workers are currently available/idle. + pub fn available_storage_workers(&self) -> usize { + self.storage_available_workers.load(Ordering::Relaxed) } - /// Returns true if there are available account workers to process tasks. - pub fn has_available_account_workers(&self) -> bool { - self.account_available_workers.load(Ordering::Relaxed) > 0 + /// Returns how many account workers are currently available/idle. + pub fn available_account_workers(&self) -> usize { + self.account_available_workers.load(Ordering::Relaxed) } /// Returns the number of pending storage tasks in the queue. @@ -240,16 +240,14 @@ impl ProofWorkerHandle { /// /// This is calculated as total workers minus available workers. pub fn active_storage_workers(&self) -> usize { - self.storage_worker_count - .saturating_sub(self.storage_available_workers.load(Ordering::Relaxed)) + self.storage_worker_count.saturating_sub(self.available_storage_workers()) } /// Returns the number of account workers currently processing tasks. /// /// This is calculated as total workers minus available workers. 
pub fn active_account_workers(&self) -> usize { - self.account_worker_count - .saturating_sub(self.account_available_workers.load(Ordering::Relaxed)) + self.account_worker_count.saturating_sub(self.available_account_workers()) } /// Dispatch a storage proof computation to storage worker pool From 3bb90e64a2bd5199a91ba20346de4d03b8ee4df7 Mon Sep 17 00:00:00 2001 From: Avory Date: Fri, 31 Oct 2025 17:08:45 +0200 Subject: [PATCH 284/371] fix(beacon-api-sidecar): use correct block metadata for reorged blobs (#19424) --- examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index 56755b1e730..cc3ba9abf88 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -1,5 +1,5 @@ use crate::BeaconSidecarConfig; -use alloy_consensus::{BlockHeader, Signed, Transaction as _, TxEip4844WithSidecar, Typed2718}; +use alloy_consensus::{Signed, Transaction as _, TxEip4844WithSidecar, Typed2718}; use alloy_eips::eip7594::BlobTransactionSidecarVariant; use alloy_primitives::B256; use alloy_rpc_types_beacon::sidecar::{BeaconBlobBundle, SidecarIterator}; @@ -202,9 +202,9 @@ where .map(|tx| { let transaction_hash = *tx.tx_hash(); let block_metadata = BlockMetadata { - block_hash: new.tip().hash(), - block_number: new.tip().number(), - gas_used: new.tip().gas_used(), + block_hash: block.hash(), + block_number: block.number, + gas_used: block.gas_used, }; BlobTransactionEvent::Reorged(ReorgedBlob { transaction_hash, From 1c5c709d6107f0c3391d6727e50744c7bc865e74 Mon Sep 17 00:00:00 2001 From: Micke <155267459+reallesee@users.noreply.github.com> Date: Fri, 31 Oct 2025 16:32:28 +0100 Subject: [PATCH 285/371] chore(codecs): replace todo with unimplemented in Compact derive (#19284) --- crates/storage/codecs/derive/src/compact/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index c1951233484..ed43286923b 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -82,7 +82,7 @@ pub fn get_fields(data: &Data) -> FieldList { ); load_field(&data_fields.unnamed[0], &mut fields, false); } - syn::Fields::Unit => todo!(), + syn::Fields::Unit => unimplemented!("Compact does not support unit structs"), }, Data::Enum(data) => { for variant in &data.variants { @@ -106,7 +106,7 @@ pub fn get_fields(data: &Data) -> FieldList { } } } - Data::Union(_) => todo!(), + Data::Union(_) => unimplemented!("Compact does not support union types"), } fields From b05eb5f79304e688847bfd03366aecc052d79edd Mon Sep 17 00:00:00 2001 From: bigbear <155267841+aso20455@users.noreply.github.com> Date: Fri, 31 Oct 2025 17:36:22 +0200 Subject: [PATCH 286/371] fix(txpool): correct propagate field name in Debug output (#19278) --- crates/transaction-pool/src/validate/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 725f83c392c..bccd4d7b347 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -515,7 +515,7 @@ impl fmt::Debug for ValidPoolTransaction { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 
f.debug_struct("ValidPoolTransaction") .field("id", &self.transaction_id) - .field("pragate", &self.propagate) + .field("propagate", &self.propagate) .field("origin", &self.origin) .field("hash", self.transaction.hash()) .field("tx", &self.transaction) From e894db8e07424454605cff2803978f122850daa2 Mon Sep 17 00:00:00 2001 From: Ragnar Date: Fri, 31 Oct 2025 16:44:14 +0100 Subject: [PATCH 287/371] perf: optimize SyncHeight event handling to avoid recursive calls (#19372) Co-authored-by: Matthias Seitz --- crates/stages/api/src/metrics/listener.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/crates/stages/api/src/metrics/listener.rs b/crates/stages/api/src/metrics/listener.rs index 8c0707d1bea..2ae367eb364 100644 --- a/crates/stages/api/src/metrics/listener.rs +++ b/crates/stages/api/src/metrics/listener.rs @@ -52,17 +52,7 @@ impl MetricsListener { trace!(target: "sync::metrics", ?event, "Metric event received"); match event { MetricEvent::SyncHeight { height } => { - for stage_id in StageId::ALL { - self.handle_event(MetricEvent::StageCheckpoint { - stage_id, - checkpoint: StageCheckpoint { - block_number: height, - stage_checkpoint: None, - }, - max_block_number: Some(height), - elapsed: Duration::default(), - }); - } + self.update_all_stages_height(height); } MetricEvent::StageCheckpoint { stage_id, checkpoint, max_block_number, elapsed } => { let stage_metrics = self.sync_metrics.get_stage_metrics(stage_id); @@ -83,6 +73,17 @@ impl MetricsListener { } } } + + /// Updates all stage checkpoints to the given height efficiently. + fn update_all_stages_height(&mut self, height: BlockNumber) { + for stage_id in StageId::ALL { + let stage_metrics = self.sync_metrics.get_stage_metrics(stage_id); + let height_f64 = height as f64; + stage_metrics.checkpoint.set(height_f64); + stage_metrics.entities_processed.set(height_f64); + stage_metrics.entities_total.set(height_f64); + } + } } impl Future for MetricsListener { From a43345b54cab1d14b11bcbd28acafbabdc8aadd9 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Fri, 31 Oct 2025 15:45:03 +0000 Subject: [PATCH 288/371] perf(tree): only chunk multiproof targets if needed (#19326) --- .../src/tree/payload_processor/multiproof.rs | 30 ++++++-- crates/trie/common/src/hashed_state.rs | 70 +++++++++++++++++++ crates/trie/common/src/proofs.rs | 34 +++++++++ 3 files changed, 130 insertions(+), 4 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 18371b6dfaa..5aac0e3f78f 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -743,7 +743,12 @@ impl MultiProofTask { /// Handles request for proof prefetch. /// /// Returns a number of proofs that were spawned. 
- #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip_all, fields(accounts = targets.len()))] + #[instrument( + level = "debug", + target = "engine::tree::payload_processor::multiproof", + skip_all, + fields(accounts = targets.len(), chunks = 0) + )] fn on_prefetch_proof(&mut self, targets: MultiProofTargets) -> u64 { let proof_targets = self.get_prefetch_proof_targets(targets); self.fetched_proof_targets.extend_ref(&proof_targets); @@ -785,10 +790,16 @@ impl MultiProofTask { chunks += 1; }; - if should_chunk && let Some(chunk_size) = self.chunk_size { + if should_chunk && + let Some(chunk_size) = self.chunk_size && + proof_targets.chunking_length() > chunk_size + { + let mut chunks = 0usize; for proof_targets_chunk in proof_targets.chunks(chunk_size) { dispatch(proof_targets_chunk); + chunks += 1; } + tracing::Span::current().record("chunks", chunks); } else { dispatch(proof_targets); } @@ -874,7 +885,12 @@ impl MultiProofTask { /// Handles state updates. /// /// Returns a number of proofs that were spawned. - #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip(self, update), fields(accounts = update.len()))] + #[instrument( + level = "debug", + target = "engine::tree::payload_processor::multiproof", + skip(self, update), + fields(accounts = update.len(), chunks = 0) + )] fn on_state_update(&mut self, source: StateChangeSource, update: EvmState) -> u64 { let hashed_state_update = evm_state_to_hashed_post_state(update); @@ -934,10 +950,16 @@ impl MultiProofTask { chunks += 1; }; - if should_chunk && let Some(chunk_size) = self.chunk_size { + if should_chunk && + let Some(chunk_size) = self.chunk_size && + not_fetched_state_update.chunking_length() > chunk_size + { + let mut chunks = 0usize; for chunk in not_fetched_state_update.chunks(chunk_size) { dispatch(chunk); + chunks += 1; } + tracing::Span::current().record("chunks", chunks); } else { dispatch(not_fetched_state_update); } diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index e693776c4e8..8d99ee5ebbb 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -278,6 +278,15 @@ impl HashedPostState { ChunkedHashedPostState::new(self, size) } + /// Returns the number of items that will be considered during chunking in `[Self::chunks]`. + pub fn chunking_length(&self) -> usize { + self.accounts.len() + + self.storages + .values() + .map(|storage| if storage.wiped { 1 } else { 0 } + storage.storage.len()) + .sum::() + } + /// Extend this hashed post state with contents of another. /// Entries in the second hashed post state take precedence. 
pub fn extend(&mut self, other: Self) { @@ -1246,4 +1255,65 @@ mod tests { assert_eq!(storage3.zero_valued_slots.len(), 1); assert!(storage3.zero_valued_slots.contains(&B256::from([4; 32]))); } + + #[test] + fn test_hashed_post_state_chunking_length() { + let addr1 = B256::from([1; 32]); + let addr2 = B256::from([2; 32]); + let addr3 = B256::from([3; 32]); + let addr4 = B256::from([4; 32]); + let slot1 = B256::from([1; 32]); + let slot2 = B256::from([2; 32]); + let slot3 = B256::from([3; 32]); + + let state = HashedPostState { + accounts: B256Map::from_iter([(addr1, None), (addr2, None), (addr4, None)]), + storages: B256Map::from_iter([ + ( + addr1, + HashedStorage { + wiped: false, + storage: B256Map::from_iter([ + (slot1, U256::ZERO), + (slot2, U256::ZERO), + (slot3, U256::ZERO), + ]), + }, + ), + ( + addr2, + HashedStorage { + wiped: true, + storage: B256Map::from_iter([ + (slot1, U256::ZERO), + (slot2, U256::ZERO), + (slot3, U256::ZERO), + ]), + }, + ), + ( + addr3, + HashedStorage { + wiped: false, + storage: B256Map::from_iter([ + (slot1, U256::ZERO), + (slot2, U256::ZERO), + (slot3, U256::ZERO), + ]), + }, + ), + ]), + }; + + let chunking_length = state.chunking_length(); + for size in 1..=state.clone().chunks(1).count() { + let chunk_count = state.clone().chunks(size).count(); + let expected_count = chunking_length.div_ceil(size); + assert_eq!( + chunk_count, expected_count, + "chunking_length: {}, size: {}", + chunking_length, size + ); + } + } } diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index b7961f047a4..a8e0bb59b93 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -89,6 +89,11 @@ impl MultiProofTargets { pub fn chunks(self, size: usize) -> ChunkedMultiProofTargets { ChunkedMultiProofTargets::new(self, size) } + + /// Returns the number of items that will be considered during chunking in `[Self::chunks]`. 
+ pub fn chunking_length(&self) -> usize { + self.values().map(|slots| 1 + slots.len().saturating_sub(1)).sum::() + } } /// An iterator that yields chunks of the proof targets of at most `size` account and storage @@ -1067,4 +1072,33 @@ mod tests { acc.storage_root = EMPTY_ROOT_HASH; assert_eq!(acc, inverse); } + + #[test] + fn test_multiproof_targets_chunking_length() { + let mut targets = MultiProofTargets::default(); + targets.insert(B256::with_last_byte(1), B256Set::default()); + targets.insert( + B256::with_last_byte(2), + B256Set::from_iter([B256::with_last_byte(10), B256::with_last_byte(20)]), + ); + targets.insert( + B256::with_last_byte(3), + B256Set::from_iter([ + B256::with_last_byte(30), + B256::with_last_byte(31), + B256::with_last_byte(32), + ]), + ); + + let chunking_length = targets.chunking_length(); + for size in 1..=targets.clone().chunks(1).count() { + let chunk_count = targets.clone().chunks(size).count(); + let expected_count = chunking_length.div_ceil(size); + assert_eq!( + chunk_count, expected_count, + "chunking_length: {}, size: {}", + chunking_length, size + ); + } + } } From a5eb01b26bc5ee11cc845ec0b7c0af63ccfc79a6 Mon Sep 17 00:00:00 2001 From: oooLowNeoNooo Date: Fri, 31 Oct 2025 17:00:06 +0100 Subject: [PATCH 289/371] fix: rename variable in block_hash method from 'code' to 'hash' (#19269) --- crates/revm/src/cached.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/revm/src/cached.rs b/crates/revm/src/cached.rs index bf4bd6d5d1b..d40e814c12a 100644 --- a/crates/revm/src/cached.rs +++ b/crates/revm/src/cached.rs @@ -146,11 +146,11 @@ impl Database for CachedReadsDbMut<'_, DB> { } fn block_hash(&mut self, number: u64) -> Result { - let code = match self.cached.block_hashes.entry(number) { + let hash = match self.cached.block_hashes.entry(number) { Entry::Occupied(entry) => *entry.get(), Entry::Vacant(entry) => *entry.insert(self.db.block_hash_ref(number)?), }; - Ok(code) + Ok(hash) } } From d8729a9d2ce5c81cd26363b6c3b55a7953772d45 Mon Sep 17 00:00:00 2001 From: FT <140458077+zeevick10@users.noreply.github.com> Date: Fri, 31 Oct 2025 17:04:54 +0100 Subject: [PATCH 290/371] chore(docker): remove apt-get upgrade to ensure reproducible and faster builds (#19080) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index fc97c160bbc..b61c177525b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,7 @@ LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0" # Install system dependencies -RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config +RUN apt-get update && apt-get install -y libclang-dev pkg-config # Builds a cargo-chef plan FROM chef AS planner From dff382b8e250ff57e9cfd64005c30d3822841b0b Mon Sep 17 00:00:00 2001 From: anim001k <140460766+anim001k@users.noreply.github.com> Date: Fri, 31 Oct 2025 17:06:06 +0100 Subject: [PATCH 291/371] fix: Inline value match in SparseTrie::find_leaf to remove redundant wrapper (#19138) Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- crates/trie/sparse/src/trie.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 891b718693a..500b642cd1e 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -974,6 +974,7 @@ impl SparseTrieInterface for SerialSparseTrie { expected_value: Option<&Vec>, ) -> Result { // 
Helper function to check if a value matches the expected value + #[inline] fn check_value_match( actual_value: &Vec, expected_value: Option<&Vec>, From 71c124798c8a15e6aeec1b4a4e6e9fc24073d6ac Mon Sep 17 00:00:00 2001 From: MIHAO PARK Date: Fri, 31 Oct 2025 17:08:07 +0100 Subject: [PATCH 292/371] perf(cli): optimize StorageChangeSets import in merkle stage dump (#18022) Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- crates/cli/commands/src/stage/dump/merkle.rs | 14 ++- crates/storage/db-api/src/table.rs | 3 + .../db/src/implementation/mdbx/cursor.rs | 107 ++++++++++++++++++ 3 files changed, 119 insertions(+), 5 deletions(-) diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index ee7564f7cb2..1815b0b0348 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -1,12 +1,12 @@ use std::sync::Arc; use super::setup; -use alloy_primitives::BlockNumber; +use alloy_primitives::{Address, BlockNumber}; use eyre::Result; use reth_config::config::EtlConfig; use reth_consensus::{ConsensusError, FullConsensus}; use reth_db::DatabaseEnv; -use reth_db_api::{database::Database, table::TableImporter, tables}; +use reth_db_api::{database::Database, models::BlockNumberAddress, table::TableImporter, tables}; use reth_db_common::DbTool; use reth_evm::ConfigureEvm; use reth_exex::ExExManagerHandle; @@ -135,9 +135,13 @@ fn unwind_and_copy( let unwind_inner_tx = provider.into_tx(); - // TODO optimize we can actually just get the entries we need - output_db - .update(|tx| tx.import_dupsort::(&unwind_inner_tx))??; + output_db.update(|tx| { + tx.import_table_with_range::( + &unwind_inner_tx, + Some(BlockNumberAddress((from, Address::ZERO))), + BlockNumberAddress((to, Address::repeat_byte(0xff))), + ) + })??; output_db.update(|tx| tx.import_table::(&unwind_inner_tx))??; output_db.update(|tx| tx.import_dupsort::(&unwind_inner_tx))??; diff --git a/crates/storage/db-api/src/table.rs b/crates/storage/db-api/src/table.rs index 5715852a5dd..54517908de7 100644 --- a/crates/storage/db-api/src/table.rs +++ b/crates/storage/db-api/src/table.rs @@ -139,6 +139,9 @@ pub trait TableImporter: DbTxMut { } /// Imports table data from another transaction within a range. + /// + /// This method works correctly with both regular and `DupSort` tables. For `DupSort` tables, + /// all duplicate entries within the range are preserved during import. 
fn import_table_with_range( &self, source_tx: &R, diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index 0bbb75ce4b5..5ca6eacb6c7 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -345,3 +345,110 @@ impl DbDupCursorRW for Cursor { ) } } + +#[cfg(test)] +mod tests { + use crate::{ + mdbx::{DatabaseArguments, DatabaseEnv, DatabaseEnvKind}, + tables::StorageChangeSets, + Database, + }; + use alloy_primitives::{address, Address, B256, U256}; + use reth_db_api::{ + cursor::{DbCursorRO, DbDupCursorRW}, + models::{BlockNumberAddress, ClientVersion}, + table::TableImporter, + transaction::{DbTx, DbTxMut}, + }; + use reth_primitives_traits::StorageEntry; + use std::sync::Arc; + use tempfile::TempDir; + + fn create_test_db() -> Arc { + let path = TempDir::new().unwrap(); + let mut db = DatabaseEnv::open( + path.path(), + DatabaseEnvKind::RW, + DatabaseArguments::new(ClientVersion::default()), + ) + .unwrap(); + db.create_tables().unwrap(); + Arc::new(db) + } + + #[test] + fn test_import_table_with_range_works_on_dupsort() { + let addr1 = address!("0000000000000000000000000000000000000001"); + let addr2 = address!("0000000000000000000000000000000000000002"); + let addr3 = address!("0000000000000000000000000000000000000003"); + let source_db = create_test_db(); + let target_db = create_test_db(); + let test_data = vec![ + ( + BlockNumberAddress((100, addr1)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(100) }, + ), + ( + BlockNumberAddress((100, addr1)), + StorageEntry { key: B256::with_last_byte(2), value: U256::from(200) }, + ), + ( + BlockNumberAddress((100, addr1)), + StorageEntry { key: B256::with_last_byte(3), value: U256::from(300) }, + ), + ( + BlockNumberAddress((101, addr1)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(400) }, + ), + ( + BlockNumberAddress((101, addr2)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(500) }, + ), + ( + BlockNumberAddress((101, addr2)), + StorageEntry { key: B256::with_last_byte(2), value: U256::from(600) }, + ), + ( + BlockNumberAddress((102, addr3)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(700) }, + ), + ]; + + // setup data + let tx = source_db.tx_mut().unwrap(); + { + let mut cursor = tx.cursor_dup_write::().unwrap(); + for (key, value) in &test_data { + cursor.append_dup(*key, *value).unwrap(); + } + } + tx.commit().unwrap(); + + // import data from source db to target + let source_tx = source_db.tx().unwrap(); + let target_tx = target_db.tx_mut().unwrap(); + + target_tx + .import_table_with_range::( + &source_tx, + Some(BlockNumberAddress((100, Address::ZERO))), + BlockNumberAddress((102, Address::repeat_byte(0xff))), + ) + .unwrap(); + target_tx.commit().unwrap(); + + // fetch all data from target db + let verify_tx = target_db.tx().unwrap(); + let mut cursor = verify_tx.cursor_dup_read::().unwrap(); + let copied: Vec<_> = cursor.walk(None).unwrap().collect::, _>>().unwrap(); + + // verify each entry matches the test data + assert_eq!(copied.len(), test_data.len(), "Should copy all entries including duplicates"); + for ((copied_key, copied_value), (expected_key, expected_value)) in + copied.iter().zip(test_data.iter()) + { + assert_eq!(copied_key, expected_key); + assert_eq!(copied_value, expected_value); + } + } +} From 5f04690e2858c56990237365ee3ac5724f982860 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin 
<5773434+shekhirin@users.noreply.github.com> Date: Fri, 31 Oct 2025 16:30:47 +0000 Subject: [PATCH 293/371] revert: "feat: Add building and publishing of *.deb packages (#18615)" (#19011) --- .github/workflows/release-reproducible.yml | 8 ---- .github/workflows/release.yml | 34 ++----------- .github/workflows/reproducible-build.yml | 14 ++++-- Cargo.toml | 6 --- Dockerfile.reproducible | 16 +++---- Makefile | 55 ++++++++++------------ bin/reth/Cargo.toml | 14 ------ pkg/reth/debian/reth.service | 13 ----- 8 files changed, 46 insertions(+), 114 deletions(-) delete mode 100644 pkg/reth/debian/reth.service diff --git a/.github/workflows/release-reproducible.yml b/.github/workflows/release-reproducible.yml index 9726cb77b89..e0e7f78aa58 100644 --- a/.github/workflows/release-reproducible.yml +++ b/.github/workflows/release-reproducible.yml @@ -40,20 +40,12 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Extract Rust version from Cargo.toml - id: rust_version - run: | - RUST_VERSION=$(cargo metadata --format-version 1 | jq -r '.packages[] | select(.name == "reth") | .rust_version' || echo "1") - echo "RUST_VERSION=$RUST_VERSION" >> $GITHUB_OUTPUT - - name: Build and push reproducible image uses: docker/build-push-action@v6 with: context: . file: ./Dockerfile.reproducible push: true - build-args: | - RUST_VERSION=${{ steps.rust_version.outputs.RUST_VERSION }} tags: | ${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:${{ needs.extract-version.outputs.VERSION }} ${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:latest diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 70960d2fe00..60206b6ace7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -18,10 +18,10 @@ env: REPO_NAME: ${{ github.repository_owner }}/reth IMAGE_NAME: ${{ github.repository_owner }}/reth OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth + REPRODUCIBLE_IMAGE_NAME: ${{ github.repository_owner }}/reth-reproducible CARGO_TERM_COLOR: always DOCKER_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/reth DOCKER_OP_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/op-reth - DEB_SUPPORTED_TARGETS: x86_64-unknown-linux-gnu aarch64-unknown-linux-gnu riscv64gc-unknown-linux-gnu jobs: dry-run: @@ -120,20 +120,11 @@ jobs: - name: Build Reth run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} - - - name: Build Reth deb package - if: ${{ matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} - run: make build-deb-${{ matrix.configs.target }} PROFILE=${{ matrix.configs.profile }} VERSION=${{ needs.extract-version.outputs.VERSION }} - - name: Move binary run: | mkdir artifacts [[ "${{ matrix.configs.target }}" == *windows* ]] && ext=".exe" - - # Move deb packages if they exist - if [[ "${{ matrix.build.binary }}" == "reth" && "${{ env.DEB_SUPPORTED_TARGETS }}" == *"${{ matrix.configs.target }}"* ]]; then - mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb" ./artifacts - fi + mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - name: Configure GPG and create artifacts env: @@ -143,12 +134,9 @@ jobs: export GPG_TTY=$(tty) echo -n "$GPG_SIGNING_KEY" | base64 --decode | gpg --batch --import cd artifacts - tar -czf ${{ 
matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz ${{ matrix.build.binary }}*[!.deb] + tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz ${{ matrix.build.binary }}* echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz - if [[ -f "${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb" ]]; then - echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb - fi - mv *tar.gz* *.deb* .. + mv *tar.gz* .. shell: bash - name: Upload artifact @@ -165,20 +153,6 @@ jobs: name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc - - name: Upload deb package - if: ${{ github.event.inputs.dry_run != 'true' && matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} - uses: actions/upload-artifact@v5 - with: - name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb - path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb - - - name: Upload deb package signature - if: ${{ github.event.inputs.dry_run != 'true' && matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} - uses: actions/upload-artifact@v5 - with: - name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb.asc - path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb.asc - draft-release: name: draft release runs-on: ubuntu-latest diff --git a/.github/workflows/reproducible-build.yml b/.github/workflows/reproducible-build.yml index 0f5dd2e72d8..b4a93cedaba 100644 --- a/.github/workflows/reproducible-build.yml +++ b/.github/workflows/reproducible-build.yml @@ -15,18 +15,24 @@ jobs: - uses: dtolnay/rust-toolchain@stable with: target: x86_64-unknown-linux-gnu + - name: Install cross main + run: | + cargo install cross --git https://github.com/cross-rs/cross - name: Install cargo-cache run: | cargo install cargo-cache + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true - name: Build Reth run: | - make build-reth-reproducible - mv target/x86_64-unknown-linux-gnu/reproducible/reth reth-build-1 + make build-reproducible + mv target/x86_64-unknown-linux-gnu/release/reth reth-build-1 - name: Clean cache run: make clean && cargo cache -a - name: Build Reth again run: | - make build-reth-reproducible - mv target/x86_64-unknown-linux-gnu/reproducible/reth reth-build-2 + make build-reproducible + mv target/x86_64-unknown-linux-gnu/release/reth reth-build-2 - name: Compare binaries run: cmp reth-build-1 reth-build-2 diff --git a/Cargo.toml b/Cargo.toml index c6a9abad754..6fa734e3d6c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -326,12 +326,6 @@ inherits = "release" lto = 
"fat" codegen-units = 1 -[profile.reproducible] -inherits = "release" -panic = "abort" -codegen-units = 1 -incremental = false - [workspace.dependencies] # reth op-reth = { path = "crates/optimism/bin" } diff --git a/Dockerfile.reproducible b/Dockerfile.reproducible index 602b9b857c0..a0d4a17b5bb 100644 --- a/Dockerfile.reproducible +++ b/Dockerfile.reproducible @@ -1,17 +1,17 @@ -ARG RUST_VERSION=1 +# Use the Rust 1.88 image based on Debian Bookworm +FROM rust:1.88-bookworm AS builder -FROM rust:$RUST_VERSION-bookworm AS builder - -RUN apt-get update && apt-get install -y \ - git \ - libclang-dev=1:14.0-55.7~deb12u1 +# Install specific version of libclang-dev +RUN apt-get update && apt-get install -y libclang-dev=1:14.0-55.7~deb12u1 # Copy the project to the container COPY ./ /app WORKDIR /app -RUN make build-reth-reproducible -RUN mv /app/target/x86_64-unknown-linux-gnu/reproducible/reth /reth +# Build the project with the reproducible settings +RUN make build-reproducible + +RUN mv /app/target/x86_64-unknown-linux-gnu/release/reth /reth # Create a minimal final image with just the binary FROM gcr.io/distroless/cc-debian12:nonroot-6755e21ccd99ddead6edc8106ba03888cbeed41a diff --git a/Makefile b/Makefile index 8d8b0a5b3a5..30f6b0aa478 100644 --- a/Makefile +++ b/Makefile @@ -64,25 +64,34 @@ install-op: ## Build and install the op-reth binary under `$(CARGO_HOME)/bin`. build: ## Build the reth binary into `target` directory. cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" -.PHONY: build-reth -build-reth: ## Build the reth binary (alias for build target). - $(MAKE) build - # Environment variables for reproducible builds +# Initialize RUSTFLAGS +RUST_BUILD_FLAGS = +# Enable static linking to ensure reproducibility across builds +RUST_BUILD_FLAGS += --C target-feature=+crt-static +# Set the linker to use static libgcc to ensure reproducibility across builds +RUST_BUILD_FLAGS += -C link-arg=-static-libgcc +# Remove build ID from the binary to ensure reproducibility across builds +RUST_BUILD_FLAGS += -C link-arg=-Wl,--build-id=none +# Remove metadata hash from symbol names to ensure reproducible builds +RUST_BUILD_FLAGS += -C metadata='' # Set timestamp from last git commit for reproducible builds SOURCE_DATE ?= $(shell git log -1 --pretty=%ct) - -# `reproducible` only supports reth on x86_64-unknown-linux-gnu -build-%-reproducible: - @if [ "$*" != "reth" ]; then \ - echo "Error: Reproducible builds are only supported for reth, not $*"; \ - exit 1; \ - fi +# Disable incremental compilation to avoid non-deterministic artifacts +CARGO_INCREMENTAL_VAL = 0 +# Set C locale for consistent string handling and sorting +LOCALE_VAL = C +# Set UTC timezone for consistent time handling across builds +TZ_VAL = UTC + +.PHONY: build-reproducible +build-reproducible: ## Build the reth binary into `target` directory with reproducible builds. Only works for x86_64-unknown-linux-gnu currently SOURCE_DATE_EPOCH=$(SOURCE_DATE) \ - RUSTFLAGS="-C symbol-mangling-version=v0 -C strip=none -C link-arg=-Wl,--build-id=none -C metadata='' --remap-path-prefix $$(pwd)=." \ - LC_ALL=C \ - TZ=UTC \ - cargo build --bin reth --features "$(FEATURES)" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu + RUSTFLAGS="${RUST_BUILD_FLAGS} --remap-path-prefix $$(pwd)=." 
\ + CARGO_INCREMENTAL=${CARGO_INCREMENTAL_VAL} \ + LC_ALL=${LOCALE_VAL} \ + TZ=${TZ_VAL} \ + cargo build --bin reth --features "$(FEATURES)" --profile "release" --locked --target x86_64-unknown-linux-gnu .PHONY: build-debug build-debug: ## Build the reth binary into `target/debug` directory. @@ -146,22 +155,6 @@ op-build-x86_64-apple-darwin: op-build-aarch64-apple-darwin: $(MAKE) op-build-native-aarch64-apple-darwin -build-deb-%: - @case "$*" in \ - x86_64-unknown-linux-gnu|aarch64-unknown-linux-gnu|riscv64gc-unknown-linux-gnu) \ - echo "Building debian package for $*"; \ - ;; \ - *) \ - echo "Error: Debian packages are only supported for x86_64-unknown-linux-gnu, aarch64-unknown-linux-gnu, and riscv64gc-unknown-linux-gnu, not $*"; \ - exit 1; \ - ;; \ - esac - cargo install cargo-deb@3.6.0 --locked - cargo deb --profile $(PROFILE) --no-build --no-dbgsym --no-strip \ - --target $* \ - $(if $(VERSION),--deb-version "1~$(VERSION)") \ - $(if $(VERSION),--output "target/$*/$(PROFILE)/reth-$(VERSION)-$*-$(PROFILE).deb") - # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary cp $(CARGO_TARGET_DIR)/$(1)/$(PROFILE)/$(2) $(BIN_DIR)/$(2) diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 850f082a462..31d9294fec6 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -9,20 +9,6 @@ repository.workspace = true description = "Reth node implementation" default-run = "reth" -[package.metadata.deb] -maintainer = "reth team" -depends = "$auto" -section = "network" -priority = "optional" -maintainer-scripts = "../../pkg/reth/debian/" -assets = [ - "$auto", - ["../../README.md", "usr/share/doc/reth/", "644"], - ["../../LICENSE-APACHE", "usr/share/doc/reth/", "644"], - ["../../LICENSE-MIT", "usr/share/doc/reth/", "644"], -] -systemd-units = { enable = false, start = false, unit-name = "reth", unit-scripts = "../../pkg/reth/debian" } - [lints] workspace = true diff --git a/pkg/reth/debian/reth.service b/pkg/reth/debian/reth.service deleted file mode 100644 index edd78d455c0..00000000000 --- a/pkg/reth/debian/reth.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=Modular, contributor-friendly and blazing-fast implementation of the Ethereum protocol -Wants=network-online.target -After=network.target network-online.target - -[Service] -Type=exec -DynamicUser=yes -StateDirectory=reth -ExecStart=/usr/bin/reth node --datadir %S/reth --log.file.max-files 0 - -[Install] -WantedBy=multi-user.target From dee0eca4d9336d949725f9f5b3c72869bfb2eb1c Mon Sep 17 00:00:00 2001 From: William Nwoke Date: Fri, 31 Oct 2025 17:32:30 +0100 Subject: [PATCH 294/371] feat(tasks): distinguish blocking and non-blocking tasks in metrics (#18440) Co-authored-by: Nathaniel Bajo Co-authored-by: Emilia Hane Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- crates/tasks/src/lib.rs | 14 +- crates/tasks/src/metrics.rs | 9 + etc/grafana/dashboards/overview.json | 567 +++++++++++++++++---------- 3 files changed, 378 insertions(+), 212 deletions(-) diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index 473a727e10d..de45c41e24d 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -383,15 +383,17 @@ impl TaskExecutor { { let on_shutdown = self.on_shutdown.clone(); - // Clone only the specific counter that we need. 
- let finished_regular_tasks_total_metrics = - self.metrics.finished_regular_tasks_total.clone(); + // Choose the appropriate finished counter based on task kind + let finished_counter = match task_kind { + TaskKind::Default => self.metrics.finished_regular_tasks_total.clone(), + TaskKind::Blocking => self.metrics.finished_regular_blocking_tasks_total.clone(), + }; + // Wrap the original future to increment the finished tasks counter upon completion let task = { async move { // Create an instance of IncCounterOnDrop with the counter to increment - let _inc_counter_on_drop = - IncCounterOnDrop::new(finished_regular_tasks_total_metrics); + let _inc_counter_on_drop = IncCounterOnDrop::new(finished_counter); let fut = pin!(fut); let _ = select(on_shutdown, fut).await; } @@ -642,7 +644,7 @@ impl TaskSpawner for TaskExecutor { } fn spawn_blocking(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> { - self.metrics.inc_regular_tasks(); + self.metrics.inc_regular_blocking_tasks(); self.spawn_blocking(fut) } diff --git a/crates/tasks/src/metrics.rs b/crates/tasks/src/metrics.rs index c486fa681cc..24d3065a529 100644 --- a/crates/tasks/src/metrics.rs +++ b/crates/tasks/src/metrics.rs @@ -16,6 +16,10 @@ pub struct TaskExecutorMetrics { pub(crate) regular_tasks_total: Counter, /// Number of finished spawned regular tasks pub(crate) finished_regular_tasks_total: Counter, + /// Number of spawned regular blocking tasks + pub(crate) regular_blocking_tasks_total: Counter, + /// Number of finished spawned regular blocking tasks + pub(crate) finished_regular_blocking_tasks_total: Counter, } impl TaskExecutorMetrics { @@ -28,6 +32,11 @@ impl TaskExecutorMetrics { pub(crate) fn inc_regular_tasks(&self) { self.regular_tasks_total.increment(1); } + + /// Increments the counter for spawned regular blocking tasks. 
+ pub(crate) fn inc_regular_blocking_tasks(&self) { + self.regular_blocking_tasks_total.increment(1); + } } /// Helper type for increasing counters even if a task fails diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 7337b2b886b..591470bad23 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -2,7 +2,7 @@ "__inputs": [ { "name": "DS_PROMETHEUS", - "label": "prometheus", + "label": "Prometheus", "description": "", "type": "datasource", "pluginId": "prometheus", @@ -46,7 +46,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "12.1.0-pre" + "version": "12.2.1" }, { "type": "panel", @@ -110,7 +110,6 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": null, "links": [], "panels": [ { @@ -164,9 +163,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -177,7 +174,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -234,9 +231,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -247,7 +242,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -304,9 +299,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -317,7 +310,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -374,9 +367,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -387,7 +378,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -444,9 +435,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -457,7 +446,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -514,9 +503,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -527,7 +514,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -596,9 +583,7 @@ "minVizWidth": 75, "orientation": "auto", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -606,7 +591,7 @@ "showThresholdMarkers": true, "sizing": "auto" }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -672,9 +657,7 @@ "namePlacement": "auto", "orientation": "horizontal", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -682,7 +665,7 @@ "sizing": "auto", "valueMode": "color" }, - "pluginVersion": "12.1.0-pre", + 
"pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -774,9 +757,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -784,7 +765,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -875,6 +856,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": true, "stacking": { "group": "A", @@ -920,7 +902,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -971,6 +953,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -1017,7 +1000,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -1069,6 +1052,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -1137,7 +1121,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -1417,6 +1401,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -1485,7 +1470,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -1845,6 +1830,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -1888,7 +1874,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -1976,6 +1962,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2019,7 +2006,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -2107,6 +2094,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2150,7 +2138,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -2297,6 +2285,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2344,7 +2333,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -2432,6 +2421,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2478,7 +2468,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -2542,6 +2532,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2589,7 +2580,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -2652,6 +2643,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2699,7 +2691,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -2751,6 +2743,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2798,7 +2791,7 @@ "sort": "none" } }, - 
"pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -2998,6 +2991,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3041,7 +3035,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3104,6 +3098,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3151,7 +3146,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3272,6 +3267,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3318,7 +3314,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3370,6 +3366,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3416,7 +3413,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3481,6 +3478,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3528,7 +3526,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3601,6 +3599,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3648,7 +3647,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3740,6 +3739,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3787,7 +3787,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3844,6 +3844,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3877,9 +3878,7 @@ "id": "byNames", "options": { "mode": "exclude", - "names": [ - "Precompile cache hits" - ], + "names": ["Precompile cache hits"], "prefix": "All except:", "readOnly": true } @@ -3889,7 +3888,7 @@ "id": "custom.hideFrom", "value": { "legend": false, - "tooltip": false, + "tooltip": true, "viz": true } } @@ -3917,7 +3916,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3987,6 +3986,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4033,7 +4033,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -4085,6 +4085,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4132,7 +4133,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -4184,6 +4185,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4231,7 +4233,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -4295,6 +4297,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4374,7 +4377,7 @@ "sort": 
"none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -4462,6 +4465,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4509,7 +4513,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -4561,6 +4565,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4608,7 +4613,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -4660,6 +4665,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4707,7 +4713,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -4760,6 +4766,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4807,7 +4814,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -4860,6 +4867,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4907,7 +4915,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -4961,6 +4969,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5008,7 +5017,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -5080,6 +5089,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5123,7 +5133,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -5213,7 +5223,7 @@ "unit": "percentunit" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -5269,6 +5279,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5312,7 +5323,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -5367,6 +5378,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5410,7 +5422,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -5468,6 +5480,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5507,10 +5520,7 @@ { "id": "custom.lineStyle", "value": { - "dash": [ - 0, - 10 - ], + "dash": [0, 10], "fill": "dot" } }, @@ -5542,7 +5552,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -5628,6 +5638,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5667,10 +5678,7 @@ { "id": "custom.lineStyle", "value": { - "dash": [ - 0, - 10 - ], + "dash": [0, 10], "fill": "dot" } }, @@ -5702,7 +5710,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -5785,32 +5793,27 @@ }, "id": 48, "options": { - "displayLabels": [ - "name" - 
], + "displayLabels": ["name"], "legend": { "displayMode": "table", "placement": "right", "showLegend": true, - "values": [ - "value" - ] + "values": ["value"] }, "pieType": "pie", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, + "sort": "desc", "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -5863,6 +5866,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5910,7 +5914,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -5965,25 +5969,22 @@ "displayMode": "table", "placement": "right", "showLegend": true, - "values": [ - "value" - ] + "values": ["value"] }, "pieType": "pie", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, + "sort": "desc", "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -6035,6 +6036,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6083,7 +6085,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -6135,6 +6137,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6182,7 +6185,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -6214,6 +6217,9 @@ "cellOptions": { "type": "auto" }, + "footer": { + "reducers": [] + }, "inspect": false }, "mappings": [], @@ -6239,7 +6245,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6251,7 +6257,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6263,7 +6269,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6275,7 +6281,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6287,7 +6293,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6331,17 +6337,9 @@ "id": 58, "options": { "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, "showHeader": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -6405,32 +6403,27 @@ }, "id": 202, "options": { - "displayLabels": [ - "name" - ], + "displayLabels": ["name"], "legend": { "displayMode": "table", "placement": "right", "showLegend": true, - "values": [ - "value" - ] + "values": ["value"] }, "pieType": "pie", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, + "sort": "desc", "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -6463,6 +6456,9 @@ "cellOptions": { "type": "auto" }, + "footer": { + "reducers": [] + }, "inspect": false }, "mappings": [], @@ -6516,7 +6512,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ 
-6528,7 +6524,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6540,7 +6536,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6552,7 +6548,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6568,17 +6564,9 @@ "id": 204, "options": { "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, "showHeader": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -6613,6 +6601,9 @@ "cellOptions": { "type": "auto" }, + "footer": { + "reducers": [] + }, "inspect": false }, "mappings": [], @@ -6666,7 +6657,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6678,7 +6669,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6690,7 +6681,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6702,7 +6693,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6718,17 +6709,9 @@ "id": 205, "options": { "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, "showHeader": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -6783,6 +6766,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6830,7 +6814,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -6882,6 +6866,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6929,7 +6914,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -6995,6 +6980,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7009,7 +6995,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -7040,7 +7027,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -7093,6 +7080,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7107,7 +7095,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -7138,7 +7127,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -7190,6 +7179,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7204,7 +7194,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -7236,7 +7227,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -7288,6 +7279,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7302,7 +7294,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -7334,7 +7327,7 @@ "sort": "none" } }, - 
"pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -7400,6 +7393,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7472,7 +7466,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -7564,7 +7558,7 @@ "unit": "percentunit" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -7618,6 +7612,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7665,7 +7660,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -7753,7 +7748,7 @@ "unit": "percentunit" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -7807,6 +7802,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7890,7 +7886,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8065,6 +8061,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8112,7 +8109,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8178,6 +8175,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8192,7 +8190,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -8248,7 +8247,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8337,6 +8336,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8351,7 +8351,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -8383,7 +8384,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8459,6 +8460,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8473,7 +8475,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -8504,7 +8507,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8582,6 +8585,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8662,7 +8666,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8786,6 +8790,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8830,7 +8835,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8906,6 +8911,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8952,7 +8958,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9016,6 +9022,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, 
"stacking": { "group": "A", @@ -9080,7 +9087,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9145,6 +9152,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9209,7 +9217,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9300,6 +9308,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9373,7 +9382,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9430,6 +9439,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9503,7 +9513,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9560,6 +9570,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9633,7 +9644,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9690,6 +9701,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9763,7 +9775,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9834,6 +9846,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9880,7 +9893,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9932,6 +9945,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9978,7 +9992,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10030,6 +10044,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10076,7 +10091,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10140,6 +10155,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": true, "stacking": { "group": "A", @@ -10188,7 +10204,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10240,6 +10256,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": true, "stacking": { "group": "A", @@ -10288,7 +10305,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10340,6 +10357,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": true, "stacking": { "group": "A", @@ -10387,7 +10405,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10453,6 +10471,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10513,7 +10532,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10631,6 +10650,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { 
"group": "A", @@ -10678,7 +10698,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10731,6 +10751,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10778,7 +10799,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10831,6 +10852,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10878,7 +10900,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10931,6 +10953,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10978,7 +11001,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -11032,6 +11055,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -11092,7 +11116,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -11129,13 +11153,146 @@ "title": "Task Executor regular tasks", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks the number of regular blocking tasks currently ran by the executor.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "semi-dark-red", + "value": 80 + } + ] + }, + "unit": "tasks/s" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "unit", + "value": "tasks" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 362 + }, + "id": 1007, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "rate(reth_executor_spawn_regular_blocking_tasks_total{$instance_label=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Tasks started", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": 
"reth_executor_spawn_regular_blocking_tasks_total{$instance_label=\"$instance\"} - reth_executor_spawn_finished_regular_blocking_tasks_total{$instance_label=\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "Tasks running", + "range": true, + "refId": "C" + } + ], + "title": "Task Executor regular blocking tasks", + "type": "timeseries" + }, { "collapsed": true, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 362 + "y": 370 }, "id": 236, "panels": [ @@ -11577,9 +11734,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -11615,7 +11770,7 @@ "h": 1, "w": 24, "x": 0, - "y": 363 + "y": 371 }, "id": 241, "panels": [ @@ -11946,7 +12101,7 @@ } ], "refresh": "5s", - "schemaVersion": 41, + "schemaVersion": 42, "tags": [], "templating": { "list": [ From e6aeba0d7d432dd87b94cb005520fd5f3db80adb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 1 Nov 2025 11:51:46 +0100 Subject: [PATCH 295/371] feat: support custom Download command defaults (#19437) --- crates/cli/commands/src/download.rs | 180 +++++++++++++++++++-- docs/vocs/docs/pages/cli/reth/download.mdx | 2 +- 2 files changed, 164 insertions(+), 18 deletions(-) diff --git a/crates/cli/commands/src/download.rs b/crates/cli/commands/src/download.rs index 8f09dc9b893..20bc7081f05 100644 --- a/crates/cli/commands/src/download.rs +++ b/crates/cli/commands/src/download.rs @@ -7,9 +7,10 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_fs_util as fs; use std::{ + borrow::Cow, io::{self, Read, Write}, path::Path, - sync::Arc, + sync::{Arc, OnceLock}, time::{Duration, Instant}, }; use tar::Archive; @@ -22,24 +23,109 @@ const MERKLE_BASE_URL: &str = "https://downloads.merkle.io"; const EXTENSION_TAR_LZ4: &str = ".tar.lz4"; const EXTENSION_TAR_ZSTD: &str = ".tar.zst"; +/// Global static download defaults +static DOWNLOAD_DEFAULTS: OnceLock = OnceLock::new(); + +/// Download configuration defaults +/// +/// Global defaults can be set via [`DownloadDefaults::try_init`]. +#[derive(Debug, Clone)] +pub struct DownloadDefaults { + /// List of available snapshot sources + pub available_snapshots: Vec>, + /// Default base URL for snapshots + pub default_base_url: Cow<'static, str>, + /// Optional custom long help text that overrides the generated help + pub long_help: Option, +} + +impl DownloadDefaults { + /// Initialize the global download defaults with this configuration + pub fn try_init(self) -> Result<(), Self> { + DOWNLOAD_DEFAULTS.set(self) + } + + /// Get a reference to the global download defaults + pub fn get_global() -> &'static DownloadDefaults { + DOWNLOAD_DEFAULTS.get_or_init(DownloadDefaults::default_download_defaults) + } + + /// Default download configuration with defaults from merkle.io and publicnode + pub fn default_download_defaults() -> Self { + Self { + available_snapshots: vec![ + Cow::Borrowed("https://www.merkle.io/snapshots (default, mainnet archive)"), + Cow::Borrowed("https://publicnode.com/snapshots (full nodes & testnets)"), + ], + default_base_url: Cow::Borrowed(MERKLE_BASE_URL), + long_help: None, + } + } + + /// Generates the long help text for the download URL argument using these defaults. + /// + /// If a custom long_help is set, it will be returned. Otherwise, help text is generated + /// from the available_snapshots list. 
+ pub fn long_help(&self) -> String { + if let Some(ref custom_help) = self.long_help { + return custom_help.clone(); + } + + let mut help = String::from( + "Specify a snapshot URL or let the command propose a default one.\n\nAvailable snapshot sources:\n", + ); + + for source in &self.available_snapshots { + help.push_str("- "); + help.push_str(source); + help.push('\n'); + } + + help.push_str( + "\nIf no URL is provided, the latest mainnet archive snapshot\nwill be proposed for download from ", + ); + help.push_str(self.default_base_url.as_ref()); + help + } + + /// Add a snapshot source to the list + pub fn with_snapshot(mut self, source: impl Into>) -> Self { + self.available_snapshots.push(source.into()); + self + } + + /// Replace all snapshot sources + pub fn with_snapshots(mut self, sources: Vec>) -> Self { + self.available_snapshots = sources; + self + } + + /// Set the default base URL, e.g. `https://downloads.merkle.io`. + pub fn with_base_url(mut self, url: impl Into>) -> Self { + self.default_base_url = url.into(); + self + } + + /// Builder: Set custom long help text, overriding the generated help + pub fn with_long_help(mut self, help: impl Into) -> Self { + self.long_help = Some(help.into()); + self + } +} + +impl Default for DownloadDefaults { + fn default() -> Self { + Self::default_download_defaults() + } +} + #[derive(Debug, Parser)] pub struct DownloadCommand { #[command(flatten)] env: EnvironmentArgs, - #[arg( - long, - short, - help = "Custom URL to download the snapshot from", - long_help = "Specify a snapshot URL or let the command propose a default one.\n\ - \n\ - Available snapshot sources:\n\ - - https://www.merkle.io/snapshots (default, mainnet archive)\n\ - - https://publicnode.com/snapshots (full nodes & testnets)\n\ - \n\ - If no URL is provided, the latest mainnet archive snapshot\n\ - will be proposed for download from merkle.io" - )] + /// Custom URL to download the snapshot from + #[arg(long, short, long_help = DownloadDefaults::get_global().long_help())] url: Option, } @@ -207,9 +293,10 @@ async fn stream_and_extract(url: &str, target_dir: &Path) -> Result<()> { Ok(()) } -// Builds default URL for latest mainnet archive snapshot +// Builds default URL for latest mainnet archive snapshot using configured defaults async fn get_latest_snapshot_url() -> Result { - let latest_url = format!("{MERKLE_BASE_URL}/latest.txt"); + let base_url = &DownloadDefaults::get_global().default_base_url; + let latest_url = format!("{base_url}/latest.txt"); let filename = Client::new() .get(latest_url) .send() @@ -220,5 +307,64 @@ async fn get_latest_snapshot_url() -> Result { .trim() .to_string(); - Ok(format!("{MERKLE_BASE_URL}/{filename}")) + Ok(format!("{base_url}/{filename}")) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_download_defaults_builder() { + let defaults = DownloadDefaults::default() + .with_snapshot("https://example.com/snapshots (example)") + .with_base_url("https://example.com"); + + assert_eq!(defaults.default_base_url, "https://example.com"); + assert_eq!(defaults.available_snapshots.len(), 3); // 2 defaults + 1 added + } + + #[test] + fn test_download_defaults_replace_snapshots() { + let defaults = DownloadDefaults::default().with_snapshots(vec![ + Cow::Borrowed("https://custom1.com"), + Cow::Borrowed("https://custom2.com"), + ]); + + assert_eq!(defaults.available_snapshots.len(), 2); + assert_eq!(defaults.available_snapshots[0], "https://custom1.com"); + } + + #[test] + fn test_long_help_generation() { + let defaults = 
DownloadDefaults::default(); + let help = defaults.long_help(); + + assert!(help.contains("Available snapshot sources:")); + assert!(help.contains("merkle.io")); + assert!(help.contains("publicnode.com")); + } + + #[test] + fn test_long_help_override() { + let custom_help = "This is custom help text for downloading snapshots."; + let defaults = DownloadDefaults::default().with_long_help(custom_help); + + let help = defaults.long_help(); + assert_eq!(help, custom_help); + assert!(!help.contains("Available snapshot sources:")); + } + + #[test] + fn test_builder_chaining() { + let defaults = DownloadDefaults::default() + .with_base_url("https://custom.example.com") + .with_snapshot("https://snapshot1.com") + .with_snapshot("https://snapshot2.com") + .with_long_help("Custom help for snapshots"); + + assert_eq!(defaults.default_base_url, "https://custom.example.com"); + assert_eq!(defaults.available_snapshots.len(), 4); // 2 defaults + 2 added + assert_eq!(defaults.long_help, Some("Custom help for snapshots".to_string())); + } } diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index e7e3b6c0df6..6cdaa9ca2d3 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -81,7 +81,7 @@ Database: - https://publicnode.com/snapshots (full nodes & testnets) If no URL is provided, the latest mainnet archive snapshot - will be proposed for download from merkle.io + will be proposed for download from https://downloads.merkle.io Logging: --log.stdout.format From 780161a6472952704c5e1cc6d8f56fbda172d660 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Sat, 1 Nov 2025 16:13:51 +0100 Subject: [PATCH 296/371] chore: OverlayStateProviderFactory: don't query for reverts unless necessary (#19412) --- .../provider/src/providers/state/overlay.rs | 39 ++++++++++++------- 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 5c086c273ba..f912411f1e3 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -73,16 +73,30 @@ where F: DatabaseProviderFactory, F::Provider: TrieReader + StageCheckpointReader + PruneCheckpointReader + BlockNumReader, { - /// Validates that there are sufficient changesets to revert to the requested block number. + /// Returns the block number for [`Self`]'s `block_hash` field, if any. + fn get_block_number(&self, provider: &F::Provider) -> ProviderResult> { + if let Some(block_hash) = self.block_hash { + Ok(Some( + provider + .convert_hash_or_number(block_hash.into())? + .ok_or_else(|| ProviderError::BlockHashNotFound(block_hash))?, + )) + } else { + Ok(None) + } + } + + /// Returns whether or not it is required to collect reverts, and validates that there are + /// sufficient changesets to revert to the requested block number if so. /// /// Returns an error if the `MerkleChangeSets` checkpoint doesn't cover the requested block. /// Takes into account both the stage checkpoint and the prune checkpoint to determine the /// available data range. - fn validate_changesets_availability( + fn reverts_required( &self, provider: &F::Provider, requested_block: BlockNumber, - ) -> ProviderResult<()> { + ) -> ProviderResult { // Get the MerkleChangeSets stage and prune checkpoints. 
let stage_checkpoint = provider.get_stage_checkpoint(StageId::MerkleChangeSets)?; let prune_checkpoint = provider.get_prune_checkpoint(PruneSegment::MerkleChangeSets)?; @@ -99,7 +113,7 @@ where // If the requested block is the DB tip (determined by the MerkleChangeSets stage // checkpoint) then there won't be any reverts necessary, and we can simply return Ok. if upper_bound == requested_block { - return Ok(()) + return Ok(false) } // Extract the lower bound from prune checkpoint if available @@ -123,7 +137,7 @@ where }); } - Ok(()) + Ok(true) } } @@ -140,15 +154,10 @@ where let provider = self.factory.database_provider_ro()?; // If block_hash is provided, collect reverts - let (trie_updates, hashed_state) = if let Some(block_hash) = self.block_hash { - // Convert block hash to block number - let from_block = provider - .convert_hash_or_number(block_hash.into())? - .ok_or_else(|| ProviderError::BlockHashNotFound(block_hash))?; - - // Validate that we have sufficient changesets for the requested block - self.validate_changesets_availability(&provider, from_block)?; - + let (trie_updates, hashed_state) = if let Some(from_block) = + self.get_block_number(&provider)? && + self.reverts_required(&provider, from_block)? + { // Collect trie reverts let mut trie_reverts = provider.trie_reverts(from_block + 1)?; @@ -186,7 +195,7 @@ where debug!( target: "providers::state::overlay", - ?block_hash, + block_hash = ?self.block_hash, ?from_block, num_trie_updates = ?trie_updates.total_len(), num_state_updates = ?hashed_state_updates.total_len(), From 0bca7b150db529ac41616a1d542aa0239fb57b99 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 2 Nov 2025 11:01:04 +0100 Subject: [PATCH 297/371] chore(deps): weekly `cargo update` (#19443) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> Co-authored-by: Matthias Seitz --- Cargo.lock | 383 +++++++++--------- .../engine/tree/src/tree/precompile_cache.rs | 4 +- 2 files changed, 183 insertions(+), 204 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a4e0e1fd0a8..4f3ef5779b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -58,9 +58,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] @@ -88,9 +88,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bbb778f50ecb0cebfb5c05580948501927508da7bd628833a8c4bd8545e23e2" +checksum = "6068f356948cd84b5ad9ac30c50478e433847f14a50714d2b68f15d052724049" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -103,9 +103,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9b151e38e42f1586a01369ec52a6934702731d07e8509a7307331b09f6c46dc" +checksum = "3abecb92ba478a285fbf5689100dbafe4003ded4a09bf4b5ef62cca87cd4f79e" dependencies = [ "alloy-eips", "alloy-primitives", @@ -130,9 +130,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.41" +version = "1.0.42" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2d5e8668ef6215efdb7dcca6f22277b4e483a5650e05f5de22b2350971f4b8" +checksum = "2e864d4f11d1fb8d3ac2fd8f3a15f1ee46d55ec6d116b342ed1b2cb737f25894" dependencies = [ "alloy-consensus", "alloy-eips", @@ -145,9 +145,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630288cf4f3a34a8c6bc75c03dce1dbd47833138f65f37d53a1661eafc96b83f" +checksum = "c98d21aeef3e0783046c207abd3eb6cb41f6e77e0c0fc8077ebecd6df4f9d171" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -228,9 +228,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5434834adaf64fa20a6fb90877bc1d33214c41b055cc49f82189c98614368cc" +checksum = "07d9a64522a0db6ebcc4ff9c904e329e77dd737c2c25d30f1bdc32ca6c6ce334" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -276,9 +276,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919a8471cfbed7bcd8cf1197a57dda583ce0e10c6385f6ff4e8b41304b223392" +checksum = "675b163946b343ed2ddde4416114ad61fabc8b2a50d08423f38aa0ac2319e800" dependencies = [ "alloy-eips", "alloy-primitives", @@ -316,9 +316,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c69f6c9c68a1287c9d5ff903d0010726934de0dac10989be37b75a29190d55" +checksum = "f87b774478fcc616993e97659697f3e3c7988fdad598e46ee0ed11209cd0d8ee" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -331,9 +331,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eaf2ae05219e73e0979cb2cf55612aafbab191d130f203079805eaf881cca58" +checksum = "d5d6ed73d440bae8f27771b7cd507fa8f10f19ddf0b8f67e7622a52e0dbf798e" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -357,9 +357,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e58f4f345cef483eab7374f2b6056973c7419ffe8ad35e994b7a7f5d8e0c7ba4" +checksum = "219dccd2cf753a43bd9b0fbb7771a16927ffdb56e43e3a15755bef1a74d614aa" dependencies = [ "alloy-consensus", "alloy-eips", @@ -431,9 +431,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de2597751539b1cc8fe4204e5325f9a9ed83fcacfb212018dfcfa7877e76de21" +checksum = "f0ef8cbc2b68e2512acf04b2d296c05c98a661bc460462add6414528f4ff3d9b" dependencies = [ "alloy-chains", "alloy-consensus", @@ -476,9 +476,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06e45a68423e732900a0c824b8e22237db461b79d2e472dd68b7547c16104427" +checksum = "be028fb1c6c173f5765d0baa3580a11d69826ea89fe00ee5c9d7eddb2c3509cd" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -520,9 +520,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.41" +version = "1.0.42" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "edf8eb8be597cfa8c312934d2566ec4516f066d69164f9212d7a148979fdcfd8" +checksum = "2a0f67d1e655ed93efca217213340d21cce982333cc44a1d918af9150952ef66" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -546,9 +546,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "339af7336571dd39ae3a15bde08ae6a647e62f75350bd415832640268af92c06" +checksum = "fe106e50522980bc9e7cc9016f445531edf1a53e0fdba904c833b98c6fdff3f0" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -559,9 +559,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b33cdc0483d236cdfff763dae799ccef9646e94fb549a74f7adac6a7f7bb86" +checksum = "e8b67bf1ed8cac6fde7dd017ca0a1c33be846e613a265956089f983af1354f13" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -571,9 +571,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83d98fb386a462e143f5efa64350860af39950c49e7c0cbdba419c16793116ef" +checksum = "c1cf94d581b3aa13ebacb90ea52e0179985b7c20d8a522319e7d40768d56667a" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -583,9 +583,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbde0801a32d21c5f111f037bee7e22874836fba7add34ed4a6919932dd7cf23" +checksum = "425e14ee32eb8b7edd6a2247fe0ed640785e6eba75af27db27f1e6220c15ef0d" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -594,9 +594,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55c8d51ebb7c5fa8be8ea739a3933c5bfea08777d2d662b30b2109ac5ca71e6b" +checksum = "440655ffd9ff8724fa76a07c7dbe18cb4353617215c23e3921163516b6c07ff8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -614,9 +614,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388cf910e66bd4f309a81ef746dcf8f9bca2226e3577890a8d56c5839225cf46" +checksum = "f69c12784cdf1059936249a6e705ec03bf8cea1a12181ed5cea9ca2be9cca684" dependencies = [ "alloy-primitives", "derive_more", @@ -626,9 +626,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "605ec375d91073851f566a3082548af69a28dca831b27a8be7c1b4c49f5c6ca2" +checksum = "aabc17f0eac3f747eeddebc768c8e30763d6f6c53188f5335a935dedc57ddfbd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -647,9 +647,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "361cd87ead4ba7659bda8127902eda92d17fa7ceb18aba1676f7be10f7222487" +checksum = "0185f68a0f8391ab996d335a887087d7ccdbc97952efab3516f6307d456ba2cd" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -669,9 +669,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.0.41" 
+version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1397926d8d06a2531578bafc3e0ec78f97a02f0e6d1631c67d80d22af6a3af02" +checksum = "6c89422163337ff64d9aaa13f3e4df53d60d789004044cd64ebc7dc4d5765a64" dependencies = [ "alloy-consensus", "alloy-eips", @@ -684,9 +684,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de4e95fb0572b97b17751d0fdf5cdc42b0050f9dd9459eddd1bf2e2fbfed0a33" +checksum = "d31a6766c8f91d18d07a36b57f55efd981752df619d30b395a92332a8b28ea05" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -698,9 +698,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cddde1bbd4feeb0d363ae7882af1e2e7955ef77c17f933f31402aad9343b57c5" +checksum = "4c208cbe2ea28368c3f61bd1e27b14238b7b03796e90370de3c0d8722e0f9830" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -710,9 +710,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64600fc6c312b7e0ba76f73a381059af044f4f21f43e07f51f1fa76c868fe302" +checksum = "596cfa360922ba9af901cc7370c68640e4f72adb6df0ab064de32f21fec498d7" dependencies = [ "alloy-primitives", "arbitrary", @@ -722,9 +722,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5772858492b26f780468ae693405f895d6a27dea6e3eab2c36b6217de47c2647" +checksum = "7f06333680d04370c8ed3a6b0eccff384e422c3d8e6b19e61fedc3a9f0ab7743" dependencies = [ "alloy-primitives", "async-trait", @@ -737,9 +737,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4195b803d0a992d8dbaab2ca1986fc86533d4bc80967c0cce7668b26ad99ef9" +checksum = "590dcaeb290cdce23155e68af4791d093afc3754b1a331198a25d2d44c5456e8" dependencies = [ "alloy-consensus", "alloy-network", @@ -826,12 +826,11 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "025a940182bddaeb594c26fe3728525ae262d0806fe6a4befdf5d7bc13d54bce" +checksum = "55bbdcee53e4e3857b5ddbc2986ebe9c2ab5f352ec285cb0da04c1e8f2ca9c18" dependencies = [ "alloy-json-rpc", - "alloy-primitives", "auto_impl", "base64 0.22.1", "derive_more", @@ -850,9 +849,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5064d1e1e1aabc918b5954e7fb8154c39e77ec6903a581b973198b26628fa" +checksum = "793967215109b4a334047c810ed6db5e873ad3ea07f65cc02202bd4b810d9615" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -865,9 +864,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d47962f3f1d9276646485458dc842b4e35675f42111c9d814ae4711c664c8300" +checksum = "15e182e5ae0c4858bb87df23ebfe31018d7e51fe1a264b8a8a2b26932cb04861" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -885,9 +884,9 @@ dependencies = [ [[package]] name = 
"alloy-transport-ws" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9476a36a34e2fb51b6746d009c53d309a186a825aa95435407f0e07149f4ad2d" +checksum = "32e9dc891c80d6216003d4b04f0a7463015d0873d36e4ac2ec0bcc9196aa4ea7" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -923,11 +922,10 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8e52276fdb553d3c11563afad2898f4085165e4093604afe3d78b69afbf408f" +checksum = "ab54221eccefa254ce9f65b079c097b1796e48c21c7ce358230f8988d75392fb" dependencies = [ - "alloy-primitives", "darling 0.21.3", "proc-macro2", "quote", @@ -1870,9 +1868,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" +checksum = "63044e1ae8e69f3b5a92c736ca6269b8d12fa7efe39bf34ddb06d102cf0e2cab" dependencies = [ "memchr", "regex-automata", @@ -2116,9 +2114,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.50" +version = "4.5.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2cfd7bf8a6017ddaa4e32ffe7403d547790db06bd171c1c53926faab501623" +checksum = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5" dependencies = [ "clap_builder", "clap_derive", @@ -2126,9 +2124,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.50" +version = "4.5.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4c05b9e80c5ccd3a7ef080ad7b6ba7d6fc00a985b8b157197075677c82c7a0" +checksum = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a" dependencies = [ "anstream", "anstyle", @@ -3205,18 +3203,18 @@ dependencies = [ [[package]] name = "enum-ordinalize" -version = "4.3.0" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +checksum = "4a1091a7bb1f8f2c4b28f1fe2cef4980ca2d410a3d727d67ecc3178c9b0800f0" dependencies = [ "enum-ordinalize-derive", ] [[package]] name = "enum-ordinalize-derive" -version = "4.3.1" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", @@ -4226,6 +4224,8 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" dependencies = [ + "allocator-api2", + "equivalent", "foldhash 0.2.0", "serde", ] @@ -4475,7 +4475,7 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", - "webpki-roots 1.0.3", + "webpki-roots 1.0.4", ] [[package]] @@ -4553,28 +4553,28 @@ dependencies = [ [[package]] name = "icu_collections" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" dependencies = [ "displaydoc", "potential_utf", - "yoke 0.8.0", + "yoke 0.8.1", "zerofrom", - "zerovec 0.11.4", + "zerovec 0.11.5", ] [[package]] name = "icu_locale_core" -version = "2.0.0" 
+version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" dependencies = [ "displaydoc", - "litemap 0.8.0", - "tinystr 0.8.1", - "writeable 0.6.1", - "zerovec 0.11.4", + "litemap 0.8.1", + "tinystr 0.8.2", + "writeable 0.6.2", + "zerovec 0.11.5", ] [[package]] @@ -4630,17 +4630,16 @@ dependencies = [ [[package]] name = "icu_normalizer" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" dependencies = [ - "displaydoc", - "icu_collections 2.0.0", - "icu_normalizer_data 2.0.0", - "icu_properties 2.0.1", - "icu_provider 2.0.0", + "icu_collections 2.1.1", + "icu_normalizer_data 2.1.1", + "icu_properties 2.1.1", + "icu_provider 2.1.1", "smallvec", - "zerovec 0.11.4", + "zerovec 0.11.5", ] [[package]] @@ -4651,9 +4650,9 @@ checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" [[package]] name = "icu_normalizer_data" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" @@ -4672,18 +4671,16 @@ dependencies = [ [[package]] name = "icu_properties" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" dependencies = [ - "displaydoc", - "icu_collections 2.0.0", + "icu_collections 2.1.1", "icu_locale_core", - "icu_properties_data 2.0.1", - "icu_provider 2.0.0", - "potential_utf", + "icu_properties_data 2.1.1", + "icu_provider 2.1.1", "zerotrie", - "zerovec 0.11.4", + "zerovec 0.11.5", ] [[package]] @@ -4694,9 +4691,9 @@ checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" [[package]] name = "icu_properties_data" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" +checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" [[package]] name = "icu_provider" @@ -4717,19 +4714,17 @@ dependencies = [ [[package]] name = "icu_provider" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" dependencies = [ "displaydoc", "icu_locale_core", - "stable_deref_trait", - "tinystr 0.8.1", - "writeable 0.6.1", - "yoke 0.8.0", + "writeable 0.6.2", + "yoke 0.8.1", "zerofrom", "zerotrie", - "zerovec 0.11.4", + "zerovec 0.11.5", ] [[package]] @@ -4766,8 +4761,8 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ - "icu_normalizer 2.0.0", - "icu_properties 2.0.1", + "icu_normalizer 2.1.1", + "icu_properties 2.1.1", ] [[package]] @@ -5052,9 +5047,9 @@ dependencies = [ 
[[package]] name = "js-sys" -version = "0.3.81" +version = "0.3.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" +checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" dependencies = [ "once_cell", "wasm-bindgen", @@ -5430,9 +5425,9 @@ checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "litemap" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "litrs" @@ -5722,14 +5717,14 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.4" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" +checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" dependencies = [ "libc", "log", "wasi 0.11.1+wasi-snapshot-preview1", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -6588,11 +6583,11 @@ checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "potential_utf" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" dependencies = [ - "zerovec 0.11.4", + "zerovec 0.11.5", ] [[package]] @@ -6738,14 +6733,13 @@ dependencies = [ [[package]] name = "proptest" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bb0be07becd10686a0bb407298fb425360a5c44a663774406340c59a22de4ce" +checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" dependencies = [ "bit-set", "bit-vec", "bitflags 2.10.0", - "lazy_static", "num-traits", "rand 0.9.2", "rand_chacha 0.9.0", @@ -7187,11 +7181,11 @@ checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "regress" -version = "0.10.4" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145bb27393fe455dd64d6cbc8d059adfa392590a45eadf079c01b11857e7b010" +checksum = "2057b2325e68a893284d1538021ab90279adac1139957ca2a74426c6f118fb48" dependencies = [ - "hashbrown 0.15.5", + "hashbrown 0.16.0", "memchr", ] @@ -7241,7 +7235,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.3", + "webpki-roots 1.0.4", ] [[package]] @@ -10875,9 +10869,9 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "7.0.2" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "451748b17ac78bd2b0748ec472a5392cd78fc0f7d19d528be44770fda28fd6f7" +checksum = "3f2b51c414b7e79edd4a0569d06e2c4c029f8b60e5f3ee3e2fa21dc6c3717ee3" dependencies = [ "bitvec", "phf 0.13.1", @@ -10920,9 +10914,9 @@ dependencies = [ [[package]] name = "revm-database" -version = "9.0.2" +version = "9.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdefd7f40835e992bab40a245124cb1243e6c7a1c4659798827c809a59b0fea9" +checksum = "c2602625aa11ab1eda8e208e96b652c0bfa989b86c104a36537a62b081228af9" dependencies = [ "alloy-eips", "revm-bytecode", @@ -10934,9 +10928,9 @@ 
dependencies = [ [[package]] name = "revm-database-interface" -version = "8.0.3" +version = "8.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa488a73ac2738f11478650cdf1a0f263864c09d5f0e9bf6309e891a05323c60" +checksum = "58a4621143d6515e32f969306d9c85797ae0d3fe0c74784f1fda02ba441e5a08" dependencies = [ "auto_impl", "either", @@ -11054,9 +11048,9 @@ dependencies = [ [[package]] name = "revm-state" -version = "8.0.2" +version = "8.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6bd5e669b02007872a8ca2643a14e308fe1739ee4475d74122587c3388a06a" +checksum = "5a0b4873815e31cbc3e5b183b9128b86c09a487c027aaf8cc5cf4b9688878f9b" dependencies = [ "bitflags 2.10.0", "revm-bytecode", @@ -11340,9 +11334,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" dependencies = [ "web-time", "zeroize", @@ -11377,9 +11371,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.103.7" +version = "0.103.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e10b3f4191e8a80e6b43eebabfac91e5dcecebb27a71f04e820c47ec41d314bf" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" dependencies = [ "ring", "rustls-pki-types", @@ -11804,9 +11798,9 @@ dependencies = [ [[package]] name = "signal-hook-mio" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" +checksum = "b75a19a7a740b25bc7944bdee6172368f988763b744e3d4dfe753f6b4ece40cc" dependencies = [ "libc", "mio", @@ -12407,12 +12401,12 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" dependencies = [ "displaydoc", - "zerovec 0.11.4", + "zerovec 0.11.5", ] [[package]] @@ -12979,9 +12973,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "unicode-segmentation" @@ -13226,9 +13220,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.104" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" +checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" dependencies = [ "cfg-if", "once_cell", @@ -13237,25 +13231,11 @@ dependencies = [ "wasm-bindgen-shared", ] -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.108", - 
"wasm-bindgen-shared", -] - [[package]] name = "wasm-bindgen-futures" -version = "0.4.54" +version = "0.4.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c" +checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" dependencies = [ "cfg-if", "js-sys", @@ -13266,9 +13246,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.104" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" +checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -13276,22 +13256,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.104" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" +checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" dependencies = [ + "bumpalo", "proc-macro2", "quote", "syn 2.0.108", - "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.104" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" +checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" dependencies = [ "unicode-ident", ] @@ -13325,9 +13305,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.81" +version = "0.3.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" +checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" dependencies = [ "js-sys", "wasm-bindgen", @@ -13349,14 +13329,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" dependencies = [ - "webpki-root-certs 1.0.3", + "webpki-root-certs 1.0.4", ] [[package]] name = "webpki-root-certs" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d651ec480de84b762e7be71e6efa7461699c19d9e2c272c8d93455f567786e" +checksum = "ee3e3b5f5e80bc89f30ce8d0343bf4e5f12341c51f3e26cbeecbc7c85443e85b" dependencies = [ "rustls-pki-types", ] @@ -13367,14 +13347,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.3", + "webpki-roots 1.0.4", ] [[package]] name = "webpki-roots" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b130c0d2d49f8b6889abc456e795e82525204f27c42cf767cf0d7734e089b8" +checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" dependencies = [ "rustls-pki-types", ] @@ -14004,9 +13984,9 @@ checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" [[package]] name = "writeable" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" 
[[package]] name = "ws_stream_wasm" @@ -14066,13 +14046,12 @@ dependencies = [ [[package]] name = "yoke" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" dependencies = [ - "serde", "stable_deref_trait", - "yoke-derive 0.8.0", + "yoke-derive 0.8.1", "zerofrom", ] @@ -14090,9 +14069,9 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", @@ -14163,12 +14142,12 @@ dependencies = [ [[package]] name = "zerotrie" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" dependencies = [ "displaydoc", - "yoke 0.8.0", + "yoke 0.8.1", "zerofrom", ] @@ -14185,13 +14164,13 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.4" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" dependencies = [ - "yoke 0.8.0", + "yoke 0.8.1", "zerofrom", - "zerovec-derive 0.11.1", + "zerovec-derive 0.11.2", ] [[package]] @@ -14207,9 +14186,9 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", diff --git a/crates/engine/tree/src/tree/precompile_cache.rs b/crates/engine/tree/src/tree/precompile_cache.rs index 753922f66b3..1183dfbe983 100644 --- a/crates/engine/tree/src/tree/precompile_cache.rs +++ b/crates/engine/tree/src/tree/precompile_cache.rs @@ -273,9 +273,9 @@ mod tests { #[test] fn test_precompile_cache_basic() { - let dyn_precompile: DynPrecompile = |_input: PrecompileInput<'_>| -> PrecompileResult { + let dyn_precompile: DynPrecompile = (|_input: PrecompileInput<'_>| -> PrecompileResult { Ok(PrecompileOutput { gas_used: 0, bytes: Bytes::default(), reverted: false }) - } + }) .into(); let cache = From 1e27e734949c68e2095031f12440319a8198f444 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 3 Nov 2025 09:51:54 +0100 Subject: [PATCH 298/371] chore: add config_mut helpers (#19436) --- crates/node/builder/src/builder/mod.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 8f01f251b53..f2886f47567 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -331,6 +331,11 @@ impl WithLaunchContext> { pub const fn config(&self) -> &NodeConfig { self.builder.config() } + + /// Returns a mutable reference to the node builder's config. 
+ pub const fn config_mut(&mut self) -> &mut NodeConfig { + self.builder.config_mut() + } } impl WithLaunchContext> @@ -452,6 +457,11 @@ where &self.builder.config } + /// Returns a mutable reference to the node builder's config. + pub const fn config_mut(&mut self) -> &mut NodeConfig<::ChainSpec> { + &mut self.builder.config + } + /// Returns a reference to node's database. pub const fn db(&self) -> &T::DB { &self.builder.adapter.database @@ -729,6 +739,11 @@ impl BuilderContext { &self.config_container.config } + /// Returns a mutable reference to the config of the node. + pub const fn config_mut(&mut self) -> &mut NodeConfig<::ChainSpec> { + &mut self.config_container.config + } + /// Returns the loaded reh.toml config. pub const fn reth_config(&self) -> &reth_config::Config { &self.config_container.toml_config From 714ebf749c2022eaee6db8d51f03cfbefb17436d Mon Sep 17 00:00:00 2001 From: sashass1315 Date: Mon, 3 Nov 2025 10:52:49 +0200 Subject: [PATCH 299/371] fix: avoid unnecessary self.clone() in OpNetworkBuilder::network_config (#19451) --- crates/optimism/node/src/node.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 66156edefc9..65055eb6717 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -1169,7 +1169,8 @@ impl OpNetworkBuilder { Node: FullNodeTypes>, NetworkP: NetworkPrimitives, { - let Self { disable_txpool_gossip, disable_discovery_v4, .. } = self.clone(); + let disable_txpool_gossip = self.disable_txpool_gossip; + let disable_discovery_v4 = self.disable_discovery_v4; let args = &ctx.config().network; let network_builder = ctx .network_config_builder()? From 1e8f35c04634430cf16e26721dbe265003e5e9d0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 3 Nov 2025 10:30:20 +0100 Subject: [PATCH 300/371] feat(op-reth): add FlashblocksListeners container and receipt helpers (#19446) Co-authored-by: Claude --- crates/optimism/flashblocks/src/lib.rs | 36 ++++ crates/optimism/flashblocks/src/payload.rs | 216 +++++++++++++++++++++ crates/optimism/rpc/src/eth/mod.rs | 71 +++---- 3 files changed, 282 insertions(+), 41 deletions(-) diff --git a/crates/optimism/flashblocks/src/lib.rs b/crates/optimism/flashblocks/src/lib.rs index d36ddb21fca..39577116e96 100644 --- a/crates/optimism/flashblocks/src/lib.rs +++ b/crates/optimism/flashblocks/src/lib.rs @@ -8,6 +8,8 @@ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use reth_primitives_traits::NodePrimitives; + pub use payload::{ ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashBlock, FlashBlockDecoder, Metadata, @@ -39,3 +41,37 @@ pub type FlashBlockCompleteSequenceRx = /// Receiver that signals whether a [`FlashBlock`] is currently being built. pub type InProgressFlashBlockRx = tokio::sync::watch::Receiver>; + +/// Container for all flashblocks-related listeners. +/// +/// Groups together the three receivers that provide flashblock-related updates. +#[derive(Debug)] +pub struct FlashblocksListeners { + /// Receiver of the most recent [`PendingFlashBlock`] built out of [`FlashBlock`]s. + pub pending_block_rx: PendingBlockRx, + /// Receiver of the sequences of [`FlashBlock`]s built. + pub flashblock_rx: FlashBlockCompleteSequenceRx, + /// Receiver that signals whether a [`FlashBlock`] is currently being built. 
+ pub in_progress_rx: InProgressFlashBlockRx, +} + +impl FlashblocksListeners { + /// Creates a new [`FlashblocksListeners`] with the given receivers. + pub const fn new( + pending_block_rx: PendingBlockRx, + flashblock_rx: FlashBlockCompleteSequenceRx, + in_progress_rx: InProgressFlashBlockRx, + ) -> Self { + Self { pending_block_rx, flashblock_rx, in_progress_rx } + } +} + +impl Clone for FlashblocksListeners { + fn clone(&self) -> Self { + Self { + pending_block_rx: self.pending_block_rx.clone(), + flashblock_rx: self.flashblock_rx.resubscribe(), + in_progress_rx: self.in_progress_rx.clone(), + } + } +} diff --git a/crates/optimism/flashblocks/src/payload.rs b/crates/optimism/flashblocks/src/payload.rs index da81ada016a..7469538ee3b 100644 --- a/crates/optimism/flashblocks/src/payload.rs +++ b/crates/optimism/flashblocks/src/payload.rs @@ -41,6 +41,11 @@ impl FlashBlock { pub fn parent_hash(&self) -> Option { Some(self.base.as_ref()?.parent_hash) } + + /// Returns the receipt for the given transaction hash. + pub fn receipt_by_hash(&self, hash: &B256) -> Option<&OpReceipt> { + self.metadata.receipt_by_hash(hash) + } } /// A trait for decoding flashblocks from bytes. @@ -57,6 +62,7 @@ impl FlashBlockDecoder for () { } /// Provides metadata about the block that may be useful for indexing or analysis. +// Note: this uses mixed camel, snake case: #[derive(Default, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct Metadata { /// The number of the block in the L2 chain. @@ -69,6 +75,13 @@ pub struct Metadata { pub receipts: BTreeMap, } +impl Metadata { + /// Returns the receipt for the given transaction hash. + pub fn receipt_by_hash(&self, hash: &B256) -> Option<&OpReceipt> { + self.receipts.get(hash) + } +} + /// Represents the base configuration of an execution payload that remains constant /// throughout block construction. 
This includes fundamental block properties like /// parent hash, block number, and other header fields that are determined at @@ -168,3 +181,206 @@ impl PendingFlashBlock { self.has_computed_state_root.then_some(self.pending.block().state_root()) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_flashblock_serde_roundtrip() { + let raw = r#"{ + "diff": { + "block_hash": "0x2d902e3fcb5bd57e0bf878cbbda1386e7fb8968d518912d58678a35e58261c46", + "gas_used": "0x2907796", + "logs_bloom": "0x5c21065292452cfcd5175abfee20e796773da578307356043ba4f62692aca01204e8908f97ab9df43f1e9c57f586b1c9a7df8b66ffa7746dfeeb538617fea5eb75ad87f8b6653f597d86814dc5ad6de404e5a48aeffcc4b1e170c2bdbc7a334936c66166ba0faa6517597b676ef65c588342756f280f7d610aa3ed35c5d877449bfacbdb9b40d98c457f974ab264ec40e4edd6e9fab4c0cb794bf75f10ea20dab75a1f9fd1c441d4c365d1476841e8593f1d1b9a1c52919a0fcf9fc5eef2ef82fe80971a72d1cde1cb195db4806058a229e88acfddfe1a1308adb6f69afa3aaf67f4bd49e93e9f9532ea30bd891a8ff08de61fb645bec678db816950b47fcef0", + "receipts_root": "0x2c4203e9aa87258627bf23ab4d5f9d92da30285ea11dc0b3e140a5a8d4b63e26", + "state_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactions": [ + "0x02f8c2822105830b0c58840b677c0f840c93fb5a834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c080a07e8486ab3db9f07588a3f37bd8ffb9b349ba9bb738a2500d78a4583e1e54a6f9a068d0b3c729a6777c81dd49bd0c2dc3a079f0ceed4e778fbfe79176e8b70d68d8", + "0xf90fae820248840158a3c58307291a94bbbfd134e9b44bfb5123898ba36b01de7ab93d9880b90f443087505600000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000012000000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f403000000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f40300000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda0291300000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000000003600000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000044095ea7b30000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e22200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001243b2253c800000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000
0000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000133f4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ff3684f28c67538d4d072c2273400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000007e42213bc0b000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b000000000000000000000000ea758cac6115309b325c582fd0782d79e350217700000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000007041fff991f000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b06d9200000000000000000000000000000000000000000000000000000000000000a0d311e79cd2099f6f1f0607040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000058000000000000000000000000000000000000000000000000000000000000000e4c1fb425e000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000069073bb900000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003c438c9c147000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000002710000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c800000000000000000000000000000000000000000000000000000000000001c400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000002e4945bcec9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000220000000000000000000000000ea758cac6115309b325c582fd0782d79e35021770000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000069073bb90000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000
00000000000208f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca00000000000000000000000000000000000000000000000000000000000000027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008434ee90ca000000000000000000000000f5c4f3dc02c3fb9279495a8fef7b0741da956157000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b1a7880000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e22200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca000000000000000000000000000000000000000000000000000000000000000100000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f403000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002887696e8edbbcbd7306955512ff6f2d8426403eef4762157da3e9c5a89d78f682422da0c8d8b1aa1c9bfd1fe1e4a10c6123caa2fe582294aa5798c54546faa4c09590a9a012a1c78fca9cfefd281c1e44682de3c4420299da5cf2ae498f67d7de7dcf166c", + "0x02f8f582210582a649831db02984026c1a34833d090094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31c4289d066d04f33681f6686155c8243dff963557765630a39bdd8c54e6b7dbe5d4b689e9d536608db03163882cf005f7b5813e41d2fdec75161c8470a410c4c9201000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a088fd1a2b2e5891109afc3845b2c8b0ca76ea8306190dcb80a703a2451f7bab25a0718ae373e36c8ddb2b934ca936ed824db22c0625cfea29be3d408ff41787fc8c", + 
"0x02f9030b822105830536f9830f58ab84025c6b93833d090094c90d989d809e26b2d95fb72eb3288fef72af8c2f80b9029a00000000000069073af31c3d4d0646e102b6f958428cd8ed562efa6efb234f629b5f6ca52a15fd2e33aea76eb64fb04cae81b3e5b769dbdc681dcfd4b7a802a2cacdf1ccb65276a722c67607000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000010206777762d3eb91810b15526c2c9102864d722ef7a9ed24e77271c1dcbf0fdcba68138800000000010698c8f03094a9e65ccedc14c40130e4a5dd0ce14fb12ea58cbeac11f662b458b9271000000000000003045a9ad2bb92b0b3e5c571fdd5125114e04e02be1a0bb80000000001036e55486ea6b8691ba58224f3cae35505add86c372710000000000003681d6e4b0b020656ca04956ddaf76add7ef022f60dac00000000010003028be0fcdd7cf0b53b7b82b8f6ea8586d07c53359f2710000000000006c30e25679d5c77b257ac3a61ad08603b11e7afe77ac9222a5386c27d08b6b6c3ea6000000000010696d4b53a38337a5733179751781178a2613306063c511b78cd02684739288c0a01f400000000000002020d028b2d7a29d2e57efc6405a1dce1437180e3ce27100000000001068a71465e76d736564b0c90f5cf3d0d7b69c461c36f69250ae27dbead147cc8f80bb80000000000000206354def8b7e6b2ee04bf85c00f5e79f173d0b76d5017bab3a90c7ba62e1722699000000000000010245f3ad9e63f629be6e278cc4cf34d3b0a79a4a0b27100000000000010404b154dbcd3c75580382c2353082df4390613d93c627120000000001011500cc7d9c2b460720a48cc7444d7e7dfe43f6050bb80a03000000015c8dec5f0eedf1f8934815ef8fb8cb8198eac6520bb80a030000010286f3dd3b4d08de718d7909b0fdc16f4cbdf94ef527100000000000c001a0d4c12f6433ff6ea0573633364c030d8b46ed5764494f80eb434f27060c39f315a034df82c4ac185a666280d578992feee0c05fc75d93e3e2286726c85fba1bb0a0", + "0x02f8f68221058305c7b3830f4ef58401a5485d832dc6c094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31b777ac6b2082fc399fde92a814114b7896ca0b0503106910ea099d5e32c93bfc0013ed2850534c3f8583ab7276414416c0d15ac021126f6cb6ca1ed091ddc01eb01000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a09694b95dc893bed698ede415c188db3530ccc98a01d79bb9f11d783de7dddde9a0275b0165ab21ea0e6f721c624aa2270a3f98276ca0c95381d90e3f9d434b4881", + "0x02f8f682210583034573830f4ef58401a5485d832dc6c094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31c970da8f2adb8bafe6d254ec4428f8342508e169f75e8450f6ff8488813dfa638395e16787966f01731fddffd0e7352cde07fd24bba283bd27f1828fb2a0c700701000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a00181afe4bedab67692a9c1ff30a89fde6b3d3c8407a47a2777efcd6bdc0c39d2a022d6a4219e72eebdbc5d31ae998243ccec1b192c5c7c586308ccddb4838cd631", + "0x02f8c1822105830b0cfd830f4ed084013bce1b834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c001a0d87c4e16986db55b8846bccfe7bca824b75216e72d8f92369c46681800285cb2a00ec53251be3c2a0d19884747d123ddb0ada3c0a917b21882e297e95c2294d52a", + 
"0x02f901d58221058306361d830f4240840163efbc8301546194833589fcd6edb6e08f4c7c32d4f71b54bda0291380b90164cf092995000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b00000000000000000000000000b55712de2ce8f93be30d53c03d48ea275cd14d000000000000000000000000000000000000000000000000000000000000003e8000000000000000000000000000000000000000000000000000000006907385e0000000000000000000000000000000000000000000000000000000069073be2bef9866b70d0bb74d8763996eb5967b1b24cd48f7801f94ad80cb49431df6b1d00000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000417c9c2382c6c3f029aa3dcbf1df075366fae7bc9fba7f3729713e0bf4d518951f5340350208db96af23686d9985ce552e3588244456a23ca99ecbcae779ea11e71c00000000000000000000000000000000000000000000000000000000000000c080a0b1090c8c67ca9a49ba3591c72c8851f187bbfc39b1920dff2f6c0157ed1ada39a0265b7f704f4c1b5c2c5ca57f1a4040e1e48878c9ad5f2cca9c4e6669d12989f2", + "0x02f8c1822105830b0c98830f424084013bc18b834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c001a080a96d18ae46b58d9a470846a05b394ab4a49a2e379de1941205684e1ac291f9a01e6d4d2c6bab5bf8b89f1df2d6beb85d9f1b3f3be73ca2b72e4ad2d9da0d12d2", + "0x02f901d48221058231e0830f4240840163efbc8301544d94833589fcd6edb6e08f4c7c32d4f71b54bda0291380b90164cf0929950000000000000000000000001de8dbc2409c4bbf14445b0d404bb894f0c6cff70000000000000000000000008d8fa42584a727488eeb0e29405ad794a105bb9b0000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000006907385d0000000000000000000000000000000000000000000000000000000069073af16b129c414484e011621c44e0b32451fdbd69e63ef4919f427dde08c16cb199b100000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000041ae0a4b618c30f0e5d92d7fe99bb435413b2201711427699fd285f69666396cee76199d4e901cfb298612cb3b8ad06178cefb4136a8bc1be07c01b5fea80e5ec11b00000000000000000000000000000000000000000000000000000000000000c080a0af315068084aae367f00263dbd872908bbb9ceaefd6b792fc48dd357e6bdf8afa01e7f0e5913570394b9648939ef71fc5ac34fe320a2757ec388316731a335e69f", + "0x02f9022f82210583052d0b830f423f84025c5527833d090094c90d989d809e26b2d95fb72eb3288fef72af8c2f80b901be00000000000069073af31cf0f932cecc8c4c6ffffa554a63e8fba251434483ed3903966d2ba5a70121618a1c45bd9ee158192ab8d7e12ce0f447f2848a48aedaa89e0efa8637bb931745de05000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e2000000000000003045a9ad2bb92b0b3e5c571fdd5125114e04e02be1a0bb80000000001036e55486ea6b8691ba58224f3cae35505add86c372710000000000003681d6e4b0b020656ca04956ddaf76add7ef022f60dac0000000001010206777762d3eb91810b15526c2c9102864d722ef7a9ed24e77271c1dcbf0fdcba68138800000000010698c8f03094a9e65ccedc14c40130e4a5dd0ce14fb12ea58cbeac11f662b458b9271000000000000002005554419ccd0293d9383901f461c7c3e0c66e925f0bb80000000001028eb9437532fac8d6a7870f3f887b7978d20355fc271000000000000003035d28f920c9d23100e4a38b2ba2d8ae617c3b261501f4000000000102bc51db8aec659027ae0b0e468c0735418161a7800bb8000000000003dbc6998296caa1652a810dc8d3baf4a8294330f100500000000000c080a040000b130b1759df897a9573691a3d1cafacc6d95d0db1826f275afc30e2ff63a0400a7514f8d5383970c4412205ec8e9c6ca06acea504acabd2d3c36e9cb5003d" + ], + "withdrawals": [], + "withdrawals_root": "0x81864c23f426ad807d66c9fdde33213e1fdbac06c1b751d279901d1ce13670ac" 
+ }, + "index": 10, + "metadata": { + "block_number": 37646058, + "new_account_balances": { + "0x000000000022d473030f116ddee9f6b43ac78ba3": "0x0", + "0x0000000071727de22e5e9d8baf0edac6f37da032": "0x23281e39594556899", + "0x0000f90827f1c53a10cb7a02335b175320002935": "0x0", + "0x000f3df6d732807ef1319fb7b8bb8522d0beac02": "0x0" + }, + "receipts": { + "0x1a766690fd6d0febffc488f12fbd7385c43fbe1e07113a1316f22f176355297e": { + "Legacy": { + "cumulativeGasUsed": "0x2868d76", + "logs": [ + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b2ee6f", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x00000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f4030", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b2ee6f", + "topics": [ + "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x0000000000000000000000000000000000001ff3684f28c67538d4d072c22734" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000044095ea7b30000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x00000000000000000000000000000000000000000000000000000000000133f4", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e2220000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000133f400000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + 
"0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177" + ] + }, + { + "address": "0x8f360baf899845441eccdc46525e26bb8860752a", + "data": "0x00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000001957cc57b7a9959c0000000000000000000000000000000000000000000000001957cc57b7a9959800000000000000000000000000000000000000000000000444e308096a22c339000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000092458cc3a866f04600000000000000000000000000000000000000000000000025f3e27916e84b59000", + "topics": [ + "0x4e1d56f7310a8c32b2267f756b19ba65019b4890068ce114a25009abe54de5ba" + ] + }, + { + "address": "0xba12222222228d8ba445958a75a0704d566bf2c8", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0x2170c741c41531aec20e7c107c24eecfdd15e69c9bb0a8dd37b1840b9e0b207b", + "0x8f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd", + "0x000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "0x000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177", + "0x000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c8" + ] + }, + { + "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c8", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177" + ] + }, + { + "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": 
"0x0000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007e42213bc0b000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b000000000000000000000000ea758cac6115309b325c582fd0782d79e350217700000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000007041fff991f000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b06d9200000000000000000000000000000000000000000000000000000000000000a0d311e79cd2099f6f1f0607040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000058000000000000000000000000000000000000000000000000000000000000000e4c1fb425e000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000069073bb900000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003c438c9c147000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000002710000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c800000000000000000000000000000000000000000000000000000000000001c400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000002e4945bcec9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000220000000000000000000000000ea758cac6115309b325c582fd0782d79e35021770000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000069073bb9000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000208f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca
00000000000000000000000000000000000000000000000000000000000000027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008434ee90ca000000000000000000000000f5c4f3dc02c3fb9279495a8fef7b0741da956157000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b1a7880000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + }, + { + "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x00000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f4030" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e2220000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca000000000000000000000000000000000000000000000000000000000000000100000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f40300000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + } + ], + "status": "0x1" + } + }, + "0x2cd6b4825b5ee40b703c947e15630336dceda97825b70412da54ccc27f484496": { + "Eip1559": { + "cumulativeGasUsed": "0x28cca69", + "logs": [ + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x", + "topics": [ + "0x98de503528ee59b575ef0c0a2576a82497bfc029a5685b209e9ec333479b10a5", + "0x000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b0", + "0xbef9866b70d0bb74d8763996eb5967b1b24cd48f7801f94ad80cb49431df6b1d" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x00000000000000000000000000000000000000000000000000000000000003e8", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b0", + "0x0000000000000000000000000b55712de2ce8f93be30d53c03d48ea275cd14d0" + ] + } + ], + "status": "0x1" + } + } + } + }, + "payload_id": "0x0316ecb1aa1671b5" +}"#; + + let flashblock: FlashBlock = serde_json::from_str(raw).expect("deserialize"); + let serialized = serde_json::to_string(&flashblock).expect("serialize"); + let roundtrip: FlashBlock = serde_json::from_str(&serialized).expect("roundtrip"); + + 
assert_eq!(flashblock, roundtrip); + } +} diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 04887d98f4c..f69da896424 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -24,7 +24,7 @@ use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ ExecutionPayloadBaseV1, FlashBlockBuildInfo, FlashBlockCompleteSequenceRx, FlashBlockService, - InProgressFlashBlockRx, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, + FlashblocksListeners, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; use reth_rpc::eth::core::EthApiInner; use reth_rpc_eth_api::{ @@ -85,17 +85,13 @@ impl OpEthApi { eth_api: EthApiNodeBackend, sequencer_client: Option, min_suggested_priority_fee: U256, - pending_block_rx: Option>, - flashblock_rx: Option, - in_progress_rx: Option, + flashblocks: Option>, ) -> Self { let inner = Arc::new(OpEthApiInner { eth_api, sequencer_client, min_suggested_priority_fee, - pending_block_rx, - flashblock_rx, - in_progress_rx, + flashblocks, }); Self { inner } } @@ -111,17 +107,17 @@ impl OpEthApi { /// Returns a cloned pending block receiver, if any. pub fn pending_block_rx(&self) -> Option> { - self.inner.pending_block_rx.clone() + self.inner.flashblocks.as_ref().map(|f| f.pending_block_rx.clone()) } /// Returns a flashblock receiver, if any, by resubscribing to it. pub fn flashblock_rx(&self) -> Option { - self.inner.flashblock_rx.as_ref().map(|rx| rx.resubscribe()) + self.inner.flashblocks.as_ref().map(|f| f.flashblock_rx.resubscribe()) } /// Returns information about the flashblock currently being built, if any. fn flashblock_build_info(&self) -> Option { - self.inner.in_progress_rx.as_ref().and_then(|rx| *rx.borrow()) + self.inner.flashblocks.as_ref().and_then(|f| *f.in_progress_rx.borrow()) } /// Extracts pending block if it matches the expected parent hash. @@ -143,7 +139,9 @@ impl OpEthApi { &self, parent_hash: B256, ) -> eyre::Result>> { - let Some(rx) = self.inner.pending_block_rx.as_ref() else { return Ok(None) }; + let Some(rx) = self.inner.flashblocks.as_ref().map(|f| &f.pending_block_rx) else { + return Ok(None) + }; // Check if a flashblock is being built if let Some(build_info) = self.flashblock_build_info() { @@ -352,16 +350,10 @@ pub struct OpEthApiInner { /// /// See also min_suggested_priority_fee: U256, - /// Pending block receiver. - /// - /// If set, then it provides current pending block based on received Flashblocks. - pending_block_rx: Option>, - /// Flashblocks receiver. + /// Flashblocks listeners. /// - /// If set, then it provides sequences of flashblock built. - flashblock_rx: Option, - /// Receiver that signals when a flashblock is being built - in_progress_rx: Option, + /// If set, provides receivers for pending blocks, flashblock sequences, and build status. 
+ flashblocks: Option>, } impl fmt::Debug for OpEthApiInner { @@ -497,28 +489,27 @@ where None }; - let (pending_block_rx, flashblock_rx, in_progress_rx) = - if let Some(ws_url) = flashblocks_url { - info!(target: "reth:cli", %ws_url, "Launching flashblocks service"); + let flashblocks = if let Some(ws_url) = flashblocks_url { + info!(target: "reth:cli", %ws_url, "Launching flashblocks service"); - let (tx, pending_rx) = watch::channel(None); - let stream = WsFlashBlockStream::new(ws_url); - let service = FlashBlockService::new( - stream, - ctx.components.evm_config().clone(), - ctx.components.provider().clone(), - ctx.components.task_executor().clone(), - ); + let (tx, pending_rx) = watch::channel(None); + let stream = WsFlashBlockStream::new(ws_url); + let service = FlashBlockService::new( + stream, + ctx.components.evm_config().clone(), + ctx.components.provider().clone(), + ctx.components.task_executor().clone(), + ); - let flashblock_rx = service.subscribe_block_sequence(); - let in_progress_rx = service.subscribe_in_progress(); + let flashblock_rx = service.subscribe_block_sequence(); + let in_progress_rx = service.subscribe_in_progress(); - ctx.components.task_executor().spawn(Box::pin(service.run(tx))); + ctx.components.task_executor().spawn(Box::pin(service.run(tx))); - (Some(pending_rx), Some(flashblock_rx), Some(in_progress_rx)) - } else { - (None, None, None) - }; + Some(FlashblocksListeners::new(pending_rx, flashblock_rx, in_progress_rx)) + } else { + None + }; let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner(); @@ -526,9 +517,7 @@ where eth_api, sequencer_client, U256::from(min_suggested_priority_fee), - pending_block_rx, - flashblock_rx, - in_progress_rx, + flashblocks, )) } } From 7905fba953e5d7c96e547bc8f3e0a0edc8af374a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 3 Nov 2025 12:55:36 +0100 Subject: [PATCH 301/371] feat: add broadcast channel for received flashblocks (#19459) Co-authored-by: Federico Gimenez --- crates/optimism/flashblocks/src/lib.rs | 33 +++++++------ crates/optimism/flashblocks/src/sequence.rs | 12 ++++- crates/optimism/flashblocks/src/service.rs | 53 ++++++++++++++++----- crates/optimism/rpc/src/eth/mod.rs | 25 +++++++--- 4 files changed, 85 insertions(+), 38 deletions(-) diff --git a/crates/optimism/flashblocks/src/lib.rs b/crates/optimism/flashblocks/src/lib.rs index 39577116e96..7220f443cc1 100644 --- a/crates/optimism/flashblocks/src/lib.rs +++ b/crates/optimism/flashblocks/src/lib.rs @@ -9,6 +9,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use reth_primitives_traits::NodePrimitives; +use std::sync::Arc; pub use payload::{ ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashBlock, FlashBlockDecoder, @@ -39,39 +40,37 @@ pub type PendingBlockRx = tokio::sync::watch::Receiver; +/// Receiver of received [`FlashBlock`]s from the (websocket) subscription. +/// +/// [`FlashBlock`]: crate::FlashBlock +pub type FlashBlockRx = tokio::sync::broadcast::Receiver>; + /// Receiver that signals whether a [`FlashBlock`] is currently being built. pub type InProgressFlashBlockRx = tokio::sync::watch::Receiver>; /// Container for all flashblocks-related listeners. /// -/// Groups together the three receivers that provide flashblock-related updates. +/// Groups together the channels for flashblock-related updates. #[derive(Debug)] pub struct FlashblocksListeners { - /// Receiver of the most recent [`PendingFlashBlock`] built out of [`FlashBlock`]s. 
+ /// Receiver of the most recent executed [`PendingFlashBlock`] built out of [`FlashBlock`]s. pub pending_block_rx: PendingBlockRx, - /// Receiver of the sequences of [`FlashBlock`]s built. - pub flashblock_rx: FlashBlockCompleteSequenceRx, + /// Subscription channel of the complete sequences of [`FlashBlock`]s built. + pub flashblocks_sequence: tokio::sync::broadcast::Sender, /// Receiver that signals whether a [`FlashBlock`] is currently being built. pub in_progress_rx: InProgressFlashBlockRx, + /// Subscription channel for received flashblocks from the (websocket) connection. + pub received_flashblocks: tokio::sync::broadcast::Sender>, } impl FlashblocksListeners { - /// Creates a new [`FlashblocksListeners`] with the given receivers. + /// Creates a new [`FlashblocksListeners`] with the given channels. pub const fn new( pending_block_rx: PendingBlockRx, - flashblock_rx: FlashBlockCompleteSequenceRx, + flashblocks_sequence: tokio::sync::broadcast::Sender, in_progress_rx: InProgressFlashBlockRx, + received_flashblocks: tokio::sync::broadcast::Sender>, ) -> Self { - Self { pending_block_rx, flashblock_rx, in_progress_rx } - } -} - -impl Clone for FlashblocksListeners { - fn clone(&self) -> Self { - Self { - pending_block_rx: self.pending_block_rx.clone(), - flashblock_rx: self.flashblock_rx.resubscribe(), - in_progress_rx: self.in_progress_rx.clone(), - } + Self { pending_block_rx, flashblocks_sequence, in_progress_rx, received_flashblocks } } } diff --git a/crates/optimism/flashblocks/src/sequence.rs b/crates/optimism/flashblocks/src/sequence.rs index fff4bd84a45..f2363207e38 100644 --- a/crates/optimism/flashblocks/src/sequence.rs +++ b/crates/optimism/flashblocks/src/sequence.rs @@ -38,6 +38,13 @@ where Self { inner: BTreeMap::new(), block_broadcaster: tx, state_root: None } } + /// Returns the sender half of the [`FlashBlockCompleteSequence`] channel. + pub const fn block_sequence_broadcaster( + &self, + ) -> &broadcast::Sender { + &self.block_broadcaster + } + /// Gets a subscriber to the flashblock sequences produced. pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { self.block_broadcaster.subscribe() @@ -160,7 +167,10 @@ where } /// A complete sequence of flashblocks, often corresponding to a full block. -/// Ensure invariants of a complete flashblocks sequence. +/// +/// Ensures invariants of a complete flashblocks sequence. +/// If this entire sequence of flashblocks was executed on top of latest block, this also includes +/// the computed state root. 
#[derive(Debug, Clone)] pub struct FlashBlockCompleteSequence { inner: Vec, diff --git a/crates/optimism/flashblocks/src/service.rs b/crates/optimism/flashblocks/src/service.rs index 7e442470d98..f5d4a4a810d 100644 --- a/crates/optimism/flashblocks/src/service.rs +++ b/crates/optimism/flashblocks/src/service.rs @@ -1,8 +1,8 @@ use crate::{ sequence::FlashBlockPendingSequence, worker::{BuildArgs, FlashBlockBuilder}, - ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequenceRx, InProgressFlashBlockRx, - PendingFlashBlock, + ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, + InProgressFlashBlockRx, PendingFlashBlock, }; use alloy_eips::eip2718::WithEncoded; use alloy_primitives::B256; @@ -19,6 +19,7 @@ use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskExecutor; use std::{ pin::Pin, + sync::Arc, task::{ready, Context, Poll}, time::Instant, }; @@ -42,6 +43,8 @@ pub struct FlashBlockService< rx: S, current: Option>, blocks: FlashBlockPendingSequence, + /// Broadcast channel to forward received flashblocks from the subscription. + received_flashblocks_tx: tokio::sync::broadcast::Sender>, rebuild: bool, builder: FlashBlockBuilder, canon_receiver: CanonStateNotifications, @@ -60,17 +63,6 @@ pub struct FlashBlockService< compute_state_root: bool, } -/// Information for a flashblock currently built -#[derive(Debug, Clone, Copy)] -pub struct FlashBlockBuildInfo { - /// Parent block hash - pub parent_hash: B256, - /// Flashblock index within the current block's sequence - pub index: u64, - /// Block number of the flashblock being built. - pub block_number: u64, -} - impl FlashBlockService where N: NodePrimitives, @@ -92,10 +84,12 @@ where /// Constructs a new `FlashBlockService` that receives [`FlashBlock`]s from `rx` stream. pub fn new(rx: S, evm_config: EvmConfig, provider: Provider, spawner: TaskExecutor) -> Self { let (in_progress_tx, _) = watch::channel(None); + let (received_flashblocks_tx, _) = tokio::sync::broadcast::channel(128); Self { rx, current: None, blocks: FlashBlockPendingSequence::new(), + received_flashblocks_tx, canon_receiver: provider.subscribe_to_canonical_state(), builder: FlashBlockBuilder::new(evm_config, provider), rebuild: false, @@ -114,6 +108,20 @@ where self } + /// Returns the sender half to the received flashblocks. + pub const fn flashblocks_broadcaster( + &self, + ) -> &tokio::sync::broadcast::Sender> { + &self.received_flashblocks_tx + } + + /// Returns the sender half to the flashblock sequence. + pub const fn block_sequence_broadcaster( + &self, + ) -> &tokio::sync::broadcast::Sender { + self.blocks.block_sequence_broadcaster() + } + /// Returns a subscriber to the flashblock sequence. pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { self.blocks.subscribe_block_sequence() @@ -137,6 +145,13 @@ where warn!("Flashblock service has stopped"); } + /// Notifies all subscribers about the received flashblock + fn notify_received_flashblock(&self, flashblock: &FlashBlock) { + if self.received_flashblocks_tx.receiver_count() > 0 { + let _ = self.received_flashblocks_tx.send(Arc::new(flashblock.clone())); + } + } + /// Returns the [`BuildArgs`] made purely out of [`FlashBlock`]s that were received earlier. /// /// Returns `None` if the flashblock have no `base` or the base is not a child block of latest. 
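A minimal sketch of how a consumer might tap the new received-flashblocks broadcast channel, assuming `stream`, `evm_config`, `provider`, and `executor` are set up the same way as in the RPC builder hunk later in this patch (the variable names here are illustrative, not part of the change):

    let service = FlashBlockService::new(stream, evm_config, provider, executor.clone());

    // Clone the sender half before `run` consumes the service.
    let received_tx = service.flashblocks_broadcaster().clone();
    let (pending_tx, _pending_rx) = tokio::sync::watch::channel(None);

    // Any number of consumers can subscribe to the raw flashblock feed.
    let mut received_rx = received_tx.subscribe();
    executor.spawn(async move {
        // Each item is an `Arc<FlashBlock>`, forwarded as soon as it is received.
        while let Ok(flashblock) = received_rx.recv().await {
            println!("received flashblock with index {}", flashblock.index);
        }
    });

    executor.spawn(Box::pin(service.run(pending_tx)));
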
@@ -284,6 +299,7 @@ where while let Poll::Ready(Some(result)) = this.rx.poll_next_unpin(cx) { match result { Ok(flashblock) => { + this.notify_received_flashblock(&flashblock); if flashblock.index == 0 { this.metrics.last_flashblock_length.record(this.blocks.count() as f64); } @@ -344,6 +360,17 @@ where } } +/// Information for a flashblock currently built +#[derive(Debug, Clone, Copy)] +pub struct FlashBlockBuildInfo { + /// Parent block hash + pub parent_hash: B256, + /// Flashblock index within the current block's sequence + pub index: u64, + /// Block number of the flashblock being built. + pub block_number: u64, +} + type BuildJob = (Instant, oneshot::Receiver, CachedReads)>>>); diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index f69da896424..84929e98852 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -23,8 +23,8 @@ use reth_evm::ConfigureEvm; use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ - ExecutionPayloadBaseV1, FlashBlockBuildInfo, FlashBlockCompleteSequenceRx, FlashBlockService, - FlashblocksListeners, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, + ExecutionPayloadBaseV1, FlashBlockBuildInfo, FlashBlockCompleteSequenceRx, FlashBlockRx, + FlashBlockService, FlashblocksListeners, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; use reth_rpc::eth::core::EthApiInner; use reth_rpc_eth_api::{ @@ -110,9 +110,14 @@ impl OpEthApi { self.inner.flashblocks.as_ref().map(|f| f.pending_block_rx.clone()) } - /// Returns a flashblock receiver, if any, by resubscribing to it. - pub fn flashblock_rx(&self) -> Option { - self.inner.flashblocks.as_ref().map(|f| f.flashblock_rx.resubscribe()) + /// Returns a new subscription to received flashblocks. + pub fn subscribe_received_flashblocks(&self) -> Option { + self.inner.flashblocks.as_ref().map(|f| f.received_flashblocks.subscribe()) + } + + /// Returns a new subscription to flashblock sequences. + pub fn subscribe_flashblock_sequence(&self) -> Option { + self.inner.flashblocks.as_ref().map(|f| f.flashblocks_sequence.subscribe()) } /// Returns information about the flashblock currently being built, if any. 
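On the RPC side, a sketch of the new subscription accessors, assuming `op_eth_api` is an `OpEthApi` handle and a Tokio runtime is available; both accessors return `None` when the node was started without a flashblocks websocket URL:

    if let Some(mut rx) = op_eth_api.subscribe_received_flashblocks() {
        tokio::spawn(async move {
            // Each item is an `Arc<FlashBlock>`; per-transaction receipts are
            // reachable through the `FlashBlock::receipt_by_hash` helper added
            // earlier in this series.
            while let Ok(flashblock) = rx.recv().await {
                println!("received flashblock with index {}", flashblock.index);
            }
        });
    }
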
@@ -501,12 +506,18 @@ where ctx.components.task_executor().clone(), ); - let flashblock_rx = service.subscribe_block_sequence(); + let flashblocks_sequence = service.block_sequence_broadcaster().clone(); + let received_flashblocks = service.flashblocks_broadcaster().clone(); let in_progress_rx = service.subscribe_in_progress(); ctx.components.task_executor().spawn(Box::pin(service.run(tx))); - Some(FlashblocksListeners::new(pending_rx, flashblock_rx, in_progress_rx)) + Some(FlashblocksListeners::new( + pending_rx, + flashblocks_sequence, + in_progress_rx, + received_flashblocks, + )) } else { None }; From 7438bdbdf64d8d00880b501e7c2ad9e4d3fd87a3 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Mon, 3 Nov 2025 15:28:14 +0000 Subject: [PATCH 302/371] refactor(prune): derive EnumIter instead of explicit array of segments (#19465) --- Cargo.lock | 1 + crates/prune/types/Cargo.toml | 2 + crates/prune/types/src/lib.rs | 2 +- crates/prune/types/src/segment.rs | 42 +++++++++++++------ .../src/providers/database/provider.rs | 9 ++-- 5 files changed, 37 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4f3ef5779b2..7f9e6eaaa07 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9848,6 +9848,7 @@ dependencies = [ "reth-codecs", "serde", "serde_json", + "strum 0.27.2", "thiserror 2.0.17", "toml", ] diff --git a/crates/prune/types/Cargo.toml b/crates/prune/types/Cargo.toml index b60621b331a..30adbb14d91 100644 --- a/crates/prune/types/Cargo.toml +++ b/crates/prune/types/Cargo.toml @@ -16,6 +16,7 @@ reth-codecs = { workspace = true, optional = true } alloy-primitives.workspace = true derive_more.workspace = true +strum = { workspace = true, features = ["derive"] } thiserror.workspace = true modular-bitfield = { workspace = true, optional = true } @@ -42,6 +43,7 @@ std = [ "serde?/std", "serde_json/std", "thiserror/std", + "strum/std", ] test-utils = [ "std", diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index b42574cde27..a588693892a 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -25,5 +25,5 @@ pub use pruner::{ PruneInterruptReason, PruneProgress, PrunedSegmentInfo, PrunerOutput, SegmentOutput, SegmentOutputCheckpoint, }; -pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError, PRUNE_SEGMENTS}; +pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError}; pub use target::{PruneModes, UnwindTargetPrunedError, MINIMUM_PRUNING_DISTANCE}; diff --git a/crates/prune/types/src/segment.rs b/crates/prune/types/src/segment.rs index aa0e893bb4a..36e39fcb585 100644 --- a/crates/prune/types/src/segment.rs +++ b/crates/prune/types/src/segment.rs @@ -2,6 +2,7 @@ use crate::MINIMUM_PRUNING_DISTANCE; use derive_more::Display; +use strum::{EnumIter, IntoEnumIterator}; use thiserror::Error; /// Segment of the data that can be pruned. @@ -9,7 +10,7 @@ use thiserror::Error; /// VERY IMPORTANT NOTE: new variants must be added to the end of this enum, and old variants which /// are no longer used must not be removed from this enum. The variant index is encoded directly /// when writing to the `PruneCheckpoint` table, so changing the order here will corrupt the table. 
-#[derive(Debug, Display, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
+#[derive(Debug, Display, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, EnumIter)]
 #[cfg_attr(test, derive(arbitrary::Arbitrary))]
 #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
 #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
@@ -28,9 +29,11 @@ pub enum PruneSegment {
     /// Prune segment responsible for the `StorageChangeSets` and `StoragesHistory` tables.
     StorageHistory,
     #[deprecated = "Variant indexes cannot be changed"]
+    #[strum(disabled)]
     /// Prune segment responsible for the `CanonicalHeaders`, `Headers` tables.
     Headers,
     #[deprecated = "Variant indexes cannot be changed"]
+    #[strum(disabled)]
     /// Prune segment responsible for the `Transactions` table.
     Transactions,
     /// Prune segment responsible for all rows in `AccountsTrieChangeSets` and
@@ -40,18 +43,6 @@ pub enum PruneSegment {
     Bodies,
 }
 
-/// Array of [`PruneSegment`]s actively in use.
-pub const PRUNE_SEGMENTS: [PruneSegment; 8] = [
-    PruneSegment::SenderRecovery,
-    PruneSegment::TransactionLookup,
-    PruneSegment::Receipts,
-    PruneSegment::ContractLogs,
-    PruneSegment::AccountHistory,
-    PruneSegment::StorageHistory,
-    PruneSegment::MerkleChangeSets,
-    PruneSegment::Bodies,
-];
-
 #[cfg(test)]
 #[allow(clippy::derivable_impls)]
 impl Default for PruneSegment {
@@ -61,6 +52,14 @@ impl Default for PruneSegment {
 }
 
 impl PruneSegment {
+    /// Returns an iterator over all variants of [`PruneSegment`].
+    ///
+    /// Excludes deprecated variants that are no longer used, but can still be found in the
+    /// database.
+    pub fn variants() -> impl Iterator {
+        Self::iter()
+    }
+
     /// Returns minimum number of blocks to keep in the database for this segment.
     pub const fn min_blocks(&self, purpose: PrunePurpose) -> u64 {
         match self {
@@ -117,3 +116,20 @@ pub enum PruneSegmentError {
     #[error("the configuration provided for {0} is invalid")]
     Configuration(PruneSegment),
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_prune_segment_iter_excludes_deprecated() {
+        let segments: Vec = PruneSegment::variants().collect();
+
+        // Verify deprecated variants are not included in the derived iter
+        #[expect(deprecated)]
+        {
+            assert!(!segments.contains(&PruneSegment::Headers));
+            assert!(!segments.contains(&PruneSegment::Transactions));
+        }
+    }
+}
diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs
index 9fa6500db12..a90b2c2e640 100644
--- a/crates/storage/provider/src/providers/database/provider.rs
+++ b/crates/storage/provider/src/providers/database/provider.rs
@@ -52,7 +52,7 @@ use reth_primitives_traits::{
     Account, Block as _, BlockBody as _, Bytecode, RecoveredBlock, SealedHeader, StorageEntry,
 };
 use reth_prune_types::{
-    PruneCheckpoint, PruneMode, PruneModes, PruneSegment, MINIMUM_PRUNING_DISTANCE, PRUNE_SEGMENTS,
+    PruneCheckpoint, PruneMode, PruneModes, PruneSegment, MINIMUM_PRUNING_DISTANCE,
 };
 use reth_stages_types::{StageCheckpoint, StageId};
 use reth_static_file_types::StaticFileSegment;
@@ -3024,13 +3024,12 @@ impl PruneCheckpointReader for DatabaseProvide
     }
 
     fn get_prune_checkpoints(&self) -> ProviderResult> {
-        Ok(PRUNE_SEGMENTS
-            .iter()
+        Ok(PruneSegment::variants()
             .filter_map(|segment| {
                 self.tx
-                    .get::(*segment)
+                    .get::(segment)
                     .transpose()
-                    .map(|chk| chk.map(|chk| (*segment, chk)))
+                    .map(|chk| chk.map(|chk| (segment, chk)))
             })
             .collect::>()?)
} From ea69063aae501cd24f383515ec8bfd3869d3f5c3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 3 Nov 2025 17:51:46 +0100 Subject: [PATCH 303/371] feat: schedule fusaka (#19455) --- Cargo.lock | 8 ++--- Cargo.toml | 4 +-- crates/chainspec/src/spec.rs | 62 ++++++++++++++++++++++++++---------- 3 files changed, 51 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f9e6eaaa07..d6c9ea2785d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -290,9 +290,9 @@ dependencies = [ [[package]] name = "alloy-hardforks" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ffa71f397f89c72a27d9c7e3340eed7981a18df9a257dd16b835ef7f53aef6" +checksum = "51e7f93a60ef3d867c93d43442ef3f2d8a1095450131c3d4e16bbbbf2166b9bd" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -388,9 +388,9 @@ dependencies = [ [[package]] name = "alloy-op-hardforks" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b43e1c305c2f0e4b8878b943fa2f75234803bfca5cd4a4dc0a0a772842a278ea" +checksum = "d0bc135abf78cf83a460bf785d52e4fe83c3ba5fadd416e2f79f7409eec45958" dependencies = [ "alloy-chains", "alloy-hardforks", diff --git a/Cargo.toml b/Cargo.toml index 6fa734e3d6c..7afd6716dfc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -487,7 +487,7 @@ alloy-sol-macro = "1.4.1" alloy-sol-types = { version = "1.4.1", default-features = false } alloy-trie = { version = "0.9.1", default-features = false } -alloy-hardforks = "0.4.2" +alloy-hardforks = "0.4.3" alloy-consensus = { version = "1.0.41", default-features = false } alloy-contract = { version = "1.0.41", default-features = false } @@ -519,7 +519,7 @@ alloy-transport-ws = { version = "1.0.41", default-features = false } # op alloy-op-evm = { version = "0.22.6", default-features = false } -alloy-op-hardforks = "0.4.2" +alloy-op-hardforks = "0.4.3" op-alloy-rpc-types = { version = "0.22.0", default-features = false } op-alloy-rpc-types-engine = { version = "0.22.0", default-features = false } op-alloy-network = { version = "0.22.0", default-features = false } diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 4c71b7a465f..5b67f30d025 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -4,7 +4,7 @@ use alloy_evm::eth::spec::EthExecutorSpec; use crate::{ constants::{MAINNET_DEPOSIT_CONTRACT, MAINNET_PRUNE_DELETE_LIMIT}, ethereum::SEPOLIA_PARIS_TTD, - holesky, hoodi, + holesky, hoodi, mainnet, mainnet::{MAINNET_PARIS_BLOCK, MAINNET_PARIS_TTD}, sepolia, sepolia::SEPOLIA_PARIS_BLOCK, @@ -113,7 +113,10 @@ pub static MAINNET: LazyLock> = LazyLock::new(|| { deposit_contract: Some(MAINNET_DEPOSIT_CONTRACT), base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), prune_delete_limit: MAINNET_PRUNE_DELETE_LIMIT, - blob_params: BlobScheduleBlobParams::default(), + blob_params: BlobScheduleBlobParams::default().with_scheduled([ + (mainnet::MAINNET_BPO1_TIMESTAMP, BlobParams::bpo1()), + (mainnet::MAINNET_BPO2_TIMESTAMP, BlobParams::bpo2()), + ]), }; spec.genesis.config.dao_fork_support = true; spec.into() @@ -1177,7 +1180,10 @@ Merge hard forks: Post-merge hard forks (timestamp based): - Shanghai @1681338455 - Cancun @1710338135 blob: (target: 3, max: 6, fraction: 3338477) -- Prague @1746612311 blob: (target: 6, max: 9, fraction: 5007716)" +- Prague @1746612311 blob: (target: 6, max: 9, fraction: 5007716) +- Osaka @1764798551 blob: (target: 6, max: 9, fraction: 5007716) +- Bpo1 @1765290071 
blob: (target: 10, max: 15, fraction: 8346193) +- Bpo2 @1767747671 blob: (target: 14, max: 21, fraction: 11684671)" ); } @@ -1421,7 +1427,10 @@ Post-merge hard forks (timestamp based): ), ( EthereumHardfork::Prague, - ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + ForkId { + hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), + next: mainnet::MAINNET_OSAKA_TIMESTAMP, + }, ), ], ); @@ -1564,13 +1573,23 @@ Post-merge hard forks (timestamp based): ), // First Prague block ( - Head { number: 20000002, timestamp: 1746612311, ..Default::default() }, - ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + Head { number: 20000004, timestamp: 1746612311, ..Default::default() }, + ForkId { + hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), + next: mainnet::MAINNET_OSAKA_TIMESTAMP, + }, ), - // Future Prague block + // Osaka block ( - Head { number: 20000002, timestamp: 2000000000, ..Default::default() }, - ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + Head { + number: 20000004, + timestamp: mainnet::MAINNET_OSAKA_TIMESTAMP, + ..Default::default() + }, + ForkId { + hash: ForkHash(hex!("0x5167e2a6")), + next: mainnet::MAINNET_BPO1_TIMESTAMP, + }, ), ], ); @@ -1879,11 +1898,22 @@ Post-merge hard forks (timestamp based): ), // First Prague block ( Head { number: 20000004, timestamp: 1746612311, ..Default::default() }, - ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, - ), // Future Prague block + ForkId { + hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), + next: mainnet::MAINNET_OSAKA_TIMESTAMP, + }, + ), + // Osaka block ( - Head { number: 20000004, timestamp: 2000000000, ..Default::default() }, - ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + Head { + number: 20000004, + timestamp: mainnet::MAINNET_OSAKA_TIMESTAMP, + ..Default::default() + }, + ForkId { + hash: ForkHash(hex!("0x5167e2a6")), + next: mainnet::MAINNET_BPO1_TIMESTAMP, + }, ), ], ); @@ -2540,10 +2570,8 @@ Post-merge hard forks (timestamp based): #[test] fn latest_eth_mainnet_fork_id() { - assert_eq!( - ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, - MAINNET.latest_fork_id() - ) + // BPO2 + assert_eq!(ForkId { hash: ForkHash(hex!("0x07c9462e")), next: 0 }, MAINNET.latest_fork_id()) } #[test] From a0eccf712815fc01e6ba87c9ae3571d0c803e13f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 3 Nov 2025 18:06:48 +0100 Subject: [PATCH 304/371] chore: use name const for cli name (#19466) --- crates/ethereum/cli/src/interface.rs | 2 +- crates/optimism/cli/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index 8d2b4ba62fb..f41143bb4fa 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -32,7 +32,7 @@ use tracing::info; /// /// This is the entrypoint to the executable. 
 #[derive(Debug, Parser)]
-#[command(author, version =version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)]
+#[command(author, name = version_metadata().name_client.as_ref(), version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)]
 pub struct Cli<
     C: ChainSpecParser = EthereumChainSpecParser,
     Ext: clap::Args + fmt::Debug = NoArgs,
diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs
index 1655b92d6ef..52fdcc2ddd5 100644
--- a/crates/optimism/cli/src/lib.rs
+++ b/crates/optimism/cli/src/lib.rs
@@ -62,7 +62,7 @@ use reth_node_metrics as _;
 ///
 /// This is the entrypoint to the executable.
 #[derive(Debug, Parser)]
-#[command(author, version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)]
+#[command(author, name = version_metadata().name_client.as_ref(), version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)]
 pub struct Cli<
     Spec: ChainSpecParser = OpChainSpecParser,
     Ext: clap::Args + fmt::Debug = RollupArgs,

From 846025545cb0d15f09a098d6a39f2f85d0559f40 Mon Sep 17 00:00:00 2001
From: Brian Picciano
Date: Mon, 3 Nov 2025 18:14:45 +0100
Subject: [PATCH 305/371] fix(db): OverlayStateProviderFactory: default validation lower bound to 0 (#19468)

---
 .../storage/provider/src/providers/state/overlay.rs | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs
index f912411f1e3..d3ef87e6c49 100644
--- a/crates/storage/provider/src/providers/state/overlay.rs
+++ b/crates/storage/provider/src/providers/state/overlay.rs
@@ -116,16 +116,18 @@ where
             return Ok(false)
         }
 
-        // Extract the lower bound from prune checkpoint if available
+        // Extract the lower bound from prune checkpoint if available.
+        //
+        // If not available we assume pruning has never run and so there is no lower bound. This
+        // should not generally happen, since MerkleChangeSets always have pruning enabled, but when
+        // starting a new node from scratch (e.g. in a test case or benchmark) it can surface.
+ // // The prune checkpoint's block_number is the highest pruned block, so data is available // starting from the next block let lower_bound = prune_checkpoint .and_then(|chk| chk.block_number) .map(|block_number| block_number + 1) - .ok_or_else(|| ProviderError::InsufficientChangesets { - requested: requested_block, - available: 0..=upper_bound, - })?; + .unwrap_or_default(); let available_range = lower_bound..=upper_bound; From 93649fed0b6143ade2f82dc8c58defe8cba99a96 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 3 Nov 2025 19:49:08 +0100 Subject: [PATCH 306/371] chore: bump revm 31 (#19470) Co-authored-by: Arsenii Kulikov --- Cargo.lock | 627 ++++++++++++++----------------- Cargo.toml | 26 +- crates/optimism/rpc/src/error.rs | 7 +- 3 files changed, 308 insertions(+), 352 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d6c9ea2785d..18a234a0498 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -65,6 +65,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "aligned-vec" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc890384c8602f339876ded803c97ad529f3842aba97f6392b3dba0dd171769b" +dependencies = [ + "equator", +] + [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -253,9 +262,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.22.6" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08e9e656d58027542447c1ca5aa4ca96293f09e6920c4651953b7451a7c35e4e" +checksum = "88d4291974e3564db30f1d2bcb3ba4a53dbc927e9a6fce2edaf389a712204fbd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -370,9 +379,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.22.6" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "593ce78cea49e4700b4d9061fb16a5455265176541eeba91265f548659d33229" +checksum = "ab958a76714744eff19308dd42a4b72c27e7624557dbdc4dfe69ac3d5af2583c" dependencies = [ "alloy-consensus", "alloy-eips", @@ -1374,7 +1383,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e6fa871e4334a622afd6bb2f611635e8083a6f5e2936c0f90f37c7ef9856298" dependencies = [ "async-channel", - "futures-lite", + "futures-lite 1.13.0", "http-types", "log", "memchr", @@ -1689,9 +1698,9 @@ dependencies = [ [[package]] name = "boa_ast" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c340fe0f0b267787095cbe35240c6786ff19da63ec7b69367ba338eace8169b" +checksum = "bc119a5ad34c3f459062a96907f53358989b173d104258891bb74f95d93747e8" dependencies = [ "bitflags 2.10.0", "boa_interner", @@ -1704,10 +1713,11 @@ dependencies = [ [[package]] name = "boa_engine" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f620c3f06f51e65c0504ddf04978be1b814ac6586f0b45f6019801ab5efd37f9" +checksum = "e637ec52ea66d76b0ca86180c259d6c7bb6e6a6e14b2f36b85099306d8b00cc3" dependencies = [ + "aligned-vec", "arrayvec", "bitflags 2.10.0", "boa_ast", @@ -1715,73 +1725,80 @@ dependencies = [ "boa_interner", "boa_macros", "boa_parser", - "boa_profiler", "boa_string", "bytemuck", "cfg-if", + "cow-utils", "dashmap 6.1.0", + "dynify", "fast-float2", - "hashbrown 0.15.5", - "icu_normalizer 1.5.0", + "float16", + "futures-channel", + "futures-concurrency", + "futures-lite 2.6.1", + "hashbrown 0.16.0", + "icu_normalizer", "indexmap 2.12.0", "intrusive-collections", - "itertools 0.13.0", + "itertools 0.14.0", 
"num-bigint", "num-integer", "num-traits", "num_enum", - "once_cell", - "pollster", + "paste", "portable-atomic", - "rand 0.8.5", + "rand 0.9.2", "regress", "rustc-hash", "ryu-js", "serde", "serde_json", - "sptr", + "small_btree", "static_assertions", + "tag_ptr", "tap", "thin-vec", "thiserror 2.0.17", "time", + "xsum", ] [[package]] name = "boa_gc" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2425c0b7720d42d73eaa6a883fbb77a5c920da8694964a3d79a67597ac55cce2" +checksum = "f1179f690cbfcbe5364cceee5f1cb577265bb6f07b0be6f210aabe270adcf9da" dependencies = [ "boa_macros", - "boa_profiler", "boa_string", - "hashbrown 0.15.5", + "hashbrown 0.16.0", "thin-vec", ] [[package]] name = "boa_interner" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42407a3b724cfaecde8f7d4af566df4b56af32a2f11f0956f5570bb974e7f749" +checksum = "9626505d33dc63d349662437297df1d3afd9d5fc4a2b3ad34e5e1ce879a78848" dependencies = [ "boa_gc", "boa_macros", - "hashbrown 0.15.5", + "hashbrown 0.16.0", "indexmap 2.12.0", "once_cell", - "phf 0.11.3", + "phf", "rustc-hash", "static_assertions", ] [[package]] name = "boa_macros" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c" +checksum = "7f36418a46544b152632c141b0a0b7a453cd69ca150caeef83aee9e2f4b48b7d" dependencies = [ + "cfg-if", + "cow-utils", "proc-macro2", "quote", "syn 2.0.108", @@ -1790,39 +1807,33 @@ dependencies = [ [[package]] name = "boa_parser" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cc142dac798cdc6e2dbccfddeb50f36d2523bb977a976e19bdb3ae19b740804" +checksum = "02f99bf5b684f0de946378fcfe5f38c3a0fbd51cbf83a0f39ff773a0e218541f" dependencies = [ "bitflags 2.10.0", "boa_ast", "boa_interner", "boa_macros", - "boa_profiler", "fast-float2", - "icu_properties 1.5.1", + "icu_properties", "num-bigint", "num-traits", "regress", "rustc-hash", ] -[[package]] -name = "boa_profiler" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4064908e7cdf9b6317179e9b04dcb27f1510c1c144aeab4d0394014f37a0f922" - [[package]] name = "boa_string" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7debc13fbf7997bf38bf8e9b20f1ad5e2a7d27a900e1f6039fe244ce30f589b5" +checksum = "45ce9d7aa5563a2e14eab111e2ae1a06a69a812f6c0c3d843196c9d03fbef440" dependencies = [ "fast-float2", + "itoa", "paste", "rustc-hash", - "sptr", + "ryu-js", "static_assertions", ] @@ -2421,6 +2432,16 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "cordyceps" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688d7fbb8092b8de775ef2536f36c8c31f2bc4006ece2e8d8ad2d17d00ce0a2a" +dependencies = [ + "loom", + "tracing", +] + [[package]] name = "core-foundation" version = "0.10.1" @@ -2446,6 +2467,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "cow-utils" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "417bef24afe1460300965a25ff4a24b8b45ad011948302ec221e8a0a81eb2c79" + [[package]] name = "cpufeatures" version = "0.2.17" @@ -2901,6 +2928,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "diatomic-waker" +version = "0.2.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c" + [[package]] name = "diff" version = "0.1.13" @@ -3041,6 +3074,26 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" +[[package]] +name = "dynify" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81acb15628a3e22358bf73de5e7e62360b8a777dbcb5fc9ac7dfa9ae73723747" +dependencies = [ + "dynify-macros", +] + +[[package]] +name = "dynify-macros" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec431cd708430d5029356535259c5d645d60edd3d39c54e5eea9782d46caa7d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.108", +] + [[package]] name = "ecdsa" version = "0.16.9" @@ -3221,6 +3274,26 @@ dependencies = [ "syn 2.0.108", ] +[[package]] +name = "equator" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4711b213838dfee0117e3be6ac926007d7f433d7bbe33595975d4190cb07e6fc" +dependencies = [ + "equator-macro", +] + +[[package]] +name = "equator-macro" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.108", +] + [[package]] name = "equivalent" version = "1.0.2" @@ -3819,6 +3892,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + [[package]] name = "flate2" version = "1.1.5" @@ -3829,6 +3908,16 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "float16" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bffafbd079d520191c7c2779ae9cf757601266cf4167d3f659ff09617ff8483" +dependencies = [ + "cfg-if", + "rustc_version 0.2.3", +] + [[package]] name = "fnv" version = "1.0.7" @@ -3886,6 +3975,19 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-buffered" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8e0e1f38ec07ba4abbde21eed377082f17ccb988be9d988a5adbf4bafc118fd" +dependencies = [ + "cordyceps", + "diatomic-waker", + "futures-core", + "pin-project-lite", + "spin", +] + [[package]] name = "futures-channel" version = "0.3.31" @@ -3896,6 +3998,21 @@ dependencies = [ "futures-sink", ] +[[package]] +name = "futures-concurrency" +version = "7.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eb68017df91f2e477ed4bea586c59eaecaa47ed885a770d0444e21e62572cd2" +dependencies = [ + "fixedbitset", + "futures-buffered", + "futures-core", + "futures-lite 2.6.1", + "pin-project", + "slab", + "smallvec", +] + [[package]] name = "futures-core" version = "0.3.31" @@ -3934,6 +4051,19 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" +dependencies = [ + "fastrand 2.3.0", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.31" @@ -4391,7 +4521,7 @@ dependencies = [ "anyhow", 
"async-channel", "base64 0.13.1", - "futures-lite", + "futures-lite 1.13.0", "infer", "pin-project-lite", "rand 0.7.3", @@ -4541,27 +4671,15 @@ dependencies = [ [[package]] name = "icu_collections" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" -dependencies = [ - "displaydoc", - "yoke 0.7.5", - "zerofrom", - "zerovec 0.10.4", -] - -[[package]] -name = "icu_collections" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", "potential_utf", - "yoke 0.8.1", + "yoke", "zerofrom", - "zerovec 0.11.5", + "zerovec", ] [[package]] @@ -4571,146 +4689,57 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" dependencies = [ "displaydoc", - "litemap 0.8.1", - "tinystr 0.8.2", - "writeable 0.6.2", - "zerovec 0.11.5", -] - -[[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap 0.7.5", - "tinystr 0.7.6", - "writeable 0.5.5", - "zerovec 0.10.4", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider 1.5.0", - "tinystr 0.7.6", - "zerovec 0.10.4", + "litemap", + "serde", + "tinystr", + "writeable", + "zerovec", ] -[[package]] -name = "icu_locid_transform_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" - [[package]] name = "icu_normalizer" -version = "1.5.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "8b24a59706036ba941c9476a55cd57b82b77f38a3c667d637ee7cabbc85eaedc" dependencies = [ "displaydoc", - "icu_collections 1.5.0", - "icu_normalizer_data 1.5.1", - "icu_properties 1.5.1", - "icu_provider 1.5.0", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", "smallvec", "utf16_iter", - "utf8_iter", "write16", - "zerovec 0.10.4", + "zerovec", ] -[[package]] -name = "icu_normalizer" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" -dependencies = [ - "icu_collections 2.1.1", - "icu_normalizer_data 2.1.1", - "icu_properties 2.1.1", - "icu_provider 2.1.1", - "smallvec", - "zerovec 0.11.5", -] - -[[package]] -name = "icu_normalizer_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" - [[package]] name = "icu_normalizer_data" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" +checksum = 
"00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "1.5.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "f5a97b8ac6235e69506e8dacfb2adf38461d2ce6d3e9bd9c94c4cbc3cd4400a4" dependencies = [ "displaydoc", - "icu_collections 1.5.0", - "icu_locid_transform", - "icu_properties_data 1.5.1", - "icu_provider 1.5.0", - "tinystr 0.7.6", - "zerovec 0.10.4", -] - -[[package]] -name = "icu_properties" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" -dependencies = [ - "icu_collections 2.1.1", + "icu_collections", "icu_locale_core", - "icu_properties_data 2.1.1", - "icu_provider 2.1.1", + "icu_properties_data", + "icu_provider", + "potential_utf", "zerotrie", - "zerovec 0.11.5", + "zerovec", ] [[package]] name = "icu_properties_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" - -[[package]] -name = "icu_properties_data" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" - -[[package]] -name = "icu_provider" -version = "1.5.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr 0.7.6", - "writeable 0.5.5", - "yoke 0.7.5", - "zerofrom", - "zerovec 0.10.4", -] +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" @@ -4720,22 +4749,13 @@ checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" dependencies = [ "displaydoc", "icu_locale_core", - "writeable 0.6.2", - "yoke 0.8.1", + "serde", + "stable_deref_trait", + "writeable", + "yoke", "zerofrom", "zerotrie", - "zerovec 0.11.5", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.108", + "zerovec", ] [[package]] @@ -4761,8 +4781,8 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ - "icu_normalizer 2.1.1", - "icu_properties 2.1.1", + "icu_normalizer", + "icu_properties", ] [[package]] @@ -5417,12 +5437,6 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" -[[package]] -name = "litemap" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" - [[package]] name = "litemap" version = "0.8.1" @@ -6138,9 +6152,9 @@ dependencies = [ [[package]] name = "op-revm" -version = "11.3.0" +version = "12.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f68e30e34902f61fc053ea3094229d0bf7c78ed1d24e6d0d89306c2d2db1687" +checksum = 
"9e599c71e91670fb922e3cdcb04783caed1226352da19d674bd001b3bf2bc433" dependencies = [ "auto_impl", "revm", @@ -6387,37 +6401,17 @@ dependencies = [ "rustc_version 0.4.1", ] -[[package]] -name = "phf" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" -dependencies = [ - "phf_macros 0.11.3", - "phf_shared 0.11.3", -] - [[package]] name = "phf" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" dependencies = [ - "phf_macros 0.13.1", - "phf_shared 0.13.1", + "phf_macros", + "phf_shared", "serde", ] -[[package]] -name = "phf_generator" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" -dependencies = [ - "phf_shared 0.11.3", - "rand 0.8.5", -] - [[package]] name = "phf_generator" version = "0.13.1" @@ -6425,20 +6419,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737" dependencies = [ "fastrand 2.3.0", - "phf_shared 0.13.1", -] - -[[package]] -name = "phf_macros" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" -dependencies = [ - "phf_generator 0.11.3", - "phf_shared 0.11.3", - "proc-macro2", - "quote", - "syn 2.0.108", + "phf_shared", ] [[package]] @@ -6447,22 +6428,13 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "812f032b54b1e759ccd5f8b6677695d5268c588701effba24601f6932f8269ef" dependencies = [ - "phf_generator 0.13.1", - "phf_shared 0.13.1", + "phf_generator", + "phf_shared", "proc-macro2", "quote", "syn 2.0.108", ] -[[package]] -name = "phf_shared" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" -dependencies = [ - "siphasher", -] - [[package]] name = "phf_shared" version = "0.13.1" @@ -6557,12 +6529,6 @@ dependencies = [ "plotters-backend", ] -[[package]] -name = "pollster" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f3a9f18d041e6d0e102a0a46750538147e5e8992d3b4873aaafee2520b00ce3" - [[package]] name = "polyval" version = "0.6.2" @@ -6587,7 +6553,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" dependencies = [ - "zerovec 0.11.5", + "zerovec", ] [[package]] @@ -10851,9 +10817,9 @@ dependencies = [ [[package]] name = "revm" -version = "30.2.0" +version = "31.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76df793c6ef3bef8f88f05b3873ebebce1494385a3ce8f58ad2e2e111aa0de11" +checksum = "f7bba993ce958f0b6eb23d2644ea8360982cb60baffedf961441e36faba6a2ca" dependencies = [ "revm-bytecode", "revm-context", @@ -10875,16 +10841,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f2b51c414b7e79edd4a0569d06e2c4c029f8b60e5f3ee3e2fa21dc6c3717ee3" dependencies = [ "bitvec", - "phf 0.13.1", + "phf", "revm-primitives", "serde", ] [[package]] name = "revm-context" -version = "10.1.2" +version = "11.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "7adcce0c14cf59b7128de34185a0fbf8f63309539b9263b35ead870d73584114" +checksum = "f69efee45130bd9e5b0a7af27552fddc70bc161dafed533c2f818a2d1eb654e6" dependencies = [ "bitvec", "cfg-if", @@ -10899,9 +10865,9 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "11.1.2" +version = "12.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d620a9725e443c171fb195a074331fa4a745fa5cbb0018b4bbf42619e64b563" +checksum = "5ce2525e93db0ae2a3ec7dcde5443dfdb6fbf321c5090380d775730c67bc6cee" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -10942,9 +10908,9 @@ dependencies = [ [[package]] name = "revm-handler" -version = "11.2.0" +version = "12.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d8049b2fbff6636150f4740c95369aa174e41b0383034e0e256cfdffcfcd23" +checksum = "e756198d43b6c4c5886548ffbc4594412d1a82b81723525c6e85ed6da0e91c5f" dependencies = [ "auto_impl", "derive-where", @@ -10961,9 +10927,9 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "11.2.0" +version = "12.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2a21dd773b654ec7e080025eecef4ac84c711150d1bd36acadf0546f471329a" +checksum = "c3fdd1e74cc99c6173c8692b6e480291e2ad0c21c716d9dc16e937ab2e0da219" dependencies = [ "auto_impl", "either", @@ -10979,9 +10945,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.31.2" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "782c38fa94f99b4b15f1690bffc2c3cbf06a0f460cf163b470d126914b47d343" +checksum = "21caa99f22184a6818946362778cccd3ff02f743c1e085bee87700671570ecb7" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -10999,9 +10965,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "28.0.0" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1de5c790122f8ded67992312af8acd41ccfcee629b25b819e10c5b1f69caf57" +checksum = "44efb7c2f4034a5bfd3d71ebfed076e48ac75e4972f1c117f2a20befac7716cd" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -11012,9 +10978,9 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "28.1.1" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e57aadd7a2087705f653b5aaacc8ad4f8e851f5d330661e3f4c43b5475bbceae" +checksum = "585098ede6d84d6fc6096ba804b8e221c44dc77679571d32664a55e665aa236b" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11262,6 +11228,15 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver 0.9.0", +] + [[package]] name = "rustc_version" version = "0.3.3" @@ -11555,13 +11530,22 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser 0.7.0", +] + [[package]] name = "semver" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ - "semver-parser", + 
"semver-parser 0.10.3", ] [[package]] @@ -11574,6 +11558,12 @@ dependencies = [ "serde_core", ] +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + [[package]] name = "semver-parser" version = "0.10.3" @@ -11899,6 +11889,15 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" +[[package]] +name = "small_btree" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ba60d2df92ba73864714808ca68c059734853e6ab722b40e1cf543ebb3a057a" +dependencies = [ + "arrayvec", +] + [[package]] name = "smallvec" version = "1.15.1" @@ -11970,6 +11969,12 @@ dependencies = [ "sha1", ] +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" + [[package]] name = "spki" version = "0.7.3" @@ -11980,12 +11985,6 @@ dependencies = [ "der", ] -[[package]] -name = "sptr" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" - [[package]] name = "stable_deref_trait" version = "1.2.1" @@ -12120,6 +12119,12 @@ dependencies = [ "windows 0.57.0", ] +[[package]] +name = "tag_ptr" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0e973b34477b7823833469eb0f5a3a60370fef7a453e02d751b59180d0a5a05" + [[package]] name = "tagptr" version = "0.2.0" @@ -12390,16 +12395,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "tinystr" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" -dependencies = [ - "displaydoc", - "zerovec 0.10.4", -] - [[package]] name = "tinystr" version = "0.8.2" @@ -12407,7 +12402,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" dependencies = [ "displaydoc", - "zerovec 0.11.5", + "serde_core", + "zerovec", ] [[package]] @@ -13977,12 +13973,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" -[[package]] -name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - [[package]] name = "writeable" version = "0.6.2" @@ -14028,22 +14018,16 @@ dependencies = [ ] [[package]] -name = "yansi" -version = "1.0.1" +name = "xsum" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" +checksum = "0637d3a5566a82fa5214bae89087bc8c9fb94cd8e8a3c07feb691bb8d9c632db" [[package]] -name = "yoke" -version = "0.7.5" +name = "yansi" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" -dependencies = [ - "serde", - "stable_deref_trait", - "yoke-derive 0.7.5", - "zerofrom", -] +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" @@ 
-14052,22 +14036,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" dependencies = [ "stable_deref_trait", - "yoke-derive 0.8.1", + "yoke-derive", "zerofrom", ] -[[package]] -name = "yoke-derive" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.108", - "synstructure", -] - [[package]] name = "yoke-derive" version = "0.8.1" @@ -14148,19 +14120,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" dependencies = [ "displaydoc", - "yoke 0.8.1", - "zerofrom", -] - -[[package]] -name = "zerovec" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" -dependencies = [ - "yoke 0.7.5", + "yoke", "zerofrom", - "zerovec-derive 0.10.3", ] [[package]] @@ -14169,20 +14130,10 @@ version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" dependencies = [ - "yoke 0.8.1", + "serde", + "yoke", "zerofrom", - "zerovec-derive 0.11.2", -] - -[[package]] -name = "zerovec-derive" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.108", + "zerovec-derive", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 7afd6716dfc..6f26dcc4774 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -463,24 +463,24 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { version = "30.2.0", default-features = false } -revm-bytecode = { version = "7.0.2", default-features = false } -revm-database = { version = "9.0.2", default-features = false } -revm-state = { version = "8.0.2", default-features = false } +revm = { version = "31.0.0", default-features = false } +revm-bytecode = { version = "7.1.0", default-features = false } +revm-database = { version = "9.0.3", default-features = false } +revm-state = { version = "8.1.0", default-features = false } revm-primitives = { version = "21.0.1", default-features = false } -revm-interpreter = { version = "28.0.0", default-features = false } -revm-inspector = { version = "11.1.2", default-features = false } -revm-context = { version = "10.1.2", default-features = false } -revm-context-interface = { version = "11.1.2", default-features = false } -revm-database-interface = { version = "8.0.3", default-features = false } -op-revm = { version = "11.3.0", default-features = false } -revm-inspectors = "0.31.0" +revm-interpreter = { version = "29.0.0", default-features = false } +revm-inspector = { version = "12.0.0", default-features = false } +revm-context = { version = "11.0.0", default-features = false } +revm-context-interface = { version = "12.0.0", default-features = false } +revm-database-interface = { version = "8.0.4", default-features = false } +op-revm = { version = "12.0.0", default-features = false } +revm-inspectors = "0.32.0" # eth alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.4.1" alloy-eip2124 = { version = "0.2.0", default-features = false 
} -alloy-evm = { version = "0.22.5", default-features = false } +alloy-evm = { version = "0.23.0", default-features = false } alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.4.1" @@ -518,7 +518,7 @@ alloy-transport-ipc = { version = "1.0.41", default-features = false } alloy-transport-ws = { version = "1.0.41", default-features = false } # op -alloy-op-evm = { version = "0.22.6", default-features = false } +alloy-op-evm = { version = "0.23.0", default-features = false } alloy-op-hardforks = "0.4.3" op-alloy-rpc-types = { version = "0.22.0", default-features = false } op-alloy-rpc-types-engine = { version = "0.22.0", default-features = false } diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index 40d34ef7cc0..2b5962460d6 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -67,6 +67,9 @@ pub enum OpInvalidTransactionError { /// A deposit transaction halted post-regolith #[error("deposit transaction halted after regolith")] HaltedDepositPostRegolith, + /// The encoded transaction was missing during evm execution. + #[error("missing enveloped transaction bytes")] + MissingEnvelopedTx, /// Transaction conditional errors. #[error(transparent)] TxConditionalErr(#[from] TxConditionalErr), @@ -76,7 +79,8 @@ impl From for jsonrpsee_types::error::ErrorObject<'st fn from(err: OpInvalidTransactionError) -> Self { match err { OpInvalidTransactionError::DepositSystemTxPostRegolith | - OpInvalidTransactionError::HaltedDepositPostRegolith => { + OpInvalidTransactionError::HaltedDepositPostRegolith | + OpInvalidTransactionError::MissingEnvelopedTx => { rpc_err(EthRpcErrorCode::TransactionRejected.code(), err.to_string(), None) } OpInvalidTransactionError::TxConditionalErr(_) => err.into(), @@ -93,6 +97,7 @@ impl TryFrom for OpInvalidTransactionError { Ok(Self::DepositSystemTxPostRegolith) } OpTransactionError::HaltedDepositPostRegolith => Ok(Self::HaltedDepositPostRegolith), + OpTransactionError::MissingEnvelopedTx => Ok(Self::MissingEnvelopedTx), OpTransactionError::Base(err) => Err(err), } } From 24fa984da4b487a62886526dfdc763b0fecafa74 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 3 Nov 2025 20:25:50 +0100 Subject: [PATCH 307/371] chore: add js-tracer feature to bins (#19441) --- .config/zepter.yaml | 2 +- bin/reth/Cargo.toml | 10 ++++++++-- crates/ethereum/node/Cargo.toml | 7 ++++++- crates/ethereum/reth/Cargo.toml | 8 +++++++- crates/node/builder/Cargo.toml | 6 +++++- crates/optimism/bin/Cargo.toml | 8 ++++++-- crates/optimism/node/Cargo.toml | 7 ++++++- crates/optimism/reth/Cargo.toml | 8 +++++++- crates/rpc/rpc/Cargo.toml | 6 +++++- 9 files changed, 51 insertions(+), 11 deletions(-) diff --git a/.config/zepter.yaml b/.config/zepter.yaml index 251c0892d4d..a4179c0a8fd 100644 --- a/.config/zepter.yaml +++ b/.config/zepter.yaml @@ -12,7 +12,7 @@ workflows: # Check that `A` activates the features of `B`. 
"propagate-feature", # These are the features to check: - "--features=std,op,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat,min-error-logs,min-warn-logs,min-info-logs,min-debug-logs,min-trace-logs,otlp", + "--features=std,op,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat,min-error-logs,min-warn-logs,min-info-logs,min-debug-logs,min-trace-logs,otlp,js-tracer", # Do not try to add a new section to `[features]` of `A` only because `B` exposes that feature. There are edge-cases where this is still needed, but we can add them manually. "--left-side-feature-missing=ignore", # Ignore the case that `A` it outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on. diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 31d9294fec6..eb0cf0bd2b2 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -40,7 +40,7 @@ reth-node-api.workspace = true reth-node-core.workspace = true reth-ethereum-payload-builder.workspace = true reth-ethereum-primitives.workspace = true -reth-node-ethereum = { workspace = true, features = ["js-tracer"] } +reth-node-ethereum.workspace = true reth-node-builder.workspace = true reth-node-metrics.workspace = true reth-consensus.workspace = true @@ -67,12 +67,18 @@ backon.workspace = true tempfile.workspace = true [features] -default = ["jemalloc", "otlp", "reth-revm/portable"] +default = ["jemalloc", "otlp", "reth-revm/portable", "js-tracer"] otlp = [ "reth-ethereum-cli/otlp", "reth-node-core/otlp", ] +js-tracer = [ + "reth-node-builder/js-tracer", + "reth-node-ethereum/js-tracer", + "reth-rpc/js-tracer", + "reth-rpc-eth-types/js-tracer", +] dev = ["reth-ethereum-cli/dev"] diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 1594c6fad96..575934007f9 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -82,7 +82,12 @@ asm-keccak = [ "reth-node-core/asm-keccak", "revm/asm-keccak", ] -js-tracer = ["reth-node-builder/js-tracer"] +js-tracer = [ + "reth-node-builder/js-tracer", + "reth-rpc/js-tracer", + "reth-rpc-eth-api/js-tracer", + "reth-rpc-eth-types/js-tracer", +] test-utils = [ "reth-node-builder/test-utils", "reth-chainspec/test-utils", diff --git a/crates/ethereum/reth/Cargo.toml b/crates/ethereum/reth/Cargo.toml index 959b7c1b65f..0d57abf6f20 100644 --- a/crates/ethereum/reth/Cargo.toml +++ b/crates/ethereum/reth/Cargo.toml @@ -144,7 +144,13 @@ rpc = [ "dep:alloy-rpc-types-engine", ] tasks = ["dep:reth-tasks"] -js-tracer = ["rpc", "reth-rpc/js-tracer"] +js-tracer = [ + "rpc", + "reth-rpc/js-tracer", + "reth-node-builder?/js-tracer", + "reth-node-ethereum?/js-tracer", + "reth-rpc-eth-types?/js-tracer", +] network = ["dep:reth-network", "tasks", "dep:reth-network-api", "dep:reth-eth-wire"] provider = ["storage-api", "tasks", "dep:reth-provider", "dep:reth-db", "dep:reth-codecs"] storage-api = ["dep:reth-storage-api"] diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index c1224d35e5a..8e8774e86c8 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -95,7 +95,11 @@ reth-evm-ethereum = { workspace = true, features = ["test-utils"] } [features] default = [] -js-tracer = ["reth-rpc/js-tracer"] +js-tracer = [ + "reth-rpc/js-tracer", + "reth-node-ethereum/js-tracer", + "reth-rpc-eth-types/js-tracer", +] test-utils = [ "reth-db/test-utils", 
"reth-chain-state/test-utils", diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 568ed8aabfe..ef203df0fc0 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -12,7 +12,7 @@ exclude.workspace = true reth-cli-util.workspace = true reth-optimism-cli.workspace = true reth-optimism-rpc.workspace = true -reth-optimism-node = { workspace = true, features = ["js-tracer"] } +reth-optimism-node.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-evm.workspace = true @@ -27,10 +27,14 @@ tracing.workspace = true workspace = true [features] -default = ["jemalloc", "otlp", "reth-optimism-evm/portable"] +default = ["jemalloc", "otlp", "reth-optimism-evm/portable", "js-tracer"] otlp = ["reth-optimism-cli/otlp"] +js-tracer = [ + "reth-optimism-node/js-tracer", +] + jemalloc = ["reth-cli-util/jemalloc", "reth-optimism-cli/jemalloc"] jemalloc-prof = ["reth-cli-util/jemalloc-prof"] tracy-allocator = ["reth-cli-util/tracy-allocator"] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index fdccffb869b..0576b3897f5 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -93,7 +93,12 @@ asm-keccak = [ "reth-node-core/asm-keccak", "revm/asm-keccak", ] -js-tracer = ["reth-node-builder/js-tracer"] +js-tracer = [ + "reth-node-builder/js-tracer", + "reth-optimism-node/js-tracer", + "reth-rpc/js-tracer", + "reth-rpc-eth-types/js-tracer", +] test-utils = [ "reth-tasks", "reth-e2e-test-utils", diff --git a/crates/optimism/reth/Cargo.toml b/crates/optimism/reth/Cargo.toml index 384eca45b8c..d120f04f614 100644 --- a/crates/optimism/reth/Cargo.toml +++ b/crates/optimism/reth/Cargo.toml @@ -126,7 +126,13 @@ rpc = [ "dep:reth-optimism-rpc", ] tasks = ["dep:reth-tasks"] -js-tracer = ["rpc", "reth-rpc/js-tracer"] +js-tracer = [ + "rpc", + "reth-rpc/js-tracer", + "reth-node-builder?/js-tracer", + "reth-optimism-node?/js-tracer", + "reth-rpc-eth-types?/js-tracer", +] network = ["dep:reth-network", "tasks", "dep:reth-network-api", "dep:reth-eth-wire"] provider = ["storage-api", "tasks", "dep:reth-provider", "dep:reth-db", "dep:reth-codecs"] pool = ["dep:reth-transaction-pool"] diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 81df4bff44f..a47fa5ebcdf 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -106,4 +106,8 @@ rand.workspace = true jsonrpsee = { workspace = true, features = ["client"] } [features] -js-tracer = ["revm-inspectors/js-tracer", "reth-rpc-eth-types/js-tracer"] +js-tracer = [ + "revm-inspectors/js-tracer", + "reth-rpc-eth-types/js-tracer", + "reth-rpc-eth-api/js-tracer", +] From 66957c7902f4408cd311ecb2c760f40cad5180a4 Mon Sep 17 00:00:00 2001 From: MIHAO PARK Date: Mon, 3 Nov 2025 20:47:35 +0100 Subject: [PATCH 308/371] chore(node): compact duration formatting in stage progress logs (#18720) --- crates/node/events/src/node.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 02c7709819e..20ac4394b4f 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -596,6 +596,8 @@ impl Display for Eta { f, "{}", humantime::format_duration(Duration::from_secs(remaining.as_secs())) + .to_string() + .replace(' ', "") ) } } @@ -621,6 +623,6 @@ mod tests { } .to_string(); - assert_eq!(eta, "13m 37s"); + assert_eq!(eta, "13m37s"); } } From c9897ad2301f76f6fb246a809856992e9a692b93 Mon Sep 
17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 3 Nov 2025 19:53:10 +0000 Subject: [PATCH 309/371] fix: update `min_block` on `StaticFileProvider::update_index` (#19469) --- .../prune/prune/src/segments/user/bodies.rs | 117 ++++++++++++++++++ crates/storage/nippy-jar/src/lib.rs | 3 + .../src/providers/static_file/manager.rs | 34 ++++- 3 files changed, 153 insertions(+), 1 deletion(-) diff --git a/crates/prune/prune/src/segments/user/bodies.rs b/crates/prune/prune/src/segments/user/bodies.rs index db050234d96..0a6a432754b 100644 --- a/crates/prune/prune/src/segments/user/bodies.rs +++ b/crates/prune/prune/src/segments/user/bodies.rs @@ -207,4 +207,121 @@ mod tests { run_prune_test(&factory, &finished_exex_height_rx, test_case, tip); } } + + #[test] + fn min_block_updated_on_sync() { + // Regression test: update_index must update min_block to prevent stale values + // that can cause pruner to incorrectly delete static files when PruneMode::Before(0) is + // used. + + struct MinBlockTestCase { + // Block range + initial_range: Option, + updated_range: SegmentRangeInclusive, + // Min block + expected_before_update: Option, + expected_after_update: BlockNumber, + // Test delete_segment_below_block with this value + delete_below_block: BlockNumber, + // Expected number of deleted segments + expected_deleted: usize, + } + + let test_cases = vec![ + // Test 1: Empty initial state (None) -> syncs to block 100 + MinBlockTestCase { + initial_range: None, + updated_range: SegmentRangeInclusive::new(0, 100), + expected_before_update: None, + expected_after_update: 100, + delete_below_block: 1, + expected_deleted: 0, + }, + // Test 2: Genesis state [0..=0] -> syncs to block 100 (eg. op-reth node after op-reth + // init-state) + MinBlockTestCase { + initial_range: Some(SegmentRangeInclusive::new(0, 0)), + updated_range: SegmentRangeInclusive::new(0, 100), + expected_before_update: Some(0), + expected_after_update: 100, + delete_below_block: 1, + expected_deleted: 0, + }, + // Test 3: Existing state [0..=50] -> syncs to block 200 + MinBlockTestCase { + initial_range: Some(SegmentRangeInclusive::new(0, 50)), + updated_range: SegmentRangeInclusive::new(0, 200), + expected_before_update: Some(50), + expected_after_update: 200, + delete_below_block: 150, + expected_deleted: 0, + }, + ]; + + for ( + idx, + MinBlockTestCase { + initial_range, + updated_range, + expected_before_update, + expected_after_update, + delete_below_block, + expected_deleted, + }, + ) in test_cases.into_iter().enumerate() + { + let factory = create_test_provider_factory(); + let static_provider = factory.static_file_provider(); + + let mut writer = + static_provider.latest_writer(StaticFileSegment::Transactions).unwrap(); + + // Set up initial state if provided + if let Some(initial_range) = initial_range { + *writer.user_header_mut() = SegmentHeader::new( + initial_range, + Some(initial_range), + Some(initial_range), + StaticFileSegment::Transactions, + ); + writer.inner().set_dirty(); + writer.commit().unwrap(); + static_provider.initialize_index().unwrap(); + } + + // Verify initial state + assert_eq!( + static_provider.get_lowest_static_file_block(StaticFileSegment::Transactions), + expected_before_update, + "Test case {}: Initial min_block mismatch", + idx + ); + + // Update to new range + *writer.user_header_mut() = SegmentHeader::new( + updated_range, + Some(updated_range), + Some(updated_range), + StaticFileSegment::Transactions, + ); + writer.inner().set_dirty(); + 
writer.commit().unwrap(); // update_index is called inside + + // Verify min_block was updated (not stuck at stale value) + assert_eq!( + static_provider.get_lowest_static_file_block(StaticFileSegment::Transactions), + Some(expected_after_update), + "Test case {}: min_block should be updated to {} (not stuck at stale value)", + idx, + expected_after_update + ); + + // Verify delete_segment_below_block behaves correctly with updated min_block + let deleted = static_provider + .delete_segment_below_block(StaticFileSegment::Transactions, delete_below_block) + .unwrap(); + + assert_eq!(deleted.len(), expected_deleted); + } + } } diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 4f6b4df0006..1731dc87d04 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -200,6 +200,9 @@ impl NippyJar { // Read [`Self`] located at the data file. let config_path = path.with_extension(CONFIG_FILE_EXTENSION); let config_file = File::open(&config_path) + .inspect_err(|e| { + warn!( ?path, %e, "Failed to load static file jar"); + }) .map_err(|err| reth_fs_util::FsPathError::open(err, config_path))?; let mut obj = Self::load_from_reader(config_file)?; diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 28d13cfbe29..f9f0e688687 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -229,7 +229,7 @@ pub struct StaticFileProviderInner { map: DashMap<(BlockNumber, StaticFileSegment), LoadedJar>, /// Min static file range for each segment. /// This index is initialized on launch to keep track of the lowest, non-expired static file - /// per segment. + /// per segment and gets updated on `Self::update_index()`. /// /// This tracks the lowest static file per segment together with the block range in that /// file. E.g. static file is batched in 500k block intervals then the lowest static file @@ -530,6 +530,8 @@ impl StaticFileProvider { let header = jar.user_header().clone(); jar.delete().map_err(ProviderError::other)?; + // SAFETY: this is currently necessary to ensure that certain indexes like + // `static_files_min_block` have the correct values after pruning. self.initialize_index()?; Ok(header) @@ -615,6 +617,7 @@ impl StaticFileProvider { segment: StaticFileSegment, segment_max_block: Option, ) -> ProviderResult<()> { + let mut min_block = self.static_files_min_block.write(); let mut max_block = self.static_files_max_block.write(); let mut tx_index = self.static_files_tx_index.write(); @@ -629,6 +632,34 @@ impl StaticFileProvider { ) .map_err(ProviderError::other)?; + // Update min_block to track the lowest block range of the segment. + // This is initially set by initialize_index() on node startup, but must be updated + // as the file grows to prevent stale values. + // + // Without this update, min_block can remain at genesis (e.g. Some([0..=0]) or None) + // even after syncing to higher blocks (e.g. [0..=100]). A stale + // min_block causes get_lowest_static_file_block() to return the + // wrong end value, which breaks pruning logic that relies on it for + // safety checks. + // + // Example progression: + // 1. Node starts, initialize_index() sets min_block = [0..=0] + // 2. Sync to block 100, this update sets min_block = [0..=100] + // 3. Pruner calls get_lowest_static_file_block() -> returns 100 (correct). 
Without + // this update, it would incorrectly return 0 (stale) + if let Some(current_block_range) = jar.user_header().block_range().copied() { + min_block + .entry(segment) + .and_modify(|current_min| { + // delete_jar WILL ALWAYS re-initialize all indexes, so we are always + // sure that current_min is always the lowest. + if current_block_range.start() == current_min.start() { + *current_min = current_block_range; + } + }) + .or_insert(current_block_range); + } + // Updates the tx index by first removing all entries which have a higher // block_start than our current static file. if let Some(tx_range) = jar.user_header().tx_range() { @@ -678,6 +709,7 @@ impl StaticFileProvider { None => { tx_index.remove(&segment); max_block.remove(&segment); + min_block.remove(&segment); } }; From bb694fb576e32eeb2afba3f36425f918e3808c0b Mon Sep 17 00:00:00 2001 From: MIHAO PARK Date: Mon, 3 Nov 2025 21:41:23 +0100 Subject: [PATCH 310/371] chore(grafana): deduce label by aggregate metrics (#18550) --- etc/grafana/dashboards/overview.json | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 591470bad23..6d9563ffd2d 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -674,7 +674,7 @@ }, "editorMode": "builder", "exemplar": false, - "expr": "reth_sync_checkpoint{$instance_label=\"$instance\"}", + "expr": "max by (stage) (reth_sync_checkpoint{$instance_label=\"$instance\"})", "instant": true, "legendFormat": "{{stage}}", "range": false, @@ -910,7 +910,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_sync_entities_processed{$instance_label=\"$instance\"} / reth_sync_entities_total{$instance_label=\"$instance\"}", + "expr": "avg by (stage) (reth_sync_entities_processed{$instance_label=\"$instance\"} / reth_sync_entities_total{$instance_label=\"$instance\"})", "legendFormat": "{{stage}}", "range": true, "refId": "A" @@ -1008,7 +1008,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_sync_checkpoint{$instance_label=\"$instance\"}", + "expr": "max by (stage) (reth_sync_checkpoint{$instance_label=\"$instance\"})", "legendFormat": "{{stage}}", "range": true, "refId": "A" @@ -2342,7 +2342,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_sync_block_validation_state_root_duration{$instance_label=\"$instance\"}", + "expr": "avg(reth_sync_block_validation_state_root_duration{$instance_label=\"$instance\"})", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -2358,7 +2358,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_sync_execution_execution_duration{$instance_label=\"$instance\"}", + "expr": "avg(reth_sync_execution_execution_duration{$instance_label=\"$instance\"})", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -3535,7 +3535,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_sync_block_validation_state_root_duration{$instance_label=\"$instance\"}", + "expr": "avg(reth_sync_block_validation_state_root_duration{$instance_label=\"$instance\"})", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -3551,7 +3551,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_sync_execution_execution_duration{$instance_label=\"$instance\"}", + "expr": "avg(reth_sync_execution_execution_duration{$instance_label=\"$instance\"})", "fullMetaSearch": false, 
"hide": false, "includeNullMetadata": true, @@ -3796,7 +3796,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "reth_sync_block_validation_trie_input_duration{$instance_label=\"$instance\", quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "avg by(quantile) (reth_sync_block_validation_trie_input_duration{$instance_label=\"$instance\", quantile=~\"(0|0.5|0.9|0.95|1)\"})", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -4141,7 +4141,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_tree_root_proof_calculation_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "avg by (quantile) (reth_tree_root_proof_calculation_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"})", "instant": false, "legendFormat": "{{quantile}} percentile", "range": true, @@ -4923,7 +4923,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_tree_root_multiproof_task_total_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "avg by (quantile) (reth_tree_root_multiproof_task_total_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"})", "hide": false, "instant": false, "legendFormat": "Task duration {{quantile}} percentile", From 3f2432761bf0cfeef3b3d68bd535bc8c967d823c Mon Sep 17 00:00:00 2001 From: Doryu Date: Mon, 3 Nov 2025 22:40:55 +0100 Subject: [PATCH 311/371] chore: Remove unused jsonrpsee tracing import in exex subscription example (#19448) --- examples/exex-subscription/src/main.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/exex-subscription/src/main.rs b/examples/exex-subscription/src/main.rs index 2f0c38f3852..e39408a3dc0 100644 --- a/examples/exex-subscription/src/main.rs +++ b/examples/exex-subscription/src/main.rs @@ -6,8 +6,7 @@ use alloy_primitives::{Address, U256}; use futures::TryStreamExt; use jsonrpsee::{ - core::SubscriptionResult, proc_macros::rpc, tracing, PendingSubscriptionSink, - SubscriptionMessage, + core::SubscriptionResult, proc_macros::rpc, PendingSubscriptionSink, SubscriptionMessage, }; use reth_ethereum::{ exex::{ExExContext, ExExEvent, ExExNotification}, From 0c00c1b48afb34c0d4a68cbf8fd1a109f93fe403 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 4 Nov 2025 01:35:07 +0100 Subject: [PATCH 312/371] chore: add --miner.gaslimit alias (#19475) --- crates/node/core/src/args/payload_builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/node/core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs index f751bcc070c..ca7befc0f08 100644 --- a/crates/node/core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -17,7 +17,7 @@ pub struct PayloadBuilderArgs { pub extra_data: String, /// Target gas limit for built blocks. - #[arg(long = "builder.gaslimit", value_name = "GAS_LIMIT")] + #[arg(long = "builder.gaslimit", alias = "miner.gaslimit", value_name = "GAS_LIMIT")] pub gas_limit: Option, /// The interval at which the job should build a new payload after the last. 
From a311423292b9d000987192f3e3744e92d6a42e32 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 4 Nov 2025 01:35:39 +0100 Subject: [PATCH 313/371] chore: add queued reason to event (#19476) --- crates/transaction-pool/src/pool/events.rs | 7 +++++-- crates/transaction-pool/src/pool/listener.rs | 14 +++++++++++--- crates/transaction-pool/src/pool/mod.rs | 4 ++-- crates/transaction-pool/tests/it/listeners.rs | 2 +- 4 files changed, 19 insertions(+), 8 deletions(-) diff --git a/crates/transaction-pool/src/pool/events.rs b/crates/transaction-pool/src/pool/events.rs index 89cfc95bdfe..f6bdd4a4d04 100644 --- a/crates/transaction-pool/src/pool/events.rs +++ b/crates/transaction-pool/src/pool/events.rs @@ -2,6 +2,7 @@ use crate::{traits::PropagateKind, PoolTransaction, SubPool, ValidPoolTransactio use alloy_primitives::{TxHash, B256}; use std::sync::Arc; +use crate::pool::QueuedReason; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -11,7 +12,9 @@ pub enum FullTransactionEvent { /// Transaction has been added to the pending pool. Pending(TxHash), /// Transaction has been added to the queued pool. - Queued(TxHash), + /// + /// If applicable, attached the specific reason why this was queued. + Queued(TxHash, Option), /// Transaction has been included in the block belonging to this hash. Mined { /// The hash of the mined transaction. @@ -40,7 +43,7 @@ impl Clone for FullTransactionEvent { fn clone(&self) -> Self { match self { Self::Pending(hash) => Self::Pending(*hash), - Self::Queued(hash) => Self::Queued(*hash), + Self::Queued(hash, reason) => Self::Queued(*hash, reason.clone()), Self::Mined { tx_hash, block_hash } => { Self::Mined { tx_hash: *tx_hash, block_hash: *block_hash } } diff --git a/crates/transaction-pool/src/pool/listener.rs b/crates/transaction-pool/src/pool/listener.rs index 64eb756f38a..123c6cf956a 100644 --- a/crates/transaction-pool/src/pool/listener.rs +++ b/crates/transaction-pool/src/pool/listener.rs @@ -1,7 +1,10 @@ //! Listeners for the transaction-pool use crate::{ - pool::events::{FullTransactionEvent, NewTransactionEvent, TransactionEvent}, + pool::{ + events::{FullTransactionEvent, NewTransactionEvent, TransactionEvent}, + QueuedReason, + }, traits::{NewBlobSidecar, PropagateKind}, PoolTransaction, ValidPoolTransaction, }; @@ -17,6 +20,7 @@ use tokio::sync::mpsc::{ self as mpsc, error::TrySendError, Receiver, Sender, UnboundedReceiver, UnboundedSender, }; use tracing::debug; + /// The size of the event channel used to propagate transaction events. const TX_POOL_EVENT_CHANNEL_SIZE: usize = 1024; @@ -164,8 +168,12 @@ impl PoolEventBroadcast { } /// Notify listeners about a transaction that was added to the queued pool. - pub(crate) fn queued(&mut self, tx: &TxHash) { - self.broadcast_event(tx, TransactionEvent::Queued, FullTransactionEvent::Queued(*tx)); + pub(crate) fn queued(&mut self, tx: &TxHash, reason: Option) { + self.broadcast_event( + tx, + TransactionEvent::Queued, + FullTransactionEvent::Queued(*tx, reason), + ); } /// Notify listeners about a transaction that was propagated. diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 04f0e6e0b31..50d959a4757 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -747,8 +747,8 @@ where listener.discarded(tx.hash()); } } - AddedTransaction::Parked { transaction, replaced, .. } => { - listener.queued(transaction.hash()); + AddedTransaction::Parked { transaction, replaced, queued_reason, .. 
} => { + listener.queued(transaction.hash(), queued_reason.clone()); if let Some(replaced) = replaced { listener.replaced(replaced.clone(), *transaction.hash()); } diff --git a/crates/transaction-pool/tests/it/listeners.rs b/crates/transaction-pool/tests/it/listeners.rs index d0a9c9c5aa8..105caae12b4 100644 --- a/crates/transaction-pool/tests/it/listeners.rs +++ b/crates/transaction-pool/tests/it/listeners.rs @@ -82,7 +82,7 @@ async fn txpool_listener_queued_event() { assert_matches!(events.next().await, Some(TransactionEvent::Queued)); // The listener of all should receive queued event as well. - assert_matches!(all_tx_events.next().await, Some(FullTransactionEvent::Queued(hash)) if hash == *transaction.get_hash()); + assert_matches!(all_tx_events.next().await, Some(FullTransactionEvent::Queued(hash,_ )) if hash == *transaction.get_hash()); } #[tokio::test(flavor = "multi_thread")] From f3cf8d5e1033c824aba91e1be1b5cd3d27fa719e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 4 Nov 2025 09:51:33 +0100 Subject: [PATCH 314/371] feat: add helper to disable discovery (#19478) --- crates/node/core/src/node_config.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index c69593adf07..64b469086e7 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -427,6 +427,12 @@ impl NodeConfig { self } + /// Disables all discovery services for the node. + pub const fn with_disabled_discovery(mut self) -> Self { + self.network.discovery.disable_discovery = true; + self + } + /// Effectively disables the RPC state cache by setting the cache sizes to `0`. /// /// By setting the cache sizes to 0, caching of newly executed or fetched blocks will be From 44e99e56f0d102af2983d687bebc29f9437c01ee Mon Sep 17 00:00:00 2001 From: Block Wizard Date: Tue, 4 Nov 2025 11:05:27 +0200 Subject: [PATCH 315/371] fix(net): remove capacity inflation from buffered blocks size calculation (#19481) --- crates/net/downloaders/src/bodies/bodies.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 153f269fe41..5d6bd3cf7f8 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -21,7 +21,6 @@ use std::{ cmp::Ordering, collections::BinaryHeap, fmt::Debug, - mem, ops::RangeInclusive, pin::Pin, sync::Arc, @@ -215,9 +214,7 @@ where /// Adds a new response to the internal buffer fn buffer_bodies_response(&mut self, response: Vec>) { - // take into account capacity - let size = response.iter().map(BlockResponse::size).sum::() + - response.capacity() * mem::size_of::>(); + let size = response.iter().map(BlockResponse::size).sum::(); let response = OrderedBodiesResponse { resp: response, size }; let response_len = response.len(); From 6021a68dab093f7b36c3b188634dea409bca1737 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 4 Nov 2025 11:08:52 +0100 Subject: [PATCH 316/371] perf(rpc): use cache for latest block and receipts (#19483) --- crates/optimism/rpc/src/eth/pending_block.rs | 57 +++++++++----------- 1 file changed, 25 insertions(+), 32 deletions(-) diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 151668f4039..88bf2496592 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -6,16 +6,13 @@ use 
alloy_eips::BlockNumberOrTag; use reth_chain_state::BlockState; use reth_rpc_eth_api::{ helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock, SpawnBlocking}, - FromEvmError, RpcConvert, RpcNodeCore, + FromEvmError, RpcConvert, RpcNodeCore, RpcNodeCoreExt, }; use reth_rpc_eth_types::{ block::BlockAndReceipts, builder::config::PendingBlockKind, error::FromEthApiError, EthApiError, PendingBlock, }; -use reth_storage_api::{ - BlockReader, BlockReaderIdExt, ReceiptProvider, StateProviderBox, StateProviderFactory, -}; -use std::sync::Arc; +use reth_storage_api::{BlockReaderIdExt, StateProviderBox, StateProviderFactory}; impl LoadPendingBlock for OpEthApi where @@ -38,33 +35,6 @@ where self.inner.eth_api.pending_block_kind() } - /// Returns the locally built pending block - async fn local_pending_block( - &self, - ) -> Result>, Self::Error> { - if let Ok(Some(pending)) = self.pending_flashblock().await { - return Ok(Some(pending.into_block_and_receipts())); - } - - // See: - let latest = self - .provider() - .latest_header()? - .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; - let block_id = latest.hash().into(); - let block = self - .provider() - .recovered_block(block_id, Default::default())? - .ok_or(EthApiError::HeaderNotFound(block_id.into()))?; - - let receipts = self - .provider() - .receipts_by_block(block_id)? - .ok_or(EthApiError::ReceiptsNotFound(block_id.into()))?; - - Ok(Some(BlockAndReceipts { block: Arc::new(block), receipts: Arc::new(receipts) })) - } - /// Returns a [`StateProviderBox`] on a mem-pool built pending block overlaying latest. async fn local_pending_state(&self) -> Result, Self::Error> where @@ -83,4 +53,27 @@ where Ok(Some(Box::new(state.state_provider(latest_historical)) as StateProviderBox)) } + + /// Returns the locally built pending block + async fn local_pending_block( + &self, + ) -> Result>, Self::Error> { + if let Ok(Some(pending)) = self.pending_flashblock().await { + return Ok(Some(pending.into_block_and_receipts())); + } + + // See: + let latest = self + .provider() + .latest_header()? + .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; + + let latest = self + .cache() + .get_block_and_receipts(latest.hash()) + .await + .map_err(Self::Error::from_eth_err)? 
+ .map(|(block, receipts)| BlockAndReceipts { block, receipts }); + Ok(latest) + } } From 2cb4e1bd2a6eaf27dc6d288159f3149e9c909712 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 4 Nov 2025 14:30:42 +0100 Subject: [PATCH 317/371] perf: use latest hash directly (#19486) --- crates/optimism/rpc/src/eth/mod.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 84929e98852..5dc0abd6208 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -35,10 +35,8 @@ use reth_rpc_eth_api::{ EthApiTypes, FromEvmError, FullEthApiServer, RpcConvert, RpcConverter, RpcNodeCore, RpcNodeCoreExt, RpcTypes, }; -use reth_rpc_eth_types::{ - EthStateCache, FeeHistoryCache, GasPriceOracle, PendingBlock, PendingBlockEnvOrigin, -}; -use reth_storage_api::ProviderHeader; +use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle, PendingBlock}; +use reth_storage_api::{BlockReaderIdExt, ProviderHeader}; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, @@ -177,13 +175,11 @@ impl OpEthApi { OpEthApiError: FromEvmError, Rpc: RpcConvert, { - let pending = self.pending_block_env_and_cfg()?; - let parent = match pending.origin { - PendingBlockEnvOrigin::ActualPending(..) => return Ok(None), - PendingBlockEnvOrigin::DerivedFromLatest(parent) => parent, + let Some(latest) = self.provider().latest_header()? else { + return Ok(None); }; - self.flashblock(parent.hash()).await + self.flashblock(latest.hash()).await } } From 736a730a326a8a7c033a6d71721ca0321e61c4a2 Mon Sep 17 00:00:00 2001 From: Karl Yu <43113774+0xKarl98@users.noreply.github.com> Date: Tue, 4 Nov 2025 22:02:50 +0800 Subject: [PATCH 318/371] feat: support pending block tag in eth_getLogs for flashblocks (#19388) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/eth/filter.rs | 95 +++++++++++++++++++++++++++----- 1 file changed, 81 insertions(+), 14 deletions(-) diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 01b6a94158f..22b14d7a174 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -1,6 +1,7 @@ //! `eth_` `Filter` RPC handler implementation use alloy_consensus::BlockHeader; +use alloy_eips::BlockNumberOrTag; use alloy_primitives::{Sealable, TxHash}; use alloy_rpc_types_eth::{ BlockNumHash, Filter, FilterBlockOption, FilterChanges, FilterId, Log, @@ -17,6 +18,7 @@ use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_errors::ProviderError; use reth_primitives_traits::{NodePrimitives, SealedHeader}; use reth_rpc_eth_api::{ + helpers::{EthBlocks, LoadReceipt}, EngineEthFilter, EthApiTypes, EthFilterApiServer, FullEthApiTypes, QueryLimits, RpcConvert, RpcNodeCoreExt, RpcTransaction, }; @@ -48,7 +50,11 @@ use tracing::{debug, error, trace}; impl EngineEthFilter for EthFilter where - Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, + Eth: FullEthApiTypes + + RpcNodeCoreExt + + LoadReceipt + + EthBlocks + + 'static, { /// Returns logs matching given filter object, no query limits fn logs( @@ -193,7 +199,11 @@ where impl EthFilter where - Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, + Eth: FullEthApiTypes + + RpcNodeCoreExt + + LoadReceipt + + EthBlocks + + 'static, { /// Access the underlying provider. 
fn provider(&self) -> &Eth::Provider { @@ -315,7 +325,7 @@ where #[async_trait] impl EthFilterApiServer> for EthFilter where - Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, + Eth: FullEthApiTypes + RpcNodeCoreExt + LoadReceipt + EthBlocks + 'static, { /// Handler for `eth_newFilter` async fn new_filter(&self, filter: Filter) -> RpcResult { @@ -356,8 +366,6 @@ where } }; - //let filter = FilterKind::PendingTransaction(transaction_kind); - // Install the filter and propagate any errors self.inner.install_filter(transaction_kind).await } @@ -434,6 +442,8 @@ impl EthFilterInner where Eth: RpcNodeCoreExt + EthApiTypes + + LoadReceipt + + EthBlocks + 'static, { /// Access the underlying provider. @@ -487,10 +497,43 @@ where Ok(all_logs) } FilterBlockOption::Range { from_block, to_block } => { - // compute the range - let info = self.provider().chain_info()?; + // Handle special case where from block is pending + if from_block.is_some_and(|b| b.is_pending()) { + let to_block = to_block.unwrap_or(BlockNumberOrTag::Pending); + if !(to_block.is_pending() || to_block.is_number()) { + // always empty range + return Ok(Vec::new()); + } + // Try to get pending block and receipts + if let Ok(Some(pending_block)) = self.eth_api.local_pending_block().await { + if let BlockNumberOrTag::Number(to_block) = to_block && + to_block < pending_block.block.number() + { + // this block range is empty based on the user input + return Ok(Vec::new()); + } + + let info = self.provider().chain_info()?; + if pending_block.block.number() > info.best_number { + // only consider the pending block if it is ahead of the chain + let mut all_logs = Vec::new(); + let timestamp = pending_block.block.timestamp(); + let block_num_hash = pending_block.block.num_hash(); + append_matching_block_logs( + &mut all_logs, + ProviderOrBlock::::Block(pending_block.block), + &filter, + block_num_hash, + &pending_block.receipts, + false, // removed = false for pending blocks + timestamp, + )?; + return Ok(all_logs); + } + } + } - // we start at the most recent block if unset in filter + let info = self.provider().chain_info()?; let start_block = info.best_number; let from = from_block .map(|num| self.provider().convert_block_number(num)) @@ -912,7 +955,11 @@ where /// Represents different modes for processing block ranges when filtering logs enum RangeMode< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > { /// Use cache-based processing for recent blocks Cached(CachedMode), @@ -921,7 +968,11 @@ enum RangeMode< } impl< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > RangeMode { /// Creates a new `RangeMode`. @@ -993,14 +1044,22 @@ impl< /// Mode for processing blocks using cache optimization for recent blocks struct CachedMode< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > { filter_inner: Arc>, headers_iter: std::vec::IntoIter::Header>>, } impl< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > CachedMode { async fn next(&mut self) -> Result>, EthFilterError> { @@ -1027,7 +1086,11 @@ type ReceiptFetchFuture

= /// Mode for processing blocks using range queries for older blocks struct RangeBlockMode< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > { filter_inner: Arc>, iter: Peekable::Header>>>, @@ -1038,7 +1101,11 @@ struct RangeBlockMode< } impl< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > RangeBlockMode { async fn next(&mut self) -> Result>, EthFilterError> { From 583eb837f0b67f94bf459ac68adcaa3590f14828 Mon Sep 17 00:00:00 2001 From: sashass1315 Date: Tue, 4 Nov 2025 16:27:12 +0200 Subject: [PATCH 319/371] docs(trie): fix PrefixSetMut docs and freeze() comment (#19467) --- crates/trie/common/src/prefix_set.rs | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/crates/trie/common/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs index 35c4bc67839..74fdb789113 100644 --- a/crates/trie/common/src/prefix_set.rs +++ b/crates/trie/common/src/prefix_set.rs @@ -71,16 +71,18 @@ pub struct TriePrefixSets { /// This data structure stores a set of `Nibbles` and provides methods to insert /// new elements and check whether any existing element has a given prefix. /// -/// Internally, this implementation uses a `Vec` and aims to act like a `BTreeSet` in being both -/// sorted and deduplicated. It does this by keeping a `sorted` flag. The `sorted` flag represents -/// whether or not the `Vec` is definitely sorted. When a new element is added, it is set to -/// `false.`. The `Vec` is sorted and deduplicated when `sorted` is `true` and: -/// * An element is being checked for inclusion (`contains`), or -/// * The set is being converted into an immutable `PrefixSet` (`freeze`) +/// Internally, this implementation stores keys in an unsorted `Vec` together with an +/// `all` flag. The `all` flag indicates that every entry should be considered changed and that +/// individual keys can be ignored. /// -/// This means that a `PrefixSet` will always be sorted and deduplicated when constructed from a -/// `PrefixSetMut`. +/// Sorting and deduplication do not happen during insertion or membership checks on this mutable +/// structure. Instead, keys are sorted and deduplicated when converting into the immutable +/// `PrefixSet` via `freeze()`. The immutable `PrefixSet` provides `contains` and relies on the +/// sorted and unique keys produced by `freeze()`; it does not perform additional sorting or +/// deduplication. /// +/// This guarantees that a `PrefixSet` constructed from a `PrefixSetMut` is always sorted and +/// deduplicated. /// # Examples /// /// ``` @@ -165,8 +167,7 @@ impl PrefixSetMut { } else { self.keys.sort_unstable(); self.keys.dedup(); - // We need to shrink in both the sorted and non-sorted cases because deduping may have - // occurred either on `freeze`, or during `contains`. + // Shrink after deduplication to release unused capacity. 
self.keys.shrink_to_fit(); PrefixSet { index: 0, all: false, keys: Arc::new(self.keys) } } From dd25caec12e8401a72d683bcefc0307861dd90ef Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Tue, 4 Nov 2025 15:49:12 +0100 Subject: [PATCH 320/371] chore: Various cleanups after consistent DB view removal (#19489) --- crates/engine/tree/benches/state_root_task.rs | 24 ++---- .../tree/src/tree/payload_processor/mod.rs | 37 +++------ .../src/tree/payload_processor/multiproof.rs | 2 +- .../engine/tree/src/tree/payload_validator.rs | 57 +++---------- crates/trie/parallel/src/proof.rs | 2 +- crates/trie/parallel/src/proof_task.rs | 82 +++++++++---------- crates/trie/trie/src/proof/trie_node.rs | 41 ++-------- crates/trie/trie/src/witness.rs | 8 +- 8 files changed, 85 insertions(+), 168 deletions(-) diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs index e13ad26bc6b..b6306678b5b 100644 --- a/crates/engine/tree/benches/state_root_task.rs +++ b/crates/engine/tree/benches/state_root_task.rs @@ -227,21 +227,15 @@ fn bench_state_root(c: &mut Criterion) { }, |(genesis_hash, mut payload_processor, provider, state_updates)| { black_box({ - let mut handle = payload_processor - .spawn( - Default::default(), - core::iter::empty::< - Result< - Recovered, - core::convert::Infallible, - >, - >(), - StateProviderBuilder::new(provider.clone(), genesis_hash, None), - OverlayStateProviderFactory::new(provider), - &TreeConfig::default(), - ) - .map_err(|(err, ..)| err) - .expect("failed to spawn payload processor"); + let mut handle = payload_processor.spawn( + Default::default(), + core::iter::empty::< + Result, core::convert::Infallible>, + >(), + StateProviderBuilder::new(provider.clone(), genesis_hash, None), + OverlayStateProviderFactory::new(provider), + &TreeConfig::default(), + ); let mut state_hook = handle.state_hook(); diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 42587737298..d1f7531e9dd 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -28,10 +28,7 @@ use reth_evm::{ use reth_primitives_traits::NodePrimitives; use reth_provider::{BlockReader, DatabaseProviderROFactory, StateProviderFactory, StateReader}; use reth_revm::{db::BundleState, state::EvmState}; -use reth_trie::{ - hashed_cursor::HashedCursorFactory, prefix_set::TriePrefixSetsMut, - trie_cursor::TrieCursorFactory, -}; +use reth_trie::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; use reth_trie_parallel::{ proof_task::{ProofTaskCtx, ProofWorkerHandle}, root::ParallelStateRootError, @@ -204,10 +201,7 @@ where provider_builder: StateProviderBuilder, multiproof_provider_factory: F, config: &TreeConfig, - ) -> Result< - PayloadHandle, I::Tx>, I::Error>, - (ParallelStateRootError, I, ExecutionEnv, StateProviderBuilder), - > + ) -> PayloadHandle, I::Tx>, I::Error> where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, F: DatabaseProviderROFactory @@ -222,10 +216,9 @@ where // consistent view of the database, including the trie tables. Because of this there is no // need for an overarching prefix set to invalidate any section of the trie tables, and so // we use an empty prefix set. 
- let prefix_sets = Arc::new(TriePrefixSetsMut::default()); // Create and spawn the storage proof task - let task_ctx = ProofTaskCtx::new(multiproof_provider_factory, prefix_sets); + let task_ctx = ProofTaskCtx::new(multiproof_provider_factory); let storage_worker_count = config.storage_worker_count(); let account_worker_count = config.account_worker_count(); let proof_handle = ProofWorkerHandle::new( @@ -267,12 +260,12 @@ where // Spawn the sparse trie task using any stored trie and parallel trie configuration. self.spawn_sparse_trie_task(sparse_trie_rx, proof_handle, state_root_tx); - Ok(PayloadHandle { + PayloadHandle { to_multi_proof, prewarm_handle, state_root: Some(state_root_rx), transactions: execution_rx, - }) + } } /// Spawns a task that exclusively handles cache prewarming for transaction execution. @@ -895,19 +888,13 @@ mod tests { let provider_factory = BlockchainProvider::new(factory).unwrap(); - let mut handle = - payload_processor - .spawn( - Default::default(), - core::iter::empty::< - Result, core::convert::Infallible>, - >(), - StateProviderBuilder::new(provider_factory.clone(), genesis_hash, None), - OverlayStateProviderFactory::new(provider_factory), - &TreeConfig::default(), - ) - .map_err(|(err, ..)| err) - .expect("failed to spawn payload processor"); + let mut handle = payload_processor.spawn( + Default::default(), + core::iter::empty::, core::convert::Infallible>>(), + StateProviderBuilder::new(provider_factory.clone(), genesis_hash, None), + OverlayStateProviderFactory::new(provider_factory), + &TreeConfig::default(), + ); let mut state_hook = handle.state_hook(); diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 5aac0e3f78f..7da199dd636 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -1317,7 +1317,7 @@ mod tests { { let rt_handle = get_test_runtime_handle(); let overlay_factory = OverlayStateProviderFactory::new(factory); - let task_ctx = ProofTaskCtx::new(overlay_factory, Default::default()); + let task_ctx = ProofTaskCtx::new(overlay_factory); let proof_handle = ProofWorkerHandle::new(rt_handle, task_ctx, 1, 1); let (to_sparse_trie, _receiver) = std::sync::mpsc::channel(); diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index fdd6b30a6e8..ec6ac71a459 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -370,8 +370,7 @@ where let env = ExecutionEnv { evm_env, hash: input.hash(), parent_hash: input.parent_hash() }; // Plan the strategy used for state root computation. 
- let state_root_plan = self.plan_state_root_computation(); - let strategy = state_root_plan.strategy; + let strategy = self.plan_state_root_computation(); debug!( target: "engine::tree::payload_validator", @@ -383,7 +382,7 @@ where let txs = self.tx_iterator_for(&input)?; // Spawn the appropriate processor based on strategy - let (mut handle, strategy) = ensure_ok!(self.spawn_payload_processor( + let mut handle = ensure_ok!(self.spawn_payload_processor( env.clone(), txs, provider_builder, @@ -749,13 +748,10 @@ where state: &EngineApiTreeState, strategy: StateRootStrategy, ) -> Result< - ( - PayloadHandle< - impl ExecutableTxFor + use, - impl core::error::Error + Send + Sync + 'static + use, - >, - StateRootStrategy, - ), + PayloadHandle< + impl ExecutableTxFor + use, + impl core::error::Error + Send + Sync + 'static + use, + >, InsertBlockErrorKind, > { match strategy { @@ -789,34 +785,13 @@ where // Use state root task only if prefix sets are empty, otherwise proof generation is // too expensive because it requires walking all paths in every proof. let spawn_start = Instant::now(); - let (handle, strategy) = match self.payload_processor.spawn( + let handle = self.payload_processor.spawn( env, txs, provider_builder, multiproof_provider_factory, &self.config, - ) { - Ok(handle) => { - // Successfully spawned with state root task support - (handle, StateRootStrategy::StateRootTask) - } - Err((error, txs, env, provider_builder)) => { - // Failed to spawn proof workers, fallback to parallel state root - error!( - target: "engine::tree::payload_validator", - ?error, - "Failed to spawn proof workers, falling back to parallel state root" - ); - ( - self.payload_processor.spawn_cache_exclusive( - env, - txs, - provider_builder, - ), - StateRootStrategy::Parallel, - ) - } - }; + ); // record prewarming initialization duration self.metrics @@ -824,9 +799,9 @@ where .spawn_payload_processor .record(spawn_start.elapsed().as_secs_f64()); - Ok((handle, strategy)) + Ok(handle) } - strategy @ (StateRootStrategy::Parallel | StateRootStrategy::Synchronous) => { + StateRootStrategy::Parallel | StateRootStrategy::Synchronous => { let start = Instant::now(); let handle = self.payload_processor.spawn_cache_exclusive(env, txs, provider_builder); @@ -837,7 +812,7 @@ where .spawn_payload_processor .record(start.elapsed().as_secs_f64()); - Ok((handle, strategy)) + Ok(handle) } } } @@ -875,7 +850,7 @@ where /// Determines the state root computation strategy based on configuration. #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] - fn plan_state_root_computation(&self) -> StateRootPlan { + fn plan_state_root_computation(&self) -> StateRootStrategy { let strategy = if self.config.state_root_fallback() { StateRootStrategy::Synchronous } else if self.config.use_state_root_task() { @@ -890,7 +865,7 @@ where "Planned state root computation strategy" ); - StateRootPlan { strategy } + strategy } /// Called when an invalid block is encountered during validation. @@ -969,12 +944,6 @@ enum StateRootStrategy { Synchronous, } -/// State root computation plan that captures strategy and required data. -struct StateRootPlan { - /// Strategy that should be attempted for computing the state root. - strategy: StateRootStrategy, -} - /// Type that validates the payloads processed by the engine. /// /// This provides the necessary functions for validating/executing payloads/blocks. 
diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 09f5e56e771..433c13fb08f 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -329,7 +329,7 @@ mod tests { let rt = Runtime::new().unwrap(); let factory = reth_provider::providers::OverlayStateProviderFactory::new(factory); - let task_ctx = ProofTaskCtx::new(factory, Default::default()); + let task_ctx = ProofTaskCtx::new(factory); let proof_worker_handle = ProofWorkerHandle::new(rt.handle().clone(), task_ctx, 1, 1); let parallel_result = diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index bc5c788e4e2..8da4c28d91a 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -42,12 +42,12 @@ use alloy_rlp::{BufMut, Encodable}; use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; use dashmap::DashMap; use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; -use reth_provider::{DatabaseProviderROFactory, ProviderError}; +use reth_provider::{DatabaseProviderROFactory, ProviderError, ProviderResult}; use reth_storage_errors::db::DatabaseError; use reth_trie::{ hashed_cursor::HashedCursorFactory, node_iter::{TrieElement, TrieNodeIter}, - prefix_set::{TriePrefixSets, TriePrefixSetsMut}, + prefix_set::TriePrefixSets, proof::{ProofBlindedAccountProvider, ProofBlindedStorageProvider, StorageProof}, trie_cursor::TrieCursorFactory, walker::TrieWalker, @@ -161,7 +161,14 @@ impl ProofWorkerHandle { #[cfg(feature = "metrics")] metrics, ); - worker.run() + if let Err(error) = worker.run() { + error!( + target: "trie::proof_task", + worker_id, + ?error, + "Storage worker failed" + ); + } }); } drop(parent_span); @@ -191,7 +198,14 @@ impl ProofWorkerHandle { #[cfg(feature = "metrics")] metrics, ); - worker.run() + if let Err(error) = worker.run() { + error!( + target: "trie::proof_task", + worker_id, + ?error, + "Account worker failed" + ); + } }); } drop(parent_span); @@ -358,16 +372,12 @@ impl ProofWorkerHandle { pub struct ProofTaskCtx { /// The factory for creating state providers. factory: Factory, - /// The collection of prefix sets for the computation. Since the prefix sets _always_ - /// invalidate the in-memory nodes, not all keys from `state_sorted` might be present here, - /// if we have cached nodes for them. - prefix_sets: Arc, } impl ProofTaskCtx { - /// Creates a new [`ProofTaskCtx`] with the given factory and prefix sets. - pub const fn new(factory: Factory, prefix_sets: Arc) -> Self { - Self { factory, prefix_sets } + /// Creates a new [`ProofTaskCtx`] with the given factory. + pub const fn new(factory: Factory) -> Self { + Self { factory } } } @@ -377,17 +387,14 @@ pub struct ProofTaskTx { /// The provider that implements `TrieCursorFactory` and `HashedCursorFactory`. provider: Provider, - /// The prefix sets for the computation. - prefix_sets: Arc, - /// Identifier for the worker within the worker pool, used only for tracing. id: usize, } impl ProofTaskTx { - /// Initializes a [`ProofTaskTx`] with the given provider, prefix sets, and ID. - const fn new(provider: Provider, prefix_sets: Arc, id: usize) -> Self { - Self { provider, prefix_sets, id } + /// Initializes a [`ProofTaskTx`] with the given provider and ID. 
+ const fn new(provider: Provider, id: usize) -> Self { + Self { provider, id } } } @@ -462,12 +469,8 @@ where account: B256, path: &Nibbles, ) -> TrieNodeProviderResult { - let storage_node_provider = ProofBlindedStorageProvider::new( - &self.provider, - &self.provider, - self.prefix_sets.clone(), - account, - ); + let storage_node_provider = + ProofBlindedStorageProvider::new(&self.provider, &self.provider, account); storage_node_provider.trie_node(path) } @@ -475,11 +478,8 @@ where /// /// Used by account workers to retrieve blinded account trie nodes for proof construction. fn process_blinded_account_node(&self, path: &Nibbles) -> TrieNodeProviderResult { - let account_node_provider = ProofBlindedAccountProvider::new( - &self.provider, - &self.provider, - self.prefix_sets.clone(), - ); + let account_node_provider = + ProofBlindedAccountProvider::new(&self.provider, &self.provider); account_node_provider.trie_node(path) } } @@ -691,7 +691,7 @@ where /// /// If this function panics, the worker thread terminates but other workers /// continue operating and the system degrades gracefully. - fn run(self) { + fn run(self) -> ProviderResult<()> { let Self { task_ctx, work_rx, @@ -702,11 +702,8 @@ where } = self; // Create provider from factory - let provider = task_ctx - .factory - .database_provider_ro() - .expect("Storage worker failed to initialize: unable to create provider"); - let proof_tx = ProofTaskTx::new(provider, task_ctx.prefix_sets, worker_id); + let provider = task_ctx.factory.database_provider_ro()?; + let proof_tx = ProofTaskTx::new(provider, worker_id); trace!( target: "trie::proof_task", @@ -761,6 +758,8 @@ where #[cfg(feature = "metrics")] metrics.record_storage_nodes(storage_nodes_processed as usize); + + Ok(()) } /// Processes a storage proof request. @@ -934,7 +933,7 @@ where /// /// If this function panics, the worker thread terminates but other workers /// continue operating and the system degrades gracefully. - fn run(self) { + fn run(self) -> ProviderResult<()> { let Self { task_ctx, work_rx, @@ -946,11 +945,8 @@ where } = self; // Create provider from factory - let provider = task_ctx - .factory - .database_provider_ro() - .expect("Account worker failed to initialize: unable to create provider"); - let proof_tx = ProofTaskTx::new(provider, task_ctx.prefix_sets, worker_id); + let provider = task_ctx.factory.database_provider_ro()?; + let proof_tx = ProofTaskTx::new(provider, worker_id); trace!( target: "trie::proof_task", @@ -1004,6 +1000,8 @@ where #[cfg(feature = "metrics")] metrics.record_account_nodes(account_nodes_processed as usize); + + Ok(()) } /// Processes an account multiproof request. @@ -1476,12 +1474,10 @@ enum AccountWorkerJob { mod tests { use super::*; use reth_provider::test_utils::create_test_provider_factory; - use reth_trie_common::prefix_set::TriePrefixSetsMut; - use std::sync::Arc; use tokio::{runtime::Builder, task}; fn test_ctx(factory: Factory) -> ProofTaskCtx { - ProofTaskCtx::new(factory, Arc::new(TriePrefixSetsMut::default())) + ProofTaskCtx::new(factory) } /// Ensures `ProofWorkerHandle::new` spawns workers correctly. 
diff --git a/crates/trie/trie/src/proof/trie_node.rs b/crates/trie/trie/src/proof/trie_node.rs index 3e197072d49..8625412f3ae 100644 --- a/crates/trie/trie/src/proof/trie_node.rs +++ b/crates/trie/trie/src/proof/trie_node.rs @@ -2,11 +2,11 @@ use super::{Proof, StorageProof}; use crate::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; use alloy_primitives::{map::HashSet, B256}; use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; -use reth_trie_common::{prefix_set::TriePrefixSetsMut, MultiProofTargets, Nibbles}; +use reth_trie_common::{MultiProofTargets, Nibbles}; use reth_trie_sparse::provider::{ pad_path_to_key, RevealedNode, TrieNodeProvider, TrieNodeProviderFactory, }; -use std::{sync::Arc, time::Instant}; +use std::time::Instant; use tracing::{enabled, trace, Level}; /// Factory for instantiating providers capable of retrieving blinded trie nodes via proofs. @@ -16,18 +16,12 @@ pub struct ProofTrieNodeProviderFactory { trie_cursor_factory: T, /// The factory for hashed cursors. hashed_cursor_factory: H, - /// A set of prefix sets that have changes. - prefix_sets: Arc, } impl ProofTrieNodeProviderFactory { /// Create new proof-based blinded provider factory. - pub const fn new( - trie_cursor_factory: T, - hashed_cursor_factory: H, - prefix_sets: Arc, - ) -> Self { - Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets } + pub const fn new(trie_cursor_factory: T, hashed_cursor_factory: H) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory } } } @@ -43,7 +37,6 @@ where ProofBlindedAccountProvider { trie_cursor_factory: self.trie_cursor_factory.clone(), hashed_cursor_factory: self.hashed_cursor_factory.clone(), - prefix_sets: self.prefix_sets.clone(), } } @@ -51,7 +44,6 @@ where ProofBlindedStorageProvider { trie_cursor_factory: self.trie_cursor_factory.clone(), hashed_cursor_factory: self.hashed_cursor_factory.clone(), - prefix_sets: self.prefix_sets.clone(), account, } } @@ -64,18 +56,12 @@ pub struct ProofBlindedAccountProvider { trie_cursor_factory: T, /// The factory for hashed cursors. hashed_cursor_factory: H, - /// A set of prefix sets that have changes. - prefix_sets: Arc, } impl ProofBlindedAccountProvider { /// Create new proof-based blinded account node provider. - pub const fn new( - trie_cursor_factory: T, - hashed_cursor_factory: H, - prefix_sets: Arc, - ) -> Self { - Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets } + pub const fn new(trie_cursor_factory: T, hashed_cursor_factory: H) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory } } } @@ -89,7 +75,6 @@ where let targets = MultiProofTargets::from_iter([(pad_path_to_key(path), HashSet::default())]); let mut proof = Proof::new(&self.trie_cursor_factory, &self.hashed_cursor_factory) - .with_prefix_sets_mut(self.prefix_sets.as_ref().clone()) .with_branch_node_masks(true) .multiproof(targets) .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; @@ -117,21 +102,14 @@ pub struct ProofBlindedStorageProvider { trie_cursor_factory: T, /// The factory for hashed cursors. hashed_cursor_factory: H, - /// A set of prefix sets that have changes. - prefix_sets: Arc, /// Target account. account: B256, } impl ProofBlindedStorageProvider { /// Create new proof-based blinded storage node provider. 
- pub const fn new( - trie_cursor_factory: T, - hashed_cursor_factory: H, - prefix_sets: Arc, - account: B256, - ) -> Self { - Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets, account } + pub const fn new(trie_cursor_factory: T, hashed_cursor_factory: H, account: B256) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory, account } } } @@ -144,14 +122,11 @@ where let start = enabled!(target: "trie::proof::blinded", Level::TRACE).then(Instant::now); let targets = HashSet::from_iter([pad_path_to_key(path)]); - let storage_prefix_set = - self.prefix_sets.storage_prefix_sets.get(&self.account).cloned().unwrap_or_default(); let mut proof = StorageProof::new_hashed( &self.trie_cursor_factory, &self.hashed_cursor_factory, self.account, ) - .with_prefix_set_mut(storage_prefix_set) .with_branch_node_masks(true) .storage_multiproof(targets) .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 871d599c76b..763908c242d 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -24,7 +24,7 @@ use reth_trie_sparse::{ provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}, SerialSparseTrie, SparseStateTrie, }; -use std::sync::{mpsc, Arc}; +use std::sync::mpsc; /// State transition witness for the trie. #[derive(Debug)] @@ -147,11 +147,7 @@ where let (tx, rx) = mpsc::channel(); let blinded_provider_factory = WitnessTrieNodeProviderFactory::new( - ProofTrieNodeProviderFactory::new( - self.trie_cursor_factory, - self.hashed_cursor_factory, - Arc::new(self.prefix_sets), - ), + ProofTrieNodeProviderFactory::new(self.trie_cursor_factory, self.hashed_cursor_factory), tx, ); let mut sparse_trie = SparseStateTrie::::new(); From 5a6d3ddcad41f369678d96c9e4c780badbc42943 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Tue, 4 Nov 2025 15:10:05 +0000 Subject: [PATCH 321/371] feat(reth-bench-compare): upstream from personal repo (#19488) Co-authored-by: Claude --- .github/assets/check_wasm.sh | 1 + Cargo.lock | 97 +++ Cargo.toml | 4 + bin/reth-bench-compare/Cargo.toml | 96 +++ bin/reth-bench-compare/src/benchmark.rs | 296 +++++++ bin/reth-bench-compare/src/cli.rs | 931 ++++++++++++++++++++++ bin/reth-bench-compare/src/comparison.rs | 484 +++++++++++ bin/reth-bench-compare/src/compilation.rs | 354 ++++++++ bin/reth-bench-compare/src/git.rs | 330 ++++++++ bin/reth-bench-compare/src/main.rs | 45 ++ bin/reth-bench-compare/src/node.rs | 511 ++++++++++++ 11 files changed, 3149 insertions(+) create mode 100644 bin/reth-bench-compare/Cargo.toml create mode 100644 bin/reth-bench-compare/src/benchmark.rs create mode 100644 bin/reth-bench-compare/src/cli.rs create mode 100644 bin/reth-bench-compare/src/comparison.rs create mode 100644 bin/reth-bench-compare/src/compilation.rs create mode 100644 bin/reth-bench-compare/src/git.rs create mode 100644 bin/reth-bench-compare/src/main.rs create mode 100644 bin/reth-bench-compare/src/node.rs diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 8a380837b10..874b7d508c6 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -11,6 +11,7 @@ exclude_crates=( # The following require investigation if they can be fixed reth-basic-payload-builder reth-bench + reth-bench-compare reth-cli reth-cli-commands reth-cli-runner diff --git a/Cargo.lock b/Cargo.lock index 18a234a0498..cd26f0e83d3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1684,6 
+1684,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block2" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdeb9d870516001442e364c5220d3574d2da8dc765554b4a617230d33fa58ef5" +dependencies = [ + "objc2", +] + [[package]] name = "blst" version = "0.3.16" @@ -2654,6 +2663,17 @@ dependencies = [ "cipher", ] +[[package]] +name = "ctrlc" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73736a89c4aff73035ba2ed2e565061954da00d4970fc9ac25dcc85a2a20d790" +dependencies = [ + "dispatch2", + "nix 0.30.1", + "windows-sys 0.61.2", +] + [[package]] name = "curve25519-dalek" version = "4.1.3" @@ -3036,6 +3056,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "dispatch2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec" +dependencies = [ + "bitflags 2.10.0", + "block2", + "libc", + "objc2", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -5827,6 +5859,30 @@ dependencies = [ "unsigned-varint", ] +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "nix" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" +dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "cfg_aliases", + "libc", +] + [[package]] name = "nom" version = "7.1.3" @@ -6016,6 +6072,21 @@ dependencies = [ "smallvec", ] +[[package]] +name = "objc2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c2599ce0ec54857b29ce62166b0ed9b4f6f1a70ccc9a71165b6154caca8c05" +dependencies = [ + "objc2-encode", +] + +[[package]] +name = "objc2-encode" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33" + [[package]] name = "once_cell" version = "1.21.3" @@ -7319,6 +7390,32 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-bench-compare" +version = "1.8.3" +dependencies = [ + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-eth", + "chrono", + "clap", + "csv", + "ctrlc", + "eyre", + "nix 0.29.0", + "reth-chainspec", + "reth-cli-runner", + "reth-cli-util", + "reth-node-core", + "reth-tracing", + "serde", + "serde_json", + "shellexpand", + "shlex", + "tokio", + "tracing", +] + [[package]] name = "reth-chain-state" version = "1.8.3" diff --git a/Cargo.toml b/Cargo.toml index 6f26dcc4774..a1fd8647a1a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ exclude = [".github/"] [workspace] members = [ "bin/reth-bench/", + "bin/reth-bench-compare/", "bin/reth/", "crates/storage/rpc-provider/", "crates/chain-state/", @@ -333,6 +334,7 @@ reth = { path = "bin/reth" } reth-storage-rpc-provider = { path = "crates/storage/rpc-provider" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-bench = { path = "bin/reth-bench" } +reth-bench-compare = { path = "bin/reth-bench-compare" } reth-chain-state = { path = "crates/chain-state" } reth-chainspec = { path = "crates/chainspec", default-features = false } reth-cli = { path = "crates/cli/cli" } @@ -568,6 +570,7 @@ serde_json = { version = 
"1.0", default-features = false, features = ["alloc"] } serde_with = { version = "3", default-features = false, features = ["macros"] } sha2 = { version = "0.10", default-features = false } shellexpand = "3.0.0" +shlex = "1.3" smallvec = "1" strum = { version = "0.27", default-features = false } strum_macros = "0.27" @@ -688,6 +691,7 @@ concat-kdf = "0.1.0" crossbeam-channel = "0.5.13" crossterm = "0.28.0" csv = "1.3.0" +ctrlc = "3.4" ctr = "0.9.2" data-encoding = "2" delegate = "0.13" diff --git a/bin/reth-bench-compare/Cargo.toml b/bin/reth-bench-compare/Cargo.toml new file mode 100644 index 00000000000..11d9b4f8bdb --- /dev/null +++ b/bin/reth-bench-compare/Cargo.toml @@ -0,0 +1,96 @@ +[package] +name = "reth-bench-compare" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Automated reth benchmark comparison between git references" + +[lints] +workspace = true + +[[bin]] +name = "reth-bench-compare" +path = "src/main.rs" + +[dependencies] +# reth +reth-cli-runner.workspace = true +reth-cli-util.workspace = true +reth-node-core.workspace = true +reth-tracing.workspace = true +reth-chainspec.workspace = true + +# alloy +alloy-provider = { workspace = true, features = ["reqwest-rustls-tls"], default-features = false } +alloy-rpc-types-eth.workspace = true +alloy-primitives.workspace = true + +# CLI and argument parsing +clap = { workspace = true, features = ["derive", "env"] } +eyre.workspace = true + +# Async runtime +tokio = { workspace = true, features = ["full"] } +tracing.workspace = true + +# Serialization +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true + +# Time handling +chrono = { workspace = true, features = ["serde"] } + +# Path manipulation +shellexpand.workspace = true + +# CSV handling +csv.workspace = true + +# Process management +ctrlc.workspace = true +shlex.workspace = true + +[target.'cfg(unix)'.dependencies] +nix = { version = "0.29", features = ["signal", "process"] } + +[features] +default = ["jemalloc"] + +asm-keccak = [ + "reth-node-core/asm-keccak", + "alloy-primitives/asm-keccak", +] + +jemalloc = [ + "reth-cli-util/jemalloc", + "reth-node-core/jemalloc", +] +jemalloc-prof = ["reth-cli-util/jemalloc-prof"] +tracy-allocator = ["reth-cli-util/tracy-allocator"] + +min-error-logs = [ + "tracing/release_max_level_error", + "reth-node-core/min-error-logs", +] +min-warn-logs = [ + "tracing/release_max_level_warn", + "reth-node-core/min-warn-logs", +] +min-info-logs = [ + "tracing/release_max_level_info", + "reth-node-core/min-info-logs", +] +min-debug-logs = [ + "tracing/release_max_level_debug", + "reth-node-core/min-debug-logs", +] +min-trace-logs = [ + "tracing/release_max_level_trace", + "reth-node-core/min-trace-logs", +] + +# no-op feature flag for switching between the `optimism` and default functionality in CI matrices +ethereum = [] diff --git a/bin/reth-bench-compare/src/benchmark.rs b/bin/reth-bench-compare/src/benchmark.rs new file mode 100644 index 00000000000..e1b971f5792 --- /dev/null +++ b/bin/reth-bench-compare/src/benchmark.rs @@ -0,0 +1,296 @@ +//! Benchmark execution using reth-bench. 
+ +use crate::cli::Args; +use eyre::{eyre, Result, WrapErr}; +use std::{ + path::Path, + sync::{Arc, Mutex}, +}; +use tokio::{ + fs::File as AsyncFile, + io::{AsyncBufReadExt, AsyncWriteExt, BufReader}, + process::Command, +}; +use tracing::{debug, error, info, warn}; + +/// Manages benchmark execution using reth-bench +pub(crate) struct BenchmarkRunner { + rpc_url: String, + jwt_secret: String, + wait_time: Option<String>, + warmup_blocks: u64, +} + +impl BenchmarkRunner { + /// Create a new `BenchmarkRunner` from CLI arguments + pub(crate) fn new(args: &Args) -> Self { + Self { + rpc_url: args.get_rpc_url(), + jwt_secret: args.jwt_secret_path().to_string_lossy().to_string(), + wait_time: args.wait_time.clone(), + warmup_blocks: args.get_warmup_blocks(), + } + } + + /// Clear filesystem caches (page cache, dentries, and inodes) + pub(crate) async fn clear_fs_caches() -> Result<()> { + info!("Clearing filesystem caches..."); + + // First sync to ensure all pending writes are flushed + let sync_output = + Command::new("sync").output().await.wrap_err("Failed to execute sync command")?; + + if !sync_output.status.success() { + return Err(eyre!("sync command failed")); + } + + // Drop caches - requires sudo/root permissions + // 3 = drop pagecache, dentries, and inodes + let drop_caches_cmd = Command::new("sudo") + .args(["-n", "sh", "-c", "echo 3 > /proc/sys/vm/drop_caches"]) + .output() + .await; + + match drop_caches_cmd { + Ok(output) if output.status.success() => { + info!("Successfully cleared filesystem caches"); + Ok(()) + } + Ok(output) => { + let stderr = String::from_utf8_lossy(&output.stderr); + if stderr.contains("sudo: a password is required") { + warn!("Unable to clear filesystem caches: sudo password required"); + warn!( + "For optimal benchmarking, configure passwordless sudo for cache clearing:" + ); + warn!(" echo '$USER ALL=(ALL) NOPASSWD: /bin/sh -c echo\\\\ [0-9]\\\\ \\\\>\\\\ /proc/sys/vm/drop_caches' | sudo tee /etc/sudoers.d/drop_caches"); + Ok(()) + } else { + Err(eyre!("Failed to clear filesystem caches: {}", stderr)) + } + } + Err(e) => { + warn!("Unable to clear filesystem caches: {}", e); + Ok(()) + } + } + } + + /// Run a warmup benchmark for cache warming + pub(crate) async fn run_warmup(&self, from_block: u64) -> Result<()> { + let to_block = from_block + self.warmup_blocks; + info!( + "Running warmup benchmark from block {} to {} ({} blocks)", + from_block, to_block, self.warmup_blocks + ); + + // Build the reth-bench command for warmup (no output flag) + let mut cmd = Command::new("reth-bench"); + cmd.args([ + "new-payload-fcu", + "--rpc-url", + &self.rpc_url, + "--jwt-secret", + &self.jwt_secret, + "--from", + &from_block.to_string(), + "--to", + &to_block.to_string(), + ]); + + // Add wait-time argument if provided + if let Some(ref wait_time) = self.wait_time { + cmd.args(["--wait-time", wait_time]); + } + + cmd.stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .kill_on_drop(true); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + cmd.process_group(0); + } + + debug!("Executing warmup reth-bench command: {:?}", cmd); + + // Execute the warmup benchmark + let mut child = cmd.spawn().wrap_err("Failed to start warmup reth-bench process")?; + + // Stream output at debug level + if let Some(stdout) = child.stdout.take() { + tokio::spawn(async move { + let reader = BufReader::new(stdout); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[WARMUP] {}", line); + } +
}); + } + + if let Some(stderr) = child.stderr.take() { + tokio::spawn(async move { + let reader = BufReader::new(stderr); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[WARMUP] {}", line); + } + }); + } + + let status = child.wait().await.wrap_err("Failed to wait for warmup reth-bench")?; + + if !status.success() { + return Err(eyre!("Warmup reth-bench failed with exit code: {:?}", status.code())); + } + + info!("Warmup completed successfully"); + Ok(()) + } + + /// Run a benchmark for the specified block range + pub(crate) async fn run_benchmark( + &self, + from_block: u64, + to_block: u64, + output_dir: &Path, + ) -> Result<()> { + info!( + "Running benchmark from block {} to {} (output: {:?})", + from_block, to_block, output_dir + ); + + // Ensure output directory exists + std::fs::create_dir_all(output_dir) + .wrap_err_with(|| format!("Failed to create output directory: {output_dir:?}"))?; + + // Create log file path for reth-bench output + let log_file_path = output_dir.join("reth_bench.log"); + info!("reth-bench logs will be saved to: {:?}", log_file_path); + + // Build the reth-bench command + let mut cmd = Command::new("reth-bench"); + cmd.args([ + "new-payload-fcu", + "--rpc-url", + &self.rpc_url, + "--jwt-secret", + &self.jwt_secret, + "--from", + &from_block.to_string(), + "--to", + &to_block.to_string(), + "--output", + &output_dir.to_string_lossy(), + ]); + + // Add wait-time argument if provided + if let Some(ref wait_time) = self.wait_time { + cmd.args(["--wait-time", wait_time]); + } + + cmd.stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .kill_on_drop(true); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + cmd.process_group(0); + } + + // Debug log the command + debug!("Executing reth-bench command: {:?}", cmd); + + // Execute the benchmark + let mut child = cmd.spawn().wrap_err("Failed to start reth-bench process")?; + + // Capture stdout and stderr for error reporting + let stdout_lines = Arc::new(Mutex::new(Vec::new())); + let stderr_lines = Arc::new(Mutex::new(Vec::new())); + + // Stream stdout with prefix at debug level, capture for error reporting, and write to log + // file + if let Some(stdout) = child.stdout.take() { + let stdout_lines_clone = stdout_lines.clone(); + let log_file = AsyncFile::create(&log_file_path) + .await + .wrap_err(format!("Failed to create log file: {:?}", log_file_path))?; + tokio::spawn(async move { + let reader = BufReader::new(stdout); + let mut lines = reader.lines(); + let mut log_file = log_file; + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH-BENCH] {}", line); + if let Ok(mut captured) = stdout_lines_clone.lock() { + captured.push(line.clone()); + } + // Write to log file (reth-bench output already has timestamps if needed) + let log_line = format!("{}\n", line); + if let Err(e) = log_file.write_all(log_line.as_bytes()).await { + debug!("Failed to write to log file: {}", e); + } + } + }); + } + + // Stream stderr with prefix at debug level, capture for error reporting, and write to log + // file + if let Some(stderr) = child.stderr.take() { + let stderr_lines_clone = stderr_lines.clone(); + let log_file = AsyncFile::options() + .create(true) + .append(true) + .open(&log_file_path) + .await + .wrap_err(format!("Failed to open log file for stderr: {:?}", log_file_path))?; + tokio::spawn(async move { + let reader = BufReader::new(stderr); + let mut lines = reader.lines(); + let mut log_file = log_file; 
+ while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH-BENCH] {}", line); + if let Ok(mut captured) = stderr_lines_clone.lock() { + captured.push(line.clone()); + } + // Write to log file (reth-bench output already has timestamps if needed) + let log_line = format!("{}\n", line); + if let Err(e) = log_file.write_all(log_line.as_bytes()).await { + debug!("Failed to write to log file: {}", e); + } + } + }); + } + + let status = child.wait().await.wrap_err("Failed to wait for reth-bench")?; + + if !status.success() { + // Print all captured output when command fails + error!("reth-bench failed with exit code: {:?}", status.code()); + + if let Ok(stdout) = stdout_lines.lock() && + !stdout.is_empty() + { + error!("reth-bench stdout:"); + for line in stdout.iter() { + error!(" {}", line); + } + } + + if let Ok(stderr) = stderr_lines.lock() && + !stderr.is_empty() + { + error!("reth-bench stderr:"); + for line in stderr.iter() { + error!(" {}", line); + } + } + + return Err(eyre!("reth-bench failed with exit code: {:?}", status.code())); + } + + info!("Benchmark completed"); + Ok(()) + } +} diff --git a/bin/reth-bench-compare/src/cli.rs b/bin/reth-bench-compare/src/cli.rs new file mode 100644 index 00000000000..ecb7125c46d --- /dev/null +++ b/bin/reth-bench-compare/src/cli.rs @@ -0,0 +1,931 @@ +//! CLI argument parsing and main command orchestration. + +use alloy_provider::{Provider, ProviderBuilder}; +use clap::Parser; +use eyre::{eyre, Result, WrapErr}; +use reth_chainspec::Chain; +use reth_cli_runner::CliContext; +use reth_node_core::args::{DatadirArgs, LogArgs}; +use reth_tracing::FileWorkerGuard; +use std::{net::TcpListener, path::PathBuf, str::FromStr}; +use tokio::process::Command; +use tracing::{debug, info, warn}; + +use crate::{ + benchmark::BenchmarkRunner, comparison::ComparisonGenerator, compilation::CompilationManager, + git::GitManager, node::NodeManager, +}; + +/// Target for disabling the --debug.startup-sync-state-idle flag +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum DisableStartupSyncStateIdle { + /// Disable for baseline and warmup runs + Baseline, + /// Disable for feature runs only + Feature, + /// Disable for all runs + All, +} + +impl FromStr for DisableStartupSyncStateIdle { + type Err = String; + + fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { + match s.to_lowercase().as_str() { + "baseline" => Ok(Self::Baseline), + "feature" => Ok(Self::Feature), + "all" => Ok(Self::All), + _ => Err(format!("Invalid value '{}'. 
Expected 'baseline', 'feature', or 'all'", s)), + } + } +} + +impl std::fmt::Display for DisableStartupSyncStateIdle { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Baseline => write!(f, "baseline"), + Self::Feature => write!(f, "feature"), + Self::All => write!(f, "all"), + } + } +} + +/// Automated reth benchmark comparison between git references +#[derive(Debug, Parser)] +#[command( + name = "reth-bench-compare", + about = "Compare reth performance between two git references (branches or tags)", + version +)] +pub(crate) struct Args { + /// Git reference (branch or tag) to use as baseline for comparison + #[arg(long, value_name = "REF")] + pub baseline_ref: String, + + /// Git reference (branch or tag) to compare against the baseline + #[arg(long, value_name = "REF")] + pub feature_ref: String, + + #[command(flatten)] + pub datadir: DatadirArgs, + + /// Number of blocks to benchmark + #[arg(long, value_name = "N", default_value = "100")] + pub blocks: u64, + + /// RPC endpoint for fetching block data + #[arg(long, value_name = "URL")] + pub rpc_url: Option<String>, + + /// JWT secret file path + /// + /// If not provided, defaults to `<datadir>/<chain>/jwt.hex`. + /// If the file doesn't exist, it will be created automatically. + #[arg(long, value_name = "PATH")] + pub jwt_secret: Option<PathBuf>, + + /// Output directory for benchmark results + #[arg(long, value_name = "PATH", default_value = "./reth-bench-compare")] + pub output_dir: String, + + /// Skip git branch validation (useful for testing) + #[arg(long)] + pub skip_git_validation: bool, + + /// Port for reth metrics endpoint + #[arg(long, value_name = "PORT", default_value = "5005")] + pub metrics_port: u16, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain name or numeric chain ID. + #[arg(long, value_name = "CHAIN", default_value = "mainnet", required = false)] + pub chain: Chain, + + /// Run reth binary with sudo (for elevated privileges) + #[arg(long)] + pub sudo: bool, + + /// Generate comparison charts using Python script + #[arg(long)] + pub draw: bool, + + /// Enable CPU profiling with samply during benchmark runs + #[arg(long)] + pub profile: bool, + + /// Wait time between engine API calls (passed to reth-bench) + #[arg(long, value_name = "DURATION")] + pub wait_time: Option<String>, + + /// Number of blocks to run for cache warmup after clearing caches. + /// If not specified, defaults to the same as --blocks + #[arg(long, value_name = "N")] + pub warmup_blocks: Option<u64>, + + /// Disable filesystem cache clearing before warmup phase. + /// By default, filesystem caches are cleared before warmup to ensure consistent benchmarks. + #[arg(long)] + pub no_clear_cache: bool, + + #[command(flatten)] + pub logs: LogArgs, + + /// Additional arguments to pass to baseline reth node command + /// + /// Example: `--baseline-args "--debug.tip 0xabc..."` + #[arg(long, value_name = "ARGS")] + pub baseline_args: Option<String>, + + /// Additional arguments to pass to feature reth node command + /// + /// Example: `--feature-args "--debug.tip 0xdef..."` + #[arg(long, value_name = "ARGS")] + pub feature_args: Option<String>, + + /// Additional arguments to pass to reth node command (applied to both baseline and feature) + /// + /// All arguments after `--` will be passed directly to the reth node command. 
+ /// Example: `reth-bench-compare --baseline-ref main --feature-ref pr/123 -- --debug.tip + /// 0xabc...` + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + pub reth_args: Vec<String>, + + /// Comma-separated list of features to enable during reth compilation + /// + /// Example: `jemalloc,asm-keccak` + #[arg(long, value_name = "FEATURES", default_value = "jemalloc,asm-keccak")] + pub features: String, + + /// Disable automatic --debug.startup-sync-state-idle flag for specific runs. + /// Can be "baseline", "feature", or "all". + /// By default, the flag is passed to warmup, baseline, and feature runs. + /// When "baseline" is specified, the flag is NOT passed to warmup OR baseline. + /// When "feature" is specified, the flag is NOT passed to feature. + /// When "all" is specified, the flag is NOT passed to any run. + #[arg(long, value_name = "TARGET")] + pub disable_startup_sync_state_idle: Option<DisableStartupSyncStateIdle>, +} + +impl Args { + /// Initializes tracing with the configured options. + pub(crate) fn init_tracing(&self) -> Result<Option<FileWorkerGuard>> { + let guard = self.logs.init_tracing()?; + Ok(guard) + } + + /// Build additional arguments for a specific ref type, conditionally including + /// --debug.startup-sync-state-idle based on the configuration + pub(crate) fn build_additional_args( + &self, + ref_type: &str, + base_args_str: Option<&String>, + ) -> Vec<String> { + // Parse the base arguments string if provided + let mut args = base_args_str.map(|s| parse_args_string(s)).unwrap_or_default(); + + // Determine if we should add the --debug.startup-sync-state-idle flag + let should_add_flag = match self.disable_startup_sync_state_idle { + None => true, // By default, add the flag + Some(DisableStartupSyncStateIdle::All) => false, + Some(DisableStartupSyncStateIdle::Baseline) => { + ref_type != "baseline" && ref_type != "warmup" + } + Some(DisableStartupSyncStateIdle::Feature) => ref_type != "feature", + }; + + if should_add_flag { + args.push("--debug.startup-sync-state-idle".to_string()); + debug!("Adding --debug.startup-sync-state-idle flag for ref_type: {}", ref_type); + } else { + debug!("Skipping --debug.startup-sync-state-idle flag for ref_type: {}", ref_type); + } + + args + } + + /// Get the default RPC URL for a given chain + const fn get_default_rpc_url(chain: &Chain) -> &'static str { + match chain.id() { + 8453 => "https://base-mainnet.rpc.ithaca.xyz", // base + 84532 => "https://base-sepolia.rpc.ithaca.xyz", // base-sepolia + 27082 => "https://rpc.hoodi.ethpandaops.io", // hoodi + _ => "https://reth-ethereum.ithaca.xyz/rpc", // mainnet and fallback + } + } + + /// Get the RPC URL, using chain-specific default if not provided + pub(crate) fn get_rpc_url(&self) -> String { + self.rpc_url.clone().unwrap_or_else(|| Self::get_default_rpc_url(&self.chain).to_string()) + } + + /// Get the JWT secret path - either provided or derived from datadir + pub(crate) fn jwt_secret_path(&self) -> PathBuf { + match &self.jwt_secret { + Some(path) => { + let jwt_secret_str = path.to_string_lossy(); + let expanded = shellexpand::tilde(&jwt_secret_str); + PathBuf::from(expanded.as_ref()) + } + None => { + // Use the same logic as reth: <datadir>/<chain>/jwt.hex + let chain_path = self.datadir.clone().resolve_datadir(self.chain); + chain_path.jwt() + } + } + } + + /// Get the resolved datadir path using the chain + pub(crate) fn datadir_path(&self) -> PathBuf { + let chain_path = self.datadir.clone().resolve_datadir(self.chain); + chain_path.data_dir().to_path_buf() + } + + /// Get the expanded output directory path + pub(crate) fn 
output_dir_path(&self) -> PathBuf { + let expanded = shellexpand::tilde(&self.output_dir); + PathBuf::from(expanded.as_ref()) + } + + /// Get the effective warmup blocks value - either specified or defaults to blocks + pub(crate) fn get_warmup_blocks(&self) -> u64 { + self.warmup_blocks.unwrap_or(self.blocks) + } +} + +/// Validate that the RPC endpoint chain ID matches the specified chain +async fn validate_rpc_chain_id(rpc_url: &str, expected_chain: &Chain) -> Result<()> { + // Create Alloy provider + let url = rpc_url.parse().map_err(|e| eyre!("Invalid RPC URL '{}': {}", rpc_url, e))?; + let provider = ProviderBuilder::new().connect_http(url); + + // Query chain ID using Alloy + let rpc_chain_id = provider + .get_chain_id() + .await + .map_err(|e| eyre!("Failed to get chain ID from RPC endpoint {}: {:?}", rpc_url, e))?; + + let expected_chain_id = expected_chain.id(); + + if rpc_chain_id != expected_chain_id { + return Err(eyre!( + "RPC endpoint chain ID mismatch!\n\ + Expected: {} (chain: {})\n\ + Found: {} at RPC endpoint: {}\n\n\ + Please use an RPC endpoint for the correct network or change the --chain argument.", + expected_chain_id, + expected_chain, + rpc_chain_id, + rpc_url + )); + } + + info!("Validated RPC endpoint chain ID"); + Ok(()) +} + +/// Main comparison workflow execution +pub(crate) async fn run_comparison(args: Args, _ctx: CliContext) -> Result<()> { + // Create a new process group for this process and all its children + #[cfg(unix)] + { + use nix::unistd::{getpid, setpgid}; + if let Err(e) = setpgid(getpid(), getpid()) { + warn!("Failed to create process group: {e}"); + } + } + + info!( + "Starting benchmark comparison between '{}' and '{}'", + args.baseline_ref, args.feature_ref + ); + + if args.sudo { + info!("Running in sudo mode - reth commands will use elevated privileges"); + } + + // Initialize Git manager + let git_manager = GitManager::new()?; + // Fetch all branches, tags, and commits + git_manager.fetch_all()?; + + // Initialize compilation manager + let output_dir = args.output_dir_path(); + let compilation_manager = CompilationManager::new( + git_manager.repo_root().to_string(), + output_dir.clone(), + git_manager.clone(), + args.features.clone(), + )?; + // Initialize node manager + let mut node_manager = NodeManager::new(&args); + + let benchmark_runner = BenchmarkRunner::new(&args); + let mut comparison_generator = ComparisonGenerator::new(&args); + + // Set the comparison directory in node manager to align with results directory + node_manager.set_comparison_dir(comparison_generator.get_output_dir()); + + // Store original git state for restoration + let original_ref = git_manager.get_current_ref()?; + info!("Current git reference: {}", original_ref); + + // Validate git state + if !args.skip_git_validation { + git_manager.validate_clean_state()?; + git_manager.validate_refs(&[&args.baseline_ref, &args.feature_ref])?; + } + + // Validate RPC endpoint chain ID matches the specified chain + let rpc_url = args.get_rpc_url(); + validate_rpc_chain_id(&rpc_url, &args.chain).await?; + + // Setup signal handling for cleanup + let git_manager_cleanup = git_manager.clone(); + let original_ref_cleanup = original_ref.clone(); + ctrlc::set_handler(move || { + eprintln!("Received interrupt signal, cleaning up..."); + + // Send SIGTERM to entire process group to ensure all children exit + #[cfg(unix)] + { + use nix::{ + sys::signal::{kill, Signal}, + unistd::Pid, + }; + + // Send SIGTERM to our process group (negative PID = process group) + let current_pid = 
std::process::id() as i32; + let pgid = Pid::from_raw(-current_pid); + if let Err(e) = kill(pgid, Signal::SIGTERM) { + eprintln!("Failed to send SIGTERM to process group: {e}"); + } + } + + // Give a moment for any ongoing git operations to complete + std::thread::sleep(std::time::Duration::from_millis(200)); + + if let Err(e) = git_manager_cleanup.switch_ref(&original_ref_cleanup) { + eprintln!("Failed to restore original git reference: {e}"); + eprintln!("You may need to manually run: git checkout {original_ref_cleanup}"); + } + std::process::exit(1); + })?; + + let result = run_benchmark_workflow( + &git_manager, + &compilation_manager, + &mut node_manager, + &benchmark_runner, + &mut comparison_generator, + &args, + ) + .await; + + // Always restore original git reference + info!("Restoring original git reference: {}", original_ref); + git_manager.switch_ref(&original_ref)?; + + // Handle any errors from the workflow + result?; + + Ok(()) +} + +/// Parse a string of arguments into a vector of strings +fn parse_args_string(args_str: &str) -> Vec<String> { + shlex::split(args_str).unwrap_or_else(|| { + // Fallback to simple whitespace splitting if shlex fails + args_str.split_whitespace().map(|s| s.to_string()).collect() + }) +} + +/// Run compilation phase for both baseline and feature binaries +async fn run_compilation_phase( + git_manager: &GitManager, + compilation_manager: &CompilationManager, + args: &Args, + is_optimism: bool, +) -> Result<(String, String)> { + info!("=== Running compilation phase ==="); + + // Ensure required tools are available (only need to check once) + compilation_manager.ensure_reth_bench_available()?; + if args.profile { + compilation_manager.ensure_samply_available()?; + } + + let refs = [&args.baseline_ref, &args.feature_ref]; + let ref_types = ["baseline", "feature"]; + + // First, resolve all refs to commits using a HashMap to avoid race conditions where a ref is + pushed to mid-run. 
+ let mut ref_commits = std::collections::HashMap::new(); + for &git_ref in &refs { + if !ref_commits.contains_key(git_ref) { + git_manager.switch_ref(git_ref)?; + let commit = git_manager.get_current_commit()?; + ref_commits.insert(git_ref.clone(), commit); + info!("Reference {} resolves to commit: {}", git_ref, &ref_commits[git_ref][..8]); + } + } + + // Now compile each ref using the resolved commits + for (i, &git_ref) in refs.iter().enumerate() { + let ref_type = ref_types[i]; + let commit = &ref_commits[git_ref]; + + info!( + "Compiling {} binary for reference: {} (commit: {})", + ref_type, + git_ref, + &commit[..8] + ); + + // Switch to target reference + git_manager.switch_ref(git_ref)?; + + // Compile reth (with caching) + compilation_manager.compile_reth(commit, is_optimism)?; + + info!("Completed compilation for {} reference", ref_type); + } + + let baseline_commit = ref_commits[&args.baseline_ref].clone(); + let feature_commit = ref_commits[&args.feature_ref].clone(); + + info!("Compilation phase completed"); + Ok((baseline_commit, feature_commit)) +} + +/// Run warmup phase to warm up caches before benchmarking +async fn run_warmup_phase( + git_manager: &GitManager, + compilation_manager: &CompilationManager, + node_manager: &mut NodeManager, + benchmark_runner: &BenchmarkRunner, + args: &Args, + is_optimism: bool, + baseline_commit: &str, +) -> Result<()> { + info!("=== Running warmup phase ==="); + + // Use baseline for warmup + let warmup_ref = &args.baseline_ref; + + // Switch to baseline reference + git_manager.switch_ref(warmup_ref)?; + + // Get the cached binary path for baseline (should already be compiled) + let binary_path = + compilation_manager.get_cached_binary_path_for_commit(baseline_commit, is_optimism); + + // Verify the cached binary exists + if !binary_path.exists() { + return Err(eyre!( + "Cached baseline binary not found at {:?}. 
Compilation phase should have created it.", + binary_path + )); + } + + info!("Using cached baseline binary for warmup (commit: {})", &baseline_commit[..8]); + + // Build additional args with conditional --debug.startup-sync-state-idle flag + let additional_args = args.build_additional_args("warmup", args.baseline_args.as_ref()); + + // Start reth node for warmup + let mut node_process = + node_manager.start_node(&binary_path, warmup_ref, "warmup", &additional_args).await?; + + // Wait for node to be ready and get its current tip + let current_tip = node_manager.wait_for_node_ready_and_get_tip().await?; + info!("Warmup node is ready at tip: {}", current_tip); + + // Store the tip we'll unwind back to + let original_tip = current_tip; + + // Clear filesystem caches before warmup run only (unless disabled) + if args.no_clear_cache { + info!("Skipping filesystem cache clearing (--no-clear-cache flag set)"); + } else { + BenchmarkRunner::clear_fs_caches().await?; + } + + // Run warmup to warm up caches + benchmark_runner.run_warmup(current_tip).await?; + + // Stop node before unwinding (node must be stopped to release database lock) + node_manager.stop_node(&mut node_process).await?; + + // Unwind back to starting block after warmup + node_manager.unwind_to_block(original_tip).await?; + + info!("Warmup phase completed"); + Ok(()) +} + +/// Execute the complete benchmark workflow for both branches +async fn run_benchmark_workflow( + git_manager: &GitManager, + compilation_manager: &CompilationManager, + node_manager: &mut NodeManager, + benchmark_runner: &BenchmarkRunner, + comparison_generator: &mut ComparisonGenerator, + args: &Args, +) -> Result<()> { + // Detect if this is an Optimism chain once at the beginning + let rpc_url = args.get_rpc_url(); + let is_optimism = compilation_manager.detect_optimism_chain(&rpc_url).await?; + + // Run compilation phase for both binaries + let (baseline_commit, feature_commit) = + run_compilation_phase(git_manager, compilation_manager, args, is_optimism).await?; + + // Run warmup phase before benchmarking (skip if warmup_blocks is 0) + if args.get_warmup_blocks() > 0 { + run_warmup_phase( + git_manager, + compilation_manager, + node_manager, + benchmark_runner, + args, + is_optimism, + &baseline_commit, + ) + .await?; + } else { + info!("Skipping warmup phase (warmup_blocks is 0)"); + } + + let refs = [&args.baseline_ref, &args.feature_ref]; + let ref_types = ["baseline", "feature"]; + let commits = [&baseline_commit, &feature_commit]; + + for (i, &git_ref) in refs.iter().enumerate() { + let ref_type = ref_types[i]; + let commit = commits[i]; + info!("=== Processing {} reference: {} ===", ref_type, git_ref); + + // Switch to target reference + git_manager.switch_ref(git_ref)?; + + // Get the cached binary path for this git reference (should already be compiled) + let binary_path = + compilation_manager.get_cached_binary_path_for_commit(commit, is_optimism); + + // Verify the cached binary exists + if !binary_path.exists() { + return Err(eyre!( + "Cached {} binary not found at {:?}. 
Compilation phase should have created it.", + ref_type, + binary_path + )); + } + + info!("Using cached {} binary (commit: {})", ref_type, &commit[..8]); + + // Get reference-specific base arguments string + let base_args_str = match ref_type { + "baseline" => args.baseline_args.as_ref(), + "feature" => args.feature_args.as_ref(), + _ => None, + }; + + // Build additional args with conditional --debug.startup-sync-state-idle flag + let additional_args = args.build_additional_args(ref_type, base_args_str); + + // Start reth node + let mut node_process = + node_manager.start_node(&binary_path, git_ref, ref_type, &additional_args).await?; + + // Wait for node to be ready and get its current tip (wherever it is) + let current_tip = node_manager.wait_for_node_ready_and_get_tip().await?; + info!("Node is ready at tip: {}", current_tip); + + // Store the tip we'll unwind back to + let original_tip = current_tip; + + // Calculate benchmark range + // Note: reth-bench has an off-by-one error where it consumes the first block + // of the range, so we add 1 to compensate and get exactly args.blocks blocks + let from_block = original_tip; + let to_block = original_tip + args.blocks; + + // Run benchmark + let output_dir = comparison_generator.get_ref_output_dir(ref_type); + + // Capture start timestamp for the benchmark run + let benchmark_start = chrono::Utc::now(); + + // Run benchmark (comparison logic is handled separately by ComparisonGenerator) + benchmark_runner.run_benchmark(from_block, to_block, &output_dir).await?; + + // Capture end timestamp for the benchmark run + let benchmark_end = chrono::Utc::now(); + + // Stop node + node_manager.stop_node(&mut node_process).await?; + + // Unwind back to original tip + node_manager.unwind_to_block(original_tip).await?; + + // Store results for comparison + comparison_generator.add_ref_results(ref_type, &output_dir)?; + + // Set the benchmark run timestamps + comparison_generator.set_ref_timestamps(ref_type, benchmark_start, benchmark_end)?; + + info!("Completed {} reference benchmark", ref_type); + } + + // Generate comparison report + comparison_generator.generate_comparison_report().await?; + + // Generate charts if requested + if args.draw { + generate_comparison_charts(comparison_generator).await?; + } + + // Start samply servers if profiling was enabled + if args.profile { + start_samply_servers(args).await?; + } + + Ok(()) +} + +/// Generate comparison charts using the Python script +async fn generate_comparison_charts(comparison_generator: &ComparisonGenerator) -> Result<()> { + info!("Generating comparison charts with Python script..."); + + let baseline_output_dir = comparison_generator.get_ref_output_dir("baseline"); + let feature_output_dir = comparison_generator.get_ref_output_dir("feature"); + + let baseline_csv = baseline_output_dir.join("combined_latency.csv"); + let feature_csv = feature_output_dir.join("combined_latency.csv"); + + // Check if CSV files exist + if !baseline_csv.exists() { + return Err(eyre!("Baseline CSV not found: {:?}", baseline_csv)); + } + if !feature_csv.exists() { + return Err(eyre!("Feature CSV not found: {:?}", feature_csv)); + } + + let output_dir = comparison_generator.get_output_dir(); + let chart_output = output_dir.join("latency_comparison.png"); + + let script_path = "bin/reth-bench/scripts/compare_newpayload_latency.py"; + + info!("Running Python comparison script with uv..."); + let mut cmd = Command::new("uv"); + cmd.args([ + "run", + script_path, + &baseline_csv.to_string_lossy(), + 
&feature_csv.to_string_lossy(), + "-o", + &chart_output.to_string_lossy(), + ]); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + cmd.process_group(0); + } + + let output = cmd.output().await.map_err(|e| { + eyre!("Failed to execute Python script with uv: {}. Make sure uv is installed.", e) + })?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + let stdout = String::from_utf8_lossy(&output.stdout); + return Err(eyre!( + "Python script failed with exit code {:?}:\nstdout: {}\nstderr: {}", + output.status.code(), + stdout, + stderr + )); + } + + let stdout = String::from_utf8_lossy(&output.stdout); + if !stdout.trim().is_empty() { + info!("Python script output:\n{}", stdout); + } + + info!("Comparison chart generated: {:?}", chart_output); + Ok(()) +} + +/// Start samply servers for viewing profiles +async fn start_samply_servers(args: &Args) -> Result<()> { + info!("Starting samply servers for profile viewing..."); + + let output_dir = args.output_dir_path(); + let profiles_dir = output_dir.join("profiles"); + + // Build profile paths + let baseline_profile = profiles_dir.join("baseline.json.gz"); + let feature_profile = profiles_dir.join("feature.json.gz"); + + // Check if profiles exist + if !baseline_profile.exists() { + warn!("Baseline profile not found: {:?}", baseline_profile); + return Ok(()); + } + if !feature_profile.exists() { + warn!("Feature profile not found: {:?}", feature_profile); + return Ok(()); + } + + // Find two consecutive available ports starting from 3000 + let (baseline_port, feature_port) = find_consecutive_ports(3000)?; + info!("Found available ports: {} and {}", baseline_port, feature_port); + + // Get samply path + let samply_path = get_samply_path().await?; + + // Start baseline server + info!("Starting samply server for baseline '{}' on port {}", args.baseline_ref, baseline_port); + let mut baseline_cmd = Command::new(&samply_path); + baseline_cmd + .args(["load", "--port", &baseline_port.to_string(), &baseline_profile.to_string_lossy()]) + .kill_on_drop(true); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + baseline_cmd.process_group(0); + } + + // Conditionally pipe output based on log level + if tracing::enabled!(tracing::Level::DEBUG) { + baseline_cmd.stdout(std::process::Stdio::piped()).stderr(std::process::Stdio::piped()); + } else { + baseline_cmd.stdout(std::process::Stdio::null()).stderr(std::process::Stdio::null()); + } + + // Debug log the command + debug!("Executing samply load command: {:?}", baseline_cmd); + + let mut baseline_child = + baseline_cmd.spawn().wrap_err("Failed to start samply server for baseline")?; + + // Stream baseline samply output if debug logging is enabled + if tracing::enabled!(tracing::Level::DEBUG) { + if let Some(stdout) = baseline_child.stdout.take() { + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stdout); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[SAMPLY-BASELINE] {}", line); + } + }); + } + + if let Some(stderr) = baseline_child.stderr.take() { + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stderr); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[SAMPLY-BASELINE] {}", line); + } + }); + } + } + + // Start feature server + info!("Starting samply server for feature '{}' on port {}", args.feature_ref, 
feature_port); + let mut feature_cmd = Command::new(&samply_path); + feature_cmd + .args(["load", "--port", &feature_port.to_string(), &feature_profile.to_string_lossy()]) + .kill_on_drop(true); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + feature_cmd.process_group(0); + } + + // Conditionally pipe output based on log level + if tracing::enabled!(tracing::Level::DEBUG) { + feature_cmd.stdout(std::process::Stdio::piped()).stderr(std::process::Stdio::piped()); + } else { + feature_cmd.stdout(std::process::Stdio::null()).stderr(std::process::Stdio::null()); + } + + // Debug log the command + debug!("Executing samply load command: {:?}", feature_cmd); + + let mut feature_child = + feature_cmd.spawn().wrap_err("Failed to start samply server for feature")?; + + // Stream feature samply output if debug logging is enabled + if tracing::enabled!(tracing::Level::DEBUG) { + if let Some(stdout) = feature_child.stdout.take() { + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stdout); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[SAMPLY-FEATURE] {}", line); + } + }); + } + + if let Some(stderr) = feature_child.stderr.take() { + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stderr); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[SAMPLY-FEATURE] {}", line); + } + }); + } + } + + // Give servers time to start + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + + // Print access information + println!("\n=== SAMPLY PROFILE SERVERS STARTED ==="); + println!("Baseline '{}': http://127.0.0.1:{}", args.baseline_ref, baseline_port); + println!("Feature '{}': http://127.0.0.1:{}", args.feature_ref, feature_port); + println!("\nOpen the URLs in your browser to view the profiles."); + println!("Press Ctrl+C to stop the servers and exit."); + println!("=========================================\n"); + + // Wait for Ctrl+C or process termination + let ctrl_c = tokio::signal::ctrl_c(); + let baseline_wait = baseline_child.wait(); + let feature_wait = feature_child.wait(); + + tokio::select! 
{ + _ = ctrl_c => { + info!("Received Ctrl+C, shutting down samply servers..."); + } + result = baseline_wait => { + match result { + Ok(status) => info!("Baseline samply server exited with status: {}", status), + Err(e) => warn!("Baseline samply server error: {}", e), + } + } + result = feature_wait => { + match result { + Ok(status) => info!("Feature samply server exited with status: {}", status), + Err(e) => warn!("Feature samply server error: {}", e), + } + } + } + + // Ensure both processes are terminated + let _ = baseline_child.kill().await; + let _ = feature_child.kill().await; + + info!("Samply servers stopped."); + Ok(()) +} + +/// Find two consecutive available ports starting from the given port +fn find_consecutive_ports(start_port: u16) -> Result<(u16, u16)> { + for port in start_port..=65533 { + // Check if both port and port+1 are available + if is_port_available(port) && is_port_available(port + 1) { + return Ok((port, port + 1)); + } + } + Err(eyre!("Could not find two consecutive available ports starting from {}", start_port)) +} + +/// Check if a port is available by attempting to bind to it +fn is_port_available(port: u16) -> bool { + TcpListener::bind(("127.0.0.1", port)).is_ok() +} + +/// Get the absolute path to samply using 'which' command +async fn get_samply_path() -> Result<String> { + let output = Command::new("which") + .arg("samply") + .output() + .await + .wrap_err("Failed to execute 'which samply' command")?; + + if !output.status.success() { + return Err(eyre!("samply not found in PATH")); + } + + let samply_path = String::from_utf8(output.stdout) + .wrap_err("samply path is not valid UTF-8")? + .trim() + .to_string(); + + if samply_path.is_empty() { + return Err(eyre!("which samply returned empty path")); + } + + Ok(samply_path) +} diff --git a/bin/reth-bench-compare/src/comparison.rs b/bin/reth-bench-compare/src/comparison.rs new file mode 100644 index 00000000000..316609569bf --- /dev/null +++ b/bin/reth-bench-compare/src/comparison.rs @@ -0,0 +1,484 @@ +//! Results comparison and report generation. 
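+//!
+//! The generator below reads the `combined_latency.csv` and `total_gas.csv` files that reth-bench
+//! writes for each run, derives per-run summary statistics (total gas, gas/s, blocks/s, average
+//! newPayload latency) and the relative change between baseline and feature, and writes
+//! `comparison_report.json` plus `per_block_comparison.csv` under the timestamped results
+//! directory.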
+ +use crate::cli::Args; +use chrono::{DateTime, Utc}; +use csv::Reader; +use eyre::{eyre, Result, WrapErr}; +use serde::{Deserialize, Serialize}; +use std::{ + collections::HashMap, + fs, + path::{Path, PathBuf}, +}; +use tracing::{info, warn}; + +/// Manages comparison between baseline and feature reference results +pub(crate) struct ComparisonGenerator { + output_dir: PathBuf, + timestamp: String, + baseline_ref_name: String, + feature_ref_name: String, + baseline_results: Option<BenchmarkResults>, + feature_results: Option<BenchmarkResults>, +} + +/// Represents the results from a single benchmark run +#[derive(Debug, Clone)] +pub(crate) struct BenchmarkResults { + pub ref_name: String, + pub combined_latency_data: Vec<CombinedLatencyRow>, + pub summary: BenchmarkSummary, + pub start_timestamp: Option<DateTime<Utc>>, + pub end_timestamp: Option<DateTime<Utc>>, +} + +/// Combined latency CSV row structure +#[derive(Debug, Clone, Deserialize, Serialize)] +pub(crate) struct CombinedLatencyRow { + pub block_number: u64, + pub gas_used: u64, + pub new_payload_latency: u128, +} + +/// Total gas CSV row structure +#[derive(Debug, Clone, Deserialize, Serialize)] +pub(crate) struct TotalGasRow { + pub block_number: u64, + pub gas_used: u64, + pub time: u128, +} + +/// Summary statistics for a benchmark run +#[derive(Debug, Clone, Serialize)] +pub(crate) struct BenchmarkSummary { + pub total_blocks: u64, + pub total_gas_used: u64, + pub total_duration_ms: u128, + pub avg_new_payload_latency_ms: f64, + pub gas_per_second: f64, + pub blocks_per_second: f64, +} + +/// Comparison report between two benchmark runs +#[derive(Debug, Serialize)] +pub(crate) struct ComparisonReport { + pub timestamp: String, + pub baseline: RefInfo, + pub feature: RefInfo, + pub comparison_summary: ComparisonSummary, + pub per_block_comparisons: Vec<BlockComparison>, +} + +/// Information about a reference in the comparison +#[derive(Debug, Serialize)] +pub(crate) struct RefInfo { + pub ref_name: String, + pub summary: BenchmarkSummary, + pub start_timestamp: Option<DateTime<Utc>>, + pub end_timestamp: Option<DateTime<Utc>>, +} + +/// Summary of the comparison between references +#[derive(Debug, Serialize)] +pub(crate) struct ComparisonSummary { + pub new_payload_latency_change_percent: f64, + pub gas_per_second_change_percent: f64, + pub blocks_per_second_change_percent: f64, +} + +/// Per-block comparison data +#[derive(Debug, Serialize)] +pub(crate) struct BlockComparison { + pub block_number: u64, + pub baseline_new_payload_latency: u128, + pub feature_new_payload_latency: u128, + pub new_payload_latency_change_percent: f64, +} + +impl ComparisonGenerator { + /// Create a new comparison generator + pub(crate) fn new(args: &Args) -> Self { + let now: DateTime<Utc> = Utc::now(); + let timestamp = now.format("%Y%m%d_%H%M%S").to_string(); + + Self { + output_dir: args.output_dir_path(), + timestamp, + baseline_ref_name: args.baseline_ref.clone(), + feature_ref_name: args.feature_ref.clone(), + baseline_results: None, + feature_results: None, + } + } + + /// Get the output directory for a specific reference + pub(crate) fn get_ref_output_dir(&self, ref_type: &str) -> PathBuf { + self.output_dir.join("results").join(&self.timestamp).join(ref_type) + } + + /// Get the main output directory for this comparison run + pub(crate) fn get_output_dir(&self) -> PathBuf { + self.output_dir.join("results").join(&self.timestamp) + } + + /// Add benchmark results for a reference + pub(crate) fn add_ref_results(&mut self, ref_type: &str, output_path: &Path) -> Result<()> { + let ref_name = match ref_type { + "baseline" => &self.baseline_ref_name, + "feature" => 
&self.feature_ref_name, + _ => return Err(eyre!("Unknown reference type: {}", ref_type)), + }; + + let results = self.load_benchmark_results(ref_name, output_path)?; + + match ref_type { + "baseline" => self.baseline_results = Some(results), + "feature" => self.feature_results = Some(results), + _ => return Err(eyre!("Unknown reference type: {}", ref_type)), + } + + info!("Loaded benchmark results for {} reference", ref_type); + + Ok(()) + } + + /// Set the benchmark run timestamps for a reference + pub(crate) fn set_ref_timestamps( + &mut self, + ref_type: &str, + start: DateTime<Utc>, + end: DateTime<Utc>, + ) -> Result<()> { + match ref_type { + "baseline" => { + if let Some(ref mut results) = self.baseline_results { + results.start_timestamp = Some(start); + results.end_timestamp = Some(end); + } else { + return Err(eyre!("Baseline results not loaded yet")); + } + } + "feature" => { + if let Some(ref mut results) = self.feature_results { + results.start_timestamp = Some(start); + results.end_timestamp = Some(end); + } else { + return Err(eyre!("Feature results not loaded yet")); + } + } + _ => return Err(eyre!("Unknown reference type: {}", ref_type)), + } + + Ok(()) + } + + /// Generate the final comparison report + pub(crate) async fn generate_comparison_report(&self) -> Result<()> { + info!("Generating comparison report..."); + + let baseline = + self.baseline_results.as_ref().ok_or_else(|| eyre!("Baseline results not loaded"))?; + + let feature = + self.feature_results.as_ref().ok_or_else(|| eyre!("Feature results not loaded"))?; + + // Generate comparison + let comparison_summary = + self.calculate_comparison_summary(&baseline.summary, &feature.summary)?; + let per_block_comparisons = self.calculate_per_block_comparisons(baseline, feature)?; + + let report = ComparisonReport { + timestamp: self.timestamp.clone(), + baseline: RefInfo { + ref_name: baseline.ref_name.clone(), + summary: baseline.summary.clone(), + start_timestamp: baseline.start_timestamp, + end_timestamp: baseline.end_timestamp, + }, + feature: RefInfo { + ref_name: feature.ref_name.clone(), + summary: feature.summary.clone(), + start_timestamp: feature.start_timestamp, + end_timestamp: feature.end_timestamp, + }, + comparison_summary, + per_block_comparisons, + }; + + // Write reports + self.write_comparison_reports(&report).await?; + + // Print summary to console + self.print_comparison_summary(&report); + + Ok(()) + } + + /// Load benchmark results from CSV files + fn load_benchmark_results( + &self, + ref_name: &str, + output_path: &Path, + ) -> Result<BenchmarkResults> { + let combined_latency_path = output_path.join("combined_latency.csv"); + let total_gas_path = output_path.join("total_gas.csv"); + + let combined_latency_data = self.load_combined_latency_csv(&combined_latency_path)?; + let total_gas_data = self.load_total_gas_csv(&total_gas_path)?; + + let summary = self.calculate_summary(&combined_latency_data, &total_gas_data)?; + + Ok(BenchmarkResults { + ref_name: ref_name.to_string(), + combined_latency_data, + summary, + start_timestamp: None, + end_timestamp: None, + }) + } + + /// Load combined latency CSV data + fn load_combined_latency_csv(&self, path: &Path) -> Result<Vec<CombinedLatencyRow>> { + let mut reader = Reader::from_path(path) + .wrap_err_with(|| format!("Failed to open combined latency CSV: {path:?}"))?; + + let mut rows = Vec::new(); + for result in reader.deserialize() { + let row: CombinedLatencyRow = result + .wrap_err_with(|| format!("Failed to parse combined latency row in {path:?}"))?; + rows.push(row); + } + + if rows.is_empty() { 
+ return Err(eyre!("No data found in combined latency CSV: {:?}", path)); + } + + Ok(rows) + } + + /// Load total gas CSV data + fn load_total_gas_csv(&self, path: &Path) -> Result<Vec<TotalGasRow>> { + let mut reader = Reader::from_path(path) + .wrap_err_with(|| format!("Failed to open total gas CSV: {path:?}"))?; + + let mut rows = Vec::new(); + for result in reader.deserialize() { + let row: TotalGasRow = + result.wrap_err_with(|| format!("Failed to parse total gas row in {path:?}"))?; + rows.push(row); + } + + if rows.is_empty() { + return Err(eyre!("No data found in total gas CSV: {:?}", path)); + } + + Ok(rows) + } + + /// Calculate summary statistics for a benchmark run + fn calculate_summary( + &self, + combined_data: &[CombinedLatencyRow], + total_gas_data: &[TotalGasRow], + ) -> Result<BenchmarkSummary> { + if combined_data.is_empty() || total_gas_data.is_empty() { + return Err(eyre!("Cannot calculate summary for empty data")); + } + + let total_blocks = combined_data.len() as u64; + let total_gas_used: u64 = combined_data.iter().map(|r| r.gas_used).sum(); + + let total_duration_ms = total_gas_data.last().unwrap().time / 1000; // Convert microseconds to milliseconds + + let avg_new_payload_latency_ms: f64 = + combined_data.iter().map(|r| r.new_payload_latency as f64 / 1000.0).sum::<f64>() / + total_blocks as f64; + + let total_duration_seconds = total_duration_ms as f64 / 1000.0; + let gas_per_second = if total_duration_seconds > f64::EPSILON { + total_gas_used as f64 / total_duration_seconds + } else { + 0.0 + }; + + let blocks_per_second = if total_duration_seconds > f64::EPSILON { + total_blocks as f64 / total_duration_seconds + } else { + 0.0 + }; + + Ok(BenchmarkSummary { + total_blocks, + total_gas_used, + total_duration_ms, + avg_new_payload_latency_ms, + gas_per_second, + blocks_per_second, + }) + } + + /// Calculate comparison summary between baseline and feature + fn calculate_comparison_summary( + &self, + baseline: &BenchmarkSummary, + feature: &BenchmarkSummary, + ) -> Result<ComparisonSummary> { + let calc_percent_change = |baseline: f64, feature: f64| -> f64 { + if baseline.abs() > f64::EPSILON { + ((feature - baseline) / baseline) * 100.0 + } else { + 0.0 + } + }; + + Ok(ComparisonSummary { + new_payload_latency_change_percent: calc_percent_change( + baseline.avg_new_payload_latency_ms, + feature.avg_new_payload_latency_ms, + ), + gas_per_second_change_percent: calc_percent_change( + baseline.gas_per_second, + feature.gas_per_second, + ), + blocks_per_second_change_percent: calc_percent_change( + baseline.blocks_per_second, + feature.blocks_per_second, + ), + }) + } + + /// Calculate per-block comparisons + fn calculate_per_block_comparisons( + &self, + baseline: &BenchmarkResults, + feature: &BenchmarkResults, + ) -> Result<Vec<BlockComparison>> { + let mut baseline_map: HashMap<u64, &CombinedLatencyRow> = HashMap::new(); + for row in &baseline.combined_latency_data { + baseline_map.insert(row.block_number, row); + } + + let mut comparisons = Vec::new(); + for feature_row in &feature.combined_latency_data { + if let Some(baseline_row) = baseline_map.get(&feature_row.block_number) { + let calc_percent_change = |baseline: u128, feature: u128| -> f64 { + if baseline > 0 { + ((feature as f64 - baseline as f64) / baseline as f64) * 100.0 + } else { + 0.0 + } + }; + + let comparison = BlockComparison { + block_number: feature_row.block_number, + baseline_new_payload_latency: baseline_row.new_payload_latency, + feature_new_payload_latency: feature_row.new_payload_latency, + new_payload_latency_change_percent: calc_percent_change( + baseline_row.new_payload_latency, + 
feature_row.new_payload_latency, + ), + }; + comparisons.push(comparison); + } else { + warn!("Block {} not found in baseline data", feature_row.block_number); + } + } + + Ok(comparisons) + } + + /// Write comparison reports to files + async fn write_comparison_reports(&self, report: &ComparisonReport) -> Result<()> { + let report_dir = self.output_dir.join("results").join(&self.timestamp); + fs::create_dir_all(&report_dir) + .wrap_err_with(|| format!("Failed to create report directory: {report_dir:?}"))?; + + // Write JSON report + let json_path = report_dir.join("comparison_report.json"); + let json_content = serde_json::to_string_pretty(report) + .wrap_err("Failed to serialize comparison report to JSON")?; + fs::write(&json_path, json_content) + .wrap_err_with(|| format!("Failed to write JSON report: {json_path:?}"))?; + + // Write CSV report for per-block comparisons + let csv_path = report_dir.join("per_block_comparison.csv"); + let mut writer = csv::Writer::from_path(&csv_path) + .wrap_err_with(|| format!("Failed to create CSV writer: {csv_path:?}"))?; + + for comparison in &report.per_block_comparisons { + writer.serialize(comparison).wrap_err("Failed to write comparison row to CSV")?; + } + writer.flush().wrap_err("Failed to flush CSV writer")?; + + info!("Comparison reports written to: {:?}", report_dir); + Ok(()) + } + + /// Print comparison summary to console + fn print_comparison_summary(&self, report: &ComparisonReport) { + // Parse and format timestamp nicely + let formatted_timestamp = if let Ok(dt) = chrono::DateTime::parse_from_str( + &format!("{} +0000", report.timestamp.replace('_', " ")), + "%Y%m%d %H%M%S %z", + ) { + dt.format("%Y-%m-%d %H:%M:%S UTC").to_string() + } else { + // Fallback to original if parsing fails + report.timestamp.clone() + }; + + println!("\n=== BENCHMARK COMPARISON SUMMARY ==="); + println!("Timestamp: {formatted_timestamp}"); + println!("Baseline: {}", report.baseline.ref_name); + println!("Feature: {}", report.feature.ref_name); + println!(); + + let summary = &report.comparison_summary; + + println!("Performance Changes:"); + println!(" NewPayload Latency: {:+.2}%", summary.new_payload_latency_change_percent); + println!(" Gas/Second: {:+.2}%", summary.gas_per_second_change_percent); + println!(" Blocks/Second: {:+.2}%", summary.blocks_per_second_change_percent); + println!(); + + println!("Baseline Summary:"); + let baseline = &report.baseline.summary; + println!( + " Blocks: {}, Gas: {}, Duration: {:.2}s", + baseline.total_blocks, + baseline.total_gas_used, + baseline.total_duration_ms as f64 / 1000.0 + ); + println!(" Avg NewPayload: {:.2}ms", baseline.avg_new_payload_latency_ms); + if let (Some(start), Some(end)) = + (&report.baseline.start_timestamp, &report.baseline.end_timestamp) + { + println!( + " Started: {}, Ended: {}", + start.format("%Y-%m-%d %H:%M:%S UTC"), + end.format("%Y-%m-%d %H:%M:%S UTC") + ); + } + println!(); + + println!("Feature Summary:"); + let feature = &report.feature.summary; + println!( + " Blocks: {}, Gas: {}, Duration: {:.2}s", + feature.total_blocks, + feature.total_gas_used, + feature.total_duration_ms as f64 / 1000.0 + ); + println!(" Avg NewPayload: {:.2}ms", feature.avg_new_payload_latency_ms); + if let (Some(start), Some(end)) = + (&report.feature.start_timestamp, &report.feature.end_timestamp) + { + println!( + " Started: {}, Ended: {}", + start.format("%Y-%m-%d %H:%M:%S UTC"), + end.format("%Y-%m-%d %H:%M:%S UTC") + ); + } + println!(); + } +} diff --git a/bin/reth-bench-compare/src/compilation.rs 
b/bin/reth-bench-compare/src/compilation.rs new file mode 100644 index 00000000000..0bd9f70ce64 --- /dev/null +++ b/bin/reth-bench-compare/src/compilation.rs @@ -0,0 +1,354 @@ +//! Compilation operations for reth and reth-bench. + +use crate::git::GitManager; +use alloy_primitives::address; +use alloy_provider::{Provider, ProviderBuilder}; +use eyre::{eyre, Result, WrapErr}; +use std::{fs, path::PathBuf, process::Command}; +use tracing::{debug, error, info, warn}; + +/// Manages compilation operations for reth components +#[derive(Debug)] +pub(crate) struct CompilationManager { + repo_root: String, + output_dir: PathBuf, + git_manager: GitManager, + features: String, +} + +impl CompilationManager { + /// Create a new `CompilationManager` + pub(crate) const fn new( + repo_root: String, + output_dir: PathBuf, + git_manager: GitManager, + features: String, + ) -> Result<Self> { + Ok(Self { repo_root, output_dir, git_manager, features }) + } + + /// Detect if the RPC endpoint is an Optimism chain + pub(crate) async fn detect_optimism_chain(&self, rpc_url: &str) -> Result<bool> { + info!("Detecting chain type from RPC endpoint..."); + + // Create Alloy provider + let url = rpc_url.parse().map_err(|e| eyre!("Invalid RPC URL '{}': {}", rpc_url, e))?; + let provider = ProviderBuilder::new().connect_http(url); + + // Check for Optimism predeploy at address 0x420000000000000000000000000000000000000F + let is_optimism = !provider + .get_code_at(address!("0x420000000000000000000000000000000000000F")) + .await? + .is_empty(); + + if is_optimism { + info!("Detected Optimism chain"); + } else { + info!("Detected Ethereum chain"); + } + + Ok(is_optimism) + } + + /// Get the path to the cached binary using explicit commit hash + pub(crate) fn get_cached_binary_path_for_commit( + &self, + commit: &str, + is_optimism: bool, + ) -> PathBuf { + let identifier = &commit[..8]; // Use first 8 chars of commit + + let binary_name = if is_optimism { + format!("op-reth_{}", identifier) + } else { + format!("reth_{}", identifier) + }; + + self.output_dir.join("bin").join(binary_name) + } + + /// Compile reth using cargo build and cache the binary + pub(crate) fn compile_reth(&self, commit: &str, is_optimism: bool) -> Result<()> { + // Validate that current git commit matches the expected commit + let current_commit = self.git_manager.get_current_commit()?; + if current_commit != commit { + return Err(eyre!( + "Git commit mismatch! 
Expected: {}, but currently at: {}", + &commit[..8], + &current_commit[..8] + )); + } + + let cached_path = self.get_cached_binary_path_for_commit(commit, is_optimism); + + // Check if cached binary already exists (since path contains commit hash, it's valid) + if cached_path.exists() { + info!("Using cached binary (commit: {})", &commit[..8]); + return Ok(()); + } + + info!("No cached binary found, compiling (commit: {})...", &commit[..8]); + + let binary_name = if is_optimism { "op-reth" } else { "reth" }; + + info!( + "Compiling {} with profiling configuration (commit: {})...", + binary_name, + &commit[..8] + ); + + let mut cmd = Command::new("cargo"); + cmd.arg("build").arg("--profile").arg("profiling"); + + // Add features + cmd.arg("--features").arg(&self.features); + info!("Using features: {}", self.features); + + // Add bin-specific arguments for optimism + if is_optimism { + cmd.arg("--bin") + .arg("op-reth") + .arg("--manifest-path") + .arg("crates/optimism/bin/Cargo.toml"); + } + + cmd.current_dir(&self.repo_root); + + // Set RUSTFLAGS for native CPU optimization + cmd.env("RUSTFLAGS", "-C target-cpu=native"); + + // Debug log the command + debug!("Executing cargo command: {:?}", cmd); + + let output = cmd.output().wrap_err("Failed to execute cargo build command")?; + + // Print stdout and stderr with prefixes at debug level + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + for line in stdout.lines() { + if !line.trim().is_empty() { + debug!("[CARGO] {}", line); + } + } + + for line in stderr.lines() { + if !line.trim().is_empty() { + debug!("[CARGO] {}", line); + } + } + + if !output.status.success() { + // Print all output when compilation fails + error!("Cargo build failed with exit code: {:?}", output.status.code()); + + if !stdout.trim().is_empty() { + error!("Cargo stdout:"); + for line in stdout.lines() { + error!(" {}", line); + } + } + + if !stderr.trim().is_empty() { + error!("Cargo stderr:"); + for line in stderr.lines() { + error!(" {}", line); + } + } + + return Err(eyre!("Compilation failed with exit code: {:?}", output.status.code())); + } + + info!("{} compilation completed", binary_name); + + // Copy the compiled binary to cache + let source_path = + PathBuf::from(&self.repo_root).join(format!("target/profiling/{}", binary_name)); + if !source_path.exists() { + return Err(eyre!("Compiled binary not found at {:?}", source_path)); + } + + // Create bin directory if it doesn't exist + let bin_dir = self.output_dir.join("bin"); + fs::create_dir_all(&bin_dir).wrap_err("Failed to create bin directory")?; + + // Copy binary to cache + fs::copy(&source_path, &cached_path).wrap_err("Failed to copy binary to cache")?; + + // Make the cached binary executable + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + let mut perms = fs::metadata(&cached_path)?.permissions(); + perms.set_mode(0o755); + fs::set_permissions(&cached_path, perms)?; + } + + info!("Cached compiled binary at: {:?}", cached_path); + Ok(()) + } + + /// Check if reth-bench is available in PATH + pub(crate) fn is_reth_bench_available(&self) -> bool { + match Command::new("which").arg("reth-bench").output() { + Ok(output) => { + if output.status.success() { + let path = String::from_utf8_lossy(&output.stdout); + info!("Found reth-bench: {}", path.trim()); + true + } else { + false + } + } + Err(_) => false, + } + } + + /// Check if samply is available in PATH + pub(crate) fn is_samply_available(&self) -> bool { + match 
Command::new("which").arg("samply").output() { + Ok(output) => { + if output.status.success() { + let path = String::from_utf8_lossy(&output.stdout); + info!("Found samply: {}", path.trim()); + true + } else { + false + } + } + Err(_) => false, + } + } + + /// Install samply using cargo + pub(crate) fn install_samply(&self) -> Result<()> { + info!("Installing samply via cargo..."); + + let mut cmd = Command::new("cargo"); + cmd.args(["install", "--locked", "samply"]); + + // Debug log the command + debug!("Executing cargo command: {:?}", cmd); + + let output = cmd.output().wrap_err("Failed to execute cargo install samply command")?; + + // Print stdout and stderr with prefixes at debug level + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + for line in stdout.lines() { + if !line.trim().is_empty() { + debug!("[CARGO-SAMPLY] {}", line); + } + } + + for line in stderr.lines() { + if !line.trim().is_empty() { + debug!("[CARGO-SAMPLY] {}", line); + } + } + + if !output.status.success() { + // Print all output when installation fails + error!("Cargo install samply failed with exit code: {:?}", output.status.code()); + + if !stdout.trim().is_empty() { + error!("Cargo stdout:"); + for line in stdout.lines() { + error!(" {}", line); + } + } + + if !stderr.trim().is_empty() { + error!("Cargo stderr:"); + for line in stderr.lines() { + error!(" {}", line); + } + } + + return Err(eyre!( + "samply installation failed with exit code: {:?}", + output.status.code() + )); + } + + info!("Samply installation completed"); + Ok(()) + } + + /// Ensure samply is available, installing if necessary + pub(crate) fn ensure_samply_available(&self) -> Result<()> { + if self.is_samply_available() { + Ok(()) + } else { + warn!("samply not found in PATH, installing..."); + self.install_samply() + } + } + + /// Ensure reth-bench is available, compiling if necessary + pub(crate) fn ensure_reth_bench_available(&self) -> Result<()> { + if self.is_reth_bench_available() { + Ok(()) + } else { + warn!("reth-bench not found in PATH, compiling and installing..."); + self.compile_reth_bench() + } + } + + /// Compile and install reth-bench using `make install-reth-bench` + pub(crate) fn compile_reth_bench(&self) -> Result<()> { + info!("Compiling and installing reth-bench..."); + + let mut cmd = Command::new("make"); + cmd.arg("install-reth-bench").current_dir(&self.repo_root); + + // Debug log the command + debug!("Executing make command: {:?}", cmd); + + let output = cmd.output().wrap_err("Failed to execute make install-reth-bench command")?; + + // Print stdout and stderr with prefixes at debug level + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + for line in stdout.lines() { + if !line.trim().is_empty() { + debug!("[MAKE-BENCH] {}", line); + } + } + + for line in stderr.lines() { + if !line.trim().is_empty() { + debug!("[MAKE-BENCH] {}", line); + } + } + + if !output.status.success() { + // Print all output when compilation fails + error!("Make install-reth-bench failed with exit code: {:?}", output.status.code()); + + if !stdout.trim().is_empty() { + error!("Make stdout:"); + for line in stdout.lines() { + error!(" {}", line); + } + } + + if !stderr.trim().is_empty() { + error!("Make stderr:"); + for line in stderr.lines() { + error!(" {}", line); + } + } + + return Err(eyre!( + "reth-bench compilation failed with exit code: {:?}", + output.status.code() + )); + } + + info!("Reth-bench 
compilation completed"); + Ok(()) + } +} diff --git a/bin/reth-bench-compare/src/git.rs b/bin/reth-bench-compare/src/git.rs new file mode 100644 index 00000000000..0da82b14018 --- /dev/null +++ b/bin/reth-bench-compare/src/git.rs @@ -0,0 +1,330 @@ +//! Git operations for branch management. + +use eyre::{eyre, Result, WrapErr}; +use std::process::Command; +use tracing::{info, warn}; + +/// Manages git operations for branch switching +#[derive(Debug, Clone)] +pub(crate) struct GitManager { + repo_root: String, +} + +impl GitManager { + /// Create a new `GitManager`, detecting the repository root + pub(crate) fn new() -> Result { + let output = Command::new("git") + .args(["rev-parse", "--show-toplevel"]) + .output() + .wrap_err("Failed to execute git command - is git installed?")?; + + if !output.status.success() { + return Err(eyre!("Not in a git repository or git command failed")); + } + + let repo_root = String::from_utf8(output.stdout) + .wrap_err("Git output is not valid UTF-8")? + .trim() + .to_string(); + + let manager = Self { repo_root }; + info!( + "Detected git repository at: {}, current reference: {}", + manager.repo_root(), + manager.get_current_ref()? + ); + + Ok(manager) + } + + /// Get the current git branch name + pub(crate) fn get_current_branch(&self) -> Result { + let output = Command::new("git") + .args(["branch", "--show-current"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to get current branch")?; + + if !output.status.success() { + return Err(eyre!("Failed to determine current branch")); + } + + let branch = String::from_utf8(output.stdout) + .wrap_err("Branch name is not valid UTF-8")? + .trim() + .to_string(); + + if branch.is_empty() { + return Err(eyre!("Not on a named branch (detached HEAD?)")); + } + + Ok(branch) + } + + /// Get the current git reference (branch name, tag, or commit hash) + pub(crate) fn get_current_ref(&self) -> Result { + // First try to get branch name + if let Ok(branch) = self.get_current_branch() { + return Ok(branch); + } + + // If not on a branch, check if we're on a tag + let tag_output = Command::new("git") + .args(["describe", "--exact-match", "--tags", "HEAD"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to check for tag")?; + + if tag_output.status.success() { + let tag = String::from_utf8(tag_output.stdout) + .wrap_err("Tag name is not valid UTF-8")? + .trim() + .to_string(); + return Ok(tag); + } + + // If not on a branch or tag, return the commit hash + let commit_output = Command::new("git") + .args(["rev-parse", "HEAD"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to get current commit")?; + + if !commit_output.status.success() { + return Err(eyre!("Failed to get current commit hash")); + } + + let commit_hash = String::from_utf8(commit_output.stdout) + .wrap_err("Commit hash is not valid UTF-8")? 
+ .trim() + .to_string(); + + Ok(commit_hash) + } + + /// Check if the git working directory has uncommitted changes to tracked files + pub(crate) fn validate_clean_state(&self) -> Result<()> { + let output = Command::new("git") + .args(["status", "--porcelain"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to check git status")?; + + if !output.status.success() { + return Err(eyre!("Git status command failed")); + } + + let status_output = + String::from_utf8(output.stdout).wrap_err("Git status output is not valid UTF-8")?; + + // Check for uncommitted changes to tracked files + // Status codes: M = modified, A = added, D = deleted, R = renamed, C = copied, U = updated + // ?? = untracked files (we want to ignore these) + let has_uncommitted_changes = status_output.lines().any(|line| { + if line.len() >= 2 { + let status = &line[0..2]; + // Ignore untracked files (??) and ignored files (!!) + !matches!(status, "??" | "!!") + } else { + false + } + }); + + if has_uncommitted_changes { + warn!("Git working directory has uncommitted changes to tracked files:"); + for line in status_output.lines() { + if line.len() >= 2 && !matches!(&line[0..2], "??" | "!!") { + warn!(" {}", line); + } + } + return Err(eyre!( + "Git working directory has uncommitted changes to tracked files. Please commit or stash changes before running benchmark comparison." + )); + } + + // Check if there are untracked files and log them as info + let untracked_files: Vec<&str> = + status_output.lines().filter(|line| line.starts_with("??")).collect(); + + if !untracked_files.is_empty() { + info!( + "Git working directory has {} untracked files (this is OK)", + untracked_files.len() + ); + } + + info!("Git working directory is clean (no uncommitted changes to tracked files)"); + Ok(()) + } + + /// Fetch all refs from remote to ensure we have latest branches and tags + pub(crate) fn fetch_all(&self) -> Result<()> { + let output = Command::new("git") + .args(["fetch", "--all", "--tags", "--quiet", "--force"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to fetch latest refs")?; + + if output.status.success() { + info!("Fetched latest refs"); + } else { + let stderr = String::from_utf8_lossy(&output.stderr); + // Only warn if there's actual error content, not just fetch progress + if !stderr.trim().is_empty() && !stderr.contains("-> origin/") { + warn!("Git fetch encountered issues (continuing anyway): {}", stderr); + } + } + + Ok(()) + } + + /// Validate that the specified git references exist (branches, tags, or commits) + pub(crate) fn validate_refs(&self, refs: &[&str]) -> Result<()> { + for &git_ref in refs { + // Try branch first, then tag, then commit + let branch_check = Command::new("git") + .args(["rev-parse", "--verify", &format!("refs/heads/{git_ref}")]) + .current_dir(&self.repo_root) + .output(); + + let tag_check = Command::new("git") + .args(["rev-parse", "--verify", &format!("refs/tags/{git_ref}")]) + .current_dir(&self.repo_root) + .output(); + + let commit_check = Command::new("git") + .args(["rev-parse", "--verify", &format!("{git_ref}^{{commit}}")]) + .current_dir(&self.repo_root) + .output(); + + let found = if let Ok(output) = branch_check && + output.status.success() + { + info!("Validated branch exists: {}", git_ref); + true + } else if let Ok(output) = tag_check && + output.status.success() + { + info!("Validated tag exists: {}", git_ref); + true + } else if let Ok(output) = commit_check && + output.status.success() + { + info!("Validated commit exists: {}", 
git_ref); + true + } else { + false + }; + + if !found { + return Err(eyre!( + "Git reference '{}' does not exist as branch, tag, or commit", + git_ref + )); + } + } + + Ok(()) + } + + /// Switch to the specified git reference (branch, tag, or commit) + pub(crate) fn switch_ref(&self, git_ref: &str) -> Result<()> { + // First checkout the reference + let output = Command::new("git") + .args(["checkout", git_ref]) + .current_dir(&self.repo_root) + .output() + .wrap_err_with(|| format!("Failed to switch to reference '{git_ref}'"))?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(eyre!("Failed to switch to reference '{}': {}", git_ref, stderr)); + } + + // Check if this is a branch that tracks a remote and pull latest changes + let is_branch = Command::new("git") + .args(["show-ref", "--verify", "--quiet", &format!("refs/heads/{git_ref}")]) + .current_dir(&self.repo_root) + .status() + .map(|s| s.success()) + .unwrap_or(false); + + if is_branch { + // Check if the branch tracks a remote + let tracking_output = Command::new("git") + .args([ + "rev-parse", + "--abbrev-ref", + "--symbolic-full-name", + &format!("{git_ref}@{{upstream}}"), + ]) + .current_dir(&self.repo_root) + .output(); + + if let Ok(output) = tracking_output && + output.status.success() + { + let upstream = String::from_utf8_lossy(&output.stdout).trim().to_string(); + if !upstream.is_empty() && upstream != format!("{git_ref}@{{upstream}}") { + // Branch tracks a remote, pull latest changes + info!("Pulling latest changes for branch: {}", git_ref); + + let pull_output = Command::new("git") + .args(["pull", "--ff-only"]) + .current_dir(&self.repo_root) + .output() + .wrap_err_with(|| { + format!("Failed to pull latest changes for branch '{git_ref}'") + })?; + + if pull_output.status.success() { + info!("Successfully pulled latest changes for branch: {}", git_ref); + } else { + let stderr = String::from_utf8_lossy(&pull_output.stderr); + warn!("Failed to pull latest changes for branch '{}': {}", git_ref, stderr); + // Continue anyway, we'll use whatever version we have + } + } + } + } + + // Verify the checkout succeeded by checking the current commit + let current_commit_output = Command::new("git") + .args(["rev-parse", "HEAD"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to get current commit")?; + + if !current_commit_output.status.success() { + return Err(eyre!("Failed to verify git checkout")); + } + + info!("Switched to reference: {}", git_ref); + Ok(()) + } + + /// Get the current commit hash + pub(crate) fn get_current_commit(&self) -> Result { + let output = Command::new("git") + .args(["rev-parse", "HEAD"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to get current commit")?; + + if !output.status.success() { + return Err(eyre!("Failed to get current commit hash")); + } + + let commit_hash = String::from_utf8(output.stdout) + .wrap_err("Commit hash is not valid UTF-8")? + .trim() + .to_string(); + + Ok(commit_hash) + } + + /// Get the repository root path + pub(crate) fn repo_root(&self) -> &str { + &self.repo_root + } +} diff --git a/bin/reth-bench-compare/src/main.rs b/bin/reth-bench-compare/src/main.rs new file mode 100644 index 00000000000..e866afb2509 --- /dev/null +++ b/bin/reth-bench-compare/src/main.rs @@ -0,0 +1,45 @@ +//! # reth-bench-compare +//! +//! Automated tool for comparing reth performance between two git branches. +//! This tool automates the complete workflow of compiling, running, and benchmarking +//! 
reth on different branches to provide meaningful performance comparisons. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +#[global_allocator] +static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); + +mod benchmark; +mod cli; +mod comparison; +mod compilation; +mod git; +mod node; + +use clap::Parser; +use cli::{run_comparison, Args}; +use eyre::Result; +use reth_cli_runner::CliRunner; + +fn main() -> Result<()> { + // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. + if std::env::var_os("RUST_BACKTRACE").is_none() { + unsafe { + std::env::set_var("RUST_BACKTRACE", "1"); + } + } + + let args = Args::parse(); + + // Initialize tracing + let _guard = args.init_tracing()?; + + // Run until either exit or sigint or sigterm + let runner = CliRunner::try_default_runtime()?; + runner.run_command_until_exit(|ctx| run_comparison(args, ctx)) +} diff --git a/bin/reth-bench-compare/src/node.rs b/bin/reth-bench-compare/src/node.rs new file mode 100644 index 00000000000..01eb9961f9f --- /dev/null +++ b/bin/reth-bench-compare/src/node.rs @@ -0,0 +1,511 @@ +//! Node management for starting, stopping, and controlling reth instances. + +use crate::cli::Args; +use alloy_provider::{Provider, ProviderBuilder}; +use alloy_rpc_types_eth::SyncStatus; +use eyre::{eyre, OptionExt, Result, WrapErr}; +#[cfg(unix)] +use nix::sys::signal::{killpg, Signal}; +#[cfg(unix)] +use nix::unistd::Pid; +use reth_chainspec::Chain; +use std::{fs, path::PathBuf, time::Duration}; +use tokio::{ + fs::File as AsyncFile, + io::{AsyncBufReadExt, AsyncWriteExt, BufReader as AsyncBufReader}, + process::Command, + time::{sleep, timeout}, +}; +use tracing::{debug, info, warn}; + +/// Manages reth node lifecycle and operations +pub(crate) struct NodeManager { + datadir: Option, + metrics_port: u16, + chain: Chain, + use_sudo: bool, + binary_path: Option, + enable_profiling: bool, + output_dir: PathBuf, + additional_reth_args: Vec, + comparison_dir: Option, +} + +impl NodeManager { + /// Create a new `NodeManager` with configuration from CLI args + pub(crate) fn new(args: &Args) -> Self { + Self { + datadir: Some(args.datadir_path().to_string_lossy().to_string()), + metrics_port: args.metrics_port, + chain: args.chain, + use_sudo: args.sudo, + binary_path: None, + enable_profiling: args.profile, + output_dir: args.output_dir_path(), + additional_reth_args: args.reth_args.clone(), + comparison_dir: None, + } + } + + /// Set the comparison directory path for logging + pub(crate) fn set_comparison_dir(&mut self, dir: PathBuf) { + self.comparison_dir = Some(dir); + } + + /// Get the log file path for a given reference type + fn get_log_file_path(&self, ref_type: &str) -> Result { + let comparison_dir = self + .comparison_dir + .as_ref() + .ok_or_eyre("Comparison directory not set. 
Call set_comparison_dir first.")?;
+
+ // The comparison directory already contains the full path to results/
+ let log_dir = comparison_dir.join(ref_type);
+
+ // Create the directory if it doesn't exist
+ fs::create_dir_all(&log_dir)
+ .wrap_err(format!("Failed to create log directory: {:?}", log_dir))?;
+
+ let log_file = log_dir.join("reth_node.log");
+ Ok(log_file)
+ }
+
+ /// Get the perf event max sample rate from the system, capped at 10000
+ fn get_perf_sample_rate(&self) -> Option<String> {
+ let perf_rate_file = "/proc/sys/kernel/perf_event_max_sample_rate";
+ if let Ok(content) = fs::read_to_string(perf_rate_file) {
+ let rate_str = content.trim();
+ if !rate_str.is_empty() {
+ if let Ok(system_rate) = rate_str.parse::<u64>() {
+ let capped_rate = std::cmp::min(system_rate, 10000);
+ info!(
+ "Detected perf_event_max_sample_rate: {}, using: {}",
+ system_rate, capped_rate
+ );
+ return Some(capped_rate.to_string());
+ }
+ warn!("Failed to parse perf_event_max_sample_rate: {}", rate_str);
+ }
+ }
+ None
+ }
+
+ /// Get the absolute path to samply using 'which' command
+ async fn get_samply_path(&self) -> Result<String> {
+ let output = Command::new("which")
+ .arg("samply")
+ .output()
+ .await
+ .wrap_err("Failed to execute 'which samply' command")?;
+
+ if !output.status.success() {
+ return Err(eyre!("samply not found in PATH"));
+ }
+
+ let samply_path = String::from_utf8(output.stdout)
+ .wrap_err("samply path is not valid UTF-8")?
+ .trim()
+ .to_string();
+
+ if samply_path.is_empty() {
+ return Err(eyre!("which samply returned empty path"));
+ }
+
+ Ok(samply_path)
+ }
+
+ /// Build reth arguments as a vector of strings
+ fn build_reth_args(
+ &self,
+ binary_path_str: &str,
+ additional_args: &[String],
+ ) -> (Vec<String>, String) {
+ let mut reth_args = vec![binary_path_str.to_string(), "node".to_string()];
+
+ // Add chain argument (skip for mainnet as it's the default)
+ let chain_str = self.chain.to_string();
+ if chain_str != "mainnet" {
+ reth_args.extend_from_slice(&["--chain".to_string(), chain_str.clone()]);
+ }
+
+ // Add datadir if specified
+ if let Some(ref datadir) = self.datadir {
+ reth_args.extend_from_slice(&["--datadir".to_string(), datadir.clone()]);
+ }
+
+ // Add reth-specific arguments
+ let metrics_arg = format!("0.0.0.0:{}", self.metrics_port);
+ reth_args.extend_from_slice(&[
+ "--engine.accept-execution-requests-hash".to_string(),
+ "--metrics".to_string(),
+ metrics_arg,
+ "--http".to_string(),
+ "--http.api".to_string(),
+ "eth".to_string(),
+ "--disable-discovery".to_string(),
+ "--trusted-only".to_string(),
+ ]);
+
+ // Add any additional arguments passed via command line (common to both baseline and
+ // feature)
+ reth_args.extend_from_slice(&self.additional_reth_args);
+
+ // Add reference-specific additional arguments
+ reth_args.extend_from_slice(additional_args);
+
+ (reth_args, chain_str)
+ }
+
+ /// Create a command for profiling mode
+ async fn create_profiling_command(
+ &self,
+ ref_type: &str,
+ reth_args: &[String],
+ ) -> Result<Command> {
+ // Create profiles directory if it doesn't exist
+ let profile_dir = self.output_dir.join("profiles");
+ fs::create_dir_all(&profile_dir).wrap_err("Failed to create profiles directory")?;
+
+ let profile_path = profile_dir.join(format!("{}.json.gz", ref_type));
+ info!("Starting reth node with samply profiling...");
+ info!("Profile output: {:?}", profile_path);
+
+ // Get absolute path to samply
+ let samply_path = self.get_samply_path().await?;
+
+ let mut cmd = if self.use_sudo {
+ let mut sudo_cmd = Command::new("sudo");
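+ // When sudo is enabled, samply itself is launched under sudo so profiling has the permissions it needs;
+ // the full reth command is appended after the "--" separator below.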
+ sudo_cmd.arg(&samply_path);
+ sudo_cmd
+ } else {
+ Command::new(&samply_path)
+ };
+
+ // Add samply arguments
+ cmd.args(["record", "--save-only", "-o", &profile_path.to_string_lossy()]);
+
+ // Add rate argument if available
+ if let Some(rate) = self.get_perf_sample_rate() {
+ cmd.args(["--rate", &rate]);
+ }
+
+ // Add separator and complete reth command
+ cmd.arg("--");
+ cmd.args(reth_args);
+
+ Ok(cmd)
+ }
+
+ /// Create a command for direct reth execution
+ fn create_direct_command(&self, reth_args: &[String]) -> Command {
+ let binary_path = &reth_args[0];
+
+ if self.use_sudo {
+ info!("Starting reth node with sudo...");
+ let mut cmd = Command::new("sudo");
+ cmd.args(reth_args);
+ cmd
+ } else {
+ info!("Starting reth node...");
+ let mut cmd = Command::new(binary_path);
+ cmd.args(&reth_args[1..]); // Skip the binary path since it's the command
+ cmd
+ }
+ }
+
+ /// Start a reth node using the specified binary path and return the process handle
+ pub(crate) async fn start_node(
+ &mut self,
+ binary_path: &std::path::Path,
+ _git_ref: &str,
+ ref_type: &str,
+ additional_args: &[String],
+ ) -> Result<tokio::process::Child> {
+ // Store the binary path for later use (e.g., in unwind_to_block)
+ self.binary_path = Some(binary_path.to_path_buf());
+
+ let binary_path_str = binary_path.to_string_lossy();
+ let (reth_args, _) = self.build_reth_args(&binary_path_str, additional_args);
+
+ // Log additional arguments if any
+ if !self.additional_reth_args.is_empty() {
+ info!("Using common additional reth arguments: {:?}", self.additional_reth_args);
+ }
+ if !additional_args.is_empty() {
+ info!("Using reference-specific additional reth arguments: {:?}", additional_args);
+ }
+
+ let mut cmd = if self.enable_profiling {
+ self.create_profiling_command(ref_type, &reth_args).await?
+ } else { + self.create_direct_command(&reth_args) + }; + + // Set process group for better signal handling + #[cfg(unix)] + { + cmd.process_group(0); + } + + debug!("Executing reth command: {cmd:?}"); + + let mut child = cmd + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .kill_on_drop(true) // Kill on drop so that on Ctrl-C for parent process we stop all child processes + .spawn() + .wrap_err("Failed to start reth node")?; + + info!( + "Reth node started with PID: {:?} (binary: {})", + child.id().ok_or_eyre("Reth node is not running")?, + binary_path_str + ); + + // Prepare log file path + let log_file_path = self.get_log_file_path(ref_type)?; + info!("Reth node logs will be saved to: {:?}", log_file_path); + + // Stream stdout and stderr with prefixes at debug level and to log file + if let Some(stdout) = child.stdout.take() { + let log_file = AsyncFile::create(&log_file_path) + .await + .wrap_err(format!("Failed to create log file: {:?}", log_file_path))?; + tokio::spawn(async move { + let reader = AsyncBufReader::new(stdout); + let mut lines = reader.lines(); + let mut log_file = log_file; + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH] {}", line); + // Write to log file (reth already includes timestamps) + let log_line = format!("{}\n", line); + if let Err(e) = log_file.write_all(log_line.as_bytes()).await { + debug!("Failed to write to log file: {}", e); + } + } + }); + } + + if let Some(stderr) = child.stderr.take() { + let log_file = AsyncFile::options() + .create(true) + .append(true) + .open(&log_file_path) + .await + .wrap_err(format!("Failed to open log file for stderr: {:?}", log_file_path))?; + tokio::spawn(async move { + let reader = AsyncBufReader::new(stderr); + let mut lines = reader.lines(); + let mut log_file = log_file; + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH] {}", line); + // Write to log file (reth already includes timestamps) + let log_line = format!("{}\n", line); + if let Err(e) = log_file.write_all(log_line.as_bytes()).await { + debug!("Failed to write to log file: {}", e); + } + } + }); + } + + // Give the node a moment to start up + sleep(Duration::from_secs(5)).await; + + Ok(child) + } + + /// Wait for the node to be ready and return its current tip + pub(crate) async fn wait_for_node_ready_and_get_tip(&self) -> Result { + info!("Waiting for node to be ready and synced..."); + + let max_wait = Duration::from_secs(120); // 2 minutes to allow for sync + let check_interval = Duration::from_secs(2); + let rpc_url = "http://localhost:8545"; + + // Create Alloy provider + let url = rpc_url.parse().map_err(|e| eyre!("Invalid RPC URL '{}': {}", rpc_url, e))?; + let provider = ProviderBuilder::new().connect_http(url); + + timeout(max_wait, async { + loop { + // First check if RPC is up and node is not syncing + match provider.syncing().await { + Ok(sync_result) => { + match sync_result { + SyncStatus::Info(sync_info) => { + debug!("Node is still syncing {sync_info:?}, waiting..."); + } + _ => { + // Node is not syncing, now get the tip + match provider.get_block_number().await { + Ok(tip) => { + info!("Node is ready and not syncing at block: {}", tip); + return Ok(tip); + } + Err(e) => { + debug!("Failed to get block number: {}", e); + } + } + } + } + } + Err(e) => { + debug!("Node RPC not ready yet or failed to check sync status: {}", e); + } + } + + sleep(check_interval).await; + } + }) + .await + .wrap_err("Timed out waiting for node to be ready and synced")? 
+ } + + /// Stop the reth node gracefully + pub(crate) async fn stop_node(&self, child: &mut tokio::process::Child) -> Result<()> { + let pid = child.id().expect("Child process ID should be available"); + + // Check if the process has already exited + match child.try_wait() { + Ok(Some(status)) => { + info!("Reth node (PID: {}) has already exited with status: {:?}", pid, status); + return Ok(()); + } + Ok(None) => { + // Process is still running, proceed to stop it + info!("Stopping process gracefully with SIGINT (PID: {})...", pid); + } + Err(e) => { + return Err(eyre!("Failed to check process status: {}", e)); + } + } + + #[cfg(unix)] + { + // Send SIGINT to process group to mimic Ctrl-C behavior + let nix_pgid = Pid::from_raw(pid as i32); + + match killpg(nix_pgid, Signal::SIGINT) { + Ok(()) => {} + Err(nix::errno::Errno::ESRCH) => { + info!("Process group {} has already exited", pid); + } + Err(e) => { + return Err(eyre!("Failed to send SIGINT to process group {}: {}", pid, e)); + } + } + } + + #[cfg(not(unix))] + { + // On non-Unix systems, fall back to using external kill command + let output = Command::new("taskkill") + .args(["/PID", &pid.to_string(), "/F"]) + .output() + .await + .wrap_err("Failed to execute taskkill command")?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + // Check if the error is because the process doesn't exist + if stderr.contains("not found") || stderr.contains("not exist") { + info!("Process {} has already exited", pid); + } else { + return Err(eyre!("Failed to kill process {}: {}", pid, stderr)); + } + } + } + + // Wait for the process to exit + match child.wait().await { + Ok(status) => { + info!("Reth node (PID: {}) exited with status: {:?}", pid, status); + } + Err(e) => { + // If we get an error here, it might be because the process already exited + debug!("Error waiting for process exit (may have already exited): {}", e); + } + } + + Ok(()) + } + + /// Unwind the node to a specific block + pub(crate) async fn unwind_to_block(&self, block_number: u64) -> Result<()> { + if self.use_sudo { + info!("Unwinding node to block: {} (with sudo)", block_number); + } else { + info!("Unwinding node to block: {}", block_number); + } + + // Use the binary path from the last start_node call, or fallback to default + let binary_path = self + .binary_path + .as_ref() + .map(|p| p.to_string_lossy().to_string()) + .unwrap_or_else(|| "./target/profiling/reth".to_string()); + + let mut cmd = if self.use_sudo { + let mut sudo_cmd = Command::new("sudo"); + sudo_cmd.args([&binary_path, "stage", "unwind"]); + sudo_cmd + } else { + let mut reth_cmd = Command::new(&binary_path); + reth_cmd.args(["stage", "unwind"]); + reth_cmd + }; + + // Add chain argument (skip for mainnet as it's the default) + let chain_str = self.chain.to_string(); + if chain_str != "mainnet" { + cmd.args(["--chain", &chain_str]); + } + + // Add datadir if specified + if let Some(ref datadir) = self.datadir { + cmd.args(["--datadir", datadir]); + } + + cmd.args(["to-block", &block_number.to_string()]); + + // Debug log the command + debug!("Executing reth unwind command: {:?}", cmd); + + let mut child = cmd + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .spawn() + .wrap_err("Failed to start unwind command")?; + + // Stream stdout and stderr with prefixes in real-time + if let Some(stdout) = child.stdout.take() { + tokio::spawn(async move { + let reader = AsyncBufReader::new(stdout); + let mut lines = reader.lines(); + while let 
Ok(Some(line)) = lines.next_line().await { + debug!("[RETH-UNWIND] {}", line); + } + }); + } + + if let Some(stderr) = child.stderr.take() { + tokio::spawn(async move { + let reader = AsyncBufReader::new(stderr); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH-UNWIND] {}", line); + } + }); + } + + // Wait for the command to complete + let status = child.wait().await.wrap_err("Failed to wait for unwind command")?; + + if !status.success() { + return Err(eyre!("Unwind command failed with exit code: {:?}", status.code())); + } + + info!("Unwound to block: {}", block_number); + Ok(()) + } +} From 29761637efe1a47bd4f568fcea605f2fbdf750da Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 4 Nov 2025 21:17:22 +0400 Subject: [PATCH 322/371] fix: use cost when checking fee cap (#19493) --- crates/transaction-pool/src/validate/eth.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 1436093d5bf..8f427e5d9b3 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -396,15 +396,12 @@ where match self.tx_fee_cap { Some(0) | None => {} // Skip if cap is 0 or None Some(tx_fee_cap_wei) => { - // max possible tx fee is (gas_price * gas_limit) - // (if EIP1559) max possible tx fee is (max_fee_per_gas * gas_limit) - let gas_price = transaction.max_fee_per_gas(); - let max_tx_fee_wei = gas_price.saturating_mul(transaction_gas_limit as u128); + let max_tx_fee_wei = transaction.cost().saturating_sub(transaction.value()); if max_tx_fee_wei > tx_fee_cap_wei { return Err(TransactionValidationOutcome::Invalid( transaction, InvalidPoolTransactionError::ExceedsFeeCap { - max_tx_fee_wei, + max_tx_fee_wei: max_tx_fee_wei.saturating_to(), tx_fee_cap_wei, }, )) From fdcc540492da73744b3e17cc4944e0800a29c5c6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 4 Nov 2025 18:52:09 +0100 Subject: [PATCH 323/371] fix: spawn block fetching blocking (#19491) Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- crates/rpc/rpc/src/trace.rs | 156 ++++++++++++++++++++---------------- 1 file changed, 87 insertions(+), 69 deletions(-) diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 6e4205eead4..e1e6bc26544 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -363,7 +363,7 @@ where ) -> Result, Eth::Error> { // We'll reuse the matcher across multiple blocks that are traced in parallel let matcher = Arc::new(filter.matcher()); - let TraceFilter { from_block, to_block, after, count, .. } = filter; + let TraceFilter { from_block, to_block, mut after, count, .. } = filter; let start = from_block.unwrap_or(0); let latest_block = self.provider().best_block_number().map_err(Eth::Error::from_eth_err)?; @@ -389,80 +389,97 @@ where .into()) } - // fetch all blocks in that range - let blocks = self - .provider() - .recovered_block_range(start..=end) - .map_err(Eth::Error::from_eth_err)? 
- .into_iter() - .map(Arc::new) - .collect::>(); - - // trace all blocks - let mut block_traces = Vec::with_capacity(blocks.len()); - for block in &blocks { - let matcher = matcher.clone(); - let traces = self.eth_api().trace_block_until( - block.hash().into(), - Some(block.clone()), - None, - TracingInspectorConfig::default_parity(), - move |tx_info, mut ctx| { - let mut traces = ctx - .take_inspector() - .into_parity_builder() - .into_localized_transaction_traces(tx_info); - traces.retain(|trace| matcher.matches(&trace.trace)); - Ok(Some(traces)) - }, - ); - block_traces.push(traces); - } - - let block_traces = futures::future::try_join_all(block_traces).await?; - let mut all_traces = block_traces - .into_iter() - .flatten() - .flat_map(|traces| traces.into_iter().flatten().flat_map(|traces| traces.into_iter())) - .collect::>(); - - // add reward traces for all blocks - for block in &blocks { - if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? { - all_traces.extend( - self.extract_reward_traces( - block.header(), - block.body().ommers(), - base_block_reward, - ) - .into_iter() - .filter(|trace| matcher.matches(&trace.trace)), + let mut all_traces = Vec::new(); + let mut block_traces = Vec::with_capacity(self.inner.eth_config.max_tracing_requests); + for chunk_start in (start..end).step_by(self.inner.eth_config.max_tracing_requests) { + let chunk_end = + std::cmp::min(chunk_start + self.inner.eth_config.max_tracing_requests as u64, end); + + // fetch all blocks in that chunk + let blocks = self + .eth_api() + .spawn_blocking_io(move |this| { + Ok(this + .provider() + .recovered_block_range(chunk_start..=chunk_end) + .map_err(Eth::Error::from_eth_err)? + .into_iter() + .map(Arc::new) + .collect::>()) + }) + .await?; + + // trace all blocks + for block in &blocks { + let matcher = matcher.clone(); + let traces = self.eth_api().trace_block_until( + block.hash().into(), + Some(block.clone()), + None, + TracingInspectorConfig::default_parity(), + move |tx_info, mut ctx| { + let mut traces = ctx + .take_inspector() + .into_parity_builder() + .into_localized_transaction_traces(tx_info); + traces.retain(|trace| matcher.matches(&trace.trace)); + Ok(Some(traces)) + }, ); - } else { - // no block reward, means we're past the Paris hardfork and don't expect any rewards - // because the blocks in ascending order - break + block_traces.push(traces); } - } - // Skips the first `after` number of matching traces. - // If `after` is greater than or equal to the number of matched traces, it returns an empty - // array. - if let Some(after) = after.map(|a| a as usize) { - if after < all_traces.len() { - all_traces.drain(..after); - } else { - return Ok(vec![]) + #[allow(clippy::iter_with_drain)] + let block_traces = futures::future::try_join_all(block_traces.drain(..)).await?; + all_traces.extend(block_traces.into_iter().flatten().flat_map(|traces| { + traces.into_iter().flatten().flat_map(|traces| traces.into_iter()) + })); + + // add reward traces for all blocks + for block in &blocks { + if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? 
{ + all_traces.extend( + self.extract_reward_traces( + block.header(), + block.body().ommers(), + base_block_reward, + ) + .into_iter() + .filter(|trace| matcher.matches(&trace.trace)), + ); + } else { + // no block reward, means we're past the Paris hardfork and don't expect any + // rewards because the blocks in ascending order + break + } } - } - // Return at most `count` of traces - if let Some(count) = count { - let count = count as usize; - if count < all_traces.len() { - all_traces.truncate(count); + // Skips the first `after` number of matching traces. + if let Some(cutoff) = after.map(|a| a as usize) && + cutoff < all_traces.len() + { + all_traces.drain(..cutoff); + // we removed the first `after` traces + after = None; } - }; + + // Return at most `count` of traces + if let Some(count) = count { + let count = count as usize; + if count < all_traces.len() { + all_traces.truncate(count); + return Ok(all_traces) + } + }; + } + + // If `after` is greater than or equal to the number of matched traces, it returns an + // empty array. + if let Some(cutoff) = after.map(|a| a as usize) && + cutoff >= all_traces.len() + { + return Ok(vec![]) + } Ok(all_traces) } @@ -692,6 +709,7 @@ where /// # Limitations /// This currently requires block filter fields, since reth does not have address indices yet. async fn trace_filter(&self, filter: TraceFilter) -> RpcResult> { + let _permit = self.inner.blocking_task_guard.clone().acquire_many_owned(2).await; Ok(Self::trace_filter(self, filter).await.map_err(Into::into)?) } From c3a60fa75a30c8b527b63143dc060082bdfee87e Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Tue, 4 Nov 2025 13:39:34 -0500 Subject: [PATCH 324/371] chore(op-reth/scr): update superchain-registry configs. Commit 9e3f71cee0e4e2acb4864cb00f5fbee3555d8e9f (#19495) --- .../chainspec/res/superchain-configs.tar | Bin 9879040 -> 9879040 bytes .../chainspec/res/superchain_registry_commit | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/optimism/chainspec/res/superchain-configs.tar b/crates/optimism/chainspec/res/superchain-configs.tar index 86f2ab866ccf5e1a9d866a9ce10df4a60c4d1571..80345a284388a8117108cba79d5eca74621e0fe7 100644 GIT binary patch delta 11514 zcma)CYj9Q76?UKGCee_@Al0BC5vgVyP0wqelZXgS5ibudT0!F@LXsN@29cLkt%|gI zViXmwaxFN;$KX_d@aP%cLj6&+j-9DGQ?1&Htvd41%1CFJ>9lH_e&^m`$liNh!^|T2 zlQrx4t#7S;)_(Ir=~~F9?VFaLFX~UQ@z3=OKa*c=;ndr%8+alJ9M`V59oGrO3H5e% zVXAA?>XoY&uE5`{TAW$7p>~|)J2t&vw$ZBCP|;&hmgrWMLFozg!lW$bEX_uyMK~fP zv}8H^C|glFLUVlAhi^u6py*(>rmSOR7C})!D2jRci2d@&=wx~Mm)X-!y4GNh=R4l; zFuRWJdqmD+UYR~|#9DkpOO{t2jmMojufm|o4gw|ZuI&k%&=m6t=@Nq$&vVsE3|ybm zlI7bD#MP5JP4;*q42H+Y30#468EZP`7yP3nh4OEozAS8p1 zaP5z)nm@FTQ@noiexqVfRgCD_wBG{<8RKp#0ffAIK zERT?f`Q|)0dL9Co&clsndxWC|-gP`5aA3@4mal|8OWrv?@b@k~Yz}-F4tLZE?2vAw zm4tb|TDGAZQcmXhHg+VgyQ|HZKhIYp4}$`S@HFO;*&x>ws#`AEjfHq^H0MvRr@;29 zO33kj?DKf^6E_-N6T%&ClxsT)_8F5H^KI`P-;&Qf_m(k%B2X$LNgy@5vit&6#?G$? zDS7B}fp7`6r*U0#e4cpKdw! 
z>#qI3GnyIL%J?e=a|I+mTsh?b*7)TkFPMX>;P`d=30&uuSF0jBT(?$r9?&bwZRs(NRehWK|ja_i?x=pF5HL!5D|Y){4uqsW?bp z!~QuGU02RMg`H*0n^0+s6S^LS0R@XsXI(}(@k6ci1CMfEqO*QoQW~^4ZoaAHs&mQ${1;NglG>OG;p2cIW;l<&4R!(HKGB*6a&hYGQ#R zeIduRvOeou#Zxa~u2He)>DH1nL-N_o40wLBKnzM)Zy3J?yTb#LQaC&NbQ>Xr&G2MUj9kpN+( zzq*EPH&$ezv5O!~b|IpS?j#bU=QqSCC)5-Y(?_+QN)MgDacGM+u+2kmc0a}{jq!6F z4RHdSnmk#)8;$n_)p)iF4M19|MgdC)FFe> z;Lxc^FlGjgfGI( z3&B6gUKtAh%Fo&VjP2xW&5z=xMv&x3hBg0DqWOiVA_~YN)e$J(&*kW^n3*Q~+T;}> z*{711Sj&f+Yf8E0n<@^gxEf6X9!iu+o`%ZmYU>s2+ll#<{B;1`)6p8%iFx7HmiC@(i~i#0)h z(PZ6cG!px$I`$lg?n6Nt>1ufbK_c>00O@Jg=3_=)A-`_5ZZ+EH3oVEMTtcw~uYI(@ zmLyh?ohFSA?oEov09i_7p+T>*o?;0R$$i970PoGSwis!xfkMzbu;gu2EPWl*NWPup z!UALHvaGy?*^@tQk(Xtx%UNmQH`}>;rn57?rBF94vc{M~f|G7=G*0S5_HMRb zK9)UCqQq916$`a2u$|H^u*S+B!-Ra@E|;BmSk-3DD(l0YheX3He-ps9&N2cQ7=Tg` zAZQLnfFyswN}DZGd4P+Bo~o-ssM=^58Myfvjkp=FzmjjUMtrq(l1YipP$!+MbB6`Bv+UY$%YUKFX3`VO_D!jGDz6MSC0{{BZ_QeamQp9 zbvu?#(uf<*k#Sjg#; zA37twR9AhRj&Ywxvz zAb}GT)Y$u$rq55hPpjb2QTnh-U{<$Lkd8m-_ z?5XVs8bq~<NRHKhsM3WcJ9#uSO;zUGMUYmt24g$9W51dbt! zjToZe1rf6*%harkdjG7Mm-wvh{2~1e8o_7C#qfQqxwJi%lQ?*#re^41Y44dTQ+uWv zN*m_ky-FD@p_6<9V5Ijsn0F+q>;EC7Ro3}Z%#bCs5kylvWTso0FabB4jTq}zOTE5fTe&5vqkj$zT1bu%EB z$|>v8zTR!PW@wKo7)~VoN4#bxWES2jW~cAhTm@=5`a3YF36WaN%>0nwJUeRKJV!Qe zzRZMLKm^0#mgK|{$PMJjlC!x6L`aZTd)f~}^01bJjh{ue?Q+vFKm z^4y{Xt)IUda*fP9-!@9G?L7Zi#}vpCJy8bQz^|`0@P8jWihfYU<)5gX9DpAabmw)D z>t$-^4ZU-S!TgEs1Eh_6<2DwF+ z^)0_i8+snv1?#aoNQ-@Bu%jOxGjRoEC1e$3HRM*v8pux}w?S@)+yVI+f5vDoh7wCz5iL&)Hb;TT1rkbmwZ9vm^4BaZ?^)$N#^fzUBX@?>!g* delta 12695 zcmb7LYj9Q76?UII43HZLqF@n4rN{Vl#8eZ1g+AD33*7#qr6nqB4FW& z^+CMq_0f(n^yQ4jVQ%YY(CN_VkB)XI)3I8u^)VB*fJIw6%8c6fJ4vQD*?TAJl$izL z2kWe}*0;WIt-TMMzj(8BGwh*?HM7I1r?~hf{Qf_apYdy|D?}(mPm1Jc&lh2M%G4m& zIIDTWhIQ*3*W%x_tX;M8!dtt#+Sj#g?5>`Ws6-OqQkgCJ1jiUVS__aoi2*9EK5@zyhzfh9`Yb4lRcefft=J)g8GUQ%)4lny{?G z5}(zha0!nEJ|#QK;>`_14K6|ePK37{pYqSbQsnl1^i)CB9VoSh8=!fH^owx)GzID{Wa{5?20h^ZxxLm0 zNaF!s6wvutV9$pywR(Ax?^4yz^E=qF!i1#_RzQhBOR?~P3L?b?-e3>V(+ehC>Cy$8 zf_&cGY>hAQ^a{Zt2xvqI6!@C`gUi2d!$DV=Q(qZ6vp?gu;w?2jFJhT0zrYla-~zV#-CK{td6pMzb=~x&o>|;<&)yu|vccTGtBKBPlD-rE@Gd;poYJ(k@gV zdy)K@zP!FmsX^Q<}BqWyXS~P-Tx-rnSGj+I!J^kNa;}g!QpbP56`Gv zlrF+2*mK2P=cdl>P^~{^W2~w6JdFv_I{`HSD86U7pk0Heq7*~#wvW}Mz3h7Hh~f(^ zLgXQ&y1So5_Ol-q^PoT?-L@RSo7UYz_z{I_#ed7P8QVbOx|%x#EU1(wgSDzUJRDgB zMh+rDl9{ed!~JJ(2yi2{PKu@44*ph+x}FKGat<2*U2yvY-8KHnK- zeGhcpP|k-Q`c&fkyJn^{jKu26$1I;h{G`)S3_Aax+njUMvC!FWjoOnM-zA`=dp!Z= zCBsE6j82j%rkVN9ref5<_}S|7E1X-bLHmK0Y6Kn8qsNzjQ`JkHXEew(E_24&p5l2H zDIiy2#g{wVol;Dzejdxg`RcU>=Sk~^V9ZYmBI(SpLFSu=%e-&0Y>J&GDQ3GIg$9F5xZ@cgOP+pK;84mcR{s{oG05zKR-e6rWRe%jxk`Jr1ly zJ=IhC$6Jl*E^!`d1481Q(y18EVdZ$psoDDtw^D?{aEm#da+`OEOJ_09}_7XKd{E)v>s#XFTjQ)zmX#Y^&9|fd<^+Qlg0!b#{iu;mp%v zXQ*)I#5oq{L4ZY1<7+^eN^;D*C&DHv-hF1a!4k7A&k;FdDL`!wi<>_=6ZRF={K@1@ zyXhQw^R&Zb++mO>4KZ&x3pPdZhO;erlM>RUlSBeRLJ5=hcq=YDaJDKta8CQL%y1LT z>EOUsJjD_*{KC#fPR_GnF2I;=%-)_$j-0p`3I7t8zI`6-d{z4Pw2qg}kRx2+=&8VV zO{sXypZ+RrI%MO5>>Y+%oUMAHMnL!?A)tsN&mZgp0ZpF)zHu#vYfl5n zsmKl0J2+F79h}vE$ne|O+D3!m5}!atjP8Sd4PAI)w$18RKP`hEk>r*z#zib%dGbP7 zjaqrqtumV$`XP<8Rcx1Z%qzT@uMmo_kQV(&c;1Xzkg3RXwHzrvE@8eZJ07%KaC-h0t%lZIsAnIXGS6A~GV z=P17T;_RP{aFYhn4`h-H8j6ka_F;mG8DEE8qE^nhw948c(wMM7_0(~|0KfHIs>*um zEggcOAQf%T0!%?W?pZcIkW_JjmjKa z+Pu_Ab00Kx83lc&t>Ls~ar3lmRr9p#GWQwIeYA)%b>m$qmWcVj>tNqjeBbq!7U^4+ z3}{3lmF58=uDAia$8yW6AQrrg%UF&d8Lh&%v-n*E^EiW{|R;rd4 znk%ia2#rG+&r3n_3tFJhtx%{oOUyu9XbhSuW^SPDx+LaZZLphF%dS?_rExV~Vsy?|J5YMSsT=bG{a~4z?b)0d@=QR@iN@ zjj-Eccfjt1eIK?-aei0!0^`lVEYjS)v5ynoG{7uN?o#DTHrtpb*?|lSNF!!!OU&6; zDCXUk9jFiSI(erW{A=bVM+KYihW$V_ZQ3^0j(BM}PXo$^v%b#a)>pT|wyV}x<1waE 
zV!^i5+9hwDF*w}=+kxTf-fV2GpZ+OW(nu85<^dsW75nd1W&3~FZXTlbEpGZy=3xRR za1mp}g58fz>j9gvN9a?XygiRR>`DfxB*ljxfIX;M5APgvyEPXUET>S0h24rTd?@>@ z=@KiOv9w8C@(^iA+T+c* Date: Tue, 4 Nov 2025 21:48:55 +0100 Subject: [PATCH 325/371] perf: improve ethsendrawsync for op with flashblock (#19462) Co-authored-by: Arsenii Kulikov --- Cargo.lock | 1 - crates/optimism/rpc/Cargo.toml | 1 - crates/optimism/rpc/src/eth/mod.rs | 10 +- crates/optimism/rpc/src/eth/transaction.rs | 93 ++++++------------- crates/rpc/rpc-eth-api/src/helpers/receipt.rs | 22 +---- crates/rpc/rpc-eth-types/src/pending_block.rs | 55 ++++++++++- crates/rpc/rpc-eth-types/src/utils.rs | 19 ++++ 7 files changed, 105 insertions(+), 96 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd26f0e83d3..30048db5e98 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9621,7 +9621,6 @@ dependencies = [ "reth-primitives-traits", "reth-rpc", "reth-rpc-api", - "reth-rpc-convert", "reth-rpc-engine-api", "reth-rpc-eth-api", "reth-rpc-eth-types", diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 38114ea9ff9..5d926caf159 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -28,7 +28,6 @@ reth-node-builder.workspace = true reth-chainspec.workspace = true reth-chain-state.workspace = true reth-rpc-engine-api.workspace = true -reth-rpc-convert.workspace = true # op-reth reth-optimism-evm.workspace = true diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 5dc0abd6208..8adbee93adc 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -94,6 +94,11 @@ impl OpEthApi { Self { inner } } + /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. + pub const fn builder() -> OpEthApiBuilder { + OpEthApiBuilder::new() + } + /// Returns a reference to the [`EthApiNodeBackend`]. pub fn eth_api(&self) -> &EthApiNodeBackend { self.inner.eth_api() @@ -132,11 +137,6 @@ impl OpEthApi { block.filter(|b| b.block().parent_hash() == parent_hash).map(|b| b.pending.clone()) } - /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. - pub const fn builder() -> OpEthApiBuilder { - OpEthApiBuilder::new() - } - /// Awaits a fresh flashblock if one is being built, otherwise returns current. async fn flashblock( &self, diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 37c05815a61..14ed9dbe247 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,20 +1,15 @@ //! Loads and formats OP transaction RPC response. 
use crate::{OpEthApi, OpEthApiError, SequencerClient}; -use alloy_consensus::TxReceipt as _; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_eth::TransactionInfo; use futures::StreamExt; use op_alloy_consensus::{transaction::OpTransactionInfo, OpTransaction}; use reth_chain_state::CanonStateSubscriptions; use reth_optimism_primitives::DepositReceipt; -use reth_primitives_traits::{BlockBody, SignedTransaction, SignerRecoverable}; -use reth_rpc_convert::transaction::ConvertReceiptInput; +use reth_primitives_traits::{BlockBody, SignedTransaction}; use reth_rpc_eth_api::{ - helpers::{ - receipt::calculate_gas_used_and_next_log_index, spec::SignersForRpc, EthTransactions, - LoadReceipt, LoadTransaction, - }, + helpers::{spec::SignersForRpc, EthTransactions, LoadReceipt, LoadTransaction}, try_into_op_tx_info, EthApiTypes as _, FromEthApiError, FromEvmError, RpcConvert, RpcNodeCore, RpcReceipt, TxInfoMapper, }; @@ -88,21 +83,35 @@ where fn send_raw_transaction_sync( &self, tx: Bytes, - ) -> impl Future, Self::Error>> + Send - where - Self: LoadReceipt + 'static, - { + ) -> impl Future, Self::Error>> + Send { let this = self.clone(); let timeout_duration = self.send_raw_transaction_sync_timeout(); async move { let mut canonical_stream = this.provider().canonical_state_stream(); let hash = EthTransactions::send_raw_transaction(&this, tx).await?; - let flashblock_rx = this.pending_block_rx(); - let mut flashblock_stream = flashblock_rx.map(WatchStream::new); + let mut flashblock_stream = this.pending_block_rx().map(WatchStream::new); tokio::time::timeout(timeout_duration, async { loop { tokio::select! { + biased; + // check if the tx was preconfirmed in a new flashblock + flashblock = async { + if let Some(stream) = &mut flashblock_stream { + stream.next().await + } else { + futures::future::pending().await + } + } => { + if let Some(flashblock) = flashblock.flatten() { + // if flashblocks are supported, attempt to find id from the pending block + if let Some(receipt) = flashblock + .find_and_convert_transaction_receipt(hash, this.tx_resp_builder()) + { + return receipt; + } + } + } // Listen for regular canonical block updates for inclusion canonical_notification = canonical_stream.next() => { if let Some(notification) = canonical_notification { @@ -118,23 +127,6 @@ where break; } } - // check if the tx was preconfirmed in a new flashblock - _flashblock_update = async { - if let Some(ref mut stream) = flashblock_stream { - stream.next().await - } else { - futures::future::pending().await - } - } => { - // Check flashblocks for faster confirmation (Optimism-specific) - if let Ok(Some(pending_block)) = this.pending_flashblock().await { - let block_and_receipts = pending_block.into_block_and_receipts(); - if block_and_receipts.block.body().contains_transaction(&hash) - && let Some(receipt) = this.transaction_receipt(hash).await? { - return Ok(receipt); - } - } - } } } Err(Self::Error::from_eth_err(EthApiError::TransactionConfirmationTimeout { @@ -168,42 +160,11 @@ where if tx_receipt.is_none() { // if flashblocks are supported, attempt to find id from the pending block - if let Ok(Some(pending_block)) = this.pending_flashblock().await { - let block_and_receipts = pending_block.into_block_and_receipts(); - if let Some((tx, receipt)) = - block_and_receipts.find_transaction_and_receipt_by_hash(hash) - { - // Build tx receipt from pending block and receipts directly inline. 
- // This avoids canonical cache lookup that would be done by the - // `build_transaction_receipt` which would result in a block not found - // issue. See: https://github.com/paradigmxyz/reth/issues/18529 - let meta = tx.meta(); - let all_receipts = &block_and_receipts.receipts; - - let (gas_used, next_log_index) = - calculate_gas_used_and_next_log_index(meta.index, all_receipts); - - return Ok(Some( - this.tx_resp_builder() - .convert_receipts_with_block( - vec![ConvertReceiptInput { - tx: tx - .tx() - .clone() - .try_into_recovered_unchecked() - .map_err(Self::Error::from_eth_err)? - .as_recovered_ref(), - gas_used: receipt.cumulative_gas_used() - gas_used, - receipt: receipt.clone(), - next_log_index, - meta, - }], - block_and_receipts.sealed_block(), - )? - .pop() - .unwrap(), - )) - } + if let Ok(Some(pending_block)) = this.pending_flashblock().await && + let Some(Ok(receipt)) = pending_block + .find_and_convert_transaction_receipt(hash, this.tx_resp_builder()) + { + return Ok(Some(receipt)); } } let Some((tx, meta, receipt)) = tx_receipt else { return Ok(None) }; diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 58c3e8897dc..12215fbff1e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -6,27 +6,11 @@ use alloy_consensus::{transaction::TransactionMeta, TxReceipt}; use futures::Future; use reth_primitives_traits::SignerRecoverable; use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert}; -use reth_rpc_eth_types::{error::FromEthApiError, EthApiError}; +use reth_rpc_eth_types::{ + error::FromEthApiError, utils::calculate_gas_used_and_next_log_index, EthApiError, +}; use reth_storage_api::{ProviderReceipt, ProviderTx}; -/// Calculates the gas used and next log index for a transaction at the given index -pub fn calculate_gas_used_and_next_log_index( - tx_index: u64, - all_receipts: &[impl TxReceipt], -) -> (u64, usize) { - let mut gas_used = 0; - let mut next_log_index = 0; - - if tx_index > 0 { - for receipt in all_receipts.iter().take(tx_index as usize) { - gas_used = receipt.cumulative_gas_used(); - next_log_index += receipt.logs().len(); - } - } - - (gas_used, next_log_index) -} - /// Assembles transaction receipt data w.r.t to network. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. 
diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 45f50ea82c5..3150fffdc56 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -4,17 +4,18 @@ use std::{sync::Arc, time::Instant}; -use crate::block::BlockAndReceipts; -use alloy_consensus::BlockHeader; +use crate::{block::BlockAndReceipts, utils::calculate_gas_used_and_next_log_index}; +use alloy_consensus::{BlockHeader, TxReceipt}; use alloy_eips::{BlockId, BlockNumberOrTag}; -use alloy_primitives::{BlockHash, B256}; +use alloy_primitives::{BlockHash, TxHash, B256}; use derive_more::Constructor; use reth_chain_state::{BlockState, ExecutedBlock}; use reth_ethereum_primitives::Receipt; use reth_evm::{ConfigureEvm, EvmEnvFor}; use reth_primitives_traits::{ - Block, BlockTy, NodePrimitives, ReceiptTy, RecoveredBlock, SealedHeader, + Block, BlockTy, IndexedTx, NodePrimitives, ReceiptTy, RecoveredBlock, SealedHeader, }; +use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert, RpcTypes}; /// Configured [`reth_evm::EvmEnv`] for a pending block. #[derive(Debug, Clone, Constructor)] @@ -129,6 +130,52 @@ impl PendingBlock { pub fn parent_hash(&self) -> BlockHash { self.executed_block.recovered_block().parent_hash() } + + /// Finds a transaction by hash and returns it along with its corresponding receipt. + /// + /// Returns `None` if the transaction is not found in this block. + pub fn find_transaction_and_receipt_by_hash( + &self, + tx_hash: TxHash, + ) -> Option<(IndexedTx<'_, N::Block>, &N::Receipt)> { + let indexed_tx = self.executed_block.recovered_block().find_indexed(tx_hash)?; + let receipt = self.receipts.get(indexed_tx.index())?; + Some((indexed_tx, receipt)) + } + + /// Returns the rpc transaction receipt for the given transaction hash if it exists. + /// + /// This uses the given converter to turn [`Self::find_transaction_and_receipt_by_hash`] into + /// the rpc format. + pub fn find_and_convert_transaction_receipt( + &self, + tx_hash: TxHash, + converter: &C, + ) -> Option::Receipt, C::Error>> + where + C: RpcConvert, + { + let (tx, receipt) = self.find_transaction_and_receipt_by_hash(tx_hash)?; + let meta = tx.meta(); + let all_receipts = &self.receipts; + + let (gas_used, next_log_index) = + calculate_gas_used_and_next_log_index(meta.index, all_receipts); + + converter + .convert_receipts_with_block( + vec![ConvertReceiptInput { + tx: tx.recovered_tx(), + gas_used: receipt.cumulative_gas_used() - gas_used, + receipt: receipt.clone(), + next_log_index, + meta, + }], + self.executed_block.sealed_block(), + ) + .map(|mut receipts| receipts.pop()) + .transpose() + } } impl From> for BlockState { diff --git a/crates/rpc/rpc-eth-types/src/utils.rs b/crates/rpc/rpc-eth-types/src/utils.rs index 69f9833af5e..4a613c1915b 100644 --- a/crates/rpc/rpc-eth-types/src/utils.rs +++ b/crates/rpc/rpc-eth-types/src/utils.rs @@ -1,9 +1,28 @@ //! 
Commonly used code snippets use super::{EthApiError, EthResult}; +use alloy_consensus::TxReceipt; use reth_primitives_traits::{Recovered, SignedTransaction}; use std::future::Future; +/// Calculates the gas used and next log index for a transaction at the given index +pub fn calculate_gas_used_and_next_log_index( + tx_index: u64, + all_receipts: &[impl TxReceipt], +) -> (u64, usize) { + let mut gas_used = 0; + let mut next_log_index = 0; + + if tx_index > 0 { + for receipt in all_receipts.iter().take(tx_index as usize) { + gas_used = receipt.cumulative_gas_used(); + next_log_index += receipt.logs().len(); + } + } + + (gas_used, next_log_index) +} + /// Recovers a [`SignedTransaction`] from an enveloped encoded byte stream. /// /// This is a helper function that returns the appropriate RPC-specific error if the input data is From 8ac37f3c6771588ad2655f5c1aa78db87c2d2563 Mon Sep 17 00:00:00 2001 From: Avory Date: Wed, 5 Nov 2025 01:19:08 +0200 Subject: [PATCH 326/371] docs(banlist): document timeout update behavior on re-ban (#19497) --- crates/net/banlist/src/lib.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/net/banlist/src/lib.rs b/crates/net/banlist/src/lib.rs index fb44500efe2..31b779bc8d5 100644 --- a/crates/net/banlist/src/lib.rs +++ b/crates/net/banlist/src/lib.rs @@ -125,11 +125,14 @@ impl BanList { /// Bans the IP until the timestamp. /// /// This does not ban non-global IPs. + /// If the IP is already banned, the timeout will be updated to the new value. pub fn ban_ip_until(&mut self, ip: IpAddr, until: Instant) { self.ban_ip_with(ip, Some(until)); } - /// Bans the peer until the timestamp + /// Bans the peer until the timestamp. + /// + /// If the peer is already banned, the timeout will be updated to the new value. pub fn ban_peer_until(&mut self, node_id: PeerId, until: Instant) { self.ban_peer_with(node_id, Some(until)); } @@ -147,6 +150,8 @@ impl BanList { } /// Bans the peer indefinitely or until the given timeout. + /// + /// If the peer is already banned, the timeout will be updated to the new value. pub fn ban_peer_with(&mut self, node_id: PeerId, until: Option) { self.banned_peers.insert(node_id, until); } @@ -154,6 +159,7 @@ impl BanList { /// Bans the ip indefinitely or until the given timeout. /// /// This does not ban non-global IPs. + /// If the IP is already banned, the timeout will be updated to the new value. 
pub fn ban_ip_with(&mut self, ip: IpAddr, until: Option) { if is_global(&ip) { self.banned_ips.insert(ip, until); @@ -167,7 +173,7 @@ mod tests { #[test] fn can_ban_unban_peer() { - let peer = PeerId::random(); + let peer = PeerId::new([1; 64]); let mut banlist = BanList::default(); banlist.ban_peer(peer); assert!(banlist.is_banned_peer(&peer)); From f4715ee62f33ce82ea952888044fecf99a55210f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?jos=C3=A9=20v?= <52646071+Peponks9@users.noreply.github.com> Date: Tue, 4 Nov 2025 17:50:41 -0600 Subject: [PATCH 327/371] chore: add custom hardforks example (#19391) Co-authored-by: Matthias Seitz --- Cargo.lock | 13 ++ Cargo.toml | 1 + examples/custom-hardforks/Cargo.toml | 16 +++ examples/custom-hardforks/src/chainspec.rs | 149 +++++++++++++++++++++ examples/custom-hardforks/src/main.rs | 5 + 5 files changed, 184 insertions(+) create mode 100644 examples/custom-hardforks/Cargo.toml create mode 100644 examples/custom-hardforks/src/chainspec.rs create mode 100644 examples/custom-hardforks/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 30048db5e98..032babbd70e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2701,6 +2701,19 @@ dependencies = [ "syn 2.0.108", ] +[[package]] +name = "custom-hardforks" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "reth-chainspec", + "reth-network-peers", + "serde", +] + [[package]] name = "darling" version = "0.20.11" diff --git a/Cargo.toml b/Cargo.toml index a1fd8647a1a..8afc22cab57 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -148,6 +148,7 @@ members = [ "examples/custom-node/", "examples/custom-engine-types/", "examples/custom-evm/", + "examples/custom-hardforks/", "examples/custom-inspector/", "examples/custom-node-components/", "examples/custom-payload-builder/", diff --git a/examples/custom-hardforks/Cargo.toml b/examples/custom-hardforks/Cargo.toml new file mode 100644 index 00000000000..78060f6af62 --- /dev/null +++ b/examples/custom-hardforks/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "custom-hardforks" +license.workspace = true +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +# Core Reth dependencies for chain specs and hardforks +reth-chainspec.workspace = true +reth-network-peers.workspace = true +alloy-genesis.workspace = true +alloy-consensus.workspace = true +alloy-primitives.workspace = true +alloy-eips.workspace = true +serde = { version = "1.0", features = ["derive"] } diff --git a/examples/custom-hardforks/src/chainspec.rs b/examples/custom-hardforks/src/chainspec.rs new file mode 100644 index 00000000000..d51db59fddb --- /dev/null +++ b/examples/custom-hardforks/src/chainspec.rs @@ -0,0 +1,149 @@ +//! Custom chain specification integrating hardforks. +//! +//! This demonstrates how to build a `ChainSpec` with custom hardforks, +//! implementing required traits for integration with Reth's chain management. + +use alloy_eips::eip7840::BlobParams; +use alloy_genesis::Genesis; +use alloy_primitives::{B256, U256}; +use reth_chainspec::{ + hardfork, BaseFeeParams, Chain, ChainSpec, DepositContract, EthChainSpec, EthereumHardfork, + EthereumHardforks, ForkCondition, Hardfork, Hardforks, +}; +use reth_network_peers::NodeRecord; +use serde::{Deserialize, Serialize}; + +// Define custom hardfork variants using Reth's `hardfork!` macro. +// Each variant represents a protocol upgrade (e.g., enabling new features). +hardfork!( + /// Custom hardforks for the example chain. 
+ /// + /// These are inspired by Ethereum's upgrades but customized for demonstration. + /// Add new variants here to extend the chain's hardfork set. + CustomHardfork { + /// Enables basic custom features (e.g., a new precompile). + BasicUpgrade, + /// Enables advanced features (e.g., state modifications). + AdvancedUpgrade, + } +); + +// Implement the `Hardfork` trait for each variant. +// This defines the name and any custom logic (e.g., feature toggles). +// Note: The hardfork! macro already implements Hardfork, so no manual impl needed. + +// Configuration for hardfork activation. +// This struct holds settings like activation blocks and is serializable for config files. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct CustomHardforkConfig { + /// Block number to activate BasicUpgrade. + pub basic_upgrade_block: u64, + /// Block number to activate AdvancedUpgrade. + pub advanced_upgrade_block: u64, +} + +// Custom chain spec wrapping Reth's `ChainSpec` with our hardforks. +#[derive(Debug, Clone)] +pub struct CustomChainSpec { + pub inner: ChainSpec, +} + +impl CustomChainSpec { + /// Creates a custom chain spec from a genesis file. + /// + /// This parses the [`ChainSpec`] and adds the custom hardforks. + pub fn from_genesis(genesis: Genesis) -> Self { + let extra = genesis.config.extra_fields.deserialize_as::().unwrap(); + + let mut inner = ChainSpec::from_genesis(genesis); + inner.hardforks.insert( + CustomHardfork::BasicUpgrade, + ForkCondition::Timestamp(extra.basic_upgrade_block), + ); + inner.hardforks.insert( + CustomHardfork::AdvancedUpgrade, + ForkCondition::Timestamp(extra.advanced_upgrade_block), + ); + Self { inner } + } +} + +// Implement `Hardforks` to integrate custom hardforks with Reth's system. +impl Hardforks for CustomChainSpec { + fn fork(&self, fork: H) -> ForkCondition { + self.inner.fork(fork) + } + + fn forks_iter(&self) -> impl Iterator { + self.inner.forks_iter() + } + + fn fork_id(&self, head: &reth_chainspec::Head) -> reth_chainspec::ForkId { + self.inner.fork_id(head) + } + + fn latest_fork_id(&self) -> reth_chainspec::ForkId { + self.inner.latest_fork_id() + } + + fn fork_filter(&self, head: reth_chainspec::Head) -> reth_chainspec::ForkFilter { + self.inner.fork_filter(head) + } +} + +// Implement `EthChainSpec` for compatibility with Ethereum-based nodes. +impl EthChainSpec for CustomChainSpec { + type Header = alloy_consensus::Header; + + fn chain(&self) -> Chain { + self.inner.chain() + } + + fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams { + self.inner.base_fee_params_at_timestamp(timestamp) + } + + fn blob_params_at_timestamp(&self, timestamp: u64) -> Option { + self.inner.blob_params_at_timestamp(timestamp) + } + + fn deposit_contract(&self) -> Option<&DepositContract> { + self.inner.deposit_contract() + } + + fn genesis_hash(&self) -> B256 { + self.inner.genesis_hash() + } + + fn prune_delete_limit(&self) -> usize { + self.inner.prune_delete_limit() + } + + fn display_hardforks(&self) -> Box { + Box::new(self.inner.display_hardforks()) + } + + fn genesis_header(&self) -> &Self::Header { + self.inner.genesis_header() + } + + fn genesis(&self) -> &Genesis { + self.inner.genesis() + } + + fn bootnodes(&self) -> Option> { + self.inner.bootnodes() + } + + fn final_paris_total_difficulty(&self) -> Option { + self.inner.final_paris_total_difficulty() + } +} + +// Implement `EthereumHardforks` to support Ethereum hardfork queries. 
+impl EthereumHardforks for CustomChainSpec { + fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition { + self.inner.ethereum_fork_activation(fork) + } +} diff --git a/examples/custom-hardforks/src/main.rs b/examples/custom-hardforks/src/main.rs new file mode 100644 index 00000000000..588f260c616 --- /dev/null +++ b/examples/custom-hardforks/src/main.rs @@ -0,0 +1,5 @@ +//! Example that showcases how to inject custom hardforks. + +pub mod chainspec; + +fn main() {} From b90badbe6de2d83ae23c18cd1f476d83d41e9d97 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Nov 2025 10:49:17 +0100 Subject: [PATCH 328/371] fix: skip code check in get_transaction_by_sender_and_nonce (#19502) --- crates/rpc/rpc-eth-api/src/helpers/transaction.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index d2e0b5f943a..8a49208cd8c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -307,10 +307,8 @@ pub trait EthTransactions: LoadTransaction { return Ok(Some(self.tx_resp_builder().fill_pending(transaction)?)); } - // Check if the sender is a contract - if !self.get_code(sender, None).await?.is_empty() { - return Ok(None); - } + // Note: we can't optimize for contracts (account with code) and cannot shortcircuit if + // the address has code, because with 7702 EOAs can also have code let highest = self.transaction_count(sender, None).await?.saturating_to::(); From 644ecce8217174091bd855dc87bb79c96f57df3c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Nov 2025 11:10:16 +0100 Subject: [PATCH 329/371] chore: bump min ckzg (#19504) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 8afc22cab57..dec3ab94366 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -646,7 +646,7 @@ secp256k1 = { version = "0.30", default-features = false, features = ["global-co rand_08 = { package = "rand", version = "0.8" } # for eip-4844 -c-kzg = "2.1.4" +c-kzg = "2.1.5" # config toml = "0.8" From 89be06f6ad25b7d15fb19d2c2dc4c03c21f758fe Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Nov 2025 12:38:37 +0100 Subject: [PATCH 330/371] chore: bump version 1.8.4 (#19503) --- Cargo.lock | 278 +++++++++++++++++++-------------------- Cargo.toml | 2 +- docs/vocs/vocs.config.ts | 2 +- 3 files changed, 141 insertions(+), 141 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 032babbd70e..aff1cf56cdb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3193,7 +3193,7 @@ dependencies = [ [[package]] name = "ef-test-runner" -version = "1.8.3" +version = "1.8.4" dependencies = [ "clap", "ef-tests", @@ -3201,7 +3201,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3681,7 +3681,7 @@ dependencies = [ [[package]] name = "example-full-contract-state" -version = "1.8.3" +version = "1.8.4" dependencies = [ "eyre", "reth-ethereum", @@ -3820,7 +3820,7 @@ dependencies = [ [[package]] name = "exex-subscription" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "clap", @@ -6218,7 +6218,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.8.3" +version = "1.8.4" dependencies = [ "clap", "reth-cli-util", @@ -7296,7 +7296,7 @@ checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" [[package]] name = "reth" -version = "1.8.3" 
+version = "1.8.4" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7343,7 +7343,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7366,7 +7366,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -7405,7 +7405,7 @@ dependencies = [ [[package]] name = "reth-bench-compare" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "alloy-provider", @@ -7431,7 +7431,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7463,7 +7463,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7483,7 +7483,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-genesis", "clap", @@ -7496,7 +7496,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7578,7 +7578,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.8.3" +version = "1.8.4" dependencies = [ "reth-tasks", "tokio", @@ -7587,7 +7587,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7607,7 +7607,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7631,7 +7631,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.8.3" +version = "1.8.4" dependencies = [ "proc-macro2", "quote", @@ -7641,7 +7641,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "eyre", @@ -7658,7 +7658,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7670,7 +7670,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7684,7 +7684,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7709,7 +7709,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7743,7 +7743,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7773,7 +7773,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7803,7 +7803,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7819,7 +7819,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7845,7 +7845,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7870,7 +7870,7 @@ dependencies = [ [[package]] 
name = "reth-dns-discovery" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7898,7 +7898,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7936,7 +7936,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7993,7 +7993,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.8.3" +version = "1.8.4" dependencies = [ "aes", "alloy-primitives", @@ -8023,7 +8023,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8046,7 +8046,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8070,7 +8070,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.8.3" +version = "1.8.4" dependencies = [ "futures", "pin-project", @@ -8099,7 +8099,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8170,7 +8170,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8197,7 +8197,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8219,7 +8219,7 @@ dependencies = [ [[package]] name = "reth-era-downloader" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "bytes", @@ -8236,7 +8236,7 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8262,7 +8262,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.8.3" +version = "1.8.4" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -8272,7 +8272,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8310,7 +8310,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8335,7 +8335,7 @@ dependencies = [ [[package]] name = "reth-ethereum" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -8375,7 +8375,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.8.3" +version = "1.8.4" dependencies = [ "clap", "eyre", @@ -8399,7 +8399,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8415,7 +8415,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8433,7 +8433,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8446,7 +8446,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8474,7 +8474,7 @@ dependencies = [ [[package]] name = 
"reth-ethereum-primitives" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8501,7 +8501,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "rayon", @@ -8511,7 +8511,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8535,7 +8535,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8559,7 +8559,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-evm", "alloy-primitives", @@ -8571,7 +8571,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8591,7 +8591,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8635,7 +8635,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "eyre", @@ -8666,7 +8666,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8683,7 +8683,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.8.3" +version = "1.8.4" dependencies = [ "serde", "serde_json", @@ -8692,7 +8692,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8725,7 +8725,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.8.3" +version = "1.8.4" dependencies = [ "bytes", "futures", @@ -8747,7 +8747,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.8.3" +version = "1.8.4" dependencies = [ "bitflags 2.10.0", "byteorder", @@ -8765,7 +8765,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.8.3" +version = "1.8.4" dependencies = [ "bindgen 0.71.1", "cc", @@ -8773,7 +8773,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.8.3" +version = "1.8.4" dependencies = [ "futures", "metrics", @@ -8784,14 +8784,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.8.3" +version = "1.8.4" dependencies = [ "futures-util", "if-addrs", @@ -8805,7 +8805,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8865,7 +8865,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8889,7 +8889,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8911,7 +8911,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8928,7 +8928,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -8941,7 +8941,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.8.3" +version = 
"1.8.4" dependencies = [ "anyhow", "bincode 1.3.3", @@ -8959,7 +8959,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -8982,7 +8982,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9053,7 +9053,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9106,7 +9106,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-contract", @@ -9159,7 +9159,7 @@ dependencies = [ [[package]] name = "reth-node-ethstats" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9182,7 +9182,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9205,7 +9205,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.8.3" +version = "1.8.4" dependencies = [ "eyre", "http", @@ -9227,7 +9227,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.8.3" +version = "1.8.4" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9238,7 +9238,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.8.3" +version = "1.8.4" dependencies = [ "reth-chainspec", "reth-cli-util", @@ -9278,7 +9278,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9306,7 +9306,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9357,7 +9357,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9388,7 +9388,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9417,7 +9417,7 @@ dependencies = [ [[package]] name = "reth-optimism-flashblocks" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9456,7 +9456,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9466,7 +9466,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9524,7 +9524,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9563,7 +9563,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9590,7 +9590,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9652,7 +9652,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "reth-codecs", @@ -9664,7 +9664,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.8.3" +version = "1.8.4" dependencies = [ 
"alloy-consensus", "alloy-eips", @@ -9701,7 +9701,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9721,7 +9721,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.8.3" +version = "1.8.4" dependencies = [ "pin-project", "reth-payload-primitives", @@ -9732,7 +9732,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9752,7 +9752,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9761,7 +9761,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -9770,7 +9770,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9792,7 +9792,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9829,7 +9829,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9877,7 +9877,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9907,11 +9907,11 @@ dependencies = [ [[package]] name = "reth-prune-db" -version = "1.8.3" +version = "1.8.4" [[package]] name = "reth-prune-types" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "arbitrary", @@ -9930,7 +9930,7 @@ dependencies = [ [[package]] name = "reth-ress-protocol" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9956,7 +9956,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9982,7 +9982,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9996,7 +9996,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10079,7 +10079,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-genesis", @@ -10106,7 +10106,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10125,7 +10125,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-network", @@ -10180,7 +10180,7 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-json-rpc", @@ -10207,7 +10207,7 @@ dependencies = [ [[package]] name = "reth-rpc-e2e-tests" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-genesis", "alloy-rpc-types-engine", @@ -10227,7 +10227,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10263,7 +10263,7 @@ dependencies = 
[ [[package]] name = "reth-rpc-eth-api" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10306,7 +10306,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10353,7 +10353,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10370,7 +10370,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10385,7 +10385,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10442,7 +10442,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10471,7 +10471,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "arbitrary", @@ -10487,7 +10487,7 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10514,7 +10514,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "assert_matches", @@ -10537,7 +10537,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "clap", @@ -10549,7 +10549,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10571,7 +10571,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10586,7 +10586,7 @@ dependencies = [ [[package]] name = "reth-storage-rpc-provider" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10615,7 +10615,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.8.3" +version = "1.8.4" dependencies = [ "auto_impl", "dyn-clone", @@ -10632,7 +10632,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10647,7 +10647,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.8.3" +version = "1.8.4" dependencies = [ "tokio", "tokio-stream", @@ -10656,7 +10656,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.8.3" +version = "1.8.4" dependencies = [ "clap", "eyre", @@ -10672,7 +10672,7 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" -version = "1.8.3" +version = "1.8.4" dependencies = [ "clap", "eyre", @@ -10688,7 +10688,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10736,7 +10736,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10769,7 +10769,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10802,7 +10802,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.8.3" +version = "1.8.4" 
dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10827,7 +10827,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10857,7 +10857,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10890,7 +10890,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse-parallel" -version = "1.8.3" +version = "1.8.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10919,7 +10919,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.8.3" +version = "1.8.4" dependencies = [ "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index dec3ab94366..87020204e0d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.8.3" +version = "1.8.4" edition = "2024" rust-version = "1.88" license = "MIT OR Apache-2.0" diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts index e98af7701a2..73d8b6b4442 100644 --- a/docs/vocs/vocs.config.ts +++ b/docs/vocs/vocs.config.ts @@ -21,7 +21,7 @@ export default defineConfig({ }, { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' }, { - text: 'v1.8.3', + text: 'v1.8.4', items: [ { text: 'Releases', From 1cd5b50aaf77de6ebe1d65cd7d4fd1939f45635b Mon Sep 17 00:00:00 2001 From: Cypher Pepe <125112044+cypherpepe@users.noreply.github.com> Date: Wed, 5 Nov 2025 14:54:27 +0300 Subject: [PATCH 331/371] fix: dead link Sentry (#19505) --- docs/design/review.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/design/review.md b/docs/design/review.md index 22a32ef904f..304d3582f5e 100644 --- a/docs/design/review.md +++ b/docs/design/review.md @@ -4,7 +4,7 @@ This document contains some of our research on how other codebases designed vari ## P2P -* [`Sentry`](https://erigon.gitbook.io/erigon/advanced-usage/sentry), a pluggable p2p node following the [Erigon gRPC architecture](https://erigon.substack.com/p/current-status-of-silkworm-and-silkrpc): +* [`Sentry`](https://erigon.gitbook.io/docs/summary/fundamentals/modules/sentry), a pluggable p2p node following the [Erigon gRPC architecture](https://erigon.substack.com/p/current-status-of-silkworm-and-silkrpc): * [`vorot93`](https://github.com/vorot93/) first started by implementing a rust devp2p stack in [`devp2p`](https://github.com/vorot93/devp2p) * vorot93 then started work on sentry, using devp2p, to satisfy the erigon architecture of modular components connected with gRPC. 
* The code from rust-ethereum/devp2p was merged into sentry, and rust-ethereum/devp2p was archived From 5b062b21e1cee617f2f748ce797d3ef4df4c0fae Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Nov 2025 13:30:37 +0100 Subject: [PATCH 332/371] chore: bump hardforks (#19506) --- Cargo.lock | 8 ++++---- Cargo.toml | 4 ++-- crates/optimism/chainspec/src/lib.rs | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aff1cf56cdb..8a55191baf2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -299,9 +299,9 @@ dependencies = [ [[package]] name = "alloy-hardforks" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e7f93a60ef3d867c93d43442ef3f2d8a1095450131c3d4e16bbbbf2166b9bd" +checksum = "1e29d7eacf42f89c21d7f089916d0bdb4f36139a31698790e8837d2dbbd4b2c3" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -397,9 +397,9 @@ dependencies = [ [[package]] name = "alloy-op-hardforks" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0bc135abf78cf83a460bf785d52e4fe83c3ba5fadd416e2f79f7409eec45958" +checksum = "95ac97adaba4c26e17192d81f49186ac20c1e844e35a00e169c8d3d58bc84e6b" dependencies = [ "alloy-chains", "alloy-hardforks", diff --git a/Cargo.toml b/Cargo.toml index 87020204e0d..f1dc705ecc5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -490,7 +490,7 @@ alloy-sol-macro = "1.4.1" alloy-sol-types = { version = "1.4.1", default-features = false } alloy-trie = { version = "0.9.1", default-features = false } -alloy-hardforks = "0.4.3" +alloy-hardforks = "0.4.4" alloy-consensus = { version = "1.0.41", default-features = false } alloy-contract = { version = "1.0.41", default-features = false } @@ -522,7 +522,7 @@ alloy-transport-ws = { version = "1.0.41", default-features = false } # op alloy-op-evm = { version = "0.23.0", default-features = false } -alloy-op-hardforks = "0.4.3" +alloy-op-hardforks = "0.4.4" op-alloy-rpc-types = { version = "0.22.0", default-features = false } op-alloy-rpc-types-engine = { version = "0.22.0", default-features = false } op-alloy-network = { version = "0.22.0", default-features = false } diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 03e7ff70a86..42bb3e3d2b3 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -522,7 +522,7 @@ mod tests { use alloy_op_hardforks::{ BASE_MAINNET_JOVIAN_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, OP_SEPOLIA_JOVIAN_TIMESTAMP, }; - use alloy_primitives::b256; + use alloy_primitives::{b256, hex}; use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind}; use reth_ethereum_forks::{EthereumHardfork, ForkCondition, ForkHash, ForkId, Head}; use reth_optimism_forks::{OpHardfork, OpHardforks}; @@ -879,7 +879,7 @@ mod tests { #[test] fn latest_base_mainnet_fork_id() { assert_eq!( - ForkId { hash: ForkHash([0xfa, 0x71, 0x70, 0xef]), next: 0 }, + ForkId { hash: ForkHash(hex!("1cfeafc9")), next: 0 }, BASE_MAINNET.latest_fork_id() ) } @@ -888,7 +888,7 @@ mod tests { fn latest_base_mainnet_fork_id_with_builder() { let base_mainnet = OpChainSpecBuilder::base_mainnet().build(); assert_eq!( - ForkId { hash: ForkHash([0xfa, 0x71, 0x70, 0xef]), next: 0 }, + ForkId { hash: ForkHash(hex!("1cfeafc9")), next: 0 }, base_mainnet.latest_fork_id() ) } From 84785f025eac5eed123997454998db77a299e1e5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 5 Nov 2025 14:33:46 +0100 Subject: [PATCH 
333/371] chore: bump v1.9.0 (#19507) --- Cargo.lock | 278 +++++++++++++++++++-------------------- Cargo.toml | 2 +- docs/vocs/vocs.config.ts | 2 +- 3 files changed, 141 insertions(+), 141 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a55191baf2..b7c8618f423 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3193,7 +3193,7 @@ dependencies = [ [[package]] name = "ef-test-runner" -version = "1.8.4" +version = "1.9.0" dependencies = [ "clap", "ef-tests", @@ -3201,7 +3201,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3681,7 +3681,7 @@ dependencies = [ [[package]] name = "example-full-contract-state" -version = "1.8.4" +version = "1.9.0" dependencies = [ "eyre", "reth-ethereum", @@ -3820,7 +3820,7 @@ dependencies = [ [[package]] name = "exex-subscription" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "clap", @@ -6218,7 +6218,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.8.4" +version = "1.9.0" dependencies = [ "clap", "reth-cli-util", @@ -7296,7 +7296,7 @@ checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" [[package]] name = "reth" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7343,7 +7343,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7366,7 +7366,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -7405,7 +7405,7 @@ dependencies = [ [[package]] name = "reth-bench-compare" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-provider", @@ -7431,7 +7431,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7463,7 +7463,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7483,7 +7483,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-genesis", "clap", @@ -7496,7 +7496,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7578,7 +7578,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.8.4" +version = "1.9.0" dependencies = [ "reth-tasks", "tokio", @@ -7587,7 +7587,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7607,7 +7607,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7631,7 +7631,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.8.4" +version = "1.9.0" dependencies = [ "proc-macro2", "quote", @@ -7641,7 +7641,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "eyre", @@ -7658,7 +7658,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7670,7 +7670,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.8.4" +version = "1.9.0" dependencies = 
[ "alloy-consensus", "alloy-eips", @@ -7684,7 +7684,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7709,7 +7709,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7743,7 +7743,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7773,7 +7773,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7803,7 +7803,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7819,7 +7819,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7845,7 +7845,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7870,7 +7870,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7898,7 +7898,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7936,7 +7936,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7993,7 +7993,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.8.4" +version = "1.9.0" dependencies = [ "aes", "alloy-primitives", @@ -8023,7 +8023,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8046,7 +8046,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8070,7 +8070,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.8.4" +version = "1.9.0" dependencies = [ "futures", "pin-project", @@ -8099,7 +8099,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8170,7 +8170,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8197,7 +8197,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8219,7 +8219,7 @@ dependencies = [ [[package]] name = "reth-era-downloader" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "bytes", @@ -8236,7 +8236,7 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8262,7 +8262,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.8.4" +version = "1.9.0" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -8272,7 +8272,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8310,7 +8310,7 @@ dependencies = [ 
[[package]] name = "reth-eth-wire-types" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8335,7 +8335,7 @@ dependencies = [ [[package]] name = "reth-ethereum" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -8375,7 +8375,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.8.4" +version = "1.9.0" dependencies = [ "clap", "eyre", @@ -8399,7 +8399,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8415,7 +8415,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8433,7 +8433,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8446,7 +8446,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8474,7 +8474,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8501,7 +8501,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "rayon", @@ -8511,7 +8511,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8535,7 +8535,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8559,7 +8559,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-evm", "alloy-primitives", @@ -8571,7 +8571,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8591,7 +8591,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8635,7 +8635,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "eyre", @@ -8666,7 +8666,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8683,7 +8683,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.8.4" +version = "1.9.0" dependencies = [ "serde", "serde_json", @@ -8692,7 +8692,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8725,7 +8725,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.8.4" +version = "1.9.0" dependencies = [ "bytes", "futures", @@ -8747,7 +8747,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.8.4" +version = "1.9.0" dependencies = [ "bitflags 2.10.0", "byteorder", @@ -8765,7 +8765,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.8.4" +version = "1.9.0" dependencies = [ "bindgen 0.71.1", "cc", @@ -8773,7 +8773,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.8.4" +version = "1.9.0" dependencies = [ "futures", 
"metrics", @@ -8784,14 +8784,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.8.4" +version = "1.9.0" dependencies = [ "futures-util", "if-addrs", @@ -8805,7 +8805,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8865,7 +8865,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8889,7 +8889,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8911,7 +8911,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8928,7 +8928,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -8941,7 +8941,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.8.4" +version = "1.9.0" dependencies = [ "anyhow", "bincode 1.3.3", @@ -8959,7 +8959,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -8982,7 +8982,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9053,7 +9053,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9106,7 +9106,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-contract", @@ -9159,7 +9159,7 @@ dependencies = [ [[package]] name = "reth-node-ethstats" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9182,7 +9182,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9205,7 +9205,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.8.4" +version = "1.9.0" dependencies = [ "eyre", "http", @@ -9227,7 +9227,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.8.4" +version = "1.9.0" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9238,7 +9238,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.8.4" +version = "1.9.0" dependencies = [ "reth-chainspec", "reth-cli-util", @@ -9278,7 +9278,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9306,7 +9306,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9357,7 +9357,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9388,7 +9388,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9417,7 +9417,7 @@ dependencies = [ [[package]] name = "reth-optimism-flashblocks" -version = "1.8.4" +version = "1.9.0" 
dependencies = [ "alloy-consensus", "alloy-eips", @@ -9456,7 +9456,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9466,7 +9466,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9524,7 +9524,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9563,7 +9563,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9590,7 +9590,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9652,7 +9652,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "reth-codecs", @@ -9664,7 +9664,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9701,7 +9701,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9721,7 +9721,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.8.4" +version = "1.9.0" dependencies = [ "pin-project", "reth-payload-primitives", @@ -9732,7 +9732,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9752,7 +9752,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9761,7 +9761,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -9770,7 +9770,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9792,7 +9792,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9829,7 +9829,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9877,7 +9877,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9907,11 +9907,11 @@ dependencies = [ [[package]] name = "reth-prune-db" -version = "1.8.4" +version = "1.9.0" [[package]] name = "reth-prune-types" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -9930,7 +9930,7 @@ dependencies = [ [[package]] name = "reth-ress-protocol" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9956,7 +9956,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9982,7 +9982,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", 
@@ -9996,7 +9996,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10079,7 +10079,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-genesis", @@ -10106,7 +10106,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10125,7 +10125,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-network", @@ -10180,7 +10180,7 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-json-rpc", @@ -10207,7 +10207,7 @@ dependencies = [ [[package]] name = "reth-rpc-e2e-tests" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-genesis", "alloy-rpc-types-engine", @@ -10227,7 +10227,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10263,7 +10263,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10306,7 +10306,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10353,7 +10353,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10370,7 +10370,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10385,7 +10385,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10442,7 +10442,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10471,7 +10471,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -10487,7 +10487,7 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10514,7 +10514,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "assert_matches", @@ -10537,7 +10537,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "clap", @@ -10549,7 +10549,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10571,7 +10571,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10586,7 +10586,7 @@ dependencies = [ [[package]] name = "reth-storage-rpc-provider" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10615,7 +10615,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.8.4" +version = "1.9.0" dependencies = [ "auto_impl", "dyn-clone", @@ -10632,7 
+10632,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10647,7 +10647,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.8.4" +version = "1.9.0" dependencies = [ "tokio", "tokio-stream", @@ -10656,7 +10656,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.8.4" +version = "1.9.0" dependencies = [ "clap", "eyre", @@ -10672,7 +10672,7 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" -version = "1.8.4" +version = "1.9.0" dependencies = [ "clap", "eyre", @@ -10688,7 +10688,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10736,7 +10736,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10769,7 +10769,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10802,7 +10802,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10827,7 +10827,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10857,7 +10857,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10890,7 +10890,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse-parallel" -version = "1.8.4" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10919,7 +10919,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.8.4" +version = "1.9.0" dependencies = [ "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index f1dc705ecc5..6e6f4226598 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.8.4" +version = "1.9.0" edition = "2024" rust-version = "1.88" license = "MIT OR Apache-2.0" diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts index 73d8b6b4442..4deb6c6df0b 100644 --- a/docs/vocs/vocs.config.ts +++ b/docs/vocs/vocs.config.ts @@ -21,7 +21,7 @@ export default defineConfig({ }, { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' }, { - text: 'v1.8.4', + text: 'v1.9.0', items: [ { text: 'Releases', From 1b5f1293bc6d19eed09e66d7212bbe1b041f88a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Wed, 5 Nov 2025 16:40:32 +0100 Subject: [PATCH 334/371] refactor(era): move to e2s module e2s types and file handling (#19490) --- crates/era-utils/src/export.rs | 2 +- crates/era-utils/src/history.rs | 2 +- crates/era/src/consensus_types.rs | 2 +- crates/era/src/e2s/error.rs | 32 +++++++++++++++++++ crates/era/src/{e2s_file.rs => e2s/file.rs} | 7 ++-- crates/era/src/e2s/mod.rs | 5 +++ crates/era/src/{e2s_types.rs => e2s/types.rs} | 30 +---------------- crates/era/src/era1_file.rs | 7 ++-- crates/era/src/era1_types.rs | 2 +- crates/era/src/era_file_ops.rs | 2 +- crates/era/src/era_types.rs | 4 +-- crates/era/src/execution_types.rs | 8 ++--- crates/era/src/lib.rs | 5 ++- crates/era/tests/it/dd.rs | 2 +- crates/era/tests/it/genesis.rs | 2 +- crates/era/tests/it/main.rs | 2 +- crates/era/tests/it/roundtrip.rs | 2 +- 17 files changed, 65 
insertions(+), 51 deletions(-) create mode 100644 crates/era/src/e2s/error.rs rename crates/era/src/{e2s_file.rs => e2s/file.rs} (99%) create mode 100644 crates/era/src/e2s/mod.rs rename crates/era/src/{e2s_types.rs => e2s/types.rs} (90%) diff --git a/crates/era-utils/src/export.rs b/crates/era-utils/src/export.rs index 6ccdba24262..1690c59a2bc 100644 --- a/crates/era-utils/src/export.rs +++ b/crates/era-utils/src/export.rs @@ -6,7 +6,7 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, B256, U256}; use eyre::{eyre, Result}; use reth_era::{ - e2s_types::IndexEntry, + e2s::types::IndexEntry, era1_file::Era1Writer, era1_types::{BlockIndex, Era1Id}, era_file_ops::{EraFileId, StreamWriter}, diff --git a/crates/era-utils/src/history.rs b/crates/era-utils/src/history.rs index 58d5e383c37..17d6a9df7c0 100644 --- a/crates/era-utils/src/history.rs +++ b/crates/era-utils/src/history.rs @@ -9,7 +9,7 @@ use reth_db_api::{ RawKey, RawTable, RawValue, }; use reth_era::{ - e2s_types::E2sError, + e2s::error::E2sError, era1_file::{BlockTupleIterator, Era1Reader}, era_file_ops::StreamReader, execution_types::BlockTuple, diff --git a/crates/era/src/consensus_types.rs b/crates/era/src/consensus_types.rs index cdcc77ce57a..ffb03cdd64b 100644 --- a/crates/era/src/consensus_types.rs +++ b/crates/era/src/consensus_types.rs @@ -1,7 +1,7 @@ //! Consensus types for Era post-merge history files use crate::{ - e2s_types::{E2sError, Entry}, + e2s::{error::E2sError, types::Entry}, DecodeCompressedSsz, }; use snap::{read::FrameDecoder, write::FrameEncoder}; diff --git a/crates/era/src/e2s/error.rs b/crates/era/src/e2s/error.rs new file mode 100644 index 00000000000..ccfbe7296c8 --- /dev/null +++ b/crates/era/src/e2s/error.rs @@ -0,0 +1,32 @@ +//! Error handling for e2s files operations + +use std::io; +use thiserror::Error; + +/// Error types for e2s file operations +#[derive(Error, Debug)] +pub enum E2sError { + /// IO error during file operations + #[error("IO error: {0}")] + Io(#[from] io::Error), + + /// Error during SSZ encoding/decoding + #[error("SSZ error: {0}")] + Ssz(String), + + /// Reserved field in header not zero + #[error("Reserved field in header not zero")] + ReservedNotZero, + + /// Error during snappy compression + #[error("Snappy compression error: {0}")] + SnappyCompression(String), + + /// Error during snappy decompression + #[error("Snappy decompression error: {0}")] + SnappyDecompression(String), + + /// Error during RLP encoding/decoding + #[error("RLP error: {0}")] + Rlp(String), +} diff --git a/crates/era/src/e2s_file.rs b/crates/era/src/e2s/file.rs similarity index 99% rename from crates/era/src/e2s_file.rs rename to crates/era/src/e2s/file.rs index e1b6989a0f3..9c48add603b 100644 --- a/crates/era/src/e2s_file.rs +++ b/crates/era/src/e2s/file.rs @@ -2,7 +2,10 @@ //! //! See also -use crate::e2s_types::{E2sError, Entry, Version}; +use crate::e2s::{ + error::E2sError, + types::{Entry, Version}, +}; use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write}; /// A reader for `E2Store` files that wraps a [`BufReader`]. @@ -107,7 +110,7 @@ impl E2StoreWriter { #[cfg(test)] mod tests { use super::*; - use crate::e2s_types::{SLOT_INDEX, VERSION}; + use crate::e2s::types::{SLOT_INDEX, VERSION}; use std::io::Cursor; fn create_slot_index_data(starting_slot: u64, offsets: &[i64]) -> Vec { diff --git a/crates/era/src/e2s/mod.rs b/crates/era/src/e2s/mod.rs new file mode 100644 index 00000000000..d67190f4759 --- /dev/null +++ b/crates/era/src/e2s/mod.rs @@ -0,0 +1,5 @@ +//! 
Core e2store primitives and file handling. + +pub mod error; +pub mod file; +pub mod types; diff --git a/crates/era/src/e2s_types.rs b/crates/era/src/e2s/types.rs similarity index 90% rename from crates/era/src/e2s_types.rs rename to crates/era/src/e2s/types.rs index f14bfe56e86..dd0e9485da2 100644 --- a/crates/era/src/e2s_types.rs +++ b/crates/era/src/e2s/types.rs @@ -8,9 +8,9 @@ //! An [`Entry`] is a complete record in the file, consisting of both a [`Header`] and its //! associated data +use crate::e2s::error::E2sError; use ssz_derive::{Decode, Encode}; use std::io::{self, Read, Write}; -use thiserror::Error; /// [`Version`] record: ['e', '2'] pub const VERSION: [u8; 2] = [0x65, 0x32]; @@ -21,34 +21,6 @@ pub const EMPTY: [u8; 2] = [0x00, 0x00]; /// `SlotIndex` record: ['i', '2'] pub const SLOT_INDEX: [u8; 2] = [0x69, 0x32]; -/// Error types for e2s file operations -#[derive(Error, Debug)] -pub enum E2sError { - /// IO error during file operations - #[error("IO error: {0}")] - Io(#[from] io::Error), - - /// Error during SSZ encoding/decoding - #[error("SSZ error: {0}")] - Ssz(String), - - /// Reserved field in header not zero - #[error("Reserved field in header not zero")] - ReservedNotZero, - - /// Error during snappy compression - #[error("Snappy compression error: {0}")] - SnappyCompression(String), - - /// Error during snappy decompression - #[error("Snappy decompression error: {0}")] - SnappyDecompression(String), - - /// Error during RLP encoding/decoding - #[error("RLP error: {0}")] - Rlp(String), -} - /// Header for TLV records in e2store files #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] pub struct Header { diff --git a/crates/era/src/era1_file.rs b/crates/era/src/era1_file.rs index dc34ddef42b..e01af1f8776 100644 --- a/crates/era/src/era1_file.rs +++ b/crates/era/src/era1_file.rs @@ -6,8 +6,11 @@ //! See also . use crate::{ - e2s_file::{E2StoreReader, E2StoreWriter}, - e2s_types::{E2sError, Entry, IndexEntry, Version}, + e2s::{ + error::E2sError, + file::{E2StoreReader, E2StoreWriter}, + types::{Entry, IndexEntry, Version}, + }, era1_types::{BlockIndex, Era1Group, Era1Id, BLOCK_INDEX}, era_file_ops::{EraFileFormat, FileReader, StreamReader, StreamWriter}, execution_types::{ diff --git a/crates/era/src/era1_types.rs b/crates/era/src/era1_types.rs index ef239f3e164..428a8d56936 100644 --- a/crates/era/src/era1_types.rs +++ b/crates/era/src/era1_types.rs @@ -3,7 +3,7 @@ //! See also use crate::{ - e2s_types::{Entry, IndexEntry}, + e2s::types::{Entry, IndexEntry}, era_file_ops::EraFileId, execution_types::{Accumulator, BlockTuple, MAX_BLOCKS_PER_ERA1}, }; diff --git a/crates/era/src/era_file_ops.rs b/crates/era/src/era_file_ops.rs index 469d6b78351..4ce2ede739f 100644 --- a/crates/era/src/era_file_ops.rs +++ b/crates/era/src/era_file_ops.rs @@ -1,6 +1,6 @@ //! 
Represents reading and writing operations' era file -use crate::{e2s_types::Version, E2sError}; +use crate::e2s::{error::E2sError, types::Version}; use std::{ fs::File, io::{Read, Seek, Write}, diff --git a/crates/era/src/era_types.rs b/crates/era/src/era_types.rs index a50b6f19281..15d967875b1 100644 --- a/crates/era/src/era_types.rs +++ b/crates/era/src/era_types.rs @@ -4,7 +4,7 @@ use crate::{ consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock}, - e2s_types::{Entry, IndexEntry, SLOT_INDEX}, + e2s::types::{Entry, IndexEntry, SLOT_INDEX}, }; /// Era file content group @@ -126,7 +126,7 @@ impl IndexEntry for SlotIndex { mod tests { use super::*; use crate::{ - e2s_types::{Entry, IndexEntry}, + e2s::types::{Entry, IndexEntry}, test_utils::{create_beacon_block, create_beacon_state}, }; diff --git a/crates/era/src/execution_types.rs b/crates/era/src/execution_types.rs index 6feb2873fbd..da6e1472002 100644 --- a/crates/era/src/execution_types.rs +++ b/crates/era/src/execution_types.rs @@ -24,7 +24,7 @@ //! // Decompressed and decode typed compressed header //! let decoded_header: Header = compressed.decode_header()?; //! assert_eq!(decoded_header.number, 100); -//! # Ok::<(), reth_era::e2s_types::E2sError>(()) +//! # Ok::<(), reth_era::e2s::error::E2sError>(()) //! ``` //! //! ## [`CompressedBody`] @@ -46,7 +46,7 @@ //! let decoded_body: alloy_consensus::BlockBody = //! compressed_body.decode()?; //! assert_eq!(decoded_body.transactions.len(), 1); -//! # Ok::<(), reth_era::e2s_types::E2sError>(()) +//! # Ok::<(), reth_era::e2s::error::E2sError>(()) //! ``` //! //! ## [`CompressedReceipts`] @@ -68,11 +68,11 @@ //! // Get raw receipt by decoding and decompressing compressed and encoded receipt //! let decompressed_receipt = compressed_receipt_data.decode::()?; //! assert_eq!(decompressed_receipt.receipt.cumulative_gas_used, 21000); -//! # Ok::<(), reth_era::e2s_types::E2sError>(()) +//! # Ok::<(), reth_era::e2s::error::E2sError>(()) //! `````` use crate::{ - e2s_types::{E2sError, Entry}, + e2s::{error::E2sError, types::Entry}, DecodeCompressed, }; use alloy_consensus::{Block, BlockBody, Header}; diff --git a/crates/era/src/lib.rs b/crates/era/src/lib.rs index fd0596e9dfc..91e21d78cdb 100644 --- a/crates/era/src/lib.rs +++ b/crates/era/src/lib.rs @@ -13,8 +13,7 @@ //! 
- Era1 format: pub mod consensus_types; -pub mod e2s_file; -pub mod e2s_types; +pub mod e2s; pub mod era1_file; pub mod era1_types; pub mod era_file_ops; @@ -23,7 +22,7 @@ pub mod execution_types; #[cfg(test)] pub(crate) mod test_utils; -use crate::e2s_types::E2sError; +use crate::e2s::error::E2sError; use alloy_rlp::Decodable; use ssz::Decode; diff --git a/crates/era/tests/it/dd.rs b/crates/era/tests/it/dd.rs index 769a398d6ce..853fbbe095d 100644 --- a/crates/era/tests/it/dd.rs +++ b/crates/era/tests/it/dd.rs @@ -4,7 +4,7 @@ use alloy_consensus::{BlockBody, Header}; use alloy_primitives::U256; use reth_era::{ - e2s_types::IndexEntry, + e2s::types::IndexEntry, era1_file::{Era1Reader, Era1Writer}, era_file_ops::{StreamReader, StreamWriter}, execution_types::CompressedBody, diff --git a/crates/era/tests/it/genesis.rs b/crates/era/tests/it/genesis.rs index 80869f97fa0..6666e7775f7 100644 --- a/crates/era/tests/it/genesis.rs +++ b/crates/era/tests/it/genesis.rs @@ -7,7 +7,7 @@ use crate::{ Era1TestDownloader, ERA1_MAINNET_FILES_NAMES, ERA1_SEPOLIA_FILES_NAMES, MAINNET, SEPOLIA, }; use alloy_consensus::{BlockBody, Header}; -use reth_era::{e2s_types::IndexEntry, execution_types::CompressedBody}; +use reth_era::{e2s::types::IndexEntry, execution_types::CompressedBody}; use reth_ethereum_primitives::TransactionSigned; #[tokio::test(flavor = "multi_thread")] diff --git a/crates/era/tests/it/main.rs b/crates/era/tests/it/main.rs index 611862aa8ea..daa17d34514 100644 --- a/crates/era/tests/it/main.rs +++ b/crates/era/tests/it/main.rs @@ -8,7 +8,7 @@ use reqwest::{Client, Url}; use reth_era::{ - e2s_types::E2sError, + e2s::error::E2sError, era1_file::{Era1File, Era1Reader}, era_file_ops::FileReader, }; diff --git a/crates/era/tests/it/roundtrip.rs b/crates/era/tests/it/roundtrip.rs index a78af341371..00d5448d670 100644 --- a/crates/era/tests/it/roundtrip.rs +++ b/crates/era/tests/it/roundtrip.rs @@ -10,7 +10,7 @@ use alloy_consensus::{BlockBody, BlockHeader, Header, ReceiptWithBloom}; use rand::{prelude::IndexedRandom, rng}; use reth_era::{ - e2s_types::IndexEntry, + e2s::types::IndexEntry, era1_file::{Era1File, Era1Reader, Era1Writer}, era1_types::{Era1Group, Era1Id}, era_file_ops::{EraFileFormat, StreamReader, StreamWriter}, From 11d28b1abb0b5755c56daaf9daa2bc179e52b1af Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Wed, 5 Nov 2025 15:48:01 +0000 Subject: [PATCH 335/371] chore: use `dashboard` variable in main Grafana dashboard (#19518) --- etc/grafana/dashboards/overview.json | 316 +++++++++++++-------------- 1 file changed, 158 insertions(+), 158 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 6d9563ffd2d..480dba3b466 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -179,7 +179,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -247,7 +247,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -315,7 +315,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -383,7 +383,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -451,7 +451,7 @@ { "datasource": { "type": 
"prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -519,7 +519,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -596,7 +596,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -670,7 +670,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -770,7 +770,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_db_table_size{$instance_label=\"$instance\"})", @@ -794,7 +794,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_static_files_segment_size{$instance_label=\"$instance\"})", @@ -825,7 +825,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -922,7 +922,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -1020,7 +1020,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histogram for the engine_forkchoiceUpdated RPC API", "fieldConfig": { @@ -1141,7 +1141,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1173,7 +1173,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1205,7 +1205,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1237,7 +1237,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1269,7 +1269,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1301,7 +1301,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1333,7 +1333,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1369,7 +1369,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histogram for the engine_newPayload RPC API", "fieldConfig": { @@ -1490,7 +1490,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1522,7 +1522,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1554,7 +1554,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1586,7 +1586,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": 
false, "editorMode": "builder", @@ -1618,7 +1618,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1650,7 +1650,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1682,7 +1682,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1714,7 +1714,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1746,7 +1746,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1778,7 +1778,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -1879,7 +1879,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.5\"}", @@ -1890,7 +1890,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.9\"}", @@ -1902,7 +1902,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.95\"}", @@ -1914,7 +1914,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.99\"}", @@ -2011,7 +2011,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.5\"}", @@ -2022,7 +2022,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.9\"}", @@ -2034,7 +2034,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.95\"}", @@ -2046,7 +2046,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.99\"}", @@ -2143,7 +2143,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sync_execution_gas_per_second{$instance_label=\"$instance\"}", @@ -2170,7 +2170,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2202,7 +2202,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2234,7 +2234,7 @@ { 
"datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2338,7 +2338,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2389,7 +2389,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Engine API messages received by the CL, either engine_newPayload or engine_forkchoiceUpdated", "fieldConfig": { @@ -2484,7 +2484,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "rate(reth_consensus_engine_beacon_new_payload_messages{$instance_label=\"$instance\"}[$__rate_interval])", @@ -2585,7 +2585,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "rate(reth_consensus_engine_beacon_failed_new_payload_response_deliveries{$instance_label=\"$instance\"}[$__rate_interval])", @@ -2611,7 +2611,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histogram for the engine_newPayload to engine_forkchoiceUpdated", "fieldConfig": { @@ -2711,7 +2711,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histograms for the engine_getPayloadBodiesByHashV1 and engine_getPayloadBodiesByRangeV1 RPC APIs", "fieldConfig": { @@ -2812,7 +2812,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2844,7 +2844,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2876,7 +2876,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2908,7 +2908,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2940,7 +2940,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3040,7 +3040,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "rate(reth_engine_rpc_blobs_blob_count{$instance_label=\"$instance\"}[$__rate_interval])", @@ -3067,7 +3067,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -3167,7 +3167,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3199,7 +3199,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3235,7 +3235,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total pipeline runs triggered by the sync controller", "fieldConfig": { @@ -3334,7 +3334,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -3447,7 +3447,7 @@ { "datasource": { "type": 
"prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -3547,7 +3547,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3652,7 +3652,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -3686,7 +3686,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -3792,7 +3792,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -3921,7 +3921,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -4038,7 +4038,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_tree_root_proofs_processed_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4138,7 +4138,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "avg by (quantile) (reth_tree_root_proof_calculation_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"})", @@ -4238,7 +4238,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4250,7 +4250,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4382,7 +4382,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4394,7 +4394,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4406,7 +4406,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_tree_root_max_storage_workers{$instance_label=\"$instance\"}", @@ -4418,7 +4418,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_tree_root_max_account_workers{$instance_label=\"$instance\"}", @@ -4518,7 +4518,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sparse_state_trie_multiproof_total_account_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4618,7 +4618,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sparse_state_trie_multiproof_total_storage_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4718,7 +4718,7 @@ { "datasource": { 
"type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sparse_state_trie_multiproof_skipped_account_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4819,7 +4819,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sparse_state_trie_multiproof_skipped_storage_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4920,7 +4920,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "avg by (quantile) (reth_tree_root_multiproof_task_total_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"})", @@ -5022,7 +5022,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5138,7 +5138,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5228,7 +5228,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5328,7 +5328,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5427,7 +5427,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5557,7 +5557,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -5715,7 +5715,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5818,7 +5818,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_db_table_size{$instance_label=\"$instance\"}", @@ -5919,7 +5919,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5989,7 +5989,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "sum by (type) ( reth_db_table_pages{$instance_label=\"$instance\"} )", @@ -6090,7 +6090,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by (job) ( reth_db_table_size{$instance_label=\"$instance\"} )", @@ -6190,7 +6190,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_db_freelist{$instance_label=\"$instance\"}) by (job)", @@ -6344,7 +6344,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -6428,7 +6428,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_static_files_segment_size{$instance_label=\"$instance\"}", @@ -6571,7 +6571,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -6716,7 +6716,7 @@ { "datasource": { "type": "prometheus", - "uid": 
"${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -6819,7 +6819,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by (job) ( reth_static_files_segment_size{$instance_label=\"$instance\"} )", @@ -6919,7 +6919,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "max(max_over_time(reth_static_files_jar_provider_write_duration_seconds{$instance_label=\"$instance\", operation=\"commit-writer\", quantile=\"1\"}[$__interval]) > 0) by (segment)", @@ -7032,7 +7032,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_blockchain_tree_canonical_chain_height{$instance_label=\"$instance\"}", @@ -7132,7 +7132,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_blockchain_tree_block_buffer_blocks{$instance_label=\"$instance\"}", @@ -7232,7 +7232,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "increase(reth_blockchain_tree_reorgs{$instance_label=\"$instance\"}[$__rate_interval])", @@ -7332,7 +7332,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_blockchain_tree_latest_reorg_depth{$instance_label=\"$instance\"}", @@ -7471,7 +7471,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -7563,7 +7563,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -7665,7 +7665,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "max(max_over_time(reth_rpc_server_calls_time_seconds{$instance_label=\"$instance\"}[$__rate_interval])) by (method) > 0", @@ -7753,7 +7753,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -7891,7 +7891,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -7924,7 +7924,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -7958,7 +7958,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -7992,7 +7992,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -8030,7 +8030,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -8144,7 +8144,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -8263,7 +8263,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_headers_total_flushed{$instance_label=\"$instance\"}", @@ -8288,7 +8288,7 @@ { "datasource": 
{ "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_total_flushed{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8389,7 +8389,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_timeout_errors{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8412,7 +8412,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_validation_errors{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8512,7 +8512,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_headers_in_flight_requests{$instance_label=\"$instance\"}", @@ -8553,7 +8553,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The internal state of the headers downloader: the number of downloaded headers, and the number of headers sent to the header stage.", "fieldConfig": { @@ -8682,7 +8682,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_total_flushed{$instance_label=\"$instance\"}", @@ -8706,7 +8706,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_bodies_total_downloaded{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8730,7 +8730,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_buffered_blocks{$instance_label=\"$instance\"}", @@ -8758,7 +8758,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Internal errors in the bodies downloader. 
These are expected to happen from time to time.", "fieldConfig": { @@ -8851,7 +8851,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_bodies_unexpected_errors{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8879,7 +8879,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The number of connected peers and in-progress requests for bodies.", "fieldConfig": { @@ -8974,7 +8974,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_network_connected_peers{$instance_label=\"$instance\"}", @@ -9092,7 +9092,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_buffered_blocks_size_bytes{$instance_label=\"$instance\"}", @@ -9120,7 +9120,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The number of blocks in a request and size in bytes of those block responses", "fieldConfig": { @@ -9234,7 +9234,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_response_response_length{$instance_label=\"$instance\"}", @@ -9276,7 +9276,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9407,7 +9407,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9538,7 +9538,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9669,7 +9669,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9814,7 +9814,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Number of active jobs", "fieldConfig": { @@ -9913,7 +9913,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total number of initiated jobs", "fieldConfig": { @@ -10012,7 +10012,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total number of failed jobs", "fieldConfig": { @@ -10124,7 +10124,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -10225,7 +10225,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -10326,7 +10326,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -10440,7 +10440,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -10549,7 +10549,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_jemalloc_allocated{$instance_label=\"$instance\"}", @@ -10575,7 +10575,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, 
"editorMode": "builder", "expr": "reth_jemalloc_metadata{$instance_label=\"$instance\"}", @@ -10601,7 +10601,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_jemalloc_retained{$instance_label=\"$instance\"}", @@ -10703,7 +10703,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_process_resident_memory_bytes{$instance_label=\"$instance\"}", @@ -10804,7 +10804,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "avg(rate(reth_process_cpu_seconds_total{$instance_label=\"$instance\"}[1m]))", @@ -10905,7 +10905,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_process_open_fds{$instance_label=\"$instance\"}", @@ -11006,7 +11006,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_executor_spawn_critical_tasks_total{$instance_label=\"$instance\"}- reth_executor_spawn_finished_critical_tasks_total{$instance_label=\"$instance\"}", @@ -11121,7 +11121,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -11156,7 +11156,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Tracks the number of regular blocking tasks currently ran by the executor.", "fieldConfig": { @@ -11254,7 +11254,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -11299,7 +11299,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The total number of canonical state notifications sent to ExExes.", "fieldConfig": { @@ -11395,7 +11395,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The total number of events ExExes have sent to the manager.", "fieldConfig": { @@ -11491,7 +11491,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Current and Maximum capacity of the internal state notifications buffer.", "fieldConfig": { @@ -11583,7 +11583,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "max_over_time(reth_exex_manager_max_capacity{$instance_label=\"$instance\"}[1h])", @@ -11679,7 +11679,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_exex_manager_buffer_size{$instance_label=\"$instance\"}", @@ -11747,7 +11747,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_exex_manager_num_exexs{$instance_label=\"$instance\"}", @@ -11858,7 +11858,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_exex_wal_lowest_committed_block_height{$instance_label=\"$instance\"}", @@ -11888,7 +11888,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ 
-11982,7 +11982,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_exex_wal_notifications_count{$instance_label=\"$instance\"}", @@ -12081,7 +12081,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_exex_wal_size_bytes{$instance_label=\"$instance\"}", From 629363a6ea252412eca00ce14266c36a3c4f0bbf Mon Sep 17 00:00:00 2001 From: David Klank <155117116+davidjsonn@users.noreply.github.com> Date: Wed, 5 Nov 2025 19:07:10 +0200 Subject: [PATCH 336/371] refactor: use `Url::as_str()` directly in era modules (#19485) --- crates/era-downloader/tests/it/checksums.rs | 2 +- crates/era-downloader/tests/it/main.rs | 2 +- crates/era-utils/tests/it/main.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/era-downloader/tests/it/checksums.rs b/crates/era-downloader/tests/it/checksums.rs index 630cbece5d4..20717bfda0b 100644 --- a/crates/era-downloader/tests/it/checksums.rs +++ b/crates/era-downloader/tests/it/checksums.rs @@ -60,7 +60,7 @@ impl HttpClient for FailingClient { ) -> eyre::Result> + Send + Sync + Unpin> { let url = url.into_url().unwrap(); - Ok(futures::stream::iter(vec![Ok(match url.to_string().as_str() { + Ok(futures::stream::iter(vec![Ok(match url.as_str() { "https://mainnet.era1.nimbus.team/" => Bytes::from_static(crate::NIMBUS), "https://era1.ethportal.net/" => Bytes::from_static(crate::ETH_PORTAL), "https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(crate::ITHACA), diff --git a/crates/era-downloader/tests/it/main.rs b/crates/era-downloader/tests/it/main.rs index 526d3885bff..189d95506d0 100644 --- a/crates/era-downloader/tests/it/main.rs +++ b/crates/era-downloader/tests/it/main.rs @@ -32,7 +32,7 @@ impl HttpClient for StubClient { ) -> eyre::Result> + Send + Sync + Unpin> { let url = url.into_url().unwrap(); - Ok(futures::stream::iter(vec![Ok(match url.to_string().as_str() { + Ok(futures::stream::iter(vec![Ok(match url.as_str() { "https://mainnet.era1.nimbus.team/" => Bytes::from_static(NIMBUS), "https://era1.ethportal.net/" => Bytes::from_static(ETH_PORTAL), "https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(ITHACA), diff --git a/crates/era-utils/tests/it/main.rs b/crates/era-utils/tests/it/main.rs index 94805c5b356..2e2ec0b0556 100644 --- a/crates/era-utils/tests/it/main.rs +++ b/crates/era-utils/tests/it/main.rs @@ -32,7 +32,7 @@ impl HttpClient for ClientWithFakeIndex { ) -> eyre::Result> + Send + Sync + Unpin> { let url = url.into_url()?; - match url.to_string().as_str() { + match url.as_str() { ITHACA_ERA_INDEX_URL => { // Create a static stream without boxing let stream = From 2ba17cf10dd713aa0216960168f04f2f73b0ddca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Wed, 5 Nov 2025 18:07:45 +0100 Subject: [PATCH 337/371] refactor(era): move era types and file handling to new module (#19520) --- crates/cli/commands/src/export_era.rs | 2 +- crates/era-utils/src/export.rs | 18 ++++++---- crates/era-utils/src/history.rs | 9 ++--- crates/era-utils/tests/it/history.rs | 2 +- crates/era/src/common/decode.rs | 17 +++++++++ .../{era_file_ops.rs => common/file_ops.rs} | 2 +- crates/era/src/common/mod.rs | 4 +++ crates/era/src/consensus_types.rs | 2 +- crates/era/src/{era1_file.rs => era1/file.rs} | 36 ++++++++----------- crates/era/src/era1/mod.rs | 4 +++ .../types/execution.rs} | 8 ++--- .../{era1_types.rs 
=> era1/types/group.rs} | 8 ++--- crates/era/src/era1/types/mod.rs | 6 ++++ crates/era/src/lib.rs | 23 ++---------- crates/era/src/test_utils.rs | 2 +- crates/era/tests/it/dd.rs | 8 +++-- crates/era/tests/it/genesis.rs | 2 +- crates/era/tests/it/main.rs | 4 +-- crates/era/tests/it/roundtrip.rs | 14 +++++--- crates/stages/stages/src/stages/era.rs | 2 +- 20 files changed, 96 insertions(+), 77 deletions(-) create mode 100644 crates/era/src/common/decode.rs rename crates/era/src/{era_file_ops.rs => common/file_ops.rs} (98%) create mode 100644 crates/era/src/common/mod.rs rename crates/era/src/{era1_file.rs => era1/file.rs} (95%) create mode 100644 crates/era/src/era1/mod.rs rename crates/era/src/{execution_types.rs => era1/types/execution.rs} (98%) rename crates/era/src/{era1_types.rs => era1/types/group.rs} (98%) create mode 100644 crates/era/src/era1/types/mod.rs diff --git a/crates/cli/commands/src/export_era.rs b/crates/cli/commands/src/export_era.rs index dbedf1852e5..5f4f0306bb0 100644 --- a/crates/cli/commands/src/export_era.rs +++ b/crates/cli/commands/src/export_era.rs @@ -4,7 +4,7 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::{Args, Parser}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; -use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; +use reth_era::era1::types::execution::MAX_BLOCKS_PER_ERA1; use reth_era_utils as era1; use reth_provider::DatabaseProviderFactory; use std::{path::PathBuf, sync::Arc}; diff --git a/crates/era-utils/src/export.rs b/crates/era-utils/src/export.rs index 1690c59a2bc..d2aa706c798 100644 --- a/crates/era-utils/src/export.rs +++ b/crates/era-utils/src/export.rs @@ -6,13 +6,17 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, B256, U256}; use eyre::{eyre, Result}; use reth_era::{ + common::file_ops::{EraFileId, StreamWriter}, e2s::types::IndexEntry, - era1_file::Era1Writer, - era1_types::{BlockIndex, Era1Id}, - era_file_ops::{EraFileId, StreamWriter}, - execution_types::{ - Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, - TotalDifficulty, MAX_BLOCKS_PER_ERA1, + era1::{ + file::Era1Writer, + types::{ + execution::{ + Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, + TotalDifficulty, MAX_BLOCKS_PER_ERA1, + }, + group::{BlockIndex, Era1Id}, + }, }, }; use reth_fs_util as fs; @@ -306,7 +310,7 @@ where #[cfg(test)] mod tests { use crate::ExportConfig; - use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; + use reth_era::era1::types::execution::MAX_BLOCKS_PER_ERA1; use tempfile::tempdir; #[test] diff --git a/crates/era-utils/src/history.rs b/crates/era-utils/src/history.rs index 17d6a9df7c0..a1d3e8c8590 100644 --- a/crates/era-utils/src/history.rs +++ b/crates/era-utils/src/history.rs @@ -9,11 +9,12 @@ use reth_db_api::{ RawKey, RawTable, RawValue, }; use reth_era::{ + common::{decode::DecodeCompressed, file_ops::StreamReader}, e2s::error::E2sError, - era1_file::{BlockTupleIterator, Era1Reader}, - era_file_ops::StreamReader, - execution_types::BlockTuple, - DecodeCompressed, + era1::{ + file::{BlockTupleIterator, Era1Reader}, + types::execution::BlockTuple, + }, }; use reth_era_downloader::EraMeta; use reth_etl::Collector; diff --git a/crates/era-utils/tests/it/history.rs b/crates/era-utils/tests/it/history.rs index 8e720f1001b..2075722398f 100644 --- a/crates/era-utils/tests/it/history.rs +++ b/crates/era-utils/tests/it/history.rs @@ -1,7 +1,7 @@ use crate::{ClientWithFakeIndex, 
ITHACA_ERA_INDEX_URL}; use reqwest::{Client, Url}; use reth_db_common::init::init_genesis; -use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; +use reth_era::era1::types::execution::MAX_BLOCKS_PER_ERA1; use reth_era_downloader::{EraClient, EraStream, EraStreamConfig}; use reth_era_utils::{export, import, ExportConfig}; use reth_etl::Collector; diff --git a/crates/era/src/common/decode.rs b/crates/era/src/common/decode.rs new file mode 100644 index 00000000000..cef3368d74c --- /dev/null +++ b/crates/era/src/common/decode.rs @@ -0,0 +1,17 @@ +//! Compressed data decoding utilities. + +use crate::e2s::error::E2sError; +use alloy_rlp::Decodable; +use ssz::Decode; + +/// Extension trait for generic decoding from compressed data +pub trait DecodeCompressed { + /// Decompress and decode the data into the given type + fn decode(&self) -> Result; +} + +/// Extension trait for generic decoding from compressed ssz data +pub trait DecodeCompressedSsz { + /// Decompress and decode the SSZ data into the given type + fn decode(&self) -> Result; +} diff --git a/crates/era/src/era_file_ops.rs b/crates/era/src/common/file_ops.rs similarity index 98% rename from crates/era/src/era_file_ops.rs rename to crates/era/src/common/file_ops.rs index 4ce2ede739f..752f5b66fb3 100644 --- a/crates/era/src/era_file_ops.rs +++ b/crates/era/src/common/file_ops.rs @@ -1,4 +1,4 @@ -//! Represents reading and writing operations' era file +//! Era file format traits and I/O operations. use crate::e2s::{error::E2sError, types::Version}; use std::{ diff --git a/crates/era/src/common/mod.rs b/crates/era/src/common/mod.rs new file mode 100644 index 00000000000..3ad45dfdd8a --- /dev/null +++ b/crates/era/src/common/mod.rs @@ -0,0 +1,4 @@ +//! Common utilities and shared functionality. + +pub mod decode; +pub mod file_ops; diff --git a/crates/era/src/consensus_types.rs b/crates/era/src/consensus_types.rs index ffb03cdd64b..3c4a924e7a6 100644 --- a/crates/era/src/consensus_types.rs +++ b/crates/era/src/consensus_types.rs @@ -1,8 +1,8 @@ //! Consensus types for Era post-merge history files use crate::{ + common::decode::DecodeCompressedSsz, e2s::{error::E2sError, types::Entry}, - DecodeCompressedSsz, }; use snap::{read::FrameDecoder, write::FrameEncoder}; use ssz::Decode; diff --git a/crates/era/src/era1_file.rs b/crates/era/src/era1/file.rs similarity index 95% rename from crates/era/src/era1_file.rs rename to crates/era/src/era1/file.rs index e01af1f8776..3f230e8ea66 100644 --- a/crates/era/src/era1_file.rs +++ b/crates/era/src/era1/file.rs @@ -6,16 +6,19 @@ //! See also . 
use crate::{ + common::file_ops::{EraFileFormat, FileReader, StreamReader, StreamWriter}, e2s::{ error::E2sError, file::{E2StoreReader, E2StoreWriter}, types::{Entry, IndexEntry, Version}, }, - era1_types::{BlockIndex, Era1Group, Era1Id, BLOCK_INDEX}, - era_file_ops::{EraFileFormat, FileReader, StreamReader, StreamWriter}, - execution_types::{ - self, Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, - TotalDifficulty, MAX_BLOCKS_PER_ERA1, + era1::types::{ + execution::{ + Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, + TotalDifficulty, ACCUMULATOR, COMPRESSED_BODY, COMPRESSED_HEADER, COMPRESSED_RECEIPTS, + MAX_BLOCKS_PER_ERA1, TOTAL_DIFFICULTY, + }, + group::{BlockIndex, Era1Group, Era1Id, BLOCK_INDEX}, }, }; use alloy_primitives::BlockNumber; @@ -130,19 +133,19 @@ impl BlockTupleIterator { }; match entry.entry_type { - execution_types::COMPRESSED_HEADER => { + COMPRESSED_HEADER => { self.headers.push_back(CompressedHeader::from_entry(&entry)?); } - execution_types::COMPRESSED_BODY => { + COMPRESSED_BODY => { self.bodies.push_back(CompressedBody::from_entry(&entry)?); } - execution_types::COMPRESSED_RECEIPTS => { + COMPRESSED_RECEIPTS => { self.receipts.push_back(CompressedReceipts::from_entry(&entry)?); } - execution_types::TOTAL_DIFFICULTY => { + TOTAL_DIFFICULTY => { self.difficulties.push_back(TotalDifficulty::from_entry(&entry)?); } - execution_types::ACCUMULATOR => { + ACCUMULATOR => { if self.accumulator.is_some() { return Err(E2sError::Ssz("Multiple accumulator entries found".to_string())); } @@ -330,10 +333,7 @@ impl StreamWriter for Era1Writer { impl Era1Writer { /// Write a single block tuple - pub fn write_block( - &mut self, - block_tuple: &crate::execution_types::BlockTuple, - ) -> Result<(), E2sError> { + pub fn write_block(&mut self, block_tuple: &BlockTuple) -> Result<(), E2sError> { if !self.has_written_version { self.write_version()?; } @@ -406,13 +406,7 @@ impl Era1Writer { #[cfg(test)] mod tests { use super::*; - use crate::{ - era_file_ops::FileWriter, - execution_types::{ - Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, - TotalDifficulty, - }, - }; + use crate::common::file_ops::FileWriter; use alloy_primitives::{B256, U256}; use std::io::Cursor; use tempfile::tempdir; diff --git a/crates/era/src/era1/mod.rs b/crates/era/src/era1/mod.rs new file mode 100644 index 00000000000..de0803e7212 --- /dev/null +++ b/crates/era/src/era1/mod.rs @@ -0,0 +1,4 @@ +//! Core era1 primitives and file handling. + +pub mod file; +pub mod types; diff --git a/crates/era/src/execution_types.rs b/crates/era/src/era1/types/execution.rs similarity index 98% rename from crates/era/src/execution_types.rs rename to crates/era/src/era1/types/execution.rs index da6e1472002..e6022d57140 100644 --- a/crates/era/src/execution_types.rs +++ b/crates/era/src/era1/types/execution.rs @@ -16,7 +16,7 @@ //! //! ```rust //! use alloy_consensus::Header; -//! use reth_era::{execution_types::CompressedHeader, DecodeCompressed}; +//! use reth_era::{common::decode::DecodeCompressed, era1::types::execution::CompressedHeader}; //! //! let header = Header { number: 100, ..Default::default() }; //! // Compress the header: rlp encoding and Snappy compression @@ -32,7 +32,7 @@ //! ```rust //! use alloy_consensus::{BlockBody, Header}; //! use alloy_primitives::Bytes; -//! use reth_era::{execution_types::CompressedBody, DecodeCompressed}; +//! 
use reth_era::{common::decode::DecodeCompressed, era1::types::execution::CompressedBody}; //! use reth_ethereum_primitives::TransactionSigned; //! //! let body: BlockBody = BlockBody { @@ -53,7 +53,7 @@ //! //! ```rust //! use alloy_consensus::ReceiptWithBloom; -//! use reth_era::{execution_types::CompressedReceipts, DecodeCompressed}; +//! use reth_era::{common::decode::DecodeCompressed, era1::types::execution::CompressedReceipts}; //! use reth_ethereum_primitives::{Receipt, TxType}; //! //! let receipt = Receipt { @@ -72,8 +72,8 @@ //! `````` use crate::{ + common::decode::DecodeCompressed, e2s::{error::E2sError, types::Entry}, - DecodeCompressed, }; use alloy_consensus::{Block, BlockBody, Header}; use alloy_primitives::{B256, U256}; diff --git a/crates/era/src/era1_types.rs b/crates/era/src/era1/types/group.rs similarity index 98% rename from crates/era/src/era1_types.rs rename to crates/era/src/era1/types/group.rs index 428a8d56936..5a7e65a4048 100644 --- a/crates/era/src/era1_types.rs +++ b/crates/era/src/era1/types/group.rs @@ -1,11 +1,11 @@ -//! Era1 types +//! Era1 group for era1 file content //! //! See also use crate::{ + common::file_ops::EraFileId, e2s::types::{Entry, IndexEntry}, - era_file_ops::EraFileId, - execution_types::{Accumulator, BlockTuple, MAX_BLOCKS_PER_ERA1}, + era1::types::execution::{Accumulator, BlockTuple, MAX_BLOCKS_PER_ERA1}, }; use alloy_primitives::BlockNumber; @@ -174,8 +174,8 @@ impl EraFileId for Era1Id { mod tests { use super::*; use crate::{ + common::decode::DecodeCompressed, test_utils::{create_sample_block, create_test_block_with_compressed_data}, - DecodeCompressed, }; use alloy_consensus::ReceiptWithBloom; use alloy_primitives::{B256, U256}; diff --git a/crates/era/src/era1/types/mod.rs b/crates/era/src/era1/types/mod.rs new file mode 100644 index 00000000000..44568ddf79b --- /dev/null +++ b/crates/era/src/era1/types/mod.rs @@ -0,0 +1,6 @@ +//! Era1 types +//! +//! See also + +pub mod execution; +pub mod group; diff --git a/crates/era/src/lib.rs b/crates/era/src/lib.rs index 91e21d78cdb..4d57db2f6b5 100644 --- a/crates/era/src/lib.rs +++ b/crates/era/src/lib.rs @@ -12,28 +12,11 @@ //! - Era format: //! 
- Era1 format: +pub mod common; pub mod consensus_types; pub mod e2s; -pub mod era1_file; -pub mod era1_types; -pub mod era_file_ops; +pub mod era1; pub mod era_types; -pub mod execution_types; + #[cfg(test)] pub(crate) mod test_utils; - -use crate::e2s::error::E2sError; -use alloy_rlp::Decodable; -use ssz::Decode; - -/// Extension trait for generic decoding from compressed data -pub trait DecodeCompressed { - /// Decompress and decode the data into the given type - fn decode(&self) -> Result; -} - -/// Extension trait for generic decoding from compressed ssz data -pub trait DecodeCompressedSsz { - /// Decompress and decode the SSZ data into the given type - fn decode(&self) -> Result; -} diff --git a/crates/era/src/test_utils.rs b/crates/era/src/test_utils.rs index 96b2545be16..5c80e178d82 100644 --- a/crates/era/src/test_utils.rs +++ b/crates/era/src/test_utils.rs @@ -2,7 +2,7 @@ use crate::{ consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock}, - execution_types::{ + era1::types::execution::{ BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty, }, }; diff --git a/crates/era/tests/it/dd.rs b/crates/era/tests/it/dd.rs index 853fbbe095d..9c1e5d163ea 100644 --- a/crates/era/tests/it/dd.rs +++ b/crates/era/tests/it/dd.rs @@ -4,10 +4,12 @@ use alloy_consensus::{BlockBody, Header}; use alloy_primitives::U256; use reth_era::{ + common::file_ops::{StreamReader, StreamWriter}, e2s::types::IndexEntry, - era1_file::{Era1Reader, Era1Writer}, - era_file_ops::{StreamReader, StreamWriter}, - execution_types::CompressedBody, + era1::{ + file::{Era1Reader, Era1Writer}, + types::execution::CompressedBody, + }, }; use reth_ethereum_primitives::TransactionSigned; use std::io::Cursor; diff --git a/crates/era/tests/it/genesis.rs b/crates/era/tests/it/genesis.rs index 6666e7775f7..14f563edf2f 100644 --- a/crates/era/tests/it/genesis.rs +++ b/crates/era/tests/it/genesis.rs @@ -7,7 +7,7 @@ use crate::{ Era1TestDownloader, ERA1_MAINNET_FILES_NAMES, ERA1_SEPOLIA_FILES_NAMES, MAINNET, SEPOLIA, }; use alloy_consensus::{BlockBody, Header}; -use reth_era::{e2s::types::IndexEntry, execution_types::CompressedBody}; +use reth_era::{e2s::types::IndexEntry, era1::types::execution::CompressedBody}; use reth_ethereum_primitives::TransactionSigned; #[tokio::test(flavor = "multi_thread")] diff --git a/crates/era/tests/it/main.rs b/crates/era/tests/it/main.rs index daa17d34514..9750e7b10b0 100644 --- a/crates/era/tests/it/main.rs +++ b/crates/era/tests/it/main.rs @@ -8,9 +8,9 @@ use reqwest::{Client, Url}; use reth_era::{ + common::file_ops::FileReader, e2s::error::E2sError, - era1_file::{Era1File, Era1Reader}, - era_file_ops::FileReader, + era1::file::{Era1File, Era1Reader}, }; use reth_era_downloader::EraClient; use std::{ diff --git a/crates/era/tests/it/roundtrip.rs b/crates/era/tests/it/roundtrip.rs index 00d5448d670..56f5ac20cd4 100644 --- a/crates/era/tests/it/roundtrip.rs +++ b/crates/era/tests/it/roundtrip.rs @@ -10,12 +10,16 @@ use alloy_consensus::{BlockBody, BlockHeader, Header, ReceiptWithBloom}; use rand::{prelude::IndexedRandom, rng}; use reth_era::{ + common::file_ops::{EraFileFormat, StreamReader, StreamWriter}, e2s::types::IndexEntry, - era1_file::{Era1File, Era1Reader, Era1Writer}, - era1_types::{Era1Group, Era1Id}, - era_file_ops::{EraFileFormat, StreamReader, StreamWriter}, - execution_types::{ - BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty, + era1::{ + file::{Era1File, Era1Reader, Era1Writer}, + types::{ + execution::{ + 
BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty, + }, + group::{Era1Group, Era1Id}, + }, }, }; use reth_ethereum_primitives::TransactionSigned; diff --git a/crates/stages/stages/src/stages/era.rs b/crates/stages/stages/src/stages/era.rs index 6fa10a297c7..7af667dce78 100644 --- a/crates/stages/stages/src/stages/era.rs +++ b/crates/stages/stages/src/stages/era.rs @@ -4,7 +4,7 @@ use futures_util::{Stream, StreamExt}; use reqwest::{Client, Url}; use reth_config::config::EtlConfig; use reth_db_api::{table::Value, transaction::DbTxMut}; -use reth_era::{era1_file::Era1Reader, era_file_ops::StreamReader}; +use reth_era::{common::file_ops::StreamReader, era1::file::Era1Reader}; use reth_era_downloader::{read_dir, EraClient, EraMeta, EraStream, EraStreamConfig}; use reth_era_utils as era; use reth_etl::Collector; From 2bcd7388d771665399f53daf3457aa923bff46be Mon Sep 17 00:00:00 2001 From: Maxim Evtush <154841002+maximevtush@users.noreply.github.com> Date: Wed, 5 Nov 2025 19:16:04 +0200 Subject: [PATCH 338/371] docs: fix license links in README.md (#19519) --- crates/storage/rpc-provider/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/storage/rpc-provider/README.md b/crates/storage/rpc-provider/README.md index 7180d41840d..f1b51a95749 100644 --- a/crates/storage/rpc-provider/README.md +++ b/crates/storage/rpc-provider/README.md @@ -65,7 +65,7 @@ This provider implements the same traits as the local `BlockchainProvider`, maki Licensed under either of: -- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) -- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) +- Apache License, Version 2.0, ([LICENSE-APACHE](../../../LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) +- MIT license ([LICENSE-MIT](../../../LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. From ba8be3fb64d51d712f62ce4ed26d46842bd155e3 Mon Sep 17 00:00:00 2001 From: Fibonacci747 Date: Wed, 5 Nov 2025 18:16:57 +0100 Subject: [PATCH 339/371] feat(optimism): Simplify trait bounds in revalidate_interop_txs_stream (#19500) --- crates/optimism/txpool/src/supervisor/client.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/optimism/txpool/src/supervisor/client.rs b/crates/optimism/txpool/src/supervisor/client.rs index b362fae2e10..a49704ac50a 100644 --- a/crates/optimism/txpool/src/supervisor/client.rs +++ b/crates/optimism/txpool/src/supervisor/client.rs @@ -1,7 +1,6 @@ //! 
This is our custom implementation of validator struct use crate::{ - interop::MaybeInteropTransaction, supervisor::{ metrics::SupervisorMetrics, parse_access_list_items_to_inbox_entries, ExecutingDescriptor, InteropTxValidatorError, @@ -139,8 +138,7 @@ impl SupervisorClient { where InputIter: IntoIterator + Send + 'a, InputIter::IntoIter: Send + 'a, - TItem: - MaybeInteropTransaction + PoolTransaction + Transaction + Clone + Send + Sync + 'static, + TItem: PoolTransaction + Transaction + Send, { stream::iter(txs_to_revalidate.into_iter().map(move |tx_item| { let client_for_async_task = self.clone(); From e3b38b2de5be10edf7c17e4e895ad1bd0a9b02f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Wed, 5 Nov 2025 20:42:07 +0100 Subject: [PATCH 340/371] chore(era): move `era` types to `era` module (#19527) --- crates/era/src/era/mod.rs | 3 +++ .../era/src/{consensus_types.rs => era/types/consensus.rs} | 0 crates/era/src/{era_types.rs => era/types/group.rs} | 4 ++-- crates/era/src/era/types/mod.rs | 6 ++++++ crates/era/src/lib.rs | 3 +-- crates/era/src/test_utils.rs | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) create mode 100644 crates/era/src/era/mod.rs rename crates/era/src/{consensus_types.rs => era/types/consensus.rs} (100%) rename crates/era/src/{era_types.rs => era/types/group.rs} (98%) create mode 100644 crates/era/src/era/types/mod.rs diff --git a/crates/era/src/era/mod.rs b/crates/era/src/era/mod.rs new file mode 100644 index 00000000000..108eeb30887 --- /dev/null +++ b/crates/era/src/era/mod.rs @@ -0,0 +1,3 @@ +//! Core era primitives. + +pub mod types; diff --git a/crates/era/src/consensus_types.rs b/crates/era/src/era/types/consensus.rs similarity index 100% rename from crates/era/src/consensus_types.rs rename to crates/era/src/era/types/consensus.rs diff --git a/crates/era/src/era_types.rs b/crates/era/src/era/types/group.rs similarity index 98% rename from crates/era/src/era_types.rs rename to crates/era/src/era/types/group.rs index 15d967875b1..bb250872ed5 100644 --- a/crates/era/src/era_types.rs +++ b/crates/era/src/era/types/group.rs @@ -1,10 +1,10 @@ -//! Era types for `.era` files +//! Era types for `.era` file content //! //! See also use crate::{ - consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock}, e2s::types::{Entry, IndexEntry, SLOT_INDEX}, + era::types::consensus::{CompressedBeaconState, CompressedSignedBeaconBlock}, }; /// Era file content group diff --git a/crates/era/src/era/types/mod.rs b/crates/era/src/era/types/mod.rs new file mode 100644 index 00000000000..cf91adca546 --- /dev/null +++ b/crates/era/src/era/types/mod.rs @@ -0,0 +1,6 @@ +//! Era types primitives. +//! +//! See also + +pub mod consensus; +pub mod group; diff --git a/crates/era/src/lib.rs b/crates/era/src/lib.rs index 4d57db2f6b5..2e4b755d76f 100644 --- a/crates/era/src/lib.rs +++ b/crates/era/src/lib.rs @@ -13,10 +13,9 @@ //! - Era1 format: pub mod common; -pub mod consensus_types; pub mod e2s; +pub mod era; pub mod era1; -pub mod era_types; #[cfg(test)] pub(crate) mod test_utils; diff --git a/crates/era/src/test_utils.rs b/crates/era/src/test_utils.rs index 5c80e178d82..f5aab53f74b 100644 --- a/crates/era/src/test_utils.rs +++ b/crates/era/src/test_utils.rs @@ -1,7 +1,7 @@ //! Utilities helpers to create era data structures for testing purposes. 
use crate::{ - consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock}, + era::types::consensus::{CompressedBeaconState, CompressedSignedBeaconBlock}, era1::types::execution::{ BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty, }, From e20e56b75ec961731428b4193c31ac1065e2fd9f Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Thu, 6 Nov 2025 00:39:49 +0000 Subject: [PATCH 341/371] feat: add `Metadata` table and `StorageSettings` to `ProviderFactory` (#19384) --- Cargo.lock | 1 + crates/cli/commands/src/common.rs | 2 +- .../cli/commands/src/stage/dump/execution.rs | 2 +- .../src/stage/dump/hashing_account.rs | 2 +- .../src/stage/dump/hashing_storage.rs | 2 +- crates/cli/commands/src/stage/dump/merkle.rs | 2 +- crates/e2e-test-utils/src/setup_import.rs | 11 ++- crates/exex/test-utils/src/lib.rs | 2 +- crates/node/builder/src/launch/common.rs | 2 +- .../stages/stages/src/test_utils/test_db.rs | 6 +- crates/storage/db-api/src/models/metadata.rs | 39 ++++++++++ crates/storage/db-api/src/models/mod.rs | 2 + crates/storage/db-api/src/tables/mod.rs | 7 ++ crates/storage/db-common/src/init.rs | 28 ++++--- crates/storage/provider/src/lib.rs | 5 +- .../src/providers/database/builder.rs | 37 +++++----- .../provider/src/providers/database/mod.rs | 73 ++++++++++++++----- .../src/providers/database/provider.rs | 38 ++++++++-- crates/storage/provider/src/test_utils/mod.rs | 7 +- crates/storage/storage-api/Cargo.toml | 3 + crates/storage/storage-api/src/lib.rs | 7 ++ crates/storage/storage-api/src/metadata.rs | 53 ++++++++++++++ examples/rpc-db/src/main.rs | 2 +- 23 files changed, 265 insertions(+), 68 deletions(-) create mode 100644 crates/storage/db-api/src/models/metadata.rs create mode 100644 crates/storage/storage-api/src/metadata.rs diff --git a/Cargo.lock b/Cargo.lock index b7c8618f423..21d98573bc2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10567,6 +10567,7 @@ dependencies = [ "reth-storage-errors", "reth-trie-common", "revm-database", + "serde_json", ] [[package]] diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 5b8cfce7716..4d18d811841 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -132,7 +132,7 @@ impl EnvironmentArgs { db, self.chain.clone(), static_file_provider, - ) + )? .with_prune_modes(prune_modes.clone()); // Check for consistency between database and static files. diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 9e8e68e9800..887f97ddddf 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -42,7 +42,7 @@ where Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, - ), + )?, to, from, evm_config, diff --git a/crates/cli/commands/src/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs index 8b9ba5e937e..0e976d4235f 100644 --- a/crates/cli/commands/src/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -39,7 +39,7 @@ pub(crate) async fn dump_hashing_account_stage + pub receipts_in_static_files: bool, +} + +impl StorageSettings { + /// Creates a new `StorageSettings` with default values. + pub const fn new() -> Self { + Self { receipts_in_static_files: false } + } + + /// Creates `StorageSettings` for legacy nodes. 
+ /// + /// This explicitly sets `receipts_in_static_files` to `false`, ensuring older nodes + /// continue writing receipts to the database when receipt pruning is enabled. + pub const fn legacy() -> Self { + Self { receipts_in_static_files: false } + } + + /// Sets the `receipts_static_files` flag to true. + pub const fn with_receipts_in_static_files(mut self) -> Self { + self.receipts_in_static_files = true; + self + } +} diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 31d9b301f8c..ebc36252506 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -20,12 +20,14 @@ use serde::{Deserialize, Serialize}; pub mod accounts; pub mod blocks; pub mod integer_list; +pub mod metadata; pub mod sharded_key; pub mod storage_sharded_key; pub use accounts::*; pub use blocks::*; pub use integer_list::IntegerList; +pub use metadata::*; pub use reth_db_models::{ AccountBeforeTx, ClientVersion, StaticFileBlockWithdrawals, StoredBlockBodyIndices, StoredBlockWithdrawals, diff --git a/crates/storage/db-api/src/tables/mod.rs b/crates/storage/db-api/src/tables/mod.rs index cf2a20fff04..483048383ab 100644 --- a/crates/storage/db-api/src/tables/mod.rs +++ b/crates/storage/db-api/src/tables/mod.rs @@ -540,6 +540,13 @@ tables! { type Key = ChainStateKey; type Value = BlockNumber; } + + /// Stores generic node metadata as key-value pairs. + /// Can store feature flags, configuration markers, and other node-specific data. + table Metadata { + type Key = String; + type Value = Vec; + } } /// Keys for the `ChainState` table. diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index de55cea3c99..3579d5360d6 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -15,9 +15,9 @@ use reth_primitives_traits::{ use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, - HashingWriter, HeaderProvider, HistoryWriter, OriginalValuesKnown, ProviderError, RevertsInit, - StageCheckpointReader, StageCheckpointWriter, StateWriter, StaticFileProviderFactory, - TrieWriter, + HashingWriter, HeaderProvider, HistoryWriter, MetadataWriter, OriginalValuesKnown, + ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, StateWriter, + StaticFileProviderFactory, StorageSettings, StorageSettingsCache, TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -90,7 +90,8 @@ where + StaticFileProviderFactory> + ChainSpecProvider + StageCheckpointReader - + BlockHashReader, + + BlockHashReader + + StorageSettingsCache, PF::ProviderRW: StaticFileProviderFactory + StageCheckpointWriter + HistoryWriter @@ -98,6 +99,7 @@ where + HashingWriter + StateWriter + TrieWriter + + MetadataWriter + AsRef, PF::ChainSpec: EthChainSpec
<Header = <PF::Primitives as NodePrimitives>
::BlockHeader>, { @@ -161,9 +163,14 @@ where static_file_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(0)?; static_file_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(0)?; + // Behaviour reserved only for new nodes should be set here. + let storage_settings = StorageSettings::new(); + provider_rw.write_storage_settings(storage_settings)?; + // `commit_unwind`` will first commit the DB and then the static file provider, which is // necessary on `init_genesis`. provider_rw.commit()?; + factory.set_storage_settings_cache(storage_settings); Ok(hash) } @@ -726,11 +733,14 @@ mod tests { init_genesis(&factory).unwrap(); // Try to init db with a different genesis block - let genesis_hash = init_genesis(&ProviderFactory::::new( - factory.into_db(), - MAINNET.clone(), - static_file_provider, - )); + let genesis_hash = init_genesis( + &ProviderFactory::::new( + factory.into_db(), + MAINNET.clone(), + static_file_provider, + ) + .unwrap(), + ); assert!(matches!( genesis_hash.unwrap_err(), diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 70822c604bb..5cd598aa46b 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -49,7 +49,10 @@ pub use reth_chain_state::{ }; // reexport traits to avoid breaking changes -pub use reth_storage_api::{HistoryWriter, StatsReader}; +pub use reth_storage_api::{ + HistoryWriter, MetadataProvider, MetadataWriter, StatsReader, StorageSettings, + StorageSettingsCache, +}; pub(crate) fn to_range>(bounds: R) -> std::ops::Range { let start = match bounds.start_bound() { diff --git a/crates/storage/provider/src/providers/database/builder.rs b/crates/storage/provider/src/providers/database/builder.rs index 4bc8569432e..bcd61f188f9 100644 --- a/crates/storage/provider/src/providers/database/builder.rs +++ b/crates/storage/provider/src/providers/database/builder.rs @@ -3,13 +3,17 @@ //! This also includes general purpose staging types that provide builder style functions that lead //! up to the intended build target. 
-use crate::{providers::StaticFileProvider, ProviderFactory}; +use crate::{ + providers::{NodeTypesForProvider, StaticFileProvider}, + ProviderFactory, +}; use reth_db::{ mdbx::{DatabaseArguments, MaxReadTransactionDuration}, open_db_read_only, DatabaseEnv, }; use reth_db_api::{database_metrics::DatabaseMetrics, Database}; use reth_node_types::{NodeTypes, NodeTypesWithDBAdapter}; +use reth_storage_errors::provider::ProviderResult; use std::{ marker::PhantomData, path::{Path, PathBuf}, @@ -48,10 +52,9 @@ impl ProviderFactoryBuilder { /// /// ```no_run /// use reth_chainspec::MAINNET; - /// use reth_node_types::NodeTypes; - /// use reth_provider::providers::ProviderFactoryBuilder; + /// use reth_provider::providers::{NodeTypesForProvider, ProviderFactoryBuilder}; /// - /// fn demo>() { + /// fn demo>() { /// let provider_factory = ProviderFactoryBuilder::::default() /// .open_read_only(MAINNET.clone(), "datadir") /// .unwrap(); @@ -64,11 +67,9 @@ impl ProviderFactoryBuilder { /// /// ```no_run /// use reth_chainspec::MAINNET; - /// use reth_node_types::NodeTypes; - /// - /// use reth_provider::providers::{ProviderFactoryBuilder, ReadOnlyConfig}; + /// use reth_provider::providers::{NodeTypesForProvider, ProviderFactoryBuilder, ReadOnlyConfig}; /// - /// fn demo>() { + /// fn demo>() { /// let provider_factory = ProviderFactoryBuilder::::default() /// .open_read_only(MAINNET.clone(), ReadOnlyConfig::from_datadir("datadir").no_watch()) /// .unwrap(); @@ -84,11 +85,9 @@ impl ProviderFactoryBuilder { /// /// ```no_run /// use reth_chainspec::MAINNET; - /// use reth_node_types::NodeTypes; - /// - /// use reth_provider::providers::{ProviderFactoryBuilder, ReadOnlyConfig}; + /// use reth_provider::providers::{NodeTypesForProvider, ProviderFactoryBuilder, ReadOnlyConfig}; /// - /// fn demo>() { + /// fn demo>() { /// let provider_factory = ProviderFactoryBuilder::::default() /// .open_read_only( /// MAINNET.clone(), @@ -103,15 +102,15 @@ impl ProviderFactoryBuilder { config: impl Into, ) -> eyre::Result>>> where - N: NodeTypes, + N: NodeTypesForProvider, { let ReadOnlyConfig { db_dir, db_args, static_files_dir, watch_static_files } = config.into(); - Ok(self - .db(Arc::new(open_db_read_only(db_dir, db_args)?)) + self.db(Arc::new(open_db_read_only(db_dir, db_args)?)) .chainspec(chainspec) .static_file(StaticFileProvider::read_only(static_files_dir, watch_static_files)?) - .build_provider_factory()) + .build_provider_factory() + .map_err(Into::into) } } @@ -320,11 +319,13 @@ impl TypesAnd3 { impl TypesAnd3, StaticFileProvider> where - N: NodeTypes, + N: NodeTypesForProvider, DB: Database + DatabaseMetrics + Clone + Unpin + 'static, { /// Creates the [`ProviderFactory`]. 
- pub fn build_provider_factory(self) -> ProviderFactory> { + pub fn build_provider_factory( + self, + ) -> ProviderResult>> { let Self { _types, val_1, val_2, val_3 } = self; ProviderFactory::new(val_1, val_2, val_3) } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 5d3b5280cda..a0de2f9e740 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -1,29 +1,31 @@ use crate::{ - providers::{state::latest::LatestStateProvider, StaticFileProvider}, + providers::{state::latest::LatestStateProvider, NodeTypesForProvider, StaticFileProvider}, to_range, traits::{BlockSource, ReceiptProvider}, BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, - HashedPostStateProvider, HeaderProvider, HeaderSyncGapProvider, ProviderError, - PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, + HashedPostStateProvider, HeaderProvider, HeaderSyncGapProvider, MetadataProvider, + ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, }; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use core::fmt; +use parking_lot::RwLock; use reth_chainspec::ChainInfo; use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_node_types::{ - BlockTy, HeaderTy, NodeTypes, NodeTypesWithDB, NodeTypesWithDBAdapter, ReceiptTy, TxTy, + BlockTy, HeaderTy, NodeTypesWithDB, NodeTypesWithDBAdapter, ReceiptTy, TxTy, }; use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ - BlockBodyIndicesProvider, NodePrimitivesProvider, TryIntoHistoricalStateProvider, + BlockBodyIndicesProvider, NodePrimitivesProvider, StorageSettings, StorageSettingsCache, + TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::HashedPostState; @@ -64,31 +66,52 @@ pub struct ProviderFactory { prune_modes: PruneModes, /// The node storage handler. storage: Arc, + /// Storage configuration settings for this node + storage_settings: Arc>, } -impl ProviderFactory>> { +impl ProviderFactory>> { /// Instantiates the builder for this type pub fn builder() -> ProviderFactoryBuilder { ProviderFactoryBuilder::default() } } -impl ProviderFactory { +impl ProviderFactory { /// Create new database provider factory. pub fn new( db: N::DB, chain_spec: Arc, static_file_provider: StaticFileProvider, - ) -> Self { - Self { + ) -> ProviderResult { + // Load storage settings from database at init time. Creates a temporary provider + // to read persisted settings, falling back to legacy defaults if none exist. + // + // Both factory and all providers it creates should share these cached settings. 
+ let legacy_settings = StorageSettings::legacy(); + let storage_settings = DatabaseProvider::<_, N>::new( + db.tx()?, + chain_spec.clone(), + static_file_provider.clone(), + Default::default(), + Default::default(), + Arc::new(RwLock::new(legacy_settings)), + ) + .storage_settings()? + .unwrap_or(legacy_settings); + + Ok(Self { db, chain_spec, static_file_provider, prune_modes: PruneModes::default(), storage: Default::default(), - } + storage_settings: Arc::new(RwLock::new(storage_settings)), + }) } +} +impl ProviderFactory { /// Enables metrics on the static file provider. pub fn with_static_files_metrics(mut self) -> Self { self.static_file_provider = self.static_file_provider.with_metrics(); @@ -113,7 +136,17 @@ impl ProviderFactory { } } -impl>> ProviderFactory { +impl StorageSettingsCache for ProviderFactory { + fn cached_storage_settings(&self) -> StorageSettings { + *self.storage_settings.read() + } + + fn set_storage_settings_cache(&self, settings: StorageSettings) { + *self.storage_settings.write() = settings; + } +} + +impl>> ProviderFactory { /// Create new database provider by passing a path. [`ProviderFactory`] will own the database /// instance. pub fn new_with_database_path>( @@ -122,13 +155,12 @@ impl>> ProviderFactory { args: DatabaseArguments, static_file_provider: StaticFileProvider, ) -> RethResult { - Ok(Self { - db: Arc::new(init_db(path, args).map_err(RethError::msg)?), + Self::new( + Arc::new(init_db(path, args).map_err(RethError::msg)?), chain_spec, static_file_provider, - prune_modes: PruneModes::default(), - storage: Default::default(), - }) + ) + .map_err(RethError::Provider) } } @@ -147,6 +179,7 @@ impl ProviderFactory { self.static_file_provider.clone(), self.prune_modes.clone(), self.storage.clone(), + self.storage_settings.clone(), )) } @@ -162,6 +195,7 @@ impl ProviderFactory { self.static_file_provider.clone(), self.prune_modes.clone(), self.storage.clone(), + self.storage_settings.clone(), ))) } @@ -545,13 +579,15 @@ where N: NodeTypesWithDB, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let Self { db, chain_spec, static_file_provider, prune_modes, storage } = self; + let Self { db, chain_spec, static_file_provider, prune_modes, storage, storage_settings } = + self; f.debug_struct("ProviderFactory") .field("db", &db) .field("chain_spec", &chain_spec) .field("static_file_provider", &static_file_provider) .field("prune_modes", &prune_modes) .field("storage", &storage) + .field("storage_settings", &*storage_settings.read()) .finish() } } @@ -564,6 +600,7 @@ impl Clone for ProviderFactory { static_file_provider: self.static_file_provider.clone(), prune_modes: self.prune_modes.clone(), storage: self.storage.clone(), + storage_settings: self.storage_settings.clone(), } } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index a90b2c2e640..b46ccd9a633 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -31,6 +31,7 @@ use alloy_primitives::{ Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, }; use itertools::Itertools; +use parking_lot::RwLock; use rayon::slice::ParallelSliceMut; use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec}; @@ -39,7 +40,7 @@ use reth_db_api::{ database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, - BlockNumberHashedAddress, 
ShardedKey, StoredBlockBodyIndices, + BlockNumberHashedAddress, ShardedKey, StorageSettings, StoredBlockBodyIndices, }, table::Table, tables, @@ -57,8 +58,9 @@ use reth_prune_types::{ use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ - BlockBodyIndicesProvider, BlockBodyReader, NodePrimitivesProvider, StateProvider, - StorageChangeSetReader, TryIntoHistoricalStateProvider, + BlockBodyIndicesProvider, BlockBodyReader, MetadataProvider, MetadataWriter, + NodePrimitivesProvider, StateProvider, StorageChangeSetReader, StorageSettingsCache, + TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ @@ -153,6 +155,8 @@ pub struct DatabaseProvider { prune_modes: PruneModes, /// Node storage handler. storage: Arc, + /// Storage configuration settings for this node + storage_settings: Arc>, } impl DatabaseProvider { @@ -248,8 +252,9 @@ impl DatabaseProvider { static_file_provider: StaticFileProvider, prune_modes: PruneModes, storage: Arc, + storage_settings: Arc>, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes, storage } + Self { tx, chain_spec, static_file_provider, prune_modes, storage, storage_settings } } } @@ -494,8 +499,9 @@ impl DatabaseProvider { static_file_provider: StaticFileProvider, prune_modes: PruneModes, storage: Arc, + storage_settings: Arc>, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes, storage } + Self { tx, chain_spec, static_file_provider, prune_modes, storage, storage_settings } } /// Consume `DbTx` or `DbTxMut`. @@ -3133,6 +3139,28 @@ impl DBProvider for DatabaseProvider } } +impl MetadataProvider for DatabaseProvider { + fn get_metadata(&self, key: &str) -> ProviderResult>> { + self.tx.get::(key.to_string()).map_err(Into::into) + } +} + +impl MetadataWriter for DatabaseProvider { + fn write_metadata(&self, key: &str, value: Vec) -> ProviderResult<()> { + self.tx.put::(key.to_string(), value).map_err(Into::into) + } +} + +impl StorageSettingsCache for DatabaseProvider { + fn cached_storage_settings(&self) -> StorageSettings { + *self.storage_settings.read() + } + + fn set_storage_settings_cache(&self, settings: StorageSettings) { + *self.storage_settings.write() = settings; + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index ccda2d60e85..5530c7411c7 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -1,5 +1,5 @@ use crate::{ - providers::{ProviderNodeTypes, StaticFileProvider}, + providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider}, HashingWriter, ProviderFactory, TrieWriter, }; use alloy_primitives::B256; @@ -10,7 +10,7 @@ use reth_db::{ }; use reth_errors::ProviderResult; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_node_types::{NodeTypes, NodeTypesWithDBAdapter}; +use reth_node_types::NodeTypesWithDBAdapter; use reth_primitives_traits::{Account, StorageEntry}; use reth_trie::StateRoot; use reth_trie_db::DatabaseStateRoot; @@ -50,7 +50,7 @@ pub fn create_test_provider_factory_with_chain_spec( } /// Creates test provider factory with provided chain spec. 
-pub fn create_test_provider_factory_with_node_types( +pub fn create_test_provider_factory_with_node_types( chain_spec: Arc, ) -> ProviderFactory>>> { let (static_dir, _) = create_test_static_files_dir(); @@ -60,6 +60,7 @@ pub fn create_test_provider_factory_with_node_types( chain_spec, StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"), ) + .expect("failed to create test provider factory") } /// Inserts the genesis alloc from the provided chain spec into the trie. diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index a62193a5dd8..83cbbbd714e 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -32,6 +32,7 @@ alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true auto_impl.workspace = true +serde_json = { workspace = true, optional = true } [features] default = ["std"] @@ -50,10 +51,12 @@ std = [ "reth-storage-errors/std", "reth-db-models/std", "reth-trie-common/std", + "serde_json?/std", ] db-api = [ "dep:reth-db-api", + "dep:serde_json", ] serde = [ diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 897802da980..5a191f37505 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -94,5 +94,12 @@ pub use state_writer::*; mod header_sync_gap; pub use header_sync_gap::HeaderSyncGapProvider; +#[cfg(feature = "db-api")] +pub mod metadata; +#[cfg(feature = "db-api")] +pub use metadata::{MetadataProvider, MetadataWriter, StorageSettingsCache}; +#[cfg(feature = "db-api")] +pub use reth_db_api::models::StorageSettings; + mod full; pub use full::*; diff --git a/crates/storage/storage-api/src/metadata.rs b/crates/storage/storage-api/src/metadata.rs new file mode 100644 index 00000000000..2ff48f73385 --- /dev/null +++ b/crates/storage/storage-api/src/metadata.rs @@ -0,0 +1,53 @@ +//! Metadata provider trait for reading and writing node metadata. + +use reth_db_api::models::StorageSettings; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; + +/// Metadata keys. +pub mod keys { + /// Storage configuration settings for this node. + pub const STORAGE_SETTINGS: &str = "storage_settings"; +} + +/// Client trait for reading node metadata from the database. +#[auto_impl::auto_impl(&, Arc)] +pub trait MetadataProvider: Send + Sync { + /// Get a metadata value by key + fn get_metadata(&self, key: &str) -> ProviderResult>>; + + /// Get storage settings for this node + fn storage_settings(&self) -> ProviderResult> { + self.get_metadata(keys::STORAGE_SETTINGS)? + .map(|bytes| serde_json::from_slice(&bytes).map_err(ProviderError::other)) + .transpose() + } +} + +/// Client trait for writing node metadata to the database. +pub trait MetadataWriter: Send + Sync { + /// Write a metadata value + fn write_metadata(&self, key: &str, value: Vec) -> ProviderResult<()>; + + /// Write storage settings for this node + /// + /// Be sure to update provider factory cache with + /// [`StorageSettingsCache::set_storage_settings_cache`]. + fn write_storage_settings(&self, settings: StorageSettings) -> ProviderResult<()> { + self.write_metadata( + keys::STORAGE_SETTINGS, + serde_json::to_vec(&settings).map_err(ProviderError::other)?, + ) + } +} + +/// Trait for caching storage settings on a provider factory. +pub trait StorageSettingsCache: Send + Sync { + /// Gets the cached storage settings. 
+ fn cached_storage_settings(&self) -> StorageSettings; + + /// Sets the storage settings of this `ProviderFactory`. + /// + /// IMPORTANT: It does not save settings in storage, that should be done by + /// [`MetadataWriter::write_storage_settings`] + fn set_storage_settings_cache(&self, settings: StorageSettings); +} diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index 97bd1debdcc..b19d99776ab 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -53,7 +53,7 @@ async fn main() -> eyre::Result<()> { db.clone(), spec.clone(), StaticFileProvider::read_only(db_path.join("static_files"), true)?, - ); + )?; // 2. Set up the blockchain provider using only the database provider and a noop for the tree to // satisfy trait bounds. Tree is not used in this example since we are only operating on the From 99fe17582343828b5b1b8e346a11c0a8c83196aa Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 6 Nov 2025 07:47:03 +0000 Subject: [PATCH 342/371] feat(engine): record newPayload/forkchoiceUpdated metrics outside of RPC (#19522) --- crates/engine/tree/src/tree/metrics.rs | 129 ++++++++++++++++++-- crates/engine/tree/src/tree/mod.rs | 25 ++-- crates/engine/tree/src/tree/tests.rs | 28 ----- crates/rpc/rpc-engine-api/src/engine_api.rs | 17 +-- crates/rpc/rpc-engine-api/src/metrics.rs | 107 +--------------- 5 files changed, 140 insertions(+), 166 deletions(-) diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 1d1e208b0a6..3adb16b0f6b 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -1,11 +1,13 @@ -use crate::tree::MeteredStateHook; +use crate::tree::{error::InsertBlockFatalError, MeteredStateHook, TreeOutcome}; use alloy_consensus::transaction::TxHashRef; use alloy_evm::{ block::{BlockExecutor, ExecutableTx}, Evm, }; +use alloy_rpc_types_engine::{PayloadStatus, PayloadStatusEnum}; use core::borrow::BorrowMut; -use reth_errors::BlockExecutionError; +use reth_engine_primitives::{ForkchoiceStatus, OnForkChoiceUpdated}; +use reth_errors::{BlockExecutionError, ProviderError}; use reth_evm::{metrics::ExecutorMetrics, OnStateHook}; use reth_execution_types::BlockExecutionOutput; use reth_metrics::{ @@ -15,7 +17,7 @@ use reth_metrics::{ use reth_primitives_traits::SignedTransaction; use reth_trie::updates::TrieUpdates; use revm::database::{states::bundle_state::BundleRetention, State}; -use std::time::Instant; +use std::time::{Duration, Instant}; use tracing::{debug_span, trace}; /// Metrics for the `EngineApi`. @@ -132,20 +134,20 @@ pub(crate) struct TreeMetrics { #[derive(Metrics)] #[metrics(scope = "consensus.engine.beacon")] pub(crate) struct EngineMetrics { + /// Engine API forkchoiceUpdated response type metrics + #[metric(skip)] + pub(crate) forkchoice_updated: ForkchoiceUpdatedMetrics, + /// Engine API newPayload response type metrics + #[metric(skip)] + pub(crate) new_payload: NewPayloadStatusMetrics, /// How many executed blocks are currently stored. pub(crate) executed_blocks: Gauge, /// How many already executed blocks were directly inserted into the tree. pub(crate) inserted_already_executed_blocks: Counter, /// The number of times the pipeline was run. pub(crate) pipeline_runs: Counter, - /// The total count of forkchoice updated messages received. - pub(crate) forkchoice_updated_messages: Counter, - /// The total count of forkchoice updated messages with payload received. 
- pub(crate) forkchoice_with_attributes_updated_messages: Counter, /// Newly arriving block hash is not present in executed blocks cache storage pub(crate) executed_new_block_cache_miss: Counter, - /// The total count of new payload messages received. - pub(crate) new_payload_messages: Counter, /// Histogram of persistence operation durations (in seconds) pub(crate) persistence_duration: Histogram, /// Tracks the how often we failed to deliver a newPayload response. @@ -160,6 +162,115 @@ pub(crate) struct EngineMetrics { pub(crate) block_insert_total_duration: Histogram, } +/// Metrics for engine forkchoiceUpdated responses. +#[derive(Metrics)] +#[metrics(scope = "consensus.engine.beacon")] +pub(crate) struct ForkchoiceUpdatedMetrics { + /// The total count of forkchoice updated messages received. + pub(crate) forkchoice_updated_messages: Counter, + /// The total count of forkchoice updated messages with payload received. + pub(crate) forkchoice_with_attributes_updated_messages: Counter, + /// The total count of forkchoice updated messages that we responded to with + /// [`Valid`](ForkchoiceStatus::Valid). + pub(crate) forkchoice_updated_valid: Counter, + /// The total count of forkchoice updated messages that we responded to with + /// [`Invalid`](ForkchoiceStatus::Invalid). + pub(crate) forkchoice_updated_invalid: Counter, + /// The total count of forkchoice updated messages that we responded to with + /// [`Syncing`](ForkchoiceStatus::Syncing). + pub(crate) forkchoice_updated_syncing: Counter, + /// The total count of forkchoice updated messages that were unsuccessful, i.e. we responded + /// with an error type that is not a [`PayloadStatusEnum`]. + pub(crate) forkchoice_updated_error: Counter, + /// Latency for the forkchoice updated calls. + pub(crate) forkchoice_updated_latency: Histogram, + /// Latency for the last forkchoice updated call. + pub(crate) forkchoice_updated_last: Gauge, +} + +impl ForkchoiceUpdatedMetrics { + /// Increment the forkchoiceUpdated counter based on the given result + pub(crate) fn update_response_metrics( + &self, + has_attrs: bool, + result: &Result, ProviderError>, + elapsed: Duration, + ) { + match result { + Ok(outcome) => match outcome.outcome.forkchoice_status() { + ForkchoiceStatus::Valid => self.forkchoice_updated_valid.increment(1), + ForkchoiceStatus::Invalid => self.forkchoice_updated_invalid.increment(1), + ForkchoiceStatus::Syncing => self.forkchoice_updated_syncing.increment(1), + }, + Err(_) => self.forkchoice_updated_error.increment(1), + } + self.forkchoice_updated_messages.increment(1); + if has_attrs { + self.forkchoice_with_attributes_updated_messages.increment(1); + } + self.forkchoice_updated_latency.record(elapsed); + self.forkchoice_updated_last.set(elapsed); + } +} + +/// Metrics for engine newPayload responses. +#[derive(Metrics)] +#[metrics(scope = "consensus.engine.beacon")] +pub(crate) struct NewPayloadStatusMetrics { + /// The total count of new payload messages received. + pub(crate) new_payload_messages: Counter, + /// The total count of new payload messages that we responded to with + /// [Valid](PayloadStatusEnum::Valid). + pub(crate) new_payload_valid: Counter, + /// The total count of new payload messages that we responded to with + /// [Invalid](PayloadStatusEnum::Invalid). + pub(crate) new_payload_invalid: Counter, + /// The total count of new payload messages that we responded to with + /// [Syncing](PayloadStatusEnum::Syncing). 
+ pub(crate) new_payload_syncing: Counter, + /// The total count of new payload messages that we responded to with + /// [Accepted](PayloadStatusEnum::Accepted). + pub(crate) new_payload_accepted: Counter, + /// The total count of new payload messages that were unsuccessful, i.e. we responded with an + /// error type that is not a [`PayloadStatusEnum`]. + pub(crate) new_payload_error: Counter, + /// The total gas of valid new payload messages received. + pub(crate) new_payload_total_gas: Histogram, + /// The gas per second of valid new payload messages received. + pub(crate) new_payload_gas_per_second: Histogram, + /// Latency for the new payload calls. + pub(crate) new_payload_latency: Histogram, + /// Latency for the last new payload call. + pub(crate) new_payload_last: Gauge, +} + +impl NewPayloadStatusMetrics { + /// Increment the newPayload counter based on the given result + pub(crate) fn update_response_metrics( + &self, + result: &Result, InsertBlockFatalError>, + gas_used: u64, + elapsed: Duration, + ) { + match result { + Ok(outcome) => match outcome.outcome.status { + PayloadStatusEnum::Valid => { + self.new_payload_valid.increment(1); + self.new_payload_total_gas.record(gas_used as f64); + self.new_payload_gas_per_second.record(gas_used as f64 / elapsed.as_secs_f64()); + } + PayloadStatusEnum::Syncing => self.new_payload_syncing.increment(1), + PayloadStatusEnum::Accepted => self.new_payload_accepted.increment(1), + PayloadStatusEnum::Invalid { .. } => self.new_payload_invalid.increment(1), + }, + Err(_) => self.new_payload_error.increment(1), + } + self.new_payload_messages.increment(1); + self.new_payload_latency.record(elapsed); + self.new_payload_last.set(elapsed); + } +} + /// Metrics for non-execution related block validation. #[derive(Metrics)] #[metrics(scope = "sync.block_validation")] diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index ca8a93df079..7b73d844729 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -506,7 +506,6 @@ where payload: T::ExecutionData, ) -> Result, InsertBlockFatalError> { trace!(target: "engine::tree", "invoked new payload"); - self.metrics.engine.new_payload_messages.increment(1); // start timing for the new payload process let start = Instant::now(); @@ -985,7 +984,7 @@ where trace!(target: "engine::tree", ?attrs, "invoked forkchoice update"); // Record metrics - self.record_forkchoice_metrics(&attrs); + self.record_forkchoice_metrics(); // Pre-validation of forkchoice state if let Some(early_result) = self.validate_forkchoice_state(state)? 
{ @@ -1008,11 +1007,7 @@ where } /// Records metrics for forkchoice updated calls - fn record_forkchoice_metrics(&self, attrs: &Option) { - self.metrics.engine.forkchoice_updated_messages.increment(1); - if attrs.is_some() { - self.metrics.engine.forkchoice_with_attributes_updated_messages.increment(1); - } + fn record_forkchoice_metrics(&self) { self.canonical_in_memory_state.on_forkchoice_update_received(); } @@ -1393,6 +1388,9 @@ where tx, version, } => { + let has_attrs = payload_attrs.is_some(); + + let start = Instant::now(); let mut output = self.on_forkchoice_updated(state, payload_attrs, version); @@ -1412,6 +1410,12 @@ where self.on_maybe_tree_event(res.event.take())?; } + let elapsed = start.elapsed(); + self.metrics + .engine + .forkchoice_updated + .update_response_metrics(has_attrs, &output, elapsed); + if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(Into::into)) { @@ -1423,7 +1427,14 @@ where } } BeaconEngineMessage::NewPayload { payload, tx } => { + let start = Instant::now(); + let gas_used = payload.gas_used(); let mut output = self.on_new_payload(payload); + let elapsed = start.elapsed(); + self.metrics + .engine + .new_payload + .update_response_metrics(&output, gas_used, elapsed); let maybe_event = output.as_mut().ok().and_then(|out| out.event.take()); diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index 7fbae4cac5c..606faabaee3 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -1694,7 +1694,6 @@ mod payload_execution_tests { #[cfg(test)] mod forkchoice_updated_tests { use super::*; - use alloy_primitives::Address; /// Test that validates the forkchoice state pre-validation logic #[tokio::test] @@ -1914,33 +1913,6 @@ mod forkchoice_updated_tests { assert!(fcu_result.payload_status.is_syncing(), "Should return syncing during backfill"); } - /// Test metrics recording in forkchoice updated - #[tokio::test] - async fn test_record_forkchoice_metrics() { - let chain_spec = MAINNET.clone(); - let test_harness = TestHarness::new(chain_spec); - - // Get initial metrics state by checking if metrics are recorded - // We can't directly get counter values, but we can verify the methods are called - - // Test without attributes - let attrs_none = None; - test_harness.tree.record_forkchoice_metrics(&attrs_none); - - // Test with attributes - let attrs_some = Some(alloy_rpc_types_engine::PayloadAttributes { - timestamp: 1000, - prev_randao: B256::random(), - suggested_fee_recipient: Address::random(), - withdrawals: None, - parent_beacon_block_root: None, - }); - test_harness.tree.record_forkchoice_metrics(&attrs_some); - - // We can't directly verify counter values since they're private metrics - // But we can verify the methods don't panic and execute successfully - } - /// Test edge case: FCU with invalid ancestor #[tokio::test] async fn test_fcu_with_invalid_ancestor() { diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 6aeadeecba5..8902a111f27 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -21,8 +21,7 @@ use reth_chainspec::EthereumHardforks; use reth_engine_primitives::{ConsensusEngineHandle, EngineApiValidator, EngineTypes}; use reth_payload_builder::PayloadStore; use reth_payload_primitives::{ - validate_payload_timestamp, EngineApiMessageVersion, ExecutionPayload, PayloadOrAttributes, - PayloadTypes, + validate_payload_timestamp, EngineApiMessageVersion, 
PayloadOrAttributes, PayloadTypes, }; use reth_primitives_traits::{Block, BlockBody}; use reth_rpc_api::{EngineApiServer, IntoEngineApiRpcModule}; @@ -161,12 +160,9 @@ where payload: PayloadT::ExecutionData, ) -> EngineApiResult { let start = Instant::now(); - let gas_used = payload.gas_used(); - let res = Self::new_payload_v1(self, payload).await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v1.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); res } @@ -197,12 +193,9 @@ where payload: PayloadT::ExecutionData, ) -> EngineApiResult { let start = Instant::now(); - let gas_used = payload.gas_used(); - let res = Self::new_payload_v2(self, payload).await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v2.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); res } @@ -234,12 +227,10 @@ where payload: PayloadT::ExecutionData, ) -> RpcResult { let start = Instant::now(); - let gas_used = payload.gas_used(); let res = Self::new_payload_v3(self, payload).await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v3.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); Ok(res?) } @@ -271,13 +262,10 @@ where payload: PayloadT::ExecutionData, ) -> RpcResult { let start = Instant::now(); - let gas_used = payload.gas_used(); - let res = Self::new_payload_v4(self, payload).await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v4.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); Ok(res?) } @@ -320,7 +308,6 @@ where let start = Instant::now(); let res = Self::fork_choice_updated_v1(self, state, payload_attrs).await; self.inner.metrics.latency.fork_choice_updated_v1.record(start.elapsed()); - self.inner.metrics.fcu_response.update_response_metrics(&res); res } @@ -346,7 +333,6 @@ where let start = Instant::now(); let res = Self::fork_choice_updated_v2(self, state, payload_attrs).await; self.inner.metrics.latency.fork_choice_updated_v2.record(start.elapsed()); - self.inner.metrics.fcu_response.update_response_metrics(&res); res } @@ -372,7 +358,6 @@ where let start = Instant::now(); let res = Self::fork_choice_updated_v3(self, state, payload_attrs).await; self.inner.metrics.latency.fork_choice_updated_v3.record(start.elapsed()); - self.inner.metrics.fcu_response.update_response_metrics(&res); res } diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index 95156e490b7..19f8a1520b5 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs @@ -1,8 +1,4 @@ -use std::time::Duration; - -use crate::EngineApiError; -use alloy_rpc_types_engine::{ForkchoiceUpdated, PayloadStatus, PayloadStatusEnum}; -use metrics::{Counter, Gauge, Histogram}; +use metrics::{Counter, Histogram}; use reth_metrics::Metrics; /// All beacon consensus engine metrics @@ -10,10 +6,6 @@ use reth_metrics::Metrics; pub(crate) struct EngineApiMetrics { /// Engine API latency metrics pub(crate) latency: EngineApiLatencyMetrics, - /// Engine API forkchoiceUpdated response type metrics - pub(crate) fcu_response: ForkchoiceUpdatedResponseMetrics, - /// Engine API newPayload response type metrics - pub(crate) new_payload_response: NewPayloadStatusResponseMetrics, /// Blob-related metrics pub(crate) blob_metrics: BlobMetrics, } @@ -58,58 +50,6 @@ pub(crate) 
struct EngineApiLatencyMetrics { pub(crate) get_blobs_v2: Histogram, } -/// Metrics for engine API forkchoiceUpdated responses. -#[derive(Metrics)] -#[metrics(scope = "engine.rpc")] -pub(crate) struct ForkchoiceUpdatedResponseMetrics { - /// The total count of forkchoice updated messages received. - pub(crate) forkchoice_updated_messages: Counter, - /// The total count of forkchoice updated messages that we responded to with - /// [`Invalid`](alloy_rpc_types_engine::PayloadStatusEnum#Invalid). - pub(crate) forkchoice_updated_invalid: Counter, - /// The total count of forkchoice updated messages that we responded to with - /// [`Valid`](alloy_rpc_types_engine::PayloadStatusEnum#Valid). - pub(crate) forkchoice_updated_valid: Counter, - /// The total count of forkchoice updated messages that we responded to with - /// [`Syncing`](alloy_rpc_types_engine::PayloadStatusEnum#Syncing). - pub(crate) forkchoice_updated_syncing: Counter, - /// The total count of forkchoice updated messages that we responded to with - /// [`Accepted`](alloy_rpc_types_engine::PayloadStatusEnum#Accepted). - pub(crate) forkchoice_updated_accepted: Counter, - /// The total count of forkchoice updated messages that were unsuccessful, i.e. we responded - /// with an error type that is not a [`PayloadStatusEnum`]. - pub(crate) forkchoice_updated_error: Counter, -} - -/// Metrics for engine API newPayload responses. -#[derive(Metrics)] -#[metrics(scope = "engine.rpc")] -pub(crate) struct NewPayloadStatusResponseMetrics { - /// The total count of new payload messages received. - pub(crate) new_payload_messages: Counter, - /// The total count of new payload messages that we responded to with - /// [Invalid](alloy_rpc_types_engine::PayloadStatusEnum#Invalid). - pub(crate) new_payload_invalid: Counter, - /// The total count of new payload messages that we responded to with - /// [Valid](alloy_rpc_types_engine::PayloadStatusEnum#Valid). - pub(crate) new_payload_valid: Counter, - /// The total count of new payload messages that we responded to with - /// [Syncing](alloy_rpc_types_engine::PayloadStatusEnum#Syncing). - pub(crate) new_payload_syncing: Counter, - /// The total count of new payload messages that we responded to with - /// [Accepted](alloy_rpc_types_engine::PayloadStatusEnum#Accepted). - pub(crate) new_payload_accepted: Counter, - /// The total count of new payload messages that were unsuccessful, i.e. we responded with an - /// error type that is not a [`PayloadStatusEnum`]. - pub(crate) new_payload_error: Counter, - /// The total gas of valid new payload messages received. - pub(crate) new_payload_total_gas: Histogram, - /// The gas per second of valid new payload messages received. 
- pub(crate) new_payload_gas_per_second: Histogram, - /// Latency for the last `engine_newPayloadV*` call - pub(crate) new_payload_last: Gauge, -} - #[derive(Metrics)] #[metrics(scope = "engine.rpc.blobs")] pub(crate) struct BlobMetrics { @@ -126,48 +66,3 @@ pub(crate) struct BlobMetrics { /// Number of times getBlobsV2 responded with “miss” pub(crate) get_blobs_requests_failure_total: Counter, } - -impl NewPayloadStatusResponseMetrics { - /// Increment the newPayload counter based on the given rpc result - pub(crate) fn update_response_metrics( - &self, - result: &Result, - gas_used: u64, - time: Duration, - ) { - self.new_payload_last.set(time); - match result { - Ok(status) => match status.status { - PayloadStatusEnum::Valid => { - self.new_payload_valid.increment(1); - self.new_payload_total_gas.record(gas_used as f64); - self.new_payload_gas_per_second.record(gas_used as f64 / time.as_secs_f64()); - } - PayloadStatusEnum::Syncing => self.new_payload_syncing.increment(1), - PayloadStatusEnum::Accepted => self.new_payload_accepted.increment(1), - PayloadStatusEnum::Invalid { .. } => self.new_payload_invalid.increment(1), - }, - Err(_) => self.new_payload_error.increment(1), - } - self.new_payload_messages.increment(1); - } -} - -impl ForkchoiceUpdatedResponseMetrics { - /// Increment the forkchoiceUpdated counter based on the given rpc result - pub(crate) fn update_response_metrics( - &self, - result: &Result, - ) { - match result { - Ok(status) => match status.payload_status.status { - PayloadStatusEnum::Valid => self.forkchoice_updated_valid.increment(1), - PayloadStatusEnum::Syncing => self.forkchoice_updated_syncing.increment(1), - PayloadStatusEnum::Accepted => self.forkchoice_updated_accepted.increment(1), - PayloadStatusEnum::Invalid { .. } => self.forkchoice_updated_invalid.increment(1), - }, - Err(_) => self.forkchoice_updated_error.increment(1), - } - self.forkchoice_updated_messages.increment(1); - } -} From 0928059f5cf05957b2a84ee89752321fbc15ac7a Mon Sep 17 00:00:00 2001 From: oooLowNeoNooo Date: Thu, 6 Nov 2025 09:24:25 +0100 Subject: [PATCH 343/371] fix: replace unreachable libmdbx documentation URL (#19532) --- crates/storage/libmdbx-rs/README.md | 4 ++-- docs/repo/layout.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/storage/libmdbx-rs/README.md b/crates/storage/libmdbx-rs/README.md index df115ee69a0..f6989efa419 100644 --- a/crates/storage/libmdbx-rs/README.md +++ b/crates/storage/libmdbx-rs/README.md @@ -1,7 +1,7 @@ # libmdbx-rs -Rust bindings for [libmdbx](https://libmdbx.dqdkfa.ru). +Rust bindings for [libmdbx](https://github.com/erthink/libmdbx). Forked from an earlier Apache licenced version of the `libmdbx-rs` crate, before it changed licence to GPL. NOTE: Most of the repo came from [lmdb-rs bindings](https://github.com/mozilla/lmdb-rs). @@ -9,7 +9,7 @@ NOTE: Most of the repo came from [lmdb-rs bindings](https://github.com/mozilla/l ## Updating the libmdbx Version To update the libmdbx version you must clone it and copy the `dist/` folder in `mdbx-sys/`. -Make sure to follow the [building steps](https://libmdbx.dqdkfa.ru/usage.html#getting). +Make sure to follow the [building steps](https://github.com/erthink/libmdbx#building). ```bash # clone libmdbx to a repository outside at specific tag diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 22aae4c3512..22d13ffd01a 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -40,7 +40,7 @@ All binaries are stored in [`bin`](../../bin). 
These crates are related to the database. - [`storage/codecs`](../../crates/storage/codecs): Different storage codecs. -- [`storage/libmdbx-rs`](../../crates/storage/libmdbx-rs): Rust bindings for [libmdbx](https://libmdbx.dqdkfa.ru). A fork of an earlier Apache-licensed version of [libmdbx-rs][libmdbx-rs]. +- [`storage/libmdbx-rs`](../../crates/storage/libmdbx-rs): Rust bindings for [libmdbx](https://github.com/erthink/libmdbx). A fork of an earlier Apache-licensed version of [libmdbx-rs][libmdbx-rs]. - [`storage/db`](../../crates/storage/db): Strongly typed Database abstractions (transactions, cursors, tables) over lower level database backends. - Implemented backends: mdbx - [`storage/provider`](../../crates/storage/provider): Traits which provide a higher level api over the database to access the Ethereum state and historical data (transactions, blocks etc.) From 791de250d77f57bfbb093e4bc1187f802bc323c5 Mon Sep 17 00:00:00 2001 From: Avory Date: Thu, 6 Nov 2025 11:04:24 +0200 Subject: [PATCH 344/371] perf(era-utils): avoid unnecessary PathBuf clone in export (#19530) --- crates/era-utils/src/export.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/era-utils/src/export.rs b/crates/era-utils/src/export.rs index d2aa706c798..0502f0e2eac 100644 --- a/crates/era-utils/src/export.rs +++ b/crates/era-utils/src/export.rs @@ -219,12 +219,12 @@ where writer.write_accumulator(&accumulator)?; writer.write_block_index(&block_index)?; writer.flush()?; - created_files.push(file_path.clone()); info!( target: "era::history::export", "Wrote ERA1 file: {file_path:?} with {blocks_written} blocks" ); + created_files.push(file_path); } } From ea4a686e86abd6373018b132f2de0327c06060ac Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Thu, 6 Nov 2025 10:19:39 +0100 Subject: [PATCH 345/371] fix(trie): InMemoryTrieCursor case where all DB nodes are deleted (#19464) --- crates/trie/trie/src/trie_cursor/in_memory.rs | 137 +++++++++++++----- 1 file changed, 99 insertions(+), 38 deletions(-) diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index d9658150f3a..2311dce82b3 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -75,7 +75,6 @@ pub struct InMemoryTrieCursor<'a, C> { in_memory_cursor: ForwardInMemoryCursor<'a, Nibbles, Option>, /// The key most recently returned from the Cursor. last_key: Option, - #[cfg(debug_assertions)] /// Whether an initial seek was called. seeked: bool, } @@ -88,14 +87,7 @@ impl<'a, C: TrieCursor> InMemoryTrieCursor<'a, C> { trie_updates: &'a [(Nibbles, Option)], ) -> Self { let in_memory_cursor = ForwardInMemoryCursor::new(trie_updates); - Self { - cursor, - cursor_entry: None, - in_memory_cursor, - last_key: None, - #[cfg(debug_assertions)] - seeked: false, - } + Self { cursor, cursor_entry: None, in_memory_cursor, last_key: None, seeked: false } } /// Asserts that the next entry to be returned from the cursor is not previous to the last entry @@ -113,13 +105,15 @@ impl<'a, C: TrieCursor> InMemoryTrieCursor<'a, C> { /// Seeks the `cursor_entry` field of the struct using the cursor. fn cursor_seek(&mut self, key: Nibbles) -> Result<(), DatabaseError> { - if let Some(entry) = self.cursor_entry.as_ref() && - entry.0 >= key - { - // If already seeked to the given key then don't do anything. Also if we're seeked past - // the given key then don't anything, because `TrieCursor` is specifically a - // forward-only cursor. 
- } else { + // Only seek if: + // 1. We have a cursor entry and need to seek forward (entry.0 < key), OR + // 2. We have no cursor entry and haven't seeked yet (!self.seeked) + let should_seek = match self.cursor_entry.as_ref() { + Some(entry) => entry.0 < key, + None => !self.seeked, + }; + + if should_seek { self.cursor_entry = self.cursor.as_mut().map(|c| c.seek(key)).transpose()?.flatten(); } @@ -128,10 +122,7 @@ impl<'a, C: TrieCursor> InMemoryTrieCursor<'a, C> { /// Seeks the `cursor_entry` field of the struct to the subsequent entry using the cursor. fn cursor_next(&mut self) -> Result<(), DatabaseError> { - #[cfg(debug_assertions)] - { - debug_assert!(self.seeked); - } + debug_assert!(self.seeked); // If the previous entry is `None`, and we've done a seek previously, then the cursor is // exhausted and we shouldn't call `next` again. @@ -189,10 +180,7 @@ impl TrieCursor for InMemoryTrieCursor<'_, C> { self.cursor_seek(key)?; let mem_entry = self.in_memory_cursor.seek(&key); - #[cfg(debug_assertions)] - { - self.seeked = true; - } + self.seeked = true; let entry = match (mem_entry, &self.cursor_entry) { (Some((mem_key, entry_inner)), _) if mem_key == key => { @@ -213,10 +201,7 @@ impl TrieCursor for InMemoryTrieCursor<'_, C> { self.cursor_seek(key)?; self.in_memory_cursor.seek(&key); - #[cfg(debug_assertions)] - { - self.seeked = true; - } + self.seeked = true; let entry = self.choose_next_entry()?; self.set_last_key(&entry); @@ -224,10 +209,7 @@ impl TrieCursor for InMemoryTrieCursor<'_, C> { } fn next(&mut self) -> Result, DatabaseError> { - #[cfg(debug_assertions)] - { - debug_assert!(self.seeked, "Cursor must be seek'd before next is called"); - } + debug_assert!(self.seeked, "Cursor must be seek'd before next is called"); // A `last_key` of `None` indicates that the cursor is exhausted. 
let Some(last_key) = self.last_key else { @@ -578,6 +560,80 @@ mod tests { assert_eq!(cursor.current().unwrap(), Some(Nibbles::from_nibbles([0x3]))); } + #[test] + fn test_all_storage_slots_deleted_not_wiped_exact_keys() { + use tracing::debug; + reth_tracing::init_test_tracing(); + + // This test reproduces an edge case where: + // - cursor is not None (not wiped) + // - All in-memory entries are deletions (None values) + // - Database has corresponding entries + // - Expected: NO leaves should be returned (all deleted) + + // Generate 42 trie node entries with keys distributed across the keyspace + let mut db_nodes: Vec<(Nibbles, BranchNodeCompact)> = (0..10) + .map(|i| { + let key_bytes = vec![(i * 6) as u8, i as u8]; // Spread keys across keyspace + let nibbles = Nibbles::unpack(key_bytes); + (nibbles, BranchNodeCompact::new(i as u16, i as u16, 0, vec![], None)) + }) + .collect(); + + db_nodes.sort_by_key(|(key, _)| *key); + db_nodes.dedup_by_key(|(key, _)| *key); + + for (key, _) in &db_nodes { + debug!("node at {key:?}"); + } + + // Create in-memory entries with same keys but all None values (deletions) + let in_memory_nodes: Vec<(Nibbles, Option)> = + db_nodes.iter().map(|(key, _)| (*key, None)).collect(); + + let db_nodes_map: BTreeMap = db_nodes.into_iter().collect(); + let db_nodes_arc = Arc::new(db_nodes_map); + let visited_keys = Arc::new(Mutex::new(Vec::new())); + let mock_cursor = MockTrieCursor::new(db_nodes_arc, visited_keys); + + let mut cursor = InMemoryTrieCursor::new(Some(mock_cursor), &in_memory_nodes); + + // Seek to beginning should return None (all nodes are deleted) + tracing::debug!("seeking to 0x"); + let result = cursor.seek(Nibbles::default()).unwrap(); + assert_eq!( + result, None, + "Expected no entries when all nodes are deleted, but got {:?}", + result + ); + + // Test seek operations at various positions - all should return None + let seek_keys = vec![ + Nibbles::unpack([0x00]), + Nibbles::unpack([0x5d]), + Nibbles::unpack([0x5e]), + Nibbles::unpack([0x5f]), + Nibbles::unpack([0xc2]), + Nibbles::unpack([0xc5]), + Nibbles::unpack([0xc9]), + Nibbles::unpack([0xf0]), + ]; + + for seek_key in seek_keys { + tracing::debug!("seeking to {seek_key:?}"); + let result = cursor.seek(seek_key).unwrap(); + assert_eq!( + result, None, + "Expected None when seeking to {:?} but got {:?}", + seek_key, result + ); + } + + // next() should also always return None + let result = cursor.next().unwrap(); + assert_eq!(result, None, "Expected None from next() but got {:?}", result); + } + mod proptest_tests { use super::*; use itertools::Itertools; @@ -628,7 +684,7 @@ mod tests { /// Generate a sorted vector of (Nibbles, `BranchNodeCompact`) entries fn sorted_db_nodes_strategy() -> impl Strategy> { prop::collection::vec( - (prop::collection::vec(any::(), 0..3), branch_node_strategy()), + (prop::collection::vec(any::(), 0..2), branch_node_strategy()), 0..20, ) .prop_map(|entries| { @@ -648,7 +704,7 @@ mod tests { ) -> impl Strategy)>> { prop::collection::vec( ( - prop::collection::vec(any::(), 0..3), + prop::collection::vec(any::(), 0..2), prop::option::of(branch_node_strategy()), ), 0..20, @@ -666,7 +722,7 @@ mod tests { } proptest! 
{ - #![proptest_config(ProptestConfig::with_cases(1000))] + #![proptest_config(ProptestConfig::with_cases(10000))] #[test] fn proptest_in_memory_trie_cursor( @@ -677,7 +733,12 @@ mod tests { reth_tracing::init_test_tracing(); use tracing::debug; - debug!("Starting proptest!"); + debug!( + db_paths=?db_nodes.iter().map(|(k, _)| k).collect::>(), + in_mem_nodes=?in_memory_nodes.iter().map(|(k, v)| (k, v.is_some())).collect::>(), + num_op_choices=?op_choices.len(), + "Starting proptest!", + ); // Create the expected results by merging the two sorted vectors, // properly handling deletions (None values in in_memory_nodes) @@ -757,7 +818,7 @@ mod tests { continue; } - let key = *valid_keys[(choice as usize / 3) % valid_keys.len()]; + let key = *valid_keys[choice as usize % valid_keys.len()]; let control_result = control_cursor.seek(key).unwrap(); let test_result = test_cursor.seek(key).unwrap(); @@ -791,7 +852,7 @@ mod tests { continue; } - let key = *valid_keys[(choice as usize / 3) % valid_keys.len()]; + let key = *valid_keys[choice as usize % valid_keys.len()]; let control_result = control_cursor.seek_exact(key).unwrap(); let test_result = test_cursor.seek_exact(key).unwrap(); From 7cc4fdfaebac16ba8b6d11620fec0b07352c1d15 Mon Sep 17 00:00:00 2001 From: David Klank <155117116+davidjsonn@users.noreply.github.com> Date: Thu, 6 Nov 2025 12:35:22 +0200 Subject: [PATCH 346/371] fix(stages-types): resolve compilation errors in tests (#19501) --- crates/stages/types/Cargo.toml | 2 ++ crates/stages/types/src/checkpoints.rs | 2 ++ 2 files changed, 4 insertions(+) diff --git a/crates/stages/types/Cargo.toml b/crates/stages/types/Cargo.toml index 19e15304896..6e70fbe26a0 100644 --- a/crates/stages/types/Cargo.toml +++ b/crates/stages/types/Cargo.toml @@ -24,12 +24,14 @@ modular-bitfield = { workspace = true, optional = true } [dev-dependencies] reth-codecs.workspace = true +reth-trie-common = { workspace = true, features = ["reth-codec"] } alloy-primitives = { workspace = true, features = ["arbitrary", "rand"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true rand.workspace = true bytes.workspace = true +modular-bitfield.workspace = true [features] default = ["std"] diff --git a/crates/stages/types/src/checkpoints.rs b/crates/stages/types/src/checkpoints.rs index 16bee1387f6..04f4123c9f7 100644 --- a/crates/stages/types/src/checkpoints.rs +++ b/crates/stages/types/src/checkpoints.rs @@ -1,4 +1,6 @@ use super::StageId; +#[cfg(test)] +use alloc::vec; use alloc::{format, string::String, vec::Vec}; use alloy_primitives::{Address, BlockNumber, B256, U256}; use core::ops::RangeInclusive; From e5c47fe350311d120b6135e38d114f19d1b5971c Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 6 Nov 2025 10:53:38 +0000 Subject: [PATCH 347/371] feat(provider): configurable blocks per static file segment (#19458) --- crates/cli/commands/src/db/stats.rs | 3 +- crates/node/builder/src/launch/common.rs | 17 +-- crates/storage/provider/src/lib.rs | 2 +- .../provider/src/providers/database/mod.rs | 6 - crates/storage/provider/src/providers/mod.rs | 4 +- .../src/providers/static_file/manager.rs | 122 ++++++++++++------ .../provider/src/providers/static_file/mod.rs | 37 ++++-- .../src/providers/static_file/writer.rs | 4 +- 8 files changed, 120 insertions(+), 75 deletions(-) diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index c8398d795ce..23b9ad50f3b 100644 --- 
a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -192,7 +192,8 @@ impl Command { ) = (0, 0, 0, 0, 0, 0); for (block_range, tx_range) in &ranges { - let fixed_block_range = static_file_provider.find_fixed_range(block_range.start()); + let fixed_block_range = + static_file_provider.find_fixed_range(segment, block_range.start()); let jar_provider = static_file_provider .get_segment_provider(segment, || Some(fixed_block_range), None)? .ok_or_else(|| { diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 37f25e2cf41..1f5d5dff83b 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -67,7 +67,7 @@ use reth_node_metrics::{ use reth_provider::{ providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider}, BlockHashReader, BlockNumReader, ProviderError, ProviderFactory, ProviderResult, - StageCheckpointReader, StaticFileProviderFactory, + StageCheckpointReader, StaticFileProviderBuilder, StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_builder::config::RethRpcServerConfig; @@ -465,13 +465,14 @@ where N: ProviderNodeTypes, Evm: ConfigureEvm + 'static, { - let factory = ProviderFactory::new( - self.right().clone(), - self.chain_spec(), - StaticFileProvider::read_write(self.data_dir().static_files())?, - )? - .with_prune_modes(self.prune_modes()) - .with_static_files_metrics(); + let static_file_provider = + StaticFileProviderBuilder::read_write(self.data_dir().static_files())? + .with_metrics() + .build()?; + + let factory = + ProviderFactory::new(self.right().clone(), self.chain_spec(), static_file_provider)? + .with_prune_modes(self.prune_modes()); let has_receipt_pruning = self.toml_config().prune.has_receipts_pruning(); diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 5cd598aa46b..3cad94888a8 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -21,7 +21,7 @@ pub mod providers; pub use providers::{ DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW, HistoricalStateProvider, HistoricalStateProviderRef, LatestStateProvider, LatestStateProviderRef, ProviderFactory, - StaticFileAccess, StaticFileWriter, + StaticFileAccess, StaticFileProviderBuilder, StaticFileWriter, }; #[cfg(any(test, feature = "test-utils"))] diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index a0de2f9e740..03c5ee417b9 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -112,12 +112,6 @@ impl ProviderFactory { } impl ProviderFactory { - /// Enables metrics on the static file provider. - pub fn with_static_files_metrics(mut self) -> Self { - self.static_file_provider = self.static_file_provider.with_metrics(); - self - } - /// Sets the pruning configuration for an existing [`ProviderFactory`]. 
pub const fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { self.prune_modes = prune_modes; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 41e8121991b..5d517a82a2e 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -9,8 +9,8 @@ pub use database::*; mod static_file; pub use static_file::{ - StaticFileAccess, StaticFileJarProvider, StaticFileProvider, StaticFileProviderRW, - StaticFileProviderRWRefMut, StaticFileWriter, + StaticFileAccess, StaticFileJarProvider, StaticFileProvider, StaticFileProviderBuilder, + StaticFileProviderRW, StaticFileProviderRWRefMut, StaticFileWriter, }; mod state; diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index f9f0e688687..e26ad31d2e4 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -45,7 +45,6 @@ use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, fmt::Debug, - marker::PhantomData, ops::{Deref, Range, RangeBounds, RangeInclusive}, path::{Path, PathBuf}, sync::{atomic::AtomicU64, mpsc, Arc}, @@ -96,14 +95,56 @@ impl Clone for StaticFileProvider { } } -impl StaticFileProvider { - /// Creates a new [`StaticFileProvider`] with the given [`StaticFileAccess`]. - fn new(path: impl AsRef, access: StaticFileAccess) -> ProviderResult { - let provider = Self(Arc::new(StaticFileProviderInner::new(path, access)?)); +/// Builder for [`StaticFileProvider`] that allows configuration before initialization. +#[derive(Debug)] +pub struct StaticFileProviderBuilder { + inner: StaticFileProviderInner, +} + +impl StaticFileProviderBuilder { + /// Creates a new builder with read-write access. + pub fn read_write(path: impl AsRef) -> ProviderResult { + StaticFileProviderInner::new(path, StaticFileAccess::RW).map(|inner| Self { inner }) + } + + /// Creates a new builder with read-only access. + pub fn read_only(path: impl AsRef) -> ProviderResult { + StaticFileProviderInner::new(path, StaticFileAccess::RO).map(|inner| Self { inner }) + } + + /// Set a custom number of blocks per file for all segments. + pub fn with_blocks_per_file(mut self, blocks_per_file: u64) -> Self { + for segment in StaticFileSegment::iter() { + self.inner.blocks_per_file.insert(segment, blocks_per_file); + } + self + } + + /// Set a custom number of blocks per file for a specific segment. + pub fn with_blocks_per_file_for_segment( + mut self, + segment: StaticFileSegment, + blocks_per_file: u64, + ) -> Self { + self.inner.blocks_per_file.insert(segment, blocks_per_file); + self + } + + /// Enables metrics on the [`StaticFileProvider`]. + pub fn with_metrics(mut self) -> Self { + self.inner.metrics = Some(Arc::new(StaticFileProviderMetrics::default())); + self + } + + /// Builds the final [`StaticFileProvider`] and initializes the index. + pub fn build(self) -> ProviderResult> { + let provider = StaticFileProvider(Arc::new(self.inner)); provider.initialize_index()?; Ok(provider) } +} +impl StaticFileProvider { /// Creates a new [`StaticFileProvider`] with read-only access. /// /// Set `watch_directory` to `true` to track the most recent changes in static files. Otherwise, @@ -114,7 +155,7 @@ impl StaticFileProvider { /// /// See also [`StaticFileProvider::watch_directory`]. 
pub fn read_only(path: impl AsRef, watch_directory: bool) -> ProviderResult { - let provider = Self::new(path, StaticFileAccess::RO)?; + let provider = StaticFileProviderBuilder::read_only(path)?.build()?; if watch_directory { provider.watch_directory(); @@ -125,7 +166,7 @@ impl StaticFileProvider { /// Creates a new [`StaticFileProvider`] with read-write access. pub fn read_write(path: impl AsRef) -> ProviderResult { - Self::new(path, StaticFileAccess::RW) + StaticFileProviderBuilder::read_write(path)?.build() } /// Watches the directory for changes and updates the in-memory index when modifications @@ -260,12 +301,10 @@ pub struct StaticFileProviderInner { metrics: Option>, /// Access rights of the provider. access: StaticFileAccess, - /// Number of blocks per file. - blocks_per_file: u64, + /// Number of blocks per file, per segment. + blocks_per_file: HashMap, /// Write lock for when access is [`StaticFileAccess::RW`]. _lock_file: Option, - /// Node primitives - _pd: PhantomData, } impl StaticFileProviderInner { @@ -277,6 +316,11 @@ impl StaticFileProviderInner { None }; + let mut blocks_per_file = HashMap::new(); + for segment in StaticFileSegment::iter() { + blocks_per_file.insert(segment, DEFAULT_BLOCKS_PER_STATIC_FILE); + } + let provider = Self { map: Default::default(), writers: Default::default(), @@ -287,9 +331,8 @@ impl StaticFileProviderInner { path: path.as_ref().to_path_buf(), metrics: None, access, - blocks_per_file: DEFAULT_BLOCKS_PER_STATIC_FILE, + blocks_per_file, _lock_file, - _pd: Default::default(), }; Ok(provider) @@ -301,29 +344,18 @@ impl StaticFileProviderInner { /// Each static file has a fixed number of blocks. This gives out the range where the requested /// block is positioned. - pub const fn find_fixed_range(&self, block: BlockNumber) -> SegmentRangeInclusive { - find_fixed_range(block, self.blocks_per_file) + pub fn find_fixed_range( + &self, + segment: StaticFileSegment, + block: BlockNumber, + ) -> SegmentRangeInclusive { + let blocks_per_file = + self.blocks_per_file.get(&segment).copied().unwrap_or(DEFAULT_BLOCKS_PER_STATIC_FILE); + find_fixed_range(block, blocks_per_file) } } impl StaticFileProvider { - /// Set a custom number of blocks per file. - #[cfg(any(test, feature = "test-utils"))] - pub fn with_custom_blocks_per_file(self, blocks_per_file: u64) -> Self { - let mut provider = - Arc::try_unwrap(self.0).expect("should be called when initializing only"); - provider.blocks_per_file = blocks_per_file; - Self(Arc::new(provider)) - } - - /// Enables metrics on the [`StaticFileProvider`]. - pub fn with_metrics(self) -> Self { - let mut provider = - Arc::try_unwrap(self.0).expect("should be called when initializing only"); - provider.metrics = Some(Arc::new(StaticFileProviderMetrics::default())); - Self(Arc::new(provider)) - } - /// Reports metrics for the static files. pub fn report_metrics(&self) -> ProviderResult<()> { let Some(metrics) = &self.metrics else { return Ok(()) }; @@ -334,7 +366,7 @@ impl StaticFileProvider { let mut size = 0; for (block_range, _) in &ranges { - let fixed_block_range = self.find_fixed_range(block_range.start()); + let fixed_block_range = self.find_fixed_range(segment, block_range.start()); let jar_provider = self .get_segment_provider(segment, || Some(fixed_block_range), None)? 
.ok_or_else(|| { @@ -511,7 +543,7 @@ impl StaticFileProvider { segment: StaticFileSegment, block: BlockNumber, ) -> ProviderResult { - let fixed_block_range = self.find_fixed_range(block); + let fixed_block_range = self.find_fixed_range(segment, block); let key = (fixed_block_range.end(), segment); let jar = if let Some((_, jar)) = self.map.remove(&key) { jar.jar @@ -576,7 +608,7 @@ impl StaticFileProvider { .read() .get(&segment) .filter(|max| **max >= block) - .map(|_| self.find_fixed_range(block)) + .map(|_| self.find_fixed_range(segment, block)) } /// Gets a static file segment's fixed block range from the provider inner @@ -600,7 +632,7 @@ impl StaticFileProvider { } let tx_start = static_files_rev_iter.peek().map(|(tx_end, _)| *tx_end + 1).unwrap_or(0); if tx_start <= tx { - return Some(self.find_fixed_range(block_range.end())) + return Some(self.find_fixed_range(segment, block_range.end())) } } None @@ -625,7 +657,7 @@ impl StaticFileProvider { Some(segment_max_block) => { // Update the max block for the segment max_block.insert(segment, segment_max_block); - let fixed_range = self.find_fixed_range(segment_max_block); + let fixed_range = self.find_fixed_range(segment, segment_max_block); let jar = NippyJar::::load( &self.path.join(segment.filename(&fixed_range)), @@ -947,8 +979,9 @@ impl StaticFileProvider { /// Read-only. pub fn check_segment_consistency(&self, segment: StaticFileSegment) -> ProviderResult<()> { if let Some(latest_block) = self.get_highest_static_file_block(segment) { - let file_path = - self.directory().join(segment.filename(&self.find_fixed_range(latest_block))); + let file_path = self + .directory() + .join(segment.filename(&self.find_fixed_range(segment, latest_block))); let jar = NippyJar::::load(&file_path).map_err(ProviderError::other)?; @@ -1136,14 +1169,19 @@ impl StaticFileProvider { func: impl Fn(StaticFileJarProvider<'_, N>) -> ProviderResult>, ) -> ProviderResult> { if let Some(highest_block) = self.get_highest_static_file_block(segment) { - let mut range = self.find_fixed_range(highest_block); + let blocks_per_file = self + .blocks_per_file + .get(&segment) + .copied() + .unwrap_or(DEFAULT_BLOCKS_PER_STATIC_FILE); + let mut range = self.find_fixed_range(segment, highest_block); while range.end() > 0 { if let Some(res) = func(self.get_or_create_jar_provider(segment, &range)?)? 
{ return Ok(Some(res)) } range = SegmentRangeInclusive::new( - range.start().saturating_sub(self.blocks_per_file), - range.end().saturating_sub(self.blocks_per_file), + range.start().saturating_sub(blocks_per_file), + range.end().saturating_sub(blocks_per_file), ); } } diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 3c25f157bb3..4b9557cb7ba 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -1,5 +1,7 @@ mod manager; -pub use manager::{StaticFileAccess, StaticFileProvider, StaticFileWriter}; +pub use manager::{ + StaticFileAccess, StaticFileProvider, StaticFileProviderBuilder, StaticFileWriter, +}; mod jar; pub use jar::StaticFileJarProvider; @@ -55,6 +57,7 @@ impl Deref for LoadedJar { mod tests { use super::*; use crate::{ + providers::static_file::manager::StaticFileProviderBuilder, test_utils::create_test_provider_factory, HeaderProvider, StaticFileProviderFactory, }; use alloy_consensus::{Header, SignableTransaction, Transaction, TxLegacy}; @@ -157,9 +160,11 @@ mod tests { // [ Headers Creation and Commit ] { - let sf_rw = StaticFileProvider::::read_write(&static_dir) - .expect("Failed to create static file provider") - .with_custom_blocks_per_file(blocks_per_file); + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir) + .expect("Failed to create static file provider builder") + .with_blocks_per_file(blocks_per_file) + .build() + .expect("Failed to build static file provider"); let mut header_writer = sf_rw.latest_writer(StaticFileSegment::Headers).unwrap(); @@ -251,9 +256,11 @@ mod tests { // Test cases execution { - let sf_rw = StaticFileProvider::read_write(&static_dir) - .expect("Failed to create static file provider") - .with_custom_blocks_per_file(blocks_per_file); + let sf_rw = StaticFileProviderBuilder::read_write(&static_dir) + .expect("Failed to create static file provider builder") + .with_blocks_per_file(blocks_per_file) + .build() + .expect("Failed to build static file provider"); assert_eq!(sf_rw.get_highest_static_file_block(StaticFileSegment::Headers), Some(tip)); assert_eq!( @@ -466,15 +473,19 @@ mod tests { for segment in segments { let (static_dir, _) = create_test_static_files_dir(); - let sf_rw = StaticFileProvider::read_write(&static_dir) - .expect("Failed to create static file provider") - .with_custom_blocks_per_file(blocks_per_file); + let sf_rw = StaticFileProviderBuilder::read_write(&static_dir) + .expect("Failed to create static file provider builder") + .with_blocks_per_file(blocks_per_file) + .build() + .expect("Failed to build static file provider"); setup_tx_based_scenario(&sf_rw, segment, blocks_per_file); - let sf_rw = StaticFileProvider::read_write(&static_dir) - .expect("Failed to create static file provider") - .with_custom_blocks_per_file(blocks_per_file); + let sf_rw = StaticFileProviderBuilder::read_write(&static_dir) + .expect("Failed to create static file provider builder") + .with_blocks_per_file(blocks_per_file) + .build() + .expect("Failed to build static file provider"); let highest_tx = sf_rw.get_highest_static_file_tx(segment).unwrap(); // Test cases diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 7b0ae9ce11c..e5434af5b6f 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ 
-161,7 +161,7 @@ impl StaticFileProviderRW { let static_file_provider = Self::upgrade_provider_to_strong_reference(&reader); - let block_range = static_file_provider.find_fixed_range(block); + let block_range = static_file_provider.find_fixed_range(segment, block); let (jar, path) = match static_file_provider.get_segment_provider_from_block( segment, block_range.start(), @@ -351,7 +351,7 @@ impl StaticFileProviderRW { self.data_path = data_path; *self.writer.user_header_mut() = SegmentHeader::new( - self.reader().find_fixed_range(last_block + 1), + self.reader().find_fixed_range(segment, last_block + 1), None, None, segment, From b94745506127417a382d55cd44e522a1f9cb16d6 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 6 Nov 2025 11:34:51 +0000 Subject: [PATCH 348/371] refactor(provider, cli): simplify getting provider for index or range (#19440) --- crates/cli/commands/src/db/get.rs | 20 +++---- crates/cli/commands/src/db/stats.rs | 2 +- .../src/providers/static_file/manager.rs | 58 +++++++++---------- .../provider/src/providers/static_file/mod.rs | 4 +- .../src/providers/static_file/writer.rs | 2 +- 5 files changed, 42 insertions(+), 44 deletions(-) diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 9d06a35dcaa..2f0fc05311a 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -77,17 +77,15 @@ impl Command { } }; - let content = tool.provider_factory.static_file_provider().find_static_file( - segment, - |provider| { - let mut cursor = provider.cursor()?; - cursor.get(key.into(), mask).map(|result| { - result.map(|vec| { - vec.iter().map(|slice| slice.to_vec()).collect::>() - }) - }) - }, - )?; + let content = tool + .provider_factory + .static_file_provider() + .get_segment_provider(segment, key)? + .cursor()? + .get(key.into(), mask) + .map(|result| { + result.map(|vec| vec.iter().map(|slice| slice.to_vec()).collect::>()) + })?; match content { Some(content) => { diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index 23b9ad50f3b..0f9ddb25e9a 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -195,7 +195,7 @@ impl Command { let fixed_block_range = static_file_provider.find_fixed_range(segment, block_range.start()); let jar_provider = static_file_provider - .get_segment_provider(segment, || Some(fixed_block_range), None)? + .get_segment_provider_for_range(segment, || Some(fixed_block_range), None)? .ok_or_else(|| { eyre::eyre!("Failed to get segment provider for segment: {}", segment) })?; diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index e26ad31d2e4..bff44ad1347 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -368,7 +368,7 @@ impl StaticFileProvider { for (block_range, _) in &ranges { let fixed_block_range = self.find_fixed_range(segment, block_range.start()); let jar_provider = self - .get_segment_provider(segment, || Some(fixed_block_range), None)? + .get_segment_provider_for_range(segment, || Some(fixed_block_range), None)? .ok_or_else(|| { ProviderError::MissingStaticFileBlock(segment, block_range.start()) })?; @@ -397,14 +397,28 @@ impl StaticFileProvider { Ok(()) } + /// Gets the [`StaticFileJarProvider`] of the requested segment and start index that can be + /// either block or transaction. 
+ pub fn get_segment_provider( + &self, + segment: StaticFileSegment, + start: u64, + ) -> ProviderResult> { + if segment.is_block_based() { + self.get_segment_provider_for_block(segment, start, None) + } else { + self.get_segment_provider_for_transaction(segment, start, None) + } + } + /// Gets the [`StaticFileJarProvider`] of the requested segment and block. - pub fn get_segment_provider_from_block( + pub fn get_segment_provider_for_block( &self, segment: StaticFileSegment, block: BlockNumber, path: Option<&Path>, ) -> ProviderResult> { - self.get_segment_provider( + self.get_segment_provider_for_range( segment, || self.get_segment_ranges_from_block(segment, block), path, @@ -413,13 +427,13 @@ impl StaticFileProvider { } /// Gets the [`StaticFileJarProvider`] of the requested segment and transaction. - pub fn get_segment_provider_from_transaction( + pub fn get_segment_provider_for_transaction( &self, segment: StaticFileSegment, tx: TxNumber, path: Option<&Path>, ) -> ProviderResult> { - self.get_segment_provider( + self.get_segment_provider_for_range( segment, || self.get_segment_ranges_from_transaction(segment, tx), path, @@ -430,7 +444,7 @@ impl StaticFileProvider { /// Gets the [`StaticFileJarProvider`] of the requested segment and block or transaction. /// /// `fn_range` should make sure the range goes through `find_fixed_range`. - pub fn get_segment_provider( + pub fn get_segment_provider_for_range( &self, segment: StaticFileSegment, fn_range: impl Fn() -> Option, @@ -1212,13 +1226,7 @@ impl StaticFileProvider { /// If the static file is missing, the `result` is returned. macro_rules! get_provider { ($number:expr) => {{ - let provider = if segment.is_block_based() { - self.get_segment_provider_from_block(segment, $number, None) - } else { - self.get_segment_provider_from_transaction(segment, $number, None) - }; - - match provider { + match self.get_segment_provider(segment, $number) { Ok(provider) => provider, Err( ProviderError::MissingStaticFileBlock(_, _) | @@ -1283,15 +1291,7 @@ impl StaticFileProvider { F: Fn(&mut StaticFileCursor<'_>, u64) -> ProviderResult> + 'a, T: std::fmt::Debug, { - let get_provider = move |start: u64| { - if segment.is_block_based() { - self.get_segment_provider_from_block(segment, start, None) - } else { - self.get_segment_provider_from_transaction(segment, start, None) - } - }; - - let mut provider = Some(get_provider(range.start)?); + let mut provider = Some(self.get_segment_provider(segment, range.start)?); Ok(range.filter_map(move |number| { match get_fn(&mut provider.as_ref().expect("qed").cursor().ok()?, number).transpose() { Some(result) => Some(result), @@ -1301,7 +1301,7 @@ impl StaticFileProvider { // we don't drop the current provider before requesting the // next one. 
provider.take(); - provider = Some(get_provider(number).ok()?); + provider = Some(self.get_segment_provider(segment, number).ok()?); get_fn(&mut provider.as_ref().expect("qed").cursor().ok()?, number).transpose() } } @@ -1488,7 +1488,7 @@ impl> HeaderProvider for StaticFileProvide } fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) + self.get_segment_provider_for_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.header_by_number(num)) .or_else(|err| { if let ProviderError::MissingStaticFileBlock(_, _) = err { @@ -1515,7 +1515,7 @@ impl> HeaderProvider for StaticFileProvide &self, num: BlockNumber, ) -> ProviderResult>> { - self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) + self.get_segment_provider_for_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.sealed_header(num)) .or_else(|err| { if let ProviderError::MissingStaticFileBlock(_, _) = err { @@ -1546,7 +1546,7 @@ impl> HeaderProvider for StaticFileProvide impl BlockHashReader for StaticFileProvider { fn block_hash(&self, num: u64) -> ProviderResult> { - self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) + self.get_segment_provider_for_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.block_hash(num)) .or_else(|err| { if let ProviderError::MissingStaticFileBlock(_, _) = err { @@ -1577,7 +1577,7 @@ impl> Rec type Receipt = N::Receipt; fn receipt(&self, num: TxNumber) -> ProviderResult> { - self.get_segment_provider_from_transaction(StaticFileSegment::Receipts, num, None) + self.get_segment_provider_for_transaction(StaticFileSegment::Receipts, num, None) .and_then(|provider| provider.receipt(num)) .or_else(|err| { if let ProviderError::MissingStaticFileTx(_, _) = err { @@ -1706,7 +1706,7 @@ impl> TransactionsPr } fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { - self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) + self.get_segment_provider_for_transaction(StaticFileSegment::Transactions, num, None) .and_then(|provider| provider.transaction_by_id(num)) .or_else(|err| { if let ProviderError::MissingStaticFileTx(_, _) = err { @@ -1721,7 +1721,7 @@ impl> TransactionsPr &self, num: TxNumber, ) -> ProviderResult> { - self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) + self.get_segment_provider_for_transaction(StaticFileSegment::Transactions, num, None) .and_then(|provider| provider.transaction_by_id_unhashed(num)) .or_else(|err| { if let ProviderError::MissingStaticFileTx(_, _) = err { diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 4b9557cb7ba..adc43ae402b 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -129,7 +129,7 @@ mod tests { let db_provider = factory.provider().unwrap(); let manager = db_provider.static_file_provider(); let jar_provider = manager - .get_segment_provider_from_block(StaticFileSegment::Headers, 0, Some(&static_file)) + .get_segment_provider_for_block(StaticFileSegment::Headers, 0, Some(&static_file)) .unwrap(); assert!(!headers.is_empty()); @@ -378,7 +378,7 @@ mod tests { block_ranges.iter().zip(expected_tx_ranges).for_each(|(block_range, expected_tx_range)| { assert_eq!( sf_rw - .get_segment_provider_from_block(segment, block_range.start, None) + 
.get_segment_provider_for_block(segment, block_range.start, None) .unwrap() .user_header() .tx_range(), diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index e5434af5b6f..2fc4ba61fc7 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -162,7 +162,7 @@ impl StaticFileProviderRW { let static_file_provider = Self::upgrade_provider_to_strong_reference(&reader); let block_range = static_file_provider.find_fixed_range(segment, block); - let (jar, path) = match static_file_provider.get_segment_provider_from_block( + let (jar, path) = match static_file_provider.get_segment_provider_for_block( segment, block_range.start(), None, From 671c690cc1b35550025e62d167d36f5df9beba28 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 6 Nov 2025 11:39:01 +0000 Subject: [PATCH 349/371] chore: add new engine metrics to dashboard, fix multiproof charts (#19540) --- etc/grafana/dashboards/overview.json | 823 +++++++++------------------ 1 file changed, 263 insertions(+), 560 deletions(-) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 480dba3b466..ef52e1c8cd9 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -1129,171 +1129,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 min", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p50", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p90", - "range": true, - "refId": "C", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p95", - "range": true, - "refId": "D", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p99", - "range": true, - "refId": "E", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - 
"disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 min", - "range": true, - "refId": "F", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p50", - "range": true, - "refId": "G", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p90", - "range": true, - "refId": "H", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p95", - "range": true, - "refId": "I", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p99", - "range": true, - "refId": "J", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 min", + "legendFormat": "min", "range": true, "refId": "K", "useBackend": false @@ -1304,12 +1145,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.5\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p50", + "legendFormat": "p50", "range": true, "refId": "L", "useBackend": false @@ -1320,12 +1161,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.9\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - 
"legendFormat": "engine_forkchoiceUpdatedV3 p90", + "legendFormat": "p90", "range": true, "refId": "M", "useBackend": false @@ -1336,12 +1177,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.95\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p95", + "legendFormat": "p95", "range": true, "refId": "N", "useBackend": false @@ -1352,12 +1193,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.99\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p99", + "legendFormat": "p99", "range": true, "refId": "O", "useBackend": false @@ -1401,328 +1242,89 @@ "type": "linear" }, "showPoints": "auto", - "showValues": false, - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 0 - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [ - { - "matcher": { - "id": "byValue", - "options": { - "op": "gte", - "reducer": "allIsZero", - "value": 0 - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": true, - "tooltip": true, - "viz": true - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 21 - }, - "id": 210, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.2.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 min", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p50", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p90", - "range": true, - "refId": "C", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": 
"reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p95", - "range": true, - "refId": "D", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p99", - "range": true, - "refId": "E", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 min", - "range": true, - "refId": "F", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p50", - "range": true, - "refId": "G", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p90", - "range": true, - "refId": "H", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p95", - "range": true, - "refId": "I", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p99", - "range": true, - "refId": "J", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 min", - "range": true, - "refId": "K", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p50", - "range": true, - "refId": "L", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": 
"${datasource}" + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p90", - "range": true, - "refId": "M", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p95", - "range": true, - "refId": "N", - "useBackend": false + "unit": "s" }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p99", - "range": true, - "refId": "O", - "useBackend": false + "overrides": [ + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 21 + }, + "id": 210, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.1", + "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 min", + "legendFormat": "min", "range": true, "refId": "P", "useBackend": false @@ -1733,12 +1335,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.5\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p50", + "legendFormat": "p50", "range": true, "refId": "Q", "useBackend": false @@ -1749,12 +1351,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.9\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p90", + 
"legendFormat": "p90", "range": true, "refId": "R", "useBackend": false @@ -1765,12 +1367,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.95\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p95", + "legendFormat": "p95", "range": true, "refId": "S", "useBackend": false @@ -1781,12 +1383,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.99\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p99", + "legendFormat": "p99", "range": true, "refId": "T", "useBackend": false @@ -1882,7 +1484,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.5\"}", + "expr": "reth_consensus_engine_beacon_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.5\"}", "legendFormat": "p50", "range": true, "refId": "A" @@ -1893,7 +1495,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.9\"}", + "expr": "reth_consensus_engine_beacon_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.9\"}", "hide": false, "legendFormat": "p90", "range": true, @@ -1905,7 +1507,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.95\"}", + "expr": "reth_consensus_engine_beacon_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.95\"}", "hide": false, "legendFormat": "p95", "range": true, @@ -1917,7 +1519,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.99\"}", + "expr": "reth_consensus_engine_beacon_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.99\"}", "hide": false, "legendFormat": "p99", "range": true, @@ -2014,7 +1616,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.5\"}", + "expr": "reth_consensus_engine_beacon_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.5\"}", "legendFormat": "p50", "range": true, "refId": "A" @@ -2025,7 +1627,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.9\"}", + "expr": "reth_consensus_engine_beacon_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.9\"}", "hide": false, "legendFormat": "p90", "range": true, @@ -2037,7 +1639,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.95\"}", + "expr": "reth_consensus_engine_beacon_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.95\"}", "hide": false, "legendFormat": "p95", "range": true, @@ -2049,7 +1651,7 @@ "uid": "${datasource}" }, 
"editorMode": "code", - "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.99\"}", + "expr": "reth_consensus_engine_beacon_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.99\"}", "hide": false, "legendFormat": "p99", "range": true, @@ -2342,7 +1944,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg(reth_sync_block_validation_state_root_duration{$instance_label=\"$instance\"})", + "expr": "reth_sync_block_validation_state_root_duration{$instance_label=\"$instance\"}", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -2358,7 +1960,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg(reth_sync_execution_execution_duration{$instance_label=\"$instance\"})", + "expr": "reth_sync_execution_execution_duration{$instance_label=\"$instance\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -3535,7 +3137,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg(reth_sync_block_validation_state_root_duration{$instance_label=\"$instance\"})", + "expr": "reth_sync_block_validation_state_root_duration{$instance_label=\"$instance\"}", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -3551,7 +3153,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg(reth_sync_execution_execution_duration{$instance_label=\"$instance\"})", + "expr": "reth_sync_execution_execution_duration{$instance_label=\"$instance\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -3796,7 +3398,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "avg by(quantile) (reth_sync_block_validation_trie_input_duration{$instance_label=\"$instance\", quantile=~\"(0|0.5|0.9|0.95|1)\"})", + "expr": "reth_sync_block_validation_trie_input_duration{$instance_label=\"$instance\", quantile=~\"(0|0.5|0.9|0.95|1)\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -3930,7 +3532,7 @@ "hide": false, "includeNullMetadata": true, "instant": false, - "legendFormat": "{{address}}", + "legendFormat": "Precompile cache hits", "range": true, "refId": "A", "useBackend": false @@ -4141,7 +3743,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "avg by (quantile) (reth_tree_root_proof_calculation_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"})", + "expr": "reth_tree_root_proof_calculation_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "instant": false, "legendFormat": "{{quantile}} percentile", "range": true, @@ -4241,11 +3843,89 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.5\"}", + "instant": false, + "legendFormat": "accounts p50", + "range": true, + "refId": "Branch Nodes" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.9\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p90", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": 
"reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.95\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p95", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.99\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p99", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.5\"}", + "hide": false, + "instant": false, + "legendFormat": "storage p50", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.9\"}", + "hide": false, + "instant": false, + "legendFormat": "storage p90", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.95\"}", + "hide": false, "instant": false, - "legendFormat": "storage {{quantile}} percentile", + "legendFormat": "storage p95", "range": true, - "refId": "Storage" + "refId": "F" }, { "datasource": { @@ -4253,11 +3933,12 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.99\"}", + "hide": false, "instant": false, - "legendFormat": "account {{quantile}} percentile", + "legendFormat": "storage p99", "range": true, - "refId": "Account" + "refId": "G" } ], "title": "Pending MultiProof requests", @@ -4323,38 +4004,7 @@ }, "unit": "none" }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Max storage workers" - }, - "properties": [ - { - "id": "custom.lineStyle", - "value": { - "dash": [10, 10], - "fill": "dash" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Max account workers" - }, - "properties": [ - { - "id": "custom.lineStyle", - "value": { - "dash": [10, 10], - "fill": "dash" - } - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, @@ -4362,7 +4012,6 @@ "x": 12, "y": 104 }, - "description": "The max metrics (Max storage workers and Max account workers) are displayed as dotted lines to highlight the configured upper limits.", "id": 256, "options": { "legend": { @@ -4385,9 +4034,9 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=\"0.5\"}", "instant": false, - "legendFormat": "Storage workers {{quantile}} percentile", + "legendFormat": "accounts p50", "range": true, "refId": "A" }, @@ -4397,9 +4046,10 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": 
"reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=\"0.9\"}", + "hide": false, "instant": false, - "legendFormat": "Account workers {{quantile}} percentile", + "legendFormat": "accounts p90", "range": true, "refId": "B" }, @@ -4409,9 +4059,10 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_tree_root_max_storage_workers{$instance_label=\"$instance\"}", + "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=\"0.95\"}", + "hide": false, "instant": false, - "legendFormat": "Max storage workers", + "legendFormat": "accounts p95", "range": true, "refId": "C" }, @@ -4421,14 +4072,66 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_tree_root_max_account_workers{$instance_label=\"$instance\"}", + "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=\"0.99\"}", + "hide": false, "instant": false, - "legendFormat": "Max account workers", + "legendFormat": "accounts p99", "range": true, "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=\"0.5\"}", + "instant": false, + "legendFormat": "storages p50", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=\"0.9\"}", + "hide": false, + "instant": false, + "legendFormat": "storages p90", + "range": true, + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=\"0.95\"}", + "hide": false, + "instant": false, + "legendFormat": "storages p95", + "range": true, + "refId": "G" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=\"0.99\"}", + "hide": false, + "instant": false, + "legendFormat": "storages p99", + "range": true, + "refId": "H" } ], - "title": "Active MultiProof Workers", + "title": "Active multiproof workers", "type": "timeseries" }, { @@ -4923,7 +4626,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "avg by (quantile) (reth_tree_root_multiproof_task_total_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"})", + "expr": "reth_tree_root_multiproof_task_total_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", "hide": false, "instant": false, "legendFormat": "Task duration {{quantile}} percentile", @@ -12167,6 +11870,6 @@ "timezone": "", "title": "Reth", "uid": "2k8BXz24x", - "version": 3, + "version": 4, "weekStart": "" } From 4f1f2d8033437b1c93f887be354426b3fe84c2f3 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 6 Nov 2025 11:39:04 +0000 Subject: [PATCH 350/371] ci: check that Grafana dashboard doesn't contain DS_PROMETHEUS (#19541) --- .github/workflows/grafana.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/grafana.yml diff --git a/.github/workflows/grafana.yml b/.github/workflows/grafana.yml new file mode 100644 index 00000000000..ffa09193952 --- /dev/null 
+++ b/.github/workflows/grafana.yml @@ -0,0 +1,21 @@ +name: grafana + +on: + pull_request: + merge_group: + push: + branches: [main] + +jobs: + check-dashboard: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - name: Check for ${DS_PROMETHEUS} in overview.json + run: | + if grep -Fn '${DS_PROMETHEUS}' etc/grafana/dashboards/overview.json; then + echo "Error: overview.json contains '\${DS_PROMETHEUS}' placeholder" + echo "Please replace it with '\${datasource}'" + exit 1 + fi + echo "✓ overview.json does not contain '\${DS_PROMETHEUS}' placeholder" From 9d9c1d282429a892f7ca0de0ee1bf7fb5b98158b Mon Sep 17 00:00:00 2001 From: Karl Yu <43113774+0xKarl98@users.noreply.github.com> Date: Thu, 6 Nov 2025 20:16:32 +0800 Subject: [PATCH 351/371] feat: add capabilities to fetch Peer type (#19543) --- crates/net/network/src/fetch/mod.rs | 57 ++++++++++++++++++++++++++--- crates/net/network/src/state.rs | 1 + 2 files changed, 52 insertions(+), 6 deletions(-) diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 55bde002b3e..9d603863a93 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -7,7 +7,9 @@ pub use client::FetchClient; use crate::{message::BlockRequest, session::BlockRangeInfo}; use alloy_primitives::B256; use futures::StreamExt; -use reth_eth_wire::{EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives}; +use reth_eth_wire::{ + Capabilities, EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives, +}; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::{ error::{EthResponseValidator, PeerRequestResult, RequestError, RequestResult}, @@ -80,6 +82,7 @@ impl StateFetcher { peer_id: PeerId, best_hash: B256, best_number: u64, + capabilities: Arc, timeout: Arc, range_info: Option, ) { @@ -89,6 +92,7 @@ impl StateFetcher { state: PeerState::Idle, best_hash, best_number, + capabilities, timeout, last_response_likely_bad: false, range_info, @@ -341,6 +345,9 @@ struct Peer { best_hash: B256, /// Tracks the best number of the peer. best_number: u64, + /// Capabilities announced by the peer. + #[allow(dead_code)] + capabilities: Arc, /// Tracks the current timeout value we use for the peer. timeout: Arc, /// Tracks whether the peer has recently responded with a likely bad response. 
@@ -511,8 +518,23 @@ mod tests { // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); - fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(1)), None); - fetcher.new_active_peer(peer2, B256::random(), 2, Arc::new(AtomicU64::new(1)), None); + let capabilities = Arc::new(Capabilities::from(vec![])); + fetcher.new_active_peer( + peer1, + B256::random(), + 1, + Arc::clone(&capabilities), + Arc::new(AtomicU64::new(1)), + None, + ); + fetcher.new_active_peer( + peer2, + B256::random(), + 2, + Arc::clone(&capabilities), + Arc::new(AtomicU64::new(1)), + None, + ); let first_peer = fetcher.next_best_peer().unwrap(); assert!(first_peer == peer1 || first_peer == peer2); @@ -539,9 +561,31 @@ mod tests { let peer2_timeout = Arc::new(AtomicU64::new(300)); - fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(30)), None); - fetcher.new_active_peer(peer2, B256::random(), 2, Arc::clone(&peer2_timeout), None); - fetcher.new_active_peer(peer3, B256::random(), 3, Arc::new(AtomicU64::new(50)), None); + let capabilities = Arc::new(Capabilities::from(vec![])); + fetcher.new_active_peer( + peer1, + B256::random(), + 1, + Arc::clone(&capabilities), + Arc::new(AtomicU64::new(30)), + None, + ); + fetcher.new_active_peer( + peer2, + B256::random(), + 2, + Arc::clone(&capabilities), + Arc::clone(&peer2_timeout), + None, + ); + fetcher.new_active_peer( + peer3, + B256::random(), + 3, + Arc::clone(&capabilities), + Arc::new(AtomicU64::new(50)), + None, + ); // Must always get peer1 (lowest timeout) assert_eq!(fetcher.next_best_peer(), Some(peer1)); @@ -609,6 +653,7 @@ mod tests { peer_id, Default::default(), Default::default(), + Arc::new(Capabilities::from(vec![])), Default::default(), None, ); diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 57d1a73198e..d225ad6693c 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -161,6 +161,7 @@ impl NetworkState { peer, status.blockhash, block_number, + Arc::clone(&capabilities), timeout, range_info, ); From 65acaf330e23fe51f44ce3fdbbae3f216a291bed Mon Sep 17 00:00:00 2001 From: phrwlk Date: Thu, 6 Nov 2025 14:51:18 +0200 Subject: [PATCH 352/371] fix: remove redundant header insertion in extend_blocks and tests (#19534) --- crates/engine/tree/src/tree/tests.rs | 5 +---- crates/storage/provider/src/test_utils/mock.rs | 1 - 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index 606faabaee3..b22b1c1f698 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -7,7 +7,7 @@ use crate::{ TreeConfig, }, }; -use alloy_consensus::Header; + use alloy_eips::eip1898::BlockWithParent; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -336,15 +336,12 @@ impl TestHarness { fn persist_blocks(&self, blocks: Vec>) { let mut block_data: Vec<(B256, Block)> = Vec::with_capacity(blocks.len()); - let mut headers_data: Vec<(B256, Header)> = Vec::with_capacity(blocks.len()); for block in &blocks { block_data.push((block.hash(), block.clone_block())); - headers_data.push((block.hash(), block.header().clone())); } self.provider.extend_blocks(block_data); - self.provider.extend_headers(headers_data); } } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 16388de91ae..4022efd9a95 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ 
b/crates/storage/provider/src/test_utils/mock.rs @@ -118,7 +118,6 @@ impl MockEthProvider { /// Add multiple blocks to local block store pub fn extend_blocks(&self, iter: impl IntoIterator) { for (hash, block) in iter { - self.add_header(hash, block.header().clone()); self.add_block(hash, block) } } From d81d547c938c0e6fea7a2f9c4d3b9488b4937ce6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20Ram=C3=ADrez-Chiquillo?= Date: Thu, 6 Nov 2025 08:33:15 -0500 Subject: [PATCH 353/371] docs: explain default db size and error in CLI help (#19533) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: ¨Andrurachi¨ <¨andruvrch@gmail.com¨> Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- crates/node/core/src/args/database.rs | 7 ++++++- docs/vocs/docs/pages/cli/reth/db.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/db/diff.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/download.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/export-era.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/import-era.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/import.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/init-state.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/init.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/node.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/prune.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/re-execute.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/stage/drop.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/stage/dump.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 6 +++++- docs/vocs/docs/pages/cli/reth/stage/unwind.mdx | 6 +++++- 16 files changed, 81 insertions(+), 16 deletions(-) diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index 6f1d3bfc711..6384f36a806 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -25,7 +25,12 @@ pub struct DatabaseArgs { /// NFS volume. #[arg(long = "db.exclusive")] pub exclusive: Option, - /// Maximum database size (e.g., 4TB, 8MB) + /// Maximum database size (e.g., 4TB, 8TB). + /// + /// This sets the "map size" of the database. If the database grows beyond this + /// limit, the node will stop with an "environment map size limit reached" error. + /// + /// The default value is 8TB. #[arg(long = "db.max-size", value_parser = parse_byte_size)] pub max_size: Option, /// Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index feb902d4938..6b98c08112b 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -72,7 +72,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index 27cb2198aaf..56258531188 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -35,7 +35,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. 
If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index 6cdaa9ca2d3..0ba6c7407e8 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index a873781d9c3..051c81fcce9 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 77e7883e1bd..14aa47e0ef1 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 39762051649..b8051d9d2f8 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 7e97d087165..e43c87f806f 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. 
--db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index bf9dd671db6..6ad439c6a03 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 2326b40d7fc..57d5110bf36 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -726,7 +726,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 2d586edd5c3..1febf6cdd5b 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index e07b3f542c3..742cbe54822 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index c14db19c58c..05153f3fc2a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. 
--db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index c29547401be..b74ee2280bc 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -66,7 +66,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index f3e4ccc0e0c..769bebde10f 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index 8bb44279f8d..5c3a7d54f4d 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -64,7 +64,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. 
--db.growth-step Database growth step (e.g., 4GB, 4KB) From c5870312e48bdc1c00f657310c879f2180296b45 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 6 Nov 2025 14:34:05 +0000 Subject: [PATCH 354/371] feat(static-file): dynamic static file size (#19381) --- Cargo.lock | 1 + crates/cli/commands/src/db/stats.rs | 12 +- crates/static-file/types/src/lib.rs | 2 +- crates/static-file/types/src/segment.rs | 21 +- crates/storage/db/src/static_file/mod.rs | 29 +- crates/storage/provider/Cargo.toml | 1 + .../src/providers/static_file/manager.rs | 312 +++++++++++++++--- .../provider/src/providers/static_file/mod.rs | 97 +++++- 8 files changed, 393 insertions(+), 82 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 21d98573bc2..9603f4f10b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9864,6 +9864,7 @@ dependencies = [ "reth-storage-api", "reth-storage-errors", "reth-testing-utils", + "reth-tracing", "reth-trie", "reth-trie-db", "revm-database", diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index 0f9ddb25e9a..2aef43c582d 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -191,7 +191,7 @@ impl Command { mut segment_config_size, ) = (0, 0, 0, 0, 0, 0); - for (block_range, tx_range) in &ranges { + for (block_range, header) in &ranges { let fixed_block_range = static_file_provider.find_fixed_range(segment, block_range.start()); let jar_provider = static_file_provider @@ -221,7 +221,7 @@ impl Command { row.add_cell(Cell::new(segment)) .add_cell(Cell::new(format!("{block_range}"))) .add_cell(Cell::new( - tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")), + header.tx_range().map_or("N/A".to_string(), |range| format!("{range}")), )) .add_cell(Cell::new(format!("{columns} x {rows}"))); if self.detailed_sizes { @@ -271,10 +271,12 @@ impl Command { let tx_range = { let start = ranges .iter() - .find_map(|(_, tx_range)| tx_range.map(|r| r.start())) + .find_map(|(_, header)| header.tx_range().map(|range| range.start())) .unwrap_or_default(); - let end = - ranges.iter().rev().find_map(|(_, tx_range)| tx_range.map(|r| r.end())); + let end = ranges + .iter() + .rev() + .find_map(|(_, header)| header.tx_range().map(|range| range.end())); end.map(|end| SegmentRangeInclusive::new(start, end)) }; diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 9606b0ec98b..73d0ffe0506 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -78,7 +78,7 @@ impl StaticFileTargets { } /// Each static file has a fixed number of blocks. This gives out the range where the requested -/// block is positioned. Used for segment filename. +/// block is positioned, according to the specified number of blocks per static file. pub const fn find_fixed_range( block: BlockNumber, blocks_per_static_file: u64, diff --git a/crates/static-file/types/src/segment.rs b/crates/static-file/types/src/segment.rs index ca7d9ef24d5..59732aebd08 100644 --- a/crates/static-file/types/src/segment.rs +++ b/crates/static-file/types/src/segment.rs @@ -144,7 +144,7 @@ impl StaticFileSegment { } /// A segment header that contains information common to all segments. Used for storage. -#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Hash, Clone)] +#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Hash, Clone, Copy)] pub struct SegmentHeader { /// Defines the expected block range for a static file segment. 
This attribute is crucial for /// scenarios where the file contains no data, allowing for a representation beyond a @@ -175,14 +175,19 @@ impl SegmentHeader { self.segment } + /// Returns the expected block range. + pub const fn expected_block_range(&self) -> SegmentRangeInclusive { + self.expected_block_range + } + /// Returns the block range. - pub const fn block_range(&self) -> Option<&SegmentRangeInclusive> { - self.block_range.as_ref() + pub const fn block_range(&self) -> Option { + self.block_range } /// Returns the transaction range. - pub const fn tx_range(&self) -> Option<&SegmentRangeInclusive> { - self.tx_range.as_ref() + pub const fn tx_range(&self) -> Option { + self.tx_range } /// The expected block start of the segment. @@ -331,10 +336,14 @@ impl SegmentRangeInclusive { } /// Returns the length of the inclusive range. - #[allow(clippy::len_without_is_empty)] pub const fn len(&self) -> u64 { self.end.saturating_sub(self.start).saturating_add(1) } + + /// Returns true if the range is empty. + pub const fn is_empty(&self) -> bool { + self.start > self.end + } } impl core::fmt::Display for SegmentRangeInclusive { diff --git a/crates/storage/db/src/static_file/mod.rs b/crates/storage/db/src/static_file/mod.rs index f2c9ce45fbc..6292020dd53 100644 --- a/crates/storage/db/src/static_file/mod.rs +++ b/crates/storage/db/src/static_file/mod.rs @@ -1,9 +1,6 @@ //! reth's static file database table import and access -use std::{ - collections::{hash_map::Entry, HashMap}, - path::Path, -}; +use std::{collections::HashMap, path::Path}; mod cursor; pub use cursor::StaticFileCursor; @@ -17,12 +14,11 @@ pub use masks::*; use reth_static_file_types::{SegmentHeader, SegmentRangeInclusive, StaticFileSegment}; /// Alias type for a map of [`StaticFileSegment`] and sorted lists of existing static file ranges. -type SortedStaticFiles = - HashMap)>>; +type SortedStaticFiles = HashMap>; /// Given the `static_files` directory path, it returns a list over the existing `static_files` /// organized by [`StaticFileSegment`]. Each segment has a sorted list of block ranges and -/// transaction ranges as presented in the file configuration. +/// segment headers as presented in the file configuration. pub fn iter_static_files(path: &Path) -> Result { if !path.exists() { reth_fs_util::create_dir_all(path).map_err(|err| NippyJarError::Custom(err.to_string()))?; @@ -39,25 +35,18 @@ pub fn iter_static_files(path: &Path) -> Result::load(&entry.path())?; - let (block_range, tx_range) = - (jar.user_header().block_range().copied(), jar.user_header().tx_range().copied()); - - if let Some(block_range) = block_range { - match static_files.entry(segment) { - Entry::Occupied(mut entry) => { - entry.get_mut().push((block_range, tx_range)); - } - Entry::Vacant(entry) => { - entry.insert(vec![(block_range, tx_range)]); - } - } + if let Some(block_range) = jar.user_header().block_range() { + static_files + .entry(segment) + .and_modify(|headers| headers.push((block_range, *jar.user_header()))) + .or_insert_with(|| vec![(block_range, *jar.user_header())]); } } } for range_list in static_files.values_mut() { // Sort by block end range. 
- range_list.sort_by_key(|(r, _)| r.end()); + range_list.sort_by_key(|(block_range, _)| block_range.end()); } Ok(static_files) diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index e8599a89706..b67064d0fff 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -70,6 +70,7 @@ reth-trie = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true reth-ethereum-engine-primitives.workspace = true reth-ethereum-primitives.workspace = true +reth-tracing.workspace = true revm-database-interface.workspace = true revm-state.workspace = true diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index bff44ad1347..e19a84821fb 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -43,7 +43,7 @@ use reth_static_file_types::{ use reth_storage_api::{BlockBodyIndicesProvider, DBProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{BTreeMap, HashMap}, fmt::Debug, ops::{Deref, Range, RangeBounds, RangeInclusive}, path::{Path, PathBuf}, @@ -51,10 +51,9 @@ use std::{ }; use tracing::{debug, info, trace, warn}; -/// Alias type for a map that can be queried for block ranges from a transaction -/// segment respectively. It uses `TxNumber` to represent the transaction end of a static file -/// range. -type SegmentRanges = HashMap>; +/// Alias type for a map that can be queried for block or transaction ranges. It uses `u64` to +/// represent either a block or a transaction number end of a static file range. +type SegmentRanges = HashMap>; /// Access mode on a static file provider. RO/RW. #[derive(Debug, Default, PartialEq, Eq)] @@ -291,7 +290,18 @@ pub struct StaticFileProviderInner { earliest_history_height: AtomicU64, /// Max static file block for each segment static_files_max_block: RwLock>, - /// Available static file block ranges on disk indexed by max transactions. + /// Expected on disk static file block ranges indexed by max expected blocks. + /// + /// For example, a static file for expected block range `0..=499_000` may have only block range + /// `0..=1000` contained in it, as it wasn't fully filled yet. This index maps the max expected + /// block to the expected range, i.e. block `499_000` to block range `0..=499_000`. + static_files_expected_block_index: RwLock, + /// Available on disk static file block ranges indexed by max transactions. + /// + /// For example, a static file for block range `0..=499_000` may only have block range + /// `0..=1000` and transaction range `0..=2000` contained in it. This index maps the max + /// available transaction to the available block range, i.e. transaction `2000` to block range + /// `0..=1000`. static_files_tx_index: RwLock, /// Directory where `static_files` are located path: PathBuf, @@ -327,6 +337,7 @@ impl StaticFileProviderInner { static_files_min_block: Default::default(), earliest_history_height: Default::default(), static_files_max_block: Default::default(), + static_files_expected_block_index: Default::default(), static_files_tx_index: Default::default(), path: path.as_ref().to_path_buf(), metrics: None, @@ -344,15 +355,67 @@ impl StaticFileProviderInner { /// Each static file has a fixed number of blocks. This gives out the range where the requested /// block is positioned. 
- pub fn find_fixed_range( + /// + /// If the specified block falls into one of the ranges of already initialized static files, + /// this function will return that range. + /// + /// If no matching file exists, this function will derive a new range from the end of the last + /// existing file, if any. + pub fn find_fixed_range_with_block_index( &self, segment: StaticFileSegment, + block_index: Option<&BTreeMap>, block: BlockNumber, ) -> SegmentRangeInclusive { let blocks_per_file = self.blocks_per_file.get(&segment).copied().unwrap_or(DEFAULT_BLOCKS_PER_STATIC_FILE); + + if let Some(block_index) = block_index { + // Find first block range that contains the requested block + if let Some((_, range)) = block_index.iter().find(|(max_block, _)| block <= **max_block) + { + // Found matching range for an existing file using block index + return *range + } else if let Some((_, range)) = block_index.last_key_value() { + // Didn't find matching range for an existing file, derive a new range from the end + // of the last existing file range. + // + // `block` is always higher than `range.end()` here, because we iterated over all + // `block_index` ranges above and didn't find one that contains our block + let blocks_after_last_range = block - range.end(); + let segments_to_skip = (blocks_after_last_range - 1) / blocks_per_file; + let start = range.end() + 1 + segments_to_skip * blocks_per_file; + return SegmentRangeInclusive::new(start, start + blocks_per_file - 1) + } + } + // No block index is available, derive a new range using the fixed number of blocks, + // starting from the beginning. find_fixed_range(block, blocks_per_file) } + + /// Each static file has a fixed number of blocks. This gives out the range where the requested + /// block is positioned. + /// + /// If the specified block falls into one of the ranges of already initialized static files, + /// this function will return that range. + /// + /// If no matching file exists, this function will derive a new range from the end of the last + /// existing file, if any. + /// + /// This function will block indefinitely if a write lock for + /// [`Self::static_files_expected_block_index`] is acquired. In that case, use + /// [`Self::find_fixed_range_with_block_index`]. + pub fn find_fixed_range( + &self, + segment: StaticFileSegment, + block: BlockNumber, + ) -> SegmentRangeInclusive { + self.find_fixed_range_with_block_index( + segment, + self.static_files_expected_block_index.read().get(&segment), + block, + ) + } } impl StaticFileProvider { @@ -361,11 +424,11 @@ impl StaticFileProvider { let Some(metrics) = &self.metrics else { return Ok(()) }; let static_files = iter_static_files(&self.path).map_err(ProviderError::other)?; - for (segment, ranges) in static_files { + for (segment, headers) in static_files { let mut entries = 0; let mut size = 0; - for (block_range, _) in &ranges { + for (block_range, _) in &headers { let fixed_block_range = self.find_fixed_range(segment, block_range.start()); let jar_provider = self .get_segment_provider_for_range(segment, || Some(fixed_block_range), None)? @@ -391,7 +454,7 @@ impl StaticFileProvider { size += data_size + index_size + offsets_size + config_size; } - metrics.record_segment(segment, size, ranges.len(), entries); + metrics.record_segment(segment, size, headers.len(), entries); } Ok(()) @@ -573,7 +636,7 @@ impl StaticFileProvider { NippyJar::::load(&file).map_err(ProviderError::other)? 
}; - let header = jar.user_header().clone(); + let header = *jar.user_header(); jar.delete().map_err(ProviderError::other)?; // SAFETY: this is currently necessary to ensure that certain indexes like @@ -665,13 +728,18 @@ impl StaticFileProvider { ) -> ProviderResult<()> { let mut min_block = self.static_files_min_block.write(); let mut max_block = self.static_files_max_block.write(); + let mut expected_block_index = self.static_files_expected_block_index.write(); let mut tx_index = self.static_files_tx_index.write(); match segment_max_block { Some(segment_max_block) => { // Update the max block for the segment max_block.insert(segment, segment_max_block); - let fixed_range = self.find_fixed_range(segment, segment_max_block); + let fixed_range = self.find_fixed_range_with_block_index( + segment, + expected_block_index.get(&segment), + segment_max_block, + ); let jar = NippyJar::::load( &self.path.join(segment.filename(&fixed_range)), @@ -693,7 +761,7 @@ impl StaticFileProvider { // 2. Sync to block 100, this update sets min_block = [0..=100] // 3. Pruner calls get_lowest_static_file_block() -> returns 100 (correct). Without // this update, it would incorrectly return 0 (stale) - if let Some(current_block_range) = jar.user_header().block_range().copied() { + if let Some(current_block_range) = jar.user_header().block_range() { min_block .entry(segment) .and_modify(|current_min| { @@ -706,14 +774,24 @@ impl StaticFileProvider { .or_insert(current_block_range); } + // Update the expected block index + expected_block_index + .entry(segment) + .and_modify(|index| { + index.retain(|_, block_range| block_range.start() < fixed_range.start()); + + index.insert(fixed_range.end(), fixed_range); + }) + .or_insert_with(|| BTreeMap::from([(fixed_range.end(), fixed_range)])); + // Updates the tx index by first removing all entries which have a higher // block_start than our current static file. if let Some(tx_range) = jar.user_header().tx_range() { - let tx_end = tx_range.end(); - // Current block range has the same block start as `fixed_range``, but block end // might be different if we are still filling this static file. - if let Some(current_block_range) = jar.user_header().block_range().copied() { + if let Some(current_block_range) = jar.user_header().block_range() { + let tx_end = tx_range.end(); + // Considering that `update_index` is called when we either append/truncate, // we are sure that we are handling the latest data // points. @@ -753,9 +831,10 @@ impl StaticFileProvider { self.map.retain(|(end, seg), _| !(*seg == segment && *end > fixed_range.end())); } None => { - tx_index.remove(&segment); max_block.remove(&segment); min_block.remove(&segment); + expected_block_index.remove(&segment); + tx_index.remove(&segment); } }; @@ -766,34 +845,46 @@ impl StaticFileProvider { pub fn initialize_index(&self) -> ProviderResult<()> { let mut min_block = self.static_files_min_block.write(); let mut max_block = self.static_files_max_block.write(); + let mut expected_block_index = self.static_files_expected_block_index.write(); let mut tx_index = self.static_files_tx_index.write(); min_block.clear(); max_block.clear(); tx_index.clear(); - for (segment, ranges) in iter_static_files(&self.path).map_err(ProviderError::other)? { + for (segment, headers) in iter_static_files(&self.path).map_err(ProviderError::other)? 
{ // Update first and last block for each segment - if let Some((first_block_range, _)) = ranges.first() { - min_block.insert(segment, *first_block_range); + if let Some((block_range, _)) = headers.first() { + min_block.insert(segment, *block_range); } - if let Some((last_block_range, _)) = ranges.last() { - max_block.insert(segment, last_block_range.end()); + if let Some((block_range, _)) = headers.last() { + max_block.insert(segment, block_range.end()); } - // Update tx -> block_range index - for (block_range, tx_range) in ranges { - if let Some(tx_range) = tx_range { + for (block_range, header) in headers { + // Update max expected block -> expected_block_range index + expected_block_index + .entry(segment) + .and_modify(|index| { + index.insert(header.expected_block_end(), header.expected_block_range()); + }) + .or_insert_with(|| { + BTreeMap::from([( + header.expected_block_end(), + header.expected_block_range(), + )]) + }); + + // Update max tx -> block_range index + if let Some(tx_range) = header.tx_range() { let tx_end = tx_range.end(); - match tx_index.entry(segment) { - Entry::Occupied(mut index) => { - index.get_mut().insert(tx_end, block_range); - } - Entry::Vacant(index) => { - index.insert(BTreeMap::from([(tx_end, block_range)])); - } - }; + tx_index + .entry(segment) + .and_modify(|index| { + index.insert(tx_end, block_range); + }) + .or_insert_with(|| BTreeMap::from([(tx_end, block_range)])); } } } @@ -1182,21 +1273,12 @@ impl StaticFileProvider { segment: StaticFileSegment, func: impl Fn(StaticFileJarProvider<'_, N>) -> ProviderResult>, ) -> ProviderResult> { - if let Some(highest_block) = self.get_highest_static_file_block(segment) { - let blocks_per_file = self - .blocks_per_file - .get(&segment) - .copied() - .unwrap_or(DEFAULT_BLOCKS_PER_STATIC_FILE); - let mut range = self.find_fixed_range(segment, highest_block); - while range.end() > 0 { - if let Some(res) = func(self.get_or_create_jar_provider(segment, &range)?)? { + if let Some(ranges) = self.static_files_expected_block_index.read().get(&segment) { + // Iterate through all ranges in reverse order (highest to lowest) + for range in ranges.values().rev() { + if let Some(res) = func(self.get_or_create_jar_provider(segment, range)?)? { return Ok(Some(res)) } - range = SegmentRangeInclusive::new( - range.start().saturating_sub(blocks_per_file), - range.end().saturating_sub(blocks_per_file), - ); } } @@ -1408,6 +1490,12 @@ impl StaticFileProvider { pub fn tx_index(&self) -> &RwLock { &self.static_files_tx_index } + + /// Returns `static_files` expected block index + #[cfg(any(test, feature = "test-utils"))] + pub fn expected_block_index(&self) -> &RwLock { + &self.static_files_expected_block_index + } } /// Helper trait to manage different [`StaticFileProviderRW`] of an `Arc eyre::Result<()> { + let (static_dir, _) = create_test_static_files_dir(); + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir)? 
+ .with_blocks_per_file(100) + .build()?; + + let segment = StaticFileSegment::Headers; + + // Test with None - should use default behavior + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, None, 0), + SegmentRangeInclusive::new(0, 99) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, None, 250), + SegmentRangeInclusive::new(200, 299) + ); + + // Test with empty index - should fall back to default behavior + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&BTreeMap::new()), 150), + SegmentRangeInclusive::new(100, 199) + ); + + // Create block index with existing ranges + let block_index = BTreeMap::from_iter([ + (99, SegmentRangeInclusive::new(0, 99)), + (199, SegmentRangeInclusive::new(100, 199)), + (299, SegmentRangeInclusive::new(200, 299)), + ]); + + // Test blocks within existing ranges - should return the matching range + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 0), + SegmentRangeInclusive::new(0, 99) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 50), + SegmentRangeInclusive::new(0, 99) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 99), + SegmentRangeInclusive::new(0, 99) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 100), + SegmentRangeInclusive::new(100, 199) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 150), + SegmentRangeInclusive::new(100, 199) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 199), + SegmentRangeInclusive::new(100, 199) + ); + + // Test blocks beyond existing ranges - should derive new ranges from the last range + // Block 300 is exactly one segment after the last range + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 300), + SegmentRangeInclusive::new(300, 399) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 350), + SegmentRangeInclusive::new(300, 399) + ); + + // Block 500 skips one segment (300-399) + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 500), + SegmentRangeInclusive::new(500, 599) + ); + + // Block 1000 skips many segments + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 1000), + SegmentRangeInclusive::new(1000, 1099) + ); + + // Test with block index having different sizes than blocks_per_file setting + // This simulates the scenario where blocks_per_file was changed between runs + let mixed_size_index = BTreeMap::from_iter([ + (49, SegmentRangeInclusive::new(0, 49)), // 50 blocks + (149, SegmentRangeInclusive::new(50, 149)), // 100 blocks + (349, SegmentRangeInclusive::new(150, 349)), // 200 blocks + ]); + + // Blocks within existing ranges should return those ranges regardless of size + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 25), + SegmentRangeInclusive::new(0, 49) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 100), + SegmentRangeInclusive::new(50, 149) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 200), + SegmentRangeInclusive::new(150, 349) + ); + + // Block after the last range should derive using current blocks_per_file (100) + // from the end of the last range (349) + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, 
Some(&mixed_size_index), 350), + SegmentRangeInclusive::new(350, 449) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 450), + SegmentRangeInclusive::new(450, 549) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 550), + SegmentRangeInclusive::new(550, 649) + ); + + Ok(()) + } +} diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index adc43ae402b..85544e7a45a 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -61,7 +61,7 @@ mod tests { test_utils::create_test_provider_factory, HeaderProvider, StaticFileProviderFactory, }; use alloy_consensus::{Header, SignableTransaction, Transaction, TxLegacy}; - use alloy_primitives::{BlockHash, Signature, TxNumber, B256}; + use alloy_primitives::{map::HashMap, BlockHash, Signature, TxNumber, B256}; use rand::seq::SliceRandom; use reth_db::test_utils::create_test_static_files_dir; use reth_db_api::{transaction::DbTxMut, CanonicalHeaders, HeaderNumbers, Headers}; @@ -71,7 +71,7 @@ mod tests { }; use reth_storage_api::{ReceiptProvider, TransactionsProvider}; use reth_testing_utils::generators::{self, random_header_range}; - use std::{fmt::Debug, fs, ops::Range, path::Path}; + use std::{collections::BTreeMap, fmt::Debug, fs, ops::Range, path::Path}; fn assert_eyre(got: T, expected: T, msg: &str) -> eyre::Result<()> { if got != expected { @@ -81,7 +81,7 @@ mod tests { } #[test] - fn test_snap() { + fn test_static_files() { // Ranges let row_count = 100u64; let range = 0..=(row_count - 1); @@ -382,7 +382,7 @@ mod tests { .unwrap() .user_header() .tx_range(), - expected_tx_range.as_ref() + expected_tx_range ); }); @@ -558,4 +558,93 @@ mod tests { Ok(count) } + + #[test] + fn test_dynamic_size() -> eyre::Result<()> { + let (static_dir, _) = create_test_static_files_dir(); + + { + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir)? + .with_blocks_per_file(10) + .build()?; + let mut header_writer = sf_rw.latest_writer(StaticFileSegment::Headers)?; + + let mut header = Header::default(); + for num in 0..=15 { + header.number = num; + header_writer.append_header(&header, &BlockHash::default()).unwrap(); + } + header_writer.commit().unwrap(); + + assert_eq!(sf_rw.headers_range(0..=15)?.len(), 16); + assert_eq!( + sf_rw.expected_block_index().read().deref(), + &HashMap::from([( + StaticFileSegment::Headers, + BTreeMap::from([ + (9, SegmentRangeInclusive::new(0, 9)), + (19, SegmentRangeInclusive::new(10, 19)) + ]) + )]) + ) + } + + { + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir)? + .with_blocks_per_file(5) + .build()?; + let mut header_writer = sf_rw.latest_writer(StaticFileSegment::Headers)?; + + let mut header = Header::default(); + for num in 16..=22 { + header.number = num; + header_writer.append_header(&header, &BlockHash::default()).unwrap(); + } + header_writer.commit().unwrap(); + + assert_eq!(sf_rw.headers_range(0..=22)?.len(), 23); + assert_eq!( + sf_rw.expected_block_index().read().deref(), + &HashMap::from([( + StaticFileSegment::Headers, + BTreeMap::from([ + (9, SegmentRangeInclusive::new(0, 9)), + (19, SegmentRangeInclusive::new(10, 19)), + (24, SegmentRangeInclusive::new(20, 24)) + ]) + )]) + ) + } + + { + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir)? 
+ .with_blocks_per_file(15) + .build()?; + let mut header_writer = sf_rw.latest_writer(StaticFileSegment::Headers)?; + + let mut header = Header::default(); + for num in 23..=40 { + header.number = num; + header_writer.append_header(&header, &BlockHash::default()).unwrap(); + } + header_writer.commit().unwrap(); + + assert_eq!(sf_rw.headers_range(0..=40)?.len(), 41); + assert_eq!( + sf_rw.expected_block_index().read().deref(), + &HashMap::from([( + StaticFileSegment::Headers, + BTreeMap::from([ + (9, SegmentRangeInclusive::new(0, 9)), + (19, SegmentRangeInclusive::new(10, 19)), + (24, SegmentRangeInclusive::new(20, 24)), + (39, SegmentRangeInclusive::new(25, 39)), + (54, SegmentRangeInclusive::new(40, 54)) + ]) + )]) + ) + } + + Ok(()) + } } From 04093cc3bee85b377e5ffea30633341070a5f60f Mon Sep 17 00:00:00 2001 From: Snezhkko Date: Thu, 6 Nov 2025 17:49:08 +0200 Subject: [PATCH 355/371] fix(prune): use saturating_sub in PruneLimiter::deleted_entries_limit_left (#19535) --- crates/prune/prune/src/limiter.rs | 33 ++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/crates/prune/prune/src/limiter.rs b/crates/prune/prune/src/limiter.rs index d347ecddbd5..a32e6ab2437 100644 --- a/crates/prune/prune/src/limiter.rs +++ b/crates/prune/prune/src/limiter.rs @@ -96,7 +96,7 @@ impl PruneLimiter { /// Returns the number of deleted entries left before the limit is reached. pub fn deleted_entries_limit_left(&self) -> Option { - self.deleted_entries_limit.as_ref().map(|limit| limit.limit - limit.deleted) + self.deleted_entries_limit.as_ref().map(|limit| limit.limit.saturating_sub(limit.deleted)) } /// Returns the limit on the number of deleted entries (rows in the database). @@ -411,4 +411,35 @@ mod tests { sleep(Duration::new(0, 10_000_000)); // 10 milliseconds assert!(limiter.is_limit_reached(), "Limit should be reached when time limit is reached"); } + + #[test] + fn test_deleted_entries_limit_left_saturation_and_normal() { + // less than limit → no saturation + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); + limiter.increment_deleted_entries_count_by(3); + assert_eq!(limiter.deleted_entries_limit_left(), Some(7)); + + // equal to limit → saturates to 0 + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(3); + limiter.increment_deleted_entries_count_by(3); + assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); + + // overrun past limit → saturates to 0 + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); + limiter.increment_deleted_entries_count_by(12); + assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); + + // lowering limit via set → saturates to 0 if below deleted + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(20); + limiter.increment_deleted_entries_count_by(15); + let limiter = limiter.set_deleted_entries_limit(10); + assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); + + // lowering limit via floor → saturates to 0 if below deleted + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(15); + limiter.increment_deleted_entries_count_by(14); + let denominator = NonZeroUsize::new(8).unwrap(); + let limiter = limiter.floor_deleted_entries_limit_to_multiple_of(denominator); + assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); + } } From a6c0ac8cf4ade4eafd38ea2d09457bb02ba0ed68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erce=20Can=20Bekt=C3=BCre?= <47954181+ercecan@users.noreply.github.com> Date: Thu, 6 Nov 2025 19:20:50 +0300 
Subject: [PATCH 356/371] fix: Update filter last poll timestamp in filter logs (#19549) --- crates/rpc/rpc/src/eth/filter.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 22b14d7a174..f0dac574c33 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -298,11 +298,12 @@ where /// Handler for `eth_getFilterLogs` pub async fn filter_logs(&self, id: FilterId) -> Result, EthFilterError> { let filter = { - let filters = self.inner.active_filters.inner.lock().await; - if let FilterKind::Log(ref filter) = - filters.get(&id).ok_or_else(|| EthFilterError::FilterNotFound(id.clone()))?.kind - { - *filter.clone() + let mut filters = self.inner.active_filters.inner.lock().await; + let filter = + filters.get_mut(&id).ok_or_else(|| EthFilterError::FilterNotFound(id.clone()))?; + if let FilterKind::Log(ref inner_filter) = filter.kind { + filter.last_poll_timestamp = Instant::now(); + *inner_filter.clone() } else { // Not a log filter return Err(EthFilterError::FilterNotFound(id)) From 27cf27a984bc1646092cbcfceabf3516d9c3a5ed Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 6 Nov 2025 17:36:03 +0000 Subject: [PATCH 357/371] refactor(provider): introduce `EitherWriter` (#19554) --- crates/storage/provider/src/either_writer.rs | 43 ++++++++++++++++ crates/storage/provider/src/lib.rs | 26 ++++------ .../src/providers/database/provider.rs | 50 ++++++++----------- 3 files changed, 74 insertions(+), 45 deletions(-) create mode 100644 crates/storage/provider/src/either_writer.rs diff --git a/crates/storage/provider/src/either_writer.rs b/crates/storage/provider/src/either_writer.rs new file mode 100644 index 00000000000..5c50141f651 --- /dev/null +++ b/crates/storage/provider/src/either_writer.rs @@ -0,0 +1,43 @@ +//! Generic writer abstraction for writing to either database tables or static files. + +use crate::providers::StaticFileProviderRWRefMut; +use alloy_primitives::{BlockNumber, TxNumber}; +use reth_db::table::Value; +use reth_db_api::{cursor::DbCursorRW, tables}; +use reth_node_types::NodePrimitives; +use reth_storage_errors::provider::ProviderResult; + +/// Represents a destination for writing data, either to database or static files. +#[derive(Debug)] +pub enum EitherWriter<'a, CURSOR, N> { + /// Write to database table via cursor + Database(CURSOR), + /// Write to static file + StaticFile(StaticFileProviderRWRefMut<'a, N>), +} + +impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> { + /// Increment the block number. + /// + /// Relevant only for [`Self::StaticFile`]. It is a no-op for [`Self::Database`]. + pub fn increment_block(&mut self, expected_block_number: BlockNumber) -> ProviderResult<()> { + match self { + Self::Database(_) => Ok(()), + Self::StaticFile(writer) => writer.increment_block(expected_block_number), + } + } +} + +impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> +where + N::Receipt: Value, + CURSOR: DbCursorRW>, +{ + /// Append a transaction receipt. 
+ pub fn append_receipt(&mut self, tx_num: TxNumber, receipt: &N::Receipt) -> ProviderResult<()> { + match self { + Self::Database(cursor) => Ok(cursor.append(tx_num, receipt)?), + Self::StaticFile(writer) => writer.append_receipt(tx_num, receipt), + } + } +} diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 3cad94888a8..84e1a4f8b46 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -24,35 +24,31 @@ pub use providers::{ StaticFileAccess, StaticFileProviderBuilder, StaticFileWriter, }; +pub mod changesets_utils; + #[cfg(any(test, feature = "test-utils"))] /// Common test helpers for mocking the Provider. pub mod test_utils; -/// Re-export provider error. -pub use reth_storage_errors::provider::{ProviderError, ProviderResult}; - -pub use reth_static_file_types as static_file; -pub use static_file::StaticFileSegment; - -pub use reth_execution_types::*; - -pub mod changesets_utils; -/// Re-export `OriginalValuesKnown` -pub use revm_database::states::OriginalValuesKnown; - -/// Writer standalone type. -pub mod writer; +pub mod either_writer; +pub use either_writer::*; pub use reth_chain_state::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, CanonStateNotifications, CanonStateSubscriptions, }; - +pub use reth_execution_types::*; +/// Re-export `OriginalValuesKnown` +pub use revm_database::states::OriginalValuesKnown; // reexport traits to avoid breaking changes +pub use reth_static_file_types as static_file; pub use reth_storage_api::{ HistoryWriter, MetadataProvider, MetadataWriter, StatsReader, StorageSettings, StorageSettingsCache, }; +/// Re-export provider error. +pub use reth_storage_errors::provider::{ProviderError, ProviderResult}; +pub use static_file::StaticFileSegment; pub(crate) fn to_range>(bounds: R) -> std::ops::Range { let start = match bounds.start_bound() { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index b46ccd9a633..974f0257d44 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -13,12 +13,12 @@ use crate::{ }, AccountReader, BlockBodyWriter, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, - DBProvider, HashingWriter, HeaderProvider, HeaderSyncGapProvider, HistoricalStateProvider, - HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, - StageCheckpointReader, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader, - StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, TrieReader, TrieWriter, + DBProvider, EitherWriter, HashingWriter, HeaderProvider, HeaderSyncGapProvider, + HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, + LatestStateProviderRef, OriginalValuesKnown, ProviderError, PruneCheckpointReader, + PruneCheckpointWriter, RevertsInit, StageCheckpointReader, StateProviderBox, StateWriter, + StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, + TransactionsProvider, TransactionsProviderExt, TrieReader, TrieWriter, }; use alloy_consensus::{ transaction::{SignerRecoverable, TransactionMeta, TxHashRef}, @@ -82,7 
+82,7 @@ use std::{ cmp::Ordering, collections::{BTreeMap, BTreeSet}, fmt::Debug, - ops::{Deref, DerefMut, Not, Range, RangeBounds, RangeFrom, RangeInclusive}, + ops::{Deref, DerefMut, Range, RangeBounds, RangeFrom, RangeInclusive}, sync::Arc, }; use tracing::{debug, trace}; @@ -1611,21 +1611,17 @@ impl StateWriter )); } - let has_receipts_pruning = self.prune_modes.has_receipts_pruning(); - - // Prepare receipts cursor if we are going to write receipts to the database - // - // We are writing to database if requested or if there's any kind of receipt pruning - // configured - let mut receipts_cursor = self.tx.cursor_write::>()?; - - // Prepare receipts static writer if we are going to write receipts to static files - // - // We are writing to static files if requested and if there's no receipt pruning configured - let mut receipts_static_writer = has_receipts_pruning - .not() - .then(|| self.static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)) - .transpose()?; + // Write receipts to static files only if they're explicitly enabled or we don't have + // receipts pruning + let mut receipts_writer = if self.storage_settings.read().receipts_in_static_files || + !self.prune_modes.has_receipts_pruning() + { + EitherWriter::StaticFile( + self.static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?, + ) + } else { + EitherWriter::Database(self.tx.cursor_write::>()?) + }; // All receipts from the last 128 blocks are required for blockchain tree, even with // [`PruneSegment::ContractLogs`]. @@ -1638,9 +1634,7 @@ impl StateWriter let block_number = first_block + idx as u64; // Increment block number for receipts static file writer - if let Some(writer) = receipts_static_writer.as_mut() { - writer.increment_block(block_number)?; - } + receipts_writer.increment_block(block_number)?; // Skip writing receipts if pruning configuration requires us to. if prunable_receipts && @@ -1654,11 +1648,7 @@ impl StateWriter for (idx, receipt) in receipts.iter().enumerate() { let receipt_idx = first_tx_index + idx as u64; - if let Some(writer) = &mut receipts_static_writer { - writer.append_receipt(receipt_idx, receipt)?; - } else { - receipts_cursor.append(receipt_idx, receipt)?; - } + receipts_writer.append_receipt(receipt_idx, receipt)?; } } From 7997cd42832f4be41e0a20e5796f8dc194b7b137 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 6 Nov 2025 18:06:43 +0000 Subject: [PATCH 358/371] chore(provider): exhaustive segment match in static files check (#19556) --- crates/static-file/types/src/segment.rs | 13 ++------- .../src/providers/static_file/manager.rs | 28 +++++++++++-------- 2 files changed, 18 insertions(+), 23 deletions(-) diff --git a/crates/static-file/types/src/segment.rs b/crates/static-file/types/src/segment.rs index 59732aebd08..10ae9d99753 100644 --- a/crates/static-file/types/src/segment.rs +++ b/crates/static-file/types/src/segment.rs @@ -7,7 +7,7 @@ use alloy_primitives::TxNumber; use core::{ops::RangeInclusive, str::FromStr}; use derive_more::Display; use serde::{Deserialize, Serialize}; -use strum::{AsRefStr, EnumString}; +use strum::{AsRefStr, EnumIs, EnumString}; #[derive( Debug, @@ -23,6 +23,7 @@ use strum::{AsRefStr, EnumString}; EnumString, AsRefStr, Display, + EnumIs, )] #[cfg_attr(feature = "clap", derive(clap::ValueEnum))] /// Segment of the data that can be moved to static files. 
@@ -122,16 +123,6 @@ impl StaticFileSegment { Some((segment, SegmentRangeInclusive::new(block_start, block_end))) } - /// Returns `true` if the segment is `StaticFileSegment::Headers`. - pub const fn is_headers(&self) -> bool { - matches!(self, Self::Headers) - } - - /// Returns `true` if the segment is `StaticFileSegment::Receipts`. - pub const fn is_receipts(&self) -> bool { - matches!(self, Self::Receipts) - } - /// Returns `true` if a segment row is linked to a transaction. pub const fn is_tx_based(&self) -> bool { matches!(self, Self::Receipts | Self::Transactions) diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index e19a84821fb..bbf334ba009 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -966,19 +966,23 @@ impl StaticFileProvider { }; for segment in StaticFileSegment::iter() { - if has_receipt_pruning && segment.is_receipts() { - // Pruned nodes (including full node) do not store receipts as static files. - continue - } + match segment { + StaticFileSegment::Headers | StaticFileSegment::Transactions => {} + StaticFileSegment::Receipts => { + if has_receipt_pruning { + // Pruned nodes (including full node) do not store receipts as static files. + continue + } - if segment.is_receipts() && - (NamedChain::Gnosis == provider.chain_spec().chain_id() || - NamedChain::Chiado == provider.chain_spec().chain_id()) - { - // Gnosis and Chiado's historical import is broken and does not work with this - // check. They are importing receipts along with importing - // headers/bodies. - continue; + if NamedChain::Gnosis == provider.chain_spec().chain_id() || + NamedChain::Chiado == provider.chain_spec().chain_id() + { + // Gnosis and Chiado's historical import is broken and does not work with + // this check. They are importing receipts along + // with importing headers/bodies. 
+ continue; + } + } } let initial_highest_block = self.get_highest_static_file_block(segment); From cb78b9da67c362a4e6c512d52adbc80e73c391cc Mon Sep 17 00:00:00 2001 From: phrwlk Date: Thu, 6 Nov 2025 23:26:07 +0200 Subject: [PATCH 359/371] fix(net): preserve ECIESError in connect_without_timeout (#19558) Co-authored-by: Matthias Seitz --- crates/net/ecies/src/stream.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/net/ecies/src/stream.rs b/crates/net/ecies/src/stream.rs index d99422f512f..adf4dc7634d 100644 --- a/crates/net/ecies/src/stream.rs +++ b/crates/net/ecies/src/stream.rs @@ -67,8 +67,7 @@ where secret_key: SecretKey, remote_id: PeerId, ) -> Result { - let ecies = ECIESCodec::new_client(secret_key, remote_id) - .map_err(|_| io::Error::other("invalid handshake"))?; + let ecies = ECIESCodec::new_client(secret_key, remote_id)?; let mut transport = ecies.framed(transport); From 4d9d712b436c95700bd25b82071837285b06ef4f Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 7 Nov 2025 01:33:57 +0400 Subject: [PATCH 360/371] refactor: provide default implementation for send_raw_transaction (#19564) --- crates/optimism/rpc/src/eth/transaction.rs | 16 +++++++-------- crates/rpc/rpc-eth-api/src/core.rs | 2 +- .../rpc-eth-api/src/helpers/transaction.rs | 20 +++++++++++++++---- crates/rpc/rpc/src/eth/helpers/transaction.rs | 20 +++++++++---------- 4 files changed, 35 insertions(+), 23 deletions(-) diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 14ed9dbe247..58d367012f1 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -7,16 +7,16 @@ use futures::StreamExt; use op_alloy_consensus::{transaction::OpTransactionInfo, OpTransaction}; use reth_chain_state::CanonStateSubscriptions; use reth_optimism_primitives::DepositReceipt; -use reth_primitives_traits::{BlockBody, SignedTransaction}; +use reth_primitives_traits::{BlockBody, Recovered, SignedTransaction, WithEncoded}; use reth_rpc_eth_api::{ helpers::{spec::SignersForRpc, EthTransactions, LoadReceipt, LoadTransaction}, try_into_op_tx_info, EthApiTypes as _, FromEthApiError, FromEvmError, RpcConvert, RpcNodeCore, RpcReceipt, TxInfoMapper, }; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; +use reth_rpc_eth_types::EthApiError; use reth_storage_api::{errors::ProviderError, ReceiptProvider}; use reth_transaction_pool::{ - AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, + AddedTransactionOutcome, PoolPooledTx, PoolTransaction, TransactionOrigin, TransactionPool, }; use std::{ fmt::{Debug, Formatter}, @@ -39,11 +39,11 @@ where self.inner.eth_api.send_raw_transaction_sync_timeout() } - /// Decodes and recovers the transaction and submits it to the pool. - /// - /// Returns the hash of the transaction. - async fn send_raw_transaction(&self, tx: Bytes) -> Result { - let recovered = recover_raw_transaction(&tx)?; + async fn send_transaction( + &self, + tx: WithEncoded>>, + ) -> Result { + let (tx, recovered) = tx.split(); // broadcast raw transaction to subscribers if there is any. 
self.eth_api().broadcast_raw_transaction(tx.clone()); diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 40f19c86227..4e0afbf6ab9 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -827,7 +827,7 @@ where /// Handler for: `eth_sendTransaction` async fn send_transaction(&self, request: RpcTxReq) -> RpcResult { trace!(target: "rpc::eth", ?request, "Serving eth_sendTransaction"); - Ok(EthTransactions::send_transaction(self, request).await?) + Ok(EthTransactions::send_transaction_request(self, request).await?) } /// Handler for: `eth_sendRawTransaction` diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 8a49208cd8c..2b1f3d05332 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -19,10 +19,11 @@ use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionInfo}; use futures::{Future, StreamExt}; use reth_chain_state::CanonStateSubscriptions; use reth_node_api::BlockBody; -use reth_primitives_traits::{RecoveredBlock, SignedTransaction, TxTy}; +use reth_primitives_traits::{Recovered, RecoveredBlock, SignedTransaction, TxTy, WithEncoded}; use reth_rpc_convert::{transaction::RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ - utils::binary_search, EthApiError, EthApiError::TransactionConfirmationTimeout, + utils::{binary_search, recover_raw_transaction}, + EthApiError::{self, TransactionConfirmationTimeout}, FillTransactionResult, SignError, TransactionSource, }; use reth_storage_api::{ @@ -30,7 +31,7 @@ use reth_storage_api::{ TransactionsProvider, }; use reth_transaction_pool::{ - AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, + AddedTransactionOutcome, PoolPooledTx, PoolTransaction, TransactionOrigin, TransactionPool, }; use std::{sync::Arc, time::Duration}; @@ -76,6 +77,17 @@ pub trait EthTransactions: LoadTransaction { fn send_raw_transaction( &self, tx: Bytes, + ) -> impl Future> + Send { + async move { + let recovered = recover_raw_transaction::>(&tx)?; + self.send_transaction(WithEncoded::new(tx, recovered)).await + } + } + + /// Submits the transaction to the pool. + fn send_transaction( + &self, + tx: WithEncoded>>, ) -> impl Future> + Send; /// Decodes and recovers the transaction and submits it to the pool. @@ -384,7 +396,7 @@ pub trait EthTransactions: LoadTransaction { /// Signs transaction with a matching signer, if any and submits the transaction to the pool. /// Returns the hash of the signed transaction. 
- fn send_transaction( + fn send_transaction_request( &self, mut request: RpcTxReq, ) -> impl Future> + Send diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 7889dd1f54c..8f2c5bf93ef 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -5,19 +5,19 @@ use std::time::Duration; use crate::EthApi; use alloy_consensus::BlobTransactionValidationError; use alloy_eips::{eip7594::BlobTransactionSidecarVariant, BlockId, Typed2718}; -use alloy_primitives::{hex, Bytes, B256}; +use alloy_primitives::{hex, B256}; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; -use reth_primitives_traits::AlloyBlockHeader; +use reth_primitives_traits::{AlloyBlockHeader, Recovered, WithEncoded}; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction}, FromEvmError, RpcNodeCore, }; -use reth_rpc_eth_types::{error::RpcPoolError, utils::recover_raw_transaction, EthApiError}; +use reth_rpc_eth_types::{error::RpcPoolError, EthApiError}; use reth_storage_api::BlockReaderIdExt; use reth_transaction_pool::{ error::Eip4844PoolTransactionError, AddedTransactionOutcome, EthBlobTransactionSidecar, - EthPoolTransaction, PoolTransaction, TransactionPool, + EthPoolTransaction, PoolPooledTx, PoolTransaction, TransactionPool, }; impl EthTransactions for EthApi @@ -36,12 +36,11 @@ where self.inner.send_raw_transaction_sync_timeout() } - /// Decodes and recovers the transaction and submits it to the pool. - /// - /// Returns the hash of the transaction. - async fn send_raw_transaction(&self, tx: Bytes) -> Result { - let recovered = recover_raw_transaction(&tx)?; - + async fn send_transaction( + &self, + tx: WithEncoded>>, + ) -> Result { + let (tx, recovered) = tx.split(); let mut pool_transaction = ::Transaction::from_pooled(recovered); @@ -147,6 +146,7 @@ mod tests { }; use reth_rpc_eth_api::node::RpcNodeCoreAdapter; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; + use revm_primitives::Bytes; use std::collections::HashMap; fn mock_eth_api( From 7ade95e9fa2e4e7af01d7cc6f4591cb83c9788f7 Mon Sep 17 00:00:00 2001 From: MIHAO PARK Date: Thu, 6 Nov 2025 22:53:45 +0100 Subject: [PATCH 361/371] chore(consensus): update GasLimitInvalidIncrease/GasLimitInvalidDecrease error msg (#18561) Co-authored-by: Matthias Seitz --- crates/consensus/consensus/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index b3dfa30e61b..e714b614090 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -16,7 +16,7 @@ use alloy_consensus::Header; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256}; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{ - constants::{MAXIMUM_GAS_LIMIT_BLOCK, MINIMUM_GAS_LIMIT}, + constants::{GAS_LIMIT_BOUND_DIVISOR, MAXIMUM_GAS_LIMIT_BLOCK, MINIMUM_GAS_LIMIT}, transaction::error::InvalidTransactionError, Block, GotExpected, GotExpectedBoxed, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, @@ -349,7 +349,7 @@ pub enum ConsensusError { }, /// Error when the child gas limit exceeds the maximum allowed increase. 
- #[error("child gas_limit {child_gas_limit} max increase is {parent_gas_limit}/1024")] + #[error("child gas_limit {child_gas_limit} exceeds the max allowed increase ({parent_gas_limit}/{GAS_LIMIT_BOUND_DIVISOR})")] GasLimitInvalidIncrease { /// The parent gas limit. parent_gas_limit: u64, @@ -378,7 +378,7 @@ pub enum ConsensusError { }, /// Error when the child gas limit exceeds the maximum allowed decrease. - #[error("child gas_limit {child_gas_limit} max decrease is {parent_gas_limit}/1024")] + #[error("child gas_limit {child_gas_limit} is below the max allowed decrease ({parent_gas_limit}/{GAS_LIMIT_BOUND_DIVISOR})")] GasLimitInvalidDecrease { /// The parent gas limit. parent_gas_limit: u64, From 3385ec5e6e43c33e2da6f0aa5d43686245f3c695 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 6 Nov 2025 22:34:13 +0000 Subject: [PATCH 362/371] test(static-file): StaticFileSegment string and serde roundtrips (#19561) --- Cargo.lock | 1 + crates/static-file/types/Cargo.toml | 2 ++ crates/static-file/types/src/segment.rs | 43 +++++++++++++++++++++---- 3 files changed, 40 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9603f4f10b4..6c03c02c01a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10545,6 +10545,7 @@ dependencies = [ "derive_more", "reth-nippy-jar", "serde", + "serde_json", "strum 0.27.2", ] diff --git a/crates/static-file/types/Cargo.toml b/crates/static-file/types/Cargo.toml index e2cd90c2686..18d85a37c20 100644 --- a/crates/static-file/types/Cargo.toml +++ b/crates/static-file/types/Cargo.toml @@ -21,6 +21,7 @@ strum = { workspace = true, features = ["derive"] } [dev-dependencies] reth-nippy-jar.workspace = true +serde_json.workspace = true [features] default = ["std"] @@ -29,5 +30,6 @@ std = [ "derive_more/std", "serde/std", "strum/std", + "serde_json/std", ] clap = ["dep:clap"] diff --git a/crates/static-file/types/src/segment.rs b/crates/static-file/types/src/segment.rs index 10ae9d99753..be72510fbb4 100644 --- a/crates/static-file/types/src/segment.rs +++ b/crates/static-file/types/src/segment.rs @@ -7,7 +7,7 @@ use alloy_primitives::TxNumber; use core::{ops::RangeInclusive, str::FromStr}; use derive_more::Display; use serde::{Deserialize, Serialize}; -use strum::{AsRefStr, EnumIs, EnumString}; +use strum::{EnumIs, EnumString}; #[derive( Debug, @@ -21,21 +21,18 @@ use strum::{AsRefStr, EnumIs, EnumString}; Deserialize, Serialize, EnumString, - AsRefStr, Display, EnumIs, )] +#[strum(serialize_all = "kebab-case")] #[cfg_attr(feature = "clap", derive(clap::ValueEnum))] /// Segment of the data that can be moved to static files. pub enum StaticFileSegment { - #[strum(serialize = "headers")] /// Static File segment responsible for the `CanonicalHeaders`, `Headers`, /// `HeaderTerminalDifficulties` tables. Headers, - #[strum(serialize = "transactions")] /// Static File segment responsible for the `Transactions` table. Transactions, - #[strum(serialize = "receipts")] /// Static File segment responsible for the `Receipts` table. Receipts, } @@ -43,6 +40,8 @@ pub enum StaticFileSegment { impl StaticFileSegment { /// Returns the segment as a string. pub const fn as_str(&self) -> &'static str { + // `strum` doesn't generate a doc comment for `into_str` when using `IntoStaticStr` derive + // macro, so we need to manually implement it. 
match self { Self::Headers => "headers", Self::Transactions => "transactions", @@ -73,7 +72,7 @@ impl StaticFileSegment { pub fn filename(&self, block_range: &SegmentRangeInclusive) -> String { // ATTENTION: if changing the name format, be sure to reflect those changes in // [`Self::parse_filename`]. - format!("static_file_{}_{}_{}", self.as_ref(), block_range.start(), block_range.end()) + format!("static_file_{}_{}_{}", self.as_str(), block_range.start(), block_range.end()) } /// Returns file name for the provided segment and range, alongside filters, compression. @@ -473,4 +472,36 @@ mod tests { ); } } + + /// Used in filename writing/parsing + #[test] + fn test_static_file_segment_str_roundtrip() { + for segment in StaticFileSegment::iter() { + let static_str = segment.as_str(); + assert_eq!(StaticFileSegment::from_str(static_str).unwrap(), segment); + + let expected_str = match segment { + StaticFileSegment::Headers => "headers", + StaticFileSegment::Transactions => "transactions", + StaticFileSegment::Receipts => "receipts", + }; + assert_eq!(static_str, expected_str); + } + } + + /// Used in segment headers serialize/deserialize + #[test] + fn test_static_file_segment_serde_roundtrip() { + for segment in StaticFileSegment::iter() { + let ser = serde_json::to_string(&segment).unwrap(); + assert_eq!(serde_json::from_str::(&ser).unwrap(), segment); + + let expected_str = match segment { + StaticFileSegment::Headers => "Headers", + StaticFileSegment::Transactions => "Transactions", + StaticFileSegment::Receipts => "Receipts", + }; + assert_eq!(ser, format!("\"{expected_str}\"")); + } + } } From e813681c5df6cb863f3b7b477c7feb8d7b9b66df Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 7 Nov 2025 00:05:11 +0100 Subject: [PATCH 363/371] feat(debug): re-establish block subscription (#19550) --- .../debug-client/src/providers/rpc.rs | 56 +++++++++++-------- 1 file changed, 32 insertions(+), 24 deletions(-) diff --git a/crates/consensus/debug-client/src/providers/rpc.rs b/crates/consensus/debug-client/src/providers/rpc.rs index 0c9dfbce7de..f670639dc66 100644 --- a/crates/consensus/debug-client/src/providers/rpc.rs +++ b/crates/consensus/debug-client/src/providers/rpc.rs @@ -61,34 +61,42 @@ where type Block = PrimitiveBlock; async fn subscribe_blocks(&self, tx: Sender) { - let Ok(mut stream) = self.full_block_stream().await.inspect_err(|err| { - warn!( - target: "consensus::debug-client", - %err, - url=%self.url, - "Failed to subscribe to blocks", - ); - }) else { - return - }; + loop { + let Ok(mut stream) = self.full_block_stream().await.inspect_err(|err| { + warn!( + target: "consensus::debug-client", + %err, + url=%self.url, + "Failed to subscribe to blocks", + ); + }) else { + return + }; - while let Some(res) = stream.next().await { - match res { - Ok(block) => { - if tx.send((self.convert)(block)).await.is_err() { - // Channel closed. - break; + while let Some(res) = stream.next().await { + match res { + Ok(block) => { + if tx.send((self.convert)(block)).await.is_err() { + // Channel closed. 
+ break;
+ }
+ }
+ Err(err) => {
+ warn!(
+ target: "consensus::debug-client",
+ %err,
+ url=%self.url,
+ "Failed to fetch a block",
+ );
+ }
 }
 }
+ // if stream terminated we want to re-establish it again
+ debug!(
+ target: "consensus::debug-client",
+ url=%self.url,
+ "Re-establishing block subscription",
+ );
 }
 }

From f69c544da6ee3288e268621f01998fca48121eaf Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Fri, 7 Nov 2025 00:43:58 +0100
Subject: [PATCH 364/371] refactor: replace GenericArray with regular arrays in ECIES (#19563)

---
 Cargo.lock | 2 --
 Cargo.toml | 2 --
 crates/net/ecies/Cargo.toml | 6 +++---
 crates/net/ecies/src/algorithm.rs | 9 ++++-----
 crates/net/ecies/src/mac.rs | 13 ++-----------
 5 files changed, 9 insertions(+), 23 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 6c03c02c01a..617a95cf186 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8005,7 +8005,6 @@ dependencies = [
 "ctr",
 "digest 0.10.7",
 "futures",
- "generic-array",
 "hmac",
 "pin-project",
 "rand 0.8.5",
@@ -8018,7 +8017,6 @@ dependencies = [
 "tokio-stream",
 "tokio-util",
 "tracing",
- "typenum",
 ]

[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 6e6f4226598..1ee73703824 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -549,8 +549,6 @@ dirs-next = "2.0.0"
 dyn-clone = "1.0.17"
 eyre = "0.6"
 fdlimit = "0.3.0"
-# pinned until downstream crypto libs migrate to 1.0 because 0.14.8 marks all types as deprecated
-generic-array = "=0.14.7"
 humantime = "2.1"
 humantime-serde = "1.1"
 itertools = { version = "0.14", default-features = false }
diff --git a/crates/net/ecies/Cargo.toml b/crates/net/ecies/Cargo.toml
index a55e5fa7e8f..75a4bc78978 100644
--- a/crates/net/ecies/Cargo.toml
+++ b/crates/net/ecies/Cargo.toml
@@ -25,9 +25,6 @@ pin-project.workspace = true
 tracing = { workspace = true, features = ["attributes"] }
-# HeaderBytes
-generic-array.workspace = true
-typenum.workspace = true
 byteorder.workspace = true
 # crypto
@@ -42,3 +39,6 @@ aes.workspace = true
 hmac.workspace = true
 block-padding.workspace = true
 cipher = { workspace = true, features = ["block-padding"] }
+
+[dev-dependencies]
+tokio = { workspace = true, features = ["net", "rt", "macros"] }
diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs
index dae5e501695..a6355c294f6 100644
--- a/crates/net/ecies/src/algorithm.rs
+++ b/crates/net/ecies/src/algorithm.rs
@@ -2,7 +2,7 @@
 use crate::{
 error::ECIESErrorImpl,
- mac::{HeaderBytes, MAC},
+ mac::MAC,
 util::{hmac_sha256, sha256},
 ECIESError,
 };
@@ -639,7 +639,6 @@ impl ECIES {
 header[..3].copy_from_slice(&buf[..3]);
 header[3..6].copy_from_slice(&[194, 128, 128]);
- let mut header = HeaderBytes::from(header);
 self.egress_aes.as_mut().unwrap().apply_keystream(&mut header);
 self.egress_mac.as_mut().unwrap().update_header(&header);
 let tag = self.egress_mac.as_mut().unwrap().digest();
@@ -660,7 +659,7 @@ impl ECIES {
 }
 let (header_bytes, mac_bytes) = split_at_mut(data, 16)?;
- let header = HeaderBytes::from_mut_slice(header_bytes);
+ let header: &mut [u8; 16] = header_bytes.try_into().unwrap();
 let mac = B128::from_slice(&mac_bytes[..16]);
 self.ingress_mac.as_mut().unwrap().update_header(header);
@@ -670,11 +669,11 @@
 }
 self.ingress_aes.as_mut().unwrap().apply_keystream(header);
- if header.as_slice().len() < 3 {
+ if header.len() < 3 {
 return Err(ECIESErrorImpl::InvalidHeader.into())
 }
- let body_size =
usize::try_from(header.as_slice().read_uint::(3)?)?; + let body_size = usize::try_from((&header[..]).read_uint::(3)?)?; self.body_size = Some(body_size); diff --git a/crates/net/ecies/src/mac.rs b/crates/net/ecies/src/mac.rs index 03847d091ee..fcccae72679 100644 --- a/crates/net/ecies/src/mac.rs +++ b/crates/net/ecies/src/mac.rs @@ -14,16 +14,7 @@ use alloy_primitives::{B128, B256}; use block_padding::NoPadding; use cipher::BlockEncrypt; use digest::KeyInit; -use generic_array::GenericArray; use sha3::{Digest, Keccak256}; -use typenum::U16; - -/// Type alias for a fixed-size array of 16 bytes used as headers. -/// -/// This type is defined as [`GenericArray`] and is commonly employed in Ethereum `RLPx` -/// protocol-related structures for headers. It represents 16 bytes of data used in various -/// cryptographic operations, such as MAC (Message Authentication Code) computation. -pub type HeaderBytes = GenericArray; /// [`Ethereum MAC`](https://github.com/ethereum/devp2p/blob/master/rlpx.md#mac) state. /// @@ -49,8 +40,8 @@ impl MAC { self.hasher.update(data) } - /// Accumulate the given [`HeaderBytes`] into the MAC's internal state. - pub fn update_header(&mut self, data: &HeaderBytes) { + /// Accumulate the given header bytes into the MAC's internal state. + pub fn update_header(&mut self, data: &[u8; 16]) { let aes = Aes256Enc::new_from_slice(self.secret.as_ref()).unwrap(); let mut encrypted = self.digest().0; From 4d258cb98d8e905dda0623db09508b9e03271edb Mon Sep 17 00:00:00 2001 From: Vitalyr <158586577+Vitaliyr888@users.noreply.github.com> Date: Fri, 7 Nov 2025 00:31:09 +0000 Subject: [PATCH 365/371] fix: update docs and arg comments (#19537) Co-authored-by: Matthias Seitz --- crates/node/core/src/args/network.rs | 4 ++-- docs/vocs/docs/pages/cli/reth/node.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/p2p/body.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/p2p/header.mdx | 4 ++-- docs/vocs/docs/pages/cli/reth/stage/run.mdx | 4 ++-- docs/vocs/docs/pages/sdk/custom-node/transactions.mdx | 4 ++-- testing/ef-tests/src/models.rs | 2 +- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/crates/node/core/src/args/network.rs b/crates/node/core/src/args/network.rs index 52ff52b1cee..4e57839e3ec 100644 --- a/crates/node/core/src/args/network.rs +++ b/crates/node/core/src/args/network.rs @@ -100,11 +100,11 @@ pub struct NetworkArgs { #[arg(long = "port", value_name = "PORT", default_value_t = DEFAULT_DISCOVERY_PORT)] pub port: u16, - /// Maximum number of outbound requests. default: 100 + /// Maximum number of outbound peers. default: 100 #[arg(long)] pub max_outbound_peers: Option, - /// Maximum number of inbound requests. default: 30 + /// Maximum number of inbound peers. default: 30 #[arg(long)] pub max_inbound_peers: Option, diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 57d5110bf36..6ea817a2a6d 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -179,10 +179,10 @@ Networking: [default: 30303] --max-outbound-peers - Maximum number of outbound requests. default: 100 + Maximum number of outbound peers. default: 100 --max-inbound-peers - Maximum number of inbound requests. default: 30 + Maximum number of inbound peers. default: 30 --max-tx-reqs Max concurrent `GetPooledTransactions` requests. 
diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index bbe6b375e5b..a7670bacce9 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -125,10 +125,10 @@ Networking: [default: 30303] --max-outbound-peers - Maximum number of outbound requests. default: 100 + Maximum number of outbound peers. default: 100 --max-inbound-peers - Maximum number of inbound requests. default: 30 + Maximum number of inbound peers. default: 30 --max-tx-reqs Max concurrent `GetPooledTransactions` requests. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index 533bd71de2e..76afd9a4cf5 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -125,10 +125,10 @@ Networking: [default: 30303] --max-outbound-peers - Maximum number of outbound requests. default: 100 + Maximum number of outbound peers. default: 100 --max-inbound-peers - Maximum number of inbound requests. default: 30 + Maximum number of inbound peers. default: 30 --max-tx-reqs Max concurrent `GetPooledTransactions` requests. diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 769bebde10f..b7a5a41aaf9 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -229,10 +229,10 @@ Networking: [default: 30303] --max-outbound-peers - Maximum number of outbound requests. default: 100 + Maximum number of outbound peers. default: 100 --max-inbound-peers - Maximum number of inbound requests. default: 30 + Maximum number of inbound peers. default: 30 --max-tx-reqs Max concurrent `GetPooledTransactions` requests. diff --git a/docs/vocs/docs/pages/sdk/custom-node/transactions.mdx b/docs/vocs/docs/pages/sdk/custom-node/transactions.mdx index 52881a368fb..407ebcfdec3 100644 --- a/docs/vocs/docs/pages/sdk/custom-node/transactions.mdx +++ b/docs/vocs/docs/pages/sdk/custom-node/transactions.mdx @@ -67,7 +67,7 @@ pub enum CustomTransaction { /// A regular Optimism transaction as defined by [`OpTxEnvelope`]. #[envelope(flatten)] Op(OpTxEnvelope), - /// A [`TxPayment`] tagged with type 0x7E. + /// A [`TxPayment`] tagged with type 0x2A (decimal 42). #[envelope(ty = 42)] Payment(Signed), } @@ -178,7 +178,7 @@ pub enum CustomPooledTransaction { /// A regular Optimism transaction as defined by [`OpPooledTransaction`]. #[envelope(flatten)] Op(OpPooledTransaction), - /// A [`TxPayment`] tagged with type 0x7E. + /// A [`TxPayment`] tagged with type 0x2A (decimal 42). 
#[envelope(ty = 42)] Payment(Signed), } diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 49c49bf1936..c6f4e44cffa 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -265,7 +265,7 @@ pub enum ForkSpec { FrontierToHomesteadAt5, /// Homestead Homestead, - /// Homestead to Tangerine + /// Homestead to DAO HomesteadToDaoAt5, /// Homestead to Tangerine HomesteadToEIP150At5, From 42ac2aaeb69781b26f3c8101c3960358585adcf4 Mon Sep 17 00:00:00 2001 From: rakita Date: Fri, 7 Nov 2025 08:34:56 +0100 Subject: [PATCH 366/371] chore: bump revm v31.0.1 (#19567) --- Cargo.lock | 52 ++++++++++++++++++++++++++-------------------------- Cargo.toml | 22 +++++++++++----------- 2 files changed, 37 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 617a95cf186..1a94cccdbac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6236,9 +6236,9 @@ dependencies = [ [[package]] name = "op-revm" -version = "12.0.0" +version = "12.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e599c71e91670fb922e3cdcb04783caed1226352da19d674bd001b3bf2bc433" +checksum = "dcd8cb3274e87936b595eb2247ad3bda146695fceb7159afa76010529af53553" dependencies = [ "auto_impl", "revm", @@ -10927,9 +10927,9 @@ dependencies = [ [[package]] name = "revm" -version = "31.0.0" +version = "31.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7bba993ce958f0b6eb23d2644ea8360982cb60baffedf961441e36faba6a2ca" +checksum = "93df0ff5eb70facbc872f82da4b815d7bd8e36b7ee525c637cabcb2a6af8a708" dependencies = [ "revm-bytecode", "revm-context", @@ -10946,9 +10946,9 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "7.1.0" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f2b51c414b7e79edd4a0569d06e2c4c029f8b60e5f3ee3e2fa21dc6c3717ee3" +checksum = "e2c6b5e6e8dd1e28a4a60e5f46615d4ef0809111c9e63208e55b5c7058200fb0" dependencies = [ "bitvec", "phf", @@ -10958,9 +10958,9 @@ dependencies = [ [[package]] name = "revm-context" -version = "11.0.0" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f69efee45130bd9e5b0a7af27552fddc70bc161dafed533c2f818a2d1eb654e6" +checksum = "583c80d674f51b28a0d0a7309bda0867bcb0fd41b4e34976eded145edbb089fc" dependencies = [ "bitvec", "cfg-if", @@ -10975,9 +10975,9 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "12.0.0" +version = "12.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce2525e93db0ae2a3ec7dcde5443dfdb6fbf321c5090380d775730c67bc6cee" +checksum = "f6d701e2c2347d65216b066489ab22a0a8e1f7b2568256110d73a7d5eff3385c" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -10991,9 +10991,9 @@ dependencies = [ [[package]] name = "revm-database" -version = "9.0.3" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2602625aa11ab1eda8e208e96b652c0bfa989b86c104a36537a62b081228af9" +checksum = "7a4505d9688482fe0c3b8c09d9afbc4656e2bf9b48855e1c86c93bd4508e496a" dependencies = [ "alloy-eips", "revm-bytecode", @@ -11005,9 +11005,9 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "8.0.4" +version = "8.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58a4621143d6515e32f969306d9c85797ae0d3fe0c74784f1fda02ba441e5a08" +checksum = "8cce03e3780287b07abe58faf4a7f5d8be7e81321f93ccf3343c8f7755602bae" dependencies = [ "auto_impl", 
"either", @@ -11018,9 +11018,9 @@ dependencies = [ [[package]] name = "revm-handler" -version = "12.0.0" +version = "12.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e756198d43b6c4c5886548ffbc4594412d1a82b81723525c6e85ed6da0e91c5f" +checksum = "b3da9e26f05ed723cf423b92f012a7775eef9e7d897633d11ec83535e92cda2d" dependencies = [ "auto_impl", "derive-where", @@ -11037,9 +11037,9 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "12.0.0" +version = "12.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fdd1e74cc99c6173c8692b6e480291e2ad0c21c716d9dc16e937ab2e0da219" +checksum = "57afb06e5985dbd2e8a48a3e6727cb0dd45148e4e6e028ac8222e262e440d3de" dependencies = [ "auto_impl", "either", @@ -11075,9 +11075,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "29.0.0" +version = "29.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44efb7c2f4034a5bfd3d71ebfed076e48ac75e4972f1c117f2a20befac7716cd" +checksum = "22789ce92c5808c70185e3bc49732f987dc6fd907f77828c8d3470b2299c9c65" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -11088,9 +11088,9 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "29.0.0" +version = "29.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585098ede6d84d6fc6096ba804b8e221c44dc77679571d32664a55e665aa236b" +checksum = "968b124028960201abf6d6bf8e223f15fadebb4307df6b7dc9244a0aab5d2d05" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11113,9 +11113,9 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "21.0.1" +version = "21.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "536f30e24c3c2bf0d3d7d20fa9cf99b93040ed0f021fd9301c78cddb0dacda13" +checksum = "29e161db429d465c09ba9cbff0df49e31049fe6b549e28eb0b7bd642fcbd4412" dependencies = [ "alloy-primitives", "num_enum", @@ -11125,9 +11125,9 @@ dependencies = [ [[package]] name = "revm-state" -version = "8.1.0" +version = "8.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0b4873815e31cbc3e5b183b9128b86c09a487c027aaf8cc5cf4b9688878f9b" +checksum = "7d8be953b7e374dbdea0773cf360debed8df394ea8d82a8b240a6b5da37592fc" dependencies = [ "bitflags 2.10.0", "revm-bytecode", diff --git a/Cargo.toml b/Cargo.toml index 1ee73703824..2a82fcfa8ad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -466,17 +466,17 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { version = "31.0.0", default-features = false } -revm-bytecode = { version = "7.1.0", default-features = false } -revm-database = { version = "9.0.3", default-features = false } -revm-state = { version = "8.1.0", default-features = false } -revm-primitives = { version = "21.0.1", default-features = false } -revm-interpreter = { version = "29.0.0", default-features = false } -revm-inspector = { version = "12.0.0", default-features = false } -revm-context = { version = "11.0.0", default-features = false } -revm-context-interface = { version = "12.0.0", default-features = false } -revm-database-interface = { version = "8.0.4", default-features = false } -op-revm = { version = "12.0.0", default-features = false } +revm = { version = "31.0.1", default-features = false } +revm-bytecode = { version = "7.1.1", default-features = false } +revm-database = { version = "9.0.4", default-features = false } +revm-state = { version = "8.1.1", 
default-features = false } +revm-primitives = { version = "21.0.2", default-features = false } +revm-interpreter = { version = "29.0.1", default-features = false } +revm-inspector = { version = "12.0.1", default-features = false } +revm-context = { version = "11.0.1", default-features = false } +revm-context-interface = { version = "12.0.1", default-features = false } +revm-database-interface = { version = "8.0.5", default-features = false } +op-revm = { version = "12.0.1", default-features = false } revm-inspectors = "0.32.0" # eth From 9f9ab671c3236f14d9abf518452fb000f7acbad6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 7 Nov 2025 09:17:44 +0100 Subject: [PATCH 367/371] ci: tag (#19572) --- .github/workflows/docker-tag-latest.yml | 39 +++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 .github/workflows/docker-tag-latest.yml diff --git a/.github/workflows/docker-tag-latest.yml b/.github/workflows/docker-tag-latest.yml new file mode 100644 index 00000000000..7688875641c --- /dev/null +++ b/.github/workflows/docker-tag-latest.yml @@ -0,0 +1,39 @@ +# Tag a specific Docker release version as latest + +name: docker-tag-latest + +on: + workflow_dispatch: + inputs: + version: + description: 'Release version to tag as latest (e.g., v1.8.4)' + required: true + type: string + +env: + DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/reth + DOCKER_USERNAME: ${{ github.actor }} + +jobs: + tag-latest: + name: Tag release as latest + runs-on: ubuntu-24.04 + permissions: + packages: write + contents: read + steps: + - name: Log in to Docker + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin + + - name: Pull release image + run: | + docker pull ${DOCKER_IMAGE_NAME}:${{ inputs.version }} + + - name: Tag as latest + run: | + docker tag ${DOCKER_IMAGE_NAME}:${{ inputs.version }} ${DOCKER_IMAGE_NAME}:latest + + - name: Push latest tag + run: | + docker push ${DOCKER_IMAGE_NAME}:latest From 5464312672a397c43d5912c515e5de509dcebf80 Mon Sep 17 00:00:00 2001 From: YK Date: Fri, 7 Nov 2025 16:10:16 +0800 Subject: [PATCH 368/371] fix(bench-compare): validate remote git references (#19569) --- bin/reth-bench-compare/src/git.rs | 54 +++++++++++++++---------------- 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/bin/reth-bench-compare/src/git.rs b/bin/reth-bench-compare/src/git.rs index 0da82b14018..001466969d4 100644 --- a/bin/reth-bench-compare/src/git.rs +++ b/bin/reth-bench-compare/src/git.rs @@ -181,45 +181,43 @@ impl GitManager { /// Validate that the specified git references exist (branches, tags, or commits) pub(crate) fn validate_refs(&self, refs: &[&str]) -> Result<()> { for &git_ref in refs { - // Try branch first, then tag, then commit - let branch_check = Command::new("git") - .args(["rev-parse", "--verify", &format!("refs/heads/{git_ref}")]) + // Try to resolve the ref similar to `git checkout` by peeling to a commit. + // First try the ref as-is with ^{commit}, then fall back to origin/{ref}^{commit}. 
+ let as_is = format!("{git_ref}^{{commit}}"); + let ref_check = Command::new("git") + .args(["rev-parse", "--verify", &as_is]) .current_dir(&self.repo_root) .output(); - let tag_check = Command::new("git") - .args(["rev-parse", "--verify", &format!("refs/tags/{git_ref}")]) - .current_dir(&self.repo_root) - .output(); - - let commit_check = Command::new("git") - .args(["rev-parse", "--verify", &format!("{git_ref}^{{commit}}")]) - .current_dir(&self.repo_root) - .output(); - - let found = if let Ok(output) = branch_check && - output.status.success() - { - info!("Validated branch exists: {}", git_ref); - true - } else if let Ok(output) = tag_check && + let found = if let Ok(output) = ref_check && output.status.success() { - info!("Validated tag exists: {}", git_ref); - true - } else if let Ok(output) = commit_check && - output.status.success() - { - info!("Validated commit exists: {}", git_ref); + info!("Validated reference exists: {}", git_ref); true } else { - false + // Try remote-only branches via origin/{ref} + let origin_ref = format!("origin/{git_ref}^{{commit}}"); + let origin_check = Command::new("git") + .args(["rev-parse", "--verify", &origin_ref]) + .current_dir(&self.repo_root) + .output(); + + if let Ok(output) = origin_check && + output.status.success() + { + info!("Validated remote reference exists: origin/{}", git_ref); + true + } else { + false + } }; if !found { return Err(eyre!( - "Git reference '{}' does not exist as branch, tag, or commit", - git_ref + "Git reference '{}' does not exist as branch, tag, or commit (tried '{}' and 'origin/{}^{{commit}}')", + git_ref, + format!("{git_ref}^{{commit}}"), + git_ref, )); } } From 4394860df4024d6a81a7f06704139f244ebb0135 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 7 Nov 2025 09:48:43 +0100 Subject: [PATCH 369/371] ci: add independent tagging options (#19574) --- .github/workflows/docker-tag-latest.yml | 52 ++++++++++++++++++++----- 1 file changed, 43 insertions(+), 9 deletions(-) diff --git a/.github/workflows/docker-tag-latest.yml b/.github/workflows/docker-tag-latest.yml index 7688875641c..1f76254d49d 100644 --- a/.github/workflows/docker-tag-latest.yml +++ b/.github/workflows/docker-tag-latest.yml @@ -9,15 +9,25 @@ on: description: 'Release version to tag as latest (e.g., v1.8.4)' required: true type: string + tag_reth: + description: 'Tag reth image as latest' + required: false + type: boolean + default: true + tag_op_reth: + description: 'Tag op-reth image as latest' + required: false + type: boolean + default: false env: - DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/reth DOCKER_USERNAME: ${{ github.actor }} jobs: - tag-latest: - name: Tag release as latest + tag-reth-latest: + name: Tag reth as latest runs-on: ubuntu-24.04 + if: ${{ inputs.tag_reth }} permissions: packages: write contents: read @@ -26,14 +36,38 @@ jobs: run: | echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin - - name: Pull release image + - name: Pull reth release image run: | - docker pull ${DOCKER_IMAGE_NAME}:${{ inputs.version }} + docker pull ghcr.io/${{ github.repository_owner }}/reth:${{ inputs.version }} - - name: Tag as latest + - name: Tag reth as latest run: | - docker tag ${DOCKER_IMAGE_NAME}:${{ inputs.version }} ${DOCKER_IMAGE_NAME}:latest + docker tag ghcr.io/${{ github.repository_owner }}/reth:${{ inputs.version }} ghcr.io/${{ github.repository_owner }}/reth:latest - - name: Push latest tag + - name: Push reth latest tag run: | - docker push 
${DOCKER_IMAGE_NAME}:latest + docker push ghcr.io/${{ github.repository_owner }}/reth:latest + + tag-op-reth-latest: + name: Tag op-reth as latest + runs-on: ubuntu-24.04 + if: ${{ inputs.tag_op_reth }} + permissions: + packages: write + contents: read + steps: + - name: Log in to Docker + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin + + - name: Pull op-reth release image + run: | + docker pull ghcr.io/${{ github.repository_owner }}/op-reth:${{ inputs.version }} + + - name: Tag op-reth as latest + run: | + docker tag ghcr.io/${{ github.repository_owner }}/op-reth:${{ inputs.version }} ghcr.io/${{ github.repository_owner }}/op-reth:latest + + - name: Push op-reth latest tag + run: | + docker push ghcr.io/${{ github.repository_owner }}/op-reth:latest From b48c72fad20c0964306392066f5c077a376e5fa9 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Fri, 7 Nov 2025 10:16:55 +0100 Subject: [PATCH 370/371] chore: BuiltPayloadExecutedBlock (#19430) --- Cargo.lock | 5 +- crates/node/builder/src/launch/engine.rs | 4 +- crates/optimism/payload/Cargo.toml | 2 +- crates/optimism/payload/src/builder.rs | 9 ++-- crates/optimism/payload/src/payload.rs | 11 +++-- crates/payload/primitives/Cargo.toml | 4 ++ crates/payload/primitives/src/lib.rs | 4 +- crates/payload/primitives/src/traits.rs | 62 ++++++++++++++++++++++-- crates/trie/common/src/hashed_state.rs | 43 ++++++++++++++++ examples/custom-node/Cargo.toml | 1 - examples/custom-node/src/engine.rs | 11 ++--- 11 files changed, 127 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1a94cccdbac..a3adedb36f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3579,7 +3579,6 @@ dependencies = [ "op-alloy-rpc-types", "op-alloy-rpc-types-engine", "op-revm", - "reth-chain-state", "reth-codecs", "reth-db-api", "reth-engine-primitives", @@ -9532,10 +9531,10 @@ dependencies = [ "alloy-rpc-types-debug", "alloy-rpc-types-engine", "derive_more", + "either", "op-alloy-consensus", "op-alloy-rpc-types-engine", "reth-basic-payload-builder", - "reth-chain-state", "reth-chainspec", "reth-evm", "reth-execution-types", @@ -9742,7 +9741,9 @@ dependencies = [ "reth-chain-state", "reth-chainspec", "reth-errors", + "reth-execution-types", "reth-primitives-traits", + "reth-trie-common", "serde", "thiserror 2.0.17", "tokio", diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index ffe07aaac88..48b8708cedc 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -280,8 +280,8 @@ impl EngineNodeLauncher { tokio::select! 
{ payload = built_payloads.select_next_some() => { if let Some(executed_block) = payload.executed_block() { - debug!(target: "reth::cli", block=?executed_block.recovered_block().num_hash(), "inserting built payload"); - engine_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block).into()); + debug!(target: "reth::cli", block=?executed_block.recovered_block.num_hash(), "inserting built payload"); + engine_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block.into_executed_payload()).into()); } } event = engine_service.next() => { diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index e75075a12cf..0674ed7cf73 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -25,7 +25,6 @@ reth-payload-builder-primitives.workspace = true reth-payload-util.workspace = true reth-payload-primitives = { workspace = true, features = ["op"] } reth-basic-payload-builder.workspace = true -reth-chain-state.workspace = true reth-payload-validator.workspace = true # op-reth @@ -52,3 +51,4 @@ tracing.workspace = true thiserror.workspace = true sha2.workspace = true serde.workspace = true +either.workspace = true diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 3d047c5f617..1f7c0c00f91 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -9,7 +9,6 @@ use alloy_primitives::{B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; use reth_basic_payload_builder::*; -use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ block::BlockExecutorFor, @@ -28,7 +27,7 @@ use reth_optimism_txpool::{ OpPooledTx, }; use reth_payload_builder_primitives::PayloadBuilderError; -use reth_payload_primitives::{BuildNextEnv, PayloadBuilderAttributes}; +use reth_payload_primitives::{BuildNextEnv, BuiltPayloadExecutedBlock, PayloadBuilderAttributes}; use reth_payload_util::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions}; use reth_primitives_traits::{ HeaderTy, NodePrimitives, SealedHeader, SealedHeaderFor, SignedTransaction, TxTy, @@ -384,11 +383,11 @@ impl OpBuilder<'_, Txs> { ); // create the executed block data - let executed: ExecutedBlock = ExecutedBlock { + let executed: BuiltPayloadExecutedBlock = BuiltPayloadExecutedBlock { recovered_block: Arc::new(block), execution_output: Arc::new(execution_outcome), - hashed_state: Arc::new(hashed_state), - trie_updates: Arc::new(trie_updates), + hashed_state: either::Either::Left(Arc::new(hashed_state)), + trie_updates: either::Either::Left(Arc::new(trie_updates)), }; let no_tx_pool = ctx.attributes().no_tx_pool(); diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 6f530acd853..b44f69ddb7e 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -16,12 +16,13 @@ use op_alloy_consensus::{encode_holocene_extra_data, encode_jovian_extra_data, E use op_alloy_rpc_types_engine::{ OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpExecutionPayloadV4, }; -use reth_chain_state::ExecutedBlock; use reth_chainspec::EthChainSpec; use reth_optimism_evm::OpNextBlockEnvAttributes; use reth_optimism_forks::OpHardforks; use reth_payload_builder::{EthPayloadBuilderAttributes, PayloadBuilderError}; -use 
reth_payload_primitives::{BuildNextEnv, BuiltPayload, PayloadBuilderAttributes};
+use reth_payload_primitives::{
+    BuildNextEnv, BuiltPayload, BuiltPayloadExecutedBlock, PayloadBuilderAttributes,
+};
 use reth_primitives_traits::{
     NodePrimitives, SealedBlock, SealedHeader, SignedTransaction, WithEncoded,
 };
@@ -176,7 +177,7 @@ pub struct OpBuiltPayload<N: NodePrimitives = OpPrimitives> {
     /// Sealed block
     pub(crate) block: Arc<SealedBlock<N::Block>>,
     /// Block execution data for the payload, if any.
-    pub(crate) executed_block: Option<ExecutedBlock<N>>,
+    pub(crate) executed_block: Option<BuiltPayloadExecutedBlock<N>>,
     /// The fees of the block
     pub(crate) fees: U256,
 }
@@ -189,7 +190,7 @@ impl<N: NodePrimitives> OpBuiltPayload<N> {
         id: PayloadId,
         block: Arc<SealedBlock<N::Block>>,
         fees: U256,
-        executed_block: Option<ExecutedBlock<N>>,
+        executed_block: Option<BuiltPayloadExecutedBlock<N>>,
     ) -> Self {
         Self { id, block, fees, executed_block }
     }
@@ -226,7 +227,7 @@ impl<N: NodePrimitives> BuiltPayload for OpBuiltPayload<N> {
         self.fees
     }
 
-    fn executed_block(&self) -> Option<ExecutedBlock<N>> {
+    fn executed_block(&self) -> Option<BuiltPayloadExecutedBlock<N>> {
         self.executed_block.clone()
     }
 
diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml
index 670727e3c6d..ce8e07fc8ee 100644
--- a/crates/payload/primitives/Cargo.toml
+++ b/crates/payload/primitives/Cargo.toml
@@ -17,6 +17,8 @@ reth-primitives-traits.workspace = true
 reth-chainspec.workspace = true
 reth-errors.workspace = true
 reth-chain-state.workspace = true
+reth-execution-types.workspace = true
+reth-trie-common.workspace = true
 
 # alloy
 alloy-eips.workspace = true
@@ -38,6 +40,8 @@ assert_matches.workspace = true
 default = ["std"]
 std = [
     "reth-chainspec/std",
+    "reth-execution-types/std",
+    "reth-trie-common/std",
     "alloy-eips/std",
     "alloy-primitives/std",
     "alloy-rpc-types-engine/std",
diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs
index ca3cccda883..1c32bb3fa3d 100644
--- a/crates/payload/primitives/src/lib.rs
+++ b/crates/payload/primitives/src/lib.rs
@@ -26,8 +26,8 @@ pub use error::{
 
 mod traits;
 pub use traits::{
-    BuildNextEnv, BuiltPayload, PayloadAttributes, PayloadAttributesBuilder,
-    PayloadBuilderAttributes,
+    BuildNextEnv, BuiltPayload, BuiltPayloadExecutedBlock, PayloadAttributes,
+    PayloadAttributesBuilder, PayloadBuilderAttributes,
 };
 
 mod payload;
diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs
index 160956afa27..fdc078887dc 100644
--- a/crates/payload/primitives/src/traits.rs
+++ b/crates/payload/primitives/src/traits.rs
@@ -1,7 +1,7 @@
 //! Core traits for working with execution payloads.
 
 use crate::PayloadBuilderError;
-use alloc::{boxed::Box, vec::Vec};
+use alloc::{boxed::Box, sync::Arc, vec::Vec};
 use alloy_eips::{
     eip4895::{Withdrawal, Withdrawals},
     eip7685::Requests,
@@ -9,8 +9,60 @@ use alloy_eips::{
 use alloy_primitives::{Address, B256, U256};
 use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId};
 use core::fmt;
-use reth_chain_state::ExecutedBlock;
-use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader};
+use either::Either;
+use reth_execution_types::ExecutionOutcome;
+use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader};
+use reth_trie_common::{
+    updates::{TrieUpdates, TrieUpdatesSorted},
+    HashedPostState, HashedPostStateSorted,
+};
+
+/// Represents an executed block for payload building purposes.
+///
+/// This type captures the complete execution state of a built block,
+/// including the recovered block, execution outcome, hashed state, and trie updates.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct BuiltPayloadExecutedBlock<N: NodePrimitives> {
+    /// Recovered Block
+    pub recovered_block: Arc<RecoveredBlock<N::Block>>,
+    /// Block's execution outcome.
+    pub execution_output: Arc<ExecutionOutcome<N::Receipt>>,
+    /// Block's hashed state.
+    ///
+    /// Supports both unsorted and sorted variants so payload builders can avoid cloning in order
+    /// to convert from one to the other when it's not necessary.
+    pub hashed_state: Either<Arc<HashedPostState>, Arc<HashedPostStateSorted>>,
+    /// Trie updates that result from calculating the state root for the block.
+    ///
+    /// Supports both unsorted and sorted variants so payload builders can avoid cloning in order
+    /// to convert from one to the other when it's not necessary.
+    pub trie_updates: Either<Arc<TrieUpdates>, Arc<TrieUpdatesSorted>>,
+}
+
+impl<N: NodePrimitives> BuiltPayloadExecutedBlock<N> {
+    /// Converts this into an [`reth_chain_state::ExecutedBlock`].
+    ///
+    /// If the hashed state or trie updates are in sorted form, they will be converted
+    /// back to their unsorted representations.
+    pub fn into_executed_payload(self) -> reth_chain_state::ExecutedBlock<N> {
+        let hashed_state = match self.hashed_state {
+            Either::Left(unsorted) => unsorted,
+            Either::Right(sorted) => Arc::new(Arc::unwrap_or_clone(sorted).into()),
+        };
+
+        let trie_updates = match self.trie_updates {
+            Either::Left(unsorted) => unsorted,
+            Either::Right(sorted) => Arc::new(Arc::unwrap_or_clone(sorted).into()),
+        };
+
+        reth_chain_state::ExecutedBlock {
+            recovered_block: self.recovered_block,
+            execution_output: self.execution_output,
+            hashed_state,
+            trie_updates,
+        }
+    }
+}
 
 /// Represents a successfully built execution payload (block).
 ///
@@ -30,7 +82,7 @@ pub trait BuiltPayload: Send + Sync + fmt::Debug {
     /// Returns the complete execution result including state updates.
     ///
     /// Returns `None` if execution data is not available or not tracked.
-    fn executed_block(&self) -> Option<ExecutedBlock<Self::Primitives>> {
+    fn executed_block(&self) -> Option<BuiltPayloadExecutedBlock<Self::Primitives>> {
         None
     }
 
@@ -155,7 +207,7 @@ where
     }
 }
 
-impl<L, R, T> PayloadAttributesBuilder<T> for either::Either<L, R>
+impl<L, R, T> PayloadAttributesBuilder<T> for Either<L, R>
 where
     L: PayloadAttributesBuilder<T>,
     R: PayloadAttributesBuilder<T>,
diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs
index 8d99ee5ebbb..22f57d9f34d 100644
--- a/crates/trie/common/src/hashed_state.rs
+++ b/crates/trie/common/src/hashed_state.rs
@@ -624,6 +624,49 @@ impl HashedStorageSorted {
     }
 }
 
+impl From<HashedStorageSorted> for HashedStorage {
+    fn from(sorted: HashedStorageSorted) -> Self {
+        let mut storage = B256Map::default();
+
+        // Add all non-zero valued slots
+        for (slot, value) in sorted.non_zero_valued_slots {
+            storage.insert(slot, value);
+        }
+
+        // Add all zero valued slots
+        for slot in sorted.zero_valued_slots {
+            storage.insert(slot, U256::ZERO);
+        }
+
+        Self { wiped: sorted.wiped, storage }
+    }
+}
+
+impl From<HashedPostStateSorted> for HashedPostState {
+    fn from(sorted: HashedPostStateSorted) -> Self {
+        let mut accounts = B256Map::default();
+
+        // Add all updated accounts
+        for (address, account) in sorted.accounts.accounts {
+            accounts.insert(address, Some(account));
+        }
+
+        // Add all destroyed accounts
+        for address in sorted.accounts.destroyed_accounts {
+            accounts.insert(address, None);
+        }
+
+        // Convert storages
+        let storages = sorted
+            .storages
+            .into_iter()
+            .map(|(address, storage)| (address, storage.into()))
+            .collect();
+
+        Self { accounts, storages }
+    }
+}
+
 /// An iterator that yields chunks of the state updates of at most `size` account and storage
/// diff --git a/examples/custom-node/Cargo.toml b/examples/custom-node/Cargo.toml index fe1f0006256..8eb3dbd143b 100644 --- a/examples/custom-node/Cargo.toml +++ b/examples/custom-node/Cargo.toml @@ -7,7 +7,6 @@ license.workspace = true [dependencies] # reth -reth-chain-state.workspace = true reth-codecs.workspace = true reth-network-peers.workspace = true reth-node-builder.workspace = true diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index 0c80e52a661..d7eabdc19f7 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -6,14 +6,13 @@ use crate::{ }; use alloy_eips::eip2718::WithEncoded; use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload}; -use reth_chain_state::ExecutedBlock; use reth_engine_primitives::EngineApiValidator; use reth_ethereum::{ node::api::{ - validate_version_specific_fields, AddOnsContext, BuiltPayload, EngineApiMessageVersion, - EngineObjectValidationError, ExecutionPayload, FullNodeComponents, NewPayloadError, - NodePrimitives, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, - PayloadTypes, PayloadValidator, + validate_version_specific_fields, AddOnsContext, BuiltPayload, BuiltPayloadExecutedBlock, + EngineApiMessageVersion, EngineObjectValidationError, ExecutionPayload, FullNodeComponents, + NewPayloadError, NodePrimitives, PayloadAttributes, PayloadBuilderAttributes, + PayloadOrAttributes, PayloadTypes, PayloadValidator, }, primitives::{RecoveredBlock, SealedBlock}, storage::StateProviderFactory, @@ -167,7 +166,7 @@ impl BuiltPayload for CustomBuiltPayload { self.0.fees() } - fn executed_block(&self) -> Option> { + fn executed_block(&self) -> Option> { self.0.executed_block() } From c67c132ef6345b32262d0032b5b6422908c6b63a Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Tue, 11 Nov 2025 20:16:59 +0000 Subject: [PATCH 371/371] feat: ongoing upstream merge Signed-off-by: Gregory Edison --- Cargo.lock | 1705 +++++++++-------- Cargo.toml | 28 +- .../engine/tree/src/tree/payload_validator.rs | 4 +- crates/node/builder/src/components/pool.rs | 4 +- crates/scroll/alloy/evm/src/block/mod.rs | 17 +- crates/scroll/alloy/evm/src/lib.rs | 2 + crates/scroll/alloy/evm/src/system_caller.rs | 9 +- .../engine-primitives/src/payload/built.rs | 9 +- crates/scroll/evm/src/build.rs | 15 +- crates/scroll/openvm-compat/Cargo.toml | 2 +- crates/scroll/payload/src/builder.rs | 40 +- crates/scroll/rpc/src/eth/mod.rs | 4 +- crates/stages/stages/src/sets.rs | 2 +- 13 files changed, 964 insertions(+), 877 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5fae5c4fb69..0d04ac07097 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -74,6 +74,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "aligned-vec" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc890384c8602f339876ded803c97ad529f3842aba97f6392b3dba0dd171769b" +dependencies = [ + "equator", +] + [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -112,9 +121,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59094911f05dbff1cf5b29046a00ef26452eccc8d47136d50a47c0cf22f00c85" +checksum = "90d103d3e440ad6f703dd71a5b58a6abd24834563bde8a5fabe706e00242f810" dependencies = [ "alloy-eips", "alloy-primitives", @@ -134,14 +143,14 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = 
"alloy-consensus-any" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903cb8f728107ca27c816546f15be38c688df3c381d7bd1a4a9f215effc1ddb4" +checksum = "48ead76c8c84ab3a50c31c56bc2c748c2d64357ad2131c32f9b10ab790a25e1a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -154,9 +163,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03df5cb3b428ac96b386ad64c11d5c6e87a5505682cf1fbd6f8f773e9eda04f6" +checksum = "d5903097e4c131ad2dd80d87065f23c715ccb9cdb905fa169dffab8e1e798bae" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -171,7 +180,7 @@ dependencies = [ "futures", "futures-util", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -203,7 +212,7 @@ dependencies = [ "crc", "rand 0.8.5", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -232,14 +241,14 @@ dependencies = [ "rand 0.8.5", "serde", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-eips" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac7f1c9a1ccc7f3e03c36976455751a6166a4f0d2d2c530c3f87dfe7d0cdc836" +checksum = "7bdbec74583d0067798d77afa43d58f00d93035335d7ceaa5d3f93857d461bb9" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -256,15 +265,15 @@ dependencies = [ "ethereum_ssz_derive", "serde", "serde_with", - "sha2 0.10.9", - "thiserror 2.0.16", + "sha2", + "thiserror 2.0.17", ] [[package]] name = "alloy-evm" -version = "0.21.2" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06a5f67ee74999aa4fe576a83be1996bdf74a30fce3d248bf2007d6fc7dae8aa" +checksum = "6223235f0b785a83dd10dc1599b7f3763c65e4f98b4e9e4e10e576bbbdf7dfa2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -280,14 +289,14 @@ dependencies = [ "op-alloy-rpc-types-engine", "op-revm", "revm", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-genesis" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1421f6c9d15e5b86afbfe5865ca84dea3b9f77173a0963c1a2ee4e626320ada9" +checksum = "c25d5acb35706e683df1ea333c862bdb6b7c5548836607cd5bb56e501cca0b4f" dependencies = [ "alloy-eips", "alloy-primitives", @@ -299,9 +308,9 @@ dependencies = [ [[package]] name = "alloy-hardforks" -version = "0.3.5" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "889eb3949b58368a09d4f16931c660275ef5fb08e5fbd4a96573b19c7085c41f" +checksum = "1e29d7eacf42f89c21d7f089916d0bdb4f36139a31698790e8837d2dbbd4b2c3" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -325,24 +334,24 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65f763621707fa09cece30b73ecc607eb43fd7a72451fe3b46f645b905086926" +checksum = "31b67c5a702121e618217f7a86f314918acb2622276d0273490e2d4534490bc0" dependencies = [ "alloy-primitives", "alloy-sol-types", "http", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "alloy-network" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f59a869fa4b4c3a7f08b1c8cb79aec61c29febe6e24a24fe0fcfded8a9b5703" +checksum = 
"612296e6b723470bb1101420a73c63dfd535aa9bf738ce09951aedbd4ab7292e" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -361,14 +370,14 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-network-primitives" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46e9374c667c95c41177602ebe6f6a2edd455193844f011d973d374b65501b38" +checksum = "a0e7918396eecd69d9c907046ec8a93fb09b89e2f325d5e7ea9c4e3929aa0dd2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -379,9 +388,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.21.2" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17aaeb600740c181bf29c9f138f9b228d115ea74fa6d0f0343e1952f1a766968" +checksum = "3ad8f3a679eb44ee21481edabd628d191c9a42d182ed29923b4d43a27a0f2cc8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -392,13 +401,14 @@ dependencies = [ "op-alloy-consensus", "op-revm", "revm", + "thiserror 2.0.17", ] [[package]] name = "alloy-op-hardforks" -version = "0.3.5" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "599c1d7dfbccb66603cb93fde00980d12848d32fe5e814f50562104a92df6487" +checksum = "95ac97adaba4c26e17192d81f49186ac20c1e844e35a00e169c8d3d58bc84e6b" dependencies = [ "alloy-chains", "alloy-hardforks", @@ -422,7 +432,7 @@ dependencies = [ "foldhash 0.2.0", "getrandom 0.3.3", "hashbrown 0.16.0", - "indexmap 2.11.1", + "indexmap 2.12.0", "itoa", "k256", "keccak-asm", @@ -439,9 +449,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77818b7348bd5486491a5297579dbfe5f706a81f8e1f5976393025f1e22a7c7d" +checksum = "55c1313a527a2e464d067c031f3c2ec073754ef615cc0eabca702fd0fe35729c" dependencies = [ "alloy-chains", "alloy-consensus", @@ -475,7 +485,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", "url", @@ -484,9 +494,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "249b45103a66c9ad60ad8176b076106d03a2399a37f0ee7b0e03692e6b354cb9" +checksum = "810766eeed6b10ffa11815682b3f37afc5019809e3b470b23555297d5770ce63" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -528,9 +538,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2430d5623e428dd012c6c2156ae40b7fe638d6fca255e3244e0fba51fa698e93" +checksum = "45f802228273056528dfd6cc8845cc91a7c7e0c6fc1a66d19e8673743dacdc7e" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -554,9 +564,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e131624d08a25cfc40557041e7dc42e1182fa1153e7592d120f769a1edce56" +checksum = "33ff3df608dcabd6bdd197827ff2b8faaa6cefe0c462f7dc5e74108666a01f56" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -567,9 +577,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c59407723b1850ebaa49e46d10c2ba9c10c10b3aedf2f7e97015ee23c3f4e639" +checksum = "00e11a40c917c704888aa5aa6ffa563395123b732868d2e072ec7dd46c3d4672" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -579,9 +589,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65e3266095e6d8e8028aab5f439c6b8736c5147314f7e606c61597e014cb8a0" +checksum = "ac2bc988d7455e02dfb53460e1caa61f932b3f8452e12424e68ba8dcf60bba90" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -591,9 +601,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07429a1099cd17227abcddb91b5e38c960aaeb02a6967467f5bb561fbe716ac6" +checksum = "cdbf6d1766ca41e90ac21c4bc5cbc5e9e965978a25873c3f90b3992d905db4cb" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -602,28 +612,29 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59e0e876b20eb9debf316d3e875536f389070635250f22b5a678cf4632a3e0cf" +checksum = "ab94e446a003dcef86843eea60d05a6cec360eb8e1829e4cf388ef94d799b5cf" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", + "derive_more", "ethereum_ssz", "ethereum_ssz_derive", "serde", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", "tree_hash", "tree_hash_derive", ] [[package]] name = "alloy-rpc-types-debug" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeff305b7d10cc1c888456d023e7bb8a5ea82e9e42b951e37619b88cc1a1486d" +checksum = "977698b458738369ba5ca645d2cdb4d51ba07a81db37306ff85322853161ea3a" dependencies = [ "alloy-primitives", "derive_more", @@ -633,9 +644,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222ecadcea6aac65e75e32b6735635ee98517aa63b111849ee01ae988a71d685" +checksum = "07da696cc7fbfead4b1dda8afe408685cae80975cbb024f843ba74d9639cd0d3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -654,9 +665,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db46b0901ee16bbb68d986003c66dcb74a12f9d9b3c44f8e85d51974f2458f0f" +checksum = "a15e4831b71eea9d20126a411c1c09facf1d01d5cac84fd51d532d3c429cfc26" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -671,14 +682,14 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-rpc-types-mev" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a60d4baadd3f278faa4e2305cca095dfd4ab286e071b768ff09181d8ae215" +checksum = "4c5d8f6f2c3b68af83a32d5c7fa1353d9b2e30441a3f0b8c3c5657c603b7238c" dependencies = [ "alloy-consensus", "alloy-eips", @@ -691,23 +702,23 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f10620724bd45f80c79668a8cdbacb6974f860686998abce28f6196ae79444" +checksum = 
"fb0c800e2ce80829fca1491b3f9063c29092850dc6cf19249d5f678f0ce71bb0" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864f41befa90102d4e02327679699a7e9510930e2924c529e31476086609fa89" +checksum = "2f82e3068673a3cf93fbbc2f60a59059395cd54bbe39af895827faa5e641cc8f" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -717,9 +728,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5413814be7a22fbc81e0f04a2401fcc3eb25e56fd53b04683e8acecc6e1fe01b" +checksum = "751d1887f7d202514a82c5b3caf28ee8bd4a2ad9549e4f498b6f0bff99b52add" dependencies = [ "alloy-primitives", "arbitrary", @@ -729,9 +740,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53410a18a61916e2c073a6519499514e027b01e77eeaf96acd1df7cf96ef6bb2" +checksum = "9cf0b42ffbf558badfecf1dde0c3c5ed91f29bb7e97876d0bed008c3d5d67171" dependencies = [ "alloy-primitives", "async-trait", @@ -739,14 +750,14 @@ dependencies = [ "either", "elliptic-curve", "k256", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-signer-local" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6006c4cbfa5d08cadec1fcabea6cb56dc585a30a9fce40bcf81e307d6a71c8e" +checksum = "3e7d555ee5f27be29af4ae312be014b57c6cff9acb23fe2cf008500be6ca7e33" dependencies = [ "alloy-consensus", "alloy-network", @@ -757,7 +768,7 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror 2.0.16", + "thiserror 2.0.17", "zeroize", ] @@ -784,7 +795,7 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.11.1", + "indexmap 2.12.0", "proc-macro-error2", "proc-macro2", "quote", @@ -833,12 +844,11 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d94ee404368a3d9910dfe61b203e888c6b0e151a50e147f95da8baff9f9c7763" +checksum = "71b3deee699d6f271eab587624a9fa84d02d0755db7a95a043d52a6488d16ebe" dependencies = [ "alloy-json-rpc", - "alloy-primitives", "auto_impl", "base64 0.22.1", "derive_more", @@ -847,7 +857,7 @@ dependencies = [ "parking_lot", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tower", "tracing", @@ -857,9 +867,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2f8a6338d594f6c6481292215ee8f2fd7b986c80aba23f3f44e761a8658de78" +checksum = "1720bd2ba8fe7e65138aca43bb0f680e4e0bcbd3ca39bf9d3035c9d7d2757f24" dependencies = [ "alloy-json-rpc", "alloy-rpc-types-engine", @@ -878,9 +888,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17a37a8ca18006fa0a58c7489645619ff58cfa073f2b29c4e052c9bd114b123a" +checksum = "ea89c214c7ddd2bcad100da929d6b642bbfed85788caf3b1be473abacd3111f9" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -898,9 +908,9 @@ dependencies = [ [[package]] 
name = "alloy-transport-ws" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "679b0122b7bca9d4dc5eb2c0549677a3c53153f6e232f23f4b3ba5575f74ebde" +checksum = "571aadf0afce0d515a28b2c6352662a39cb9f48b4eeff9a5c34557d6ea126730" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -936,11 +946,10 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64c09ec565a90ed8390d82aa08cd3b22e492321b96cb4a3d4f58414683c9e2f" +checksum = "cd7ce8ed34106acd6e21942022b6a15be6454c2c3ead4d76811d3bdcd63cf771" dependencies = [ - "alloy-primitives", "darling 0.21.3", "proc-macro2", "quote", @@ -1389,7 +1398,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e6fa871e4334a622afd6bb2f611635e8083a6f5e2936c0f90f37c7ef9856298" dependencies = [ "async-channel", - "futures-lite", + "futures-lite 1.13.0", "http-types", "log", "memchr", @@ -1677,15 +1686,6 @@ dependencies = [ "wyz", ] -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - [[package]] name = "block-buffer" version = "0.10.4" @@ -1704,6 +1704,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block2" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdeb9d870516001442e364c5220d3574d2da8dc765554b4a617230d33fa58ef5" +dependencies = [ + "objc2", +] + [[package]] name = "blst" version = "0.3.15" @@ -1718,25 +1727,26 @@ dependencies = [ [[package]] name = "boa_ast" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c340fe0f0b267787095cbe35240c6786ff19da63ec7b69367ba338eace8169b" +checksum = "bc119a5ad34c3f459062a96907f53358989b173d104258891bb74f95d93747e8" dependencies = [ "bitflags 2.9.4", "boa_interner", "boa_macros", "boa_string", - "indexmap 2.11.1", + "indexmap 2.12.0", "num-bigint", "rustc-hash 2.1.1", ] [[package]] name = "boa_engine" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f620c3f06f51e65c0504ddf04978be1b814ac6586f0b45f6019801ab5efd37f9" +checksum = "e637ec52ea66d76b0ca86180c259d6c7bb6e6a6e14b2f36b85099306d8b00cc3" dependencies = [ + "aligned-vec", "arrayvec", "bitflags 2.9.4", "boa_ast", @@ -1744,61 +1754,66 @@ dependencies = [ "boa_interner", "boa_macros", "boa_parser", - "boa_profiler", "boa_string", "bytemuck", "cfg-if", + "cow-utils", "dashmap 6.1.0", + "dynify", "fast-float2", - "hashbrown 0.15.5", - "icu_normalizer 1.5.0", - "indexmap 2.11.1", + "float16", + "futures-channel", + "futures-concurrency", + "futures-lite 2.6.1", + "hashbrown 0.16.0", + "icu_normalizer", + "indexmap 2.12.0", "intrusive-collections", - "itertools 0.13.0", + "itertools 0.14.0", "num-bigint", "num-integer", "num-traits", "num_enum", - "once_cell", - "pollster", + "paste", "portable-atomic", - "rand 0.8.5", + "rand 0.9.2", "regress", "rustc-hash 2.1.1", "ryu-js", "serde", "serde_json", - "sptr", + "small_btree", "static_assertions", + "tag_ptr", "tap", "thin-vec", - "thiserror 2.0.16", + "thiserror 2.0.17", "time", + "xsum", ] [[package]] name = "boa_gc" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2425c0b7720d42d73eaa6a883fbb77a5c920da8694964a3d79a67597ac55cce2" +checksum = "f1179f690cbfcbe5364cceee5f1cb577265bb6f07b0be6f210aabe270adcf9da" dependencies = [ "boa_macros", - "boa_profiler", "boa_string", - "hashbrown 0.15.5", + "hashbrown 0.16.0", "thin-vec", ] [[package]] name = "boa_interner" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42407a3b724cfaecde8f7d4af566df4b56af32a2f11f0956f5570bb974e7f749" +checksum = "9626505d33dc63d349662437297df1d3afd9d5fc4a2b3ad34e5e1ce879a78848" dependencies = [ "boa_gc", "boa_macros", - "hashbrown 0.15.5", - "indexmap 2.11.1", + "hashbrown 0.16.0", + "indexmap 2.12.0", "once_cell", "phf", "rustc-hash 2.1.1", @@ -1807,10 +1822,12 @@ dependencies = [ [[package]] name = "boa_macros" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c" +checksum = "7f36418a46544b152632c141b0a0b7a453cd69ca150caeef83aee9e2f4b48b7d" dependencies = [ + "cfg-if", + "cow-utils", "proc-macro2", "quote", "syn 2.0.106", @@ -1819,39 +1836,33 @@ dependencies = [ [[package]] name = "boa_parser" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cc142dac798cdc6e2dbccfddeb50f36d2523bb977a976e19bdb3ae19b740804" +checksum = "02f99bf5b684f0de946378fcfe5f38c3a0fbd51cbf83a0f39ff773a0e218541f" dependencies = [ "bitflags 2.9.4", "boa_ast", "boa_interner", "boa_macros", - "boa_profiler", "fast-float2", - "icu_properties 1.5.1", + "icu_properties", "num-bigint", "num-traits", "regress", "rustc-hash 2.1.1", ] -[[package]] -name = "boa_profiler" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4064908e7cdf9b6317179e9b04dcb27f1510c1c144aeab4d0394014f37a0f922" - [[package]] name = "boa_string" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7debc13fbf7997bf38bf8e9b20f1ad5e2a7d27a900e1f6039fe244ce30f589b5" +checksum = "45ce9d7aa5563a2e14eab111e2ae1a06a69a812f6c0c3d843196c9d03fbef440" dependencies = [ "fast-float2", + "itoa", "paste", "rustc-hash 2.1.1", - "sptr", + "ryu-js", "static_assertions", ] @@ -1891,7 +1902,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ - "sha2 0.10.9", + "sha2", "tinyvec", ] @@ -1926,18 +1937,18 @@ checksum = "175812e0be2bccb6abe50bb8d566126198344f707e304f45c648fd8f2cc0365e" [[package]] name = "bytemuck" -version = "1.23.2" +version = "1.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3995eaeebcdf32f91f980d360f78732ddc061097ab4e39991ae7a6ace9194677" +checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.10.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f154e572231cb6ba2bd1176980827e3d5dc04cc183a75dea38109fbdd672d29" +checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" dependencies = [ "proc-macro2", "quote", @@ -2017,7 +2028,7 @@ dependencies = [ "semver 1.0.26", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -2257,7 +2268,7 @@ dependencies = [ "hmac", "k256", "serde", - "sha2 0.10.9", + 
"sha2", "thiserror 1.0.69", ] @@ -2273,7 +2284,7 @@ dependencies = [ "once_cell", "pbkdf2", "rand 0.8.5", - "sha2 0.10.9", + "sha2", "thiserror 1.0.69", ] @@ -2291,7 +2302,7 @@ dependencies = [ "generic-array", "ripemd", "serde", - "sha2 0.10.9", + "sha2", "sha3", "thiserror 1.0.69", ] @@ -2309,7 +2320,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ "lazy_static", - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -2445,6 +2456,16 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "cordyceps" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688d7fbb8092b8de775ef2536f36c8c31f2bc4006ece2e8d8ad2d17d00ce0a2a" +dependencies = [ + "loom", + "tracing", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -2480,6 +2501,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "cow-utils" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "417bef24afe1460300965a25ff4a24b8b45ad011948302ec221e8a0a81eb2c79" + [[package]] name = "cpufeatures" version = "0.2.17" @@ -2661,6 +2688,17 @@ dependencies = [ "cipher", ] +[[package]] +name = "ctrlc" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73736a89c4aff73035ba2ed2e565061954da00d4970fc9ac25dcc85a2a20d790" +dependencies = [ + "dispatch2", + "nix 0.30.1", + "windows-sys 0.61.0", +] + [[package]] name = "curve25519-dalek" version = "4.1.3" @@ -2688,6 +2726,19 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "custom-hardforks" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "reth-chainspec", + "reth-network-peers", + "serde", +] + [[package]] name = "darling" version = "0.20.11" @@ -2935,6 +2986,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "diatomic-waker" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c" + [[package]] name = "diff" version = "0.1.13" @@ -2956,7 +3013,7 @@ version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", + "block-buffer", "const-oid", "crypto-common", "subtle", @@ -2990,7 +3047,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.2", - "windows-sys 0.59.0", + "windows-sys 0.61.0", ] [[package]] @@ -3006,9 +3063,9 @@ dependencies = [ [[package]] name = "discv5" -version = "0.9.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4b4e7798d2ff74e29cee344dc490af947ae657d6ab5273dde35d58ce06a4d71" +checksum = "f170f4f6ed0e1df52bf43b403899f0081917ecf1500bfe312505cc3b515a8899" dependencies = [ "aes", "aes-gcm", @@ -3037,6 +3094,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "dispatch2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec" +dependencies = [ + "bitflags 2.9.4", + "block2", + "libc", + "objc2", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -3075,6 +3144,26 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" +[[package]] +name = "dynify" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81acb15628a3e22358bf73de5e7e62360b8a777dbcb5fc9ac7dfa9ae73723747" +dependencies = [ + "dynify-macros", +] + +[[package]] +name = "dynify-macros" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec431cd708430d5029356535259c5d645d60edd3d39c54e5eea9782d46caa7d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "ecdsa" version = "0.16.9" @@ -3110,7 +3199,7 @@ dependencies = [ "ed25519", "rand_core 0.6.4", "serde", - "sha2 0.10.9", + "sha2", "subtle", "zeroize", ] @@ -3129,7 +3218,7 @@ dependencies = [ [[package]] name = "ef-test-runner" -version = "1.8.2" +version = "1.9.0" dependencies = [ "clap", "ef-tests", @@ -3137,7 +3226,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3164,7 +3253,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "walkdir", ] @@ -3274,6 +3363,26 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "equator" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4711b213838dfee0117e3be6ac926007d7f433d7bbe33595975d4190cb07e6fc" +dependencies = [ + "equator-macro", +] + +[[package]] +name = "equator-macro" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "equivalent" version = "1.0.2" @@ -3287,7 +3396,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -3307,7 +3416,7 @@ checksum = "c853bd72c9e5787f8aafc3df2907c2ed03cff3150c3acd94e2e53a98ab70a8ab" dependencies = [ "cpufeatures", "ring", - "sha2 0.10.9", + "sha2", ] [[package]] @@ -3371,7 +3480,7 @@ dependencies = [ "reth-ethereum", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -3414,7 +3523,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -3460,7 +3569,7 @@ dependencies = [ "reth-payload-builder", "reth-tracing", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] @@ -3514,7 +3623,6 @@ dependencies = [ "op-alloy-rpc-types", "op-alloy-rpc-types-engine", "op-revm", - "reth-chain-state", "reth-codecs", "reth-db-api", "reth-engine-primitives", @@ -3531,7 +3639,7 @@ dependencies = [ "revm", "revm-primitives", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -3617,7 +3725,7 @@ dependencies = [ [[package]] name = "example-full-contract-state" -version = "1.8.2" +version = "1.9.0" dependencies = [ "eyre", "reth-ethereum", @@ -3756,7 +3864,7 @@ dependencies = [ [[package]] name = "exex-subscription" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "clap", @@ -3873,6 +3981,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + [[package]] name = "flate2" version = "1.1.2" @@ -3883,6 +3997,16 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "float16" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bffafbd079d520191c7c2779ae9cf757601266cf4167d3f659ff09617ff8483" +dependencies = [ + "cfg-if", + "rustc_version 0.2.3", +] + [[package]] name = "fnv" version = "1.0.7" @@ -3955,6 +4079,19 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-buffered" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8e0e1f38ec07ba4abbde21eed377082f17ccb988be9d988a5adbf4bafc118fd" +dependencies = [ + "cordyceps", + "diatomic-waker", + "futures-core", + "pin-project-lite", + "spin", +] + [[package]] name = "futures-channel" version = "0.3.31" @@ -3965,6 +4102,21 @@ dependencies = [ "futures-sink", ] +[[package]] +name = "futures-concurrency" +version = "7.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eb68017df91f2e477ed4bea586c59eaecaa47ed885a770d0444e21e62572cd2" +dependencies = [ + "fixedbitset", + "futures-buffered", + "futures-core", + "futures-lite 2.6.1", + "pin-project", + "slab", + "smallvec", +] + [[package]] name = "futures-core" version = "0.3.31" @@ -4003,6 +4155,19 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" +dependencies = [ + "fastrand 2.3.0", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.31" @@ -4080,7 +4245,6 @@ version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ - "serde", "typenum", "version_check", "zeroize", @@ -4238,7 +4402,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.11.1", + "indexmap 2.12.0", "slab", "tokio", "tokio-util", @@ -4299,6 +4463,8 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" dependencies = [ + "allocator-api2", + "equivalent", "foldhash 0.2.0", "serde", ] @@ -4371,7 +4537,7 @@ dependencies = [ "rand 0.9.2", "ring", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tinyvec", "tokio", "tracing", @@ -4395,7 +4561,7 @@ dependencies = [ "resolv-conf", "serde", "smallvec", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -4467,7 +4633,7 @@ dependencies = [ "anyhow", "async-channel", "base64 0.13.1", - "futures-lite", + "futures-lite 1.13.0", "infer", "pin-project-lite", "rand 0.7.3", @@ -4554,6 +4720,19 @@ dependencies = [ "webpki-roots 1.0.2", ] +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "hyper-tls" version = "0.6.0" @@ -4608,7 +4787,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.58.0", + "windows-core 0.57.0", ] [[package]] @@ -4620,18 +4799,6 @@ dependencies = [ "cc", ] -[[package]] -name = "icu_collections" -version = 
"1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" -dependencies = [ - "displaydoc", - "yoke 0.7.5", - "zerofrom", - "zerovec 0.10.4", -] - [[package]] name = "icu_collections" version = "2.0.0" @@ -4640,9 +4807,9 @@ checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", "potential_utf", - "yoke 0.8.0", + "yoke", "zerofrom", - "zerovec 0.11.4", + "zerovec", ] [[package]] @@ -4652,61 +4819,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", - "litemap 0.8.0", - "tinystr 0.8.1", - "writeable 0.6.1", - "zerovec 0.11.4", -] - -[[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap 0.7.5", - "tinystr 0.7.6", - "writeable 0.5.5", - "zerovec 0.10.4", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider 1.5.0", - "tinystr 0.7.6", - "zerovec 0.10.4", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" - -[[package]] -name = "icu_normalizer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" -dependencies = [ - "displaydoc", - "icu_collections 1.5.0", - "icu_normalizer_data 1.5.1", - "icu_properties 1.5.1", - "icu_provider 1.5.0", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec 0.10.4", + "litemap", + "tinystr", + "writeable", + "zerovec", ] [[package]] @@ -4716,41 +4832,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", - "icu_collections 2.0.0", - "icu_normalizer_data 2.0.0", - "icu_properties 2.0.1", - "icu_provider 2.0.0", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", "smallvec", - "zerovec 0.11.4", + "utf16_iter", + "write16", + "zerovec", ] -[[package]] -name = "icu_normalizer_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" - [[package]] name = "icu_normalizer_data" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" -[[package]] -name = "icu_properties" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" -dependencies = [ - "displaydoc", - "icu_collections 1.5.0", - "icu_locid_transform", - "icu_properties_data 1.5.1", - "icu_provider 1.5.0", - "tinystr 0.7.6", - "zerovec 0.10.4", -] - [[package]] name = "icu_properties" version = "2.0.1" @@ -4758,44 +4855,21 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", - "icu_collections 2.0.0", + "icu_collections", "icu_locale_core", - "icu_properties_data 2.0.1", - "icu_provider 2.0.0", + "icu_properties_data", + "icu_provider", "potential_utf", "zerotrie", - "zerovec 0.11.4", + "zerovec", ] -[[package]] -name = "icu_properties_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" - [[package]] name = "icu_properties_data" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" -[[package]] -name = "icu_provider" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr 0.7.6", - "writeable 0.5.5", - "yoke 0.7.5", - "zerofrom", - "zerovec 0.10.4", -] - [[package]] name = "icu_provider" version = "2.0.0" @@ -4805,23 +4879,12 @@ dependencies = [ "displaydoc", "icu_locale_core", "stable_deref_trait", - "tinystr 0.8.1", - "writeable 0.6.1", - "yoke 0.8.0", + "tinystr", + "writeable", + "yoke", "zerofrom", "zerotrie", - "zerovec 0.11.4", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", + "zerovec", ] [[package]] @@ -4847,15 +4910,15 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ - "icu_normalizer 2.0.0", - "icu_properties 2.0.1", + "icu_normalizer", + "icu_properties", ] [[package]] name = "if-addrs" -version = "0.13.4" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69b2eeee38fef3aa9b4cc5f1beea8a2444fc00e7377cafae396de3f5c2065e24" +checksum = "bf39cc0423ee66021dc5eccface85580e4a001e0c5288bae8bea7ecb69225e90" dependencies = [ "libc", "windows-sys 0.59.0", @@ -4919,14 +4982,15 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206a8042aec68fa4a62e8d3f7aa4ceb508177d9324faf261e1959e495b7a1921" +checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.5", + "hashbrown 0.16.0", "serde", + "serde_core", ] [[package]] @@ -5064,7 +5128,7 @@ checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -5183,7 +5247,7 @@ dependencies = [ "rustls-pki-types", "rustls-platform-verifier", "soketto", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-rustls", "tokio-util", @@ -5211,7 +5275,7 @@ dependencies = [ "rustc-hash 2.1.1", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tower", @@ -5236,7 +5300,7 @@ dependencies = [ "rustls-platform-verifier", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 
2.0.17", "tokio", "tower", "url", @@ -5274,7 +5338,7 @@ dependencies = [ "serde", "serde_json", "soketto", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -5291,7 +5355,7 @@ dependencies = [ "http", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -5346,7 +5410,7 @@ dependencies = [ "elliptic-curve", "once_cell", "serdect", - "sha2 0.10.9", + "sha2", "signature", ] @@ -5442,8 +5506,8 @@ dependencies = [ "k256", "multihash", "quick-protobuf", - "sha2 0.10.9", - "thiserror 2.0.16", + "sha2", + "thiserror 2.0.17", "tracing", "zeroize", ] @@ -5470,52 +5534,6 @@ dependencies = [ "redox_syscall", ] -[[package]] -name = "libsecp256k1" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79019718125edc905a079a70cfa5f3820bc76139fc91d6f9abc27ea2a887139" -dependencies = [ - "arrayref", - "base64 0.22.1", - "digest 0.9.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.8.5", - "serde", - "sha2 0.9.9", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle", -] - -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" -dependencies = [ - "libsecp256k1-core", -] - [[package]] name = "libz-sys" version = "1.1.22" @@ -5556,12 +5574,6 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" -[[package]] -name = "litemap" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" - [[package]] name = "litemap" version = "0.8.0" @@ -5735,7 +5747,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd7399781913e5393588a8d8c6a2867bf85fb38eaf2502fdce465aad2dc6f034" dependencies = [ "base64 0.22.1", - "indexmap 2.11.1", + "indexmap 2.12.0", "metrics", "metrics-util", "quanta", @@ -5767,7 +5779,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.5", - "indexmap 2.11.1", + "indexmap 2.12.0", "metrics", "ordered-float", "quanta", @@ -5791,7 +5803,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -5958,6 +5970,30 @@ dependencies = [ "tempfile", ] +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.9.4", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "nix" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" +dependencies = [ + "bitflags 2.9.4", + "cfg-if", + "cfg_aliases", + "libc", +] + [[package]] name = 
"nom" version = "7.1.3" @@ -6147,6 +6183,21 @@ dependencies = [ "smallvec", ] +[[package]] +name = "objc2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c2599ce0ec54857b29ce62166b0ed9b4f6f1a70ccc9a71165b6154caca8c05" +dependencies = [ + "objc2-encode", +] + +[[package]] +name = "objc2-encode" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33" + [[package]] name = "object" version = "0.36.7" @@ -6180,9 +6231,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "op-alloy-consensus" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a501241474c3118833d6195312ae7eb7cc90bbb0d5f524cbb0b06619e49ff67" +checksum = "e42e9de945efe3c2fbd207e69720c9c1af2b8caa6872aee0e216450c25a3ca70" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6195,7 +6246,7 @@ dependencies = [ "derive_more", "serde", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -6206,9 +6257,9 @@ checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc" [[package]] name = "op-alloy-network" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f80108e3b36901200a4c5df1db1ee9ef6ce685b59ea79d7be1713c845e3765da" +checksum = "9c9da49a2812a0189dd05e81e4418c3ae13fd607a92654107f02ebad8e91ed9e" dependencies = [ "alloy-consensus", "alloy-network", @@ -6222,9 +6273,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8eb878fc5ea95adb5abe55fb97475b3eb0dcc77dfcd6f61bd626a68ae0bdba1" +checksum = "b62ceb771ab9323647093ea2e58dc7f25289a1b95cbef2faa2620f6ca2dee4d9" dependencies = [ "alloy-primitives", "jsonrpsee", @@ -6232,9 +6283,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "753d6f6b03beca1ba9cbd344c05fee075a2ce715ee9d61981c10b9c764a824a2" +checksum = "9cd1eb7bddd2232856ba9d259320a094f9edf2b9061acfe5966e7960208393e6" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6247,14 +6298,14 @@ dependencies = [ "op-alloy-consensus", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "op-alloy-rpc-types-engine" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14e50c94013a1d036a529df259151991dbbd6cf8dc215e3b68b784f95eec60e6" +checksum = "5429622150d18d8e6847a701135082622413e2451b64d03f979415d764566bef" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6269,12 +6320,12 @@ dependencies = [ "op-alloy-consensus", "serde", "snap", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "op-reth" -version = "1.8.2" +version = "1.9.0" dependencies = [ "clap", "reth-cli-util", @@ -6292,8 +6343,8 @@ dependencies = [ [[package]] name = "op-revm" -version = "10.1.0" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "12.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "auto_impl", "revm", @@ -6352,39 +6403,37 @@ dependencies = [ [[package]] name = "opentelemetry" -version = 
"0.29.1" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e87237e2775f74896f9ad219d26a2081751187eb7c9f5c58dde20a23b95d16c" +checksum = "b84bcd6ae87133e903af7ef497404dda70c60d0ea14895fc8a5e6722754fc2a0" dependencies = [ "futures-core", "futures-sink", "js-sys", "pin-project-lite", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "opentelemetry-http" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46d7ab32b827b5b495bd90fa95a6cb65ccc293555dcc3199ae2937d2d237c8ed" +checksum = "d7a6d09a73194e6b66df7c8f1b680f156d916a1a942abf2de06823dd02b7855d" dependencies = [ "async-trait", "bytes", "http", "opentelemetry", "reqwest", - "tracing", ] [[package]] name = "opentelemetry-otlp" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d899720fe06916ccba71c01d04ecd77312734e2de3467fd30d9d580c8ce85656" +checksum = "7a2366db2dca4d2ad033cad11e6ee42844fd727007af5ad04a1730f4cb8163bf" dependencies = [ - "futures-core", "http", "opentelemetry", "opentelemetry-http", @@ -6392,44 +6441,44 @@ dependencies = [ "opentelemetry_sdk", "prost", "reqwest", - "thiserror 2.0.16", + "thiserror 2.0.17", + "tokio", + "tonic", "tracing", ] [[package]] name = "opentelemetry-proto" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c40da242381435e18570d5b9d50aca2a4f4f4d8e146231adb4e7768023309b3" +checksum = "a7175df06de5eaee9909d4805a3d07e28bb752c34cab57fa9cff549da596b30f" dependencies = [ "opentelemetry", "opentelemetry_sdk", "prost", "tonic", + "tonic-prost", ] [[package]] name = "opentelemetry-semantic-conventions" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b29a9f89f1a954936d5aa92f19b2feec3c8f3971d3e96206640db7f9706ae3" +checksum = "e62e29dfe041afb8ed2a6c9737ab57db4907285d999ef8ad3a59092a36bdc846" [[package]] name = "opentelemetry_sdk" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afdefb21d1d47394abc1ba6c57363ab141be19e27cc70d0e422b7f303e4d290b" +checksum = "e14ae4f5991976fd48df6d843de219ca6d31b01daaab2dad5af2badeded372bd" dependencies = [ "futures-channel", "futures-executor", "futures-util", - "glob", "opentelemetry", "percent-encoding", "rand 0.9.2", - "serde_json", - "thiserror 2.0.16", - "tracing", + "thiserror 2.0.17", ] [[package]] @@ -6456,7 +6505,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "primeorder", - "sha2 0.10.9", + "sha2", ] [[package]] @@ -6567,7 +6616,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21e0a3a33733faeaf8651dfee72dd0f388f0c8e5ad496a3478fa5a922f49cfa8" dependencies = [ "memchr", - "thiserror 2.0.16", + "thiserror 2.0.17", "ucd-trie", ] @@ -6583,9 +6632,9 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" dependencies = [ "phf_macros", "phf_shared", @@ -6594,19 +6643,19 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" +checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737" dependencies = [ + "fastrand 2.3.0", "phf_shared", - "rand 0.8.5", ] [[package]] name = "phf_macros" -version = "0.11.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" +checksum = "812f032b54b1e759ccd5f8b6677695d5268c588701effba24601f6932f8269ef" dependencies = [ "phf_generator", "phf_shared", @@ -6617,9 +6666,9 @@ dependencies = [ [[package]] name = "phf_shared" -version = "0.11.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266" dependencies = [ "siphasher", ] @@ -6709,12 +6758,6 @@ dependencies = [ "plotters-backend", ] -[[package]] -name = "pollster" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f3a9f18d041e6d0e102a0a46750538147e5e8992d3b4873aaafee2520b00ce3" - [[package]] name = "polyval" version = "0.6.2" @@ -6739,7 +6782,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" dependencies = [ - "zerovec 0.11.4", + "zerovec", ] [[package]] @@ -6916,9 +6959,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" dependencies = [ "bytes", "prost-derive", @@ -6926,9 +6969,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", "itertools 0.14.0", @@ -6992,7 +7035,7 @@ dependencies = [ "rustc-hash 2.1.1", "rustls", "socket2 0.6.0", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", "web-time", @@ -7013,7 +7056,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.16", + "thiserror 2.0.17", "tinyvec", "tracing", "web-time", @@ -7030,14 +7073,14 @@ dependencies = [ "once_cell", "socket2 0.6.0", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] name = "quote" -version = "1.0.40" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" dependencies = [ "proc-macro2", ] @@ -7259,7 +7302,7 @@ checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" dependencies = [ "getrandom 0.2.16", "libredox", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -7378,7 +7421,7 @@ checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3" [[package]] name = "reth" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7425,7 +7468,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.8.2" +version = "1.9.0" 
dependencies = [ "alloy-consensus", "alloy-eips", @@ -7448,7 +7491,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -7479,15 +7522,41 @@ dependencies = [ "reth-tracing", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tower", "tracing", ] +[[package]] +name = "reth-bench-compare" +version = "1.9.0" +dependencies = [ + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-eth", + "chrono", + "clap", + "csv", + "ctrlc", + "eyre", + "nix 0.29.0", + "reth-chainspec", + "reth-cli-runner", + "reth-cli-util", + "reth-node-core", + "reth-tracing", + "serde", + "serde_json", + "shellexpand", + "shlex", + "tokio", + "tracing", +] + [[package]] name = "reth-chain-state" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7519,7 +7588,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7539,7 +7608,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-genesis", "clap", @@ -7552,7 +7621,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7624,6 +7693,7 @@ dependencies = [ "serde", "serde_json", "tar", + "tempfile", "tokio", "tokio-stream", "toml", @@ -7633,7 +7703,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.8.2" +version = "1.9.0" dependencies = [ "reth-tasks", "tokio", @@ -7642,7 +7712,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7655,14 +7725,14 @@ dependencies = [ "secp256k1 0.30.0", "serde", "snmalloc-rs", - "thiserror 2.0.16", + "thiserror 2.0.17", "tikv-jemallocator", "tracy-client", ] [[package]] name = "reth-codecs" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7686,7 +7756,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.8.2" +version = "1.9.0" dependencies = [ "proc-macro2", "quote", @@ -7696,7 +7766,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "eyre", @@ -7713,19 +7783,19 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", "auto_impl", "reth-execution-types", "reth-primitives-traits", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-consensus-common" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7739,7 +7809,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7764,7 +7834,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7793,12 +7863,12 @@ dependencies = [ "strum 0.27.2", "sysinfo", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-db-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7829,7 +7899,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", 
"alloy-genesis", @@ -7853,13 +7923,13 @@ dependencies = [ "reth-trie-db", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "reth-db-models" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7875,14 +7945,13 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", "assert_matches", "discv5", "enr", - "generic-array", "itertools 0.14.0", "parking_lot", "rand 0.8.5", @@ -7894,7 +7963,7 @@ dependencies = [ "schnellru", "secp256k1 0.30.0", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -7902,7 +7971,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7920,14 +7989,14 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1 0.30.0", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-dns-discovery" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7947,7 +8016,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -7955,7 +8024,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7984,7 +8053,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -7993,7 +8062,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8050,7 +8119,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.8.2" +version = "1.9.0" dependencies = [ "aes", "alloy-primitives", @@ -8062,25 +8131,23 @@ dependencies = [ "ctr", "digest 0.10.7", "futures", - "generic-array", "hmac", "pin-project", "rand 0.8.5", "reth-network-peers", "secp256k1 0.30.0", - "sha2 0.10.9", + "sha2", "sha3", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", "tracing", - "typenum", ] [[package]] name = "reth-engine-local" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8094,7 +8161,7 @@ dependencies = [ "reth-optimism-chainspec", "reth-payload-builder", "reth-payload-primitives", - "reth-provider", + "reth-storage-api", "reth-transaction-pool", "scroll-alloy-rpc-types-engine", "tokio", @@ -8104,7 +8171,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8122,13 +8189,13 @@ dependencies = [ "reth-primitives-traits", "reth-trie-common", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] [[package]] name = "reth-engine-service" -version = "1.8.2" +version = "1.9.0" dependencies = [ "futures", "pin-project", @@ -8157,7 +8224,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8212,7 +8279,6 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "reth-trie", - "reth-trie-db", "reth-trie-parallel", "reth-trie-sparse", "reth-trie-sparse-parallel", @@ -8222,14 +8288,14 @@ dependencies = [ "schnellru", "serde_json", "smallvec", - "thiserror 2.0.16", + 
"thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-engine-util" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8256,7 +8322,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8272,13 +8338,13 @@ dependencies = [ "snap", "tempfile", "test-case", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] [[package]] name = "reth-era-downloader" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "bytes", @@ -8287,7 +8353,7 @@ dependencies = [ "futures-util", "reqwest", "reth-fs-util", - "sha2 0.10.9", + "sha2", "tempfile", "test-case", "tokio", @@ -8295,7 +8361,7 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8321,17 +8387,17 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.8.2" +version = "1.9.0" dependencies = [ "reth-consensus", "reth-execution-errors", "reth-storage-errors", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-eth-wire" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8360,7 +8426,7 @@ dependencies = [ "serde", "snap", "test-fuzz", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -8369,7 +8435,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8389,12 +8455,12 @@ dependencies = [ "reth-ethereum-primitives", "reth-primitives-traits", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-ethereum" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -8434,7 +8500,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.8.2" +version = "1.9.0" dependencies = [ "clap", "eyre", @@ -8450,13 +8516,15 @@ dependencies = [ "reth-node-metrics", "reth-rpc-server-types", "reth-tracing", + "reth-tracing-otlp", "tempfile", "tracing", + "url", ] [[package]] name = "reth-ethereum-consensus" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8472,7 +8540,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8484,13 +8552,13 @@ dependencies = [ "reth-primitives-traits", "serde", "serde_json", - "sha2 0.10.9", - "thiserror 2.0.16", + "sha2", + "thiserror 2.0.17", ] [[package]] name = "reth-ethereum-forks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8503,7 +8571,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8531,7 +8599,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8558,7 +8626,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "rayon", @@ -8568,7 +8636,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8593,7 +8661,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = 
"1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8617,19 +8685,19 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-evm", "alloy-primitives", "alloy-rlp", "nybbles", "reth-storage-errors", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-execution-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8649,7 +8717,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8685,7 +8753,7 @@ dependencies = [ "rmp-serde", "secp256k1 0.30.0", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-util", "tracing", @@ -8693,7 +8761,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "eyre", @@ -8718,13 +8786,13 @@ dependencies = [ "reth-tasks", "reth-transaction-pool", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] [[package]] name = "reth-exex-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8741,18 +8809,19 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.8.2" +version = "1.9.0" dependencies = [ "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-invalid-block-hooks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", @@ -8760,23 +8829,29 @@ dependencies = [ "futures", "jsonrpsee", "pretty_assertions", + "reth-chainspec", "reth-engine-primitives", + "reth-ethereum-primitives", "reth-evm", + "reth-evm-ethereum", "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-api", + "reth-testing-utils", "reth-tracing", "reth-trie", + "revm", "revm-bytecode", "revm-database", "serde", "serde_json", + "tempfile", ] [[package]] name = "reth-ipc" -version = "1.8.2" +version = "1.9.0" dependencies = [ "bytes", "futures", @@ -8788,7 +8863,7 @@ dependencies = [ "reth-tracing", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -8798,7 +8873,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.8.2" +version = "1.9.0" dependencies = [ "bitflags 2.9.4", "byteorder", @@ -8810,13 +8885,13 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "reth-mdbx-sys" -version = "1.8.2" +version = "1.9.0" dependencies = [ "bindgen 0.71.1", "cc", @@ -8824,7 +8899,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.8.2" +version = "1.9.0" dependencies = [ "futures", "metrics", @@ -8835,28 +8910,28 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.8.2" +version = "1.9.0" dependencies = [ "futures-util", "if-addrs", "reqwest", "reth-tracing", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-network" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8907,7 +8982,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "smallvec", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -8917,7 +8992,7 @@ dependencies = [ 
[[package]] name = "reth-network-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8935,14 +9010,14 @@ dependencies = [ "reth-primitives-traits", "reth-tokio-util", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", ] [[package]] name = "reth-network-p2p" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8964,7 +9039,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8974,14 +9049,14 @@ dependencies = [ "secp256k1 0.30.0", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "url", ] [[package]] name = "reth-network-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -8994,7 +9069,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.8.2" +version = "1.9.0" dependencies = [ "anyhow", "bincode 1.3.3", @@ -9005,14 +9080,14 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", "zstd", ] [[package]] name = "reth-node-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -9035,7 +9110,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9106,7 +9181,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9143,12 +9218,12 @@ dependencies = [ "reth-storage-api", "reth-storage-errors", "reth-tracing", + "reth-tracing-otlp", "reth-transaction-pool", "secp256k1 0.30.0", "serde", "shellexpand", "strum 0.27.2", - "thiserror 2.0.16", "tokio", "toml", "tracing", @@ -9159,7 +9234,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-contract", @@ -9212,7 +9287,7 @@ dependencies = [ [[package]] name = "reth-node-ethstats" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9225,7 +9300,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-tungstenite", @@ -9235,7 +9310,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9258,7 +9333,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.8.2" +version = "1.9.0" dependencies = [ "eyre", "http", @@ -9280,7 +9355,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9291,7 +9366,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.8.2" +version = "1.9.0" dependencies = [ "reth-chainspec", "reth-cli-util", @@ -9331,13 +9406,14 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-hardforks", + "alloy-op-hardforks", "alloy-primitives", "derive_more", "miniz_oxide", @@ -9353,12 +9429,12 @@ dependencies = [ "serde", "serde_json", "tar-no-std", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-optimism-cli" -version = "1.8.2" +version = "1.9.0" dependencies 
= [ "alloy-consensus", "alloy-eips", @@ -9398,16 +9474,18 @@ dependencies = [ "reth-static-file", "reth-static-file-types", "reth-tracing", + "reth-tracing-otlp", "serde", "tempfile", "tokio", "tokio-util", "tracing", + "url", ] [[package]] name = "reth-optimism-consensus" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9432,13 +9510,13 @@ dependencies = [ "reth-trie", "reth-trie-common", "revm", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "reth-optimism-evm" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9462,12 +9540,12 @@ dependencies = [ "reth-rpc-eth-api", "reth-storage-errors", "revm", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-optimism-flashblocks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9480,20 +9558,20 @@ dependencies = [ "futures-util", "metrics", "reth-chain-state", + "reth-engine-primitives", "reth-errors", "reth-evm", "reth-execution-types", "reth-metrics", - "reth-node-api", "reth-optimism-evm", "reth-optimism-payload-builder", "reth-optimism-primitives", + "reth-payload-primitives", "reth-primitives-traits", "reth-revm", "reth-rpc-eth-types", "reth-storage-api", "reth-tasks", - "reth-trie", "ringbuffer", "serde", "serde_json", @@ -9506,7 +9584,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9516,7 +9594,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9574,19 +9652,20 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", + "alloy-evm", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", "alloy-rpc-types-engine", "derive_more", + "either", "op-alloy-consensus", "op-alloy-rpc-types-engine", "reth-basic-payload-builder", - "reth-chain-state", "reth-chainspec", "reth-evm", "reth-execution-types", @@ -9605,14 +9684,14 @@ dependencies = [ "reth-transaction-pool", "revm", "serde", - "sha2 0.10.9", - "thiserror 2.0.16", + "sha2", + "thiserror 2.0.17", "tracing", ] [[package]] name = "reth-optimism-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9639,11 +9718,12 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-json-rpc", + "alloy-op-hardforks", "alloy-primitives", "alloy-rpc-client", "alloy-rpc-types-debug", @@ -9682,7 +9762,6 @@ dependencies = [ "reth-primitives-traits", "reth-rpc", "reth-rpc-api", - "reth-rpc-convert", "reth-rpc-engine-api", "reth-rpc-eth-api", "reth-rpc-eth-types", @@ -9692,7 +9771,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tower", @@ -9701,7 +9780,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "reth-codecs", @@ -9713,7 +9792,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9743,14 +9822,14 @@ dependencies = [ "reth-storage-api", "reth-transaction-pool", "serde", - "thiserror 2.0.16", + 
"thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-payload-builder" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9770,7 +9849,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "pin-project", "reth-payload-primitives", @@ -9781,7 +9860,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9793,16 +9872,18 @@ dependencies = [ "reth-chain-state", "reth-chainspec", "reth-errors", + "reth-execution-types", "reth-primitives-traits", + "reth-trie-common", "scroll-alloy-rpc-types-engine", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] [[package]] name = "reth-payload-util" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9811,7 +9892,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -9820,7 +9901,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9842,7 +9923,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9875,12 +9956,12 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-provider" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9903,7 +9984,6 @@ dependencies = [ "reth-errors", "reth-ethereum-engine-primitives", "reth-ethereum-primitives", - "reth-evm", "reth-execution-types", "reth-fs-util", "reth-metrics", @@ -9916,6 +9996,7 @@ dependencies = [ "reth-storage-api", "reth-storage-errors", "reth-testing-utils", + "reth-tracing", "reth-trie", "reth-trie-db", "revm-database", @@ -9929,16 +10010,14 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.8.2" +version = "1.9.0" dependencies = [ - "alloy-consensus", "alloy-eips", "alloy-primitives", "assert_matches", "itertools 0.14.0", "metrics", "rayon", - "reth-chainspec", "reth-config", "reth-db", "reth-db-api", @@ -9954,14 +10033,18 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "rustc-hash 2.1.1", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] +[[package]] +name = "reth-prune-db" +version = "1.9.0" + [[package]] name = "reth-prune-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -9973,13 +10056,14 @@ dependencies = [ "reth-codecs", "serde", "serde_json", - "thiserror 2.0.16", + "strum 0.27.2", + "thiserror 2.0.17", "toml", ] [[package]] name = "reth-ress-protocol" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10005,7 +10089,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10031,7 +10115,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10045,7 +10129,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10118,8 +10202,8 @@ dependencies = [ 
"revm-primitives", "serde", "serde_json", - "sha2 0.10.9", - "thiserror 2.0.16", + "sha2", + "thiserror 2.0.17", "tokio", "tokio-stream", "tower", @@ -10129,7 +10213,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-genesis", @@ -10156,7 +10240,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10175,7 +10259,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-network", @@ -10220,7 +10304,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-util", "tower", @@ -10230,7 +10314,7 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-json-rpc", @@ -10257,12 +10341,12 @@ dependencies = [ "scroll-alloy-evm", "scroll-alloy-rpc-types", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-rpc-e2e-tests" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-genesis", "alloy-rpc-types-engine", @@ -10282,7 +10366,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10311,14 +10395,14 @@ dependencies = [ "reth-testing-utils", "reth-transaction-pool", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-rpc-eth-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10362,7 +10446,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10401,7 +10485,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -10409,7 +10493,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10426,7 +10510,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10441,7 +10525,7 @@ dependencies = [ [[package]] name = "reth-scroll-chainspec" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -10465,7 +10549,7 @@ dependencies = [ [[package]] name = "reth-scroll-cli" -version = "1.8.2" +version = "1.9.0" dependencies = [ "clap", "eyre", @@ -10490,7 +10574,7 @@ dependencies = [ [[package]] name = "reth-scroll-consensus" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10504,13 +10588,13 @@ dependencies = [ "reth-scroll-primitives", "scroll-alloy-consensus", "scroll-alloy-hardforks", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "reth-scroll-engine-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10532,12 +10616,12 @@ dependencies = [ "scroll-alloy-hardforks", "scroll-alloy-rpc-types-engine", "serde", - "sha2 0.10.9", + "sha2", ] [[package]] name = "reth-scroll-evm" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10561,13 +10645,13 @@ dependencies = [ "scroll-alloy-consensus", 
"scroll-alloy-evm", "scroll-alloy-hardforks", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "reth-scroll-forks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -10580,7 +10664,7 @@ dependencies = [ [[package]] name = "reth-scroll-node" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10633,7 +10717,7 @@ dependencies = [ [[package]] name = "reth-scroll-payload" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10657,13 +10741,13 @@ dependencies = [ "reth-transaction-pool", "revm", "scroll-alloy-hardforks", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "reth-scroll-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10684,7 +10768,7 @@ dependencies = [ [[package]] name = "reth-scroll-rpc" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10717,14 +10801,14 @@ dependencies = [ "scroll-alloy-hardforks", "scroll-alloy-network", "scroll-alloy-rpc-types", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-scroll-txpool" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10750,7 +10834,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10800,14 +10884,14 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-stages-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10828,7 +10912,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -10836,7 +10920,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -10852,7 +10936,7 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10860,6 +10944,7 @@ dependencies = [ "alloy-rpc-types-debug", "alloy-trie", "itertools 0.14.0", + "k256", "reth-chainspec", "reth-consensus", "reth-errors", @@ -10870,14 +10955,15 @@ dependencies = [ "reth-revm", "reth-trie-common", "reth-trie-sparse", + "secp256k1 0.30.0", "serde", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-static-file" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "assert_matches", @@ -10900,19 +10986,20 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "clap", "derive_more", "reth-nippy-jar", "serde", + "serde_json", "strum 0.27.2", ] [[package]] name = "reth-storage-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10930,11 +11017,12 @@ dependencies = [ "reth-storage-errors", "reth-trie-common", "revm-database", + "serde_json", ] [[package]] name = "reth-storage-errors" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10944,12 +11032,12 @@ dependencies = [ "reth-prune-types", "reth-static-file-types", "revm-database-interface", - "thiserror 2.0.16", 
+ "thiserror 2.0.17", ] [[package]] name = "reth-storage-rpc-provider" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10978,7 +11066,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "auto_impl", "dyn-clone", @@ -10987,7 +11075,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", "tracing-futures", @@ -10995,7 +11083,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11010,7 +11098,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.8.2" +version = "1.9.0" dependencies = [ "tokio", "tokio-stream", @@ -11019,22 +11107,26 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.8.2" +version = "1.9.0" dependencies = [ "clap", "eyre", + "reth-tracing-otlp", "rolling-file", "tracing", "tracing-appender", "tracing-journald", "tracing-logfmt", "tracing-subscriber 0.3.20", + "url", ] [[package]] name = "reth-tracing-otlp" -version = "1.8.2" +version = "1.9.0" dependencies = [ + "clap", + "eyre", "opentelemetry", "opentelemetry-otlp", "opentelemetry-semantic-conventions", @@ -11042,11 +11134,12 @@ dependencies = [ "tracing", "tracing-opentelemetry", "tracing-subscriber 0.3.20", + "url", ] [[package]] name = "reth-transaction-pool" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11086,7 +11179,7 @@ dependencies = [ "serde_json", "smallvec", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -11094,7 +11187,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11127,7 +11220,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -11137,6 +11230,7 @@ dependencies = [ "alloy-serde", "alloy-trie", "arbitrary", + "arrayvec", "bincode 1.3.3", "bytes", "codspeed-criterion-compat", @@ -11159,7 +11253,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -11184,11 +11278,12 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", "codspeed-criterion-compat", + "crossbeam-channel", "dashmap 6.1.0", "derive_more", "itertools 0.14.0", @@ -11197,7 +11292,6 @@ dependencies = [ "proptest-arbitrary-interop", "rand 0.9.2", "rayon", - "reth-db-api", "reth-execution-errors", "reth-metrics", "reth-primitives-traits", @@ -11207,14 +11301,14 @@ dependencies = [ "reth-trie-common", "reth-trie-db", "reth-trie-sparse", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-trie-sparse" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11247,7 +11341,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse-parallel" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11276,15 +11370,15 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.8.2" +version = "1.9.0" dependencies = [ "zstd", ] [[package]] name = "revm" -version = "29.0.1" -source = 
"git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "31.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "revm-bytecode", "revm-context", @@ -11301,8 +11395,8 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "6.2.2" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "7.1.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "bitvec", "phf", @@ -11312,8 +11406,8 @@ dependencies = [ [[package]] name = "revm-context" -version = "9.1.0" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "11.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "bitvec", "cfg-if", @@ -11328,8 +11422,8 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "10.2.0" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "12.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -11343,8 +11437,8 @@ dependencies = [ [[package]] name = "revm-database" -version = "7.0.5" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "9.0.4" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "alloy-eips", "revm-bytecode", @@ -11356,8 +11450,8 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "7.0.5" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "8.0.5" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "auto_impl", "either", @@ -11368,8 +11462,8 @@ dependencies = [ [[package]] name = "revm-handler" -version = "10.0.1" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "12.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "auto_impl", "derive-where", @@ -11386,8 +11480,8 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "10.0.1" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "12.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "auto_impl", "either", @@ -11403,9 +11497,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.29.2" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fdb678b03faa678a7007a7c761a78efa9ca9adcd9434ef3d1ad894aec6e43d1" +checksum = "21caa99f22184a6818946362778cccd3ff02f743c1e085bee87700671570ecb7" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -11418,24 +11512,25 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "revm-interpreter" -version = "25.0.3" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "29.0.1" +source = 
"git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "revm-bytecode", "revm-context-interface", "revm-primitives", + "revm-state", "serde", ] [[package]] name = "revm-precompile" -version = "27.0.0" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "29.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11448,19 +11543,18 @@ dependencies = [ "c-kzg", "cfg-if", "k256", - "libsecp256k1", "p256", "revm-primitives", "ripemd", "rug", "secp256k1 0.31.1", - "sha2 0.10.9", + "sha2", ] [[package]] name = "revm-primitives" -version = "20.2.1" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "21.0.2" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "alloy-primitives", "num_enum", @@ -11471,7 +11565,7 @@ dependencies = [ [[package]] name = "revm-scroll" version = "0.1.0" -source = "git+https://github.com/scroll-tech/scroll-revm#f5ae93d909f185461af8d0263b2549081e9781c8" +source = "git+https://github.com/scroll-tech/scroll-revm?branch=feat%2Fv97#b2dce3260bd04428b242483a40d945342d1df93c" dependencies = [ "auto_impl", "enumn", @@ -11484,8 +11578,8 @@ dependencies = [ [[package]] name = "revm-state" -version = "7.0.5" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "8.1.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "bitflags 2.9.4", "revm-bytecode", @@ -11702,6 +11796,15 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver 0.9.0", +] + [[package]] name = "rustc_version" version = "0.3.3" @@ -11730,7 +11833,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -11743,7 +11846,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -11801,7 +11904,7 @@ dependencies = [ "security-framework 3.4.0", "security-framework-sys", "webpki-root-certs 0.26.11", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -11918,7 +12021,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "scroll-alloy-consensus" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11941,7 +12044,7 @@ dependencies = [ [[package]] name = "scroll-alloy-evm" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11963,7 +12066,7 @@ dependencies = [ [[package]] name = "scroll-alloy-hardforks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-hardforks", "auto_impl", @@ -11972,7 +12075,7 @@ dependencies = [ [[package]] name = "scroll-alloy-network" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-network", @@ -11986,7 +12089,7 @@ dependencies = [ [[package]] name = "scroll-alloy-provider" 
-version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-provider", @@ -12020,14 +12123,14 @@ dependencies = [ "reth-transaction-pool", "scroll-alloy-network", "scroll-alloy-rpc-types-engine", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tower", ] [[package]] name = "scroll-alloy-rpc-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12045,7 +12148,7 @@ dependencies = [ [[package]] name = "scroll-alloy-rpc-types-engine" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -12056,7 +12159,7 @@ dependencies = [ [[package]] name = "scroll-reth" -version = "1.8.2" +version = "1.9.0" dependencies = [ "clap", "reth-cli-util", @@ -12157,13 +12260,22 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser 0.7.0", +] + [[package]] name = "semver" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ - "semver-parser", + "semver-parser 0.10.3", ] [[package]] @@ -12175,6 +12287,12 @@ dependencies = [ "serde", ] +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + [[package]] name = "semver-parser" version = "0.10.3" @@ -12237,14 +12355,15 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.144" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56177480b00303e689183f110b4e727bb4211d692c62d4fcd16d02be93077d40" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ - "indexmap 2.11.1", + "indexmap 2.12.0", "itoa", "memchr", "ryu", + "serde", "serde_core", ] @@ -12290,7 +12409,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.11.1", + "indexmap 2.12.0", "schemars 0.9.0", "schemars 1.0.4", "serde", @@ -12333,19 +12452,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "sha2" version = "0.10.9" @@ -12470,7 +12576,7 @@ checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror 2.0.16", + "thiserror 2.0.17", "time", ] @@ -12507,6 +12613,15 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" +[[package]] +name = "small_btree" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ba60d2df92ba73864714808ca68c059734853e6ab722b40e1cf543ebb3a057a" +dependencies = [ + "arrayvec", +] + [[package]] name = "smallvec" version = "1.15.1" @@ -12578,6 +12693,12 @@ dependencies = [ "sha1", ] +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" + [[package]] name = "spki" version = "0.7.3" @@ -12588,12 +12709,6 @@ dependencies = [ "der", ] -[[package]] -name = "sptr" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" - [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -12749,6 +12864,12 @@ dependencies = [ "libc", ] +[[package]] +name = "tag_ptr" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0e973b34477b7823833469eb0f5a3a60370fef7a453e02d751b59180d0a5a05" + [[package]] name = "tagptr" version = "0.2.0" @@ -12793,7 +12914,7 @@ dependencies = [ "getrandom 0.3.3", "once_cell", "rustix 1.1.2", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -12898,11 +13019,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.16" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - "thiserror-impl 2.0.16", + "thiserror-impl 2.0.17", ] [[package]] @@ -12918,9 +13039,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.16" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", @@ -12978,11 +13099,12 @@ dependencies = [ [[package]] name = "time" -version = "0.3.43" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83bde6f1ec10e72d583d91623c939f623002284ef622b87de38cfd546cbf2031" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", + "itoa", "js-sys", "libc", "num-conv", @@ -13018,16 +13140,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "tinystr" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" -dependencies = [ - "displaydoc", - "zerovec 0.10.4", -] - [[package]] name = "tinystr" version = "0.8.1" @@ -13035,7 +13147,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", - "zerovec 0.11.4", + "zerovec", ] [[package]] @@ -13185,7 +13297,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.11.1", + "indexmap 2.12.0", "serde", "serde_spanned", "toml_datetime", @@ -13201,9 +13313,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "tonic" -version = "0.12.3" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" dependencies = [ "async-trait", "base64 0.22.1", @@ -13211,15 +13323,31 @@ dependencies = [ "http", "http-body", "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", "percent-encoding", 
"pin-project", - "prost", + "sync_wrapper", + "tokio", "tokio-stream", + "tower", "tower-layer", "tower-service", "tracing", ] +[[package]] +name = "tonic-prost" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +dependencies = [ + "bytes", + "prost", + "tonic", +] + [[package]] name = "tower" version = "0.5.2" @@ -13229,7 +13357,7 @@ dependencies = [ "futures-core", "futures-util", "hdrhistogram", - "indexmap 2.11.1", + "indexmap 2.12.0", "pin-project-lite", "slab", "sync_wrapper", @@ -13374,15 +13502,16 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.30.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd8e764bd6f5813fd8bebc3117875190c5b0415be8f7f8059bffb6ecd979c444" +checksum = "1e6e5658463dd88089aba75c7791e1d3120633b1bfde22478b28f625a9bb1b8e" dependencies = [ "js-sys", - "once_cell", "opentelemetry", "opentelemetry_sdk", + "rustversion", "smallvec", + "thiserror 2.0.17", "tracing", "tracing-core", "tracing-log", @@ -13514,7 +13643,7 @@ dependencies = [ "rustls", "rustls-pki-types", "sha1", - "thiserror 2.0.16", + "thiserror 2.0.17", "utf-8", ] @@ -14587,12 +14716,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" -[[package]] -name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - [[package]] name = "writeable" version = "0.6.1" @@ -14612,7 +14735,7 @@ dependencies = [ "pharos", "rustc_version 0.4.1", "send_wrapper 0.6.0", - "thiserror 2.0.16", + "thiserror 2.0.17", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -14638,22 +14761,16 @@ dependencies = [ ] [[package]] -name = "yansi" -version = "1.0.1" +name = "xsum" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" +checksum = "0637d3a5566a82fa5214bae89087bc8c9fb94cd8e8a3c07feb691bb8d9c632db" [[package]] -name = "yoke" -version = "0.7.5" +name = "yansi" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" -dependencies = [ - "serde", - "stable_deref_trait", - "yoke-derive 0.7.5", - "zerofrom", -] +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" @@ -14663,22 +14780,10 @@ checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ "serde", "stable_deref_trait", - "yoke-derive 0.8.0", + "yoke-derive", "zerofrom", ] -[[package]] -name = "yoke-derive" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", - "synstructure", -] - [[package]] name = "yoke-derive" version = "0.8.0" @@ -14759,19 +14864,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" dependencies = [ "displaydoc", - "yoke 0.8.0", - "zerofrom", -] - -[[package]] -name = "zerovec" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" -dependencies = [ - "yoke 0.7.5", + "yoke", "zerofrom", - "zerovec-derive 0.10.3", ] [[package]] @@ -14780,20 +14874,9 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" dependencies = [ - "yoke 0.8.0", + "yoke", "zerofrom", - "zerovec-derive 0.11.1", -] - -[[package]] -name = "zerovec-derive" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", + "zerovec-derive", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index b6345af276e..12826f50d15 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -485,18 +485,18 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { git = "https://github.com/scroll-tech/revm", default-features = false, features = ["enable_eip7702", "enable_eip7623"] } -revm-bytecode = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-database = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-state = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-primitives = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-interpreter = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-inspector = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-context = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-context-interface = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-database-interface = { git = "https://github.com/scroll-tech/revm", default-features = false } -op-revm = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", default-features = false } +revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false, features = ["enable_eip7702", "enable_eip7623"] } +revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", branch = "feat/v97", default-features = false } 
revm-inspectors = "0.32.0" # eth @@ -771,8 +771,8 @@ walkdir = "2.3.3" vergen-git2 = "1.0.5" [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm" } -op-revm = { git = "https://github.com/scroll-tech/revm" } +revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97" } +op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97" } # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 7005968f1cd..d74b9e9ce2e 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -15,9 +15,7 @@ use alloy_consensus::transaction::Either; use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_evm::Evm; use alloy_primitives::B256; -use reth_chain_state::{ - CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, -}; +use reth_chain_state::{CanonicalInMemoryState, ExecutedBlock}; use reth_consensus::{ConsensusError, FullConsensus, HeaderValidator}; use reth_engine_primitives::{ ConfigureEngineEvm, ExecutableTxIterator, ExecutionPayload, InvalidBlockHook, PayloadValidator, diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index 6758281645d..3cd8529dc5f 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -4,8 +4,8 @@ use crate::{BuilderContext, FullNodeTypes}; use alloy_primitives::Address; use reth_chain_state::CanonStateSubscriptions; -use reth_chainspec::ChainSpecProvider; -use reth_node_api::TxTy; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_node_api::{NodeTypes, TxTy}; use reth_transaction_pool::{ blobstore::DiskFileBlobStore, CoinbaseTipOrdering, PoolConfig, PoolTransaction, SubPoolLimit, TransactionPool, TransactionValidationTaskExecutor, TransactionValidator, diff --git a/crates/scroll/alloy/evm/src/block/mod.rs b/crates/scroll/alloy/evm/src/block/mod.rs index 6b221ff1506..610f67122ea 100644 --- a/crates/scroll/alloy/evm/src/block/mod.rs +++ b/crates/scroll/alloy/evm/src/block/mod.rs @@ -28,7 +28,7 @@ use alloy_primitives::{B256, U256}; use revm::{ context::{ result::{InvalidTransaction, ResultAndState}, - TxEnv, + Block, TxEnv, }, database::State, handler::PrecompileProvider, @@ -154,7 +154,7 @@ where fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> { // set state clear flag if the block is after the Spurious Dragon hardfork. let state_clear_flag = - self.spec.is_spurious_dragon_active_at_block(self.evm.block().number.to()); + self.spec.is_spurious_dragon_active_at_block(self.evm.block().number().to()); self.evm.db_mut().set_state_clear_flag(state_clear_flag); // load the l1 gas oracle contract in cache. 
@@ -169,7 +169,7 @@ where if self .spec .scroll_fork_activation(ScrollHardfork::Curie) - .transitions_at_block(self.evm.block().number.to()) + .transitions_at_block(self.evm.block().number().to()) { if let Err(err) = apply_curie_hard_fork(self.evm.db_mut()) { return Err(BlockExecutionError::msg(format!( @@ -183,7 +183,7 @@ where if self .spec .scroll_fork_activation(ScrollHardfork::Feynman) - .active_at_timestamp(self.evm.block().timestamp.to()) + .active_at_timestamp(self.evm.block().timestamp().to()) { if let Err(err) = apply_feynman_hard_fork(self.evm.db_mut()) { return Err(BlockExecutionError::msg(format!( @@ -206,7 +206,7 @@ where let is_l1_message = tx.tx().ty() == L1_MESSAGE_TRANSACTION_TYPE; // The sum of the transaction’s gas limit and the gas utilized in this block prior, // must be no greater than the block’s gasLimit. - let block_available_gas = self.evm.block().gas_limit - self.gas_used; + let block_available_gas = self.evm.block().gas_limit() - self.gas_used; if tx.tx().gas_limit() > block_available_gas { return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { transaction_gas_limit: tx.tx().gas_limit(), @@ -219,14 +219,14 @@ where let block = self.evm.block(); // verify the transaction type is accepted by the current fork. - if tx.tx().is_eip2930() && !chain_spec.is_curie_active_at_block(block.number.to()) { + if tx.tx().is_eip2930() && !chain_spec.is_curie_active_at_block(block.number().to()) { return Err(BlockValidationError::InvalidTx { hash, error: Box::new(InvalidTransaction::Eip2930NotSupported), } .into()) } - if tx.tx().is_eip1559() && !chain_spec.is_curie_active_at_block(block.number.to()) { + if tx.tx().is_eip1559() && !chain_spec.is_curie_active_at_block(block.number().to()) { return Err(BlockValidationError::InvalidTx { hash, error: Box::new(InvalidTransaction::Eip1559NotSupported), @@ -241,7 +241,7 @@ where .into()) } if tx.tx().is_eip7702() && - !chain_spec.is_euclid_v2_active_at_timestamp(block.timestamp.to()) + !chain_spec.is_euclid_v2_active_at_timestamp(block.timestamp().to()) { return Err(BlockValidationError::InvalidTx { hash, @@ -296,6 +296,7 @@ where receipts: self.receipts, requests: Default::default(), gas_used: self.gas_used, + blob_gas_used: 0, }, )) } diff --git a/crates/scroll/alloy/evm/src/lib.rs b/crates/scroll/alloy/evm/src/lib.rs index f8a7fda6db6..a76fb55f46f 100644 --- a/crates/scroll/alloy/evm/src/lib.rs +++ b/crates/scroll/alloy/evm/src/lib.rs @@ -112,6 +112,7 @@ where type Error = EVMError; type HaltReason = HaltReason; type Spec = ScrollSpecId; + type BlockEnv = BlockEnv; type Precompiles = P; type Inspector = I; @@ -207,6 +208,7 @@ impl EvmFactory for ScrollEvmFactory

 {
     type Error = EVMError;
     type HaltReason = HaltReason;
     type Spec = ScrollSpecId;
+    type BlockEnv = BlockEnv;
     type Precompiles = PrecompilesMap;

     fn create_evm(
diff --git a/crates/scroll/alloy/evm/src/system_caller.rs b/crates/scroll/alloy/evm/src/system_caller.rs
index f57d3473b72..12f87c4d4a0 100644
--- a/crates/scroll/alloy/evm/src/system_caller.rs
+++ b/crates/scroll/alloy/evm/src/system_caller.rs
@@ -6,7 +6,10 @@ use alloy_evm::{
     Evm,
 };
 use alloy_primitives::B256;
-use revm::{context::result::ResultAndState, DatabaseCommit};
+use revm::{
+    context::{result::ResultAndState, Block},
+    DatabaseCommit,
+};
 use scroll_alloy_hardforks::ScrollHardforks;

 /// An ephemeral helper type for executing system calls.
@@ -62,13 +65,13 @@ fn transact_blockhashes_contract_call(
     evm: &mut impl Evm,
 ) -> Result>, BlockExecutionError> {
     // if Feynman is not active at timestamp then no system transaction occurs.
-    if !spec.is_feynman_active_at_timestamp(evm.block().timestamp.to()) {
+    if !spec.is_feynman_active_at_timestamp(evm.block().timestamp().to()) {
         return Ok(None);
     }

     // if the block number is zero (genesis block) then no system transaction may occur as per
     // EIP-2935
-    if evm.block().number.to::<u64>() == 0u64 {
+    if evm.block().number().to::<u64>() == 0u64 {
         return Ok(None);
     }
diff --git a/crates/scroll/engine-primitives/src/payload/built.rs b/crates/scroll/engine-primitives/src/payload/built.rs
index cea13302aa1..4fa87053c16 100644
--- a/crates/scroll/engine-primitives/src/payload/built.rs
+++ b/crates/scroll/engine-primitives/src/payload/built.rs
@@ -10,8 +10,7 @@ use alloy_rpc_types_engine::{
     ExecutionPayloadEnvelopeV4, ExecutionPayloadFieldV2, ExecutionPayloadV1, ExecutionPayloadV3,
     PayloadId,
 };
-use reth_chain_state::ExecutedBlockWithTrieUpdates;
-use reth_payload_primitives::BuiltPayload;
+use reth_payload_primitives::{BuiltPayload, BuiltPayloadExecutedBlock};
 use reth_primitives_traits::SealedBlock;
 use reth_scroll_primitives::{ScrollBlock, ScrollPrimitives};

@@ -23,7 +22,7 @@ pub struct ScrollBuiltPayload {
     /// Sealed block
     pub(crate) block: Arc<SealedBlock<ScrollBlock>>,
     /// Block execution data for the payload
-    pub(crate) executed_block: Option<ExecutedBlockWithTrieUpdates<ScrollPrimitives>>,
+    pub(crate) executed_block: Option<BuiltPayloadExecutedBlock<ScrollPrimitives>>,
     /// The fees of the block
     pub(crate) fees: U256,
 }
@@ -33,7 +32,7 @@ impl ScrollBuiltPayload {
     pub const fn new(
         id: PayloadId,
         block: Arc<SealedBlock<ScrollBlock>>,
-        executed_block: Option<ExecutedBlockWithTrieUpdates<ScrollPrimitives>>,
+        executed_block: Option<BuiltPayloadExecutedBlock<ScrollPrimitives>>,
         fees: U256,
     ) -> Self {
         Self { id, block, executed_block, fees }
@@ -71,7 +70,7 @@ impl BuiltPayload for ScrollBuiltPayload {
         self.fees
     }

-    fn executed_block(&self) -> Option<ExecutedBlockWithTrieUpdates<ScrollPrimitives>> {
+    fn executed_block(&self) -> Option<BuiltPayloadExecutedBlock<ScrollPrimitives>> {
         self.executed_block.clone()
     }
diff --git a/crates/scroll/evm/src/build.rs b/crates/scroll/evm/src/build.rs
index 2645dad697b..ad39ff5a4ec 100644
--- a/crates/scroll/evm/src/build.rs
+++ b/crates/scroll/evm/src/build.rs
@@ -7,6 +7,7 @@ use reth_evm::execute::{BlockAssembler, BlockAssemblerInput};
 use reth_execution_types::BlockExecutionResult;
 use reth_primitives_traits::SignedTransaction;
 use reth_scroll_primitives::ScrollReceipt;
+use revm::context::Block;
 use scroll_alloy_evm::ScrollBlockExecutionCtx;
 use scroll_alloy_hardforks::ScrollHardforks;

@@ -53,7 +54,7 @@ where
         ..
} = input; - let timestamp = evm_env.block_env.timestamp; + let timestamp = evm_env.block_env.timestamp(); let transactions_root = proofs::calculate_transaction_root(&transactions); let receipts_root = ScrollReceipt::calculate_receipt_root_no_memo(receipts); @@ -69,15 +70,15 @@ where withdrawals_root: None, logs_bloom, timestamp: timestamp.to(), - mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(), + mix_hash: evm_env.block_env.prevrandao().unwrap_or_default(), nonce: BEACON_NONCE.into(), base_fee_per_gas: self .chain_spec - .is_curie_active_at_block(evm_env.block_env.number.to()) - .then_some(evm_env.block_env.basefee), - number: evm_env.block_env.number.to(), - gas_limit: evm_env.block_env.gas_limit, - difficulty: evm_env.block_env.difficulty, + .is_curie_active_at_block(evm_env.block_env.number().to()) + .then_some(evm_env.block_env.basefee()), + number: evm_env.block_env.number().to(), + gas_limit: evm_env.block_env.gas_limit(), + difficulty: evm_env.block_env.difficulty(), gas_used: *gas_used, extra_data: Default::default(), parent_beacon_block_root: None, diff --git a/crates/scroll/openvm-compat/Cargo.toml b/crates/scroll/openvm-compat/Cargo.toml index e7df83724da..a4a8c0eb396 100644 --- a/crates/scroll/openvm-compat/Cargo.toml +++ b/crates/scroll/openvm-compat/Cargo.toml @@ -28,4 +28,4 @@ scroll-alloy-consensus = { path = "../alloy/consensus", default-features = false scroll-alloy-rpc-types = { path = "../alloy/rpc-types", default-features = false } [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm" } +revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97" } diff --git a/crates/scroll/payload/src/builder.rs b/crates/scroll/payload/src/builder.rs index ca754cbadb3..812953dad37 100644 --- a/crates/scroll/payload/src/builder.rs +++ b/crates/scroll/payload/src/builder.rs @@ -3,7 +3,7 @@ use super::ScrollPayloadBuilderError; use crate::config::{PayloadBuildingBreaker, ScrollBuilderConfig}; -use alloy_consensus::{Transaction, Typed2718}; +use alloy_consensus::{BlockHeader, Transaction, Typed2718}; use alloy_primitives::U256; use alloy_rlp::Encodable; use core::fmt::Debug; @@ -11,7 +11,6 @@ use reth_basic_payload_builder::{ is_better_payload, BuildArguments, BuildOutcome, BuildOutcomeKind, MissingPayloadBehaviour, PayloadBuilder, PayloadConfig, }; -use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ block::{BlockExecutionError, BlockValidationError}, @@ -20,17 +19,21 @@ use reth_evm::{ }; use reth_execution_types::ExecutionOutcome; use reth_payload_builder::PayloadId; -use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; +use reth_payload_primitives::{ + BuiltPayloadExecutedBlock, PayloadBuilderAttributes, PayloadBuilderError, +}; use reth_payload_util::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions}; use reth_primitives_traits::{RecoveredBlock, SealedHeader, SignedTransaction, TxTy}; -use reth_revm::{cancelled::CancelOnDrop, database::StateProviderDatabase, db::State}; +use reth_revm::{ + cancelled::CancelOnDrop, context::either, database::StateProviderDatabase, db::State, +}; use reth_scroll_chainspec::{ChainConfig, ScrollChainConfig}; use reth_scroll_engine_primitives::{ScrollBuiltPayload, ScrollPayloadBuilderAttributes}; use reth_scroll_evm::{ScrollBaseFeeProvider, ScrollNextBlockEnvAttributes}; use reth_scroll_primitives::{ScrollPrimitives, ScrollTransactionSigned}; use 
reth_storage_api::{BaseFeeProvider, StateProvider, StateProviderFactory};
 use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool};
-use revm::context::{Block, BlockEnv};
+use revm::context::Block;
 use scroll_alloy_hardforks::ScrollHardforks;
 use std::{boxed::Box, sync::Arc, vec, vec::Vec};
@@ -308,20 +311,17 @@ impl<Txs> ScrollBuilder<'_, Txs> {
         let execution_outcome = ExecutionOutcome::new(
             db.take_bundle(),
             vec![execution_result.receipts],
-            block.number,
+            block.number(),
             Vec::new(),
         );

         // create the executed block data
-        let executed: ExecutedBlockWithTrieUpdates<ScrollPrimitives> =
-            ExecutedBlockWithTrieUpdates {
-                block: ExecutedBlock {
-                    recovered_block: Arc::new(block),
-                    execution_output: Arc::new(execution_outcome),
-                    hashed_state: Arc::new(hashed_state),
-                },
-                trie: ExecutedTrieUpdates::Present(Arc::new(trie_updates)),
-            };
+        let executed: BuiltPayloadExecutedBlock<ScrollPrimitives> = BuiltPayloadExecutedBlock {
+            recovered_block: Arc::new(block),
+            execution_output: Arc::new(execution_outcome),
+            hashed_state: either::Either::Left(Arc::new(hashed_state)),
+            trie_updates: either::Either::Left(Arc::new(trie_updates)),
+        };

         let no_tx_pool = ctx.attributes().no_tx_pool;

@@ -376,9 +376,9 @@ where
     }

     /// Returns the current fee settings for transactions from the mempool
-    pub fn best_transaction_attributes(&self, block_env: &BlockEnv) -> BestTransactionsAttributes {
+    pub fn best_transaction_attributes(&self, block_env: impl Block) -> BestTransactionsAttributes {
         BestTransactionsAttributes::new(
-            block_env.basefee,
+            block_env.basefee(),
             block_env.blob_gasprice().map(|p| p as u64),
         )
     }
@@ -435,7 +435,7 @@ where
         builder: &mut impl BlockBuilder,
     ) -> Result {
         let mut info = ExecutionInfo::new();
-        let block_gas_limit = builder.evm().block().gas_limit;
+        let block_gas_limit = builder.evm().block().gas_limit();
         let mut gas_spent_by_transactions = Vec::new();

         for sequencer_tx in &self.attributes().transactions {
@@ -506,8 +506,8 @@ where
         builder_config: &ScrollBuilderConfig,
         breaker: PayloadBuildingBreaker,
     ) -> Result, PayloadBuilderError> {
-        let block_gas_limit = builder.evm_mut().block().gas_limit;
-        let base_fee = builder.evm_mut().block().basefee;
+        let block_gas_limit = builder.evm_mut().block().gas_limit();
+        let base_fee = builder.evm_mut().block().basefee();

         while let Some(tx) = best_txs.next(()) {
             let tx = tx.into_consensus();
diff --git a/crates/scroll/rpc/src/eth/mod.rs b/crates/scroll/rpc/src/eth/mod.rs
index ae75327a23b..ebe58a23ef1 100644
--- a/crates/scroll/rpc/src/eth/mod.rs
+++ b/crates/scroll/rpc/src/eth/mod.rs
@@ -16,8 +16,8 @@ use reth_rpc::eth::{core::EthApiInner, DevSigner};
 use reth_rpc_convert::{RpcConvert, RpcConverter, RpcTypes, SignableTxRequest};
 use reth_rpc_eth_api::{
     helpers::{
-        pending_block::BuildPendingEnv, AddDevSigners, EthApiSpec, EthState, LoadFee,
-        LoadPendingBlock, LoadState, SpawnBlocking, Trace,
+        pending_block::BuildPendingEnv, EthApiSpec, EthState, LoadFee, LoadPendingBlock, LoadState,
+        SpawnBlocking, Trace,
     },
     EthApiTypes, FullEthApiServer, RpcNodeCore, RpcNodeCoreExt,
 };
diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs
index 41565ddf9ce..f81f4972427 100644
--- a/crates/stages/stages/src/sets.rs
+++ b/crates/stages/stages/src/sets.rs
@@ -404,7 +404,7 @@ where
 /// - [`StorageHashingStage`]
 /// - [`MerkleStage`] (execute)
 /// - [`MerkleChangeSets`]
-#[derive(Debug, Default)]
+#[derive(Debug)]
 #[non_exhaustive]
 pub struct HashingStages {
     /// Configuration for each stage in the pipeline