diff --git a/Cargo.toml b/Cargo.toml index 91b02e6..ef8ad59 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ members = ["crates/*"] resolver = "2" [workspace.package] -version = "0.14.0" +version = "0.15.0" edition = "2024" rust-version = "1.88" authors = ["init4"] @@ -34,34 +34,34 @@ debug = false incremental = false [workspace.dependencies] -signet-blobber = { version = "0.14", path = "crates/blobber" } -signet-block-processor = { version = "0.14", path = "crates/block-processor" } -signet-db = { version = "0.14", path = "crates/db" } -signet-genesis = { version = "0.14", path = "crates/genesis" } -signet-node = { version = "0.14", path = "crates/node" } -signet-node-config = { version = "0.14", path = "crates/node-config" } -signet-node-tests = { version = "0.14", path = "crates/node-tests" } -signet-node-types = { version = "0.14", path = "crates/node-types" } -signet-rpc = { version = "0.14", path = "crates/rpc" } +signet-blobber = { version = "0.15", path = "crates/blobber" } +signet-block-processor = { version = "0.15", path = "crates/block-processor" } +signet-db = { version = "0.15", path = "crates/db" } +signet-genesis = { version = "0.15", path = "crates/genesis" } +signet-node = { version = "0.15", path = "crates/node" } +signet-node-config = { version = "0.15", path = "crates/node-config" } +signet-node-tests = { version = "0.15", path = "crates/node-tests" } +signet-node-types = { version = "0.15", path = "crates/node-types" } +signet-rpc = { version = "0.15", path = "crates/rpc" } init4-bin-base = { version = "0.13.1", features = ["alloy"] } -signet-bundle = "0.14" -signet-constants = "0.14" -signet-evm = "0.14" -signet-extract = "0.14" -signet-test-utils = "0.14" -signet-tx-cache = "0.14" -signet-types = "0.14" -signet-zenith = "0.14" -signet-journal = "0.14" +signet-bundle = "0.15" +signet-constants = "0.15" +signet-evm = "0.15" +signet-extract = "0.15" +signet-test-utils = "0.15" +signet-tx-cache = "0.15" +signet-types = "0.15" 
+signet-zenith = "0.15" +signet-journal = "0.15" # ajj ajj = { version = "0.3.4" } # trevm -trevm = { version = "0.29.0", features = ["full_env_cfg"] } -revm-inspectors = "0.30.0" # should be 1 more than trevm version, usually +trevm = { version = "0.31.0", features = ["full_env_cfg"] } +revm-inspectors = "0.32.0" # should be 1 more than trevm version, usually # Alloy periphery crates alloy = { version = "1.0.35", features = [ @@ -74,22 +74,22 @@ alloy = { version = "1.0.35", features = [ alloy-contract = { version = "1.0.35", features = ["pubsub"] } # Reth -reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-eth-wire-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } -reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.8.3" } +reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-ethereum = { git = 
"https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-eth-wire-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } +reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.0" } # Foundry periphery foundry-blob-explorers = "0.17" diff --git a/crates/block-processor/src/utils.rs b/crates/block-processor/src/utils.rs index 4341433..f6140d6 100644 --- a/crates/block-processor/src/utils.rs +++ b/crates/block-processor/src/utils.rs @@ -4,7 +4,13 @@ use reth_chainspec::EthereumHardforks; /// Equivalent to [`reth_evm_ethereum::revm_spec`], however, always starts at /// [`SpecId::PRAGUE`] and transitions to [`SpecId::OSAKA`]. 
pub fn revm_spec(chain_spec: &reth::chainspec::ChainSpec, timestamp: u64) -> SpecId { - if chain_spec.is_osaka_active_at_timestamp(timestamp) { SpecId::OSAKA } else { SpecId::PRAGUE } + if chain_spec.is_amsterdam_active_at_timestamp(timestamp) { + SpecId::AMSTERDAM + } else if chain_spec.is_osaka_active_at_timestamp(timestamp) { + SpecId::OSAKA + } else { + SpecId::PRAGUE + } } /// This is simply a compile-time assertion to ensure that all SpecIds are @@ -32,6 +38,7 @@ const fn assert_in_range(spec_id: SpecId) { | SpecId::SHANGHAI | SpecId::CANCUN | SpecId::PRAGUE - | SpecId::OSAKA => {} + | SpecId::OSAKA + | SpecId::AMSTERDAM => {} } } diff --git a/crates/db/src/provider.rs b/crates/db/src/provider.rs index fa9d17a..13c6d4f 100644 --- a/crates/db/src/provider.rs +++ b/crates/db/src/provider.rs @@ -4,15 +4,15 @@ use crate::{ traits::RuWriter, }; use alloy::{ - consensus::{BlockHeader, TxReceipt}, - primitives::{Address, B256, BlockNumber, U256, map::HashSet}, + consensus::BlockHeader, + primitives::{Address, B256, BlockNumber}, }; use reth::{ primitives::StaticFileSegment, providers::{ BlockBodyIndicesProvider, BlockNumReader, BlockReader, BlockWriter, Chain, DBProvider, HistoryWriter, OriginalValuesKnown, ProviderError, ProviderResult, StageCheckpointWriter, - StateWriter, StaticFileProviderFactory, StaticFileWriter, StorageLocation, + StateWriter, StaticFileProviderFactory, StaticFileWriter, }, }; use reth_db::{ @@ -30,7 +30,7 @@ use signet_zenith::{ Transactor::Transact, Zenith, }; -use std::ops::RangeInclusive; +use std::ops::{Not, RangeInclusive}; use tracing::{debug, instrument, trace, warn}; impl RuWriter for SignetDbRw @@ -144,14 +144,13 @@ where header: Option, block: &RecoveredBlock, journal_hash: B256, - write_to: StorageLocation, ) -> ProviderResult { // Implementation largely copied from // `BlockWriter::insert_block` // in `reth/crates/storage/provider/src/providers/database/provider.rs` // duration metrics have been removed // - // Last reviewed at 
tag v1.8.1 + // Last reviewed at tag v1.9.0 let block_number = block.number(); // SIGNET-SPECIFIC @@ -159,7 +158,6 @@ where if let Some(header) = header { self.insert_signet_header(header, block_number)?; } - // SIGNET-SPECIFIC // Put journal hash into the DB self.tx_ref().put::(block_number, journal_hash)?; @@ -167,24 +165,10 @@ where let block_hash = block.block.header.hash(); let block_header = block.block.header.header(); - if write_to.database() { - self.tx_ref().put::(block_number, block_hash)?; - self.tx_ref().put::(block_number, block_header.clone())?; - // NB: while this is meaningless for zenith blocks, it is necessary for - // the RPC server to function properly. If the TTD for a block is not - // set, the RPC server will return an error indicating that the block - // is not found. - self.tx_ref() - .put::(block_number, U256::ZERO.into())?; - } - - if write_to.static_files() { - let sf = self.static_file_provider(); - let mut writer = sf.get_writer(block_number, StaticFileSegment::Headers)?; - writer.append_header(block_header, U256::ZERO, &block_hash)?; - } + self.static_file_provider() + .get_writer(block_number, StaticFileSegment::Headers)? + .append_header(block_header, &block_hash)?; - // Append the block number corresponding to a header on the DB self.tx_ref().put::(block_hash, block_number)?; let mut next_tx_num = self @@ -211,7 +195,7 @@ where next_tx_num += 1; } - self.append_signet_block_body((block_number, block), write_to)?; + self.append_signet_block_body((block_number, block))?; debug!(?block_number, "Inserted block"); @@ -219,50 +203,35 @@ where } /// Appends the body of a signet block to the database. 
- fn append_signet_block_body( - &self, - body: (BlockNumber, &RecoveredBlock), - write_to: StorageLocation, - ) -> ProviderResult<()> { + fn append_signet_block_body(&self, body: (BlockNumber, &RecoveredBlock)) -> ProviderResult<()> { // Implementation largely copied from // `DatabaseProvider::append_block_bodies` // in `reth/crates/storage/provider/src/providers/database/provider.rs` // duration metrics have been removed, and the implementation has been // modified to work with a single signet block. // - // last reviewed at tag v1.8.1 + // last reviewed at tag v1.9.0 + let from_block = body.0; let sf = self.static_file_provider(); // Initialize writer if we will be writing transactions to staticfiles - let mut tx_static_writer = write_to - .static_files() - .then(|| sf.get_writer(body.0, StaticFileSegment::Transactions)) - .transpose()?; + let mut tx_writer = sf.get_writer(from_block, StaticFileSegment::Transactions)?; let mut block_indices_cursor = self.tx_ref().cursor_write::()?; let mut tx_block_cursor = self.tx_ref().cursor_write::()?; - // Initialize curosr if we will be writing transactions to database - let mut tx_cursor = write_to - .database() - .then(|| self.tx_ref().cursor_write::()) - .transpose()?; - let block_number = body.0; let block = body.1; // Get id for the next tx_num or zero if there are no transactions. let mut next_tx_num = tx_block_cursor.last()?.map(|(n, _)| n + 1).unwrap_or_default(); - // Increment block on static file header if we're writing to static files. 
- if let Some(writer) = tx_static_writer.as_mut() { - writer.increment_block(block_number)?; - } + // Increment block on static file header + tx_writer.increment_block(block_number)?; - let tx_count = block.block.body.transactions.len(); - let block_indices = - StoredBlockBodyIndices { first_tx_num: next_tx_num, tx_count: tx_count as u64 }; + let tx_count = block.block.body.transactions.len() as u64; + let block_indices = StoredBlockBodyIndices { first_tx_num: next_tx_num, tx_count }; // insert block meta block_indices_cursor.append(block_number, &block_indices)?; @@ -274,13 +243,7 @@ where // Write transactions for transaction in block.block.body.transactions() { - if let Some(writer) = tx_static_writer.as_mut() { - writer.append_transaction(next_tx_num, transaction)?; - } - - if let Some(cursor) = tx_cursor.as_mut() { - cursor.append(next_tx_num, transaction)?; - } + tx_writer.append_transaction(next_tx_num, transaction)?; // Increment transaction id for each transaction next_tx_num += 1; @@ -326,7 +289,6 @@ where fn take_signet_headers_above( &self, target: BlockNumber, - _remove_from: StorageLocation, ) -> ProviderResult> { // Implementation largely copied from // `DatabaseProvider::get_or_take` @@ -347,11 +309,7 @@ where } /// Remove [`Zenith::BlockHeader`] objects above the specified height from the DB. 
- fn remove_signet_headers_above( - &self, - target: BlockNumber, - _remove_from: StorageLocation, - ) -> ProviderResult<()> { + fn remove_signet_headers_above(&self, target: BlockNumber) -> ProviderResult<()> { self.remove::(target + 1..)?; Ok(()) } @@ -373,11 +331,10 @@ where fn take_signet_events_above( &self, target: BlockNumber, - remove_from: StorageLocation, ) -> ProviderResult> { let range = target + 1..=self.last_block_number()?; let items = self.get_signet_events(range)?; - self.remove_signet_events_above(target, remove_from)?; + self.remove_signet_events_above(target)?; Ok(items) } @@ -385,11 +342,7 @@ where /// [`Transactor::Transact`] events above the specified height from the DB. /// /// [`Transactor::Transact`]: signet_zenith::Transactor::Transact - fn remove_signet_events_above( - &self, - target: BlockNumber, - _remove_from: StorageLocation, - ) -> ProviderResult<()> { + fn remove_signet_events_above(&self, target: BlockNumber) -> ProviderResult<()> { self.remove::(target + 1..)?; Ok(()) } @@ -419,15 +372,15 @@ where // in `reth/crates/storage/provider/src/providers/database/provider.rs` // duration metrics have been removed // - // last reviewed at tag v1.8.1 + // last reviewed at tag v1.9.0 let BlockResult { sealed_block: block, execution_outcome, .. } = block_result; let ru_height = block.number(); - self.insert_signet_block(header, block, journal_hash, StorageLocation::Database)?; + self.insert_signet_block(header, block, journal_hash)?; // Write the state and match the storage location that Reth uses. - self.ru_write_state(execution_outcome, OriginalValuesKnown::No, StorageLocation::Database)?; + self.ru_write_state(execution_outcome, OriginalValuesKnown::No)?; // NB: At this point, reth writes hashed state and trie updates. Signet // skips this. 
We re-use these tables to write the enters, enter tokens, @@ -462,22 +415,18 @@ } #[instrument(skip(self))] - fn ru_take_blocks_and_execution_above( - &self, - target: BlockNumber, - remove_from: StorageLocation, - ) -> ProviderResult { + fn ru_take_blocks_and_execution_above(&self, target: BlockNumber) -> ProviderResult { // Implementation largely copied from // `BlockExecutionWriter::take_block_and_execution_above` // in `reth/crates/storage/provider/src/providers/database/provider.rs` // - // last reviewed at tag v1.8.1 + // last reviewed at tag v1.9.0 let range = target + 1..=self.last_block_number()?; // This block is copied from `unwind_trie_state_range` // - // last reviewed at tag v1.8.1 + // last reviewed at tag v1.9.0 { let changed_accounts = self .tx_ref() .cursor_read::()? @@ -489,14 +438,14 @@ // root calculation, which we don't use. self.unwind_account_history_indices(changed_accounts.iter())?; - let storage_range = BlockNumberAddress::range(range.clone()); + let storage_start = BlockNumberAddress((target + 1, Address::ZERO)); // Unwind storage history indices. Similarly, we don't need to // unwind storage hashes, since we don't use them. let changed_storages = self .tx_ref() .cursor_read::()? - .walk_range(storage_range)? + .walk_range(storage_start..)? .collect::, _>>()?; self.unwind_storage_history_indices(changed_storages.iter().copied())?; @@ -506,7 +455,7 @@ trace!("trie state unwound"); - let execution_state = self.take_state_above(target, remove_from)?; + let execution_state = self.take_state_above(target)?; trace!("state taken"); @@ -517,14 +466,14 @@ // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. - self.remove_blocks_above(target, remove_from)?; + self.remove_blocks_above(target)?; trace!("blocks removed"); + // SIGNET-SPECIFIC // This is a Signet-specific addition that removes the enters, // entertokens, zenith headers, and transact events.
- let ru_info = - self.take_extraction_results_above(target, remove_from)?.into_iter().collect(); + let ru_info = self.take_extraction_results_above(target)?.into_iter().collect(); trace!("extraction results taken"); @@ -539,51 +488,53 @@ } #[instrument(skip(self))] - fn ru_remove_blocks_and_execution_above( - &self, - block: BlockNumber, - remove_from: StorageLocation, - ) -> ProviderResult<()> { + fn ru_remove_blocks_and_execution_above(&self, target: BlockNumber) -> ProviderResult<()> { // Implementation largely copied from // `BlockExecutionWriter::remove_block_and_execution_above` // in `reth/crates/storage/provider/src/providers/database/provider.rs` // duration metrics have been removed // - // last reviewed at tag v1.8.1 + // last reviewed at tag v1.9.0 + + let range = target + 1..=self.last_block_number()?; // This block is copied from `unwind_trie_state_range` // - // last reviewed at tag v1.8.1 + // last reviewed at tag v1.9.0 { - let range = block + 1..=self.last_block_number()?; let changed_accounts = self .tx_ref() .cursor_read::()? .walk_range(range.clone())? .collect::, _>>()?; - + // There's no need to also unwind account hashes, since that is + // only useful for filling intermediate tables that deal with state + // root calculation, which we don't use. self.unwind_account_history_indices(changed_accounts.iter())?; - let storage_range = BlockNumberAddress::range(range.clone()); + let storage_start = BlockNumberAddress((target + 1, Address::ZERO)); - // Unwind storage history indices. + // Unwind storage history indices. Similarly, we don't need to + // unwind storage hashes, since we don't use them. let changed_storages = self .tx_ref() .cursor_read::()? - .walk_range(storage_range)? + .walk_range(storage_start..)? .collect::, _>>()?; self.unwind_storage_history_indices(changed_storages.iter().copied())?; + + // We also skip calculating the reverted root here.
} - self.remove_state_above(block, remove_from)?; - self.remove_blocks_above(block, remove_from)?; + self.remove_state_above(target)?; + self.remove_blocks_above(target)?; // Signet specific: - self.remove_extraction_results_above(block, remove_from)?; + self.remove_extraction_results_above(target)?; // Update pipeline stages - self.update_pipeline_stages(block, true)?; + self.update_pipeline_stages(target, true)?; Ok(()) } @@ -592,13 +543,12 @@ where &self, execution_outcome: &signet_evm::ExecutionOutcome, is_value_known: OriginalValuesKnown, - write_receipts_to: StorageLocation, ) -> ProviderResult<()> { // Implementation largely copied from // `StateWriter::write_state` for `DatabaseProvider` // in `reth/crates/storage/provider/src/providers/database/provider.rs` // - // Last reviewed at tag v1.8.1 + // Last reviewed at tag v1.9.0 let first_block = execution_outcome.first_block(); let block_count = execution_outcome.len() as u64; let last_block = execution_outcome.last_block(); @@ -633,14 +583,8 @@ where // // We are writing to database if requested or if there's any kind of receipt pruning // configured - let mut receipts_cursor = (write_receipts_to.database() || has_receipts_pruning) - .then(|| self.tx_ref().cursor_write::>()) - .transpose()?; - - // Prepare receipts static writer if we are going to write receipts to static files - // - // We are writing to static files if requested and if there's no receipt pruning configured - let should_static = write_receipts_to.static_files() && !has_receipts_pruning; + let mut receipts_cursor = + self.tx_ref().cursor_write::>()?; // SIGNET: This is a departure from Reth's implementation. Becuase their // impl is on `DatabaseProvider`, it has access to the static file @@ -649,27 +593,18 @@ where // to borrow from the inner, only to clone it. So we break up the // static file provider into a separate variable, and then use it to // create the static file writer. 
- let sfp = should_static.then(|| self.0.static_file_provider()); - let mut receipts_static_writer = sfp - .as_ref() - .map(|sfp| sfp.get_writer(first_block, StaticFileSegment::Receipts)) - .transpose()?; + let sfp = self.0.static_file_provider(); - let has_contract_log_filter = !self.prune_modes_ref().receipts_log_filter.is_empty(); - let contract_log_pruner = - self.prune_modes_ref().receipts_log_filter.group_by_block(tip, None)?; + let mut receipts_static_writer = has_receipts_pruning + .not() + .then(|| sfp.get_writer(first_block, StaticFileSegment::Receipts)) + .transpose()?; // All receipts from the last 128 blocks are required for blockchain tree, even with // [`PruneSegment::ContractLogs`]. let prunable_receipts = PruneMode::Distance(MINIMUM_PRUNING_DISTANCE).should_prune(first_block, tip); - // Prepare set of addresses which logs should not be pruned. - let mut allowed_addresses: HashSet = HashSet::new(); - for (_, addresses) in contract_log_pruner.range(..first_block) { - allowed_addresses.extend(addresses.iter().copied()); - } - for (idx, (receipts, first_tx_index)) in execution_outcome.receipts().iter().zip(block_indices).enumerate() { @@ -690,28 +625,13 @@ where continue; } - // If there are new addresses to retain after this block number, track them - if let Some(new_addresses) = contract_log_pruner.get(&block_number) { - allowed_addresses.extend(new_addresses.iter().copied()); - } - for (idx, receipt) in receipts.iter().map(DataCompat::clone_convert).enumerate() { let receipt_idx = first_tx_index + idx as u64; - // Skip writing receipt if log filter is active and it does not have any logs to - // retain - if prunable_receipts - && has_contract_log_filter - && !receipt.logs().iter().any(|log| allowed_addresses.contains(&log.address)) - { - continue; - } if let Some(writer) = &mut receipts_static_writer { writer.append_receipt(receipt_idx, &receipt)?; - } - - if let Some(cursor) = &mut receipts_cursor { - cursor.append(receipt_idx, &receipt)?; + } else 
{ + receipts_cursor.append(receipt_idx, &receipt)?; } } } diff --git a/crates/db/src/traits.rs b/crates/db/src/traits.rs index 5de0f2c..04efed9 100644 --- a/crates/db/src/traits.rs +++ b/crates/db/src/traits.rs @@ -1,7 +1,7 @@ use crate::{DbExtractionResults, DbSignetEvent, RuChain, SignetDbRw}; use alloy::primitives::{B256, BlockNumber}; use itertools::Itertools; -use reth::providers::{OriginalValuesKnown, ProviderResult, StorageLocation}; +use reth::providers::{OriginalValuesKnown, ProviderResult}; use reth_db::models::StoredBlockBodyIndices; use signet_evm::BlockResult; use signet_node_types::NodeTypesDbTrait; @@ -44,15 +44,10 @@ pub trait RuWriter { header: Option, block: &RecoveredBlock, journal_hash: B256, - write_to: StorageLocation, ) -> ProviderResult; /// Append a zenith block body to the DB. - fn append_signet_block_body( - &self, - body: (BlockNumber, &RecoveredBlock), - write_to: StorageLocation, - ) -> ProviderResult<()>; + fn append_signet_block_body(&self, body: (BlockNumber, &RecoveredBlock)) -> ProviderResult<()>; /// Get zenith headers from the DB. fn get_signet_headers( @@ -64,15 +59,10 @@ pub trait RuWriter { fn take_signet_headers_above( &self, target: BlockNumber, - remove_from: StorageLocation, ) -> ProviderResult>; /// Remove [`Zenith::BlockHeader`] objects above the specified height from the DB. - fn remove_signet_headers_above( - &self, - target: BlockNumber, - remove_from: StorageLocation, - ) -> ProviderResult<()>; + fn remove_signet_headers_above(&self, target: BlockNumber) -> ProviderResult<()>; /// Store an enter event in the DB. fn insert_enter(&self, height: u64, index: u64, exit: Passage::Enter) -> ProviderResult<()>; @@ -158,16 +148,11 @@ pub trait RuWriter { fn take_signet_events_above( &self, target: BlockNumber, - remove_from: StorageLocation, ) -> ProviderResult>; /// Remove [`Passage::EnterToken`], [`Passage::Enter`] and /// [`Transactor::Transact`] events above the specified height from the DB. 
- fn remove_signet_events_above( - &self, - target: BlockNumber, - remove_from: StorageLocation, - ) -> ProviderResult<()>; + fn remove_signet_events_above(&self, target: BlockNumber) -> ProviderResult<()>; /// Get extraction results from the DB. fn get_extraction_results( @@ -214,13 +199,12 @@ pub trait RuWriter { fn take_extraction_results_above( &self, target: BlockNumber, - remove_from: StorageLocation, ) -> ProviderResult> { let range = target..=(1 + self.last_block_number()?); let items = self.get_extraction_results(range)?; trace!(count = items.len(), "got extraction results"); - self.remove_extraction_results_above(target, remove_from)?; + self.remove_extraction_results_above(target)?; trace!("removed extraction results"); Ok(items) } @@ -232,13 +216,9 @@ pub trait RuWriter { /// - [`Passage::Enter`] events /// - [`Transactor::Transact`] events /// - [`Passage::EnterToken`] events - fn remove_extraction_results_above( - &self, - target: BlockNumber, - remove_from: StorageLocation, - ) -> ProviderResult<()> { - self.remove_signet_headers_above(target, remove_from)?; - self.remove_signet_events_above(target, remove_from)?; + fn remove_extraction_results_above(&self, target: BlockNumber) -> ProviderResult<()> { + self.remove_signet_headers_above(target)?; + self.remove_signet_events_above(target)?; Ok(()) } @@ -256,18 +236,10 @@ pub trait RuWriter { /// Take the block and execution range from the DB, reverting the blocks /// and returning the removed information - fn ru_take_blocks_and_execution_above( - &self, - target: BlockNumber, - remove_from: StorageLocation, - ) -> ProviderResult; + fn ru_take_blocks_and_execution_above(&self, target: BlockNumber) -> ProviderResult; /// Remove the block and execution range from the DB. 
- fn ru_remove_blocks_and_execution_above( - &self, - target: BlockNumber, - remove_from: StorageLocation, - ) -> ProviderResult<()>; + fn ru_remove_blocks_and_execution_above(&self, target: BlockNumber) -> ProviderResult<()>; /// Write the state of the rollup to the database. /// @@ -279,7 +251,6 @@ pub trait RuWriter { &self, execution_outcome: &signet_evm::ExecutionOutcome, is_value_known: OriginalValuesKnown, - write_receipts_to: StorageLocation, ) -> ProviderResult<()>; } diff --git a/crates/db/tests/db.rs b/crates/db/tests/db.rs index f59cbfb..70654e0 100644 --- a/crates/db/tests/db.rs +++ b/crates/db/tests/db.rs @@ -6,7 +6,7 @@ use alloy::{ primitives::{Address, B256, U256}, signers::Signature, }; -use reth::providers::{BlockNumReader, BlockReader}; +use reth::providers::{BlockNumReader, BlockReader, HeaderProvider, TransactionsProvider}; use signet_constants::test_utils::{DEPLOY_HEIGHT, RU_CHAIN_ID}; use signet_db::RuWriter; use signet_types::primitives::{RecoveredBlock, SealedBlock, SealedHeader}; @@ -55,9 +55,10 @@ fn test_insert_signet_block() { senders: std::iter::repeat_n(Address::repeat_byte(0x33), 10).collect(), }; - writer - .insert_signet_block(header, &block, journal_hash, reth::providers::StorageLocation::Both) - .unwrap(); + writer.insert_signet_block(header, &block, journal_hash).unwrap(); + writer.commit().unwrap(); + + let writer = factory.provider_rw().unwrap(); // Check basic updates assert_eq!(writer.last_block_number().unwrap(), block.number()); @@ -66,6 +67,11 @@ // This tests resolving `BlockId::Latest` assert_eq!(writer.best_block_number().unwrap(), block.number()); + let txns = writer.transactions_by_block(block.number().into()).unwrap().unwrap(); + assert!(!txns.is_empty()); + let _header = writer.header_by_number(block.number()).unwrap().unwrap(); + let _block = writer.block(block.number().into()).unwrap().unwrap(); + // Check that the block can be loaded back let loaded_block = writer 
.recovered_block_range(block.number()..=block.number()) diff --git a/crates/node/src/node.rs b/crates/node/src/node.rs index 67934e7..2a9fc8b 100644 --- a/crates/node/src/node.rs +++ b/crates/node/src/node.rs @@ -660,10 +660,7 @@ where let mut reverted = None; self.ru_provider.provider_rw()?.update(|writer| { - reverted = Some(writer.ru_take_blocks_and_execution_above( - target, - reth::providers::StorageLocation::Both, - )?); + reverted = Some(writer.ru_take_blocks_and_execution_above(target)?); Ok(()) })?;