From 4060a537f9f6966170fb598e19adea703b9c5cb4 Mon Sep 17 00:00:00 2001 From: Arun Dhyani Date: Thu, 30 Apr 2026 12:18:36 +0530 Subject: [PATCH 01/10] feat(exex/trie): Wire up the Engine in Exex --- rust/Cargo.lock | 2 +- rust/op-reth/crates/exex/Cargo.toml | 3 +- rust/op-reth/crates/exex/src/lib.rs | 700 +++++++----------- rust/op-reth/crates/node/Cargo.toml | 1 - rust/op-reth/crates/node/src/args.rs | 22 +- rust/op-reth/crates/node/src/proof_history.rs | 2 - rust/op-reth/crates/trie/Cargo.toml | 2 + rust/op-reth/crates/trie/src/db/mod.rs | 3 + rust/op-reth/crates/trie/src/engine/handle.rs | 2 +- rust/op-reth/crates/trie/src/engine/mod.rs | 4 +- .../crates/trie/src/engine/tasks/mod.rs | 4 +- rust/op-reth/crates/trie/src/lib.rs | 6 +- rust/op-reth/crates/trie/src/live.rs | 234 ------ rust/op-reth/crates/trie/src/metrics.rs | 125 ++-- rust/op-reth/crates/trie/tests/live.rs | 152 ++-- .../tests/proofs/core/account_proofs_test.go | 6 +- .../tests/proofs/core/execute_payload_test.go | 4 +- .../proofs/core/execution_witness_test.go | 2 +- .../tests/proofs/core/simple_storage_test.go | 10 +- rust/op-reth/tests/proofs/prune/prune_test.go | 6 +- rust/op-reth/tests/proofs/reorg/reorg_test.go | 2 +- 21 files changed, 427 insertions(+), 865 deletions(-) delete mode 100644 rust/op-reth/crates/trie/src/live.rs diff --git a/rust/Cargo.lock b/rust/Cargo.lock index e6568370b7a..e477560ba85 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -11117,7 +11117,6 @@ dependencies = [ "eyre", "futures", "futures-util", - "humantime", "op-alloy-consensus", "op-alloy-network", "op-alloy-rpc-types-engine", @@ -11346,6 +11345,7 @@ dependencies = [ "reth-execution-errors", "reth-metrics", "reth-node-api", + "reth-optimism-trie", "reth-primitives-traits", "reth-provider", "reth-revm", diff --git a/rust/op-reth/crates/exex/Cargo.toml b/rust/op-reth/crates/exex/Cargo.toml index 9ba9500eda0..28b01c85dbc 100644 --- a/rust/op-reth/crates/exex/Cargo.toml +++ b/rust/op-reth/crates/exex/Cargo.toml @@ 
-31,12 +31,13 @@ alloy-eips.workspace = true eyre.workspace = true futures-util.workspace = true tracing.workspace = true -tokio.workspace = true [dev-dependencies] futures.workspace = true reth-db = { workspace = true, features = ["test-utils"] } +reth-optimism-trie = { workspace = true, features = ["test-utils"] } reth-node-builder.workspace = true +tokio.workspace = true reth-optimism-node.workspace = true reth-optimism-chainspec.workspace = true reth-primitives-traits.workspace = true diff --git a/rust/op-reth/crates/exex/src/lib.rs b/rust/op-reth/crates/exex/src/lib.rs index 96bf159f5df..15167dd4850 100644 --- a/rust/op-reth/crates/exex/src/lib.rs +++ b/rust/op-reth/crates/exex/src/lib.rs @@ -14,38 +14,30 @@ use futures_util::TryStreamExt; use reth_execution_types::Chain; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::{FullNodeComponents, NodePrimitives, NodeTypes}; +use reth_provider::BlockNumReader; use reth_optimism_trie::{ - OpProofStoragePrunerTask, OpProofsProviderRO, OpProofsStorage, OpProofsStore, - live::LiveTrieCollector, + engine::EngineHandle, + OpProofsProviderRO, OpProofStoragePruner, OpProofsStore, }; -use reth_provider::{BlockNumReader, BlockReader, TransactionVariant}; -use reth_trie::{HashedPostStateSorted, SortedTrieData, updates::TrieUpdatesSorted}; -use std::{sync::Arc, time::Duration}; -use tokio::{sync::watch, task, time}; -use tracing::{debug, error, info}; +use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted, SortedTrieData}; +use std::sync::Arc; +use tracing::{debug, info}; // Safety threshold for maximum blocks to prune automatically on startup. // If the required prune exceeds this, the node will error out and require manual pruning. Default // is 1000 blocks. const MAX_PRUNE_BLOCKS_STARTUP: u64 = 1000; -/// How many blocks to process in a single batch before yielding. Default is 50 blocks. 
-const SYNC_BLOCKS_BATCH_SIZE: usize = 50; - -/// How close to tip before we process blocks in real-time vs batch. Default is 1024 blocks. -const REAL_TIME_BLOCKS_THRESHOLD: u64 = 1024; - -/// How long to sleep when sync task is caught up. Default is 5 seconds. -const SYNC_IDLE_SLEEP_SECS: u64 = 5; - /// Default proofs history window: 1 month of blocks at 2s block time const DEFAULT_PROOFS_HISTORY_WINDOW: u64 = 1_296_000; -/// Default interval between proof-storage prune runs. Default is 15 seconds. -const DEFAULT_PRUNE_INTERVAL: Duration = Duration::from_secs(15); +/// Default verification interval: disabled (0 = no periodic re-execution) +const DEFAULT_VERIFICATION_INTERVAL: u64 = 0; -/// Default verification interval: disabled -const DEFAULT_VERIFICATION_INTERVAL: u64 = 0; // disabled +/// Number of blocks behind the chain tip within which we consider the ExEx to be in real-time +/// operation. Notifications further behind than this are treated as sync catch-up and handled +/// asynchronously by the engine. +const REAL_TIME_BLOCKS_THRESHOLD: u64 = 64; /// Builder for [`OpProofsExEx`]. #[derive(Debug)] @@ -54,9 +46,8 @@ where Node: FullNodeComponents, { ctx: ExExContext, - storage: OpProofsStorage, + storage: Storage, proofs_history_window: u64, - proofs_history_prune_interval: Duration, verification_interval: u64, } @@ -65,12 +56,11 @@ where Node: FullNodeComponents, { /// Create a new builder with required parameters and defaults. - pub const fn new(ctx: ExExContext, storage: OpProofsStorage) -> Self { + pub const fn new(ctx: ExExContext, storage: Storage) -> Self { Self { ctx, storage, proofs_history_window: DEFAULT_PROOFS_HISTORY_WINDOW, - proofs_history_prune_interval: DEFAULT_PRUNE_INTERVAL, verification_interval: DEFAULT_VERIFICATION_INTERVAL, } } @@ -81,13 +71,11 @@ where self } - /// Sets the interval between proof-storage prune runs. 
- pub const fn with_proofs_history_prune_interval(mut self, interval: Duration) -> Self { - self.proofs_history_prune_interval = interval; - self - } - - /// Sets the verification interval. + /// Sets the interval at which blocks are re-executed to verify pre-computed trie data. + /// + /// Every `interval`-th block (by block number) will be executed in full even when + /// pre-computed trie data is available, allowing detection of any divergence. + /// Set to `0` (the default) to disable periodic verification. pub const fn with_verification_interval(mut self, interval: u64) -> Self { self.verification_interval = interval; self @@ -99,7 +87,6 @@ where ctx: self.ctx, storage: self.storage, proofs_history_window: self.proofs_history_window, - proofs_history_prune_interval: self.proofs_history_prune_interval, verification_interval: self.verification_interval, } } @@ -124,8 +111,8 @@ where /// use reth_node_builder::{NodeBuilder, NodeConfig}; /// use reth_optimism_chainspec::BASE_MAINNET; /// use reth_optimism_exex::OpProofsExEx; -/// use reth_optimism_node::{OpNode, args::RollupArgs}; -/// use reth_optimism_trie::{InMemoryProofsStorage, OpProofsStorage, db::MdbxProofsStorage}; +/// use reth_optimism_node::{args::RollupArgs, OpNode}; +/// use reth_optimism_trie::{db::MdbxProofsStorageV2, InMemoryProofsStorage, OpProofsStorage}; /// use reth_provider::providers::BlockchainProvider; /// use std::{sync::Arc, time::Duration}; /// @@ -142,13 +129,12 @@ where /// # let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); /// # let storage_path = temp_dir.path().join("proofs_storage"); /// -/// # let storage: OpProofsStorage> = Arc::new( -/// # MdbxProofsStorage::new(&storage_path).expect("Failed to create MdbxProofsStorage"), +/// # let storage: OpProofsStorage> = Arc::new( +/// # MdbxProofsStorageV2::new(&storage_path).expect("Failed to create MdbxProofsStorageV2"), /// # ).into(); /// /// let storage_exec = storage.clone(); /// let proofs_history_window = 
1_296_000u64; -/// let proofs_history_prune_interval = Duration::from_secs(3600); /// /// // Verification interval: perform full execution every N blocks /// let verification_interval = 0; // 0 = disabled, 100 = verify every 100 blocks @@ -162,7 +148,6 @@ where /// .install_exex("proofs-history", move |exex_context| async move { /// Ok(OpProofsExEx::builder(exex_context, storage_exec) /// .with_proofs_history_window(proofs_history_window) -/// .with_proofs_history_prune_interval(proofs_history_prune_interval) /// .with_verification_interval(verification_interval) /// .build() /// .run() @@ -180,15 +165,12 @@ where /// events. ctx: ExExContext, /// The type of storage DB. - storage: OpProofsStorage, + storage: Storage, /// The window to span blocks for proofs history. Value is the number of blocks, received as /// cli arg. proofs_history_window: u64, - /// Interval between proof-storage prune runs - proofs_history_prune_interval: Duration, - /// Verification interval: perform full block execution every N blocks for data integrity. - /// If 0, verification is disabled (always use fast path when available). - /// If 1, verification is always enabled (always execute blocks). + /// How often (in blocks) to re-execute a block for verification even when pre-computed trie + /// data is available. `0` disables periodic verification. verification_interval: u64, } @@ -197,14 +179,14 @@ where Node: FullNodeComponents, { /// Create a new `OpProofsExEx` instance. - pub fn new(ctx: ExExContext, storage: OpProofsStorage) -> Self { + pub fn new(ctx: ExExContext, storage: Storage) -> Self { OpProofsExExBuilder::new(ctx, storage).build() } /// Create a new builder for `OpProofsExEx`. 
pub const fn builder( ctx: ExExContext, - storage: OpProofsStorage, + storage: Storage, ) -> OpProofsExExBuilder { OpProofsExExBuilder::new(ctx, storage) } @@ -214,31 +196,28 @@ impl OpProofsExEx where Node: FullNodeComponents>, Primitives: NodePrimitives, + Primitives::Block: Clone, Storage: OpProofsStore + Clone + 'static, { /// Main execution loop for the ExEx pub async fn run(mut self) -> eyre::Result<()> { self.ensure_initialized()?; - let sync_target_tx = self.spawn_sync_task(); - let prune_task = OpProofStoragePrunerTask::new( + let pruner = OpProofStoragePruner::new( self.storage.clone(), self.ctx.provider().clone(), self.proofs_history_window, - self.proofs_history_prune_interval, ); - self.ctx - .task_executor() - .spawn_with_graceful_shutdown_signal(|signal| Box::pin(prune_task.run(signal))); - let collector = LiveTrieCollector::new( + let engine_handle = EngineHandle::spawn( self.ctx.evm_config().clone(), self.ctx.provider().clone(), - &self.storage, + self.storage.clone(), + pruner, ); while let Some(notification) = self.ctx.notifications.try_next().await? { - self.handle_notification(notification, &collector, &sync_target_tx)?; + self.handle_notification(notification, &engine_handle)?; } Ok(()) @@ -257,7 +236,7 @@ where } }; - let latest_block_number: u64 = match provider_ro.get_latest_block_number()? { + let latest_block_number = match provider_ro.get_latest_block_number()? { Some((n, _)) => n, None => { return Err(eyre::eyre!( @@ -277,137 +256,29 @@ where "Configuration requires pruning {} blocks, which exceeds the safety threshold of {}. \ Huge prune operations can stall the node. \ Please run 'op-reth proofs prune' manually before starting the node.", - blocks_to_prune, - MAX_PRUNE_BLOCKS_STARTUP + blocks_to_prune, + MAX_PRUNE_BLOCKS_STARTUP )); } } - // Need to update the earliest block metric on startup as this is not called frequently and - // can show outdated info. When metrics are disabled, this is a no-op. 
- #[cfg(feature = "metrics")] - { - self.storage - .metrics() - .block_metrics() - .earliest_number - .set(earliest_block_number as f64); - } - - Ok(()) - } - - /// Spawn the background sync task and return the target sender - fn spawn_sync_task(&self) -> watch::Sender { - let (sync_target_tx, sync_target_rx) = watch::channel(0u64); - - let task_storage = self.storage.clone(); - let task_provider = self.ctx.provider().clone(); - let task_evm_config = self.ctx.evm_config().clone(); - - self.ctx.task_executor().spawn_critical_task( - "optimism::exex::proofs_storage_sync_loop", - async move { - let storage = task_storage.clone(); - let task_collector = - LiveTrieCollector::new(task_evm_config, task_provider.clone(), &storage); - Self::sync_loop(sync_target_rx, task_storage, task_provider, &task_collector).await; - }, - ); - - sync_target_tx - } - - /// Background sync loop that processes blocks up to the target - async fn sync_loop( - mut sync_target_rx: watch::Receiver, - storage: OpProofsStorage, - provider: Node::Provider, - collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, - ) { - debug!(target: "optimism::exex", "Starting proofs storage sync loop"); - - loop { - let target = *sync_target_rx.borrow_and_update(); - let latest = match storage.provider_ro().and_then(|p| p.get_latest_block_number()) { - Ok(Some((n, _))) => n, - Ok(None) => { - error!(target: "optimism::exex", "No blocks stored in proofs storage during sync loop"); - continue; - } - Err(e) => { - error!(target: "optimism::exex", error = ?e, "Failed to get latest block"); - continue; - } - }; - - if latest >= target { - time::sleep(Duration::from_secs(SYNC_IDLE_SLEEP_SECS)).await; - continue; - } - - // Process one batch - if let Err(e) = - Self::process_batch(latest, target, &provider, collector, SYNC_BLOCKS_BATCH_SIZE) - { - error!(target: "optimism::exex", error = ?e, "Batch processing failed"); - } - - // Yield to allow other tasks to run - debug!(target: "optimism::exex", 
latest_stored = latest, target, "Batch processed, yielding"); - task::yield_now().await; - } - } - - /// Process a batch of blocks from start to target (up to `batch_size`) - fn process_batch( - start: u64, - target: u64, - provider: &Node::Provider, - collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, - batch_size: usize, - ) -> eyre::Result<()> { - let end = (start + batch_size as u64).min(target); - debug!( - target: "optimism::exex", - start, - end, - "Processing proofs storage sync batch" - ); - - for block_num in (start + 1)..=end { - let block = provider - .recovered_block(block_num.into(), TransactionVariant::NoHash)? - .ok_or_else(|| eyre::eyre!("Missing block {}", block_num))?; - - collector.execute_and_store_block_updates(&block)?; - } - Ok(()) } fn handle_notification( &self, notification: ExExNotification, - collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, - sync_target_tx: &watch::Sender, + engine_handle: &EngineHandle, ) -> eyre::Result<()> { - let latest_stored = match self.storage.provider_ro()?.get_latest_block_number()? { - Some((n, _)) => n, - None => { - return Err(eyre::eyre!("No blocks stored in proofs storage")); - } - }; - match ¬ification { ExExNotification::ChainCommitted { new } => { - self.handle_chain_committed(new.clone(), latest_stored, collector, sync_target_tx)? + self.handle_chain_committed(new.clone(), engine_handle)? } ExExNotification::ChainReorged { old, new } => { - self.handle_chain_reorged(old.clone(), new.clone(), latest_stored, collector)? + self.handle_chain_reorged(old.clone(), new.clone(), engine_handle)? } ExExNotification::ChainReverted { old } => { - self.handle_chain_reverted(old.clone(), latest_stored, collector)? + self.handle_chain_reverted(old.clone(), engine_handle)? 
} } @@ -421,9 +292,7 @@ where fn handle_chain_committed( &self, new: Arc>, - latest_stored: u64, - collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, - sync_target_tx: &watch::Sender, + engine_handle: &EngineHandle, ) -> eyre::Result<()> { debug!( target: "optimism::exex", @@ -432,120 +301,34 @@ where "ChainCommitted notification received", ); - // If tip is not newer than what we have, nothing to do. - if new.tip().number() <= latest_stored { - debug!( - target: "optimism::exex", - block_number = new.tip().number(), - latest_stored, - "Already processed, skipping" - ); - return Ok(()); - } - let best_block = self.ctx.provider().best_block_number()?; - let is_sequential = new.tip().number() == latest_stored + 1; let is_near_tip = best_block.saturating_sub(new.tip().number()) < REAL_TIME_BLOCKS_THRESHOLD; - - if is_sequential && is_near_tip { - debug!( - target: "optimism::exex", - block_number = new.tip().number(), - latest_stored, - best_block, - "Processing in real-time" - ); - - // Process each block from latest_stored + 1 to tip - let start = latest_stored.saturating_add(1); - for block_number in start..=new.tip().number() { - self.process_block(block_number, &new, collector)?; - } - } else { - debug!( - target: "optimism::exex", - block_number = new.tip().number(), - latest_stored, - best_block, - is_sequential, - is_near_tip, - "Scheduling batch processing via sync task" - ); - - // Update the sync target to the new tip - sync_target_tx.send(new.tip().number())?; + if !is_near_tip { + engine_handle.sync_to(new.tip().number())?; + return Ok(()); } - Ok(()) - } + // `Chain::blocks()` is a BTreeMap so iteration is already ordered oldest → newest. + for (&block_number, block) in new.blocks() { + // Fast path: use pre-computed trie data only when verification is not due. 
+ let should_verify = self.verification_interval > 0 + && block_number.is_multiple_of(self.verification_interval); + let precomputed = (!should_verify).then(|| new.trie_data_at(block_number)).flatten(); - /// Process a single block - either from chain or provider - fn process_block( - &self, - block_number: u64, - chain: &Chain, - collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, - ) -> eyre::Result<()> { - // Check if this block should be verified via full execution - let should_verify = self.verification_interval > 0 && - block_number.is_multiple_of(self.verification_interval); - - // Try to get block data from the chain first - // 1. Fast Path: Try to use pre-computed state from the notification - if let Some(block) = chain.blocks().get(&block_number) { - // Check if we have BOTH trie updates and hashed state. - // If either is missing, we fall back to execution to ensure data integrity. - if let Some((trie_updates, hashed_state)) = chain.trie_data_at(block_number).map(|d| { + if let Some(d) = precomputed { let SortedTrieData { hashed_state, trie_updates } = d.get(); - (trie_updates, hashed_state) - }) { - // Use fast path only if we're not scheduled to verify this block - if !should_verify { - debug!( - target: "optimism::exex", - block_number, - "Using pre-computed state updates from notification" - ); - - collector.store_block_updates( - block.block_with_parent(), - (**trie_updates).clone(), - (**hashed_state).clone(), - )?; - - return Ok(()); - } - - info!( - target: "optimism::exex", - block_number, - verification_interval = self.verification_interval, - "Periodic verification: performing full block execution" - ); + engine_handle.index_block( + block.block_with_parent(), + (**trie_updates).clone(), + (**hashed_state).clone(), + )?; + } else { + // Slow path: execute the block in full (no trie data, or verification interval hit). 
+ engine_handle.execute_block(block)?; } - - debug!( - target: "optimism::exex", - block_number, - "Block present in notification but state updates missing, falling back to execution" - ); } - // 2. Slow Path: Block not in chain (or state missing), fetch from provider and execute - debug!( - target: "optimism::exex", - block_number, - "Fetching block from provider for execution", - ); - - let block = self - .ctx - .provider() - .recovered_block(block_number.into(), TransactionVariant::NoHash)? - .ok_or_else(|| eyre::eyre!("Missing block {} in provider", block_number))?; - - collector.execute_and_store_block_updates(&block)?; Ok(()) } @@ -553,8 +336,7 @@ where &self, old: Arc>, new: Arc>, - latest_stored: u64, - collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, + engine_handle: &EngineHandle, ) -> eyre::Result<()> { info!( old_block_number = old.tip().number(), @@ -564,27 +346,20 @@ where "ChainReorged notification received", ); - if old.first().number() > latest_stored { - debug!(target: "optimism::exex", "Reorg beyond stored blocks, skipping"); - return Ok(()); + if old.fork_block() != new.fork_block() { + return Err(eyre::eyre!( + "Fork blocks do not match: old fork block {:?}, new fork block {:?}", + old.fork_block(), + new.fork_block() + )); } - // find the common ancestor let mut block_updates: Vec<( BlockWithParent, Arc, Arc, )> = Vec::with_capacity(new.len()); for block_number in new.blocks().keys() { - // verify if the fork point matches - if old.fork_block() != new.fork_block() { - return Err(eyre::eyre!( - "Fork blocks do not match: old fork block {:?}, new fork block {:?}", - old.fork_block(), - new.fork_block() - )); - } - let block = new .blocks() .get(block_number) @@ -605,7 +380,7 @@ where )); } - collector.unwind_and_store_block_updates(block_updates)?; + engine_handle.reorg(block_updates)?; Ok(()) } @@ -613,8 +388,7 @@ where fn handle_chain_reverted( &self, old: Arc>, - latest_stored: u64, - collector: &LiveTrieCollector<'_, 
Node::Evm, Node::Provider, Storage>, + engine_handle: &EngineHandle, ) -> eyre::Result<()> { info!( target: "optimism::exex", @@ -623,17 +397,7 @@ where "ChainReverted notification received", ); - if old.first().number() > latest_stored { - debug!( - target: "optimism::exex", - first_block_number = old.first().number(), - latest_stored = latest_stored, - "Fork block number is greater than latest stored, skipping", - ); - return Ok(()); - } - - collector.unwind_history(old.first().block_with_parent())?; + engine_handle.unwind(old.first().block_with_parent())?; Ok(()) } } @@ -642,21 +406,17 @@ where mod tests { use super::*; use alloy_consensus::private::alloy_primitives::B256; - use alloy_eips::{BlockNumHash, NumHash, eip1898::BlockWithParent}; + use alloy_eips::{eip1898::BlockWithParent, BlockNumHash, NumHash}; use reth_db::test_utils::tempdir_path; use reth_ethereum_primitives::{Block, Receipt}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_optimism_trie::{ - BlockStateDiff, OpProofsProviderRO, OpProofsProviderRw, OpProofsStorage, OpProofsStore, - db::MdbxProofsStorage, + db::MdbxProofsStorageV2, engine::EngineHandle, BlockStateDiff, OpProofsProviderRO, + OpProofsProviderRw, OpProofsStore, }; - - fn get_latest(proofs: &OpProofsStorage) -> Option<(u64, B256)> { - proofs.provider_ro().expect("provider_ro").get_latest_block_number().expect("get latest") - } use reth_primitives_traits::RecoveredBlock; - use reth_trie::{HashedPostStateSorted, LazyTrieData, updates::TrieUpdatesSorted}; - use std::{collections::BTreeMap, default::Default, sync::Arc, time::Duration}; + use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted, LazyTrieData}; + use std::{collections::BTreeMap, default::Default, sync::Arc}; // ------------------------------------------------------------------------- // Helpers: deterministic blocks and deterministic Chain with precomputed updates @@ -717,25 +477,23 @@ mod tests { } // Init_storage to the genesis block - fn 
init_storage(storage: OpProofsStorage) { + fn init_storage(storage: S) { let genesis_block = NumHash::new(0, b256(0x00)); - let provider_rw = storage.provider_rw().expect("provider_rw"); - provider_rw - .set_earliest_block_number(genesis_block.number, genesis_block.hash) + let rw = storage.provider_rw().expect("provider rw"); + rw.set_earliest_block_number(genesis_block.number, genesis_block.hash) .expect("set earliest"); - provider_rw - .store_trie_updates( - BlockWithParent::new(genesis_block.hash, genesis_block), - BlockStateDiff::default(), - ) - .expect("store trie update"); - provider_rw.commit().expect("commit"); + rw.store_trie_updates( + BlockWithParent::new(genesis_block.hash, genesis_block), + BlockStateDiff::default(), + ) + .expect("store trie update"); + rw.commit().expect("commit"); } // Initialize exex with config fn build_test_exex( ctx: ExExContext, - storage: OpProofsStorage, + storage: Store, ) -> OpProofsExEx where NodeT: FullNodeComponents, @@ -743,7 +501,6 @@ mod tests { { OpProofsExEx::builder(ctx, storage) .with_proofs_history_window(20) - .with_proofs_history_prune_interval(Duration::from_secs(3600)) .with_verification_interval(1000) .build() } @@ -752,30 +509,36 @@ mod tests { async fn handle_notification_chain_committed() { // MDBX proofs storage let dir = tempdir_path(); - let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); - let proofs: OpProofsStorage> = store.clone().into(); + let store = Arc::new(MdbxProofsStorageV2::new(dir.as_path()).expect("env")); - init_storage(proofs.clone()); + init_storage(store.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let collector = LiveTrieCollector::new( + let pruner = OpProofStoragePruner::new( + store.clone(), + ctx.components.provider.clone(), + 20, + ); + let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), - &proofs, + 
store.clone(), + pruner, + 1, + 2, ); - let exex = build_test_exex(ctx, proofs.clone()); + let exex = build_test_exex(ctx, store.clone()); // Notification: chain committed 1..5 let new_chain = Arc::new(mk_chain_with_updates(1, 1, None)); let notif = ExExNotification::ChainCommitted { new: new_chain }; - let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); + exex.handle_notification(notif, &engine_handle).expect("handle chain commit"); - exex.handle_notification(notif, &collector, &sync_target_tx).expect("handle chain commit"); - - let latest = get_latest(&proofs).expect("ok").0; + engine_handle.flush(); + let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 1); } @@ -783,39 +546,46 @@ mod tests { async fn handle_notification_chain_committed_skips_already_processed() { // MDBX proofs storage let dir = tempdir_path(); - let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); - let proofs: OpProofsStorage> = store.clone().into(); + let store = Arc::new(MdbxProofsStorageV2::new(dir.as_path()).expect("env")); - init_storage(proofs.clone()); + init_storage(store.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let collector = LiveTrieCollector::new( + let pruner = OpProofStoragePruner::new( + store.clone(), + ctx.components.provider.clone(), + 20, + ); + let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), - &proofs, + store.clone(), + pruner, + 1, + 2, ); - let exex = build_test_exex(ctx, proofs.clone()); + let exex = build_test_exex(ctx, store.clone()); - let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); // Process blocks 1..5 sequentially to trigger real-time path (synchronous) for i in 1..=5 { let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); let notif = ExExNotification::ChainCommitted { 
new: new_chain }; - exex.handle_notification(notif, &collector, &sync_target_tx) - .expect("handle chain commit"); + exex.handle_notification(notif, &engine_handle).expect("handle chain commit"); } - let latest = get_latest(&proofs).expect("ok").0; + engine_handle.flush(); + let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 5); // Try to handle already processed notification let new_chain = Arc::new(mk_chain_with_updates(5, 5, Some(hash_for_num(10)))); let notif = ExExNotification::ChainCommitted { new: new_chain }; - exex.handle_notification(notif, &collector, &sync_target_tx).expect("handle chain commit"); - let latest = get_latest(&proofs).expect("ok"); + exex.handle_notification(notif, &engine_handle).expect("handle chain commit"); + engine_handle.flush(); + let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok"); assert_eq!(latest.0, 5); assert_eq!(latest.1, hash_for_num(5)); // block was not updated } @@ -824,32 +594,38 @@ mod tests { async fn handle_notification_chain_reorged() { // MDBX proofs storage let dir = tempdir_path(); - let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); - let proofs: OpProofsStorage> = store.clone().into(); + let store = Arc::new(MdbxProofsStorageV2::new(dir.as_path()).expect("env")); - init_storage(proofs.clone()); + init_storage(store.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let collector = LiveTrieCollector::new( + let pruner = OpProofStoragePruner::new( + store.clone(), + ctx.components.provider.clone(), + 20, + ); + let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), - &proofs, + store.clone(), + pruner, + 1, + 2, ); - let exex = build_test_exex(ctx, proofs.clone()); - - let (sync_target_tx, _) = 
tokio::sync::watch::channel(0u64); + let exex = build_test_exex(ctx, store.clone()); for i in 1..=10 { let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); let notif = ExExNotification::ChainCommitted { new: new_chain }; - exex.handle_notification(notif, &collector, &sync_target_tx) + exex.handle_notification(notif, &engine_handle) .expect("handle chain commit"); } - let latest = get_latest(&proofs).expect("ok").0; + engine_handle.flush(); + let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 10); // Now the tip is 10, and we want to reorg from block 6..12 @@ -859,9 +635,10 @@ mod tests { // Notification: chain reorged 6..12 let notif = ExExNotification::ChainReorged { new: new_chain, old: old_chain }; - exex.handle_notification(notif, &collector, &sync_target_tx) + exex.handle_notification(notif, &engine_handle) .expect("handle chain re-orged"); - let latest = get_latest(&proofs).expect("ok").0; + engine_handle.flush(); + let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 12); } @@ -869,45 +646,54 @@ mod tests { async fn handle_notification_chain_reorged_skips_beyond_stored_blocks() { // MDBX proofs storage let dir = tempdir_path(); - let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); - let proofs: OpProofsStorage> = store.clone().into(); + let store = Arc::new(MdbxProofsStorageV2::new(dir.as_path()).expect("env")); - init_storage(proofs.clone()); + init_storage(store.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let collector = LiveTrieCollector::new( + let pruner = OpProofStoragePruner::new( + store.clone(), + ctx.components.provider.clone(), + 20, + ); + let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), 
- &proofs, + store.clone(), + pruner, + 1, + 2, ); - let exex = build_test_exex(ctx, proofs.clone()); - - let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); + let exex = build_test_exex(ctx, store.clone()); for i in 1..=10 { let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); let notif = ExExNotification::ChainCommitted { new: new_chain }; - exex.handle_notification(notif, &collector, &sync_target_tx) + exex.handle_notification(notif, &engine_handle) .expect("handle chain commit"); } - let latest = get_latest(&proofs).expect("ok").0; + engine_handle.flush(); + let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 10); - // Now the tip is 10, and we want to reorg from block 12..15 + // Now the tip is 10, and we want to reorg starting at block 12 (beyond stored tip). + // Both chains share the same fork point (block 11), so this is a valid reorg notification + // that starts beyond what we've indexed — the engine should skip it. 
let old_chain = Arc::new(mk_chain_with_updates(12, 15, None)); - let new_chain = Arc::new(mk_chain_with_updates(10, 20, None)); + let new_chain = Arc::new(mk_chain_with_updates(12, 20, None)); - // Notification: chain reorged 12..15 + // Notification: chain reorged 12..20, fork at 11 let notif = ExExNotification::ChainReorged { new: new_chain, old: old_chain }; - exex.handle_notification(notif, &collector, &sync_target_tx) + exex.handle_notification(notif, &engine_handle) .expect("handle chain re-orged"); - let latest = get_latest(&proofs).expect("ok").0; + engine_handle.flush(); + let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 10); } @@ -915,33 +701,39 @@ mod tests { async fn handle_notification_chain_reverted() { // MDBX proofs storage let dir = tempdir_path(); - let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); - let proofs: OpProofsStorage> = store.clone().into(); + let store = Arc::new(MdbxProofsStorageV2::new(dir.as_path()).expect("env")); - init_storage(proofs.clone()); + init_storage(store.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let collector = LiveTrieCollector::new( + let pruner = OpProofStoragePruner::new( + store.clone(), + ctx.components.provider.clone(), + 20, + ); + let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), - &proofs, + store.clone(), + pruner, + 1, + 2, ); - let exex = build_test_exex(ctx, proofs.clone()); - - let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); + let exex = build_test_exex(ctx, store.clone()); for i in 1..=10 { let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); let notif = ExExNotification::ChainCommitted { new: new_chain }; - exex.handle_notification(notif, &collector, &sync_target_tx) + exex.handle_notification(notif, 
&engine_handle) .expect("handle chain commit"); } - let latest = get_latest(&proofs).expect("ok").0; + engine_handle.flush(); + let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 10); // Now the tip is 10, and we want to revert from block 9..10 @@ -950,9 +742,10 @@ mod tests { // Notification: chain reverted 9..10 let notif = ExExNotification::ChainReverted { old: old_chain }; - exex.handle_notification(notif, &collector, &sync_target_tx) + exex.handle_notification(notif, &engine_handle) .expect("handle chain reverted"); - let latest = get_latest(&proofs).expect("ok").0; + engine_handle.flush(); + let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 8); } @@ -960,33 +753,38 @@ mod tests { async fn handle_notification_chain_reverted_skips_beyond_stored_blocks() { // MDBX proofs storage let dir = tempdir_path(); - let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); - let proofs: OpProofsStorage> = store.clone().into(); + let store = Arc::new(MdbxProofsStorageV2::new(dir.as_path()).expect("env")); - init_storage(proofs.clone()); + init_storage(store.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let collector = LiveTrieCollector::new( + let pruner = OpProofStoragePruner::new( + store.clone(), + ctx.components.provider.clone(), + 20, + ); + let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), - &proofs, + store.clone(), + pruner, + 1, + 2, ); - let exex = build_test_exex(ctx, proofs.clone()); - - let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); + let exex = build_test_exex(ctx, store.clone()); for i in 1..=5 { let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); let notif = 
ExExNotification::ChainCommitted { new: new_chain }; - exex.handle_notification(notif, &collector, &sync_target_tx) - .expect("handle chain commit"); + exex.handle_notification(notif, &engine_handle).expect("handle chain commit"); } - let latest = get_latest(&proofs).expect("ok").0; + engine_handle.flush(); + let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 5); // Now the tip is 10, and we want to revert from block 9..10 @@ -995,9 +793,10 @@ mod tests { // Notification: chain reverted 9..10 let notif = ExExNotification::ChainReverted { old: old_chain }; - exex.handle_notification(notif, &collector, &sync_target_tx) + exex.handle_notification(notif, &engine_handle) .expect("handle chain reverted"); - let latest = get_latest(&proofs).expect("ok").0; + engine_handle.flush(); + let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; assert_eq!(latest, 5); } @@ -1005,13 +804,12 @@ mod tests { async fn ensure_initialized_errors_on_storage_not_initialized() { // MDBX proofs storage let dir = tempdir_path(); - let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); - let proofs: OpProofsStorage> = store.clone().into(); + let store = Arc::new(MdbxProofsStorageV2::new(dir.as_path()).expect("env")); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let exex = build_test_exex(ctx, proofs.clone()); + let exex = build_test_exex(ctx, store.clone()); let _ = exex.ensure_initialized().expect_err("should return error"); } @@ -1019,25 +817,27 @@ mod tests { async fn ensure_initialized_errors_when_prune_exceeds_threshold() { // MDBX proofs storage let dir = tempdir_path(); - let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); - let proofs: OpProofsStorage> = store.clone().into(); + let store = 
Arc::new(MdbxProofsStorageV2::new(dir.as_path()).expect("env")); - init_storage(proofs.clone()); + init_storage(store.clone()); for i in 1..1100 { - let p = proofs.provider_rw().expect("provider_rw"); - p.store_trie_updates( - BlockWithParent::new(hash_for_num(i - 1), BlockNumHash::new(i, hash_for_num(i))), + let rw = store.provider_rw().expect("provider rw"); + rw.store_trie_updates( + BlockWithParent::new( + hash_for_num(i - 1), + BlockNumHash::new(i, hash_for_num(i)), + ), BlockStateDiff::default(), ) .expect("store trie update"); - p.commit().expect("commit"); + rw.commit().expect("commit"); } let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let exex = build_test_exex(ctx, proofs.clone()); + let exex = build_test_exex(ctx, store.clone()); let _ = exex.ensure_initialized().expect_err("should return error"); } @@ -1045,15 +845,14 @@ mod tests { async fn ensure_initialized_succeeds() { // MDBX proofs storage let dir = tempdir_path(); - let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); - let proofs: OpProofsStorage> = store.clone().into(); + let store = Arc::new(MdbxProofsStorageV2::new(dir.as_path()).expect("env")); - init_storage(proofs.clone()); + init_storage(store.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let exex = build_test_exex(ctx, proofs.clone()); + let exex = build_test_exex(ctx, store.clone()); exex.ensure_initialized().expect("should not return error"); } @@ -1061,71 +860,76 @@ mod tests { async fn handle_notification_errors_on_empty_storage() { // MDBX proofs storage let dir = tempdir_path(); - let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); - let proofs: OpProofsStorage> = store.clone().into(); + let store = Arc::new(MdbxProofsStorageV2::new(dir.as_path()).expect("env")); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let 
collector = LiveTrieCollector::new( + let pruner = OpProofStoragePruner::new( + store.clone(), + ctx.components.provider.clone(), + 20, + ); + let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), - &proofs, + store.clone(), + pruner, + 1, + 2, ); - let exex = build_test_exex(ctx, proofs.clone()); + let exex = build_test_exex(ctx, store.clone()); // Any notification will do let new_chain = Arc::new(mk_chain_with_updates(1, 5, None)); let notif = ExExNotification::ChainCommitted { new: new_chain }; - let (sync_target_tx, _) = tokio::sync::watch::channel(0u64); - let err = exex.handle_notification(notif, &collector, &sync_target_tx).unwrap_err(); - assert_eq!(err.to_string(), "No blocks stored in proofs storage"); + let err = exex.handle_notification(notif, &engine_handle).unwrap_err(); + // Error now comes from the engine layer (storage not initialised). + assert_eq!(err.to_string(), "No blocks found"); } #[tokio::test] async fn handle_notification_schedules_async_on_gap() { // MDBX proofs storage let dir = tempdir_path(); - let store = Arc::new(MdbxProofsStorage::new(dir.as_path()).expect("env")); - let proofs: OpProofsStorage> = store.clone().into(); + let store = Arc::new(MdbxProofsStorageV2::new(dir.as_path()).expect("env")); - init_storage(proofs.clone()); + init_storage(store.clone()); let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let collector = LiveTrieCollector::new( + let pruner = OpProofStoragePruner::new( + store.clone(), + ctx.components.provider.clone(), + 20, + ); + let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), - &proofs, + store.clone(), + pruner, + 1, + 2, ); - let exex = build_test_exex(ctx, proofs.clone()); + + let exex = build_test_exex(ctx, store.clone()); // Notification: chain committed 5..10 (Blocks 1,2,3,4 are missing 
from storage) let new_chain = Arc::new(mk_chain_with_updates(5, 10, None)); let notif = ExExNotification::ChainCommitted { new: new_chain }; - let (sync_target_tx, mut sync_target_rx) = tokio::sync::watch::channel(0u64); - - // Process notification - exex.handle_notification(notif, &collector, &sync_target_tx) + // Process notification — should return immediately (gap detected, deferred to engine). + exex.handle_notification(notif, &engine_handle) .expect("handle chain commit should return ok immediately"); - // Verify async signal was sent - // The target in the channel should now be 10 (the tip of the new chain) - assert_eq!( - *sync_target_rx.borrow_and_update(), - 10, - "Should have scheduled sync to block 10" - ); - - // Verify Main Thread did NOT process it - // Because we didn't spawn the actual worker thread in this test, storage should still be at - // 0. This proves the 'handle_notification' returned instantly without doing the - // heavy lifting. - let latest = get_latest(&proofs).expect("ok").0; + // Verify the notification handler did NOT process blocks synchronously. + // The engine has a sync target set but no blocks in the provider, so its catch-up + // will error out without writing anything. Storage stays at block 0. 
+ engine_handle.flush(); + let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get").expect("ok").0; assert_eq!(latest, 0, "Main thread should not have processed the blocks synchronously"); } } diff --git a/rust/op-reth/crates/node/Cargo.toml b/rust/op-reth/crates/node/Cargo.toml index de80e7caceb..376308a6779 100644 --- a/rust/op-reth/crates/node/Cargo.toml +++ b/rust/op-reth/crates/node/Cargo.toml @@ -77,7 +77,6 @@ clap.workspace = true serde.workspace = true eyre.workspace = true url.workspace = true -humantime.workspace = true futures-util.workspace = true tracing.workspace = true diff --git a/rust/op-reth/crates/node/src/args.rs b/rust/op-reth/crates/node/src/args.rs index d4d8748fa82..9929ced27f8 100644 --- a/rust/op-reth/crates/node/src/args.rs +++ b/rust/op-reth/crates/node/src/args.rs @@ -4,7 +4,7 @@ use clap::builder::ArgPredicate; use op_alloy_consensus::interop::SafetyLevel; -use std::{path::PathBuf, time::Duration}; +use std::path::PathBuf; use url::Url; /// Parameters for rollup configuration @@ -105,25 +105,6 @@ pub struct RollupArgs { )] pub proofs_history_window: u64, - /// Interval between proof-storage prune runs. Accepts human-friendly durations - /// like "100s", "5m", "1h". Defaults to 15s. - /// - /// - Shorter intervals prune smaller batches more often, so each prune run tends to be faster - /// and the blocking pause for writes is shorter, at the cost of more frequent pauses. - /// - Longer intervals prune larger batches less often, which reduces how often pruning runs, - /// but each run can take longer and block writes for longer. - /// - /// A shorter interval is preferred so that prune - /// runs stay small and don’t stall writes for too long. 
- /// - /// CLI: `--proofs-history.prune-interval 10m` - #[arg( - long = "proofs-history.prune-interval", - value_name = "PROOFS_HISTORY_PRUNE_INTERVAL", - default_value = "15s", - value_parser = humantime::parse_duration - )] - pub proofs_history_prune_interval: Duration, /// Verification interval: perform full block execution every N blocks for data integrity. /// - 0: Disabled (Default) (always use fast path with pre-computed data from notifications) /// - 1: Always verify (always execute blocks, slowest) @@ -159,7 +140,6 @@ impl Default for RollupArgs { proofs_history: false, proofs_history_storage_path: None, proofs_history_window: 1_296_000, - proofs_history_prune_interval: Duration::from_secs(15), proofs_history_verification_interval: 0, } } diff --git a/rust/op-reth/crates/node/src/proof_history.rs b/rust/op-reth/crates/node/src/proof_history.rs index 404b938bc1b..7a452657032 100644 --- a/rust/op-reth/crates/node/src/proof_history.rs +++ b/rust/op-reth/crates/node/src/proof_history.rs @@ -28,7 +28,6 @@ pub async fn launch_node_with_proof_history( let RollupArgs { proofs_history, proofs_history_window, - proofs_history_prune_interval, proofs_history_verification_interval, .. 
} = args; @@ -63,7 +62,6 @@ pub async fn launch_node_with_proof_history( .install_exex("proofs-history", async move |exex_context| { Ok(OpProofsExEx::builder(exex_context, storage_exec) .with_proofs_history_window(proofs_history_window) - .with_proofs_history_prune_interval(proofs_history_prune_interval) .with_verification_interval(proofs_history_verification_interval) .build() .run() diff --git a/rust/op-reth/crates/trie/Cargo.toml b/rust/op-reth/crates/trie/Cargo.toml index 64d53924340..b9e47f72e46 100644 --- a/rust/op-reth/crates/trie/Cargo.toml +++ b/rust/op-reth/crates/trie/Cargo.toml @@ -56,6 +56,7 @@ crossbeam-channel.workspace = true derive_more.workspace = true [dev-dependencies] +reth-optimism-trie = { path = ".", features = ["test-utils"] } reth-codecs = { workspace = true, features = ["test-utils"] } tempfile.workspace = true tokio = { workspace = true, features = ["test-util", "rt-multi-thread", "macros"] } @@ -81,6 +82,7 @@ eyre.workspace = true serial_test.workspace = true [features] +test-utils = [] serde-bincode-compat = [ "reth-trie-common/serde-bincode-compat", "alloy-consensus/serde-bincode-compat", diff --git a/rust/op-reth/crates/trie/src/db/mod.rs b/rust/op-reth/crates/trie/src/db/mod.rs index b32f557341d..047d9cd9000 100644 --- a/rust/op-reth/crates/trie/src/db/mod.rs +++ b/rust/op-reth/crates/trie/src/db/mod.rs @@ -11,6 +11,9 @@ pub use models::*; mod store; pub use store::{MdbxProofsProvider, MdbxProofsStorage}; +/// Placeholder alias for V2 storage format (full implementation deferred to a later PR). 
+pub type MdbxProofsStorageV2 = MdbxProofsStorage; + mod cursor; pub use cursor::{ BlockNumberVersionedCursor, MdbxAccountCursor, MdbxStorageCursor, MdbxTrieCursor, diff --git a/rust/op-reth/crates/trie/src/engine/handle.rs b/rust/op-reth/crates/trie/src/engine/handle.rs index bf88fba0190..b81691b6ee6 100644 --- a/rust/op-reth/crates/trie/src/engine/handle.rs +++ b/rust/op-reth/crates/trie/src/engine/handle.rs @@ -164,7 +164,7 @@ impl EngineHandle } /// Block until any in-progress background persistence completes (test/utility only). - #[cfg(test)] + #[cfg(any(test, feature = "test-utils"))] pub fn flush(&self) { use super::tasks::FlushTask; let (reply_tx, reply_rx) = bounded(1); diff --git a/rust/op-reth/crates/trie/src/engine/mod.rs b/rust/op-reth/crates/trie/src/engine/mod.rs index b8536bcb386..a3bf419a86b 100644 --- a/rust/op-reth/crates/trie/src/engine/mod.rs +++ b/rust/op-reth/crates/trie/src/engine/mod.rs @@ -42,7 +42,7 @@ enum EngineAction { /// Unwind indexed data back to a given block. Unwind(tasks::UnwindTask), /// Block the caller until any in-flight persistence completes. - #[cfg(test)] + #[cfg(any(test, feature = "test-utils"))] Flush(tasks::FlushTask), /// Update the sync catch-up target (fire-and-forget). SyncTo(tasks::SyncToTask), @@ -68,7 +68,7 @@ impl EngineAction { Self::IndexBlock(task) => task.execute(state), Self::Reorg(task) => task.execute(state), Self::Unwind(task) => task.execute(state), - #[cfg(test)] + #[cfg(any(test, feature = "test-utils"))] Self::Flush(task) => task.execute(state), Self::SyncTo(task) => task.execute(state), } diff --git a/rust/op-reth/crates/trie/src/engine/tasks/mod.rs b/rust/op-reth/crates/trie/src/engine/tasks/mod.rs index 47740d0a369..0dfb1c600b7 100644 --- a/rust/op-reth/crates/trie/src/engine/tasks/mod.rs +++ b/rust/op-reth/crates/trie/src/engine/tasks/mod.rs @@ -5,7 +5,7 @@ //! the reply. The engine dispatcher is a thin match with no business logic. 
mod execute_block; -#[cfg(test)] +#[cfg(any(test, feature = "test-utils"))] mod flush; mod index_block; mod reorg; @@ -13,7 +13,7 @@ mod sync_to; mod unwind; pub(super) use execute_block::{ExecuteBlockTask, run as execute_block}; -#[cfg(test)] +#[cfg(any(test, feature = "test-utils"))] pub(super) use flush::FlushTask; pub(super) use index_block::IndexBlockTask; pub(super) use reorg::ReorgTask; diff --git a/rust/op-reth/crates/trie/src/lib.rs b/rust/op-reth/crates/trie/src/lib.rs index 4c578812715..2b34b1faa83 100644 --- a/rust/op-reth/crates/trie/src/lib.rs +++ b/rust/op-reth/crates/trie/src/lib.rs @@ -30,7 +30,9 @@ pub use in_memory::{ }; pub mod db; -pub use db::{MdbxAccountCursor, MdbxProofsStorage, MdbxStorageCursor, MdbxTrieCursor}; +pub use db::{ + MdbxAccountCursor, MdbxProofsStorage, MdbxProofsStorageV2, MdbxStorageCursor, MdbxTrieCursor, +}; #[cfg(feature = "metrics")] pub mod metrics; @@ -48,8 +50,6 @@ pub mod proof; pub mod provider; -pub mod live; - pub mod engine; pub use engine::EngineHandle; diff --git a/rust/op-reth/crates/trie/src/live.rs b/rust/op-reth/crates/trie/src/live.rs deleted file mode 100644 index ce7cf6c895e..00000000000 --- a/rust/op-reth/crates/trie/src/live.rs +++ /dev/null @@ -1,234 +0,0 @@ -//! Live trie collector for external proofs storage. 
- -use crate::{ - BlockStateDiff, OpProofsStorage, OpProofsStorageError, OpProofsStore, - api::{OpProofsProviderRO, OpProofsProviderRw, OperationDurations}, - provider::OpProofsStateProviderRef, -}; -use alloy_eips::{BlockNumHash, NumHash, eip1898::BlockWithParent}; -use derive_more::Constructor; -use reth_evm::{ConfigureEvm, execute::Executor}; -use reth_primitives_traits::{AlloyBlockHeader, BlockTy, RecoveredBlock}; -use reth_provider::{ - DatabaseProviderFactory, HashedPostStateProvider, StateProviderFactory, StateReader, - StateRootProvider, -}; -use reth_revm::database::StateProviderDatabase; -use reth_trie_common::{HashedPostStateSorted, updates::TrieUpdatesSorted}; -use std::{sync::Arc, time::Instant}; -use tracing::info; - -/// Live trie collector for external proofs storage. -#[derive(Debug, Constructor)] -pub struct LiveTrieCollector<'tx, Evm, Provider, PreimageStore> -where - Evm: ConfigureEvm, - Provider: StateReader + DatabaseProviderFactory + StateProviderFactory, -{ - evm_config: Evm, - provider: Provider, - storage: &'tx OpProofsStorage, -} - -impl<'tx, Evm, Provider, Store> LiveTrieCollector<'tx, Evm, Provider, Store> -where - Evm: ConfigureEvm, - Provider: StateReader + DatabaseProviderFactory + StateProviderFactory, - Store: 'tx + OpProofsStore + Clone + 'static, -{ - /// Execute a block and store the updates in the storage. - pub fn execute_and_store_block_updates( - &self, - block: &RecoveredBlock>, - ) -> Result<(), OpProofsStorageError> { - let mut operation_durations = OperationDurations::default(); - - let start = Instant::now(); - // ensure that we have the state of the parent block - let provider_ro = self.storage.provider_ro()?; - let (Some((earliest, _)), Some((latest, _))) = - (provider_ro.get_earliest_block_number()?, provider_ro.get_latest_block_number()?) 
- else { - return Err(OpProofsStorageError::NoBlocksFound); - }; - - let parent_block_number = block.number() - 1; - if parent_block_number < earliest { - return Err(OpProofsStorageError::UnknownParent); - } - - if parent_block_number > latest { - return Err(OpProofsStorageError::MissingParentBlock { - block_number: block.number(), - parent_block_number, - latest_block_number: latest, - }); - } - - let block_ref = - BlockWithParent::new(block.parent_hash(), NumHash::new(block.number(), block.hash())); - - // TODO: should we check block hash here? - - let state_provider = OpProofsStateProviderRef::new( - self.provider.state_by_block_hash(block.parent_hash())?, - self.storage.provider_ro()?, - parent_block_number, - ); - - let db = StateProviderDatabase::new(&state_provider); - let block_executor = self.evm_config.batch_executor(db); - - let execution_result = block_executor.execute(&(*block).clone())?; - - operation_durations.execution_duration_seconds = start.elapsed(); - - let hashed_state = state_provider.hashed_post_state(&execution_result.state); - let (state_root, trie_updates) = - state_provider.state_root_with_updates(hashed_state.clone())?; - - operation_durations.state_root_duration_seconds = - start.elapsed() - operation_durations.execution_duration_seconds; - - if state_root != block.state_root() { - return Err(OpProofsStorageError::StateRootMismatch { - block_number: block.number(), - current_state_hash: state_root, - expected_state_hash: block.state_root(), - }); - } - - let provider_rw = self.storage.provider_rw()?; - let update_result = provider_rw.store_trie_updates( - block_ref, - BlockStateDiff { - sorted_trie_updates: trie_updates.into_sorted(), - sorted_post_state: hashed_state.into_sorted(), - }, - )?; - provider_rw.commit()?; - - operation_durations.total_duration_seconds = start.elapsed(); - operation_durations.write_duration_seconds = operation_durations.total_duration_seconds - - operation_durations.state_root_duration_seconds - - 
operation_durations.execution_duration_seconds; - - #[cfg(feature = "metrics")] - { - let block_metrics = self.storage.metrics().block_metrics(); - block_metrics.record_operation_durations(&operation_durations); - block_metrics.increment_write_counts(&update_result); - } - - info!( - block_number = block.number(), - ?operation_durations, - ?update_result, - "Block executed and trie updates stored successfully", - ); - - Ok(()) - } - - /// Store trie updates for a given block. - pub fn store_block_updates( - &self, - block: BlockWithParent, - sorted_trie_updates: TrieUpdatesSorted, - sorted_post_state: HashedPostStateSorted, - ) -> Result<(), OpProofsStorageError> { - let start = Instant::now(); - let mut operation_durations = OperationDurations::default(); - - let provider_rw = self.storage.provider_rw()?; - let storage_result = provider_rw - .store_trie_updates(block, BlockStateDiff { sorted_trie_updates, sorted_post_state })?; - provider_rw.commit()?; - - let write_duration = start.elapsed(); - operation_durations.total_duration_seconds = write_duration; - operation_durations.write_duration_seconds = write_duration; - - #[cfg(feature = "metrics")] - { - let block_metrics = self.storage.metrics().block_metrics(); - block_metrics.record_operation_durations(&operation_durations); - block_metrics.increment_write_counts(&storage_result); - } - - info!( - block_number = block.block.number, - ?operation_durations, - ?storage_result, - "Trie updates stored successfully", - ); - - Ok(()) - } - - /// Handles chain reorganizations by replacing block updates after a common ancestor. - /// - /// This method removes all block updates after the latest common ancestor (the block before - /// the first block in `new_blocks`) and replaces them with the updates from the provided new - /// chain. - /// - /// # Arguments - /// - /// * `new_blocks` - A vector of references to `RecoveredBlock` instances representing the new - /// blocks to be added to the trie storage. 
- pub fn unwind_and_store_block_updates( - &self, - block_updates: Vec<(BlockWithParent, Arc, Arc)>, - ) -> Result<(), OpProofsStorageError> { - if block_updates.is_empty() { - return Ok(()); - } - - let start = Instant::now(); - let mut operation_durations = OperationDurations::default(); - let first = &block_updates[0].0; - let latest_common_block = - BlockNumHash::new(first.block.number.saturating_sub(1), first.parent); - let mut block_trie_updates: Vec<(BlockWithParent, BlockStateDiff)> = - Vec::with_capacity(block_updates.len()); - - for (block, trie_updates, hashed_state) in &block_updates { - block_trie_updates.push(( - *block, - BlockStateDiff { - sorted_trie_updates: (**trie_updates).clone(), - sorted_post_state: (**hashed_state).clone(), - }, - )); - } - - let provider_rw = self.storage.provider_rw()?; - provider_rw.replace_updates(latest_common_block, block_trie_updates)?; - provider_rw.commit()?; - let write_duration = start.elapsed(); - operation_durations.total_duration_seconds = write_duration; - operation_durations.write_duration_seconds = write_duration; - - #[cfg(feature = "metrics")] - { - let block_metrics = self.storage.metrics().block_metrics(); - block_metrics.record_operation_durations(&operation_durations); - } - - info!( - start_block_number = block_updates.first().map(|(b, _, _)| b.block.number), - end_block_number = block_updates.last().map(|(b, _, _)| b.block.number), - ?operation_durations, - "Trie updates rewound and stored successfully", - ); - Ok(()) - } - - /// Remove account, storage and trie updates from historical storage for all blocks from - /// the specified block (inclusive). 
- pub fn unwind_history(&self, to: BlockWithParent) -> Result<(), OpProofsStorageError> { - let provider_rw = self.storage.provider_rw()?; - provider_rw.unwind_history(to)?; - provider_rw.commit() - } -} diff --git a/rust/op-reth/crates/trie/src/metrics.rs b/rust/op-reth/crates/trie/src/metrics.rs index 3d42a07c1ed..a4ea943c139 100644 --- a/rust/op-reth/crates/trie/src/metrics.rs +++ b/rust/op-reth/crates/trie/src/metrics.rs @@ -1,17 +1,16 @@ //! Storage wrapper that records metrics for all operations. use crate::{ - BlockStateDiff, OpProofsStorageResult, OpProofsStore, api::{ InitialStateAnchor, OpProofsInitProvider, OpProofsProviderRO, OpProofsProviderRw, - OperationDurations, WriteCounts, + WriteCounts, }, - cursor, + cursor, BlockStateDiff, OpProofsStorageResult, OpProofsStore, }; -use alloy_eips::{BlockNumHash, eip1898::BlockWithParent}; -use alloy_primitives::{B256, U256, map::HashMap}; +use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; +use alloy_primitives::{map::HashMap, B256, U256}; use derive_more::Constructor; -use metrics::{Counter, Gauge, Histogram}; +use metrics::{Gauge, Histogram}; use reth_db::DatabaseError; use reth_metrics::Metrics; use reth_primitives_traits::Account; @@ -28,8 +27,8 @@ use std::{ }; use strum::{EnumCount, EnumIter, IntoEnumIterator}; -/// Alias for [`OpProofsStorageWithMetrics`]. -pub type OpProofsStorage = OpProofsStorageWithMetrics; +/// Alias for [`OpProofsStoreWithMetrics`]. +pub type OpProofsStorage = OpProofsStoreWithMetrics; /// Alias for [`TrieCursor`](cursor::OpProofsTrieCursor) with metrics layer. pub type OpProofsTrieCursor = cursor::OpProofsTrieCursor>; @@ -87,13 +86,23 @@ impl StorageOperation { } } +/// Metrics tracking the range of blocks available for proof generation. +#[derive(Metrics, Clone)] +#[metrics(scope = "optimism_trie.proof_window")] +pub struct ProofWindowMetrics { + /// Earliest block number available in the proof window. 
+ pub earliest: Gauge, + /// Latest block number available in the proof window. + pub latest: Gauge, +} + /// Metrics for storage operations. #[derive(Debug)] pub struct StorageMetrics { /// Cache of operation metrics handles, keyed by (operation, context) operations: HashMap, - /// Block-level metrics - block_metrics: BlockMetrics, + /// Proof window metrics + pub proof_window: ProofWindowMetrics, } impl StorageMetrics { @@ -101,7 +110,7 @@ impl StorageMetrics { pub fn new() -> Self { Self { operations: Self::generate_operation_handles(), - block_metrics: BlockMetrics::new_with_labels(&[] as &[(&str, &str)]), + proof_window: ProofWindowMetrics::new_with_labels(&[] as &[(&str, &str)]), } } @@ -120,7 +129,11 @@ impl StorageMetrics { /// Record a storage operation with timing. pub fn record_operation(&self, operation: StorageOperation, f: impl FnOnce() -> R) -> R { - if let Some(metrics) = self.operations.get(&operation) { metrics.record(f) } else { f() } + if let Some(metrics) = self.operations.get(&operation) { + metrics.record(f) + } else { + f() + } } /// Record a storage operation with timing (async version). @@ -139,11 +152,6 @@ impl StorageMetrics { result } - /// Get block metrics for recording high-level timing. - pub const fn block_metrics(&self) -> &BlockMetrics { - &self.block_metrics - } - /// Record a pre-measured duration for an operation. pub fn record_duration(&self, operation: StorageOperation, duration: Duration) { if let Some(metrics) = self.operations.get(&operation) { @@ -201,51 +209,6 @@ impl OperationMetrics { } } -/// High-level block processing metrics. 
-#[derive(Metrics, Clone)] -#[metrics(scope = "optimism_trie.block")] -pub struct BlockMetrics { - /// Total time to process a block (end-to-end) in seconds - pub total_duration_seconds: Histogram, - /// Time spent executing the block (EVM) in seconds - pub execution_duration_seconds: Histogram, - /// Time spent calculating state root in seconds - pub state_root_duration_seconds: Histogram, - /// Time spent writing trie updates to storage in seconds - pub write_duration_seconds: Histogram, - /// Number of trie updates written - pub account_trie_updates_written_total: Counter, - /// Number of storage trie updates written - pub storage_trie_updates_written_total: Counter, - /// Number of hashed accounts written - pub hashed_accounts_written_total: Counter, - /// Number of hashed storages written - pub hashed_storages_written_total: Counter, - /// Earliest block number that the proofs storage has stored. - pub earliest_number: Gauge, - /// Latest block number that the proofs storage has stored. - pub latest_number: Gauge, -} - -impl BlockMetrics { - /// Record operation durations for the processing of a block. - pub fn record_operation_durations(&self, durations: &OperationDurations) { - self.total_duration_seconds.record(durations.total_duration_seconds); - self.execution_duration_seconds.record(durations.execution_duration_seconds); - self.state_root_duration_seconds.record(durations.state_root_duration_seconds); - self.write_duration_seconds.record(durations.write_duration_seconds); - } - - /// Increment write counts of historical trie updates for a single block. 
- pub fn increment_write_counts(&self, counts: &WriteCounts) { - self.account_trie_updates_written_total - .increment(counts.account_trie_updates_written_total); - self.storage_trie_updates_written_total - .increment(counts.storage_trie_updates_written_total); - self.hashed_accounts_written_total.increment(counts.hashed_accounts_written_total); - self.hashed_storages_written_total.increment(counts.hashed_storages_written_total); - } -} /// Wrapper for [`TrieCursor`] that records metrics. #[derive(Debug, Constructor, Clone)] @@ -336,12 +299,12 @@ impl HashedStorageCursor for OpProofsHashedCursorWithMet /// Wrapper around [`OpProofsStore`] type that records metrics for all operations. #[derive(Debug, Clone)] -pub struct OpProofsStorageWithMetrics { +pub struct OpProofsStoreWithMetrics { storage: S, metrics: Arc, } -impl OpProofsStorageWithMetrics { +impl OpProofsStoreWithMetrics { /// Initializes new [`StorageMetrics`] and wraps given storage instance. pub fn new(storage: S) -> Self { Self { storage, metrics: Arc::new(StorageMetrics::default()) } @@ -358,7 +321,7 @@ impl OpProofsStorageWithMetrics { } } -impl OpProofsStore for OpProofsStorageWithMetrics +impl OpProofsStore for OpProofsStoreWithMetrics where S: OpProofsStore, { @@ -422,7 +385,7 @@ impl OpProofsProviderRO for OpProofsProviderROWithMetrics fn get_earliest_block_number(&self) -> OpProofsStorageResult> { let result = self.provider.get_earliest_block_number()?; if let Some((number, _)) = result { - self.metrics.block_metrics.earliest_number.set(number as f64); + self.metrics.proof_window.earliest.set(number as f64); } Ok(result) } @@ -431,7 +394,7 @@ impl OpProofsProviderRO for OpProofsProviderROWithMetrics fn get_latest_block_number(&self) -> OpProofsStorageResult> { let result = self.provider.get_latest_block_number()?; if let Some((number, _)) = result { - self.metrics.block_metrics.latest_number.set(number as f64); + self.metrics.proof_window.latest.set(number as f64); } Ok(result) } @@ -509,7 +472,7 
@@ impl OpProofsProviderRO for OpProofsProviderRwWithMetrics fn get_earliest_block_number(&self) -> OpProofsStorageResult> { let result = self.provider.get_earliest_block_number()?; if let Some((number, _)) = result { - self.metrics.block_metrics.earliest_number.set(number as f64); + self.metrics.proof_window.earliest.set(number as f64); } Ok(result) } @@ -518,7 +481,7 @@ impl OpProofsProviderRO for OpProofsProviderRwWithMetrics fn get_latest_block_number(&self) -> OpProofsStorageResult> { let result = self.provider.get_latest_block_number()?; if let Some((number, _)) = result { - self.metrics.block_metrics.latest_number.set(number as f64); + self.metrics.proof_window.latest.set(number as f64); } Ok(result) } @@ -575,7 +538,7 @@ impl OpProofsProviderRw for OpProofsProviderRwWithMetrics block_state_diff: BlockStateDiff, ) -> OpProofsStorageResult { let result = self.provider.store_trie_updates(block_ref, block_state_diff)?; - self.metrics.block_metrics.latest_number.set(block_ref.block.number as f64); + self.metrics.proof_window.latest.set(block_ref.block.number as f64); Ok(result) } @@ -586,7 +549,7 @@ impl OpProofsProviderRw for OpProofsProviderRwWithMetrics ) -> OpProofsStorageResult { let result = self.provider.store_trie_updates_batch(updates.clone())?; if let Some((latest_block_ref, _)) = updates.last() { - self.metrics.block_metrics.latest_number.set(latest_block_ref.block.number as f64); + self.metrics.proof_window.latest.set(latest_block_ref.block.number as f64); } Ok(result) } @@ -596,7 +559,7 @@ impl OpProofsProviderRw for OpProofsProviderRwWithMetrics &self, new_earliest_block_ref: BlockWithParent, ) -> OpProofsStorageResult { - self.metrics.block_metrics.earliest_number.set(new_earliest_block_ref.block.number as f64); + self.metrics.proof_window.earliest.set(new_earliest_block_ref.block.number as f64); self.provider.prune_earliest_state(new_earliest_block_ref) } @@ -620,7 +583,7 @@ impl OpProofsProviderRw for OpProofsProviderRwWithMetrics block_number: 
u64, hash: B256, ) -> OpProofsStorageResult<()> { - self.metrics.block_metrics.earliest_number.set(block_number as f64); + self.metrics.proof_window.earliest.set(block_number as f64); self.provider.set_earliest_block_number(block_number, hash) } @@ -657,6 +620,8 @@ impl OpProofsInitProvider for OpProofsInitProviderWithM let start = Instant::now(); let result = self.provider.store_account_branches(account_nodes); let duration = start.elapsed(); + + // Record per-item duration if count > 0 { self.metrics.record_duration_per_item( StorageOperation::StoreAccountBranch, @@ -664,6 +629,7 @@ impl OpProofsInitProvider for OpProofsInitProviderWithM count, ); } + result } @@ -677,6 +643,8 @@ impl OpProofsInitProvider for OpProofsInitProviderWithM let start = Instant::now(); let result = self.provider.store_storage_branches(hashed_address, storage_nodes); let duration = start.elapsed(); + + // Record per-item duration if count > 0 { self.metrics.record_duration_per_item( StorageOperation::StoreStorageBranch, @@ -684,6 +652,7 @@ impl OpProofsInitProvider for OpProofsInitProviderWithM count, ); } + result } @@ -696,6 +665,8 @@ impl OpProofsInitProvider for OpProofsInitProviderWithM let start = Instant::now(); let result = self.provider.store_hashed_accounts(accounts); let duration = start.elapsed(); + + // Record per-item duration if count > 0 { self.metrics.record_duration_per_item( StorageOperation::StoreHashedAccount, @@ -703,6 +674,7 @@ impl OpProofsInitProvider for OpProofsInitProviderWithM count, ); } + result } @@ -716,6 +688,8 @@ impl OpProofsInitProvider for OpProofsInitProviderWithM let start = Instant::now(); let result = self.provider.store_hashed_storages(hashed_address, storages); let duration = start.elapsed(); + + // Record per-item duration if count > 0 { self.metrics.record_duration_per_item( StorageOperation::StoreHashedStorage, @@ -723,13 +697,14 @@ impl OpProofsInitProvider for OpProofsInitProviderWithM count, ); } + result } #[inline] fn 
commit_initial_state(&self) -> OpProofsStorageResult { let block = self.provider.commit_initial_state()?; - self.metrics.block_metrics.earliest_number.set(block.number as f64); + self.metrics.proof_window.earliest.set(block.number as f64); Ok(block) } @@ -739,7 +714,7 @@ impl OpProofsInitProvider for OpProofsInitProviderWithM } } -impl From for OpProofsStorageWithMetrics +impl From for OpProofsStoreWithMetrics where S: OpProofsStore + Clone + 'static, { diff --git a/rust/op-reth/crates/trie/tests/live.rs b/rust/op-reth/crates/trie/tests/live.rs index 64f08632ff8..9ee82888b7d 100644 --- a/rust/op-reth/crates/trie/tests/live.rs +++ b/rust/op-reth/crates/trie/tests/live.rs @@ -12,8 +12,9 @@ use reth_evm::{ConfigureEvm, execute::Executor}; use reth_evm_ethereum::EthEvmConfig; use reth_node_api::{NodePrimitives, NodeTypesWithDB}; use reth_optimism_trie::{ - MdbxProofsStorage, OpProofsStorage, OpProofsStorageError, initialize::InitializationJob, - live::LiveTrieCollector, + MdbxProofsStorage, OpProofStoragePruner, OpProofsStorage, + OpProofsStore, initialize::InitializationJob, + engine::{EngineHandle, EngineError}, }; use reth_primitives_traits::{Block as _, RecoveredBlock}; use reth_provider::{ @@ -24,8 +25,15 @@ use reth_provider::{ }; use reth_revm::database::StateProviderDatabase; use secp256k1::{Keypair, Secp256k1, rand::rng}; +use serial_test::serial; use std::sync::Arc; use tempfile::TempDir; +use test_case::test_case; + +fn create_mdbx_proofs_storage() -> Arc { + let path = TempDir::new().unwrap(); + Arc::new(MdbxProofsStorage::new(path.path()).unwrap()) +} /// Converts a secp256k1 public key to an Ethereum address. 
fn public_key_to_address(pubkey: secp256k1::PublicKey) -> Address { @@ -214,18 +222,20 @@ where } /// Runs a test scenario with the given configuration -fn run_test_scenario( +fn run_test_scenario( scenario: TestScenario, provider_factory: ProviderFactory, chain_spec: Arc, key_pair: Keypair, - storage: OpProofsStorage>, + raw_storage: S, ) -> eyre::Result<()> where N: ProviderNodeTypes< Primitives: NodePrimitives, > + NodeTypesWithDB, + S: OpProofsStore + Send + Sync + Clone + std::fmt::Debug + 'static, { + let storage: OpProofsStorage = raw_storage.into(); let genesis_hash = chain_spec.genesis_hash(); let mut nonce_counter = 0u64; let mut last_block_hash = genesis_hash; @@ -257,8 +267,14 @@ where initialization_job.run(last_block_number, last_block_hash)?; } - // Execute blocks after initialization using live collector + // Execute blocks after initialization using live collector. + // A single collector is shared across all blocks so the in-memory buffer accumulates + // state between iterations (the new async-persistence architecture requires this). 
let evm_config = EthEvmConfig::ethereum(chain_spec.clone()); + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; + let pruner = OpProofStoragePruner::new(storage.clone(), blockchain_db.clone(), 1000); + let engine_handle = + EngineHandle::spawn(evm_config, blockchain_db, storage, pruner); for (idx, block_spec) in scenario.blocks_after_initialization.iter().enumerate() { let block_number = last_block_number + idx as u64 + 1; @@ -274,13 +290,8 @@ where // Execute the block to get the correct state root let execution_output = execute_block(&mut block, &provider_factory, &chain_spec)?; - // Create a fresh blockchain provider to ensure it sees all committed blocks - let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; - let live_trie_collector = - LiveTrieCollector::new(evm_config.clone(), blockchain_db, &storage); - // Use the live collector to execute and store trie updates - live_trie_collector.execute_and_store_block_updates(&block)?; + engine_handle.execute_block(&block)?; // Commit the block to the database so subsequent blocks can build on it commit_block_to_database(&block, &execution_output, &provider_factory)?; @@ -288,6 +299,9 @@ where last_block_hash = block.hash(); } + // Drain any pending in-memory blocks to disk before returning. 
+ engine_handle.flush(); + Ok(()) } @@ -295,11 +309,12 @@ where /// (1) Creates a chain with some state /// (2) Stores the genesis state into storage via initialization /// (3) Executes a block and calculates the state root using the stored state -#[test] -fn test_execute_and_store_block_updates() { - let dir = TempDir::new().unwrap(); - let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); - +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_execute_and_store_block_updates(storage: S) -> Result<(), eyre::Error> +where + S: OpProofsStore + Clone + Send + Sync + std::fmt::Debug + 'static, +{ // Create a keypair for signing transactions let secp = Secp256k1::new(); let key_pair = Keypair::new(&secp, &mut rng()); @@ -324,15 +339,17 @@ fn test_execute_and_store_block_updates() { vec![BlockSpec::new(vec![TxSpec::transfer(recipient, U256::from(1))])], ); - run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).unwrap(); + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage) } -#[test] -fn test_execute_and_store_block_updates_missing_parent_block() { - let dir = TempDir::new().unwrap(); - let storage: OpProofsStorage> = - Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); - +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_execute_and_store_block_updates_missing_parent_block( + storage: S, +) -> Result<(), eyre::Error> +where + S: OpProofsStore + Clone + Send + Sync + std::fmt::Debug + 'static, +{ let secp = Secp256k1::new(); let key_pair = Keypair::new(&secp, &mut rng()); let sender = public_key_to_address(key_pair.public_key()); @@ -351,11 +368,10 @@ fn test_execute_and_store_block_updates_missing_parent_block() { chain_spec.clone(), key_pair, storage.clone(), - ) - .unwrap(); + )?; - // Create a block whose parent block number is missing. - let incorrect_block_number = 2; + // Create a block that is sequential but has a wrong parent hash. 
+ let incorrect_block_number = 1; let incorrect_parent_hash = B256::repeat_byte(0x11); let mut nonce_counter = 0; @@ -369,21 +385,29 @@ fn test_execute_and_store_block_updates_missing_parent_block() { ); let blockchain_db = BlockchainProvider::new(provider_factory).unwrap(); - let collector = - LiveTrieCollector::new(EthEvmConfig::ethereum(chain_spec.clone()), blockchain_db, &storage); + let pruner = OpProofStoragePruner::new(storage.clone(), blockchain_db.clone(), 1000); + let engine_handle = EngineHandle::spawn( + EthEvmConfig::ethereum(chain_spec.clone()), + blockchain_db, + storage, + pruner, + ); - // EXPECT: MissingParentBlock - let err = collector.execute_and_store_block_updates(&incorrect_block).unwrap_err(); + // EXPECT: ParentHashMismatch (block is at correct number but wrong parent hash) + let err = engine_handle.execute_block(&incorrect_block).unwrap_err(); - assert!(matches!(err, OpProofsStorageError::MissingParentBlock { .. })); + assert!(matches!(err, EngineError::ParentHashMismatch { .. 
})); + Ok(()) } -#[test] -fn test_execute_and_store_block_updates_state_root_mismatch() { - let dir = TempDir::new().unwrap(); - let storage: OpProofsStorage> = - Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); - +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_execute_and_store_block_updates_state_root_mismatch( + storage: Arc, +) -> Result<(), eyre::Error> +where + S: OpProofsStore + Send + Sync + std::fmt::Debug + 'static, +{ let secp = Secp256k1::new(); let key_pair = Keypair::new(&secp, &mut rng()); let sender = public_key_to_address(key_pair.public_key()); @@ -405,18 +429,23 @@ fn test_execute_and_store_block_updates_state_root_mismatch() { chain_spec.clone(), key_pair, storage.clone(), - ) - .unwrap(); + )?; // Generate a second block normally let blockchain_db = BlockchainProvider::new(provider_factory.clone()).unwrap(); - let collector = - LiveTrieCollector::new(EthEvmConfig::ethereum(chain_spec.clone()), blockchain_db, &storage); + let pruner = OpProofStoragePruner::new(storage.clone(), blockchain_db.clone(), 1000); + let engine_handle = EngineHandle::spawn( + EthEvmConfig::ethereum(chain_spec.clone()), + blockchain_db, + storage, + pruner, + ); - // Create the next block + // Create the next block — sequential after genesis so the parent hash check passes + // and execution runs, allowing us to verify the state root mismatch path. 
let mut nonce_counter = 0; - let last_block_hash = chain_spec.genesis_hash(); // because scenario executes 1 block - let next_number = 2; + let last_block_hash = chain_spec.genesis_hash(); + let next_number = 1; let mut block = create_block_from_spec( &BlockSpec::new(vec![]), @@ -434,17 +463,21 @@ fn test_execute_and_store_block_updates_state_root_mismatch() { block.header_mut().state_root = B256::repeat_byte(0xAA); // EXPECT: StateRootMismatch - let err = collector.execute_and_store_block_updates(&block).unwrap_err(); + let err = engine_handle.execute_block(&block).unwrap_err(); - assert!(matches!(err, OpProofsStorageError::StateRootMismatch { .. })); + assert!(matches!(err, EngineError::StateRootMismatch { .. })); + Ok(()) } /// Test with multiple blocks before and after initialization -#[test] -fn test_multiple_blocks_before_and_after_initialization() { - let dir = TempDir::new().unwrap(); - let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); - +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_multiple_blocks_before_and_after_initialization( + storage: Arc, +) -> Result<(), eyre::Error> +where + S: OpProofsStore + Send + Sync + std::fmt::Debug + 'static, +{ let secp = Secp256k1::new(); let key_pair = Keypair::new(&secp, &mut rng()); let sender = public_key_to_address(key_pair.public_key()); @@ -473,15 +506,16 @@ fn test_multiple_blocks_before_and_after_initialization() { ], ); - run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).unwrap(); + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage) } /// Test with blocks containing multiple transactions -#[test] -fn test_blocks_with_multiple_transactions() { - let dir = TempDir::new().unwrap(); - let storage = Arc::new(MdbxProofsStorage::new(dir.path()).expect("env")).into(); - +#[test_case(create_mdbx_proofs_storage(); "Mdbx")] +#[serial] +fn test_blocks_with_multiple_transactions(storage: Arc) -> Result<(), 
eyre::Error> +where + S: OpProofsStore + Send + Sync + std::fmt::Debug + 'static, +{ let secp = Secp256k1::new(); let key_pair = Keypair::new(&secp, &mut rng()); let sender = public_key_to_address(key_pair.public_key()); @@ -504,5 +538,5 @@ fn test_blocks_with_multiple_transactions() { ])], ); - run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage).unwrap(); + run_test_scenario(scenario, provider_factory, chain_spec, key_pair, storage) } diff --git a/rust/op-reth/tests/proofs/core/account_proofs_test.go b/rust/op-reth/tests/proofs/core/account_proofs_test.go index 2e5fc4f6c58..2d23c42ca0d 100644 --- a/rust/op-reth/tests/proofs/core/account_proofs_test.go +++ b/rust/op-reth/tests/proofs/core/account_proofs_test.go @@ -40,7 +40,7 @@ func TestL2MultipleTransactionsInDifferentBlocks(gt *testing.T) { require.Equal(t, types.ReceiptStatusSuccessful, receipt1.Status) t.Logf("Transaction 1 included in block: %d", bigs.Uint64Strict(receipt1.BlockNumber)) - sys.L2ELValidatorNode().WaitForBlockNumber(bigs.Uint64Strict(receipt1.BlockNumber)) + utils.WaitForProofsStoreBlock(t, sys.RethWithProofL2ELNode().Escape().L2EthClient(), bigs.Uint64Strict(receipt1.BlockNumber)) utils.FetchAndVerifyProofs(t, sys, accounts[0].Address(), []common.Hash{}, bigs.Uint64Strict(receipt1.BlockNumber)) sys.L2ELSequencerNode().WaitForBlockNumber(currentBlock.Number + 1) @@ -55,7 +55,7 @@ func TestL2MultipleTransactionsInDifferentBlocks(gt *testing.T) { require.Equal(t, types.ReceiptStatusSuccessful, receipt2.Status) t.Logf("Transaction 2 included in block: %d", bigs.Uint64Strict(receipt2.BlockNumber)) - sys.L2ELValidatorNode().WaitForBlockNumber(bigs.Uint64Strict(receipt2.BlockNumber)) + utils.WaitForProofsStoreBlock(t, sys.RethWithProofL2ELNode().Escape().L2EthClient(), bigs.Uint64Strict(receipt2.BlockNumber)) utils.FetchAndVerifyProofs(t, sys, accounts[1].Address(), []common.Hash{}, bigs.Uint64Strict(receipt2.BlockNumber)) // Also verify we can get proofs for account 0 at 
block 2 (different block height) @@ -98,7 +98,7 @@ func TestL2MultipleTransactionsInSingleBlock(gt *testing.T) { require.Equal(t, types.ReceiptStatusSuccessful, receipt1.Status) t.Logf("Transaction 1 included in block %d", bigs.Uint64Strict(receipt1.BlockNumber)) - sys.L2ELValidatorNode().WaitForBlockNumber(bigs.Uint64Strict(receipt1.BlockNumber)) + utils.WaitForProofsStoreBlock(t, sys.RethWithProofL2ELNode().Escape().L2EthClient(), bigs.Uint64Strict(receipt1.BlockNumber)) // Txns can land in the same or different blocks depending on timing. if bigs.Uint64Strict(receipt0.BlockNumber) == bigs.Uint64Strict(receipt1.BlockNumber) { t.Logf("Both transactions included in the same L2 block: %d", bigs.Uint64Strict(receipt0.BlockNumber)) diff --git a/rust/op-reth/tests/proofs/core/execute_payload_test.go b/rust/op-reth/tests/proofs/core/execute_payload_test.go index d4e3d5e7986..d9d59c3a331 100644 --- a/rust/op-reth/tests/proofs/core/execute_payload_test.go +++ b/rust/op-reth/tests/proofs/core/execute_payload_test.go @@ -22,7 +22,7 @@ func TestExecutePayloadSuccess(gt *testing.T) { if err != nil { gt.Fatal(err) } - sys.L2ELValidatorNode().WaitForBlockNumber(seqHead.NumberU64()) + utils.WaitForProofsStoreBlock(t, opRethELNode.Escape().L2EthClient(), seqHead.NumberU64()) plannedTxOption := user.PlanTransfer(user.Address(), eth.OneWei) plannedTx := txplan.NewPlannedTx(plannedTxOption) @@ -90,7 +90,7 @@ func TestExecutePayloadWithInvalidParentHash(gt *testing.T) { if err != nil { gt.Fatal(err) } - sys.L2ELValidatorNode().WaitForBlockNumber(seqHead.NumberU64()) + utils.WaitForProofsStoreBlock(t, opRethELNode.Escape().L2EthClient(), seqHead.NumberU64()) plannedTxOption := user.PlanTransfer(user.Address(), eth.OneWei) plannedTx := txplan.NewPlannedTx(plannedTxOption) diff --git a/rust/op-reth/tests/proofs/core/execution_witness_test.go b/rust/op-reth/tests/proofs/core/execution_witness_test.go index 1a047e91be2..3ad3dd2caec 100644 --- 
a/rust/op-reth/tests/proofs/core/execution_witness_test.go +++ b/rust/op-reth/tests/proofs/core/execution_witness_test.go @@ -49,7 +49,7 @@ func TestDebugExecutionWitness(gt *testing.T) { require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status) t.Logf("Transaction included in block: %d", bigs.Uint64Strict(receipt.BlockNumber)) - sys.L2ELValidatorNode().WaitForBlockNumber(bigs.Uint64Strict(receipt.BlockNumber)) + utils.WaitForProofsStoreBlock(t, opRethELNode.Escape().L2EthClient(), bigs.Uint64Strict(receipt.BlockNumber)) l2RethClient := opRethELNode.Escape().L2EthClient() // Get the block to inspect the state changes diff --git a/rust/op-reth/tests/proofs/core/simple_storage_test.go b/rust/op-reth/tests/proofs/core/simple_storage_test.go index 0e3f08702cf..f9f8e39a431 100644 --- a/rust/op-reth/tests/proofs/core/simple_storage_test.go +++ b/rust/op-reth/tests/proofs/core/simple_storage_test.go @@ -21,7 +21,7 @@ func TestStorageProofUsingSimpleStorageContract(gt *testing.T) { contract, receipt := utils.DeploySimpleStorage(t, user) t.Logf("contract deployed at address %s in L2 block %d", contract.Address().Hex(), bigs.Uint64Strict(receipt.BlockNumber)) - sys.L2ELValidatorNode().WaitForBlockNumber(bigs.Uint64Strict(receipt.BlockNumber)) + utils.WaitForProofsStoreBlock(t, sys.RethWithProofL2ELNode().Escape().L2EthClient(), bigs.Uint64Strict(receipt.BlockNumber)) // fetch and verify initial proof (should be zeroed storage) utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{common.HexToHash("0x0")}, bigs.Uint64Strict(receipt.BlockNumber)) @@ -49,7 +49,7 @@ func TestStorageProofUsingSimpleStorageContract(gt *testing.T) { }) t.Logf("reset setValue transaction included in L2 block %d", callRes.BlockNumber) - sys.L2ELValidatorNode().WaitForBlockNumber(bigs.Uint64Strict(callRes.BlockNumber)) + utils.WaitForProofsStoreBlock(t, sys.RethWithProofL2ELNode().Escape().L2EthClient(), bigs.Uint64Strict(callRes.BlockNumber)) // for each case, get proof and 
verify for _, c := range cases { utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{common.HexToHash("0x0")}, c.Block) @@ -70,7 +70,7 @@ func TestStorageProofUsingMultiStorageContract(gt *testing.T) { contract, receipt := utils.DeployMultiStorage(t, user) t.Logf("contract deployed at address %s in L2 block %d", contract.Address().Hex(), bigs.Uint64Strict(receipt.BlockNumber)) - sys.L2ELValidatorNode().WaitForBlockNumber(bigs.Uint64Strict(receipt.BlockNumber)) + utils.WaitForProofsStoreBlock(t, sys.RethWithProofL2ELNode().Escape().L2EthClient(), bigs.Uint64Strict(receipt.BlockNumber)) // fetch and verify initial proof (should be zeroed storage) utils.FetchAndVerifyProofs(t, sys, contract.Address(), []common.Hash{common.HexToHash("0x0"), common.HexToHash("0x1")}, bigs.Uint64Strict(receipt.BlockNumber)) @@ -107,7 +107,7 @@ func TestStorageProofUsingMultiStorageContract(gt *testing.T) { }) t.Logf("reset setValues transaction included in L2 block %d", callRes.BlockNumber) - sys.L2ELValidatorNode().WaitForBlockNumber(bigs.Uint64Strict(callRes.BlockNumber)) + utils.WaitForProofsStoreBlock(t, sys.RethWithProofL2ELNode().Escape().L2EthClient(), bigs.Uint64Strict(callRes.BlockNumber)) // for each case, get proof and verify for _, c := range cases { var slots []common.Hash @@ -151,7 +151,7 @@ func TestTokenVaultStorageProofs(gt *testing.T) { deactBlock := bigs.Uint64Strict(deactRes.BlockNumber) t.Logf("deactivateAllowance included in block %d", deactBlock) - sys.L2ELValidatorNode().WaitForBlockNumber(deactBlock) + utils.WaitForProofsStoreBlock(t, sys.RethWithProofL2ELNode().Escape().L2EthClient(), deactBlock) // balance slot for user balanceSlot := contract.GetBalanceSlot(userAddr) diff --git a/rust/op-reth/tests/proofs/prune/prune_test.go b/rust/op-reth/tests/proofs/prune/prune_test.go index a84a6457e55..d38c5872ab2 100644 --- a/rust/op-reth/tests/proofs/prune/prune_test.go +++ b/rust/op-reth/tests/proofs/prune/prune_test.go @@ -67,8 +67,8 @@ func 
TestPruneProofStorageWithGetProofConsistency(gt *testing.T) { } t.Logf("Target block for proof validation (pre-prune): %d", targetBlock) - // Make sure validator has the block too (keeps the test stable). - sys.L2ELValidatorNode().WaitForBlockNumber(targetBlock) + // Make sure the proof store has indexed the target block before verifying. + utils.WaitForProofsStoreBlock(t, ethClient, targetBlock) // Pre-prune proof verification at targetBlock. // This verifies the proof against the block's state root (efficient correctness check). @@ -88,7 +88,7 @@ func TestPruneProofStorageWithGetProofConsistency(gt *testing.T) { requiredLatest := targetBlock + proofWindow if initialStatus.Latest < requiredLatest { t.Logf("Waiting for chain to advance to at least block %d so pruning can pass targetBlock", requiredLatest) - opRethELNode.WaitForBlockNumber(requiredLatest) + utils.WaitForProofsStoreBlock(t, ethClient, requiredLatest) } t.Logf("Waiting for pruner to advance earliest past targetBlock=%d ...", targetBlock) diff --git a/rust/op-reth/tests/proofs/reorg/reorg_test.go b/rust/op-reth/tests/proofs/reorg/reorg_test.go index d5e1e20375e..4ba66c5431d 100644 --- a/rust/op-reth/tests/proofs/reorg/reorg_test.go +++ b/rust/op-reth/tests/proofs/reorg/reorg_test.go @@ -168,7 +168,7 @@ func TestReorgUsingAccountProof(gt *testing.T) { } latestBlock := sys.L2Chain.WaitForBlock() - sys.L2ELValidatorNode().WaitForBlockNumber(latestBlock.Number) + utils.WaitForProofsStoreBlock(t, sys.RethWithProofL2ELNode().Escape().L2EthClient(), latestBlock.Number) // verify that the L2A validator has reorged and reached the latest block err := wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { From 24fbdee9f44a1ff6fbffa47bcbb19da4378f2f2a Mon Sep 17 00:00:00 2001 From: Arun Dhyani Date: Thu, 30 Apr 2026 13:48:39 +0530 Subject: [PATCH 02/10] remove prune interval from devstack --- op-devstack/sysgo/l2_el_opreth.go | 2 +- op-devstack/sysgo/mixed_runtime.go | 1 - 2 files changed, 1 insertion(+), 2 
deletions(-) diff --git a/op-devstack/sysgo/l2_el_opreth.go b/op-devstack/sysgo/l2_el_opreth.go index e0fbc5603c7..4fce659cd9f 100644 --- a/op-devstack/sysgo/l2_el_opreth.go +++ b/op-devstack/sysgo/l2_el_opreth.go @@ -64,7 +64,7 @@ func (n *OpReth) Start() { }) n.userRPC = "ws://" + n.userProxy.Addr() } - logOut := logpipe.ToLoggerWithMinLevel(n.p.Logger().New("component", "op-reth", "src", "stdout", "name", n.name, "chain", n.chainID), log.LevelWarn) + logOut := logpipe.ToLoggerWithMinLevel(n.p.Logger().New("component", "op-reth", "src", "stdout", "name", n.name, "chain", n.chainID), log.LevelInfo) logErr := logpipe.ToLoggerWithMinLevel(n.p.Logger().New("component", "op-reth", "src", "stderr", "name", n.name, "chain", n.chainID), log.LevelWarn) authRPCChan := make(chan string, 1) diff --git a/op-devstack/sysgo/mixed_runtime.go b/op-devstack/sysgo/mixed_runtime.go index 72d25d8b743..d735204748f 100644 --- a/op-devstack/sysgo/mixed_runtime.go +++ b/op-devstack/sysgo/mixed_runtime.go @@ -364,7 +364,6 @@ func buildMixedOpRethNode( args, "--proofs-history", "--proofs-history.window=10000", - "--proofs-history.prune-interval=1m", "--proofs-history.storage-path="+proofHistoryDir, ) From 19b0bc5f6532c432cebf3c2f85b59d36af6f7c44 Mon Sep 17 00:00:00 2001 From: Arun Dhyani Date: Thu, 30 Apr 2026 14:01:20 +0530 Subject: [PATCH 03/10] fmt fixes --- rust/op-reth/crates/exex/src/lib.rs | 195 ++++++++++++++---------- rust/op-reth/crates/trie/src/metrics.rs | 14 +- rust/op-reth/crates/trie/tests/live.rs | 9 +- 3 files changed, 121 insertions(+), 97 deletions(-) diff --git a/rust/op-reth/crates/exex/src/lib.rs b/rust/op-reth/crates/exex/src/lib.rs index 15167dd4850..5c5287af359 100644 --- a/rust/op-reth/crates/exex/src/lib.rs +++ b/rust/op-reth/crates/exex/src/lib.rs @@ -14,12 +14,11 @@ use futures_util::TryStreamExt; use reth_execution_types::Chain; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::{FullNodeComponents, NodePrimitives, NodeTypes}; -use 
reth_provider::BlockNumReader; use reth_optimism_trie::{ - engine::EngineHandle, - OpProofsProviderRO, OpProofStoragePruner, OpProofsStore, + OpProofStoragePruner, OpProofsProviderRO, OpProofsStore, engine::EngineHandle, }; -use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted, SortedTrieData}; +use reth_provider::BlockNumReader; +use reth_trie::{HashedPostStateSorted, SortedTrieData, updates::TrieUpdatesSorted}; use std::sync::Arc; use tracing::{debug, info}; @@ -256,8 +255,8 @@ where "Configuration requires pruning {} blocks, which exceeds the safety threshold of {}. \ Huge prune operations can stall the node. \ Please run 'op-reth proofs prune' manually before starting the node.", - blocks_to_prune, - MAX_PRUNE_BLOCKS_STARTUP + blocks_to_prune, + MAX_PRUNE_BLOCKS_STARTUP )); } } @@ -312,8 +311,8 @@ where // `Chain::blocks()` is a BTreeMap so iteration is already ordered oldest → newest. for (&block_number, block) in new.blocks() { // Fast path: use pre-computed trie data only when verification is not due. - let should_verify = self.verification_interval > 0 - && block_number.is_multiple_of(self.verification_interval); + let should_verify = self.verification_interval > 0 && + block_number.is_multiple_of(self.verification_interval); let precomputed = (!should_verify).then(|| new.trie_data_at(block_number)).flatten(); if let Some(d) = precomputed { @@ -324,7 +323,8 @@ where (**hashed_state).clone(), )?; } else { - // Slow path: execute the block in full (no trie data, or verification interval hit). + // Slow path: execute the block in full (no trie data, or verification interval + // hit). 
engine_handle.execute_block(block)?; } } @@ -406,16 +406,16 @@ where mod tests { use super::*; use alloy_consensus::private::alloy_primitives::B256; - use alloy_eips::{eip1898::BlockWithParent, BlockNumHash, NumHash}; + use alloy_eips::{BlockNumHash, NumHash, eip1898::BlockWithParent}; use reth_db::test_utils::tempdir_path; use reth_ethereum_primitives::{Block, Receipt}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_optimism_trie::{ - db::MdbxProofsStorageV2, engine::EngineHandle, BlockStateDiff, OpProofsProviderRO, - OpProofsProviderRw, OpProofsStore, + BlockStateDiff, OpProofsProviderRO, OpProofsProviderRw, OpProofsStore, + db::MdbxProofsStorageV2, engine::EngineHandle, }; use reth_primitives_traits::RecoveredBlock; - use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted, LazyTrieData}; + use reth_trie::{HashedPostStateSorted, LazyTrieData, updates::TrieUpdatesSorted}; use std::{collections::BTreeMap, default::Default, sync::Arc}; // ------------------------------------------------------------------------- @@ -516,11 +516,7 @@ mod tests { let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let pruner = OpProofStoragePruner::new( - store.clone(), - ctx.components.provider.clone(), - 20, - ); + let pruner = OpProofStoragePruner::new(store.clone(), ctx.components.provider.clone(), 20); let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), @@ -538,7 +534,13 @@ mod tests { exex.handle_notification(notif, &engine_handle).expect("handle chain commit"); engine_handle.flush(); - let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; + let latest = store + .provider_ro() + .expect("provider ro") + .get_latest_block_number() + .expect("get latest block") + .expect("ok") + .0; assert_eq!(latest, 1); } @@ -553,11 +555,7 @@ mod tests { let (ctx, _handle) 
= reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let pruner = OpProofStoragePruner::new( - store.clone(), - ctx.components.provider.clone(), - 20, - ); + let pruner = OpProofStoragePruner::new(store.clone(), ctx.components.provider.clone(), 20); let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), @@ -577,7 +575,13 @@ mod tests { } engine_handle.flush(); - let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; + let latest = store + .provider_ro() + .expect("provider ro") + .get_latest_block_number() + .expect("get latest block") + .expect("ok") + .0; assert_eq!(latest, 5); // Try to handle already processed notification @@ -585,7 +589,12 @@ mod tests { let notif = ExExNotification::ChainCommitted { new: new_chain }; exex.handle_notification(notif, &engine_handle).expect("handle chain commit"); engine_handle.flush(); - let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok"); + let latest = store + .provider_ro() + .expect("provider ro") + .get_latest_block_number() + .expect("get latest block") + .expect("ok"); assert_eq!(latest.0, 5); assert_eq!(latest.1, hash_for_num(5)); // block was not updated } @@ -601,11 +610,7 @@ mod tests { let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let pruner = OpProofStoragePruner::new( - store.clone(), - ctx.components.provider.clone(), - 20, - ); + let pruner = OpProofStoragePruner::new(store.clone(), ctx.components.provider.clone(), 20); let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), @@ -620,12 +625,17 @@ mod tests { for i in 1..=10 { let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); let notif = ExExNotification::ChainCommitted { new: 
new_chain }; - exex.handle_notification(notif, &engine_handle) - .expect("handle chain commit"); + exex.handle_notification(notif, &engine_handle).expect("handle chain commit"); } engine_handle.flush(); - let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; + let latest = store + .provider_ro() + .expect("provider ro") + .get_latest_block_number() + .expect("get latest block") + .expect("ok") + .0; assert_eq!(latest, 10); // Now the tip is 10, and we want to reorg from block 6..12 @@ -635,10 +645,15 @@ mod tests { // Notification: chain reorged 6..12 let notif = ExExNotification::ChainReorged { new: new_chain, old: old_chain }; - exex.handle_notification(notif, &engine_handle) - .expect("handle chain re-orged"); + exex.handle_notification(notif, &engine_handle).expect("handle chain re-orged"); engine_handle.flush(); - let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; + let latest = store + .provider_ro() + .expect("provider ro") + .get_latest_block_number() + .expect("get latest block") + .expect("ok") + .0; assert_eq!(latest, 12); } @@ -653,11 +668,7 @@ mod tests { let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let pruner = OpProofStoragePruner::new( - store.clone(), - ctx.components.provider.clone(), - 20, - ); + let pruner = OpProofStoragePruner::new(store.clone(), ctx.components.provider.clone(), 20); let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), @@ -673,12 +684,17 @@ mod tests { let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); let notif = ExExNotification::ChainCommitted { new: new_chain }; - exex.handle_notification(notif, &engine_handle) - .expect("handle chain commit"); + exex.handle_notification(notif, &engine_handle).expect("handle chain commit"); } 
engine_handle.flush(); - let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; + let latest = store + .provider_ro() + .expect("provider ro") + .get_latest_block_number() + .expect("get latest block") + .expect("ok") + .0; assert_eq!(latest, 10); // Now the tip is 10, and we want to reorg starting at block 12 (beyond stored tip). @@ -690,10 +706,15 @@ mod tests { // Notification: chain reorged 12..20, fork at 11 let notif = ExExNotification::ChainReorged { new: new_chain, old: old_chain }; - exex.handle_notification(notif, &engine_handle) - .expect("handle chain re-orged"); + exex.handle_notification(notif, &engine_handle).expect("handle chain re-orged"); engine_handle.flush(); - let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; + let latest = store + .provider_ro() + .expect("provider ro") + .get_latest_block_number() + .expect("get latest block") + .expect("ok") + .0; assert_eq!(latest, 10); } @@ -708,11 +729,7 @@ mod tests { let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let pruner = OpProofStoragePruner::new( - store.clone(), - ctx.components.provider.clone(), - 20, - ); + let pruner = OpProofStoragePruner::new(store.clone(), ctx.components.provider.clone(), 20); let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), @@ -728,12 +745,17 @@ mod tests { let new_chain = Arc::new(mk_chain_with_updates(i, i, None)); let notif = ExExNotification::ChainCommitted { new: new_chain }; - exex.handle_notification(notif, &engine_handle) - .expect("handle chain commit"); + exex.handle_notification(notif, &engine_handle).expect("handle chain commit"); } engine_handle.flush(); - let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; + 
let latest = store + .provider_ro() + .expect("provider ro") + .get_latest_block_number() + .expect("get latest block") + .expect("ok") + .0; assert_eq!(latest, 10); // Now the tip is 10, and we want to revert from block 9..10 @@ -742,10 +764,15 @@ mod tests { // Notification: chain reverted 9..10 let notif = ExExNotification::ChainReverted { old: old_chain }; - exex.handle_notification(notif, &engine_handle) - .expect("handle chain reverted"); + exex.handle_notification(notif, &engine_handle).expect("handle chain reverted"); engine_handle.flush(); - let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; + let latest = store + .provider_ro() + .expect("provider ro") + .get_latest_block_number() + .expect("get latest block") + .expect("ok") + .0; assert_eq!(latest, 8); } @@ -760,11 +787,7 @@ mod tests { let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let pruner = OpProofStoragePruner::new( - store.clone(), - ctx.components.provider.clone(), - 20, - ); + let pruner = OpProofStoragePruner::new(store.clone(), ctx.components.provider.clone(), 20); let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), @@ -784,7 +807,13 @@ mod tests { } engine_handle.flush(); - let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; + let latest = store + .provider_ro() + .expect("provider ro") + .get_latest_block_number() + .expect("get latest block") + .expect("ok") + .0; assert_eq!(latest, 5); // Now the tip is 10, and we want to revert from block 9..10 @@ -793,10 +822,15 @@ mod tests { // Notification: chain reverted 9..10 let notif = ExExNotification::ChainReverted { old: old_chain }; - exex.handle_notification(notif, &engine_handle) - .expect("handle chain reverted"); + exex.handle_notification(notif, 
&engine_handle).expect("handle chain reverted"); engine_handle.flush(); - let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get latest block").expect("ok").0; + let latest = store + .provider_ro() + .expect("provider ro") + .get_latest_block_number() + .expect("get latest block") + .expect("ok") + .0; assert_eq!(latest, 5); } @@ -824,10 +858,7 @@ mod tests { for i in 1..1100 { let rw = store.provider_rw().expect("provider rw"); rw.store_trie_updates( - BlockWithParent::new( - hash_for_num(i - 1), - BlockNumHash::new(i, hash_for_num(i)), - ), + BlockWithParent::new(hash_for_num(i - 1), BlockNumHash::new(i, hash_for_num(i))), BlockStateDiff::default(), ) .expect("store trie update"); @@ -865,11 +896,7 @@ mod tests { let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let pruner = OpProofStoragePruner::new( - store.clone(), - ctx.components.provider.clone(), - 20, - ); + let pruner = OpProofStoragePruner::new(store.clone(), ctx.components.provider.clone(), 20); let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), @@ -901,11 +928,7 @@ mod tests { let (ctx, _handle) = reth_exex_test_utils::test_exex_context().await.expect("exex test context"); - let pruner = OpProofStoragePruner::new( - store.clone(), - ctx.components.provider.clone(), - 20, - ); + let pruner = OpProofStoragePruner::new(store.clone(), ctx.components.provider.clone(), 20); let engine_handle = EngineHandle::spawn_with_thresholds( ctx.components.components.evm_config.clone(), ctx.components.provider.clone(), @@ -929,7 +952,13 @@ mod tests { // The engine has a sync target set but no blocks in the provider, so its catch-up // will error out without writing anything. Storage stays at block 0. 
engine_handle.flush(); - let latest = store.provider_ro().expect("provider ro").get_latest_block_number().expect("get").expect("ok").0; + let latest = store + .provider_ro() + .expect("provider ro") + .get_latest_block_number() + .expect("get") + .expect("ok") + .0; assert_eq!(latest, 0, "Main thread should not have processed the blocks synchronously"); } } diff --git a/rust/op-reth/crates/trie/src/metrics.rs b/rust/op-reth/crates/trie/src/metrics.rs index a4ea943c139..27f71ba0d4d 100644 --- a/rust/op-reth/crates/trie/src/metrics.rs +++ b/rust/op-reth/crates/trie/src/metrics.rs @@ -1,14 +1,15 @@ //! Storage wrapper that records metrics for all operations. use crate::{ + BlockStateDiff, OpProofsStorageResult, OpProofsStore, api::{ InitialStateAnchor, OpProofsInitProvider, OpProofsProviderRO, OpProofsProviderRw, WriteCounts, }, - cursor, BlockStateDiff, OpProofsStorageResult, OpProofsStore, + cursor, }; -use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; -use alloy_primitives::{map::HashMap, B256, U256}; +use alloy_eips::{BlockNumHash, eip1898::BlockWithParent}; +use alloy_primitives::{B256, U256, map::HashMap}; use derive_more::Constructor; use metrics::{Gauge, Histogram}; use reth_db::DatabaseError; @@ -129,11 +130,7 @@ impl StorageMetrics { /// Record a storage operation with timing. pub fn record_operation(&self, operation: StorageOperation, f: impl FnOnce() -> R) -> R { - if let Some(metrics) = self.operations.get(&operation) { - metrics.record(f) - } else { - f() - } + if let Some(metrics) = self.operations.get(&operation) { metrics.record(f) } else { f() } } /// Record a storage operation with timing (async version). @@ -209,7 +206,6 @@ impl OperationMetrics { } } - /// Wrapper for [`TrieCursor`] that records metrics. 
#[derive(Debug, Constructor, Clone)] pub struct OpProofsTrieCursorWithMetrics { diff --git a/rust/op-reth/crates/trie/tests/live.rs b/rust/op-reth/crates/trie/tests/live.rs index 9ee82888b7d..c3c9a3d8a2e 100644 --- a/rust/op-reth/crates/trie/tests/live.rs +++ b/rust/op-reth/crates/trie/tests/live.rs @@ -12,9 +12,9 @@ use reth_evm::{ConfigureEvm, execute::Executor}; use reth_evm_ethereum::EthEvmConfig; use reth_node_api::{NodePrimitives, NodeTypesWithDB}; use reth_optimism_trie::{ - MdbxProofsStorage, OpProofStoragePruner, OpProofsStorage, - OpProofsStore, initialize::InitializationJob, - engine::{EngineHandle, EngineError}, + MdbxProofsStorage, OpProofStoragePruner, OpProofsStorage, OpProofsStore, + engine::{EngineError, EngineHandle}, + initialize::InitializationJob, }; use reth_primitives_traits::{Block as _, RecoveredBlock}; use reth_provider::{ @@ -273,8 +273,7 @@ where let evm_config = EthEvmConfig::ethereum(chain_spec.clone()); let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; let pruner = OpProofStoragePruner::new(storage.clone(), blockchain_db.clone(), 1000); - let engine_handle = - EngineHandle::spawn(evm_config, blockchain_db, storage, pruner); + let engine_handle = EngineHandle::spawn(evm_config, blockchain_db, storage, pruner); for (idx, block_spec) in scenario.blocks_after_initialization.iter().enumerate() { let block_number = last_block_number + idx as u64 + 1; From ef4dbbd3e19d1009fe0116b261d908430c02244b Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Tue, 12 May 2026 14:43:35 +1000 Subject: [PATCH 04/10] docs(ai/acceptance-tests): note native kona prestate build option (#20655) * docs(ai/acceptance-tests): note native kona prestate build option The `just reproducible-prestate-kona` recipe requires Docker. For local test runs the native `cd rust && just build-kona-prestates` recipe is sufficient and works without Docker. 
Surface both, and reserve the reproducible build for cases where the prestate hash must match a deployed release. * docs: clarify kona prestate build preference and constraints Prefer the reproducible (Docker) build; reserve the native build as a Linux+MIPS-toolchain fallback that produces a non-matching hash. --- docs/ai/acceptance-tests.md | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/docs/ai/acceptance-tests.md b/docs/ai/acceptance-tests.md index 0b60f74e9f5..bd977692cd8 100644 --- a/docs/ai/acceptance-tests.md +++ b/docs/ai/acceptance-tests.md @@ -46,15 +46,25 @@ RUST_JIT_BUILD=1 cd op-acceptance-tests && mise exec -- just acceptance-test bas This runs only packages listed in `gates/base.txt`. -### Kona Reproducible Prestate +### Kona Prestate -Some tests (e.g. superfaultproofs, interop fault proofs) require a reproducible kona prestate. This is **not** handled by `build-deps` or `RUST_JIT_BUILD`: +Some tests (e.g. superfaultproofs, interop fault proofs) require a kona prestate. This is **not** handled by `build-deps` or `RUST_JIT_BUILD`. There are two ways to build it: + +**Reproducible build** (preferred when Docker is available): ```bash mise exec -- just reproducible-prestate-kona ``` -**Requires Docker.** If Docker is not available in your environment, ask the user to run this command for you. +This produces a prestate whose hash matches CI/release builds. It works on any host with Docker installed. + +**Native build** (fallback when Docker is not available): + +```bash +cd rust && mise exec -- just build-kona-prestates +``` + +Only works on **Linux** with the **MIPS cross-compile toolchain** installed. The produced hash will not match release builds, so this is only suitable for local test runs where the hash doesn't need to match a deployed release. If neither Docker nor the MIPS toolchain is available, ask the user to build the prestate for you. 
## What `build-deps` Does From f1fb0e0509790ff0877c6c2734ab3e7f7653044c Mon Sep 17 00:00:00 2001 From: Zak Ayesh <44901995+ZakAyesh@users.noreply.github.com> Date: Tue, 12 May 2026 09:35:41 +0100 Subject: [PATCH 05/10] Add supernode explainer (#20561) * Add supernode explainer * Update docs/public-docs/op-stack/interop/explainer.mdx Co-authored-by: George Knee * Update docs/public-docs/op-stack/interop/reorg.mdx Co-authored-by: George Knee * Update docs/public-docs/op-stack/reference/glossary.mdx Co-authored-by: George Knee * Link to Light Node Topology Notice Page in Gloassary * Updated supernode diagram * docs(supernode): use "component" for static description of op-supernode Use "component" when describing op-supernode as a static entity (its architecture and role). Keep "process" for runtime arrangement descriptions ("inside one process", "running every chain inside one process") and for the literal OS-level in-process vs operating-system process distinction. --------- Co-authored-by: George Knee --- docs/public-docs/docs.json | 1 + .../op-stack/interop/explainer.mdx | 1 + docs/public-docs/op-stack/interop/reorg.mdx | 5 +- .../op-stack/interop/supernode.mdx | 160 ++++++++++++++++++ .../op-stack/reference/glossary.mdx | 37 +++- .../interop/supernode-architecture.png | Bin 0 -> 66667 bytes 6 files changed, 196 insertions(+), 8 deletions(-) create mode 100644 docs/public-docs/op-stack/interop/supernode.mdx create mode 100644 docs/public-docs/public/img/op-stack/interop/supernode-architecture.png diff --git a/docs/public-docs/docs.json b/docs/public-docs/docs.json index ba8665507f8..4bf6722f69f 100644 --- a/docs/public-docs/docs.json +++ b/docs/public-docs/docs.json @@ -2324,6 +2324,7 @@ "pages": [ "/op-stack/interop/interop", "/op-stack/interop/explainer", + "/op-stack/interop/supernode", "/op-stack/interop/reorg", "/op-stack/interop/superchain-eth-bridge" ] diff --git a/docs/public-docs/op-stack/interop/explainer.mdx 
b/docs/public-docs/op-stack/interop/explainer.mdx index 0aa4218a768..21c370976dd 100644 --- a/docs/public-docs/op-stack/interop/explainer.mdx +++ b/docs/public-docs/op-stack/interop/explainer.mdx @@ -183,4 +183,5 @@ The Superchain interop cluster is being rolled out iteratively. To see which cha * Learn [how messages get from one chain to another chain](/app-developers/guides/interoperability/message-passing). * Learn how [interop handles reorgs and avoids double-spends](/op-stack/interop/reorg). +* Read about [op-supernode](/op-stack/interop/supernode), the component that derives every chain in the dependency set together and enforces cross-chain safety. * Read the [cross-chain security measures](/op-stack/security/interop-security) for safe interoperability. diff --git a/docs/public-docs/op-stack/interop/reorg.mdx b/docs/public-docs/op-stack/interop/reorg.mdx index ddb21f30c83..ad9cfa6a096 100644 --- a/docs/public-docs/op-stack/interop/reorg.mdx +++ b/docs/public-docs/op-stack/interop/reorg.mdx @@ -85,7 +85,7 @@ L1 reorgs typically happen at the unsafe head — only the most recent L1 blocks When an L1 reorg does affect L2, one of two things happens: * **The replacement L1 block carries the same batch data as the original.** Derivation is deterministic, so the L2 chain it produces is identical, and the reorg is a no-op from the L2 perspective. -* **The replacement L1 block does not carry that batch data.** The sequencer notices and reposts the affected batch in a later L1 block. As long as the batch lands again before the [sequencer window](https://specs.optimism.io/glossary.html#sequencing-window) elapses (3600 L1 blocks ≈ 12 hours on standard chains like OP Mainnet and Base), derivation reproduces the same L2 chain. If the window does elapse without the batch reappearing, the affected L2 blocks are replaced with deposit-only blocks (see [Invalid block](#invalid-block) below). 
+* **The replacement L1 block does not carry that batch data.** The sequencer notices and reposts the affected batch in a later L1 block. As long as the batch lands again before the [sequencer window](https://specs.optimism.io/glossary.html#sequencing-window) elapses (3600 L1 blocks ≈ 12 hours on standard chains like OP Mainnet and Unichain), derivation reproduces the same L2 chain. If the window does elapse without the batch reappearing, the affected L2 blocks are replaced with deposit-only blocks (see [Invalid block](#invalid-block) below). The takeaway is that L1 reorgs do not by themselves break interop guarantees: either the data comes back and L2 stays identical, or the chain falls back to deposit-only blocks for that span — the same behavior as if the sequencer had simply gone offline. @@ -160,5 +160,6 @@ At worst, some unsafe blocks need to be recalculated (if one fork is chosen over ## Next steps * Read the [interop explainer](./explainer) for the rest of the architecture. +* Read about [op-supernode](./supernode), the component that derives every chain in the dependency set together and enforces the safety levels described above. * Read the [cross-chain security measures](/op-stack/security/interop-security) for safe interoperability. -* View more [interop tutorials](/app-developers/tutorials/interoperability/). +* View more [interop guides and tutorials](/app-developers/guides/interoperability/get-started). diff --git a/docs/public-docs/op-stack/interop/supernode.mdx b/docs/public-docs/op-stack/interop/supernode.mdx new file mode 100644 index 00000000000..4e2e57ee5ab --- /dev/null +++ b/docs/public-docs/op-stack/interop/supernode.mdx @@ -0,0 +1,160 @@ +--- +title: OP Supernode +description: Learn how op-supernode runs every chain in an interop dependency set inside one process. 
+audit-source: + - op-supernode/cmd/main.go + - op-supernode/supernode/supernode.go + - op-supernode/supernode/chain_container/chain_container.go + - op-supernode/supernode/activity/interop/interop.go + - op-supernode/README.md + - op-supernode/safety-labels.md +--- + + + OP Stack interop is in active development. + op-supernode is the runtime that operators of interop chains will run, and the architecture and interfaces described here may continue to evolve as the rollout progresses. + + +# OP Supernode + +*op-supernode* is a component that runs every chain in an interop dependency set together as virtual nodes inside one binary. +Where a pre-interop deployment runs one op-node per chain, op-supernode hosts every chain side by side, shares the L1 and beacon-chain plumbing across them, and adds the cross-chain message verification work that interop needs. + +## Why op-supernode exists + +OP Stack interop changes what a single node has to do. +Before interop, a node only had to derive its own chain. +With interop, a node has to derive *every chain in its dependency set*, because any of those chains can emit an initiating message that the local chain depends on. +The local chain cannot advance past a block whose dependencies it cannot prove, so the derivation of every other chain in the set is on the critical path. + +Without consolidation, every operator runs a full op-node and execution client for every chain in the dependency set. +That duplicates the L1 client, the beacon-chain client, the derivation pipeline, the storage layout, and the operational glue around each one. +For a fully-connected dependency set — the configuration the [Superchain interop cluster](/op-stack/interop/explainer#superchain-interop-cluster) targets — the duplication scales with the size of the cluster. + +op-supernode collapses that duplication. 
+It runs each chain as an in-memory *virtual node* inside one process, with a single L1 client and a single beacon client serving all of them, and adds the cross-chain message verification work above the per-chain layer. + +## How op-supernode works + +The supernode is composed of *chain containers* and *activities*. +Each chain container hosts one virtual node for one chain. +Activities are modular components that operate above the chain layer, with access to every chain. + +An op-supernode hosts two chain containers (Chain A and Chain B), each containing a Virtual Node and Engine Controller. The Engine Controllers drive external Execution Clients. A shared Interop Verification Activity and L1 Client sit alongside the chain containers. + +### Chain containers + +A *chain container* is the supernode's wrapper around one chain. +Inside a chain container is a *virtual node*: a consensus-layer (CL) implementation hosted in-process rather than as a separate operating-system process. +Today the only virtual node implementation is op-node itself, hosted as a library; the chain container manages its lifecycle (start, stop, pause, resume) and exposes a stable interface to the rest of the supernode. + +Chain containers also drive the execution engine for the chain through an engine controller. +They expose the operations the supernode needs to verify cross-chain messages — deriving the local-safe block at a timestamp, fetching receipts, answering output-root queries — without reaching into the internals of any one chain. + +### Shared resources + +Running every chain inside one process makes shared resources possible. + +* A single **L1 RPC client** and a single **L1 beacon client** serve every chain. Cache hits on L1 blocks and blob lookups carry across chains. +* The **JSON-RPC surface** is namespaced per chain. `11155420/` reaches OP Sepolia's RPC; `1301/` reaches Unichain Sepolia's. 
Tools that expect an op-node-shaped endpoint reach a chain by addressing it through that prefix. +* **Metrics** are namespaced per chain via the same scheme. +* **Data directories** are namespaced so SafeDB and P2P state for one chain cannot collide with another's. + +Some flags are intentionally owned at the supernode level rather than per chain. +`--l1` and `--l1.beacon` configure the shared L1 plumbing, and any per-chain override of them is silently replaced with the top-level value. + +### Activities + +An *activity* is a modular component that operates above the chain layer rather than inside any one chain. +Each activity can register an RPC namespace on the supernode root, expose Prometheus metrics, and run a goroutine for as long as the supernode is up. + +The supernode ships with a small set of activities: + +* **Heartbeat** — emits a liveness signal and exposes `heartbeat_check` over JSON-RPC. +* **SuperRoot** — produces a *super root*: a commitment over verified L2 blocks across the dependency set at a given timestamp. Exposed as `superroot_atTimestamp`. The fault proof system needs this commitment to produce an interop-aware proof. +* **Supernode** — exposes `supernode_syncStatus`, an aggregate per-chain sync status across the dependency set. +* **Interop** — does the actual cross-chain message verification (see the next section). + +## Cross-chain message safety + +The interop activity is the part of op-supernode that decides when a chain's blocks have satisfied their cross-chain dependencies and can be promoted past unsafe. +It runs above the chain containers and reaches into them through a narrow interface. + +For every block produced on every chain, the interop activity answers one question: have all the initiating messages this block executes been reproduced from L1, and at the same safety level the destination block is trying to reach? +The activity decides per round between *wait*, *advance*, *invalidate*, and *rewind*. 
+The decision is recorded in a write-ahead log so the supernode can pick up after a restart in the same state it was in before. + +When the answer is *advance*, the supernode signals the chain's CL to promote the block. +The signal flows through an *authority* interface that the chain container holds and the virtual node defers to: the supernode advances safety on a chain only via the chain's own CL. +The CL stays the single source of truth for safety on its chain, and the execution layer (EL) never has to learn about interop. + +When the answer is *invalidate* or *rewind*, the supernode tells the chain to back out the affected blocks. +A block on chain B that referenced a chain-A log can be invalidated because the chain-A log never made it to L1, or because the L1 record contradicts what was gossiped over P2P. +See [Interop reorg awareness](/op-stack/interop/reorg) for how that plays out at the chain level. + +The user-facing safety levels do not change. +Block safety levels (*unsafe*, *safe*, *finalized*) are still defined as they are without interop, and the EL's view of those labels matches. +The supernode's role is to make sure the labels mean what they say once cross-chain dependencies are part of the picture. + +## op-supernode and Light CL + +op-supernode pairs with the *Light CL* mode of op-node and kona-node. +A Light CL turns off local derivation and mirrors safe and finalized state from a trusted external source over the `optimism_syncStatus` RPC. +It still advances the unsafe chain over P2P; only the safe and finalized views are delegated. + +Together, supernode and Light CL form a topology: + +* One trusted op-supernode (or a small high-availability pool of them) runs derivation for every chain in the dependency set, plus the interop activity that promotes blocks to safe. +* The rest of the operator's fleet runs op-node or kona-node in Light CL mode, points at the supernode's `optimism_syncStatus`, and inherits its safe and finalized view. 
+* Node operators run the same setup, minus the sequencer Light CLs (only the chain operator produces blocks). + +This split is what makes interop tractable for operators who already run dozens or hundreds of nodes per chain. +The expensive multi-chain derivation work happens once, on the supernode; the rest of the fleet stays cheap. + +```mermaid +graph LR + classDef supernode fill:#FFE + classDef transparent fill:none, stroke:none + + SnA[Supernode-A] + SnB[Supernode-B] + SnC[Supernode-C] + + Px0["Proxy CL
Chain-0"] + Px1["Proxy CL
Chain-1"] + + subgraph c0["Chain-0 Light CL fleet"] + direction TB + C0Seq["Sequencer
0..N"] + C0Rep["Replica
0..N"] + end + + subgraph c1["Chain-1 Light CL fleet"] + direction TB + C1Seq["Sequencer
0..N"] + C1Rep["Replica
0..N"] + end + + SnA --> Px0 + SnA --> Px1 + SnB --> Px0 + SnB --> Px1 + SnC --> Px0 + SnC --> Px1 + + Px0 --> C0Seq + Px0 --> C0Rep + Px1 --> C1Seq + Px1 --> C1Rep + + class SnA,SnB,SnC supernode + class c0,c1 transparent +``` + +## Where to go next + +* Read the [interop explainer](/op-stack/interop/explainer) for how cross-chain messaging works at the protocol level. +* Read [interop reorg awareness](/op-stack/interop/reorg) for how the safety model handles equivocation and L1 reorgs. +* Read the [cross-chain security measures](/op-stack/security/interop-security) for how an operator can configure the safety level it requires for inbound messages. +* Read the [specialized op-node topology notice](/notices/specialized-node-topology) for the operator-facing pattern of running light op-nodes with `--l2.follow.source`, the fleet-side of the supernode-plus-light-CL topology. +* For implementation detail, see the [op-supernode source](https://github.com/ethereum-optimism/optimism/tree/develop/op-supernode) in the monorepo. diff --git a/docs/public-docs/op-stack/reference/glossary.mdx b/docs/public-docs/op-stack/reference/glossary.mdx index a31703f678f..67bf2a1c0d4 100644 --- a/docs/public-docs/op-stack/reference/glossary.mdx +++ b/docs/public-docs/op-stack/reference/glossary.mdx @@ -34,9 +34,9 @@ reality the block time is variable as some time slots might be skipped. Pre-merg ### Delegation -Refers to the process of assigning the voting power of your tokens to a designated community member, known as a delegate. -Delegates are individuals who have volunteered to actively participate in the governance of the Optimism Token House. -By delegating your voting power, you enable these delegates to vote on governance matters on your behalf, while you retain full ownership of your tokens and the freedom to use them as you wish. +Refers to the process of assigning the voting power of your tokens to a designated community member, known as a delegate. 
+Delegates are individuals who have volunteered to actively participate in the governance of the Optimism Token House. +By delegating your voting power, you enable these delegates to vote on governance matters on your behalf, while you retain full ownership of your tokens and the freedom to use them as you wish. You can also change your chosen delegate at any time, allowing for flexibility in how your voting power is represented in the governance process. ### EOA or externally owned account @@ -53,8 +53,8 @@ at the request of the L1 consensus layer. On L2, the executed blocks are freshly ### Optimism collective -The Optimism Collective is a band of people, projects, and companies working together to build a better economy for everyone, -united by a mutually beneficial pact to adhere to the axiom of impact=profit — the principle that positive impact to the collective should be rewarded with profit to the individual. +The Optimism Collective is a band of people, projects, and companies working together to build a better economy for everyone, +united by a mutually beneficial pact to adhere to the axiom of impact=profit — the principle that positive impact to the collective should be rewarded with profit to the individual. New model of digital democratic governance optimized to drive rapid and sustained growth of a decentralized ecosystem. @@ -269,7 +269,7 @@ It also submits [output roots](#l2-output-root) to L1. Range of L1 blocks from which a [sequencing epoch](#sequencing-epoch) can be derived. A sequencing window whose first L1 block has number `N` contains [batcher transactions](#batcher-transaction) for epoch -`N`. The window contains blocks `(N, N + SWS)` where `SWS` is the sequencer window size. +`N`. The window contains blocks `(N, N + SWS)` where `SWS` is the sequencer window size. The current default `SWS` is 3600 epochs. 
Additionally, the first block in the window defines the [depositing transactions](#depositing-transaction) which determine the [deposits](#deposit) to be included in the first L2 block of the epoch. @@ -284,6 +284,31 @@ sequencing window. Epochs can have variable size, subject to some constraints. The network of OP Stack chains connected by native interoperability. Not yet live. Chains that are part of the OP Stack ecosystem share security and a common development stack (the OP Stack). The interop cluster specifically refers to the subset of chains connected by the OP Stack interoperability layer. +### Dependency set + +The set of chains that a given chain accepts initiating messages from. A chain's local block cannot become safe until every initiating message it depends on has also been derived from L1. The *transitive* dependency set extends this to the dependencies of those chains, and so on. The [OP Stack interop cluster](#op-stack-interop-cluster) is configured as a fully-connected dependency set: every chain in the set has every other chain in its dependency set. + +### Supernode + +A component that runs the consensus layer of every chain in a [dependency set](#dependency-set) together as in-memory [virtual nodes](#virtual-node) inside one binary. The supernode shares the L1 client, L1 beacon client, JSON-RPC surface, and metrics across chains, and verifies that every cross-chain message a chain depends on has been reproduced from L1 before promoting blocks to "safe". It connects to an execution client for each chain using the engine API. Often referred to as `op-supernode` after the binary name. See the [supernode explainer](/op-stack/interop/supernode) for the architecture. + +### Chain container + +The [supernode](#supernode)'s wrapper around a single chain. 
A chain container hosts one [virtual node](#virtual-node), drives that chain's execution engine via an engine controller, and exposes a stable interface the rest of the supernode uses to derive blocks, fetch receipts, and answer output-root queries. + +### Virtual node + +A consensus-layer node hosted in-process inside a [supernode](#supernode), rather than as a separate operating-system process. Today the only virtual node implementation is op-node itself, hosted as a library. + +### Super root + +A commitment over verified L2 blocks across a [dependency set](#dependency-set) at a given timestamp. Produced by the supernode's `superroot_atTimestamp` RPC and consumed by the fault proof system as the input it needs to generate an interop-aware proof. + +### Light CL + +A mode of operation for the consensus layer (op-node or kona-node) that turns off local L1-to-L2 derivation and mirrors safe and finalized state from a trusted external source over the `optimism_syncStatus` RPC. A light CL still advances the unsafe chain over P2P. Pairs with a [supernode](#supernode) acting as the safe source for a fleet of light CLs. +Learn more at the [Light Node Topology Notice Page](/notices/specialized-node-topology). + ### Shared L1 Bridge The L1 bridge contracts which govern all OP Chains in the OP Stack ecosystem. This bridge can be upgraded by the Optimism Collective.
diff --git a/docs/public-docs/public/img/op-stack/interop/supernode-architecture.png b/docs/public-docs/public/img/op-stack/interop/supernode-architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..8e3acd2250cfaabd987df2957121761d515bf69d GIT binary patch literal 66667 zcmdSAbyQSg`!0+Lh=d?WBP9aT-8vwRz|bu%-CYU;h;$B(#0({%AYIY|0>aSUNaxUT z9^UtN*7w)<=lRxm_F@Tcn7yC42UBe}sX7fh#8~1;xO){~ZJ44hrio zxKl&n-ivVu14B;gjk@RL){IB;)3>vjdqo?ZUM?SJf`b{?D2Y41k#bscb*XVRKAG&x9yL&tgA1Ym9Qq$iu?HS60pWGz^j4g_Yg=-uPj9)thkoo5r z&k62Q7Q946E%Ea4smeR?f=Rk-JSX_;W*{FXc?Zn;HQrynv}_fuPXrhk?*%*?4Gm<9 zi7`GAe54$glzxH*wgm&@t@(e~2HgJd4+Q_ckMV!x#{cWX=bz_;31a->mt#gG6mo!n zpp;2-dt-y=3YY9)V7H??mfs_QaCSa!`|Fm$C5`n2;@HpD3zdM7PjtORq#`T(VGt}z z5)6#j&+lV=6qCcsw$go0@ZGMysHmu^sj0R$g&-~|DJec4Rb{7FJw+8JCAh}&vbD)r zC&_MVBUyo&Pl-o~DD(ja#_cHIDKIPDCM;ek4=Q3OEFxN%x6X6oIbPogLb|=4g z4b6WcX5DGJ*wLx5h|L<%h==m?@hN4A`<)-H)A$_?2H}g3SK5qezY0;T!RPEGCnx8( zn;aHSU-}N#peW)l#;;7pAe*t$#?!@c%f3{ET~d!v;qJdWagUv8r=?E2$)DXT&c-jM znzA>A5xjiGL`!eME{lP8zdnB7N7&`G({R{7IjL(fM%9&_P{nSXBh6IgKpvv1p@EJL zxLlgB%u;$0Z{|XkI;uC7v!;SI=xtM2&O7Cks| zBp;L^>{fTaQ7q!IqsVMfZ9fyv(Nts7@yK8dkx*BvoWgCsK7^R5_lWK=p^naX%Sv1l z43o-1E1X_kk4`Yh%8pklF^{;-EbafCjsL(~L|tZ;x+Iv2RFyaVpe+n%DO);U%F4FhW^UN*G3D0Q`+o?pC6sbM%Y6_~{%Rmf-US1(|W^)p3k z>Qr~8s&WVa@h@pwmpyJ%*@q@#6GPgUoO;W3cki8@wGxVFDUZ2nRE0vx9|gN3Dcr-g zTq#4jkWt3K8lF7=@02mh8{J;G8e|DL7#0c0$*-fmX#y_yjoU&xOt>Jwf5+0xL{t0y z;pzhJi-6AkN#VIT-rf}KkCwQu@!U5_#I~E-8UL}w)kTS_i{I`On6tIEhOYNlZJS03 zA@lNJ&^quax%d}_`!P{VX6EJ@?vqyRaGQyG&KdjBzVC?Yl&`R;h2_zP*QuN6BxSJhzJ<+BT(_je+r>sQ(+rO&wiL?M8-mU)c2@!Q+P-E6MLU zli_dy|I1Ub5JmHrI-17D#&ofJ-i=27XDfORjfsg|qrtz~$CuOd9L#GPTo z5}k@wqnq=w1ZFkcsacbApv)BEuvKuLZ%e$_utenjH>}nbLC3^4G?>1db z+LSc{FElLkz_j|ii7o|Ch?Su}+bAxp^6voaKIcAVCqw0?zL|^pKWoY`;%1R*!IG{b zZ)0PV<+YlUW?#q5#3UncrK9RgJB;xbg0YxaK?G&ZRS& zh6XQG&0Gtbt4P(A!e!cdww85M7I3xs6ZH<~VUYC+)F($%U%%FQ1+0yOeBso#J0%l6 zy<}g#ORc_jOc6njQW`(ln(MinYY(>s=;2XbfRN8usjt*539v9DT^Wx8ELd_6zQ%bhPz_eVxXigKY;p`%l( 
zU2CIy4s7h~)S{k_yyS0R00eg*0@kQdK5fDAbgvDcM$mEo^6U_|&5=?@zME7Jta>V+ z4Hm6oSv81uL74dB3oxRP$7B$!i3%K4@#li`^ALO*|I>v~5YvFgmsM3IL3@P8+{7sH zn!&(5=KQLQE)GtYqGjlwBnIXsFfuX<%zB!Rl^Ud_re^x=)B@jlhn2nQ%|PgN(9M+P zxfsT_F--umoic3p7o%km5vkA1V<3IJy1I(o_>AP`Y6PGMWoSx@ixo7i8DZ%jvtCT;ju1{#-^DN{%@SS2ES~8WNWB0(m|h&M z(~zA_jxX!+X)o@ybbXcz%?mzyCF` z+rP~LH}5%5&o|4I(*;#KWc$x{}MQlyUgmAtZ` z$Ch2E+zjkbS=0H369{A%+tnT$!*VOtV#x}@1OUNoRSbyRq#~_SO}nDRK&S$nJ|eCn zpZ_jH7(~(6&%c9DgAdF586hfxYt(`uw%|Cv;v7lNXT3H41Bc|5%DVkfo*Xa$kQ7Rm zNG7HS!4j-R(nyH2vi6q0(s2FhwxH6PoKUV+W;h^zVGBYdsVPWPw=hRzAXEMWNG#Q_ zdYAUU88!PawuiAC6dLyRve$2A-CX9AYfsgC?4sU@l+@M=RrYtu(D?B>&cBBIKA8)k ziSE+S$`r;En0CAksws*u8Vz@8Ln*oaFFi+uYTuH-Vr9Jm^MiKL5@3mfxrNgBd%k=+ zYtQw5;k(q_ZY(1U@dV__b`49v}%h|b1*Y|oy>TYfgtw|{9__yIUVJ!@G5_tQtg zZ40TX88Om7i=Jdk{I3&r) z+D7xocQ8JF4FzWRd$&APMV>57N|buQ==uPT!(Q_q;9VZee*HRED=68x z41BgLv;5CN0H38_hdjuxvYRUE_P4HVlcMADJ>ARC@)y>)=8ctA*VHWfl#n+fRS&}W z>c8A*d$CELSlI%*BBc_8dQHT{XqhY)nV9#_uBF1VGJ(gEeH6BhUrnrwjJu+!at8}0 z6LekZ+<1pg)e}|u?r`&!mDkkN7}UEbnUjUI2t9)w?ati*dx=d0OBC9+@aNB;&Q57G z74ek!@m5M=BEtJ@`1Rjx$xz?|Zs9>ltI0aI&6kJ`9Tx2&x!CT~pR8(yYj#~Ox zXQ-*kZ`=w~;AnKkfA~0SAb*rtT}J^c7N7tY-y(O7lJJ7jGUc!ilZ^{KwL@C^SXpa& ziHoPhZjjF#n^UWVhb7@HF?|MhQ&RyN_W9^3)y=Wu{Ng}FNq8`z91nut@P)dBGgTQP zROAck{*g@RVW}Dz%m_d1T5{5$vb15*9vvRG{b5b_|0F7Yet!S!v$c}3c9G>6Nml~_ zr3%Y|Okl>%d#yOl0R3#1H5=lpsMDH_e$!d%j8sdp-rUd9dm1r8R#;j!?5^HmMfJYw z-#>+wIyOxML)-|>>Kq~8MUP-^dgFl-`q1~q!> z>ffR)4(&_(>TF8P=G-ajV(;~y?M*L+q0#7;_nh?fZBCSYJwE^xPUO^Z?CI}+0W&i* z%S&>cPrrpFAZkcj-|^;ys+W+-VmVPViHV7kM_!xkoF}Ssa&T~r#LHn778Zg{00F65 zO;b~vA3i_}%gfKdxbkO4q}C@33j)AZxQEQF@kVd3#Z`GrLbvC8bPP$}SKmCJq44d#$4W5ID`J2CQszo#5bL@Cj=tuS)AnU<=Fpo*`BO) zq3qgr_Yutm-U!(54qyn1MeDjm3 z63YE9jth5mf7nJFoSqZwsoWjDuD4%Pn#@)9PiW&jLVr+H4BS5;M|0A~kG zShXt~g!v2+4|mG06=wkdFK;hjS3*!wy7i*V1Xjwf?!G<^;}C+J)rQ8i)ik)F_d4(% z1qB5Vw?qI@VJ)pO0gSzn7!QjUJV}$4(e>ZoY`0P{LvjiKOTe+TRaN&F!#MyLZY+70 
zJNM>AUV`}1UrD2Yt4bE+!LXXL-`NVhuwPo|VP9hmvNXt@)3*t9+&I+l3O(lE+>9pQ_qpWkT-tvV^pKc@1oZ1*vUedHkv&b8C_9-`DYn?^UO!gv5V`U6sM_tURg5jC(QfjCSq=G0uPTBd$6bT~>u7sQYlLd1n5yLL>1LwMw%jb_4Go=6p);f7y|dk#oL>k-}2eo|aQpb_0S-5uT##Fu|?wZNlzb+Pb<$ zcux_o*OzBZnVu#9%PJ`;X=o5nSWBC15OdW1%K@2O+b$k@4#49;rkKz8cjKrfUw~~6 z*9MfSFgwRT-nlm%wU7>2&3}+43zHOGEImNzJ*;KEV1gPZatM*cf5VdxH`0m%)>Cat z$#_4gFc8whZga!;Hay_GyU=|_F>SH(yu7^c-}eAuQzSl|o)w1G*4Dye>gwtM?630A z>(RP}rz;6+bg`dHH-BhvCF$9x2sq64B(l00Xc+`&`5go7=a=NI#-fi+YhS-z6-6Zy z0-%pl9o_a%6N@$#84Sr*V6QdnNnmza{iEvhmbs*;$Q$^ynzFir6ldpc zZ6`gQlCPedm30%~z-IKQo|?;ZGSh684-2iq8#-|qb&8c-r%{m9M6Nk;5SGTZ_ zq*$Z?aU(A;58^a!^;Ah~oZZyK!~{q4r88i>>JP{hDTw)EMw}=+#{rQRx1X+&WypcX z^c1JNprf~O%`_x5=nd2wbC=f?1O=nw&qKX&(p}m(Gkp)bRdb~a8=;KG!-j=ztwPEQ z3Kv}x*V(Nl0J|a|1hrK33vYtOmurL%i%LRnshHbpie*vb%sH_E`xwuFsLOBK&;p~F zjRreE%3iod_6 z_o2DUBNA0RUO2%J)6R%}Ptn_2A}AKB>-Khc3+=ZPfojVnV5oU4X55{N`<{TU&iWqq zX&HL0#4C&cCk-_A_4JVQ+m18B^|?*jKaVOrMFPl;Kon|B+D*CF>g$<765F|Xc+`4n zk%2lJcMQt}2(SuRkSz0sk(`}z-DV(VfE;rK47=&(`T}5XErW0J@80!igOu7qtOX2T zf)X#kJmt|cpcO8lT4m(95DaK$!8|-OQ`~)P{MIKT1!iTbUZcMhGJ!}1;`S|BzAcCV zI|b<8PDgWt_}-`2H`iAjHS>Xh$}Bed=OWBvpXfFm%2T7XTBj*V^B%{KX6#Kpy# zag#lMD4DHdT@9wCsj0ci#NB!6A0SAn*=5vd0w%1gstV}S<2J0&HW6w@obM97y}eWC z$9>m+(YVdLg6}8))RSV3C_&Q~zYh%9GGjkE@Sw(Ge4~Kp-(e-&-fs!1+z7HtEn!M? 
z0@Zc-M5YM~n%sn;ZTI3}R>8F*1^E(-*wh6iA>3{X6uk8?Td|2=BqQaR(Vn8#bUEfa zN#=GOe+oWj)RorH?666v6lu&?k82o=so}gYpoQzfr}Gjv>YC74L!PsTZOP*a}GH{J=$q}krN!Xu%Gsh8qUOTAlM=Ym--ee0_K8QF##dQ) zOO5T-x5le4Y4sj8%JydMFG!;?>RmFP@XO(#@I)l27rtKpLOxWmebl8D zJorAuPf2!lo&P)??HA2%)ux9nVU`VZ*)Z>fxqD5TA}+|oMdwommRHl>n6kShs3?k`#M1hmD# z^?#(*_kHfK6NpdG-^zytIXZq#SbPrNbUT182#nK}Dr|55myk@u{{V^c30_iVh7Ae| z$Cr3`XSKCRV>+V$!L2byl*~mo-keWU8$W#N$cg>di_I_B&P<(Oa3C7)tWUW}#m-30l0X3*Xj!2VBx z$3MIKHSRa*Lp@5*cd*wNr2YbrTVoWDuiT^wy2E~b`VZ21KGk6CK%l9VgCkq{jx?vc zX_;WqX!V&&v(kOtZpXhwFkfZs9=C0^k9JZJ=?@9o z3T7prUe`GIn8&xTzaE;3{a7gxN%dgdhW^^L)B%jO-7XD!^gSMby-#37- z*z6E3gn==k83302_PRBN|6I50e|!DkcjN!JBi_QJ3I$zOGG@e6=v5l1-(O8A2PZ%Y zA<0!~Cq?`YpSUBh&l`#L3j(%|uQ7g!H}?=Q>yhSQo2i-v0d*jk|D^DmBGKV?V5LY8&5@?^^z4cs5 zSF68{o9d>OHX9qGV~yuM zOT$&TBKf3T#`q|F{|J4G+iY?Em!B$9=V0Ap{hjAg74AcQ=+ zATg^RaJ6k>)r+ai7D(btWP6#fj>aaW!_HRcNtWo+8qfc=N9sN;4XWh8WdsX`zV=#< zek)8n+8f)nw3S6)d+~@iM(sLH-%zb|?`d=O#;>03G@~>Fsz>1~_2{%s+?g22&gDPm zk)c$_0XTY-g8LDV{ne^BpGCx$CR0$?t`3*^NWFkxUX#mu?=x4CA{)B&_m53w?i?#5 zB-^{37gyvrb62?AiA{)(i!|HpER0c32H0jvF<2!_ySzR05eSgN67s(M=YAEw1yl0y z_N;w6r#%x0?-R*_t^%!c?}Js)!`6 zhyjIGP_xF1rxea=)OgeCyT@ zeR5|588tIvKCY^rM~+NxCP)kAkld-RnP74Vhsn~4JhtCGNcA!}(9Ow+k&=aAo2$#W zl4{^zSfy{cKMa>=e&*i*<0`7hWisYCoy(z?@n9s}=J|N#>Ri6gCsA*?_!DkP2ys>G z-A!84Zm`g>8~Kvz(HW2&q1(nmxuu=HuIRDe`Xr=l9uE$M$_EobV@8RhM+iK6US`97 z_ZdOyYWB>%aeS&7VP-#-ma3k^uw)1zx)1qOT1n#}k7%oePPB2!%*k^jkkxWH%V)Lp zFBu0zw3m>`gN{Up=$rY;=u%EarAL8hu>T}F%**HOMT2I%XDv5=lvba~4VPe3QPiFm z0d00Ln^vNoj2hdaFXazbjIMk(x;?U>pWM%{dGh%viP0TCF-g(_GC_jq^iBwLB^Gj+Q;5QcdWxPpA~&7* zHkEAs^XHsgsn;cQn4{sOg$c}vT$I%E727Z~w)NBaw&R8d+t9nW_P^>Oy*MEX8lkP8 zTj@R4aq3-hc|wOxTUXeXx@08A?bdf+q#rn6O(}X<*DW1%vu@X9ptvsnDuk9@TDXZOXp38|ER0drDct*m@t)>A zc?HpNG)G@vLLExq^3uh2irOwGQdNHaYsI;M7XN^k#~c5#X%LfMDn&8~G_D?$Y5E^# z?UD=dnG>7iH9N|dE!F8^r^zlJq}U)!vaUuOQb#J?hnz0{1RC1O7zlA!tKr3#`%AVZ zbTu==js(kX>hHYmOKjQAJX6IInm=k%GFOrim1u1G$<=6gK3{?PSg^9JD6LZL-H@Dg zoaP_8IJ2jDCuAs>{WS$_+U}RDAhX}4{l)S2O(*FZB~hLG^;a%=Mp}q$75)@i0<^nh 
z(rQzK4eif#5`{?=h=cqg#jq~tVubEzOwG1JCSftEgVq{DYuGam&k|$P+(}umgq`I` zoGhVGk#yO!**=<*)VddFLNo>W=ylEP7wz0q>h5^obJmJttv)!&i)v#%a66AEL(3QxJ&?MUTsVbXdQ)?lhffP)h7A4r8DSC9c0L zjcOAnkYh>GNbG0kXvLxFfM;e6LPD-nH(VU-bKWZml>NPVXj&E1I@! z{0-4TgeX|1cDi=DTGTvdW81TzaEFqAm>4I|zCXvQAfNW`n&!6WS_~NYLB@&Y6J9#c!{;axPRl5?4ieJ!6$Z7z^ISqVd5bYTQD zB|MSWuUb(_iF3>s6OV}M3Nx;LTN%iB47!qhd+hH&$3Q3hldIECb+~LV(6T>@zg^l; zbU1C@?Um)o@CYmNTh&rk#`l>!a7UP%u($=PCckq`=H*u9Bv*;X|W*dATp=#HHu%N>}G0vUXCBTI~E=v!V)hbw`jg z{&Pe=au8AfvA6*^P;vD+^%NvPHDax5>6yb4U{eido}n%@67gJod8gjz;DX`N9P*EL!*0CsC(5 z332UIj>TsZ8$06WFoMN_{k4Q;m`xjdb8p(h#Po7p>yvdm)THY`nSq+X=C+slp#Xq@ zAU8%4EROs7a42K#TX-G_uYr#s#~v>~&^1gCXA|!NYBvQ376-$9v^7N5xQu@rvx*3k z^msQ$FHWtx$$uq51D7K0u9trkD{J(fU$gcJ6gx?s5yaKHH&*h@d}rvo8v59b7+7zQ3V^ z$kwo%cqsio)~;9U=04u$3)^^UgNGbGbP|lfS)P2xM+Ow ztso)+Mqrg1vgAm6pWR+$V8xj+UM`RU7ckWehLJs9qpzh^$3U2q#%Duf`U;f7rfZy{ zK)+nYKQeLzE18&iaG( zcI$N^vj{)`xx7E_DF9&~TV6m10MbK|nX9A5l@lFdX2RiPOtX#axr|vHYID6iG1~+C z!Pc$c(Dc=|`A?KAQ%hB)Q##!_gQ1bnGHzOOWc^_4-)iYj=PHb#Jk(1qf+(Gz%vB~m zMUrKtPi($q|9t%G`XiL(RqX5wB>}fC=Tv?^9QKh&NK0_sel6W7KBm9G~(` zI`pxcL+uM&!?qAv=3rj2)kSOtrs+d-H7_38_9OC?w<%#CpgZsP)>gA|6I6Q&$T{kG z-X@FSu~NI8eg2>p!CgeAI~VUQ=Ci6W81fGQOSD8K4T@~yf>pUdoE^n0pfS8^hDby1 z2-YlA65UJWQ^NWIbb6pDDHs)$^YEh(48y0<;qT`0=*UQh`?(e9BAFySy!+|4kPo^O zZJvWh*EspSDL;ihUit2J)K9Kj5md8B%DH5LSX1pMaUHcT z0Drvu8p~KX*Y$$p>rsAbZKxk zLY)=LuO`Gn^EWO+po=$Bj_%|Q`H}bZjpyaID$|TBx+K~;in=u;$7=Gn*jYv_n-1%7VZH~_O@XLNE zg%)oLwXlZnyew6f&w!~leTSg<;BzD9>iPU)2MTMp)0{S@y(~k`r3(zfybaPTLq(B? 
z0#863eVUF2su!R6WfYC**-y-O;vl|Ot!K(G6l&}1RDGSEE+WUph?%pef>h7(EiPt5 zd$e6Hb0w4u4D>9|6=#N<{Z_jWMx|dPEUE{#IBOb-=LVhepz?i)v2TUp{L;+1UKuXJ zRM4E1$>|dkXTFzy$~S{))F}A+p_%vey1jCUW%sjQiXL+2+@@Dq95QU$$|nbTZx#>h zcF((5Z`XJAz1iZJ3wc^=NU14%Cx`z}og_49w}M8@8jzrTVJ+oUB1$2^3W>P8RmuCW z5T9S)aSm~)$?+8i>=lK9Q%G1@K>){2b}nAUxL*DPT%j9F}(&L5b_8w-lo!&bZf2 z(z10m-0Re+HXBS1ZmBF3*DRX=NWBywnk9&d+o&Q=6t?4TJ*5V_+QobwDU}g z-C@O|TzGc;-ZM;guSU?z9sr^+o7C0fvL<;fFVGlf>s?Ie`Jy-Sks9(*Y*Ij*#=2qt zT4Alk@HLj4g0h-lP|JH26fbr?WUO-aeV)-k9KKO)y^x#E|nQ$9gWHtwLr$ zErmHAEl47<-4Y*tv7`#}H{rr^hz>=u=CF!1j&~K=$=sngC5r9IJ8NO0D8MTnVN}&> zS|7Dt^abgjC<-RUqquzjWJU3bhB!){E%qP}R*9}%71MUSTAzTu`%p_M1QBd~C!C&ak@Fe>-|}Y~YoFM8!|rH?^itC>v`3zU(%3 z%5*G75Uv_3G{_s!brn13&EauLk-RlLVz<3m(3187&2_e3?#D|&f%;a0mf#35*!{_5 zLwL3LxZ}bVyic$4BQ!NOcK_OEm&WZ#HCDEL#X0s~$=0Xaj_CIBDUDpRbn=S;jf-Er zeU1$L-yPPJ4R0>i;j;#+I$DN00p}e77ylA{&X2anpMI(N*_ezyh32TcL^~xa6mN`< zi144})lMDT#_Pq|b=d7VYSH-CoC-(cMH=YcP3?)jb5?yQBQB}1rQ2F)V{fv|Ll$N5e zPoa@2Z}Dk=`Mbn2La8NIT1PH57?VQk-(bV3ckF2LuCg%_8 z>CmxNGlwI5d7h3=bP2LHM1tF&;t>Q78Yf?vlAw~mcf<-%Qx}GN@-#v|kB(vY6cATW z;=dF|HxcQ1lQJ_VZ~}<(IC>DN=WhttXSLXFEDqN-grIT7Cs}Z>uLj^DZCt~>(}}gD z3%9~(c`uZbKnn5<$P{FmfrR6&f-Z!PsP&WklSt`Yk%{Ms7vbz5nc`t$t9NBx`4G^MIO9@G_1 z(znLFI!tTpG{I-vkDMhuS9UUIzHAp%etIAgTa4SfY*-)&pr>e^{RRxm49jp#&y%!L z_#OwX^JVWFW`KvuLfH7S)9TiF0I!#Yjzb8$NqPKUpvbDdxJ|gEXV&fIKNDAMj=WA*Sz z!c>NS(xKXCePKkQ+lfif5$$0^$JlX_VHJI~sZe=I0T!DgOZMdOiQ46_FJYdid$B3& zWcAR<6zF1JH($iKPgxx!V>|7EUF|Db(5Y}%9$*?)n%N^&G=<69JFyz$u+gZ7GQz`D0?GcYcszl8OI~51LUMWh zQhMn`ScUMWVsv`oU)90}uU6Hfo!B5%fP7AXOv-i135q2DJ7q27F)$nL{#mi^GJ~FH zExCAjbDggs8B2OK;uJ$@9DBsME!&-`ECmGU;6%p_St>1JIXxeN5W7BI5}0v~HzuvJ zGji@`2~ahsDb$ zQJ?vx)t(Wcw@1e!lX?)?Q(yGh4zGBVBDA3lBll|h^o(2>;Cl8k_O&e~p%F#Jf0J0X zY~3`b5Bc6=qc0k&B~Dv&2YkQU{He-FD-$}L6=BB?$t|jf%E|?7=Y#-&yT-alKnEj$ zY)O2If%cFBK>*MY!lAb+in`Kfp!<>Pd1mM@hvKD0gfz=NC*^+hT`xx|!IYM@NLq>Mq&P}1E#NW_PD zGA*k#L8W>CJ%(xIf0Tmns8c5qh!4&VwZzXp5`-~QGsXn=(f4ALtwZve| zK#_t>vv;na0g^?ak1T6AXW<%;uNF;xuNdrjsc{z+*4P7*x=Kzz_WNOaPzZXA_$FeD 
zA?pj2RP`yxi}9TM17>0*-h@dW|Mk^qKcjv7=p=Iv@_y|QFDBR*i~y=N7eQZP$tO@v34Oz0YCE)5 zwnOd_noR-0dg|~1bb87~!Y>&Xbv}LbxH(SUV2Y4x&ChV(0Gg`+a6kblMeY@k7HrT? zC+Y@FVZmU6Rj&R9b6@u^L=`j>GCfAFHu=>rU*U%2q(-&Ifo}X)N=|t)v4hUGxtbjV zd+g4F1hesZY8|pqo}z_TlbKsg^uqbGjr%VYQPcP$*dugx5Ue)Nxb9o&4?Q9Q@`%}9 z?4t9d070aTYBd=+G3g$UwEAJDh$hPP8R$41d-c=q0?44itWSUtJx}GOD~35_vZt)3 zO(CF9^nSnBuU&&$3zV1EY<20+I0C48%*D=X(;I^^hLA6JF(@Nm=4$@g@bZmjr4hLq zNt>;pOoJ|RM?9mxX#O}rrK}CmOKjcOGp~VwYf8ifBUXElx6sxyu}$~FMqXpJ3gR#!>>_1omzyP`pqWV z2xNF@D3IG~C7;ZOd!V-`xJbWj2(y^p!-xs;nO6GF?A%&NAN{=J+3YEPe}@O^S%%m8 z%x=@3R`UQjD$1u6e=ye1lqV%9C74jrNPtDvHOOn%{0LU$pt3yzPDPyqRUS_#j6hlR zu)VU&K$XihcW2b8j2JIM;XdA-t(z216nwv>%>kE6IKd@T@x_4f<~ML2;uRa464ob4 zDfb*?!Q7^*mE-n+^cQz^zaT*=3=GVhwZi^Xa>=!H{&U35<=|gF0~B(=(EWHNAX<9K zzB=6HN2q4zxE`bWziS!5sK&}_7QcYwdUOY=CXXh1OKaARKm_8rec`9SoDl#Q z?gRS9^SG^QJE^I~Q5(gp9(yMcFtP`QkuqQ)+{bY^hKdo96t|lP!JIL0fPYtDotHZ+f74gJeCOzP=s>f^KF4!!|xH`Fmnbtrczf5 zj%OX=g8X%>^@zFm^H`ggBtPK=B2tw>go7|lMV=_^{`o_YLvDZn`_9bDTdcZ=9=l%` z3hsRAxxELj|Iei&zdpTr#EfWn!uZ(enGt)degU)o!K z<1O0?dL=J{mr1(Z9%5j5W8L{k=_;`01~;rn?jXXA0#1?ILc2$^LpWclzctaP$xGf6={P;M3x!s z9|I)>X=%7ES%@4d@sdUH9l1WecN6%>tLg zT0*1ef(_6oSbE+zjV6`@0uB!l4(e>h{L>-!8=x2B^Qf41>pb8S=xHsfnKwU6vsZ1MMX+IhAUgNX1x`05l3Q~)Q*9`LU z3X8Gz$*iKoPw~2qw2^QLBV)SuPUFGmNMu?n&!eH}(>7z7SwUTob>!HQ`MJJ7 ze1S`JJ+o(JWFY-|u|41&_A=(nTrhl*c_@_!_};r42el4aRXVBhWp}8XT5+)w#?cHO zRhm<&`U?7b%nYWkR}n)abEQ|yI~-dzCH3ebA#qAH<>0zD^7==KajrU`HON@{2f4{__+}q}rr|Ru~+4E1L zYIm!8we;H94wUf6*or3ws}*^EezH8m8G}7|wonJ??Tcw~u6)TP*7+iB{RvCOGEf|3 zgkA2ny29s%AKNW3DR@&&HSTsd0wIF$I=t718db6@*EsraigPXFERup^%5{Lod-m(% zn{8KkKkuYBG^f`RNM0%ovrwDk)Sv|MI66Bp%i`amxIU11JYuy@e7wtfboqw8Ib81& zAYN?&KS{Wx!sFJ8>uSQiIqE09sSj$%M$zq3e@5#!xlgVU8Z)(XR+Me&hs^!+^m4DXOQEJa`6rta zRrsivsDcG9;52TvFO9VK`{L)a&i-n<(VCE>W^~u5i-dOj9j3<<=J*^=-v+q|uVT?A zz0>Eq$$hJ{&Fxm*&s#)Mhpdp5JOb94#8IT^kj8H^{rC6^r=&)Std!@!KI8e~slKa3 zdGDVw%d#{7jEtYGNt&Q@d3b06utR-h^g_aFdh8oPtMU|%eIl=jRvx!KQ!TwuW5gegXY}j 
z2u*V5eFuZ=PtJNO@(2c^o$G^~wf>REN`kE`JZk|z_ZrhAoX?a3?BDmb8Nu1cT_-k| zkfPjfLv2Pp8KCbW3nL6v==;kG*J$M3abq?MrQHtj1P(btQRaJL(wY2}(8V9)IDGSQ zuIBWH;J0!3dWo*#jP~gAK|McP!C$3D)Mvaf$AgPCq#P}Nz4P)H=3@RoECze~Ch}G{ zsPl)cL9jR}&O!CTr=>UpJ^^p+^FcPJF1a=kt(hrcm1**t@kt}mbQNnrP11~UWYo|# z+NCaVJ-G48entBLURUUMv-Puy3GIxjAE+>c&?+hLT3-|ZrR+Cl{3xT0%FE)!fYark zkaA@8^?iWzhCiH(+`jvM*{UbUX&Ko69$EitMX zg1`P3abLmI)*402twJeMXmKf4ptw8S0>#~(wjn@(1b3?-r8oq)A_>Kd1qfD33x(hg z!9BRUywm&a54@Qd7zQ#Kl5@^?_P6(1d#|PJKIz(Md7BsYGwR})$PnYGX6MWbG?gGr z76elJQahw^F!|4zwV}t`9}D|h7PlkAt@!OH&K2%acFy{Z0R3n0*a7rqri@T|9hL>r z5cAOf;o02`>bp3F88$FP?uU7S2P~ybm}fnOy?{~T}>$f5OCmmY=y7_fK&?j z18r>tY-@6!Cp^~IOogoezu}{_wonTZPa^P9RCveiMK2L`D0K*Fdw&1zo43-b>;7&z zEY5T@5Sz|}*UubVtN~Vq>3&c%-sRw%ke^3=`I4$kVm!s)Ow1tcTfmZWQ+K1o5G*@c;Ce^_?R0ZDRcifPip*H_L7e`s zoG*Yfb1~>~hnAY?A*h`;WuAvTZngimGj@AgNDzO4&!p&$13IkW_-rvWau+B@X9gdo z+tu!Qpao-G6Z^cL%4;xAYVEpmjx!LS3D)YkT|)$5xRQ@~WDmmH!?UG0s8#!J-^4F; z{LsaFzRq;-{oR(_4k*K)V{h6S-?0pLUm+E!fr|4$Yuc%Zt1l(cs`gsEO1t?9? zX4{PzaWGtmIrOQ#eY&lmrslZ8{Xv@%Gf~Lv^E|b7C@Tv=v64$S)RX?#77!9jP0VTo z4%Fm7FNiMo?%M~#$im#6 z-f+)HH3R{^w>G^4_)H%obu?22r;z653uA}qWkID3u1{V+k#LF=*kG4)d&dF|Cb2OKUWD&h-6Uq%#Z5xwX*_RAb{w8(6~BD z5H8Rmj9g>{!F3$iopxN93F;)kBdU@%?$`am9tdQ1YOFU2H_4rM@t+KaR9?A1A(dG zI?kT~X$|cf6bQACdCH*aPueLDl!X8`^)#v(ClOP>2ncbsuH=#3Q2{rLUSM`2DIT$AR$ zPM#k)npChv9Fu`d(o62+B^(VK*Ea7Lei7QUo4{4i0W^uOa*pSm%yWL~qZ>FM&`JPV zNaFpaK7PC!&;f!YFT;>*93{SCH~{{{ZrJ)gHUwF^y#38B{~2^>BVHs4%#eJr$a#8D z-KstEcx-K4r}H z@E5Btm7y4HgnbqC^lHgj?jm1Gwsr{x+X%N3c(=qT+8or|*uw2x!!!mgK%kXl(_d|u zs9G$@ZOv{@`Uc9J{?r2KQF~=~l5=y=!^fD~w0wwIiF_$Toz2kc!LA52`O-MzU19>+ z7frPMGHUw5lesTE2$%{s7qY4&0nA*J6KJ0E`pFbx;%XkP_J(FjwBIS`6~3uh;Ps|) ztNYYlc02Ds4jj;cv+tdEeg6TVSkBchKsHO1hdcOs!@DHKn2>j?9$0lFa(a|_T($(f zTLt;lPFu~OY?)0qDHuCTSDPJC>djCOgs;!y;P-)!AxE9QRzC=RpX<|{^#05h31xXY zI-Qic9)CQvkzp3$<8rxTUFLUDv-as(n-7O;+pwKuSeTYP-o(S2)~gjhDgVmRZw2q~3>Xx-mp32K`=>1po4iM>Yope-?@V^>XOz_GG3)1bpa- z3gF`ezX(babM45XVsS!nIW8|zI5%;>8P!J2Q 
zzq|9n?qpXERRMHm3+91d#a?NJdkj5}#^zn>T6WG#y(k2s5~o3iN}RcwpLtoiV*Tiu z*R2TB;AC%yJ;HrBpI|YSG{Zj!ReU|!XuduSCw{#x4Xe9Qm^eTD^_s@pC1ERN^c zk!#7zsSHB-{LPzhv!sZ2IYPI@OV3ixWA@h0uq5{5*PnSM=&SIVfIY39k7^5Z;3Zq5 zZx-}kZ))Xx9}q3>Qvz)(CQ5Jp>ISncWrYS_#9;-nIh(t}e~(i%ix_9`_qZK#DvrGD z3YKZh*(6-PhZoP&p79pmLn}`NUx&X+a=_Wh(?IYV^pYU?t3t z;JU+(WSkB8u&xY1ZRHzC0!-lOMVoz{mr8}F02m})89iK_EN19b>4NF ziEbXGDISvn37@yF640}z=lLl|ubAOqjup}I$Gut;(~Du9I)tBfl`;!J?5E4=S8_Dpqgd4)1 z_*XoT5C2oQQuSPQBcxmAy4+KLTr-Pw(If`ssz#9e>n}vW0b2KjeZ-FCv$(?@jAtP4Su*4SazJ5k$7&Se-Z{76j^hvqJJ?5*I{M`$m$K$sm4@ zk##M8_IfJv0AN6eVG2oS_yiI5%vunwTSD2|Bgf(LIsm?qAmA8lZHG?SN`%-&?ln{3 zhrS+auVG>HZ&@|6vdYG+Na7W5smNk9CDW57G=y^VSdw1NDP zMQfP~n5&tG%8d2_gwR8BI9a#7d^F`ufVR3DHAx32A_RCs@ZGfmO89LF?jsM0Jp`W& zsVfCBcU8ZJJ241O`BfJg11+B{sv_(9YF#E6+20DPQw~&ApeR9yPAHeHt{MUk*E9I= za3Tj2Jw=c~s%DBgb7u}l69W;ogS*3IQ+K7v_r2H8%Ei9byj-3+4Vix}?KoKA13vt- z7m}dg+?;G+4a>^XR360Zx+cH45#A!#gNz(UeIPxVUZ`9(ame_K__WahqZwyokMhkC zib!bTalYDZtsgH$geJV^F$t(_H?nATobve)DFE2^k1fD9x93?k60pZO;q!dQGsQOr`A@*RtTMs zuz!F5@;_wj#IY^XX;C63dES1@pZJMC^a(&#$c-+uM-AxK;M+-^^K`rySDdfE`xlD4 z`WdH77d~h6FD3DvxVgbN-Iz~%*r#uQ3;Z_#_OIYSctF7oRNRxmOxxeb-|76nkH4yp z|2_WyJOKzw%5lsk!Sz%)5jxg-^7^Q2ALfWF9A;a_iDYM1*1wFt^}@Ld5PAW09)pEb zUl)TXhTho16Xsglu4qO3>LL@i>8bDiypD|fsIn-iG3-K(?&DwyacKjFDohB%rJ^d! zf>UUQJ&=Wca@fk#Sq8ywk3h!eG1LwEGbFW$<@>Jpe}^Bm^q`tFgfs=`MvW|o=C_p! 
zKxr#K;EHzDIh}U4nq{uaXx6Y=ks&IabbSr;a6Qj@Qv6eW3zr;hA!G`fuBonWTv{u9UFCLBIw0`R&I#Bw&d z*=6h)k*|t$jb?PVU9>ou5~$GGS=r6{Vz2*xviYy}{FgF8K#I9hg`iDNF-i%0_cj_L zfs<&KI$590wL{9WJS8OLG%X2|P%`D9i1`J8iwZ(j!__Bxv~G-IeM?475@ge_TE8M8 zVXy+XSaRlhc6kLoVq@%QA8Osf`Xd)#O%J4m)Dexg9(V!_9%?gs~F4Rph*G zv^yb5Z=#;a)9ox5*tH%XH+o;4R-j?xF*As7n32vFk9y4$k3xP*X`#6D^C-Rw|DzY0 z!xG+)+z4BO1F8U;?Mw5_L!N@Y)Sc0lO|5#U6E}4s92kOQxJ|jes@H$p8ImM^w&V5p zP=8R|Wf8if9!}}I+6|#>O}n)$PGLrPcUq;;1-HHK%+|)QbmAI))2G?*Yx|Rsq~#_q zHv{Fro+IG9Gj1&F zL66`TnJM*Tu!*8U<4(^A-KU6gvU`h5d>ex;<{T8Vse(*W;#xpAMgMl-@-=(MwChyk zT)+U8!$_(v4HE0opqY|O8xofUF4+y(&#vBW_=Ya(*mgTKYIWmn=%vS0LU*>dnc(tj z$}YV17q>0aYOvu$TuuMS!MJvM42o_?I5a~xOSBE-H=qj4XX3fI6uwd`>Rf5LjSkMf zS`)fGZ7*!I2im|=-l8?@9Nj%ry(Txs7EP#}FZ5hO1CW+{Ps?e;oBN7%;UqW{GpS+OqCc6g`R z5)tCohk&C%>YSj630WyM$>9uUO~&gzpy5V0{o?lFT;TLm*pLW4r5bQx?>~u<03-=- zf0_0zF2`AB(M4I4emRpJ%Jr5lLC%?9J59`U`mXVdELPBzHjyp5f~E&9IUTQOhGIiR zu`T&uu3sIs;2SP5gAeb1E{C9z;d!}^H-Sr;hKEhCinu!pN?k#Mk0sl4_774jWcCNE zX@(2n#NPN!pU}kOetYrIssn|=fz!ysKpE$kDVbyr_8kewn46m^xf}PT)=y<&gzC(r z!PwF!MNW{1a^VdHIDI6iheA6yMDk~sR{zF$Q7jUYc{trOJw`HNd#ua!YqdIB5Cy<} z4fBwenuzLFvt|epB8u=8=ZZXkw^2&P_1pa#a^+0Ys0VctAE^GWY2vL1LX>GAqB>=L zHj*No*V8*fb%<6MWWpIFTs*9lwDb#nH$LvRSPMt1YvBucfL0GCZ~fLPFU!x&_3M4v zX?voo=$~@V*nUcb){?$g`{bMEdo6i9C^EC4t;W91qvmTKNwQT{sX+NyN}&p^s) za%}3$G4WamK>Q?n9WXjyUtTxPJx^7r&p9GOKQ=b)MfzI@#_7F`U{y>HJ3qzjpaXiu zQ*e23RZeHg!wZs@%em;=^Hv{0-hu&<>uYB@^aUK#_|!{FGkQkyRfA@1r^jYGZ$yog zRC1JS-y|xG(7nwv3B1d&DhNdgoRuVbiO&QcdT$$>dbz0WSv z%m{;E2_O!=*qxeQ$kbO0WXQqrQ-kHd6eIzcJb@Gy=?xx<&LnBQWMm2v==R0Tk%8eZ z2RkDq+H|hcGB&o?hRL8BbeEBf8A{_6*(|W+XU-ZFrEv(8MLEC=hF0u<#@Jl zCeB^7+TA!|QKa(YN8>MAJttEmv9@)St6xP`St+O24$ziBwwmy#0Q#E@&?6lqvP~+r z2*W_p25SWdOY-zmZ`pV{)jZYhqeAl*^p#>HE2J%lPA65oUX!i(F(p@$tVsC@X z>D~Rbut?o*bDEw?%bkvbV%j8W&tXvh1wC0r7$?Y~4%%sEh29{EovA?vWYPFa*{w7Y z86vh1za*eMNx2u0jO&paXVWd6Na;~hhY?}T8TWd}X?oxw0yRNU;U|R|{o}5k+2eGu zmg6+JqPd#(EMxYT^9o1C$G_#V8L-B3xAq!@5XKQ>8CGH(&d}~pMC5Zf-lX7|lNV7C 
zWqUS^apa;XH?PVq7AA|;Q5$ahuqgWbTFsAgSx3%4XZxc1vN8Jsiiu zDs>-Rxmu1*^B!StmU@G)PpeaCKN1z~o^A{*&0=<>-R7Sxm?$Pa(_U2x z=9unKH(?NDJ6pfRTYp}<$(diwBK6-k91-lZRroT0PDZqAfT*uwe7kwWdc5CKB37Uy z^lWvIk5)mmPCBrYB(kuU0UdZX0Ags`nEjdgF{^^G`~E4q*}5;AE-K8R-yi1JcWr#N zO%glzP6~yD3@RQCLvrSVX3Jc;%5|e?099|i=MObGgDl5sKSq<=X(B_&sjYQJ7 z>+TLTj-$`}4V2Xbzc1v0`u%UJQ+T8%E1}^#+l?o@xFnSqs2zTO;ZJ%ebtyako9oGyC8VWsJ*bRy5MuXAZ$7TFx9|pDugd+*XB$kQ4}BIM0t%hY?>S@rN|=^(s4TsjMJq>G}KuwnOsk)%PA^_^F-I8)0O+he4N z-pus#<%u?B#Q6}-N20Ql0v&m_c$(qarbDp8ICiC0|8*ks{`%Nj9jn_F`R(b&a-BJG z98zDcVIHGqoEba^&XL(``%!-PU014Pokf`M^-^8a_34wDcix9~iw4T4}Rc{*Q&q=ua@E#kBTjy%8$wHNV_)MA?6lGO4vl!;gt*Di-kt=HXSzxJ zoec%l6AeD-sKj?ceQSsQw4VZym)pO}fl(W9`V`}=%32nKm zJ5u`Sge7_b3B+aI-?u?$HK?MKPyTaL(ZcrYVHkd%b{h>aSWP1#EBSc)Bcp-v{4~yD zq6kDQf{MxQUU!yeFdIJxj;Odo1ALTC?}y)37vl@pVNzWCnQ-E}&5eDe^l8DwCNjK; zr}U+H+0fUyrT*SOuJs@()YGAXsDh*bAo&wUK2a54!`;-}Uhmgs^nbI`<5>C-T6Zbm*dv$sqwsbQsrEF{11UGwZtA7#$JHdX z+4Cn%R9z~H*i)8UmJYp17mG{G&Np(5)1yK^`4W12xFE#a&UW9mK5<7q7<7Oow+(>M zya4qqOm817gO@L+tqeVLfSw!wo$8ciXA9g+6UEf(7i4jpM&0Bat*I_4>n3GH+g6QJ zYiA)%Pw`#%si8FMp3J9vXWpKpU(jy?eJ#P@Kttdg_yyA2@q05FxC5;jF6UL%IM*yx ze0w>K&zggH#b1*wIBVsXR<)>QQNN_G?do33bgrI@(xa&(bNGlOexOv0eO8z0+1{X> z^=$7!GSnmpqwft=T3^Rva;n{Ck?4b_C<|<5d*z?@U1{V_{5SYAPmmIJSnHMp%f zy~$5*U=OkI=<%(~7Mk5X{!JbshHSbXz{Uc~;CHBisJEp*q~3vo@h3vXQt6e+CK2Bb zWm0A(zHQ=VV6}seda83;R0s^|8Gu9dK3O032EQei$8Xrm8NY&mV2f8oIt1`{3u`tm zhN$;T9Q5c2dkjRC4U-&+8u%Qt8aMa`Cehav2UU)nfM_VMND8eyZ=ZRKh0r*4O1Pfw z_r|YAGB&AY#RGLD(#0LSB`W{rVl@mn%a7YjVWnMr2syuNxx2If3NswnOaIN%3MBNA zd|5@^AAooepJjD!d3_(Pc^X+K6KFE00BnKj$Qgf}^;((P-b^0A1=qK8ZI*Z=xwO?F z8@8_lSJs5sk+L7OWsVA_qo&cTXyWs2ZMq-}2bV2l_f}K$mU2t0cuyU;`p20VHslKz zx)T{?oevzut+Ys97xeC>#`Alw_uAvn0)x=X@8UEH zzx|wz^bPu*Oy4tzNOY~(&6<&X1iT7aBuYgDYy}1IV+PtbBL3 zk`7lZtnBR{c>BDzOoBHCw2t1K;~S1z76@oQyC>ldv@Kh^x3<$W{eGqIry$3XJjrVQ z_>vWy4b$vMFXRj)T87>@_fqv>~)Gp46kni>pEZ3#*&fGdG_1^IV z_6E_=3~CVa?Nki?-UALq$e*nm?pj@Wm7i)`SG4bgsY`sU=r-`3!2|Nui!i+WdozZY2OT_=1*}pDI8DjL=MB{Pys53D 
zWo)%@mreSK3n?idn8VwWvWzE<*D+?!6s%0@#Z?K!?FidBD?9sSu}LPbD1Yr%?1y|g z^M4joXnLefs$cPxX057AK}Widdx?@kPgK*}HD#u}HHkQC>42J3xFfI2=Pj&FIXGB~ zEEcH3iDq;TlkUvofkqazfV5RTg@uWLtD%Hwb?};GTYspexUys5SVcM^C0+`+P(c55 zRWFZ=zqx*w*AmT+i!z9Y?I?d&ZbrxSBDQ3A(|m}FE|!iiE9BvoWQc4m-?0P_;<1u~ zhm=A^0i0o&1)(uPz_dK(XjDARCMLr!C)B?{4B{497=@xi5veb=mRyu)-p1=GE$S+a z1N9a&eB>c5u%g5alKae?->e=;R2%oPg}C&MQbk)fL?{q2Wj#`(8aA_C)KmI=jy|2pdlR-spt~gEt3ERDR7C)k_N+(eeocy4z zj?v0<2%+++6&s(2ev$reRjh>2^TnVVHl?{rRapw*ZCWk|bH9F0CDl#k-Y}Et-iZoM zr02JM0ZIp9ryDwe92we^!xxM`JtgWeMDj zZ5Q1a#hyvDtL52pj^|qvTx%r>XN|4`Od%dGxtS8|;ktP=J;DP+Z)O9;bUs^JQslY6 zjp$#Z{8%E9MC_el^!LV2|6CL&0Xp9j+L3hr`H9hkbL;6GuId&)#DDjQ0?0wlmB{p8 zBDgC`21EikXKeuk11gMIxeIkU@9Qkhb`7%#KzGL_17?U9rdQneLMcrt7zO_#SrI}i zTgU#EZ7%C#0SJ2(71roVKahtxq4&)@b&XLN3qB}-QuA4ae6M<7m!VcrjZl})py4L z2G14(YY{He`ZH#(HKT7{{mzhsirZmMR!mNN`$sVt1L$G9b`qzVn#L>@xS4H^C+N|H z?Jr<=4GoQ~{Em}kbDS5N?nkcn4ijB}&yTs<+diH0=8)aq5;FYeNPmKwBq&0?0Qz0A z$=B25+TgX-%)qvD6x8|JgOg2{{R3$N>4{YR6;1jN;3l`W^9WnIzT8gc`vWPt`OQJD zXE0_=lg?W=`bN|Pw2wyoWBpbCRYWdrgbnjsc2wUxhT&~bU-B_+E-7PNQ~I_{IaFTs&R-EKth9f)IJLnxu5PDtIFg1ULZS_ zkEK}22!%QTC~2+Irc_}1s5`}3`g%XuFn93cP5Vs6^zDhY^=sX--JtAe65h&$M{T(OS^p;={mgT6^*baQLR@DKllgX} zTqi;^ylt*uxzlkwEeSvZ)-(~FUdLg;Y=TEgf?A`b4F+5taOKBZ$;7Ul8#h=%`OK16Y(Z{X~2Ldw9?W3f-+M9s?^T&zsTwYB|n z$2H(du{l?Bm{m3sqA`4LEg3x6TLH$R$~QyekR2Tz7Z0Kbb4mwV}4J46hy8WhgpT42JuI0LuO8>WI&{)XKhI`xY zKA%;c$5rBOozKjWbrYh##4-d4a~k~j<{0yfecOhcI+mT>A=m&CyV&G)+p^OJ#Glxp zGsR=tz{b6B&!kPM^tx-5+$pqsN>_NBJ2JPzk>|{=c^TjBF8H)=ih{yB){b;fx-!ZR?tZIwox%c zw<%PwgHS#82SnYD+oCa5w}W6V8ruxNDlU5F@Zn3CPWx?8>VVtBES+QJ#@F5mhp#$n zqp7=H?k!fv4Y~Y<&><9c2ORGQY^E$9?tr#6Kj{Wzu0H&$1?$^b;hM414*x6ezut|& zj8#}}9{M(V>}JQO0lgB9exAZKnC?DaCu5HKZ^fmLMIIhvQAI`Nea^!#*v0CcmO7AO zN-3Dpa)nClHgUN8vyAS$ILL>Znu8UtAt9kJQ?=?Z65)EBHgo@6x8B-rq#KrG(4@N^ zxQ&-tflR$Rwbk8(ZsJ_EPaUjQ26(yD)u*I~tcoC^6ss z`!z899p|r&XI#b=7KX;=Vm@aBS172KEVLA}lAxE4oSYcYF*JRR4>321%gO?l<9LNB zMuP7ptQFQv!)epmm*rup{aO?6y|si33+#7e-L9G5fUD-Yndk6rHMRAu*G_L3%;iR3 
zXw)lHBlvPqy+uSc_Vc9Y^Kol;;FdW#98;_w5vUuf%6}C3> zG8+p$wMMyYzw72$2&oJVOvh)s)AGYksKvy%LALfq=PEPkU1Xxw%;Ad=dksRRk2(Ek z2S}j`Z4=FqcUgXT6TJkpG@wfRqsd$8wB&4HJ?R>uIvNs4{Wt^ayKs$UPp>( zHXfyySoy6sutl1|Ht3~R&#HOFceBP@vqhkOx|d!n;0#{$)ld?~xCw}84&Urt!xu|B zt>;E2&W=F0T|Iq(g!^bx5^u`pm3jvejU+2WEPJBSCGqcubKlxgk(CV5l{#p(+qs*( zO3pub@tOGKZm$yVM~)zzdnOV)vwb^8x}1B`W%EIW2}GM?Ii5GZ)ksY~Z2kV1&GPf3 z_E-X@3{hCXH6o>-)_YL>Hn8Qpjjt-ND;p?ejHy?!J}FE^~E~pFpZ) z3OViwp9|c|b=)C&{Ye$8N3{BWJjEm7wDpFR(qi00jU#0d9uGR(83Ql;-6wCge2SXN ze~u7gUGG@k^4QJz9DQX05-!cd`+@-w(5|%I?+(M4k9OGat~SgQd(-XkcbUztKlZtz zb?i1f4c(Y}HDqA>&_`Dh0R8)@T%VR;o`Iqov#NN?ht?WAQqSBK|{Z*75J#)jttFDD8$>Sh@Fql24{2L{8Pxo6If ze^qD%d#=xX20-RhN$ngtXdIx(k?65sj{ z_%;CVUjCNqaIFXW%^8?)dKX5Ix0TCVVbAQ<R!RdVK)r7J0+mh~pG7F+%EeOIn4ObPHXRd&U+#@Dn zXdl?c(=c|70`q%1j}c(JdE-%zDyWnY*XHuDc63;~eoN-lNF_jQT(>cAGGuvmrEYiN zCTo1|YZTO^Zl{*gnpjkfT)w!ZSXeVrkHK?#5V)RGzX)=qP{ZyjyB`!D3aJ$ao?Zhx z<^>522ZGzl*kjqXGfW|uvGuy8GM9<&ksa91y-#AT`SSSMcF1_ssKTMS9D`f$-#XmDc#seo+sx|oJ3k;ZB_oqvLRhLui0-7hn=X^`S+!^qYxR5N?%Qii) z)8?E0tA1+~y|QIkD8OkOi;eV{$g00Ks->F&()BNg*l^Ior>|ra|Fr&i-%;~M*yHCO zi1EB#&N=Xcm?@+j0v}jaaXFZ-2=>cK*TQ$LUvddLG#!rxuqYj20J9+i&qnKq=}i?D z)HzW@IZJ3AwQe^X0QNQ)Ue0Nar6WKcvFpp@L-0kmFxp;BA>%9O?^X+RxJw0kpJbA{ z4Sgoh+qeIz=Hxo|i0SA4p9ux+ARK1Ao-_)QDFuYI!NE^4|3awoCbkcIy`@f#9&-G5WhD+SX%K?oNROuoyJ&?dC0|uWmWHI036M=w7L~$hI)(iDwK(r!bj|uD zFV1IKKv_FYyr{{E9052^)$|~2%p9&lk$@ALX)X=(h)dlkXvlo4iZ`J z>JM!;JIfh>3)D))vX2UdE|QNZ^h&2{BfQ=Kcg>kkma&g|+Rr%LpzOP$B?p5O)3qkw{dUUzQVlduSSmW~`Q{%K=s1s!ANG zA8>JnH46cD#0YAZSW{+Dl(*eXkzZy7E*bm5x3jE`9Rwz;Pg)t}MpKcFl872d-8H6} zhri20kdUaVv0Gevw#thKp0opv1*fYi126i|{Xo4h>To~bv*ArNdoJ7u=>N8cmj$ZS z?sUq>t<<~#3H5HW>i*#>34^GCT0>=x!6S*_lW9(Jc2lD&FrmC#pSq;?iH}SqC3nDK z(s)beUK+eg`!)z&{zQJm!O`01B2COXNQ5S$h6|RH9bf*a(xtCi)_ON6kRHox+4>4( zw%ZcOjoFx~sDQiaLpbAYW=kdJG%nQ2f^x^~jeJL4Ge9kvq;32a?(x<2i4PsaROv!kGK#ph}+NS_l3TK7c=%RL-2`t@z_gr!efPsXf&- z$+gyJDI!M~NriR=_H%s^bz`X6wo`URef|R>EbhT!pJ5T?`f0R)zL-TA1bH(|q#!op zN-8fty^ogGo;zCq!F!z-vHt2i{=zFVeEqb&f+wBY0qaYReCL?=yw^hk5Mpm`Y!y9r 
zq&Ef{$ww;$8*0y)_Uk+2Nq$rSH$(jnRWVWJRb-`@M7xs5gf)-?n!=drS z$1!UUyl{m#J_boz@+wxgv(YM=Cw!v*V&tw_&Z7B_jkKBTwq{33i=V()|nIV&hYi9yS9ua%5~{`JPVfMDHU>oTwO+M*sCtMhjg3{yuu3n+X!;JXfMDB7Ta5&tD8sSm9 zb&`7lL9OU^dKLAr$?y{-A zAYYD@)MB}|mr-DYv}1x~F34`u%huNDIgjQe-?95^#~WC?!K?d1g}v)z)twTQ+1i{$ z_lE93AjOvgGAt}SGW38n>Gzb57*XYqc8snU1;K$AYlJrwbp_r(mYe}@v=dyfHs#fH zv5aAtkzSapG!9!@;F-!RJ_ndEi_hyuP+g|i(`s%^Bf;{Il+v~*8gLR7*XNzTIL%ojI8>z1X~TlC4RV}7Pza9@4OGRs9*Uj$srK2v#7mse3bZC>8QTqG4}3o6Xg zQ1ph;OG%CPt;w6jWYzF+9p`|sF}?T2JgYhC_Ild2v-al0?V@D^BWnWw%^$RfZ%uzU z`#;U$lQ$B{I@iSk4A$HXJY|yxSc8n3M4Dm2yVSqWx7~-Tc{%|Cxh&Y%;kx(BMYrd( z%~5`)Sd7qj$J>#vVv0JG?_Rpa^X55P%|Zb<3m2x)&Lc(gL7<^3z0%3c@_Qf1?}0b1 z4{+Cg!PbzxZpWylgy+2lgv0@;R`spcKY*)Qk=H0vWTRH2oo>YQ{Y4!T5;?|0pxnNJ z0jMTP6eo(V5!_%i!%4PF5?N0wkug zlN6~Mv*+Lbve%o*PQ3G6)pt#0q>knTYoX<=BjEpN*k&bl4Aeb;&oi>w!&gJcb6fwi=3zGdk+acC(&saI$H^QQC2IYMIdT zXNYp4B#}`e%ARYHVI~81b7_iUqV{dC&OBZDZ{ur8dL1AE`OnxTfxn^yE*#5ljev#< zPEesuN1Ec$uld7~2N&SBnb@T`w6sr!WD4Q8ELW6fAX4^F(C+{9CaTQ3aVI|bs@n4M z#Ctl>Sku$wJca zasJ1KU^pbbb4m5rQixt`Cx|MHwVyYr8Mj0Q%{0H;bZVok#ROCX5gsj zU)yle?dqYb1S7%ZDCo?aSrdI!C0e8Q^|e(nP?R*xJG>Q&h79_60Bt`-bAv!!4q(V3 zmB(N3^v_^v>rfC)$?v0T|y$?vLaL2LL`VKY|KyU6^eSth?&5khcdb zjEd5(!G{1H8=#r&I6vWI%;D8|GBJ@G!Vu(BP}MH{JAh8iH36d^h_@USWk)jAz34- ztDe3Uhm$vRR>A5H@{aOW0ziWIj2}nqH)D&`81X zn+Ivae!nh&JF*WM3;5FW;ysM@IZ5vs|i=Z#aP zr1EO2)T1JnPRLDkdVWPmKrzoZ<>Ek4R1xvNSQJq4;LyjnL3=spQ0&$DY$KrOkN>O( z#jstm`VWfXelcW<*h*dI^)<&aFFw=8sJT}*O5=qA%bQViBd&+YFGSyTkq(uYBZAgN z#)F<&I^Fcj(6Zwo)Yf~_Zf@`Tu@gK^|^=({O|NwFDLn~8*#lO<9qVg^CdQ4 zb)!iWz9$f*#ceF-v5Hg&W1~H>Y9=` zSQ(O*3$v9xtEOVIrY#g8k>gXVOWtnmbw0|fT-46T55NB&L!CeE^Tp6YJeBV;=HY#( z-HtNAUm(_g_E^Wa2=VRe$h>lPP1I+>I_AW_4@=ZXF>+v}iylQQIT2dgO2PNG>P+ z=Nuf%PTe;HAK*>+6Rp-KJ(rsK+XzzsY|hHuKxGi!Hti?1Avs3}nf3Z4K`~kNk46{q zR5RA+%4_O}6pX))K`{L)^iJVMRf|3?>o-BQll)gh8IIH2-PZRfZ|Hu{`0R-Gr&RX|&Ksf-inX(_?>^(cm>0XiL7zmSkG0 ztcMQ)#*odk1`=t|;2|Lgss}UD_SLy9RxrnaqA4c@%VrwBVjP-g515$v&j9Ir@O}B= zmGtwa8e?l8-1hjay_3DpCZ=%gi_XX=@=70tz(8&q&xRSUWS+*;eqb&T!)M8B><~&I 
zA~~|+#*-;_JKK77ys4T9+ymc>iW-VkWst{XX1BD!|NpCm0C>N|d;+9%;@8(0g8>hF zV2CV@8q|^f&YXhjmj|Hkpu)+?1n6SrB#U~FZN<*{-<~CE)A)Dbpje8>v=Vvs3d%^x z&A;!ZV!)>8a{p#4WaZDiyjKe0PrbM7sX7`fT1}16`>S$uRwgDKAz!14UKVY{NBG>S z1XNxCks`pOMuZ75E}X(+M)xSRs;atuVPNq-VXWkhxZ3Bp=>ULWDjxhwnr3sLXz7n` z{!6i>{xkt02JN04p*rZRSAMoi0Of;SGDW)#PK`REPu6Ffjr*&JXeoNKtS4GH0B3K_ zSLUsAVvZM42bwE`_8yCcIrMI4Zyk-wY@_JCmhCl%ThgT2o<549^R%}$8Y_Rg2aKlt zH=pv{Kkpq88zz~XEhI|P?Pxl!S~MxyFgeA0V;Vg8@jjpQaTFA5mE(L8l}0P(L?`N| zIN1>$RS9H*U(r`&!ZH3m?+L^Q zCtxC$ayu$o%fyL`f}(WCs+}2g=IS8&rT~D~aZF1E`V>YqD%?TEqL=M6HGv8Ryo)V@ zW=0j1qo|~vs6eIYVsuqgrk>>DK)k8MWYO!tSMPsbLPH7aVT(FHO%5vNkbu3h+dA*L z1>I**#`lGpRXok%$@07O1^9Z3PYwS4QU3hfgun6AXI72xDB~6#Uw^^R`+77RZzV6! z0W7H^p~Gue`Nt+!I`emZcQ6<(CogT$44LXfDWSt2<3{f-MSSQSy0)~Srl6h*1Y`pX zhv%@?wes*BKW1jZpOgD)39K&-VZ6?!Mm9D!OGwR6ALjeErHEG;)YMe9)Ko<^hl{8c zI6YC>#!o|q#=fhv^qtRIHddePP|6F##^CM`g8!P2{dZLFZy#b&u-{bvECG$)+kj_# zlFhje3?6g^g=p<6~ruL>n-2pz;;OXzpW4Au` z;G?cG?FZ>^9ajQC|7@tn_i({`y2a~w!$o|$#5o5h<7a{#t@11Z+!QnKN+92kbg4Mcq7ARZ zh&0;w&I1A~wpyJ}vzJHU@DT$Zy7H{#u{_PqnkbLUvM&I$QO!q=ojlyzEv`3@K^S5cqkaVI9Vc1X;HHiD^ zW5igg>n1&m_!4|5dUK4=yk-Km1*a)WJKmrP`==d{Mf*D=2mD;W1up{nhe?;(bm|dW zKioS=vanfDrp)^v%)~7`?b^?_E0e+3sHFSyqp=`UiYn5z>7`?aiJaQt-IkiDDBwO< zO|UVga1#G}vH#PwdVe#AVg{IDqv)CIg=qHa8_G`C79^YoJzK548|m)>7^)jR#RxMI z0dN05o$)OAbKnWA7{N8VzcOZc_pn;PPrUUz83o-`3azxde@E>8*Qb@wD0l??^?%0h z{&Sf7`)c6vzen)?*Q)>Tx&QYs%%mV|x`+3^S_z*w6|vw8jVV@az!rPvl)#Gu}C;mQ@ zc!tx>Kc6G%f6(^UVNtf>+aN0HARt3GIFxiZ0y=a`BP|ReF?1;T0z(fVNHcV|bc=w1 z)F9Fg(%oJA_`ct7zdd&M&)xZlIR=<{=DF|tit{|L>k@`Fkz9nlC4=omB0I^jAvIrV zu^#wIW1yW-dUR{jnAN`ZZyAa@0ucSnF@?7x;>9&{r6^*j*gie&g7XmO0;|kaOqlILeUOR89SP1 zSlATzCmNQRI`0P00)x0;gOsYOyhb{HcSse_Oytg4Amj^Du&^LA<7nJC(ogiu6m>h7 z=X9%;{PN25Ev8eh?MVzJgQA9z-td2vLElAY`%IDI^Bdv6TYP2`ult{IcdPUC3h?P) zWD}k^VnC#eh@Qojn;hjWW?NeJQ)~{ZvJ7KE7!)|UM4Ij<@<3T9!PEpAeDMBPwe&v~ zo#y>ZsY^Z|b1Q=h$OsklLN|U)pXxWas^{YeD+4X;UW2}8_@)p*XYGZcKkML~+%E|4 z_DOLe3faZkFOWEMHFmNfiGFJ}x9jJ$lUK_V_q|iq{!{rb&7}>LwOdo?R|RxAUvTId 
z*kVdZl;`T<$up!Qg>G*ZEg)o2GRRF&TfzlAU3)D4nzrrgtwMYI&!iREGS6Fe-x6<} zE0aJRdxsxqr^oBcpT--zNQozMjZ8SS9=OOo5Ehc8a7hs3`<)=PPaRnYEe$>XT!v`RZIZnfnC z%DNL0H=!>}w%8&YAxt00pjHEw7w$=23;Eqo9s7L@`7zq`NbZ|PvpwYq=nXKs4LC20 z=X0){N>$)L!*aCZj*H(XD`6ueK$07vd?Cpxuc@z&PjS2TR1jA2N{pBHcZv0rycPER zHx6fewVDfL_8;CL)D+XY=R+s6bM%PwKDCvn8GLlT4=remwMbl?-3?>VI+FbX^2c0H zQbTZC2^I@}-DC%6Ye6x^de%`QB9SgSZ8GNpQeJ;FdDC`uYjeS{0K|(}1m>UF@|1$` zJM*f-#j|nkYOJiPtxDvJF;LajgFD)i7YGOt3K;q00L15I3YXA2n zYW(=m!-e!vX2Bvsn9i6POu6uj(*40dJ$MAg2t~5xhTNJBASSNk?rsnNb0gJ^9ldwI z*7%E4L}eX zH?NlACX(-DGYJA0Q9_J*7Im9kY+z`Ec#M{Y-Hds{8%9Pdb}7&&?_uPWpWv_(2*S2^ zBjr`?Ue1INg!^6QTP*BcyM0<3zI6(7W5<4y_ds}k9Ji@eecSi4Lm|*D)xFVkvcEE{ zHVCXYqw9UUber{h3eTh^Zt5ai-_1ZI630q#ePB0?zviE;$Ha*}f1kZ5HXIr-N|y(0 z90?c-h8oSwGa#%#12FQcaHjboGCIHK6G;uHrQ{KKtdzh;Ni0xhl!J6|F2aQ&NvTXt5U?G z<}v#L%rj9FKi@aAXKq_+$b<;*yw)!D~8KQd`Yij!i|I%0{Fa$ z=@v6fg?33xHfr-vr>J9w98m;PFfl9w;D-7+0pm5SF?7z_V>BeD>U zo^A|(A3Ig!GUSgZE!{nn0AVRt%e31>RX%kue|{B#6`%b_BNb@JqQ0LR1lEf6O}=!u zHnn#(vJ9hm{Ou4$GaF-A{pOZrj#SxiP_O>We5Oyu8$9Wri5iaO^DkA1#j4W1 z$%`B~IaX0}8fUA;dN6IRT2~aVZBa1%{VL&~SSjdOPb2 zzl-9w|NYHjKU<12QRQv<+)qmqXtoW?L}xBlcfgR}%lp$|46w(kJ?AEik6+J>J|-yc zZ-#Y71B=~q@Kd6nU#p%gPc_H>RyHqRKt`U3W=ib5niLEj6gyLLn9Qo-J}+=EUPZZW zI7z7+k5@I)CX%emb~DuP3O3v_3UOQBby2zX|EhW4s9DV_%2{g~p(MzW;Sa82KV}~y zB?e)kO)c(5T_*01g?NwMnB(YpkH9u|&) zxrQ4921Gxh9$WS5R4Lo?=At6s{b~ zh|x}l&&U;OX*ho7+fE3Pq&}Ohxma<9Ze*2m$hp+@7R!NrJI0=NtpZ`dx5Wb*gtTX8 zRPmOA!{`G@46ycI=M-J5#8lmYRZP5EjWsm({!xd6P*aAg1o%-_wkL7VnAPP4u@+l~ zi3OPf3$^LlMScG5rjt>3PKog`tmV_GopLBVwcju4g>VE?;oH(TynTveqh_UYdPMNU zJQ1&Pr7B=>YoT-@Lw_+C1y+b9R$MKmW)nO6cA1{I=GJy(ugHk(t*~8lyeqhTMp)&# zC+8qBecU!DbEzW=E;6 z8NKXx<|Z@z`rf~MySP|XmmK3YHx%?eSw1jMO>whznfA_exIUI)s^|Cn7vO&?+ckxU zSU5OOO{NM)tY{^iMhfrAI`nJpdjJEP9@Llp1ayeP`OOab_sVtU>GTLSt+#2O z@RP5SIqC&6)eFQ>JH1PzPMy~G-e3&ZC>5l~?@YcW!y9{m-)-PCZNGfk9*5m*T9U3i zfAg?<@CSvPN9ldm!3Uk6CUnB<1ABhQ*pEZb8O91t+CB+&!_hmDN^R|Awpq`tU>a7B z7b_PTFiDYmrz!5hm{8&NPa2U|VnUqEud2AHnAefFGCajVR2%;LmFq0dP>u93^InMol$)>~po<@bdC%Ot@_% 
z{7nx~F_rBLzhA`L&G@dTd7$_~u)qT2fM}9L9mA>hY-?Z{meXKx>@;qVlYtXSOs040 z;vmS#FX8IweHcjQR866hqm#b_1VMtJKPic6zo@ajQ(tkGYj}mPZN!OwC+!Ng(@4vC z>cM__*E#D$8BH~*uj7pgSsA^$?)*jv{ka?wu2M=}%B-PXAw+jJiD&haL9F1)#M*Q( zog)Gt%3`gevBh7+Wi}#j^YA3C3$H@A1O^%jwb1p0rdb zSdq9Zgj?|BeXY)eF=YpI67jRRzHlX{YcLeWW7RXRpUh!KU}Lebf4k~+PSrc_E_|&E zX#hiSrpEKh=n)ztDbKp4{0X{NPcO{C(17Z;NnCBvaNYGUCipd(zC5Trs85LV`b_?; zPA@~kdZXR`b`K|G$;V!pw2FNS8SQzpo9vQEkMNy~S;Y(f@&`tS^vJreTrY#qPzu_; z25}fR8~{^ol6hdV($bjbP+$P?KYX!I_9^dwPDFfTL`)9&ZkN^XHSd254}S)gA5-1l zxM}z&Qe^#x*3g9*AUNr zIG{@)?x<1gLgQjLozk8@6^nj=ahIX5YiXeFB*36o%aN12_yRU?>AHw2vR}A^_t%%F zkL1ELQph>tKHxzg(i*gd0M#G_P`0Os2yg%!=TLCo!oc-@Qf3>lMIyrAmW^D@bzHcHML$pE`DBnh08hU!_Isd6EMkd4n~NKVBwEuspe6PcMkDMkhS zfQ&41EcE0HnD;2X!w{+&IMWIzx4QdED&g3+-FovSXDyv%F3odaUcRLfb_6WYyDIbE z@BWXh22+Uf@w2Qcf_7~l06>81$oMCM_FS9%Lgm~ETA$VOr+dpNIT@L>=(f0QX5)%; zT1+S%WQB0gZ{FuOuFF9tjd8{GxR)^t8(yh?{ql4Sm#@Gfc|}hr{MJ>soG?xi2yQQd z3A$v{a3?JA<}nMef9Q36jk79IR2?7-FJ>vgMJ7V$peTpuHg#3x1uM>3PI&$~h zygInotG!PVeo153UIol&pXyiU$RYeH#~GuTFGJPwC8&p<@TAJgcv|RH!I_kPhu6+j zP+uOc(@GFivt-OCCV;bT{quLZOzt`n**Vt}9Cy5el{(wy6Fh35b8)NzRKFRCh9DCor63Wg$8<{J+S05kv$h2D&fmVTieSPAd<`f#5wrDp&p zaNDYHZCIdA$P(KZQqUpwJKu}hfy?3Ty=rifnqxA;;RrRI=f&7S6-w{AH;SDDlY`~( zHdFl8e!MJiIqd>+Xrvx?n-6TFJBVx@GfdxGymiFW-;wkJDa3JImwWJ{`v+;{ktunTi94|-NkQ_w8(FwMY7^X*l{FlqKzf@DmioM^4;u6Zb(g4f_5#v@I8-Z7h?fS_zZt- zyrk;<^|l6w7Jeynf_XgOOr5ea2>P-H(%?LAU3=KMQ2*+s;XEC}44N=_{c+@oTos>v zhBrKFk2HRFC^CF&9Q~aFif&|AF#R%jyD`JJ&iN9+HY7Z6{VB^gwI5m^#Oban5fc-O zd!8f#D*;#`VEo+b>~L~5T{eAZ=|f|>zpkQ9GRab3c8-F)=&(pGB3c7VZdF{`WW+hg z%bd6FAhY9lF~9=_9gYzG7S<)|t>Y_!CL!UIbxcr z#8xl3@H>*{2?G0*6|(x(mnM;H=pnRZ%gbt~mmFH{A|oAlu%|fGVwr`nGU3|pZ95C) z-^4V#`of==8Qc)B4=hKZ?S2C>3us-igAB`bD=T=gx%pL$=VY7HrVy!#lb14-gPG&e z_r~LPQ*!;b)b!}Ly@suhHUIzz1Q9O~=3UzKWqK91G<|ln3fR|1>7e#x3v!^Qe6$2@ z!m$I;IQqZp1||{uw+^3{Lb$qZvW|eLuz1Gf^q>;RO(kpmT*jjSk{i>ihYl6SLxM`q z4+d8$mu>yyDf}I{dSXA`srD5V1l<=A9Y;!AbZ7Kd+HSo+xW4gYDE@PlKR>g%o*63I 
z)BL9NQ-7L`@P3=;eeEMJB4S-N#k)@&Vm_rHBPC8$(UwfH3&Svo~Lra9>+Feg|gB@nAa>F4m5Gki;S|B3Q#Gl|VTuBtZE>+0&de~$~vmp=}ZDp=-=Z=O?B zuwLBjS?KNMSVVcX?pg0k4j0IYNnx+V$@tWBsL&N$((5{Mme?bm+LwG7A5S5tI6b5_ z2Cs$>1#rcgh2_Z9S=aYwR=E0gwcc7Tt0sY=c}LluA=Q2|ps}KbalokTxMnnaXfmr& zP$%j)8&h&2DHF9hII=6IKLhhpZzFT@^}UsMrl(7f>2 z7Ad_{d}!@Y&NZFfaLIV1{-|aq>k5+@Ap`xcV(%lR$ zGtdHhuo*i}s`2oJ+LslL!B`N9)rxrN4!FE5=Oc1Q(r}Lf7*2Wl;muvjAZ2Da*Oe}N z_uhD=rm|KIeCFlc(R<{Md}Z{D?Sb9$0DQBDewCH>jrpkYd|8nHZ2Hg4HeoIgU1l{! zP-K*nO8J;tEU^@}NRGRH!=LZyF=%LR$|<4d_Een$sCT|rkc##E!t6Jt+aj`v(*cM> zjrUZc9~9G=0LOa1|4!Z5TUveYG{pm3J8osiC>mTGGtSQ`!a>E{Ki#MgN>9=kDzbd^ z7dLcvufTq+I5+n-SCZ+e;BrMWgh_eO*#H7} zZbxy|q0&Xnh`Ah`jJwWp1L5Ep-w$XaI=*4r|&tLrLm!W_@Fe3bPz<(Ng+0zn*^nozjxC8tCGMk zykDx29aE?#(*--8SkH+bp-exJ%!t`R##S7n7RJ**DuqxRqxQ$=Rhg^9Nt_A6B1VyW z%7d~RUez4^13_RC1SnP+HsZ=AX-9a=gP@FKyTlKjEGa!Veu(5j3pu&62)ZYKNfof+ zLSJBlL2qp*Q0lDx;Sp+BkaoL%Ubptk%DljbgvCwXAjr&bj%w#)kFixZrjS(6pN;=8 zvJ{s#e14r!q?cdbL|>>i#gES_VzBux-_n37OZzE*u`D%lVj`7o=-pS>$kL|!!pM~b zi?4lQMFp30|6r}orVDIVmX)llv+`IjxJxZiK0Rk;In@kH|0c;LNs!$EetF4{y$KeR6Eu!wk_P zaXxxZ)0XWK^1BKnFP62Ej}%nW^h!0U2Sks2i(`Zmv!Embd+`Ej8fJ&fOYq(Jzzg-p@{1pb z$;l9 zISd5Rj=>!X;9KR_&>pK|M>jGk05br`8W#|9lH44RfbDty2vQ2cTxSkbwD3QwJ2mh- z-5Is*nKjfhMy00CRG75U`d(xKBc#_SX4R2ES^++K`w;R5aT6%Ygmj8F;5ZMlZ?c`; zIu*lex34eZ#Z7q15SE_Cql@e{X?b$taf;^YZd*9qzi3`sw&|wwLoD1kNWNO6rP(r1 zr@>f6&4&M}NG10y<`{C4`;kPo5R4jl@V1Fq6m~1^AAa;)tD@ks^+63EPL5~qKh#fw zNgtf}efT{f32_Xf_y>y)Rvqs@H9sk9qY#I0Y@~+YGEPlR=^8q9wR}28H!FwUW#|fn z>Xlf|L(1EhSGBj@!O&OsZvUMmA8WnWv6)p=7#KIrl8-0H>c3rV+IIFoa{=WA9G6sV zXs`)Lko{)#qgb$Zu4e$V;bH3OKZW+MqI*`y zac~px|IU9#1uIY18Eeb7gDRC zo#+b9;#1RFqw-4w=0rU~fJt8oN&wAlBzN_5q=ME$_(8z_cJ!>fVmVH%br^O>qeO16 z&$*bHfA;onPE;)1Tpn+a7Qs@O%sbjt1LIPJLf;%;tZ&2;8#EbRE+Tt}UjC2^f;KS#eFG&?lcD%R_|N{Bc|VZ_vE&sO zr`ysa0GXDyfC{Ck>jnUi1c1ZCqAuOn>Wyq%g`2K2cM`HTo(M~(fIeb<|HPyEPTzRE z+^BiK?OuY12Okm_AAdS-;J^~zArdn2^mg@C!cWlt$$molyJ}p>ic@ob>;1>o$rn3s zfQ~_=IEvOUz47x#%!aJ~z9+K3<~BMMsydS)d99 
zG0z9x>AF3h`>HQEsH_;Opo&a8x?8^G+y(K;{9V9MKKnvRvf|WqQJ3kYlfPF|HdFm% z{>WQhp>ZOI_X+8{VU|c-*6{4@t6y1Cepj0>kxt6W14wc{&$BJ^Ujb^%|MGjvgVeZ~ z58*SlMs;>mTVo|5-n#F@?U9+meBv|8epuOMkYHjNnGC*xNvU%<`v=T=H_Vd487A8A+Mhix9E#= z_;$Gl%-Y+Zu4T)81LOw({u`>mMXqzR_KT$VuJ1@yrz!RC7ouVKP@@R|_Vev!Omsv) z^2hkU{CUM;ca2?HO~7|&9M@+h+|VrWItu)74E-1N`1^45?V)@pdLb0RzhZ^)SFiWq zv z#UkHD6x^;sS=z~JAX5rcSMP~kuB&&krNVmXy1}*ycr4JKyaWV6+iBw6rrEJzc5LK| zCXxYp56DyskEJ+KqnEz}H3N(a70x-7*O`sY)oMF4rvPwd1&MTUm>L$$+#p}LaeNcu z9;_H%+Q7{Ob=1Ujv4<(E3TUL;(Yg7W=<%F#cL3OIU6Db;gJfVk09(Rwul?hC{^OV|?4m=f+B3ypnIJhC7cv{oWe^B7rO@M@&!oMYE>HZzD7 zRdR>+DZVmji+pgm$J?0g8MS67nNn@^f`~dXWvZbXJge{lox|M$f~~6uxV*f;YEN5| zUZ;2ku;3^cN*H^?YchHWN=_RW`@J$Kt1i!B{(z9F;mtA4bsa|;bAQP~X{r!i6b~hE zgbF>Up^jFw)mv%@mJb&9aR(J*QOyo8_4x;S>LvC=cC&h?(vEAGd=SJ%nGZMsdk=LT z;KJIQ^?EaLe~vmaPVk`a{$0Wn|FihAXYJZ$BbC(`4eqj_titr)*;0*OLcs41Wv%$P zHjZTV^PQlC>QiV?6lU=a2?63)A3C8F1WAIMDM!`HfUpo)PWrM158i;3Vq>d4e1?Z_ zaZ^0%mFWRYE+v^m!$zPEzC2kP-ZuqCyGV?qGc^YbfkOe@?1Q)X$|TBut&6|X7bu8i zd`<>e)p=NuGXsL!U6Cl;%K&VD2E7>b_^E_NptEPu0NDtX*i%>@*>?#C9?6l$t!iv+c8+Q!A}yBzK?!JagoHK77;#>EZk(~b_&V~S@$H(DczyHqTI zx2K?hcJQ7if1#f6o3opFpCH4Stk%oR-))Bk=Z}_2gWBQ! 
zpADaNGf=VPKJIJ5uY@`#&b-8|f_GY{;n44_@F+6sxzQu+`fF5Ld7Yffn_2;uoz*;t0vwu&#EIDr zczZ?;Q?iPK!CrOjB&Z2ncb<52g;>RBRe02sDHP6>rRvMvDsNHOD+s|u0W%SY9!FZ38wRaO&?4u7Lk9z^ps&^PlkU4In zSa+5?q59}&2i*l2r@0nqOIK)Qf3SopMOHv(b2QEDrt_uEgu`-#GU&15^BrSq{N%-m z@S%C`h2;nphe^l=*L16eI(i-#pC@cHQq-}#D!Mf4Et;82l%1s<$j*@3ylF1M_vceL?W@`EoiFT;re9H!=51)Fk7)^GkPmW6!-bQ&}*>RypsJ7l&_~V~Qap>>o2r@|l>)vi<5=lF&PxI)PYfsqloiJcqF0E;vw(97w{QV+ z1J&EQsy_fB&%)8CBHzo77Rzk%7T`0O=B@@*?|)6hz1oidF(5J1uoV)l$=nM2Y`zNY z!Zq4?qXPTw9r0~^Xr#gUx6Q1uspbh^9$&3rCV!3q;A3<^tl_xHYrZqi_j0e5+G}Ap zZ$GrRVjciq!g)B^B%p=-?CmvKaFK zE!p4hH`#40qo#PU1QPGL-y|5xhmJ|A=m{^Y^pE00u~^83ByU*O&*F3)B2^2EewKIS zT;oF@1xH6lRa9DZv9q$wjNgH&b*pvebWD)6@$YhsmtuU zQnIbu^sVGpb$n9Fpmq^@>1S6;1+41{mE?}OD!dh!~q+$aIh;d z1nGILHojIN>7BXp(--l1>*zW4scMd3Yt3hqGg+F+M%E3kqJ^734?27LikpGmmO*E- zjGU@-;`f@Fpp{TlPl*OQf1-O5`LWC*v1DD@XYT_#YTC_Ub#DO)wi)~MJx_|p2)L!n z@=Gzd)lA}_P7e|bTHI}2y(85!N0%eRl}g2LmqUPq8wQSL$U9$@mSWbdw}s`()}TZ zqu_ETAK}q5&jgZY_~TCNu9eSK?$%KeC!Q6%MD6_I%!1n7x3pA}ghVR6rM%YEgPhkN(jh9Ob zMb;{SbYgJ6)qUZNIHam>T;4(OXtQE&{sS_sv;mNtz*yPHAc-$yr!30`68?(gdbbj^ zt8XnXBP=7iM63LM{nXv}apKJS?V*`5*7*R+$RI7F2kCBbZygW)Tos!GutK{=%N5P% zK8mZf->3jS!*7)XX79ZSGr~qEpBWZrPcBEalNIC(PME71)ViNf%1(FUn>gM;<|TNi}RX@z&5y5|- z@zx9V@ty)(nQz9qSirUEBmsENm3D5rG+e4C<7B-AK|0OY2c~-Lv;jyjo8QgJ zXU7C=rZKzO-Egtz7H~bmeL|#@@gCao>+EV+lE|UcdQu49jg6kDyQNLBJ+g8UsmPlb z4yTl>ash;Dir6m6@G*zs);Im>;r-YB1NDG`{@p>0;;Z0l>R+Y5$-9&s`WP{lFhfe5 z`i>Gp^yM8JXHgW`5H#PQvOgb4J5_nta}=A?TT3BmJt4Hhr6+tFc{4lbA5O>Gi#Fs9 zfo}pL7Z~)+&Zx$rr^+!C2cq?nKSD7ad=p!;2cLm|__V41{8;9}h1{1B9%vyzWyDQK za%djQcB`{uArOlkVn$<^RZBqdxJo_SWmnR2!Q7o|-Q2J5K}~c-YPD;~SX^$5r5?}G zd`b&kq1DuEbYFP%MgOz#{0TL)lEzCtnJw-Rp=7t)Hvl&$ zZXMYyg#&^}ASwbrYBwxYrD%+q%fm|DS1{fjB7IFWF;O((dD#*TXfrF}d_|Vxh|e>W z$r=St2NM?1gC7*9*9*Meb&<(m(qCxV%vYL}4d{?ZitSI=EdKI0VZ3a3Enn!$VD+>+ zw{+v6rry39hZ^F_Y^wxNz|eyy99)|s2mYVi5PD2CeE+G$s~6sOl6_&b_pY;)#n!}j zZpRguX6{Pj?gxeR;dhXkvX-EK8VkScwgdDXJDBZ#6q#hY+DD_E4?002V`ql;w9ucV z@xNRa9|2sES`ZM~^Kl~7h;#=BbO5IY*Ar=e6QT5KzA$g0t?eWFdbi1BB6F1y1QB16 
zgK}HaBem+IHOL`IEP;*(BOtLy*a3Vqyniup#ns{kIqbNszTamMOY%AkXEf0meeW}Z zcsJ=Z6BJkv@MW2nyId98_3>@|Lv;fyDW~=2DUpBxH#i_~ppH47v*!FUigH64=ch>_ z>a?k>$#CwxsX*#Z10u{6zZxoHwe-FbOa*zv+#UyFfVT6ZliOOp+DIlBvu2``Z7EXl zp95~cBQ{sK-hGd6mXz2;J>_MujkGJBRl?_$IMK+@U_cP|d$Vc@izV5sp7U<&)+^_a zM#$`C0((h99^VYnEN>(jDj$ooM38yD#=gdi;bsCY7mGYZ$fKzXOOs?L1>F=h@Cg#K zu-ZHQ2_T66RVjHY%Iy+6h=m?;z=XWRc-DHP{}Gf!OTAuU9o0(7-5sZ>U)aIlp-KH#1VUc@|UonMg788m9niA)`L+ic5JVzhUx}SBid|6+|;U{>ykCW zDfF+E%4b^4x&b|C>$#oZTWMwVo*O6&aMz>q4m9Mcs8+9CUD{cLIFHwk^SN+ZK-v$~ z3Y2YKB;?T}igH0{kJ5~VuQDDAPFN7KYPjXgu~Js!xzIh*=+UYml9s*)000$DHBvDpu~|g#%nF?NFFQsr2sDz?qI|}yX0?| z*}}oyCmm_HACjBE-j{fU^%eIqdv`-s*mT$AKEKoZw)qu4L%Ei zL5m<$09&YeTunU2fhmb;)XBj6!SWt)(P5-@p+vC^*jj$qt-ii_NFtzgh@-U9{gL%aw93P zTxOM;U;emO?eY4+)O&6T5&8|T7;_;4x<;KxdOGZy`%b5=(1SEeQt2NI0=9l#`Kx?n zF#KUGAm9Sx-?%P{?aT+JMS zq(e(t(asDG0y37C+~3`L7Pz;u74uuGk)b_<-{>lOGk@A&AIG}AveO& zM*oW-Dc*eT<~opopEO&$nngAN0sOlcBfpwVKGl<)IhY7Zdfcy_JlXZa^Zl_J2Iy77 z6bg%qjR8+eociZXrt75zPOH~Vw%$IV2Sb)N0d74J5Q2Zr78aEd5G`K|ngoWssqV{p zFcXRPV#mFMWDQz$`@hfu5gh#XudY{9nFMCn(>F1p3LIV`E3H#5FE9PPzRJI#%Tmkz zDf)$4wN#P4gyBy!+?qgSDyjgU9Kff{Qcl(NB2KZ+0)%Gl3IYT*fqXN&5%X2f<5{<< zlr=jsfU2KszSV1JoN;2|+;qg<=h5op@GYnEd>E8Lj~MzcU$#iOaAjwWs+1WDlIv!G zKuTe5V?m3mjiG>4_x-?bgpO9UW~?y)fWd=V!?EH4K|?Z}w>t?Afa+60m>$pR6rRNs zckg^|lXm-5#E=@y)cY(i9Bi1N?Ju3#0)kQDr~MRx)%DsbfjIj)fiZb@3OX$DL}P@7 z%)F6bdnk90WRpEZr$}H4r46cURpQ+pUpI*(@dOk&$JoQ(J6SS0O+yR~fFMtI?2a>{=w*s;(g=^1|1f_{RwGr^bNY3})^W}n)a8uL}qzly@}h19h$3Sd9p-kQMpJ3 z0U#@gFod5@)TzICf1LBGqtRqpFmW9Id#VnnUTFGL+@CW#f0Wys;+gdw61c$=SwMU6 z0J367uRd1WmGlk}gdBa$!S6;*1(Ud1B1B6t>9fCJ66E8%k1zcUp9zZr!DLQ_O=c3p zc%qQ1D=6%CKsRI+K;Au>tVw}S%mv6va3MQd?5;G-y~TW8TA=cfN1Psr8Op9?IO0(f zSV4zD#?9Ct`;cBCTRH)=8(^l_QwFe4yNKk0-&y2&uFHZ{8TDedIRRm6ERli+82YUc zv14Q_#C~acwqo@etRS#mzwKLQjnMYMl${iDi8(UZUZD!-Nd@9%${;M$ItxUgw$v;c ztlN&H&TtR36Se{XgOW@{unxNjNIxZc2!0)WMDOBdZ?V=VZ}llfM|3g=*^vz63yAQ# zR(#1c9XhEOsx3KPi(&h74p&IUKN0i}zW_0hF_4wZ? 
zF1oakBJF)MK?XwrVNzMtVoNcHGtI?Jba#)?3n;ho6tl=sUtO_~Z2%u7XY+q$rN8cv zF(2Pd2SVlQdyRWHnEk|nRtGKBE`d~fdO z&^PY1HxrSN{|Vra3&iA-;2~wa1&IhO|8g9R#Tu?jk^o!Nw&S)j{q2}OhJa0MGREX*2V!^@_HAiW&YnB0pmo{ zmRuhRDm3%e5wqzqlLl=R`(uZ8@$d^+wWU>N0jR`ZBIm}9LW?n-iaejp)7q*w$8uUL z-tH%2u_I=2#E2jVGbN>Z63tDq%DY7T^uNEqJcX&o($zpXsu#zEx2xB|vK{7#_Zy`T zP8=3K(BQLQ3HAjg{@JPL17W2SCKX}-Tk3nkMs)Ses@ad78w$p3e;oAPv+h2G;rTAW zvI2t9MFuZi3}Y9JMK~UC>Hov3{fE^5&sY7A<-ZsI?>v<7ItcP3!>)gjd>z%P z^tW~xcyE9gOV4dyrbmGTT<$`OqFp$J3kZNlSluNw@7$~$%n>u&m(>jyR&W2|^HszH z$W{#Sj?~}jf;Y$FJ4D)DhT*$*}x?(ZbU>F*_QrqN{O6cvX<5iEhA~gJl2K z$p6D9!7UHs!Ch|LmVMN8QvDwN`%XMI0#elZK^%rSHE9LVi88qJ88{2Zz36VUONC*;N7FAh1 zHkHQ@5TbI;RPAw@0~*Hv(L7;=8@oRWpQ^PG$@!$ zK7Jj_oNWc5#k=3mS|XZS4Xx?awnDFX7N2mC6~`o(hty_B+*>aHvG^jyKHRaiTQYH( zP3Pcy)ZT1DO_&CjV_8LAXJ_MZAbAdDWf70wKrpi~VWow|G~?!pee7#To)dR98-YD? z3pb-u9<8sM8Z!S;B)a4Ly8wUv1W-TiFVyj}mzN0>>kVU3O9MlZ>Y$e~r4+NQp*W z!k)9CXW|ZRh36}Y#yyxJkJ?{8oxMOmdh2&R|3j?##K-2>Amii5+oYVASG#zH41-q( zBhl$T!-PxAI5xk5EBadx1N=bYqFm5qP!OS=wXC<|5nHvSWQ~WU6%Mt5HZz;Elkxog zV4H?w1(j2EX)z-9dcpJ4_q0yivHqV^wLQs1Hp%0~v?Q`Wq$Ll|8t~Goj&FZh8&xzJ zI!b&RC_z>Gdpy5)8@p0b(y`>XCo3%k{>kb_>Mfq%8PC`*32;m4=8U=4aoSn`9*F<7 zjjE}zMXc&LSa=}}D|kBhoJ>DPaivDE@?zOjo3esBc(AajuTDzi)hFsoVS%CVbXJlO z7ag5)ByTgynv6J_jYpfjbP>NiXjQOuE_~EuIHRxg$Tdwa_pqVXXJZfEu2D6+RK6G;GRgkNy ziildX5VY<?jAyhvz2<1axuk;){qKS zI}uR?NT+U~coE=dsZ9rx;UL^%B(N#|Ya4(PM`$Sbo(WKfN0WTWQq$N?Nghmj;a|vr zudlbzRk!Z!De7?M~|VeF=zZ*CV2weW|M9XtF#aLMW&bj*X@6%E0w*uOJQtc|r1jUPWfL znkD}Z9(VuU(9i2M%5o~Fk!nJTI`%J9?u8wG?EJBgFFr2A&Z%cxJ;qudzkS})+jyJe zW#4vD%tkVV>a-K1!oh;DcQUiFhlfX#OI#gJ?0c?^>yVb%>wfL<>&!W4E#BhdaOp^Y z9m+#eTKH8tN43)vl+~DVrwclqlyi8}$Hp84%>h>6zZMBrINk3+kdiGQhi^pBF|imy zu$h{zx(^gM@h``UKMj>|{m5q!xqfJd1oY*_5@M&FwUW2a@2@$b@||^M(`P#*&oF;e z2sCU<=4|tqRx}vmMf%A&73JHXP`XPwr|EB#vRfi+x8_%~zqJOg5=Xb(i$b9Zt3M|v zpU%^)pLr^)>^tfWk$y{RfbCvADKnr*dH!!B5L87*S(e&H?jAE(NB)CLveS!D;(+&f z#*Sz6x$^&S*@Jkp_4diy)1F_g+Ghe!+3c4rSL)u8NBG}0SEi2eXEV4dy;UeBs>)D!Kim8 zd!-CiK&1$=YB=H_U5l&(8<%fc)lv-1%{4 
zNU1vv=tEPSe>;AFmC65397Lx`8h#Do$^T93Q%+5b@8@U5I&9-~_`_BAws|c2{@M%J z+Zdp;e;oqw>|Z+U-=F`#wAkoyNd!YdfY@eeXr=wH;{_h14>zF~j3Vm~sYx@Kv#gjg z=@8t2{6QwRaZwOPw%itf-T;0`&;I#42`D9Tb|~lrx}U?Zq2jBx!h>1 zcI5GA5@42zR1X!&HxNGAMp33MgQ46VNEZYq<0yCJKO}LC zEYqY$TE???=!jHH{$w%xCj_BbI^-15k*7Rp)~DN;#dp;xr&kp-%@+3+)rmvBqA6bp z61_i#C^ZZ2D}$12z0q4~who&vFkkDt&+87JC(}M|Lz4(P z9#?=a$gS}0GuV3%#{UZ(H@HHWe;o$WT6t#ct;Vt;K}`26podsUr?EE;i}EiDO1Kch z*li$8sH-Z+3Vo9IWxtTY*{!}C4AJ8;^j$q{7zm;v}mc}Zq9fq z!Su_%@--%%{wu4W4F1)z0j}<8L8He@BpEA<|95iitd!vXt*9pnq8;jqbSf_xZ8P%D z-w&W=Qy=zLGyrrBh->uP*#ow59g*Q%bA_ceDYyE4<>rg-CS*TlC5Ph6)b$>69qc;4-R!&RvEy?NZA?DrxQtfF zTsU?)OR=g{|DdD!?t>bjdGOd{5XK*F#dU3SCf{)3uvOA9!Vyi1cD9_UoGf2m&@f`2 z!DZB~$Tj8lCmF2?hq!dCKatt(`AGt9EgW!%5dv}Zj@+%DVO4q6iK3sL8z&_`aqjiw zNhC)k)WT4?2d)!`pBh%$=KzfLsOR7ZzXgHx>C61Ks8tnL|4NU^uY(_Vt|Elf8{H>^ zBYG~;lZ40qbIR!@8%_3S+hxLkooU4LX5FDOfc&r^f7Wy9?!$yvzpvS=5fNx)Rq^;7 z1=MaD@l=rj0F9|ApS(HHWV8xF4gNxF7f&faXQ1#9~ zwYfKRurmz-;|LIBZ;ppx`Y-Su=Xqb1XVTbyFmn;Y^hv05_T!BMjOMtbwdocXICOaEOSPlOqXuE zcB;y72{i_(8gf9L(^(GPZ$~L-XZW234<|77qe3?7@tYGpV>)vu1Es;i#>(+v@`oVq zsh}&0usamgBb}EMUR{)h#kX+3p10>gi;ti1d}Pf0DNwM{el}Z0uQODh*ml|T=C`oIg|<_CH_Z;hC1*O5s;YmJw=^57VC0{rbL z!}UK%aG?K`wbiuhn`dN<2G|WmCHcDH#Fq)xN}!8eR$GpSQa&0k@clWFZ1%9ypgVa= z+cDIU1rIpgGOsH$YxADS&yj0>ea253UaULlf3+0w4YNwni!e*FOV5CBXU#EQD%<9i49i{P>a4r=%2#j#fgol;(zW3R&yiKZQze~4K2tB@P^Ym)v|$nhs1m?{5xOE?Wo(hvfLIrV8r-l*lVSw8~qP@0sTd;{`Y2E-L~RdlFp*Pv3O!yg@ieNIMmxPU87Zb@enL&XhN4llfNNXXe3S4 z_n0U61-)bzW#;&C;oh!J4!tCplwAj}55sc&FM^!0IknaoYmF8;mQ8*qKDlj9GcSOr z*BwirBbJ4P1wYRtVDy(g-|i&j?2VM$`@r3;ssl$ymgAPyD#I4r-W0e;oag9}>g`y2 z<6v<`0IgUSpP0j=>6HCNE}5NFJY;L~&G~2W&eVqN^~O&>q3g%_aY;x7s!VTvhvN&6FBf(Mn@^SBh#R@kS*GRQ6H0Z%h7PrL zM%$mX>@+!-!0j{ZTb1O8YJSztXka8;HX+46(?1)H5eF48hcrak(7Q4wT=C}VEP)LO zg_>u{Zm*H9a%gHen3n?1Xp*D7&_}oKl0!d&x+`D5_+W99BmY6`Cz^=+1C8H-+QKy& z3@q##oFLG$n zhOtVdX#4d#@lT*st-(H7X7XsRSj41;SM%M@x|>>k`;?ll6YB5ccE!+T)*PiSv>>pM 
z@rnCIDV-~FA%gVJk7LIEnh%0Ja$fXrK|CG!USWZLex9e~2T01j+*SAl~$TKPg>NCq?7f_IN$jw#01*cxm>h0o3^3+0whOjz?u(_}3x zuQ)E%i3@Oc;6gliTZOvp(^OkC{PqFv#Le4i)gK71aiLNkMe`)~r9%gV10%dq(lyg2 zeWUjqtu4)4^PjlAPuKg9tW$45np%~~AUbR6(kVYrm_RSE-XQFE2Bx=PSUL0h!QITH zBDN!^dZsD|<)!=G_S)YPBy?Qwj&xGHTkBrrb?kvtmtCHhl+_GEb{cJ}kU3*;gw=GBfsuA>z%4OU~V-wxl+-5Fe zl`@{EKqg%=O5RDn6hyL#qXq-I_@-~oiC34NWUtH=UX09WNLKBPTy_$;??Y7&+1Gv_ zOA)Y|ItgY+tu5p_DbK+dK7wQ4MC7T*IXESVrvt#`SCLGP zj5u=G9gZ(O-d}`i=YVG&O3Arw4J3Ttv`LtUCGK8k{ak)hVHq`LWNlkWUOfB_hc2 z(H={PIJGhh+Ff%E=~h({!?rmufqXk1UH9i@{W+0+KNjzbs0w#O9H_aZ+d}1B0jPQs0 zD)1;!hI{EvK|Y}f5{@YwE6wU{9H5cUXp40yeL%^Pu#ltVUm^wu2GxGO&@|~Gz=g7e z+l|%;ue>DXrLfo>wmV9t8L)mB9a9g+PRnCl)oO z;R2s6yb!uk8KP!MM-GLc%l1+Py2&%38lWC zuD2Jh>LwD)yDX6G4M9$X8$_2Vuw3C6^MXf=wCvhBKI`gBf|G|O=`VIAO0Gr@-TRDDl0HX^6AJ4XtZp_hbzprtlw5{3xeh56D z*_Za*kL|`O4Xpu9nFKwyAYoaljd~Nc^A;~rFb|i|2~ppc%(u~du#$L$BCXe7#ZrU) zTPL=q~RFMV{5-q5w* zgiW4z&g9AX4D7$Nl@y&;v5rH=zb=b_(JpL;8qA# zD}I}-=;A;Y+_SZ(7>2WjS%zG;5)}8glhP-Bn}g9c1Ssgwm-KQH7WNO{LP4WaBqdp= zxtRVW+hhUkX(};a_+9!I6r|}Cqq{)-^o>#F{Oo<5|Og9Y5| zQHd1(4BqcPwc8svsrJ4#;Cpd=hVL$bO@FsNEib|?tQWF+%HZD1-)m?WMU!C>c2_+XBjZQRQAMVhs_seyiyNk$c)%a=!2IG$1XxqJsD z@aA2kj;wZY5!{rDhsBYI-w!W^&PE*EwkwI!EfS$q5$?jo)ZQ9x3O_V|mT-QkenM?L zGt!#0l++80I9QZCdO25^JLslaJjgT&M%Wtw(42&6DI{;6R4oO^*ZpfURyAwqk_S!q6NHs9v zvzon7Y4!_;WuHVv4~=wG@fWah%5qZN)y5Mc;}Y}0MPhMP8;K~NJMigYaJS%jurK6W za3{t!y7jrK<&J?pz%k@2nX0|?eN3MB)!{CEs-b42jRHT39Bc|N6pPqx)u^=& zA!Pn~dqU1x)!ik#kQ*j}fBvSlk34p4<+zGl@u+VdtJBZNOZh2;;xMdn8j%T9@t!6R295t3fI^B5USzTbK0t_~U8AQ3if^FS6 zoXQPbIgFXN{N^7siSus!^hzC@>DTbJVRp+)UsI-A|i za(tRRgy$6^668I9)Vaaby2y>Mb*OZ$)@b{{?nX=Ej2!!~dd7vkrZh|1*P>)z7-^dL zc?)18Q2 zJB`l=QM|Um(jR{Gk39NTg#TsYRan?Tf=!0*Y8syn=WOG&%G)L@h^fQE?bLb6#Sfq9 z>)f>9pIT!M6^Om-U#W8rrpSN;&iBzORT29iw7fW=;e#_1t@e{o8LU~yBH4$s2V~WN z%(0$*>W!#7_Uku3elkjLLtK($%kFCT9at*QYPVvVEFKO~2@JJ9J0p(eDzeXpEsp%A z%y5tha#>zym2J)zI&F_s9$GlnHqc%M)@hHK=GCaBA)17O7X2z1Gs8x#T^2{0Sg_(v zm#66I#bN2v8W(Z?8HU=WWV)&_;zU~~$r|N4n={)=yQZ6Se7<5K-2A&!mUjMZtra*G 
z!I?UVFEPUI(%^{QR3f79!DX%(YMsK};?lMh}6L5oanGuWK(bGMXgq(x* z7*=K^r87ry`rkj5gxYB6R8wHt@Z;Azhr%T;QvpPu5Nk%;QC}E^mwxK)aBcXioFomr{M7F} z@<@EtP;##2NwX_s-oDMdpJpF#3@)BjXK5tfVe8919lw%0d43Y3M{M~52}-{NB#_L$ zxdYBk(?Z5i$px*x5yyX9uQI?7H|N)qrP-4n1|QktP&wbG>VDOk_uiS6-Njf`-8(u= zb(P*Zb?F%Gf8h~w-s(}giFMxCw1%zF9W{R0CaY3d^ykHxpTiKLFwmdT4?PaOO2Z?r z;Md`}4=~b$;paM- zwg;$q@WU%-H@%wK0#DCJ#4cHpht2*!;v#mt!d(-%ZPsK(7hL)Pttwt0;tx@U8a7Ah*~=YAHS{I z2MVWlE(yzY<9>7l9Te`|-nZ>q??_01#f3QShW*yBhOpEjpv>?$pa*H5?b(SG zqhjI)qjJ)%PUw+o!7V9NvEPxOJisuWEe+XktDvu~+!T8ic!?D=5O)yFt#u(~Yz5y^ z1@bI)R%!2nnd~h`p-T*V`Patn&=r|2y6oZb$k-=6Kn+5tiUiS@0yE+ypr!nd*5IR+@Jek+}<@jIvIx=7;eQX>U4oi(w&CA4@yBh)KQZqGmGuSO$< zLx;&lrvCuRqkM)YO@(xQ_ z|7f)Rq0@MUMSmq`*A^Iokn{K;{!kRd1%A7c^ z3!L?c$U)u841?sx&o8A~(s-0@3mXH+4~cS!e67d5S&PX*lZErs#k|Pt@zQKEY0^8>)KwqaZ1?NW z3#_v>d1N!JvqBpF;WSE0|P(O~>DRKvqOqqN0I8OJL(4{0QTEsapcZ05Tou~l% z+Fudd@}3P}1|p0XFkM`9!fn47Ua5y%q?GJOm>r$jB13m)&dVmY178nZd-I&<3qi?K zrt0fKJqxL?=EwCIjcI%+`uSrvy>GFeqE|+uw^5=)X9A zAM;``0vVK?;AyHx4w;7gnGGE(R^el!TvN-ZntTE<%XAj*%p;_JilG$-xZ>Vyx7vQZ za^CLn+3U_azd3;;GQ5!KXtPBo^tiZ@azgvkUE|BpTan7$7;o==QFK^vonRIDD&40! 
zY47iSK{)mX+n<0+!k=eJM}Ezubwh#vxg0?^OJTb#N+{J}>5h9ZVIcU%kf{xKXm?(IVplTtq#TM51VpwQ2C~b47*+_a^%J zBl#!XP=}jZZs_@J5!a!M&e{mm6CkHMY-k{wBCl(c|ok3A#-7SzW4K7*=W9VT*kR)H!b4isTyYV2ThI#H z6mlCbmL_+YGeLA7;_n_yzy$br+wIR z-#CB87P6%dGK8(Vr|59(qcg(n8ohHiQT|}@m$I%1MfBU0kXUwpo_}6ZEu9>{2)~#p zf(_P{TgHLLA`Nqop&*(uaLI4pg|=Pm%xN2|t=T$U*Py&vy`YvDPM|!78f@aOE3(8UuD+Io<|MI;%yWJ^iO`f zvWM4T;s_n_M3y4YJ!(fkZ7o^L?(k!oruAD^6=wr}9sn;QwM z*+{nqlwZh;aGQ-m<={r6#?QsRqg5mdjwbib=nVs7W8{tnM8!JZ zinguwxqE~i*Aa5OJi=rO${&dA!Nc`XkDV9Za0XCqe8egjTBn{=7^Nk>%J8YlXnNbA z4ZtbUzaIsn7J>MuYTK4V3fumG7YRX_kH>8qAz64}Z6>*6J%N#lxUX3Frnd-6zXv)!5<*D*`ODr#4WEijLB?jQQCdm&SZJwPS2K5~AES?d{+;^xY;3n~ zhF960MVx^D_MUkR&4_d{57_+Th#xfbUwuFHYfx^aUI#ZJxMh!DoAcBeil33xO@JK+ z&yw9?D95(MfGiE@$)OTRt#JujcGrWKfy)zlfbFJCRp8Px)pZqnAfDucGDcUs5v!oD zBJS6@Y#$b@3!{%q$f{6|W0GAbTp71(Rol~BJ)=d11O%B+P8Hm{?9ClU6S(hCwc&g9 za^#HykK5aibhD&a6Yu0S`GG*4gN{ry3S{Y-K*5kj+2%Bw^vwd^XqO6l@FfN-;Z)Jn zcf^l}W$x|mRq)H=Q|q^wmSTZ+G&;NQ9hls4$g@am0X4eH9xiP&{y{Q~y}8NEAz398 z&c^q6woC^`4jcNajURA`lueOol0rH4!E)(R7^H`sPJ3td0#<%)VwXm2n`bCBd8@Q* zvp|3x6rXU$L1*1kn8&F}xTz(`P_deyWbgQO~gJvfr1h~D*X!3TzH*uHSZsvYiKjGud|H`UwrU1 z>gso7VcyFN6q#0X`|8;^z|!xd=amZgP2;lptMJvwj-z7PNzGS)X$uH6K}Kt`TFnHo z-v0AH($*1gpdTVkeo_Au!PDv=2YNlcr=zFnX*HM;D<5F5bl7nXbJFdhxYAP173*~I znXjMG7OBNbI%ELC)VWIIPF14orPMdvl)DOs!P1le3vXNkC?B|)1(FLVHGT)pO_S~W z6NXwFeEL5t9gOuSOF`z2a`CD%xl}dJsPUzxuYex6?zW;vQks&w$Xb@6W+%to$y4aZ zuZV^7Dfum_?Y4495;O1bEolM*Wz0{9`3b1_PEx5P@)lwQ5rA^z%6?PvGXksZ{f;fT zeJ_wzW9c@07*bF3{#umHzQJ^K+LAfpaZFe#X+mtz)}JG=?tY4dB{3#BHOjwVb$%5a z1s*dP#WZAy$BqhX<|%)qUP5{UAn9^k4OAgY4fV6GN5%L70>en9oRf<$)7$2f zP9q}kL6M@~eTK#!2M=RwUdeRez3QLV0_Ha>#0F{x6XNzjV{u}}XBIf<0rw1p&>eHQ zpvH5-d!NPSDrr{V-*Rf-hm_2^A zMd7>^S%hNPjt@R-iotQ(THm{{RKKG~+VFus-yc8#>3HVd%;T~5u=nY5hgkbsvH*5d z`4YH5Z&TipTUf87QdqSEP+e@x{8?lrjNDvSAFD__nV73;IUTpq(_z#h-xzH>vNBoF zH+`}071SaGOP+o#1e@IUiYB-@s09@$hPW&juq(8NVoQN8*fX_fN)jvcYE@y+rHSNv z(C$@L@@6`umY;KUxYZCxSwMN_8O~9u{3O<3<1|NE%y1N>Dm1=o6zkJG>ptFf9S}BW 
zneo{Lbcw@GYE6KOf0@65AkG?@g7kf(?MylkhL^IGd3-h-XZb!Z)MyMDdLmlE9QXyJ zoE5S}O9-0H6R8Y-HO?@eJHqb&v{b3(^dHA@rLu^nd-Nu7Lxtb&sB?ctWx7Dm8Gfnj z%E3eN786w#KDddnw=uAdnd<45>#h?X)1yceQo|H^rl_MenI<^{0O&S9JhPsljqHX2 z$^QJTv$b$>u*ov6=vba^DO;gY6y0PqQJF4tTh}hIKVVz_F3Yx0xIA|46HQ=MqJU*zWN|Fdii(FC-=fx;-zwUI|pexU_1t{KF0pzLay4bwtCNa}79! z3LmUgS%~|f(}Zu|8?0w4OHX6Q#S8sToe!RV58!dC(^Aqia*`u@rl<462b+{y#Q2fB z&)w|GhgPk2PiLJxjBK3Si3%|X?pU1Xu)eWA&~M6OoeU4R&3RU0Z?Vzq_h(l?L#j3# z<~%qyVmOKlxf-$45*D^b(k3?9mR33VI0b8cZ`y;A=7A6& z#^DepK=qR6k0j4peReH`o|aW~`~1;P#OAp*90DXy+|ZwNt@3T8+z=>$4C3J1;*`Tm z^wY2g`yEw#gw20jxGI=Akz%hQEdZ3-XI~F6Mf%8u32=m4NPT8mOY8QaiaOZvfqw|w zqJ%ylRo$pkj0myRUeQq&vicT|2j+|5%jzhD;Dzn0&3?|+fn5$9P{Z^&*-{7q0N$Lg z82oa1A+kA)jL5j?8&3^LKktk~Sh>5-@K}+r{9@z))i4F*qztBsfBw#hgrKjplMVnr zuUgC;sWjQ#npI&0XFxUD_Br7GW7x=I6P4j3-5Nm_+X2RcPwvwV6db<&J=j+}>y6nI z6<$LLBHCM6+5VTI=ijo+#o?Wjkp6fBhTX}`D<#%`Gp%^^XvFqjwLft z3+jNdGpfLMlFMJGrp?C*V(ZCppliB*#2qknM% z^`5Dp4t)5HEyEP*n2(>cZ9UD}n{SrJKT;#fby!O9;&$H(B_78I)L#wB; zr7>7e5_B-Dx1Vv?{*kf4>zpwCQ~^q@8>gZDUdyY^;@lPbywjX0s%mVgcL{;G{bBqa zUH@O0+A9!hnX*hV?ElI4YcAjgB|hgdu*+`dVxNseWLh8{Q{dMPl58`jF;A|DVvyT? 
zK#puK(fjVh9%RnzbNPcQgxOrNI-!p!e|GUaM(X{c11tBZdZ(;5`w+N$+O~z{0A_HA zp9eA_qaZ5evVT}!v-e~)+S#i*abWEVg+~6T5!Pm(4=Z%dRxa@fCedN)_Rk2QbLs>< zxmCF6zPUb>f_%crDg`#&=a&}-QtZ}*vGS~ztFl2vF~D0e-O1t*T@m8f=`vnksb&14 z63d}KlJC5RfRF&$xJm6;Ym|M}(3Z=*%%8$ZwDULTmwHJXKv`IpK`_Ja$1}CNsW057 zbLb`#xT!WJiY>}o9yA1z!Z9j9c`$pKh=1jM=RcU@i%y!)U~;o082w7sbjy;ptWT29 z4l!67G9}{IQB-ele90pFR=i@St}$rE(@~`lwMYj7`)*C~a`Vm9>$U9wB&rX0N#r${ zcDZWcd-9&XFQwWV^0y`0)I`1;LWzHF+SduoAzTp+FElh zVV8NGQG?7yF&(GFElK_rEGuk61N;Jz@j7>&^6f#nte5hI?yB$K#!w#aC}q8TBwO*q zd%T}+Y5!hNFSb?5gnXXlij7jORB(4z5H zj^cOGL8^i)`{e91{lBw53~%IeC8nhnmu?3AWMClYf7ntBURb?k@J)*XjZ5?Sz@N)< z*@>VruM7M5M996`%XSPQgg9Cc-wzuCbS2Dn0q&2Nj>mJKjDNNy5?)-M9#e_S&~y(b zm0c)jn@wZtnrs<9!$@An#*Wu$j;Fpn#WTnjb>9#LYlEup4om*6b^K{Qb_?M);((&0rJb?Pn)iEV=^{yIZ73wtE z7yx#3p@66=CC4%?4PRfsV1L|u80C#CaAyh*^Lpnn;6Z&2 ze0IkE=nTNb(&|QFqTCtl2eWUA#t8r*vkZ7?PmMr2!al9OyP{8QoQl#`Kue;W2{lNbFP)hg>C@;7D|nL_tXx?Nnhps zu~h>`9OijWe_>COT^iik93>fC0~MX{9bGjLL1#Gd#62ADQynVg)4l3smH_(YRaR%7 zx#rE=y%QufAsYSUm_P|#e2jgxLbw4=6TA7>lcK`St9Ty9k*vaCV`6rvjq?4c@-ufb0Ngjz_SoQB7wMtbioEMlh`RZ>fL6c)J;rlA96YwCc^_x za|019h(adrc3@Z&x2Wl%uQ)zNU#oC=Np7RPj#z#CqtW5nt{3jsC(vk(-zb?wJ@uZ^>TYUp0 zuO}ZEn)%{9XEz+6cbEc%G?~d8dAkIJ0cg)6430x7R=bUOoL9Jv!%CG_hHjH$#?y4m zFMZD@yOsWa22+5Xk{%hho_Tz$(vx9+*6xEstk=cn$~k`x@4Uge)3m>v*Qs0WZ8bV$ zJ*Gc-a2YNa1*!?calaWB(o zu=A0wtG*nE4(Fx15y>iN4HY?cTebtgGYmLDV53>Fq(A(%|4Ge~c76_D?Kq1ZQp^q& zppt5WjAh4_ns*mHCG!(oQel{rc%4=&=GKqa@o{V#4mAac4cr>fo*G3IH5pAA145+# za~Lgvx2>UKoed7%xpS|iI1X~YQS$`+E}0_nNv=*t0!WOC_N6j#_#cuc;B`BnQcx$r zFDvP{q7Xp0hDQ>76Z?TKH|^!>-REEp5k|KFeKq7{a>TA5w! 
zdGGF*F;x#*Fra;s5vIifz637zIe zPscQ!XPg;vlrx#-1HjpXCW(Ms2y8+q3f4BdtuL7_Bb1(dP`+sMq{(I>E?Z7Obhk+D-$;d-NyAS^I z6v~SIfNJ=|)cM16lgVa%u8~ATOS@F~*Wx8Bes+t9{_#WR(#Tj+RCIXTpZ1EO!(TEn z^5sd_Odx+g@jd94?Jxi4CBMG_TUw|)(!v;6ghu;~jK}LCfYR;#H2bUnTIGhO{q4z- z6>E&Q9ambq1^nDUpNjz^?teDm!2fRlVU_)P@gLURzi Date: Tue, 12 May 2026 14:10:01 +0530 Subject: [PATCH 06/10] feat(op-reth): add EngineState, Engine, EngineHandle, and all task (#20419) * feat(trie): add engine buffer layer (TrieBufferState + MemoryOverlay) * feat(trie/engine): add persistence service and prune_with_provider * feat(trie): add EngineState, Engine, EngineHandle, and all task types * simplify engine loop and tighten state ownership * fmt fixes * doc + typo fixes * doc fixes --- rust/op-reth/crates/trie/src/engine/error.rs | 61 +++++ rust/op-reth/crates/trie/src/engine/handle.rs | 175 +++++++++++++ .../op-reth/crates/trie/src/engine/metrics.rs | 22 ++ rust/op-reth/crates/trie/src/engine/mod.rs | 75 +++++- rust/op-reth/crates/trie/src/engine/runner.rs | 174 ++++++++++++ rust/op-reth/crates/trie/src/engine/state.rs | 247 ++++++++++++++++++ .../trie/src/engine/tasks/execute_block.rs | 138 ++++++++++ .../crates/trie/src/engine/tasks/flush.rs | 30 +++ .../trie/src/engine/tasks/index_block.rs | 100 +++++++ .../crates/trie/src/engine/tasks/mod.rs | 21 ++ .../crates/trie/src/engine/tasks/reorg.rs | 106 ++++++++ .../crates/trie/src/engine/tasks/sync_to.rs | 30 +++ .../crates/trie/src/engine/tasks/unwind.rs | 63 +++++ rust/op-reth/crates/trie/src/lib.rs | 1 + 14 files changed, 1242 insertions(+), 1 deletion(-) create mode 100644 rust/op-reth/crates/trie/src/engine/error.rs create mode 100644 rust/op-reth/crates/trie/src/engine/handle.rs create mode 100644 rust/op-reth/crates/trie/src/engine/metrics.rs create mode 100644 rust/op-reth/crates/trie/src/engine/runner.rs create mode 100644 rust/op-reth/crates/trie/src/engine/state.rs create mode 100644 rust/op-reth/crates/trie/src/engine/tasks/execute_block.rs create mode 100644 
rust/op-reth/crates/trie/src/engine/tasks/flush.rs create mode 100644 rust/op-reth/crates/trie/src/engine/tasks/index_block.rs create mode 100644 rust/op-reth/crates/trie/src/engine/tasks/mod.rs create mode 100644 rust/op-reth/crates/trie/src/engine/tasks/reorg.rs create mode 100644 rust/op-reth/crates/trie/src/engine/tasks/sync_to.rs create mode 100644 rust/op-reth/crates/trie/src/engine/tasks/unwind.rs diff --git a/rust/op-reth/crates/trie/src/engine/error.rs b/rust/op-reth/crates/trie/src/engine/error.rs new file mode 100644 index 00000000000..4694457f080 --- /dev/null +++ b/rust/op-reth/crates/trie/src/engine/error.rs @@ -0,0 +1,61 @@ +//! Error type for the live trie engine. + +use super::persistence::error::PersistenceError; +use crate::OpProofsStorageError; +use alloy_primitives::B256; +use reth_execution_errors::BlockExecutionError; +use reth_provider::ProviderError; +use thiserror::Error; + +/// Errors produced by the live trie engine. +#[derive(Debug, Error)] +pub enum EngineError { + /// Block was not found in the provider during sync catch-up. + #[error("Block {0} not found in provider")] + BlockNotFound(u64), + /// The background persistence service channel closed unexpectedly. + #[error("Persistence service disconnected")] + PersistenceDisconnected, + /// A persistence save or unwind operation timed out. + #[error("Persistence operation timed out")] + PersistenceTimeout, + /// The collector engine thread terminated unexpectedly. + #[error("Collector engine terminated unexpectedly")] + EngineDied, + /// Block is at the correct number but its parent hash does not match the current tip. + #[error( + "Parent hash mismatch at block {block_number}: expected {expected_parent_hash}, got {actual_parent_hash}" + )] + ParentHashMismatch { + /// The block number where the mismatch occurred. + block_number: u64, + /// The expected parent hash (current tip hash). + expected_parent_hash: B256, + /// The actual parent hash from the block header. 
+ actual_parent_hash: B256, + }, + /// The computed state root after EVM execution does not match the block header. + #[error( + "State root mismatch for block {block_number} (have: {current_state_hash}, expected: {expected_state_hash})" + )] + StateRootMismatch { + /// The block number where the mismatch occurred. + block_number: u64, + /// The actual state root computed from execution. + current_state_hash: B256, + /// The expected state root from the block header. + expected_state_hash: B256, + }, + /// An error from the persistence layer. + #[error(transparent)] + Persistence(#[from] PersistenceError), + /// A block execution error during EVM execution. + #[error(transparent)] + Execution(#[from] BlockExecutionError), + /// A provider error propagated from the block provider. + #[error(transparent)] + Provider(#[from] ProviderError), + /// A storage-layer error propagated from the underlying store. + #[error(transparent)] + Storage(#[from] OpProofsStorageError), +} diff --git a/rust/op-reth/crates/trie/src/engine/handle.rs b/rust/op-reth/crates/trie/src/engine/handle.rs new file mode 100644 index 00000000000..177c90e7cad --- /dev/null +++ b/rust/op-reth/crates/trie/src/engine/handle.rs @@ -0,0 +1,175 @@ +//! [`EngineHandle`] — the public, cloneable, Send + Sync interface. 
+ +use super::{ + DEFAULT_BACKPRESSURE_THRESHOLD, DEFAULT_PERSISTENCE_THRESHOLD, EngineAction, + error::EngineError, + runner::Engine, + tasks::{ExecuteBlockTask, IndexBlockTask, ReorgTask, SyncToTask, UnwindTask}, +}; +use crate::{OpProofStoragePruner, OpProofsStore}; +use alloy_eips::eip1898::BlockWithParent; +use crossbeam_channel::{Sender, bounded}; +use reth_evm::ConfigureEvm; +use reth_primitives_traits::{NodePrimitives, RecoveredBlock}; +use reth_provider::{ + BlockHashReader, BlockReader, DatabaseProviderFactory, StateProviderFactory, StateReader, +}; +use reth_trie_common::{HashedPostStateSorted, updates::TrieUpdatesSorted}; +use std::{panic, sync::Arc, thread}; +use tracing::error; + +/// A thin, cloneable handle used to communicate with the collector engine. +/// +/// Every public method (except [`Self::sync_to`]) sends an engine action to the +/// engine thread and blocks on a one-shot reply channel. +#[derive(Debug)] +pub struct EngineHandle { + sender: Sender>, +} + +impl Clone for EngineHandle { + fn clone(&self) -> Self { + Self { sender: self.sender.clone() } + } +} + +impl EngineHandle { + /// Spawn the collector engine on a new thread and return a handle. + pub fn spawn( + evm_config: Evm, + provider: Provider, + storage: Store, + pruner: OpProofStoragePruner, + ) -> Self + where + Evm: ConfigureEvm> + 'static, + Provider: BlockHashReader + + StateReader + + DatabaseProviderFactory + + StateProviderFactory + + BlockReader + + Clone + + 'static, + Store: OpProofsStore + Clone + 'static, + { + Self::spawn_with_thresholds( + evm_config, + provider, + storage, + pruner, + DEFAULT_PERSISTENCE_THRESHOLD, + DEFAULT_BACKPRESSURE_THRESHOLD, + ) + } + + /// Spawn the collector engine with custom persistence and backpressure thresholds. 
+ pub fn spawn_with_thresholds( + evm_config: Evm, + provider: Provider, + storage: Store, + pruner: OpProofStoragePruner, + persistence_threshold: u64, + backpressure_threshold: u64, + ) -> Self + where + Evm: ConfigureEvm> + 'static, + Provider: BlockHashReader + + StateReader + + DatabaseProviderFactory + + StateProviderFactory + + BlockReader + + Clone + + 'static, + Store: OpProofsStore + Clone + 'static, + { + let (tx, rx) = bounded(10); + let engine = Engine::new(evm_config, provider, storage, pruner, rx) + .with_persistence_threshold(persistence_threshold) + .with_backpressure_threshold(backpressure_threshold); + + thread::Builder::new() + .name("live-trie-collector".into()) + .spawn(move || { + if let Err(panic) = panic::catch_unwind(panic::AssertUnwindSafe(|| engine.run())) { + let msg = panic + .downcast_ref::<&str>() + .copied() + .or_else(|| panic.downcast_ref::().map(String::as_str)) + .unwrap_or("unknown"); + error!(target: "live-trie::engine", %msg, "Collector engine panicked"); + } + }) + .expect("failed to spawn live-trie-collector thread"); + + Self { sender: tx } + } + + fn send_and_recv( + &self, + make_action: impl FnOnce(Sender>) -> EngineAction, + ) -> Result<(), EngineError> { + let (reply_tx, reply_rx) = bounded(1); + self.sender.send(make_action(reply_tx)).map_err(|_| EngineError::EngineDied)?; + reply_rx.recv().map_err(|_| EngineError::EngineDied)? + } + + /// Execute a block through the EVM and buffer the resulting trie updates. + pub fn execute_block(&self, block: &RecoveredBlock) -> Result<(), EngineError> + where + Block: Clone, + { + self.send_and_recv(|reply| { + EngineAction::ExecuteBlock(ExecuteBlockTask { block: block.clone(), reply }) + }) + } + + /// Buffer pre-computed trie updates for `block` (no EVM execution). 
+ pub fn index_block( + &self, + block: BlockWithParent, + sorted_trie_updates: TrieUpdatesSorted, + sorted_post_state: HashedPostStateSorted, + ) -> Result<(), EngineError> { + self.send_and_recv(|reply| { + EngineAction::IndexBlock(IndexBlockTask { + block, + sorted_trie_updates, + sorted_post_state, + reply, + }) + }) + } + + /// Handle a chain reorg: unwind to the common ancestor then buffer new fork blocks. + pub fn reorg( + &self, + block_updates: Vec<(BlockWithParent, Arc, Arc)>, + ) -> Result<(), EngineError> { + self.send_and_recv(|reply| EngineAction::Reorg(ReorgTask { block_updates, reply })) + } + + /// Unwind indexed data back to `to` (first block number removed, inclusive). + pub fn unwind(&self, to: BlockWithParent) -> Result<(), EngineError> { + self.send_and_recv(|reply| EngineAction::Unwind(UnwindTask { to, reply })) + } + + /// Update the sync catch-up target (fire-and-forget). + /// + /// The engine will execute blocks up to `target` during its idle time, + /// interleaving catch-up work with incoming actions. + pub fn sync_to(&self, target: u64) -> Result<(), EngineError> { + self.sender + .send(EngineAction::SyncTo(SyncToTask { target })) + .map_err(|_| EngineError::EngineDied) + } + + /// Block until any in-progress background persistence completes (test/utility only). + #[cfg(test)] + pub fn flush(&self) { + use super::tasks::FlushTask; + let (reply_tx, reply_rx) = bounded(1); + if self.sender.send(EngineAction::Flush(FlushTask { reply: reply_tx })).is_ok() { + let _ = reply_rx.recv(); + } + } +} diff --git a/rust/op-reth/crates/trie/src/engine/metrics.rs b/rust/op-reth/crates/trie/src/engine/metrics.rs new file mode 100644 index 00000000000..ff1d3f2a85a --- /dev/null +++ b/rust/op-reth/crates/trie/src/engine/metrics.rs @@ -0,0 +1,22 @@ +//! Metrics for the live trie engine. + +use metrics::Histogram; +use reth_metrics::Metrics; + +/// High-level engine metrics. 
+#[derive(Metrics, Clone)] +#[metrics(scope = "optimism_trie.engine")] +pub(super) struct EngineMetrics { + /// Time to execute a block end-to-end (EVM + state root) in seconds. + pub execute_block_duration_seconds: Histogram, + /// Time spent executing the block (EVM only) in seconds. + pub execution_duration_seconds: Histogram, + /// Time spent calculating the state root in seconds. + pub state_root_duration_seconds: Histogram, + /// Time to index pre-computed trie updates for a block in seconds. + pub index_block_duration_seconds: Histogram, + /// Time to handle a reorg (unwind + re-index) in seconds. + pub reorg_duration_seconds: Histogram, + /// Time spent unwinding persistence and memory in seconds. + pub unwind_duration_seconds: Histogram, +} diff --git a/rust/op-reth/crates/trie/src/engine/mod.rs b/rust/op-reth/crates/trie/src/engine/mod.rs index 1db26293c02..17c9e3f9176 100644 --- a/rust/op-reth/crates/trie/src/engine/mod.rs +++ b/rust/op-reth/crates/trie/src/engine/mod.rs @@ -1,4 +1,77 @@ -//! Live trie engine. +//! Live trie collector for external proofs storage. +//! +//! The collector runs as an **engine** on a dedicated background thread. Callers +//! interact with it through [`EngineHandle`], a thin channel-based +//! handle whose methods mirror the old `LiveTrieCollector` API. +//! +//! Internally the engine owns *all* mutable state (memory buffer, persistence +//! handle, in-flight tracking) and processes engine action messages one at +//! a time, which structurally enforces the serial-call invariant. mod buffer; pub mod persistence; +mod tasks; + +mod error; +pub use error::EngineError; + +mod handle; +pub use handle::EngineHandle; + +#[cfg(feature = "metrics")] +mod metrics; +mod runner; +mod state; + +/// Default number of blocks to keep in memory before persisting. +const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 5; + +/// Default number of blocks where we block execution to allow persistence to catch up. 
+const DEFAULT_BACKPRESSURE_THRESHOLD: u64 = 10; + +/// Default timeout for waiting on a persistence save/unwind operation (in seconds). +const DEFAULT_PERSISTENCE_TIMEOUT_SECS: u64 = 60; + +/// Messages sent from [`EngineHandle`] to the engine thread. +enum EngineAction { + /// Execute a block via the EVM and index the resulting trie diff. + ExecuteBlock(tasks::ExecuteBlockTask), + /// Index pre-computed trie updates for a block (no EVM execution). + IndexBlock(tasks::IndexBlockTask), + /// Handle a reorg: unwind to the common ancestor then index the new chain. + Reorg(tasks::ReorgTask), + /// Unwind indexed data back to a given block. + Unwind(tasks::UnwindTask), + /// Block the caller until any in-flight persistence completes. + #[cfg(test)] + Flush(tasks::FlushTask), + /// Update the sync catch-up target (fire-and-forget). + SyncTo(tasks::SyncToTask), +} + +impl EngineAction { + fn execute(self, state: &mut state::EngineState) + where + Evm: reth_evm::ConfigureEvm< + Primitives: reth_primitives_traits::NodePrimitives, + >, + Provider: reth_provider::BlockHashReader + + reth_provider::StateReader + + reth_provider::DatabaseProviderFactory + + reth_provider::StateProviderFactory + + reth_provider::BlockReader + + Clone + + 'static, + Store: crate::OpProofsStore + Clone + 'static, + { + match self { + Self::ExecuteBlock(task) => task.execute(state), + Self::IndexBlock(task) => task.execute(state), + Self::Reorg(task) => task.execute(state), + Self::Unwind(task) => task.execute(state), + #[cfg(test)] + Self::Flush(task) => task.execute(state), + Self::SyncTo(task) => task.execute(state), + } + } +} diff --git a/rust/op-reth/crates/trie/src/engine/runner.rs b/rust/op-reth/crates/trie/src/engine/runner.rs new file mode 100644 index 00000000000..cf52dedb4be --- /dev/null +++ b/rust/op-reth/crates/trie/src/engine/runner.rs @@ -0,0 +1,174 @@ +//! [`Engine`] — the thin event-loop dispatcher. 
+ +use super::{ + DEFAULT_BACKPRESSURE_THRESHOLD, DEFAULT_PERSISTENCE_THRESHOLD, EngineAction, + error::EngineError, state::EngineState as State, +}; +use crate::{OpProofStoragePruner, OpProofsStore}; +use crossbeam_channel::Receiver; +use reth_evm::ConfigureEvm; +use reth_primitives_traits::BlockTy; +use reth_provider::{ + BlockHashReader, BlockReader, DatabaseProviderFactory, StateProviderFactory, StateReader, + TransactionVariant, +}; +use std::{ + ops::ControlFlow, + time::{Duration, Instant}, +}; +use tracing::{debug, error}; + +/// The engine that runs on a dedicated thread, dispatching [`EngineAction`] +/// messages to self-contained task structs that operate on the engine state. +#[allow(missing_debug_implementations)] +pub(super) struct Engine +where + Evm: ConfigureEvm, + Provider: StateReader + DatabaseProviderFactory + StateProviderFactory + BlockReader, +{ + state: State, + incoming: Receiver>>, + persistence_threshold: u64, + backpressure_threshold: u64, +} + +impl Engine +where + Evm: ConfigureEvm, + Provider: BlockHashReader + + StateReader + + DatabaseProviderFactory + + StateProviderFactory + + BlockReader> + + Clone + + 'static, + Store: OpProofsStore + Clone + 'static, +{ + pub(super) fn new( + evm_config: Evm, + provider: Provider, + storage: Store, + pruner: OpProofStoragePruner, + incoming: Receiver>>, + ) -> Self { + Self { + state: State::new(evm_config, provider, storage, pruner), + incoming, + persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, + backpressure_threshold: DEFAULT_BACKPRESSURE_THRESHOLD, + } + } + + pub(super) const fn with_persistence_threshold(mut self, threshold: u64) -> Self { + self.persistence_threshold = threshold; + self + } + + pub(super) const fn with_backpressure_threshold(mut self, threshold: u64) -> Self { + self.backpressure_threshold = threshold; + self + } + + /// Returns `true` if the engine is behind its sync target. 
+ fn needs_sync(&self) -> bool { + let current_tip = self.state.get_tip().map(|t| t.number).unwrap_or(0); + self.state.sync_target > current_tip + } + + /// Returns `true` if the buffer is above the backpressure threshold with a save in-flight. + fn backpressure_active(&self) -> bool { + self.state.persistence.in_flight.is_some() && + self.state.memory.len() as u64 >= self.backpressure_threshold + } + + /// Start a background persistence save if the memory buffer has reached the threshold. + fn maybe_start_save(&mut self) { + if self.state.memory.len() as u64 >= self.persistence_threshold && + let Err(e) = self.state.advance_persistence() + { + error!(target: "live-trie::engine", ?e, "Failed to start persistence save"); + } + } + + /// Execute the next sequential block (`current_tip + 1`) to advance toward the sync target. + fn advance_sync(&mut self) -> Result<(), EngineError> { + let current_tip = self.state.get_tip()?.number; + + if self.state.sync_target <= current_tip { + return Ok(()); + } + + let block_num = current_tip + 1; + let block = self + .state + .provider + .recovered_block(block_num.into(), TransactionVariant::NoHash)? + .ok_or(EngineError::BlockNotFound(block_num))?; + + super::tasks::execute_block(&block, &mut self.state) + } + + /// Process one event from the action, persistence, or sync channel. + /// + /// Three receivers compete in a single `select!`: + /// - **action**: a new [`EngineAction`] from a caller, or [`crossbeam_channel::never`] while + /// backpressure is active — callers naturally block in their bounded `send` until the + /// in-flight save completes and memory is pruned. + /// - **persistence**: signals a completed background save. + /// - **sync**: a zero-duration timer that fires immediately when the engine is behind its sync + /// target and not under backpressure; [`crossbeam_channel::never`] otherwise. + /// + /// Returns [`ControlFlow::Break`] when the action channel disconnects. 
+ fn process_next_event(&mut self) -> ControlFlow<()> { + let backpressure = self.backpressure_active(); + + let persist_rx = + self.state.persistence.in_flight.clone().unwrap_or_else(crossbeam_channel::never); + + // Gate new actions while backpressure is active — don't grow memory while draining it. + let incoming_rx: Receiver>> = + if backpressure { crossbeam_channel::never() } else { self.incoming.clone() }; + + // Fire immediately when there is sync work to do; block indefinitely otherwise. + let sync_rx: Receiver = if self.needs_sync() && !backpressure { + crossbeam_channel::after(Duration::ZERO) + } else { + crossbeam_channel::never() + }; + + crossbeam_channel::select! { + recv(incoming_rx) -> msg => match msg { + Ok(action) => action.execute(&mut self.state), + Err(_) => return ControlFlow::Break(()), + }, + recv(persist_rx) -> result => self.state.persistence.on_complete(result, &self.state.memory), + recv(sync_rx) -> _ => if let Err(err) = self.advance_sync() { + error!(target: "live-trie::engine", ?err, "Sync step failed"); + }, + } + ControlFlow::Continue(()) + } + + /// Runs the main loop of the engine, processing incoming actions. 
+ pub(super) fn run(mut self) { + debug_assert!( + self.persistence_threshold < self.backpressure_threshold, + "backpressure_threshold ({}) must be greater than persistence_threshold ({})", + self.backpressure_threshold, + self.persistence_threshold, + ); + debug!(target: "live-trie::engine", "Collector engine started"); + + loop { + match self.process_next_event() { + ControlFlow::Break(()) => break, + ControlFlow::Continue(()) => {} + } + self.maybe_start_save(); + } + + debug!(target: "live-trie::engine", "Collector engine shutting down, draining in-flight persist"); + self.state.drain_persistence(); + debug!(target: "live-trie::engine", "Collector engine stopped"); + } +} diff --git a/rust/op-reth/crates/trie/src/engine/state.rs b/rust/op-reth/crates/trie/src/engine/state.rs new file mode 100644 index 00000000000..3787185bd20 --- /dev/null +++ b/rust/op-reth/crates/trie/src/engine/state.rs @@ -0,0 +1,247 @@ +//! [`EngineState`] — all mutable engine state in one place. + +#[cfg(feature = "metrics")] +use super::metrics::EngineMetrics; +use super::{ + DEFAULT_PERSISTENCE_TIMEOUT_SECS, + buffer::state::TrieBufferState, + error::EngineError, + persistence::{PersistenceHandle, error::PersistenceError}, +}; +use crate::{OpProofStoragePruner, OpProofsProviderRO, OpProofsStorageError, OpProofsStore}; +use alloy_eips::{NumHash, eip1898::BlockWithParent}; +use crossbeam_channel::{Receiver, RecvError, RecvTimeoutError, bounded}; +use reth_evm::ConfigureEvm; +use reth_primitives_traits::BlockTy; +use reth_provider::{ + BlockHashReader, BlockReader, DatabaseProviderFactory, StateProviderFactory, StateReader, +}; +use std::time::Duration; +#[cfg(feature = "metrics")] +use std::time::Instant; +use tracing::{error, info}; + +/// Tracks all in-flight state for background persistence. +pub(crate) struct PersistenceState { + /// Handle to the persistence service. + handle: PersistenceHandle, + /// Reply channel for the in-flight save. Present only while a save is running. 
+ /// + /// Exposed `pub(crate)` so the engine loop can include it in a `select!`. + pub(crate) in_flight: Option, PersistenceError>>>, +} + +impl PersistenceState { + const fn new(handle: PersistenceHandle) -> Self { + Self { handle, in_flight: None } + } + + /// Blocking: wait for the in-flight save to finish and prune `memory`. + /// + /// Used during shutdown and before unwind to quiesce the persistence layer. + pub(crate) fn wait(&mut self, memory: &TrieBufferState) { + let Some(rx) = self.in_flight.take() else { return }; + + match rx.recv_timeout(Duration::from_secs(DEFAULT_PERSISTENCE_TIMEOUT_SECS)) { + Ok(Ok(Some(last_persisted))) => { + info!( + target: "live-trie::engine", + block_number = last_persisted, + "Persistence completed (waited), pruning memory" + ); + memory.prune(last_persisted + 1); + } + Ok(Ok(None)) => {} + Ok(Err(e)) => { + error!(target: "live-trie::engine", ?e, "Persistence save failed while waiting"); + } + Err(RecvTimeoutError::Timeout) => { + error!(target: "live-trie::engine", "Persistence timeout while waiting"); + } + Err(RecvTimeoutError::Disconnected) => { + error!(target: "live-trie::engine", "Persistence service disconnected while waiting"); + } + } + } + + /// Handle a completed background save received via `select!`: clear `in_flight` and prune + /// memory. + pub(crate) fn on_complete( + &mut self, + result: Result, PersistenceError>, RecvError>, + memory: &TrieBufferState, + ) { + self.in_flight = None; + + match result { + Ok(Ok(Some(last_persisted))) => { + info!( + target: "live-trie::engine", + block_number = last_persisted, + "Background persistence completed, pruning memory" + ); + memory.prune(last_persisted + 1); + } + Ok(Ok(None)) => {} + Ok(Err(e)) => { + error!(target: "live-trie::engine", ?e, "Background persistence save failed"); + } + Err(_) => { + error!(target: "live-trie::engine", "Persistence service disconnected unexpectedly"); + } + } + } + + /// Start a background save if no save is already running. 
+ /// + /// The caller is responsible for checking the threshold before calling this. + /// Completion is handled reactively by the engine loop via `select!` on [`Self::in_flight`]. + pub(crate) fn advance_persistence( + &mut self, + memory: &TrieBufferState, + ) -> Result<(), EngineError> { + if self.in_flight.is_some() { + return Ok(()); + } + + let blocks = memory.blocks_ordered(); + if blocks.is_empty() { + return Ok(()); + } + + info!( + target: "live-trie::engine", + count = blocks.len(), + start_block = blocks.first().map(|arc| arc.0.block.number), + end_block = blocks.last().map(|arc| arc.0.block.number), + "Persistence threshold reached: sending to persistence service" + ); + + let (tx, rx) = bounded(1); + self.handle.save_updates(blocks, tx)?; + self.in_flight = Some(rx); + + Ok(()) + } + + /// Wait for any in-flight save, then send an unwind to the persistence service and + /// block until it completes. + pub(crate) fn unwind( + &mut self, + to: BlockWithParent, + memory: &TrieBufferState, + ) -> Result<(), EngineError> { + if self.in_flight.is_some() { + info!(target: "live-trie::engine", "Unwind waiting for in-flight persistence..."); + self.wait(memory); + } + + let (tx, rx) = bounded(1); + self.handle.unwind(to, tx)?; + + match rx.recv_timeout(Duration::from_secs(DEFAULT_PERSISTENCE_TIMEOUT_SECS)) { + Ok(Ok(())) => Ok(()), + Ok(Err(e)) => Err(e.into()), + Err(RecvTimeoutError::Timeout) => Err(EngineError::PersistenceTimeout), + Err(RecvTimeoutError::Disconnected) => Err(EngineError::PersistenceDisconnected), + } + } +} + +/// All mutable state owned by the engine. +pub(crate) struct EngineState +where + Evm: ConfigureEvm, + Provider: StateReader + DatabaseProviderFactory + StateProviderFactory + BlockReader, +{ + /// The highest block number the engine should sync to between processing actions. 
+ pub(crate) sync_target: u64, + + pub(crate) evm_config: Evm, + pub(crate) provider: Provider, + pub(crate) storage: Store, + + pub(crate) memory: TrieBufferState, + pub(crate) persistence: PersistenceState, + + #[cfg(feature = "metrics")] + pub(crate) metrics: EngineMetrics, +} + +impl EngineState +where + Evm: ConfigureEvm, + Provider: BlockHashReader + + StateReader + + DatabaseProviderFactory + + StateProviderFactory + + BlockReader> + + Clone + + 'static, + Store: OpProofsStore + Clone + 'static, +{ + pub(crate) fn new( + evm_config: Evm, + provider: Provider, + storage: Store, + pruner: OpProofStoragePruner, + ) -> Self { + let persistence_handle = PersistenceHandle::spawn(pruner, storage.clone()); + Self { + evm_config, + provider, + storage, + memory: TrieBufferState::new(), + persistence: PersistenceState::new(persistence_handle), + sync_target: 0, + #[cfg(feature = "metrics")] + metrics: EngineMetrics::new_with_labels(&[] as &[(&str, &str)]), + } + } + + /// Start a background save if no save is already running. + /// + /// The caller is responsible for checking the persistence threshold before + /// calling this. + pub(crate) fn advance_persistence(&mut self) -> Result<(), EngineError> { + self.persistence.advance_persistence(&self.memory) + } + + /// Block until any in-flight background save finishes and the memory buffer is pruned. + pub(crate) fn drain_persistence(&mut self) { + self.persistence.wait(&self.memory); + } + + /// Drain any in-flight save, unwind the persistence service to `to`, then + /// unwind the in-memory buffer to match. 
+ pub(crate) fn unwind(&mut self, to: BlockWithParent) -> Result<(), EngineError> { + #[cfg(feature = "metrics")] + let start = Instant::now(); + self.persistence.unwind(to, &self.memory)?; + self.memory.unwind(to.block.number); + #[cfg(feature = "metrics")] + self.metrics.unwind_duration_seconds.record(start.elapsed()); + Ok(()) + } + + /// Advances `sync_target` to `block_number` if it is higher than the current target. + pub(crate) const fn update_sync_target(&mut self, block_number: u64) { + if block_number > self.sync_target { + self.sync_target = block_number; + } + } + + /// Returns the current tip: the in-memory tip if present, otherwise the latest persisted block. + pub(crate) fn get_tip(&self) -> Result { + if let Some(tip) = self.memory.tip() { + return Ok(tip); + } + + self.storage + .provider_ro()? + .get_latest_block_number()? + .map(|(n, h)| NumHash::new(n, h)) + .ok_or(OpProofsStorageError::NoBlocksFound) + .map_err(Into::into) + } +} diff --git a/rust/op-reth/crates/trie/src/engine/tasks/execute_block.rs b/rust/op-reth/crates/trie/src/engine/tasks/execute_block.rs new file mode 100644 index 00000000000..cfc0db4b4e3 --- /dev/null +++ b/rust/op-reth/crates/trie/src/engine/tasks/execute_block.rs @@ -0,0 +1,138 @@ +use super::super::state::EngineState; +use crate::{ + BlockStateDiff, OpProofsStore, engine::EngineError, provider::OpProofsStateProviderRef, +}; +use alloy_eips::{NumHash, eip1898::BlockWithParent}; +use crossbeam_channel::Sender; +use reth_evm::{ConfigureEvm, execute::Executor}; +use reth_primitives_traits::{AlloyBlockHeader, NodePrimitives, RecoveredBlock}; +use reth_provider::{ + BlockHashReader, BlockReader, DatabaseProviderFactory, HashedPostStateProvider, + StateProviderFactory, StateReader, StateRootProvider, +}; +use reth_revm::database::StateProviderDatabase; +use std::time::Instant; +use tracing::{debug, info}; + +pub(crate) struct ExecuteBlockTask { + pub(crate) block: RecoveredBlock, + pub(crate) reply: Sender>, +} + +impl 
ExecuteBlockTask { + pub(crate) fn execute(self, state: &mut EngineState) + where + Evm: ConfigureEvm>, + Provider: BlockHashReader + + StateReader + + DatabaseProviderFactory + + StateProviderFactory + + BlockReader + + Clone + + 'static, + Store: OpProofsStore + Clone + 'static, + { + let result = run(&self.block, state); + let _ = self.reply.send(result); + } +} + +pub(crate) fn run( + block: &RecoveredBlock, + state: &mut EngineState, +) -> Result<(), EngineError> +where + Block: reth_primitives_traits::Block, + Evm: ConfigureEvm>, + Provider: BlockHashReader + + StateReader + + DatabaseProviderFactory + + StateProviderFactory + + BlockReader + + Clone + + 'static, + Store: OpProofsStore + Clone + 'static, +{ + let start = Instant::now(); + let tip = state.get_tip()?; + let parent_block_number = block.number().saturating_sub(1); + + if block.number() <= tip.number { + debug!( + block_number = block.number(), + tip_number = tip.number, + "Block already covered by tip, skipping execute_and_store", + ); + return Ok(()); + } + + if block.number() > tip.number.saturating_add(1) { + debug!( + block_number = block.number(), + tip_number = tip.number, + "Gap detected, updating sync target", + ); + state.update_sync_target(block.number()); + return Ok(()); + } + + if block.parent_hash() != tip.hash { + return Err(EngineError::ParentHashMismatch { + block_number: block.number(), + expected_parent_hash: tip.hash, + actual_parent_hash: block.parent_hash(), + }); + } + + let block_ref = + BlockWithParent::new(block.parent_hash(), NumHash::new(block.number(), block.hash())); + + let inner_provider = OpProofsStateProviderRef::new( + state.provider.state_by_block_hash(block.parent_hash())?, + state.storage.provider_ro()?, + parent_block_number, + ); + let state_provider = state.memory.state_provider(block.parent_hash(), inner_provider); + + let db = StateProviderDatabase::new(&state_provider); + let block_executor = state.evm_config.batch_executor(db); + let execution_result = 
block_executor.execute(block)?; + let execution_duration = start.elapsed(); + + let hashed_state = state_provider.hashed_post_state(&execution_result.state); + let (state_root, trie_updates) = + state_provider.state_root_with_updates(hashed_state.clone())?; + let state_root_duration = start.elapsed() - execution_duration; + + if state_root != block.state_root() { + return Err(EngineError::StateRootMismatch { + block_number: block.number(), + current_state_hash: state_root, + expected_state_hash: block.state_root(), + }); + } + + let sorted_trie_updates = trie_updates.into_sorted(); + let sorted_post_state = hashed_state.into_sorted(); + + state.memory.insert(block_ref, BlockStateDiff { sorted_trie_updates, sorted_post_state }); + + let total_duration = start.elapsed(); + + #[cfg(feature = "metrics")] + { + state.metrics.execute_block_duration_seconds.record(total_duration); + state.metrics.execution_duration_seconds.record(execution_duration); + state.metrics.state_root_duration_seconds.record(state_root_duration); + } + + info!( + block_number = block.number(), + ?total_duration, + ?execution_duration, + ?state_root_duration, + "Block executed and trie updates buffered successfully", + ); + + Ok(()) +} diff --git a/rust/op-reth/crates/trie/src/engine/tasks/flush.rs b/rust/op-reth/crates/trie/src/engine/tasks/flush.rs new file mode 100644 index 00000000000..dd9a6e4a67a --- /dev/null +++ b/rust/op-reth/crates/trie/src/engine/tasks/flush.rs @@ -0,0 +1,30 @@ +use super::super::state::EngineState; +use crate::OpProofsStore; +use crossbeam_channel::Sender; +use reth_evm::ConfigureEvm; +use reth_primitives_traits::BlockTy; +use reth_provider::{ + BlockHashReader, BlockReader, DatabaseProviderFactory, StateProviderFactory, StateReader, +}; + +pub(crate) struct FlushTask { + pub(crate) reply: Sender<()>, +} + +impl FlushTask { + pub(crate) fn execute(self, state: &mut EngineState) + where + Evm: ConfigureEvm, + Provider: BlockHashReader + + StateReader + + 
DatabaseProviderFactory + + StateProviderFactory + + BlockReader> + + Clone + + 'static, + Store: OpProofsStore + Clone + 'static, + { + state.drain_persistence(); + let _ = self.reply.send(()); + } +} diff --git a/rust/op-reth/crates/trie/src/engine/tasks/index_block.rs b/rust/op-reth/crates/trie/src/engine/tasks/index_block.rs new file mode 100644 index 00000000000..a79e97f0c6e --- /dev/null +++ b/rust/op-reth/crates/trie/src/engine/tasks/index_block.rs @@ -0,0 +1,100 @@ +use super::super::state::EngineState; +use crate::{BlockStateDiff, OpProofsStore, engine::EngineError}; +use alloy_eips::eip1898::BlockWithParent; +use crossbeam_channel::Sender; +use reth_evm::ConfigureEvm; +use reth_primitives_traits::BlockTy; +use reth_provider::{ + BlockHashReader, BlockReader, DatabaseProviderFactory, StateProviderFactory, StateReader, +}; +use reth_trie_common::{HashedPostStateSorted, updates::TrieUpdatesSorted}; +use std::time::Instant; +use tracing::{debug, info}; + +pub(crate) struct IndexBlockTask { + pub(crate) block: BlockWithParent, + pub(crate) sorted_trie_updates: TrieUpdatesSorted, + pub(crate) sorted_post_state: HashedPostStateSorted, + pub(crate) reply: Sender>, +} + +impl IndexBlockTask { + pub(crate) fn execute(self, state: &mut EngineState) + where + Evm: ConfigureEvm, + Provider: BlockHashReader + + StateReader + + DatabaseProviderFactory + + StateProviderFactory + + BlockReader> + + Clone + + 'static, + Store: OpProofsStore + Clone + 'static, + { + let Self { block, sorted_trie_updates, sorted_post_state, reply } = self; + let _ = reply.send(run(state, block, sorted_trie_updates, sorted_post_state)); + } +} + +fn run( + state: &mut EngineState, + block: BlockWithParent, + sorted_trie_updates: TrieUpdatesSorted, + sorted_post_state: HashedPostStateSorted, +) -> Result<(), EngineError> +where + Evm: ConfigureEvm, + Provider: BlockHashReader + + StateReader + + DatabaseProviderFactory + + StateProviderFactory + + BlockReader> + + Clone + + 'static, + Store: 
OpProofsStore + Clone + 'static, +{ + let start = Instant::now(); + let tip = state.get_tip()?; + + if block.block.number <= tip.number { + debug!( + block_number = block.block.number, + tip_number = tip.number, + "Block already covered by tip, skipping store_block_updates", + ); + return Ok(()); + } + + if block.block.number > tip.number.saturating_add(1) { + debug!( + block_number = block.block.number, + tip_number = tip.number, + "Gap detected, updating sync target", + ); + if block.block.number > state.sync_target { + state.sync_target = block.block.number; + } + return Ok(()); + } + + if block.parent != tip.hash { + return Err(EngineError::ParentHashMismatch { + block_number: block.block.number, + expected_parent_hash: tip.hash, + actual_parent_hash: block.parent, + }); + } + + state.memory.insert(block, BlockStateDiff { sorted_trie_updates, sorted_post_state }); + + #[cfg(feature = "metrics")] + state.metrics.index_block_duration_seconds.record(start.elapsed()); + + info!( + block_number = block.block.number, + total_duration = ?start.elapsed(), + "Trie updates buffered successfully", + ); + + Ok(()) +} diff --git a/rust/op-reth/crates/trie/src/engine/tasks/mod.rs b/rust/op-reth/crates/trie/src/engine/tasks/mod.rs new file mode 100644 index 00000000000..84c9b8c6488 --- /dev/null +++ b/rust/op-reth/crates/trie/src/engine/tasks/mod.rs @@ -0,0 +1,21 @@ +//! Task structs — one per engine action variant. +//! +//! Each task owns its input data and reply channel. Its `execute` method +//! takes `&mut EngineState`, calls the appropriate state method, and sends +//! the reply. The engine dispatcher is a thin match with no business logic. 
+ +mod execute_block; +#[cfg(test)] +mod flush; +mod index_block; +mod reorg; +mod sync_to; +mod unwind; + +pub(super) use execute_block::{ExecuteBlockTask, run as execute_block}; +#[cfg(test)] +pub(super) use flush::FlushTask; +pub(super) use index_block::IndexBlockTask; +pub(super) use reorg::ReorgTask; +pub(super) use sync_to::SyncToTask; +pub(super) use unwind::UnwindTask; diff --git a/rust/op-reth/crates/trie/src/engine/tasks/reorg.rs b/rust/op-reth/crates/trie/src/engine/tasks/reorg.rs new file mode 100644 index 00000000000..45f807d0d42 --- /dev/null +++ b/rust/op-reth/crates/trie/src/engine/tasks/reorg.rs @@ -0,0 +1,106 @@ +use super::super::state::EngineState; +use crate::{BlockStateDiff, OpProofsStore, engine::EngineError}; +use alloy_eips::eip1898::BlockWithParent; +use crossbeam_channel::Sender; +use reth_evm::ConfigureEvm; +use reth_primitives_traits::BlockTy; +use reth_provider::{ + BlockHashReader, BlockReader, DatabaseProviderFactory, StateProviderFactory, StateReader, +}; +use reth_trie_common::{HashedPostStateSorted, updates::TrieUpdatesSorted}; +use std::{sync::Arc, time::Instant}; +use tracing::{debug, info}; + +pub(crate) struct ReorgTask { + pub(crate) block_updates: + Vec<(BlockWithParent, Arc, Arc)>, + pub(crate) reply: Sender>, +} + +impl ReorgTask { + pub(crate) fn execute(self, state: &mut EngineState) + where + Evm: ConfigureEvm, + Provider: BlockHashReader + + StateReader + + DatabaseProviderFactory + + StateProviderFactory + + BlockReader> + + Clone + + 'static, + Store: OpProofsStore + Clone + 'static, + { + let _ = self.reply.send(run(state, self.block_updates)); + } +} + +fn run( + state: &mut EngineState, + block_updates: Vec<(BlockWithParent, Arc, Arc)>, +) -> Result<(), EngineError> +where + Evm: ConfigureEvm, + Provider: BlockHashReader + + StateReader + + DatabaseProviderFactory + + StateProviderFactory + + BlockReader> + + Clone + + 'static, + Store: OpProofsStore + Clone + 'static, +{ + if block_updates.is_empty() { + return 
Ok(()); + } + + let first = &block_updates[0].0; + let tip = state.get_tip()?; + + if first.block.number > tip.number { + // Reorg originates beyond the stored tip — the engine is still catching up. + // Sync batches will fetch post-reorg blocks from the provider, so nothing to unwind. + debug!( + target: "live-trie::engine", + first_block = first.block.number, + tip = tip.number, + "Reorg starts beyond stored tip, skipping" + ); + return Ok(()); + } + + let start = Instant::now(); + let common_ancestor_number = first.block.number.saturating_sub(1); + + info!( + target: "live-trie::engine", + reorg_depth = block_updates.len(), + common_ancestor = common_ancestor_number, + "Handling reorg: unwinding and buffering new path" + ); + + state.unwind(*first)?; + + for (block, trie_updates, hashed_state) in &block_updates { + state.memory.insert( + *block, + BlockStateDiff { + sorted_trie_updates: (**trie_updates).clone(), + sorted_post_state: (**hashed_state).clone(), + }, + ); + } + + let total_duration = start.elapsed(); + + #[cfg(feature = "metrics")] + state.metrics.reorg_duration_seconds.record(total_duration); + + info!( + start_block_number = block_updates.first().map(|(b, _, _)| b.block.number), + end_block_number = block_updates.last().map(|(b, _, _)| b.block.number), + ?total_duration, + "Trie updates rewound and buffered successfully", + ); + + Ok(()) +} diff --git a/rust/op-reth/crates/trie/src/engine/tasks/sync_to.rs b/rust/op-reth/crates/trie/src/engine/tasks/sync_to.rs new file mode 100644 index 00000000000..c675c07e882 --- /dev/null +++ b/rust/op-reth/crates/trie/src/engine/tasks/sync_to.rs @@ -0,0 +1,30 @@ +use super::super::state::EngineState; +use crate::OpProofsStore; +use reth_evm::ConfigureEvm; +use reth_primitives_traits::BlockTy; +use reth_provider::{ + BlockHashReader, BlockReader, DatabaseProviderFactory, StateProviderFactory, StateReader, +}; +use tracing::debug; + +pub(crate) struct SyncToTask { + pub(crate) target: u64, +} + +impl SyncToTask { 
+ pub(crate) fn execute(self, state: &mut EngineState) + where + Evm: ConfigureEvm, + Provider: BlockHashReader + + StateReader + + DatabaseProviderFactory + + StateProviderFactory + + BlockReader> + + Clone + + 'static, + Store: OpProofsStore + Clone + 'static, + { + state.update_sync_target(self.target); + debug!(target: "live-trie::engine", sync_target = self.target, "Sync target updated"); + } +} diff --git a/rust/op-reth/crates/trie/src/engine/tasks/unwind.rs b/rust/op-reth/crates/trie/src/engine/tasks/unwind.rs new file mode 100644 index 00000000000..707c68fa1a4 --- /dev/null +++ b/rust/op-reth/crates/trie/src/engine/tasks/unwind.rs @@ -0,0 +1,63 @@ +use super::super::state::EngineState; +use crate::{OpProofsStore, engine::EngineError}; +use alloy_eips::eip1898::BlockWithParent; +use crossbeam_channel::Sender; +use reth_evm::ConfigureEvm; +use reth_primitives_traits::BlockTy; +use reth_provider::{ + BlockHashReader, BlockReader, DatabaseProviderFactory, StateProviderFactory, StateReader, +}; +use tracing::{debug, info}; + +pub(crate) struct UnwindTask { + pub(crate) to: BlockWithParent, + pub(crate) reply: Sender>, +} + +impl UnwindTask { + pub(crate) fn execute(self, state: &mut EngineState) + where + Evm: ConfigureEvm, + Provider: BlockHashReader + + StateReader + + DatabaseProviderFactory + + StateProviderFactory + + BlockReader> + + Clone + + 'static, + Store: OpProofsStore + Clone + 'static, + { + let _ = self.reply.send(run(state, self.to)); + } +} + +fn run( + state: &mut EngineState, + to: BlockWithParent, +) -> Result<(), EngineError> +where + Evm: ConfigureEvm, + Provider: BlockHashReader + + StateReader + + DatabaseProviderFactory + + StateProviderFactory + + BlockReader> + + Clone + + 'static, + Store: OpProofsStore + Clone + 'static, +{ + let tip = state.get_tip()?; + if to.block.number > tip.number { + debug!( + target: "live-trie::engine", + to_block = to.block.number, + tip = tip.number, + "Unwind target beyond stored tip, skipping" + ); + 
return Ok(()); + } + + info!(target: "live-trie::engine", to_block = to.block.number, "Unwinding history"); + state.unwind(to)?; + Ok(()) +} diff --git a/rust/op-reth/crates/trie/src/lib.rs b/rust/op-reth/crates/trie/src/lib.rs index 5af20dce9d4..16b75c20a54 100644 --- a/rust/op-reth/crates/trie/src/lib.rs +++ b/rust/op-reth/crates/trie/src/lib.rs @@ -53,6 +53,7 @@ pub mod provider; pub mod live; pub mod engine; +pub use engine::EngineHandle; pub mod cursor; #[cfg(not(feature = "metrics"))] From e1bf33bf2d9cc00f990600041521daaa7794313a Mon Sep 17 00:00:00 2001 From: Arun Dhyani Date: Tue, 12 May 2026 15:52:19 +0530 Subject: [PATCH 07/10] fmt fixes --- rust/op-reth/crates/node/src/proof_history.rs | 7 ++----- rust/op-reth/crates/trie/src/engine/handle.rs | 4 ---- rust/op-reth/crates/trie/tests/live.rs | 3 ++- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/rust/op-reth/crates/node/src/proof_history.rs b/rust/op-reth/crates/node/src/proof_history.rs index b4ce7216745..b5c460d67b5 100644 --- a/rust/op-reth/crates/node/src/proof_history.rs +++ b/rust/op-reth/crates/node/src/proof_history.rs @@ -71,11 +71,8 @@ where let storage: OpProofsStorage> = mdbx.clone().into(); let storage_exec = storage.clone(); - let RollupArgs { - proofs_history_window, - proofs_history_verification_interval, - .. - } = args.clone(); + let RollupArgs { proofs_history_window, proofs_history_verification_interval, .. } = + args.clone(); let handle = builder .node(OpNode::new(args)) diff --git a/rust/op-reth/crates/trie/src/engine/handle.rs b/rust/op-reth/crates/trie/src/engine/handle.rs index 107cbf3bb5e..5bc1126cf96 100644 --- a/rust/op-reth/crates/trie/src/engine/handle.rs +++ b/rust/op-reth/crates/trie/src/engine/handle.rs @@ -164,11 +164,7 @@ impl EngineHandle } /// Block until any in-progress background persistence completes (test/utility only). 
-<<<<<<< HEAD #[cfg(any(test, feature = "test-utils"))] -======= - #[cfg(test)] ->>>>>>> origin/develop pub fn flush(&self) { use super::tasks::FlushTask; let (reply_tx, reply_rx) = bounded(1); diff --git a/rust/op-reth/crates/trie/tests/live.rs b/rust/op-reth/crates/trie/tests/live.rs index ba42d6efcb7..11839efd12e 100644 --- a/rust/op-reth/crates/trie/tests/live.rs +++ b/rust/op-reth/crates/trie/tests/live.rs @@ -13,7 +13,8 @@ use reth_evm_ethereum::EthEvmConfig; use reth_node_api::{NodePrimitives, NodeTypesWithDB}; use reth_optimism_trie::{ MdbxProofsStorage, MdbxProofsStorageV2, OpProofStoragePruner, OpProofsStorage, OpProofsStore, - engine::{EngineError, EngineHandle}, RethTrieStorageLayout, + RethTrieStorageLayout, + engine::{EngineError, EngineHandle}, initialize::InitializationJob, }; use reth_primitives_traits::{Block as _, RecoveredBlock}; From 858ce5a155dfdf53a7747ece83b8bac87fb17f12 Mon Sep 17 00:00:00 2001 From: Arun Dhyani Date: Tue, 12 May 2026 20:30:11 +0530 Subject: [PATCH 08/10] zepter + test fixes --- rust/op-reth/crates/exex/Cargo.toml | 15 +++++++------ rust/op-reth/crates/node/Cargo.toml | 3 ++- rust/op-reth/crates/trie/Cargo.toml | 13 ++++++++++- .../trie/src/engine/tasks/execute_block.rs | 22 ++++++++++++++++--- rust/op-reth/crates/trie/tests/live.rs | 8 +++---- 5 files changed, 45 insertions(+), 16 deletions(-) diff --git a/rust/op-reth/crates/exex/Cargo.toml b/rust/op-reth/crates/exex/Cargo.toml index 28b01c85dbc..e112dd325eb 100644 --- a/rust/op-reth/crates/exex/Cargo.toml +++ b/rust/op-reth/crates/exex/Cargo.toml @@ -47,13 +47,14 @@ tempfile.workspace = true [features] test-utils = [ - "reth-db/test-utils", - "reth-trie/test-utils", - "reth-node-builder/test-utils", - "reth-optimism-node/test-utils", - "reth-provider/test-utils", - "reth-ethereum-primitives/test-utils", - "reth-primitives-traits/test-utils", + "reth-db/test-utils", + "reth-trie/test-utils", + "reth-node-builder/test-utils", + "reth-optimism-node/test-utils", + 
"reth-provider/test-utils", + "reth-ethereum-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-optimism-trie/test-utils" ] metrics = [ "reth-optimism-trie/metrics", diff --git a/rust/op-reth/crates/node/Cargo.toml b/rust/op-reth/crates/node/Cargo.toml index 376308a6779..5b0b0664a79 100644 --- a/rust/op-reth/crates/node/Cargo.toml +++ b/rust/op-reth/crates/node/Cargo.toml @@ -147,7 +147,8 @@ test-utils = [ "reth-stages-types/test-utils", "reth-db-api/test-utils", "reth-tasks/test-utils", - "reth-optimism-exex/test-utils" + "reth-optimism-exex/test-utils", + "reth-optimism-trie/test-utils" ] reth-codec = ["reth-optimism-primitives/reth-codec"] diff --git a/rust/op-reth/crates/trie/Cargo.toml b/rust/op-reth/crates/trie/Cargo.toml index 0d430993537..f652d5adaa8 100644 --- a/rust/op-reth/crates/trie/Cargo.toml +++ b/rust/op-reth/crates/trie/Cargo.toml @@ -82,7 +82,18 @@ eyre.workspace = true serial_test.workspace = true [features] -test-utils = [] +test-utils = [ + "reth-codecs/test-utils", + "reth-db/test-utils", + "reth-ethereum-primitives?/test-utils", + "reth-evm/test-utils", + "reth-primitives-traits/test-utils", + "reth-provider/test-utils", + "reth-revm/test-utils", + "reth-tasks/test-utils", + "reth-trie/test-utils", + "reth-trie-common/test-utils" +] serde-bincode-compat = [ "reth-trie-common/serde-bincode-compat", "alloy-consensus/serde-bincode-compat", diff --git a/rust/op-reth/crates/trie/src/engine/tasks/execute_block.rs b/rust/op-reth/crates/trie/src/engine/tasks/execute_block.rs index cfc0db4b4e3..d0604d5b0d5 100644 --- a/rust/op-reth/crates/trie/src/engine/tasks/execute_block.rs +++ b/rust/op-reth/crates/trie/src/engine/tasks/execute_block.rs @@ -7,12 +7,12 @@ use crossbeam_channel::Sender; use reth_evm::{ConfigureEvm, execute::Executor}; use reth_primitives_traits::{AlloyBlockHeader, NodePrimitives, RecoveredBlock}; use reth_provider::{ - BlockHashReader, BlockReader, DatabaseProviderFactory, HashedPostStateProvider, + 
BlockHashReader, BlockReader, DatabaseProviderFactory, HashedPostStateProvider, ProviderError, StateProviderFactory, StateReader, StateRootProvider, }; use reth_revm::database::StateProviderDatabase; use std::time::Instant; -use tracing::{debug, info}; +use tracing::{debug, info, warn}; pub(crate) struct ExecuteBlockTask { pub(crate) block: RecoveredBlock, @@ -87,8 +87,24 @@ where let block_ref = BlockWithParent::new(block.parent_hash(), NumHash::new(block.number(), block.hash())); + let parent_state = match state.provider.state_by_block_hash(block.parent_hash()) { + Ok(p) => p, + Err(ProviderError::StateForHashNotFound(hash)) => { + // Likely a transient reorg race: reth no longer has state for what we believe is the + // parent. Skip gracefully; subsequent ChainCommitted/ChainReorged notifications will + // resync us. + warn!( + block_number = block.number(), + parent_hash = ?hash, + "Parent state not available in reth; skipping execute_block", + ); + return Ok(()); + } + Err(e) => return Err(e.into()), + }; + let inner_provider = OpProofsStateProviderRef::new( - state.provider.state_by_block_hash(block.parent_hash())?, + parent_state, state.storage.provider_ro()?, parent_block_number, ); diff --git a/rust/op-reth/crates/trie/tests/live.rs b/rust/op-reth/crates/trie/tests/live.rs index 11839efd12e..6b3b189b469 100644 --- a/rust/op-reth/crates/trie/tests/live.rs +++ b/rust/op-reth/crates/trie/tests/live.rs @@ -32,13 +32,13 @@ use tempfile::TempDir; use test_case::test_case; fn create_mdbx_proofs_storage() -> Arc { - let path = TempDir::new().unwrap(); - Arc::new(MdbxProofsStorage::new(path.path()).unwrap()) + let path = TempDir::new().unwrap().keep(); + Arc::new(MdbxProofsStorage::new(&path).unwrap()) } fn create_mdbx_proofs_storage_v2() -> Arc { - let path = TempDir::new().unwrap(); - Arc::new(MdbxProofsStorageV2::new(path.path()).unwrap()) + let path = TempDir::new().unwrap().keep(); + Arc::new(MdbxProofsStorageV2::new(&path).unwrap()) } /// Converts a secp256k1 
public key to an Ethereum address. From 0524b1f9956aaa235e7f52ee3554ed3eb670878d Mon Sep 17 00:00:00 2001 From: Arun Dhyani Date: Tue, 12 May 2026 21:44:41 +0530 Subject: [PATCH 09/10] added ServiceGuard --- rust/op-reth/crates/trie/src/engine/handle.rs | 14 ++++------ rust/op-reth/crates/trie/src/engine/mod.rs | 1 + .../trie/src/engine/persistence/handle.rs | 14 +++++----- .../crates/trie/src/engine/service_guard.rs | 26 +++++++++++++++++++ 4 files changed, 38 insertions(+), 17 deletions(-) create mode 100644 rust/op-reth/crates/trie/src/engine/service_guard.rs diff --git a/rust/op-reth/crates/trie/src/engine/handle.rs b/rust/op-reth/crates/trie/src/engine/handle.rs index 5bc1126cf96..f50dc414cdb 100644 --- a/rust/op-reth/crates/trie/src/engine/handle.rs +++ b/rust/op-reth/crates/trie/src/engine/handle.rs @@ -4,6 +4,7 @@ use super::{ DEFAULT_BACKPRESSURE_THRESHOLD, DEFAULT_PERSISTENCE_THRESHOLD, EngineAction, error::EngineError, runner::Engine, + service_guard::ServiceGuard, tasks::{ExecuteBlockTask, IndexBlockTask, ReorgTask, SyncToTask, UnwindTask}, }; use crate::{OpProofStoragePruner, OpProofsStore}; @@ -22,15 +23,10 @@ use tracing::error; /// /// Every public method (except [`Self::sync_to`]) sends an engine action to the /// engine thread and blocks on a one-shot reply channel. 
-#[derive(Debug)] +#[derive(Debug, Clone)] pub struct EngineHandle { sender: Sender>, -} - -impl Clone for EngineHandle { - fn clone(&self) -> Self { - Self { sender: self.sender.clone() } - } + _service_guard: Arc, } impl EngineHandle { @@ -87,7 +83,7 @@ impl EngineHandle .with_persistence_threshold(persistence_threshold) .with_backpressure_threshold(backpressure_threshold); - thread::Builder::new() + let join_handle = thread::Builder::new() .name("live-trie-collector".into()) .spawn(move || { if let Err(panic) = panic::catch_unwind(panic::AssertUnwindSafe(|| engine.run())) { @@ -101,7 +97,7 @@ impl EngineHandle }) .expect("failed to spawn live-trie-collector thread"); - Self { sender: tx } + Self { sender: tx, _service_guard: Arc::new(ServiceGuard::new(join_handle)) } } fn send_and_recv( diff --git a/rust/op-reth/crates/trie/src/engine/mod.rs b/rust/op-reth/crates/trie/src/engine/mod.rs index 388c1a920a2..b9e45d76d91 100644 --- a/rust/op-reth/crates/trie/src/engine/mod.rs +++ b/rust/op-reth/crates/trie/src/engine/mod.rs @@ -21,6 +21,7 @@ pub use handle::EngineHandle; #[cfg(feature = "metrics")] mod metrics; mod runner; +mod service_guard; mod state; /// Default number of blocks to keep in memory before persisting. diff --git a/rust/op-reth/crates/trie/src/engine/persistence/handle.rs b/rust/op-reth/crates/trie/src/engine/persistence/handle.rs index 5ca9aafe303..b0030e3a1b6 100644 --- a/rust/op-reth/crates/trie/src/engine/persistence/handle.rs +++ b/rust/op-reth/crates/trie/src/engine/persistence/handle.rs @@ -1,6 +1,8 @@ //! Handle and action enum for the persistence service. 
-use super::{error::PersistenceError, service::PersistenceService}; +use super::{ + super::service_guard::ServiceGuard, error::PersistenceError, service::PersistenceService, +}; use crate::{BlockStateDiff, OpProofsStore, prune::OpProofStoragePruner}; use alloy_eips::eip1898::BlockWithParent; use crossbeam_channel::Sender; @@ -29,14 +31,10 @@ pub enum PersistenceAction { #[derive(Debug, Clone)] pub struct PersistenceHandle { sender: Sender<PersistenceAction>, + _service_guard: Arc<ServiceGuard>, } impl PersistenceHandle { - /// Create a new handle. - pub const fn new(sender: Sender<PersistenceAction>) -> Self { - Self { sender } - } - /// Spawn the service in a new thread and return a handle. pub fn spawn<S>(pruner: OpProofStoragePruner<S>, storage: S) -> Self where @@ -46,12 +44,12 @@ impl PersistenceHandle { let (tx, rx) = crossbeam_channel::bounded(2); let service = PersistenceService::new(pruner, storage, rx); - thread::Builder::new() + let join_handle = thread::Builder::new() .name("Live Trie Persistence".into()) .spawn(move || service.run()) .expect("failed to spawn live trie persistence thread"); - Self::new(tx) + Self { sender: tx, _service_guard: Arc::new(ServiceGuard::new(join_handle)) } } /// Send a save request. diff --git a/rust/op-reth/crates/trie/src/engine/service_guard.rs b/rust/op-reth/crates/trie/src/engine/service_guard.rs new file mode 100644 index 00000000000..8231a1c81a3 --- /dev/null +++ b/rust/op-reth/crates/trie/src/engine/service_guard.rs @@ -0,0 +1,26 @@ +//! Generic guard that joins a service thread on drop. + +use std::{fmt, thread::JoinHandle}; + +/// Joins the wrapped thread when dropped. `None` is allowed for test/mock construction.
+pub(super) struct ServiceGuard(Option<JoinHandle<()>>); + +impl ServiceGuard { + pub(super) const fn new(handle: JoinHandle<()>) -> Self { + Self(Some(handle)) + } +} + +impl fmt::Debug for ServiceGuard { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("ServiceGuard").field(&self.0.as_ref().map(|_| "...")).finish() + } +} + +impl Drop for ServiceGuard { + fn drop(&mut self) { + if let Some(join_handle) = self.0.take() { + let _ = join_handle.join(); + } + } +} From 2ce4c6b6adba9afadb8c650dd154af22df75b0e5 Mon Sep 17 00:00:00 2001 From: Arun Dhyani Date: Tue, 12 May 2026 22:50:31 +0530 Subject: [PATCH 10/10] idle flush added --- rust/op-reth/crates/trie/src/engine/mod.rs | 8 +++++++ rust/op-reth/crates/trie/src/engine/runner.rs | 21 ++++++++++++++++--- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/rust/op-reth/crates/trie/src/engine/mod.rs b/rust/op-reth/crates/trie/src/engine/mod.rs index b9e45d76d91..fc24a514588 100644 --- a/rust/op-reth/crates/trie/src/engine/mod.rs +++ b/rust/op-reth/crates/trie/src/engine/mod.rs @@ -33,6 +33,14 @@ const DEFAULT_BACKPRESSURE_THRESHOLD: u64 = 10; /// Default timeout for waiting on a persistence save/unwind operation (in seconds). const DEFAULT_PERSISTENCE_TIMEOUT_SECS: u64 = 60; +/// How long the engine waits with a non-empty memory buffer before flushing it even if the +/// persistence threshold has not been reached. +/// +/// Without this, a paused chain (e.g. fault-proof tests that freeze the sequencer) would leave +/// buffered blocks unpersisted indefinitely, breaking the proofs RPC's strict "is this block +/// persisted?" check. +const IDLE_FLUSH_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10); + /// Messages sent from [`EngineHandle`] to the engine thread. enum EngineAction { /// Execute a block via the EVM and index the resulting trie diff.
diff --git a/rust/op-reth/crates/trie/src/engine/runner.rs b/rust/op-reth/crates/trie/src/engine/runner.rs index cf52dedb4be..0bf22570a25 100644 --- a/rust/op-reth/crates/trie/src/engine/runner.rs +++ b/rust/op-reth/crates/trie/src/engine/runner.rs @@ -2,7 +2,7 @@ use super::{ DEFAULT_BACKPRESSURE_THRESHOLD, DEFAULT_PERSISTENCE_THRESHOLD, EngineAction, - error::EngineError, state::EngineState as State, + IDLE_FLUSH_INTERVAL, error::EngineError, state::EngineState as State, }; use crate::{OpProofStoragePruner, OpProofsStore}; use crossbeam_channel::Receiver; @@ -108,19 +108,23 @@ where super::tasks::execute_block(&block, &mut self.state) } - /// Process one event from the action, persistence, or sync channel. + /// Process one event from the action, persistence, sync, or idle-flush channel. /// - /// Three receivers compete in a single `select!`: + /// Four receivers compete in a single `select!`: /// - **action**: a new [`EngineAction`] from a caller, or [`crossbeam_channel::never`] while /// backpressure is active — callers naturally block in their bounded `send` until the /// in-flight save completes and memory is pruned. /// - **persistence**: signals a completed background save. /// - **sync**: a zero-duration timer that fires immediately when the engine is behind its sync /// target and not under backpressure; [`crossbeam_channel::never`] otherwise. + /// - **idle-flush**: fires after [`IDLE_FLUSH_INTERVAL`] when memory holds buffered blocks but + /// the persistence threshold hasn't been reached and no save is in flight. Keeps a paused + /// chain from leaving buffered blocks invisible to the proofs RPC indefinitely. /// /// Returns [`ControlFlow::Break`] when the action channel disconnects. 
fn process_next_event(&mut self) -> ControlFlow<()> { let backpressure = self.backpressure_active(); + let save_in_flight = self.state.persistence.in_flight.is_some(); let persist_rx = self.state.persistence.in_flight.clone().unwrap_or_else(crossbeam_channel::never); @@ -136,6 +140,14 @@ where crossbeam_channel::never() }; + // Arm the idle-flush timer only when there's buffered work that nothing else will flush. + let idle_flush_rx: Receiver<std::time::Instant> = + if !self.state.memory.is_empty() && !save_in_flight && !backpressure { + crossbeam_channel::after(IDLE_FLUSH_INTERVAL) + } else { + crossbeam_channel::never() + }; + crossbeam_channel::select! { recv(incoming_rx) -> msg => match msg { Ok(action) => action.execute(&mut self.state), @@ -145,6 +157,9 @@ where recv(sync_rx) -> _ => if let Err(err) = self.advance_sync() { error!(target: "live-trie::engine", ?err, "Sync step failed"); }, + recv(idle_flush_rx) -> _ => if let Err(e) = self.state.advance_persistence() { + error!(target: "live-trie::engine", ?e, "Idle flush failed"); + }, } ControlFlow::Continue(()) }