From 24232e1f820f0da77d30369e38d562d619498585 Mon Sep 17 00:00:00 2001
From: 0xNeshi
Date: Mon, 3 Nov 2025 08:46:47 +0100
Subject: [PATCH 01/63] feat: impl eq for array and message

---
 src/event_scanner/message.rs | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/event_scanner/message.rs b/src/event_scanner/message.rs
index ebd1081a..e5319711 100644
--- a/src/event_scanner/message.rs
+++ b/src/event_scanner/message.rs
@@ -35,6 +35,12 @@ impl PartialEq<&[E; N]> for Message {
     }
 }
 
+impl PartialEq<[E; N]> for Message {
+    fn eq(&self, other: &[E; N]) -> bool {
+        self.eq(&other)
+    }
+}
+
 impl PartialEq<&[E]> for Message {
     fn eq(&self, other: &&[E]) -> bool {
         if let Message::Data(logs) = self {

From 07d1f7c59aeea1d5798533373f15509d2d7f4252 Mon Sep 17 00:00:00 2001
From: 0xNeshi
Date: Mon, 3 Nov 2025 08:47:28 +0100
Subject: [PATCH 02/63] test: ref reorg_rescans_events_within_same_block

---
 tests/live/reorg.rs | 99 +++++++++++++++++++++------------------
 1 file changed, 45 insertions(+), 54 deletions(-)

diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs
index 80676272..1241e19a 100644
--- a/tests/live/reorg.rs
+++ b/tests/live/reorg.rs
@@ -4,67 +4,58 @@
 use tokio_stream::StreamExt;
 
 use tokio::{sync::Mutex, time::timeout};
 
-use crate::common::{LiveScannerSetup, reorg_with_new_count_incr_txs, setup_live_scanner};
-use alloy::providers::ext::AnvilApi;
-use event_scanner::{Message, ScannerStatus};
+use crate::common::{
+    LiveScannerSetup, TestCounter::CountIncreased, reorg_with_new_count_incr_txs,
+    setup_live_scanner,
+};
+use alloy::{
+    primitives::U256,
+    providers::ext::AnvilApi,
+    rpc::types::anvil::{ReorgOptions, TransactionData},
+};
+use event_scanner::{Message, ScannerStatus, assert_empty, assert_next};
 
 #[tokio::test]
 async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> {
     let LiveScannerSetup { provider, contract, scanner, mut stream, anvil: _anvil } =
-        setup_live_scanner(Option::Some(0.1), Option::None, 0).await?;
+        setup_live_scanner(None, None, 0).await?;
 
     scanner.start().await?;
 
-    let num_initial_events = 5;
-    let num_new_events = 3;
-    let reorg_depth = 5;
-    let same_block = true;
-
-    let expected_event_tx_hashes = reorg_with_new_count_incr_txs(
-        provider,
-        contract,
-        num_initial_events,
-        num_new_events,
-        reorg_depth,
-        same_block,
-    )
-    .await?;
-
-    let event_block_count = Arc::new(Mutex::new(Vec::new()));
-    let event_block_count_clone = Arc::clone(&event_block_count);
-
-    let reorg_detected = Arc::new(Mutex::new(false));
-    let reorg_detected_clone = reorg_detected.clone();
-
-    let event_counting = async move {
-        while let Some(message) = stream.next().await {
-            match message {
-                Message::Data(logs) => {
-                    let mut guard = event_block_count_clone.lock().await;
-                    for log in logs {
-                        if let Some(n) = log.transaction_hash {
-                            guard.push(n);
-                        }
-                    }
-                }
-                Message::Error(e) => {
-                    panic!("panic with error {e}");
-                }
-                Message::Status(status) => {
-                    if matches!(status, ScannerStatus::ReorgDetected) {
-                        *reorg_detected_clone.lock().await = true;
-                    }
-                }
-            }
-        }
-    };
-
-    let _ = timeout(Duration::from_secs(5), event_counting).await;
-
-    let final_blocks: Vec<_> = event_block_count.lock().await.clone();
-    assert_eq!(final_blocks.len() as u64, num_initial_events + num_new_events);
-    assert_eq!(final_blocks, expected_event_tx_hashes);
-    assert!(*reorg_detected.lock().await);
+    // emit initial events
+    for _ in 0..5 {
+        contract.increase().send().await?.watch().await?;
+    }
+
+    // assert initial events are emitted as expected
+    assert_next!(stream, [CountIncreased { newCount: U256::from(1) }]);
+    assert_next!(stream, [CountIncreased { newCount: U256::from(2) }]);
+    assert_next!(stream, [CountIncreased { newCount: U256::from(3) }]);
+    assert_next!(stream, [CountIncreased { newCount: U256::from(4) }]);
+    assert_next!(stream, [CountIncreased { newCount: U256::from(5) }]);
+    let mut stream = assert_empty!(stream);
+
+    // reorg the chain
+    let tx_block_pairs = (0..3)
+        .map(|_| {
+            let tx = contract.increase().into_transaction_request();
+            (TransactionData::JSON(tx), 0)
+        })
+        .collect();
+
+    provider.anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await.unwrap();
+
+    // assert expected messages post-reorg
+    assert_next!(stream, ScannerStatus::ReorgDetected);
+    assert_next!(
+        stream,
+        [
+            CountIncreased { newCount: U256::from(2) },
+            CountIncreased { newCount: U256::from(3) },
+            CountIncreased { newCount: U256::from(4) },
+        ]
+    );
+    assert_empty!(stream);
 
     Ok(())
 }

From 08d6097e7c44e33a0bc14ef8050288282ed39021 Mon Sep 17 00:00:00 2001
From: 0xNeshi
Date: Mon, 3 Nov 2025 09:36:39 +0100
Subject: [PATCH 03/63] test: ref

---
 tests/live/reorg.rs | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs
index 1241e19a..df2580c5 100644
--- a/tests/live/reorg.rs
+++ b/tests/live/reorg.rs
@@ -37,10 +37,7 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> {
 
     // reorg the chain
     let tx_block_pairs = (0..3)
-        .map(|_| {
-            let tx = contract.increase().into_transaction_request();
-            (TransactionData::JSON(tx), 0)
-        })
+        .map(|_| (TransactionData::JSON(contract.increase().into_transaction_request()), 0))
         .collect();
 
     provider.anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await.unwrap();

From e5107027ed54ba480e1a4d0a5a464b88279c27fb Mon Sep 17 00:00:00 2001
From: 0xNeshi
Date: Mon, 3 Nov 2025 09:37:18 +0100
Subject: [PATCH 04/63] test: ref

---
 tests/live/reorg.rs | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs
index df2580c5..acf6f0f0 100644
--- a/tests/live/reorg.rs
+++ b/tests/live/reorg.rs
@@ -36,9 +36,11 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> {
     let mut stream = assert_empty!(stream);
 
     // reorg the chain
-    let tx_block_pairs = (0..3)
-        .map(|_| (TransactionData::JSON(contract.increase().into_transaction_request()), 0))
-        .collect();
+    let tx_block_pairs = vec![
+        (TransactionData::JSON(contract.increase().into_transaction_request()), 0),
+        (TransactionData::JSON(contract.increase().into_transaction_request()), 0),
+        (TransactionData::JSON(contract.increase().into_transaction_request()), 0),
+    ];
 
     provider.anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await.unwrap();

From 780e402bc44320b16af5daaf87f6d2007ab50388 Mon Sep 17 00:00:00 2001
From: 0xNeshi
Date: Mon, 3 Nov 2025 09:37:31 +0100
Subject: [PATCH 05/63] test: ref

---
 tests/live/reorg.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs
index acf6f0f0..5a5a02c7 100644
--- a/tests/live/reorg.rs
+++ b/tests/live/reorg.rs
@@ -41,7 +41,6 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> {
         (TransactionData::JSON(contract.increase().into_transaction_request()), 0),
         (TransactionData::JSON(contract.increase().into_transaction_request()), 0),
     ];
-
     provider.anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await.unwrap();
 
     // assert expected messages post-reorg

From 783a043835576d0f08f09031867bad9a450b5858 Mon Sep 17 00:00:00 2001
From: 
0xNeshi Date: Mon, 3 Nov 2025 11:22:24 +0100 Subject: [PATCH 06/63] feat: design live mode reorg API --- src/block_range_scanner.rs | 120 +++++++++++++---------- src/block_range_scanner/reorg_handler.rs | 49 +++++++++ src/block_range_scanner/ring_buffer.rs | 42 ++++++++ 3 files changed, 161 insertions(+), 50 deletions(-) create mode 100644 src/block_range_scanner/reorg_handler.rs create mode 100644 src/block_range_scanner/ring_buffer.rs diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index cd3ac650..c30f21ff 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -88,6 +88,11 @@ use alloy::{ }; use tracing::{debug, error, info, warn}; +mod reorg_handler; +mod ring_buffer; + +use reorg_handler::ReorgHandler; + pub const DEFAULT_MAX_BLOCK_RANGE: u64 = 1000; // copied form https://github.com/taikoxyz/taiko-mono/blob/f4b3a0e830e42e2fee54829326389709dd422098/packages/taiko-client/pkg/chain_iterator/block_batch_iterator.go#L19 pub const DEFAULT_BLOCK_CONFIRMATIONS: u64 = 0; @@ -155,6 +160,7 @@ impl BlockRangeScanner { } } + /// Sets the maximum block range per RPC call for the scanner. #[must_use] pub fn max_block_range(mut self, max_block_range: u64) -> Self { self.max_block_range = max_block_range; @@ -279,6 +285,7 @@ pub enum Command { struct Service { provider: RobustProvider, + reorg_handler: ReorgHandler, max_block_range: u64, error_count: u64, command_receiver: mpsc::Receiver, @@ -288,9 +295,11 @@ struct Service { impl Service { pub fn new(provider: RobustProvider, max_block_range: u64) -> (Self, mpsc::Sender) { let (cmd_tx, cmd_rx) = mpsc::channel(100); + let reorg_handler = ReorgHandler::new(provider.clone()); let service = Self { provider, + reorg_handler, max_block_range, error_count: 0, command_receiver: cmd_rx, @@ -356,6 +365,7 @@ impl Service { let max_block_range = self.max_block_range; let provider = self.provider.clone(); let latest = self.provider.get_block_number().await?; + let reorg_handler = self.reorg_handler.clone(); // the next block returned by the underlying subscription will always be `latest + 1`, // because `latest` was already mined and subscription by definition only streams after new @@ -366,9 +376,10 @@ impl Service { Self::stream_live_blocks( range_start, provider, - sender, + &sender, block_confirmations, max_block_range, + reorg_handler, ) .await; }); @@ -420,6 +431,7 @@ impl Service { ) -> Result<(), ScannerError> { let provider = self.provider.clone(); let max_block_range = self.max_block_range; + let reorg_handler = self.reorg_handler.clone(); let get_start_block = async || -> Result { let block = match start_height { @@ -453,9 +465,10 @@ impl Service { Self::stream_live_blocks( start_block, provider, - sender, + &sender, block_confirmations, max_block_range, + reorg_handler, ) .await; }); @@ -479,9 +492,10 @@ impl Service { Self::stream_live_blocks( cutoff + 1, provider, - live_block_buffer_sender, + &live_block_buffer_sender, block_confirmations, max_block_range, + reorg_handler, ) .await; }); @@ -669,61 +683,65 @@ impl Service { async fn stream_live_blocks( mut range_start: BlockNumber, provider: RobustProvider, - sender: mpsc::Sender, + sender: &mpsc::Sender, block_confirmations: u64, max_block_range: u64, + mut reorg_handler: ReorgHandler, ) { - match Self::get_block_subscription(&provider).await { - Ok(ws_stream) => { - info!("WebSocket connected for live blocks"); - - // ensure we start streaming only after the expected_next_block cutoff - let cutoff = range_start; - let mut stream = - 
ws_stream.into_stream().skip_while(|header| header.number() < cutoff); - - while let Some(incoming_block) = stream.next().await { - let incoming_block_num = incoming_block.number(); - info!(block_number = incoming_block_num, "Received block header"); - - if incoming_block_num < range_start { - warn!("Reorg detected: sending forked range"); - if !sender.try_stream(ScannerStatus::ReorgDetected).await { - return; - } + let ws_stream = match Self::get_block_subscription(&provider).await { + Ok(stream) => stream, + Err(e) => { + error!(error = %e, "Error establishing subscription"); + _ = sender.try_stream(e).await; + return; + } + }; - // Calculate the confirmed block position for the incoming block - let incoming_confirmed = - incoming_block_num.saturating_sub(block_confirmations); + // ensure we start streaming only after the expected_next_block cutoff + let cutoff = range_start; + let mut stream = ws_stream.into_stream().skip_while(|header| header.number() < cutoff); - // updated expected block to updated confirmed - range_start = incoming_confirmed; - } + while let Some(incoming_block) = stream.next().await { + let incoming_block_num = incoming_block.number(); + info!(block_number = incoming_block_num, "Received block header"); - let confirmed = incoming_block_num.saturating_sub(block_confirmations); - if confirmed >= range_start { - // NOTE: Edge case when difference between range end and range start >= max - // reads - let range_end = - confirmed.min(range_start.saturating_add(max_block_range - 1)); - - info!( - range_start = range_start, - range_end = range_end, - "Sending live block range" - ); - - if !sender.try_stream(range_start..=range_end).await { - return; - } + let reorged_opt = match reorg_handler.check(incoming_block).await { + Ok(opt) => opt, + Err(e) => { + error!(error = %e, "Failed to perform reorg check"); + _ = sender.try_stream(e).await; + return; + } + }; - // Overflow can not realistically happen - range_start = range_end + 1; - } + if let Some(reorged_from) = reorged_opt { + if !sender.try_stream(ScannerStatus::ReorgDetected).await { + break; } + + // TODO: explain in docs that the returned block after a reorg will be the + // first confirmed block that is smaller between: + // - the first post-reorg block + // - the previous range_start + + // updated expected block to updated confirmed + range_start = range_start.min(reorged_from); } - Err(e) => { - _ = sender.try_stream(e).await; + + let confirmed = incoming_block_num.saturating_sub(block_confirmations); + if confirmed >= range_start { + // NOTE: Edge case when difference between range end and range start >= max + // reads + let range_end = confirmed.min(range_start.saturating_add(max_block_range - 1)); + + info!(range_start = range_start, range_end = range_end, "Sending live block range"); + + if !sender.try_stream(range_start..=range_end).await { + break; + } + + // Overflow can not realistically happen + range_start = range_end + 1; } } } @@ -767,13 +785,15 @@ impl Service { } } - info!(processed = processed, discarded = discarded, "Processed buffered messages"); + info!(processed = processed, discarded = discarded, "Finished processing live messages"); } async fn get_block_subscription( provider: &RobustProvider, ) -> Result, ScannerError> { + info!("Establishing subscription to live blocks..."); let ws_stream = provider.subscribe_blocks().await?; + info!("Subscription established"); Ok(ws_stream) } } diff --git a/src/block_range_scanner/reorg_handler.rs b/src/block_range_scanner/reorg_handler.rs new file mode 
100644 index 00000000..1d1eecbe --- /dev/null +++ b/src/block_range_scanner/reorg_handler.rs @@ -0,0 +1,49 @@ +use alloy::{ + network::{Ethereum, Network}, + primitives::{BlockHash, BlockNumber}, +}; + +use crate::{ + ScannerError, + robust_provider::{self, RobustProvider}, +}; + +use super::ring_buffer::RingBuffer; + +#[derive(Clone)] +pub(crate) struct ReorgHandler { + provider: RobustProvider, + buffer: RingBuffer, +} + +impl ReorgHandler { + pub fn new(provider: RobustProvider) -> Self { + Self { provider, buffer: RingBuffer::new(10) } + } + + pub async fn check( + &mut self, + incoming_block: N::HeaderResponse, + ) -> Result, ScannerError> { + if !self.reorg_detected().await? { + return Ok(None); + } + + // warn!(reorged_from = reorged_from, "Reorg detected: sending forked range"); + + Ok(Some(1)) + } + + async fn reorg_detected(&self) -> Result { + match self.buffer.back() { + Some(last_streamed_block_hash) => { + match self.provider.get_block_by_hash(*last_streamed_block_hash).await { + Ok(_) => Ok(false), + Err(robust_provider::Error::BlockNotFound(_)) => Ok(true), + Err(e) => Err(e.into()), + } + } + None => Ok(false), + } + } +} diff --git a/src/block_range_scanner/ring_buffer.rs b/src/block_range_scanner/ring_buffer.rs new file mode 100644 index 00000000..caeb72f2 --- /dev/null +++ b/src/block_range_scanner/ring_buffer.rs @@ -0,0 +1,42 @@ +use std::collections::VecDeque; + +#[derive(Clone)] +pub struct RingBuffer { + inner: VecDeque, + capacity: usize, +} + +impl RingBuffer { + /// Creates an empty RingBuffer with a specific capacity. + pub fn new(capacity: usize) -> Self { + Self { inner: VecDeque::with_capacity(capacity), capacity } + } + + /// Adds a new element to the buffer. If the buffer is full, + /// the oldest element is removed to make space. + pub fn push(&mut self, item: T) { + if self.inner.len() == self.capacity { + self.inner.pop_front(); // Remove the oldest element + } + self.inner.push_back(item); // Add the new element + } + + /// Removes and returns the oldest element from the buffer, or None if it's empty. + pub fn pop(&mut self) -> Option { + self.inner.pop_front() + } + + pub fn back(&self) -> Option<&T> { + self.inner.back() + } + + /// Returns the current number of elements in the buffer. + pub fn len(&self) -> usize { + self.inner.len() + } + + /// Returns the maximum capacity of the buffer. 
+ pub fn capacity(&self) -> usize { + self.capacity + } +} From 3d59bff3ab1d63561d54dfc7099bffe3af613c49 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Mon, 3 Nov 2025 12:56:39 +0100 Subject: [PATCH 07/63] ref: pass ws_stream to the live task (not create it within) --- src/block_range_scanner.rs | 31 ++++++++----------------------- 1 file changed, 8 insertions(+), 23 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index c30f21ff..0a7136ff 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -363,7 +363,6 @@ impl Service { sender: mpsc::Sender, ) -> Result<(), ScannerError> { let max_block_range = self.max_block_range; - let provider = self.provider.clone(); let latest = self.provider.get_block_number().await?; let reorg_handler = self.reorg_handler.clone(); @@ -372,10 +371,12 @@ impl Service { // blocks have been mined let range_start = (latest + 1).saturating_sub(block_confirmations); + let ws_stream = self.provider.subscribe_blocks().await?; + tokio::spawn(async move { Self::stream_live_blocks( range_start, - provider, + ws_stream, &sender, block_confirmations, max_block_range, @@ -453,6 +454,8 @@ impl Service { let confirmed_tip = latest_block.saturating_sub(block_confirmations); + let ws_stream = self.provider.subscribe_blocks().await?; + // If start is beyond confirmed tip, skip historical and go straight to live if start_block > confirmed_tip { info!( @@ -464,7 +467,7 @@ impl Service { tokio::spawn(async move { Self::stream_live_blocks( start_block, - provider, + ws_stream, &sender, block_confirmations, max_block_range, @@ -491,7 +494,7 @@ impl Service { tokio::spawn(async move { Self::stream_live_blocks( cutoff + 1, - provider, + ws_stream, &live_block_buffer_sender, block_confirmations, max_block_range, @@ -682,21 +685,12 @@ impl Service { async fn stream_live_blocks( mut range_start: BlockNumber, - provider: RobustProvider, + ws_stream: Subscription, sender: &mpsc::Sender, block_confirmations: u64, max_block_range: u64, mut reorg_handler: ReorgHandler, ) { - let ws_stream = match Self::get_block_subscription(&provider).await { - Ok(stream) => stream, - Err(e) => { - error!(error = %e, "Error establishing subscription"); - _ = sender.try_stream(e).await; - return; - } - }; - // ensure we start streaming only after the expected_next_block cutoff let cutoff = range_start; let mut stream = ws_stream.into_stream().skip_while(|header| header.number() < cutoff); @@ -787,15 +781,6 @@ impl Service { info!(processed = processed, discarded = discarded, "Finished processing live messages"); } - - async fn get_block_subscription( - provider: &RobustProvider, - ) -> Result, ScannerError> { - info!("Establishing subscription to live blocks..."); - let ws_stream = provider.subscribe_blocks().await?; - info!("Subscription established"); - Ok(ws_stream) - } } async fn reorg_detected( From d3ac82c02d8dc2de47188c1924452650a54d62e0 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Mon, 10 Nov 2025 10:47:23 +0100 Subject: [PATCH 08/63] feat: implement ReorgHandler::check --- src/block_range_scanner/reorg_handler.rs | 44 ++++++++++++++++++++++-- src/block_range_scanner/ring_buffer.rs | 17 ++------- 2 files changed, 44 insertions(+), 17 deletions(-) diff --git a/src/block_range_scanner/reorg_handler.rs b/src/block_range_scanner/reorg_handler.rs index 1d1eecbe..3f5fd703 100644 --- a/src/block_range_scanner/reorg_handler.rs +++ b/src/block_range_scanner/reorg_handler.rs @@ -1,7 +1,10 @@ use alloy::{ - network::{Ethereum, Network}, + consensus::BlockHeader, 
+ eips::BlockNumberOrTag, + network::{BlockResponse, Ethereum, Network, primitives::HeaderResponse}, primitives::{BlockHash, BlockNumber}, }; +use tracing::{info, warn}; use crate::{ ScannerError, @@ -26,12 +29,47 @@ impl ReorgHandler { incoming_block: N::HeaderResponse, ) -> Result, ScannerError> { if !self.reorg_detected().await? { + // + self.buffer.push(incoming_block.hash()); return Ok(None); } - // warn!(reorged_from = reorged_from, "Reorg detected: sending forked range"); + info!("Reorg detected, searching for common ancestor"); - Ok(Some(1)) + // last block hash definitely doesn't exist on-chain + _ = self.buffer.pop_back(); + + while let Some(&block_hash) = self.buffer.back() { + info!(block_hash = %block_hash, "Checking if block exists on-chain"); + match self.provider.get_block_by_hash(block_hash).await { + Ok(block) => { + let header = block.header(); + info!(common_ancestor = %header.hash(), block_number = header.number(), "Common ancestor found"); + // store the incoming block's hash for future reference + self.buffer.push(incoming_block.hash()); + return Ok(Some(header.number())); + } + Err(robust_provider::Error::BlockNotFound(_)) => { + _ = self.buffer.pop_back(); + } + Err(e) => return Err(e.into()), + } + } + + warn!("Deep reorg detected, setting finalized block as common ancestor"); + + let finalized = self.provider.get_block_by_number(BlockNumberOrTag::Finalized).await?; + + // no need to store finalized block's hash in the buffer, as it is returned by default only + // if not buffered hashes exist on-chain + + // store the incoming block's hash for future reference + self.buffer.push(incoming_block.hash()); + + let header = finalized.header(); + info!(common_ancestor = %header.hash(), block_number = header.number(), "Finalized block set as common ancestor"); + + Ok(Some(header.number())) } async fn reorg_detected(&self) -> Result { diff --git a/src/block_range_scanner/ring_buffer.rs b/src/block_range_scanner/ring_buffer.rs index caeb72f2..28a8d699 100644 --- a/src/block_range_scanner/ring_buffer.rs +++ b/src/block_range_scanner/ring_buffer.rs @@ -1,7 +1,7 @@ use std::collections::VecDeque; #[derive(Clone)] -pub struct RingBuffer { +pub(crate) struct RingBuffer { inner: VecDeque, capacity: usize, } @@ -21,22 +21,11 @@ impl RingBuffer { self.inner.push_back(item); // Add the new element } - /// Removes and returns the oldest element from the buffer, or None if it's empty. - pub fn pop(&mut self) -> Option { - self.inner.pop_front() + pub fn pop_back(&mut self) -> Option { + self.inner.pop_back() } pub fn back(&self) -> Option<&T> { self.inner.back() } - - /// Returns the current number of elements in the buffer. - pub fn len(&self) -> usize { - self.inner.len() - } - - /// Returns the maximum capacity of the buffer. 
- pub fn capacity(&self) -> usize { - self.capacity - } } From 81212dd462d505421f7fb13448de9525dd9de948 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Mon, 10 Nov 2025 12:10:24 +0100 Subject: [PATCH 09/63] test: fix shallow_block_confirmation_does_not_mitigate_reorg --- tests/block_range_scanner.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index 7dfe10c4..5840895a 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -150,13 +150,11 @@ async fn shallow_block_confirmation_does_not_mitigate_reorg() -> anyhow::Result< // reorg more blocks than the block_confirmation config provider.anvil_reorg(ReorgOptions { depth: 8, tx_block_pairs: vec![] }).await?; - // mint additional blocks + + // mint additional blocks to allow the scanner to stream the pre-reorg blocks provider.anvil_mine(Some(3), None).await?; assert_next!(stream, ScannerStatus::ReorgDetected); - assert_next!(stream, 0..=0); - assert_next!(stream, 1..=1); - assert_next!(stream, 2..=2); assert_next!(stream, 3..=3); assert_next!(stream, 4..=4); assert_next!(stream, 5..=5); From 73388f35bd674ba04584be3c0386614df03864d6 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Mon, 10 Nov 2025 12:10:24 +0100 Subject: [PATCH 10/63] fix: range_start should be updated only when reorg common_ancestor is less than it --- src/block_range_scanner.rs | 15 +++++++-------- src/block_range_scanner/reorg_handler.rs | 7 ++++--- src/types.rs | 2 ++ 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 86460dfe..bb1565d6 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -648,18 +648,19 @@ impl Service { } }; - if let Some(reorged_from) = reorged_opt { - if !sender.try_stream(ScannerStatus::ReorgDetected).await { - break; + if let Some(common_ancestor) = reorged_opt { + if common_ancestor < range_start { + if !sender.try_stream(ScannerStatus::ReorgDetected).await { + return; + } + // updated expected block to updated confirmed + range_start = common_ancestor + 1; } // TODO: explain in docs that the returned block after a reorg will be the // first confirmed block that is smaller between: // - the first post-reorg block // - the previous range_start - - // updated expected block to updated confirmed - range_start = range_start.min(reorged_from); } let confirmed = incoming_block_num.saturating_sub(block_confirmations); @@ -668,8 +669,6 @@ impl Service { // reads let range_end = confirmed.min(range_start.saturating_add(max_block_range - 1)); - info!(range_start = range_start, range_end = range_end, "Sending live block range"); - if !sender.try_stream(range_start..=range_end).await { return; } diff --git a/src/block_range_scanner/reorg_handler.rs b/src/block_range_scanner/reorg_handler.rs index 3f5fd703..80e53000 100644 --- a/src/block_range_scanner/reorg_handler.rs +++ b/src/block_range_scanner/reorg_handler.rs @@ -29,7 +29,7 @@ impl ReorgHandler { incoming_block: N::HeaderResponse, ) -> Result, ScannerError> { if !self.reorg_detected().await? 
{
-            //
+            // store the incoming block's hash for future reference
             self.buffer.push(incoming_block.hash());
             return Ok(None);
         }
@@ -42,14 +42,15 @@ impl ReorgHandler {
         while let Some(&block_hash) = self.buffer.back() {
             info!(block_hash = %block_hash, "Checking if block exists on-chain");
             match self.provider.get_block_by_hash(block_hash).await {
-                Ok(block) => {
-                    let header = block.header();
+                Ok(common_ancestor) => {
+                    let header = common_ancestor.header();
                     info!(common_ancestor = %header.hash(), block_number = header.number(), "Common ancestor found");
                     // store the incoming block's hash for future reference
                     self.buffer.push(incoming_block.hash());
                     return Ok(Some(header.number()));
                 }
                 Err(robust_provider::Error::BlockNotFound(_)) => {
+                    // block was reorged
                     _ = self.buffer.pop_back();
                 }
                 Err(e) => return Err(e.into()),
             }
         }
diff --git a/src/types.rs b/src/types.rs
index d4399621..e498ab12 100644
--- a/src/types.rs
+++ b/src/types.rs
@@ -10,6 +10,8 @@ pub enum ScannerMessage {
     Status(ScannerStatus),
 }
 
+// TODO: implement Display for ScannerMessage
+
 #[derive(Copy, Debug, Clone, PartialEq)]
 pub enum ScannerStatus {
     SwitchingToLive,

From ec19227d44b9bf8fc79b481b2db145ba08733dcf Mon Sep 17 00:00:00 2001
From: 0xNeshi
Date: Mon, 10 Nov 2025 12:36:58 +0100
Subject: [PATCH 11/63] ref: refactor sync mode

---
 src/block_range_scanner.rs | 293 ++++++++++++++++--------------------
 1 file changed, 128 insertions(+), 165 deletions(-)

diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs
index bb1565d6..6d37e650 100644
--- a/src/block_range_scanner.rs
+++ b/src/block_range_scanner.rs
@@ -389,12 +389,10 @@ impl Service {
 
         // Step 1:
         // Fetches the starting block and end block for historical sync in parallel
-        let (start_block, latest_block) = tokio::try_join!(get_start_block(), get_latest_block())?;
+        let (mut start_block, latest_block) =
+            tokio::try_join!(get_start_block(), get_latest_block())?;
 
-        let confirmed_tip = latest_block.saturating_sub(block_confirmations);
-
-        let subscription = self.provider.subscribe_blocks().await?;
-        info!("Buffering live blocks");
+        let mut confirmed_tip = latest_block.saturating_sub(block_confirmations);
 
         // If start is beyond confirmed tip, skip historical and go straight to live
         if start_block > confirmed_tip {
@@ -404,6 +402,8 @@ impl Service {
                 "Start block is beyond confirmed tip, starting live stream"
             );
 
+            let subscription = self.provider.subscribe_blocks().await?;
+
             tokio::spawn(async move {
                 Self::stream_live_blocks(
                     start_block,
@@ -421,51 +421,56 @@ impl Service {
 
         info!(start_block = start_block, end_block = confirmed_tip, "Syncing historical data");
 
-        // Step 2: Setup the live streaming buffer
-        // This channel will accumulate while historical sync is running
-        let (live_block_buffer_sender, live_block_buffer_receiver) =
-            mpsc::channel::(MAX_BUFFERED_MESSAGES);
-
-        // The cutoff is the last block we have synced historically
-        // Any block > cutoff will come from the live stream
-        let cutoff = confirmed_tip;
-        // This task runs independently, accumulating new blocks while historical data is syncing
         tokio::spawn(async move {
-            Self::stream_live_blocks(
-                cutoff + 1,
-                subscription,
-                live_block_buffer_sender,
-                block_confirmations,
-                max_block_range,
-                reorg_handler,
-            )
-            .await;
-        });
-
-        tokio::spawn(async move {
-            // Step 4: Perform historical synchronization
-            // This processes blocks from start_block to end_block (cutoff)
-            // If this fails, we need to abort the live streaming task
-            Self::stream_historical_blocks(start_block, confirmed_tip, 
max_block_range, &sender) + while start_block < confirmed_tip { + Self::stream_historical_blocks( + start_block, + confirmed_tip, + max_block_range, + &sender, + ) .await; + let latest = match provider.get_block_by_number(BlockNumberOrTag::Latest).await { + Ok(block) => block.header().number(), + Err(e) => { + error!(error = %e, "Error latest block when calculating next historical batch, shutting down"); + _ = sender.try_stream(e).await; + return; + } + }; + + start_block = confirmed_tip + 1; + confirmed_tip = latest.saturating_sub(block_confirmations); + } + info!("Chain tip reached, switching to live"); + let subscription = match provider.subscribe_blocks().await { + Ok(sub) => sub, + Err(e) => { + error!(error = %e, "Error subscribing to live blocks, shutting down"); + _ = sender.try_stream(e).await; + return; + } + }; + if !sender.try_stream(ScannerStatus::SwitchingToLive).await { return; } info!("Successfully transitioned from historical to live data"); - // Step 5: - // Spawn the buffer processor task - // This will: - // 1. Process all buffered blocks, filtering out any ≤ cutoff - // 2. Forward blocks > cutoff to the user - // 3. Continue forwarding until the buffer if exhausted (waits for new blocks from live - // stream) - Self::process_live_block_buffer(live_block_buffer_receiver, sender, cutoff).await; + Self::stream_live_blocks( + start_block, + subscription, + sender, + block_confirmations, + max_block_range, + reorg_handler, + ) + .await; }); Ok(()) @@ -549,7 +554,7 @@ impl Service { } Err(e) => { error!(error = %e, "Terminal RPC call error, shutting down"); - _ = sender.try_stream(e); + _ = sender.try_stream(e).await; return; } }; @@ -571,7 +576,7 @@ impl Service { } Err(e) => { error!(error = %e, "Terminal RPC call error, shutting down"); - _ = sender.try_stream(e); + _ = sender.try_stream(e).await; return; } }; @@ -678,48 +683,6 @@ impl Service { } } } - - async fn process_live_block_buffer( - mut buffer_rx: mpsc::Receiver, - sender: mpsc::Sender, - cutoff: BlockNumber, - ) { - let mut processed = 0; - let mut discarded = 0; - - // Process all buffered messages - while let Some(data) = buffer_rx.recv().await { - match data { - Message::Data(range) => { - let (start, end) = (*range.start(), *range.end()); - if start >= cutoff { - if !sender.try_stream(range).await { - break; - } - processed += end - start; - } else if end >= cutoff { - discarded += cutoff - start; - - let start = cutoff; - if !sender.try_stream(start..=end).await { - break; - } - processed += end - start; - } else { - discarded += end - start; - } - } - other => { - // Could be error or status - if !sender.try_stream(other).await { - break; - } - } - } - } - - info!(processed = processed, discarded = discarded, "Finished processing live messages"); - } } async fn reorg_detected( @@ -877,8 +840,7 @@ impl BlockRangeScannerClient { #[cfg(test)] mod tests { use super::*; - use crate::{assert_closed, assert_next}; - use alloy::{eips::BlockId, network::Ethereum}; + use alloy::eips::BlockId; use tokio::sync::mpsc; #[test] @@ -897,87 +859,88 @@ mod tests { assert_eq!(scanner.max_block_range, max_block_range); } - #[tokio::test] - async fn buffered_messages_after_cutoff_are_all_passed() { - let cutoff = 50; - let (buffer_tx, buffer_rx) = mpsc::channel(8); - buffer_tx.send(Message::Data(51..=55)).await.unwrap(); - buffer_tx.send(Message::Data(56..=60)).await.unwrap(); - buffer_tx.send(Message::Data(61..=70)).await.unwrap(); - drop(buffer_tx); - - let (out_tx, out_rx) = mpsc::channel(8); - 
Service::::process_live_block_buffer(buffer_rx, out_tx, cutoff).await; - - let mut stream = ReceiverStream::new(out_rx); - - assert_next!(stream, 51..=55); - assert_next!(stream, 56..=60); - assert_next!(stream, 61..=70); - assert_closed!(stream); - } - - #[tokio::test] - async fn ranges_entirely_before_cutoff_are_discarded() { - let cutoff = 100; - - let (buffer_tx, buffer_rx) = mpsc::channel(8); - buffer_tx.send(Message::Data(40..=50)).await.unwrap(); - buffer_tx.send(Message::Data(51..=60)).await.unwrap(); - buffer_tx.send(Message::Data(61..=70)).await.unwrap(); - drop(buffer_tx); - - let (out_tx, out_rx) = mpsc::channel(8); - Service::::process_live_block_buffer(buffer_rx, out_tx, cutoff).await; - - let mut stream = ReceiverStream::new(out_rx); - - assert_closed!(stream); - } - - #[tokio::test] - async fn ranges_overlapping_cutoff_are_trimmed() { - let cutoff = 75; - - let (buffer_tx, buffer_rx) = mpsc::channel(8); - buffer_tx.send(Message::Data(60..=70)).await.unwrap(); - buffer_tx.send(Message::Data(71..=80)).await.unwrap(); - buffer_tx.send(Message::Data(81..=86)).await.unwrap(); - drop(buffer_tx); - - let (out_tx, out_rx) = mpsc::channel(8); - Service::::process_live_block_buffer(buffer_rx, out_tx, cutoff).await; - - let mut stream = ReceiverStream::new(out_rx); - - assert_next!(stream, 75..=80); - assert_next!(stream, 81..=86); - assert_closed!(stream); - } - - #[tokio::test] - async fn edge_case_range_exactly_at_cutoff() { - let cutoff = 100; - - let (buffer_tx, buffer_rx) = mpsc::channel(8); - buffer_tx.send(Message::Data(98..=98)).await.unwrap(); // Just before: discard - buffer_tx.send(Message::Data(99..=100)).await.unwrap(); // Includes cutoff: trim to 100..=100 - buffer_tx.send(Message::Data(100..=100)).await.unwrap(); // Exactly at: forward - buffer_tx.send(Message::Data(100..=101)).await.unwrap(); // Starts at cutoff: forward - buffer_tx.send(Message::Data(102..=102)).await.unwrap(); // After cutoff: forward - drop(buffer_tx); - - let (out_tx, out_rx) = mpsc::channel(8); - Service::::process_live_block_buffer(buffer_rx, out_tx, cutoff).await; - - let mut stream = ReceiverStream::new(out_rx); - - assert_next!(stream, 100..=100); - assert_next!(stream, 100..=100); - assert_next!(stream, 100..=101); - assert_next!(stream, 102..=102); - assert_closed!(stream); - } + // TODO: update to valid handle_sync tests + // #[tokio::test] + // async fn buffered_messages_after_cutoff_are_all_passed() { + // let cutoff = 50; + // let (buffer_tx, buffer_rx) = mpsc::channel(8); + // buffer_tx.send(Message::Data(51..=55)).await.unwrap(); + // buffer_tx.send(Message::Data(56..=60)).await.unwrap(); + // buffer_tx.send(Message::Data(61..=70)).await.unwrap(); + // drop(buffer_tx); + + // let (out_tx, out_rx) = mpsc::channel(8); + // Service::::process_live_block_buffer(buffer_rx, out_tx, cutoff).await; + + // let mut stream = ReceiverStream::new(out_rx); + + // assert_next!(stream, 51..=55); + // assert_next!(stream, 56..=60); + // assert_next!(stream, 61..=70); + // assert_closed!(stream); + // } + + // #[tokio::test] + // async fn ranges_entirely_before_cutoff_are_discarded() { + // let cutoff = 100; + + // let (buffer_tx, buffer_rx) = mpsc::channel(8); + // buffer_tx.send(Message::Data(40..=50)).await.unwrap(); + // buffer_tx.send(Message::Data(51..=60)).await.unwrap(); + // buffer_tx.send(Message::Data(61..=70)).await.unwrap(); + // drop(buffer_tx); + + // let (out_tx, out_rx) = mpsc::channel(8); + // Service::::process_live_block_buffer(buffer_rx, out_tx, cutoff).await; + + // let mut stream 
= ReceiverStream::new(out_rx); + + // assert_closed!(stream); + // } + + // #[tokio::test] + // async fn ranges_overlapping_cutoff_are_trimmed() { + // let cutoff = 75; + + // let (buffer_tx, buffer_rx) = mpsc::channel(8); + // buffer_tx.send(Message::Data(60..=70)).await.unwrap(); + // buffer_tx.send(Message::Data(71..=80)).await.unwrap(); + // buffer_tx.send(Message::Data(81..=86)).await.unwrap(); + // drop(buffer_tx); + + // let (out_tx, out_rx) = mpsc::channel(8); + // Service::::process_live_block_buffer(buffer_rx, out_tx, cutoff).await; + + // let mut stream = ReceiverStream::new(out_rx); + + // assert_next!(stream, 75..=80); + // assert_next!(stream, 81..=86); + // assert_closed!(stream); + // } + + // #[tokio::test] + // async fn edge_case_range_exactly_at_cutoff() { + // let cutoff = 100; + + // let (buffer_tx, buffer_rx) = mpsc::channel(8); + // buffer_tx.send(Message::Data(98..=98)).await.unwrap(); // Just before: discard + // buffer_tx.send(Message::Data(99..=100)).await.unwrap(); // Includes cutoff: trim to + // 100..=100 buffer_tx.send(Message::Data(100..=100)).await.unwrap(); // Exactly at: + // forward buffer_tx.send(Message::Data(100..=101)).await.unwrap(); // Starts at cutoff: + // forward buffer_tx.send(Message::Data(102..=102)).await.unwrap(); // After cutoff: + // forward drop(buffer_tx); + + // let (out_tx, out_rx) = mpsc::channel(8); + // Service::::process_live_block_buffer(buffer_rx, out_tx, cutoff).await; + + // let mut stream = ReceiverStream::new(out_rx); + + // assert_next!(stream, 100..=100); + // assert_next!(stream, 100..=100); + // assert_next!(stream, 100..=101); + // assert_next!(stream, 102..=102); + // assert_closed!(stream); + // } #[tokio::test] async fn try_send_forwards_errors_to_subscribers() { From b58e3a912c77eba0752c64ea2ea5d1d9185ad627 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Wed, 12 Nov 2025 12:28:35 +0100 Subject: [PATCH 12/63] feat: major update to reorg logic (incl. 
historic reorg) --- src/block_range_scanner.rs | 118 +++++++++++++++++------ src/block_range_scanner/reorg_handler.rs | 63 +++++++----- src/block_range_scanner/ring_buffer.rs | 4 + 3 files changed, 128 insertions(+), 57 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 6d37e650..97017189 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -301,7 +301,7 @@ impl Service { ) -> Result<(), ScannerError> { let max_block_range = self.max_block_range; let latest = self.provider.get_block_number().await?; - let reorg_handler = self.reorg_handler.clone(); + let mut reorg_handler = self.reorg_handler.clone(); // the next block returned by the underlying subscription will always be `latest + 1`, // because `latest` was already mined and subscription by definition only streams after new @@ -319,7 +319,7 @@ impl Service { sender, block_confirmations, max_block_range, - reorg_handler, + &mut reorg_handler, ) .await; }); @@ -350,12 +350,15 @@ impl Service { info!(start_block = start_block_num, end_block = end_block_num, "Syncing historical data"); + let mut reorg_handler = self.reorg_handler.clone(); + tokio::spawn(async move { Self::stream_historical_blocks( start_block_num, end_block_num, max_block_range, &sender, + &mut reorg_handler, ) .await; }); @@ -371,7 +374,7 @@ impl Service { ) -> Result<(), ScannerError> { let provider = self.provider.clone(); let max_block_range = self.max_block_range; - let reorg_handler = self.reorg_handler.clone(); + let mut reorg_handler = self.reorg_handler.clone(); let get_start_block = async || -> Result { let block = match start_height { @@ -411,7 +414,7 @@ impl Service { sender, block_confirmations, max_block_range, - reorg_handler, + &mut reorg_handler, ) .await; }); @@ -429,6 +432,7 @@ impl Service { confirmed_tip, max_block_range, &sender, + &mut reorg_handler, ) .await; @@ -468,7 +472,7 @@ impl Service { sender, block_confirmations, max_block_range, - reorg_handler, + &mut reorg_handler, ) .await; }); @@ -595,6 +599,7 @@ impl Service { end: BlockNumber, max_block_range: u64, sender: &mpsc::Sender, + reorg_handler: &mut ReorgHandler, ) { let mut batch_count = 0; @@ -615,36 +620,74 @@ impl Service { debug!(batch_count = batch_count, "Processed historical batches"); } - if batch_end_block_number == end { - break; - } - - // Next block number always exists as we checked end block previously - let next_start_block_number = batch_end_block_number.saturating_add(1); + let reorged_opt = + match reorg_handler.check_by_block_number(batch_end_block_number).await { + Ok(opt) => opt, + Err(e) => { + error!(error = %e, "Failed to perform reorg check"); + _ = sender.try_stream(e).await; + return; + } + }; - next_start_block = next_start_block_number; + next_start_block = if let Some(common_ancestor) = reorged_opt { + if !sender.try_stream(ScannerStatus::ReorgDetected).await { + return; + } + common_ancestor + 1 + } else { + batch_end_block_number.saturating_add(1) + }; } info!(batch_count = batch_count, "Historical sync completed"); } async fn stream_live_blocks( - mut range_start: BlockNumber, + stream_start: BlockNumber, subscription: Subscription, sender: mpsc::Sender, block_confirmations: u64, max_block_range: u64, - mut reorg_handler: ReorgHandler, + reorg_handler: &mut ReorgHandler, ) { - // ensure we start streaming only after the expected_next_block cutoff - let cutoff = range_start; - let mut stream = subscription.into_stream().skip_while(|header| header.number() < cutoff); + // ensure we start streaming 
only after the specified starting block + + let mut stream = subscription.into_stream().skip_while(|header| { + header.number().saturating_sub(block_confirmations) < stream_start + }); + + let Some(incoming_block) = stream.next().await else { + warn!("Subscription channel closed"); + return; + }; + + let incoming_block_num = incoming_block.number(); + info!(block_number = incoming_block_num, "Received block header"); + + let confirmed = incoming_block_num.saturating_sub(block_confirmations); + // TODO: stream `batch_start..=batch_end` in batches of `max_block_range` + // (e.g. in case max_block_range=2, and the range is 100..=106) + + let mut batch_start = stream_start; + let mut inner_batch_start = batch_start; + let mut batch_end; + loop { + batch_end = confirmed.min(inner_batch_start.saturating_add(max_block_range - 1)); + if !sender.try_stream(inner_batch_start..=batch_end).await { + return; + } + if batch_end == confirmed { + break; + } + inner_batch_start = batch_end + 1; + } while let Some(incoming_block) = stream.next().await { let incoming_block_num = incoming_block.number(); info!(block_number = incoming_block_num, "Received block header"); - let reorged_opt = match reorg_handler.check(incoming_block).await { + let reorged_opt = match reorg_handler.check_by_block_number(batch_end).await { Ok(opt) => opt, Err(e) => { error!(error = %e, "Failed to perform reorg check"); @@ -654,32 +697,45 @@ impl Service { }; if let Some(common_ancestor) = reorged_opt { - if common_ancestor < range_start { + if common_ancestor < batch_start { if !sender.try_stream(ScannerStatus::ReorgDetected).await { return; } - // updated expected block to updated confirmed - range_start = common_ancestor + 1; + // no need to stream blocks prior to the previously specified starting block + if common_ancestor < stream_start { + batch_start = stream_start; + } else { + batch_start = common_ancestor + 1; + } } // TODO: explain in docs that the returned block after a reorg will be the // first confirmed block that is smaller between: // - the first post-reorg block // - the previous range_start + } else { + // no reorg happened, move the block range start + // + // SAFETY: Overflow cannot realistically happen + batch_start = batch_end + 1; } let confirmed = incoming_block_num.saturating_sub(block_confirmations); - if confirmed >= range_start { - // NOTE: Edge case when difference between range end and range start >= max - // reads - let range_end = confirmed.min(range_start.saturating_add(max_block_range - 1)); - - if !sender.try_stream(range_start..=range_end).await { - return; + if confirmed >= batch_start { + let mut inner_batch_start = batch_start; + loop { + // NOTE: Edge case when difference between range end and range start >= max + // reads + batch_end = + confirmed.min(inner_batch_start.saturating_add(max_block_range - 1)); + if !sender.try_stream(inner_batch_start..=batch_end).await { + return; + } + if batch_end == confirmed { + break; + } + inner_batch_start = batch_end + 1; } - - // Overflow can not realistically happen - range_start = range_end + 1; } } } diff --git a/src/block_range_scanner/reorg_handler.rs b/src/block_range_scanner/reorg_handler.rs index 80e53000..e2849f4a 100644 --- a/src/block_range_scanner/reorg_handler.rs +++ b/src/block_range_scanner/reorg_handler.rs @@ -24,30 +24,49 @@ impl ReorgHandler { Self { provider, buffer: RingBuffer::new(10) } } + pub async fn check_by_block_number( + &mut self, + block: impl Into, + ) -> Result, ScannerError> { + let block = 
self.provider.get_block_by_number(block.into()).await?; + self.check(block.header()).await + } + pub async fn check( &mut self, - incoming_block: N::HeaderResponse, + block: &N::HeaderResponse, ) -> Result, ScannerError> { - if !self.reorg_detected().await? { + if !self.reorg_detected(block).await? { // store the incoming block's hash for future reference - self.buffer.push(incoming_block.hash()); + self.buffer.push(block.hash()); return Ok(None); } info!("Reorg detected, searching for common ancestor"); - // last block hash definitely doesn't exist on-chain - _ = self.buffer.pop_back(); - while let Some(&block_hash) = self.buffer.back() { info!(block_hash = %block_hash, "Checking if block exists on-chain"); match self.provider.get_block_by_hash(block_hash).await { Ok(common_ancestor) => { - let header = common_ancestor.header(); - info!(common_ancestor = %header.hash(), block_number = header.number(), "Common ancestor found"); - // store the incoming block's hash for future reference - self.buffer.push(incoming_block.hash()); - return Ok(Some(header.number())); + let common_ancestor = common_ancestor.header(); + + let finalized = + self.provider.get_block_by_number(BlockNumberOrTag::Finalized).await?; + let finalized = finalized.header(); + + let common_ancestor = if finalized.number() <= common_ancestor.number() { + info!(common_ancestor = %common_ancestor.hash(), block_number = common_ancestor.number(), "Common ancestor found"); + common_ancestor + } else { + warn!( + finalized_hash = %finalized.hash(), block_number = finalized.number(), "Possible deep reorg detected, using finalized block as common ancestor" + ); + // all buffered blocks are finalized, so no more need to track them + self.buffer.clear(); + finalized + }; + + return Ok(Some(common_ancestor.number())); } Err(robust_provider::Error::BlockNotFound(_)) => { // block was reorged @@ -57,32 +76,24 @@ impl ReorgHandler { } } - warn!("Deep reorg detected, setting finalized block as common ancestor"); + warn!("Possible deep reorg detected, setting finalized block as common ancestor"); let finalized = self.provider.get_block_by_number(BlockNumberOrTag::Finalized).await?; // no need to store finalized block's hash in the buffer, as it is returned by default only // if not buffered hashes exist on-chain - // store the incoming block's hash for future reference - self.buffer.push(incoming_block.hash()); - let header = finalized.header(); - info!(common_ancestor = %header.hash(), block_number = header.number(), "Finalized block set as common ancestor"); + info!(finalized_hash = %header.hash(), block_number = header.number(), "Finalized block set as common ancestor"); Ok(Some(header.number())) } - async fn reorg_detected(&self) -> Result { - match self.buffer.back() { - Some(last_streamed_block_hash) => { - match self.provider.get_block_by_hash(*last_streamed_block_hash).await { - Ok(_) => Ok(false), - Err(robust_provider::Error::BlockNotFound(_)) => Ok(true), - Err(e) => Err(e.into()), - } - } - None => Ok(false), + async fn reorg_detected(&self, block: &N::HeaderResponse) -> Result { + match self.provider.get_block_by_hash(block.hash()).await { + Ok(_) => Ok(false), + Err(robust_provider::Error::BlockNotFound(_)) => Ok(true), + Err(e) => Err(e.into()), } } } diff --git a/src/block_range_scanner/ring_buffer.rs b/src/block_range_scanner/ring_buffer.rs index 28a8d699..22ca6cd8 100644 --- a/src/block_range_scanner/ring_buffer.rs +++ b/src/block_range_scanner/ring_buffer.rs @@ -28,4 +28,8 @@ impl RingBuffer { pub fn back(&self) -> 
Option<&T> { self.inner.back() } + + pub fn clear(&mut self) { + self.inner.clear() + } } From 431e20204194ced28e1d9901d9c5f09005a7afb6 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Wed, 12 Nov 2025 12:54:19 +0100 Subject: [PATCH 13/63] fix: always check reorgs by block hash --- src/block_range_scanner.rs | 96 ++++++++++++++++-------- src/block_range_scanner/reorg_handler.rs | 10 +-- tests/live/reorg.rs | 2 +- 3 files changed, 66 insertions(+), 42 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 97017189..a705d966 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -301,6 +301,7 @@ impl Service { ) -> Result<(), ScannerError> { let max_block_range = self.max_block_range; let latest = self.provider.get_block_number().await?; + let provider = self.provider.clone(); let mut reorg_handler = self.reorg_handler.clone(); // the next block returned by the underlying subscription will always be `latest + 1`, @@ -317,6 +318,7 @@ impl Service { range_start, subscription, sender, + &provider, block_confirmations, max_block_range, &mut reorg_handler, @@ -334,6 +336,7 @@ impl Service { sender: mpsc::Sender, ) -> Result<(), ScannerError> { let max_block_range = self.max_block_range; + let provider = self.provider.clone(); let (start_block, end_block) = tokio::try_join!( self.provider.get_block_by_number(start_height), @@ -358,6 +361,7 @@ impl Service { end_block_num, max_block_range, &sender, + &provider, &mut reorg_handler, ) .await; @@ -412,6 +416,7 @@ impl Service { start_block, subscription, sender, + &provider, block_confirmations, max_block_range, &mut reorg_handler, @@ -432,6 +437,7 @@ impl Service { confirmed_tip, max_block_range, &sender, + &provider, &mut reorg_handler, ) .await; @@ -470,6 +476,7 @@ impl Service { start_block, subscription, sender, + &provider, block_confirmations, max_block_range, &mut reorg_handler, @@ -599,6 +606,7 @@ impl Service { end: BlockNumber, max_block_range: u64, sender: &mpsc::Sender, + provider: &RobustProvider, reorg_handler: &mut ReorgHandler, ) { let mut batch_count = 0; @@ -608,10 +616,17 @@ impl Service { // must be <= to include the edge case when start == end (i.e. 
return the single block // range) while next_start_block <= end { - let batch_end_block_number = - next_start_block.saturating_add(max_block_range - 1).min(end); + let batch_end_num = next_start_block.saturating_add(max_block_range - 1).min(end); + let batch_end = match provider.get_block_by_number(batch_end_num.into()).await { + Ok(block) => block.header().clone(), + Err(e) => { + error!(batch_start = next_start_block, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); + _ = sender.try_stream(e).await; + return; + } + }; - if !sender.try_stream(next_start_block..=batch_end_block_number).await { + if !sender.try_stream(next_start_block..=batch_end_num).await { break; } @@ -620,15 +635,14 @@ impl Service { debug!(batch_count = batch_count, "Processed historical batches"); } - let reorged_opt = - match reorg_handler.check_by_block_number(batch_end_block_number).await { - Ok(opt) => opt, - Err(e) => { - error!(error = %e, "Failed to perform reorg check"); - _ = sender.try_stream(e).await; - return; - } - }; + let reorged_opt = match reorg_handler.check(&batch_end).await { + Ok(opt) => opt, + Err(e) => { + error!(error = %e, "Failed to perform reorg check"); + _ = sender.try_stream(e).await; + return; + } + }; next_start_block = if let Some(common_ancestor) = reorged_opt { if !sender.try_stream(ScannerStatus::ReorgDetected).await { @@ -636,7 +650,7 @@ impl Service { } common_ancestor + 1 } else { - batch_end_block_number.saturating_add(1) + batch_end_num.saturating_add(1) }; } @@ -647,6 +661,7 @@ impl Service { stream_start: BlockNumber, subscription: Subscription, sender: mpsc::Sender, + provider: &RobustProvider, block_confirmations: u64, max_block_range: u64, reorg_handler: &mut ReorgHandler, @@ -673,21 +688,30 @@ impl Service { let mut inner_batch_start = batch_start; let mut batch_end; loop { - batch_end = confirmed.min(inner_batch_start.saturating_add(max_block_range - 1)); - if !sender.try_stream(inner_batch_start..=batch_end).await { + let batch_end_num = + confirmed.min(inner_batch_start.saturating_add(max_block_range - 1)); + batch_end = match provider.get_block_by_number(batch_end_num.into()).await { + Ok(block) => block.header().clone(), + Err(e) => { + error!(batch_start = inner_batch_start, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); + _ = sender.try_stream(e).await; + return; + } + }; + if !sender.try_stream(inner_batch_start..=batch_end_num).await { return; } - if batch_end == confirmed { + if batch_end_num == confirmed { break; } - inner_batch_start = batch_end + 1; + inner_batch_start = batch_end_num + 1; } while let Some(incoming_block) = stream.next().await { let incoming_block_num = incoming_block.number(); info!(block_number = incoming_block_num, "Received block header"); - let reorged_opt = match reorg_handler.check_by_block_number(batch_end).await { + let reorged_opt = match reorg_handler.check(&batch_end).await { Ok(opt) => opt, Err(e) => { error!(error = %e, "Failed to perform reorg check"); @@ -697,16 +721,14 @@ impl Service { }; if let Some(common_ancestor) = reorged_opt { - if common_ancestor < batch_start { - if !sender.try_stream(ScannerStatus::ReorgDetected).await { - return; - } - // no need to stream blocks prior to the previously specified starting block - if common_ancestor < stream_start { - batch_start = stream_start; - } else { - batch_start = common_ancestor + 1; - } + if !sender.try_stream(ScannerStatus::ReorgDetected).await { + return; + } + // no need to stream 
blocks prior to the previously specified starting block + if common_ancestor < stream_start { + batch_start = stream_start; + } else { + batch_start = common_ancestor + 1; } // TODO: explain in docs that the returned block after a reorg will be the @@ -717,7 +739,7 @@ impl Service { // no reorg happened, move the block range start // // SAFETY: Overflow cannot realistically happen - batch_start = batch_end + 1; + batch_start = batch_end.number() + 1; } let confirmed = incoming_block_num.saturating_sub(block_confirmations); @@ -726,15 +748,23 @@ impl Service { loop { // NOTE: Edge case when difference between range end and range start >= max // reads - batch_end = + let batch_end_num = confirmed.min(inner_batch_start.saturating_add(max_block_range - 1)); - if !sender.try_stream(inner_batch_start..=batch_end).await { + batch_end = match provider.get_block_by_number(batch_end_num.into()).await { + Ok(block) => block.header().clone(), + Err(e) => { + error!(batch_start = inner_batch_start, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); + _ = sender.try_stream(e).await; + return; + } + }; + if !sender.try_stream(inner_batch_start..=batch_end_num).await { return; } - if batch_end == confirmed { + if batch_end_num == confirmed { break; } - inner_batch_start = batch_end + 1; + inner_batch_start = batch_end_num + 1; } } } diff --git a/src/block_range_scanner/reorg_handler.rs b/src/block_range_scanner/reorg_handler.rs index e2849f4a..3f5e151f 100644 --- a/src/block_range_scanner/reorg_handler.rs +++ b/src/block_range_scanner/reorg_handler.rs @@ -24,19 +24,13 @@ impl ReorgHandler { Self { provider, buffer: RingBuffer::new(10) } } - pub async fn check_by_block_number( - &mut self, - block: impl Into, - ) -> Result, ScannerError> { - let block = self.provider.get_block_by_number(block.into()).await?; - self.check(block.header()).await - } - pub async fn check( &mut self, block: &N::HeaderResponse, ) -> Result, ScannerError> { + info!(block_hash = %block.hash(), block_number = block.number(), "Checking if block was reorged"); if !self.reorg_detected(block).await? 
{ + info!(block_hash = %block.hash(), block_number = block.number(), "No reorg detected"); // store the incoming block's hash for future reference self.buffer.push(block.hash()); return Ok(None); diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs index 5a96d764..d566aca1 100644 --- a/tests/live/reorg.rs +++ b/tests/live/reorg.rs @@ -90,7 +90,7 @@ async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { Ok(()) } -#[tokio::test] +#[test_log::test(tokio::test)] async fn reorg_depth_one() -> anyhow::Result<()> { let LiveScannerSetup { provider, contract, scanner, mut stream, anvil: _anvil } = setup_live_scanner(None, None, 0).await?; From f6812087a5445336797cd4c7be6b1776fbcc7048 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 13 Nov 2025 07:58:35 +0100 Subject: [PATCH 14/63] test: sync::block_confirmations_mitigate_reorgs assert historic before emitting live --- tests/sync/from_block.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/sync/from_block.rs b/tests/sync/from_block.rs index 1b146791..7bba36a6 100644 --- a/tests/sync/from_block.rs +++ b/tests/sync/from_block.rs @@ -88,11 +88,6 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { scanner.start().await?; - // emit "live" events - for _ in 0..2 { - contract.increase().send().await?.watch().await?; - } - // assert historic events are streamed in a batch assert_next!( stream, @@ -101,6 +96,12 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { TestCounter::CountIncreased { newCount: U256::from(2) } ] ); + + // emit "live" events + for _ in 0..2 { + contract.increase().send().await?.watch().await?; + } + // switching to "live" phase assert_next!(stream, ScannerStatus::SwitchingToLive); // assert confirmed live events are streamed separately From 42e6e2c730f18b86d9727c1cdfae0cc4ec2210d5 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 13 Nov 2025 07:59:58 +0100 Subject: [PATCH 15/63] test: reorgs assert historic before emitting live assert historic before emitting live --- tests/sync/from_block.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/sync/from_block.rs b/tests/sync/from_block.rs index 7bba36a6..d8b3decc 100644 --- a/tests/sync/from_block.rs +++ b/tests/sync/from_block.rs @@ -22,10 +22,6 @@ async fn replays_historical_then_switches_to_live() -> anyhow::Result<()> { scanner.start().await?; - // now emit new events - contract.increase().send().await?.watch().await?; - contract.increase().send().await?.watch().await?; - // historical events assert_next!( stream, @@ -36,6 +32,10 @@ async fn replays_historical_then_switches_to_live() -> anyhow::Result<()> { ] ); + // now emit new events + contract.increase().send().await?.watch().await?; + contract.increase().send().await?.watch().await?; + // chain tip reached assert_next!(stream, ScannerStatus::SwitchingToLive); From a315ff6cbddb4b55df736763af7df163a554f97d Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 13 Nov 2025 08:24:09 +0100 Subject: [PATCH 16/63] fix: update batch_end on reorg --- src/block_range_scanner.rs | 35 ++++++++++++++---------- src/block_range_scanner/reorg_handler.rs | 23 +++++++++------- 2 files changed, 34 insertions(+), 24 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index a705d966..b4149cc4 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -618,7 +618,7 @@ impl Service { while next_start_block <= end { let batch_end_num = 
next_start_block.saturating_add(max_block_range - 1).min(end); let batch_end = match provider.get_block_by_number(batch_end_num.into()).await { - Ok(block) => block.header().clone(), + Ok(block) => block, Err(e) => { error!(batch_start = next_start_block, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); _ = sender.try_stream(e).await; @@ -648,7 +648,7 @@ impl Service { if !sender.try_stream(ScannerStatus::ReorgDetected).await { return; } - common_ancestor + 1 + common_ancestor.header().number() + 1 } else { batch_end_num.saturating_add(1) }; @@ -691,7 +691,7 @@ impl Service { let batch_end_num = confirmed.min(inner_batch_start.saturating_add(max_block_range - 1)); batch_end = match provider.get_block_by_number(batch_end_num.into()).await { - Ok(block) => block.header().clone(), + Ok(block) => Some(block), Err(e) => { error!(batch_start = inner_batch_start, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); _ = sender.try_stream(e).await; @@ -711,13 +711,16 @@ impl Service { let incoming_block_num = incoming_block.number(); info!(block_number = incoming_block_num, "Received block header"); - let reorged_opt = match reorg_handler.check(&batch_end).await { - Ok(opt) => opt, - Err(e) => { - error!(error = %e, "Failed to perform reorg check"); - _ = sender.try_stream(e).await; - return; - } + let reorged_opt = match batch_end.as_ref() { + None => None, + Some(batch_end) => match reorg_handler.check(batch_end).await { + Ok(opt) => opt, + Err(e) => { + error!(error = %e, "Failed to perform reorg check"); + _ = sender.try_stream(e).await; + return; + } + }, }; if let Some(common_ancestor) = reorged_opt { @@ -725,10 +728,12 @@ impl Service { return; } // no need to stream blocks prior to the previously specified starting block - if common_ancestor < stream_start { + if common_ancestor.header().number() < stream_start { batch_start = stream_start; + batch_end = None; } else { - batch_start = common_ancestor + 1; + batch_start = common_ancestor.header().number() + 1; + batch_end = Some(common_ancestor); } // TODO: explain in docs that the returned block after a reorg will be the @@ -739,7 +744,9 @@ impl Service { // no reorg happened, move the block range start // // SAFETY: Overflow cannot realistically happen - batch_start = batch_end.number() + 1; + if let Some(batch_end) = batch_end.as_ref() { + batch_start = batch_end.header().number() + 1; + } } let confirmed = incoming_block_num.saturating_sub(block_confirmations); @@ -751,7 +758,7 @@ impl Service { let batch_end_num = confirmed.min(inner_batch_start.saturating_add(max_block_range - 1)); batch_end = match provider.get_block_by_number(batch_end_num.into()).await { - Ok(block) => block.header().clone(), + Ok(block) => Some(block), Err(e) => { error!(batch_start = inner_batch_start, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); _ = sender.try_stream(e).await; diff --git a/src/block_range_scanner/reorg_handler.rs b/src/block_range_scanner/reorg_handler.rs index 3f5e151f..367b1096 100644 --- a/src/block_range_scanner/reorg_handler.rs +++ b/src/block_range_scanner/reorg_handler.rs @@ -2,7 +2,7 @@ use alloy::{ consensus::BlockHeader, eips::BlockNumberOrTag, network::{BlockResponse, Ethereum, Network, primitives::HeaderResponse}, - primitives::{BlockHash, BlockNumber}, + primitives::BlockHash, }; use tracing::{info, warn}; @@ -26,8 +26,9 @@ impl ReorgHandler { pub async fn check( &mut self, - block: &N::HeaderResponse, - ) -> 
Result, ScannerError> { + block: &N::BlockResponse, + ) -> Result, ScannerError> { + let block = block.header(); info!(block_hash = %block.hash(), block_number = block.number(), "Checking if block was reorged"); if !self.reorg_detected(block).await? { info!(block_hash = %block.hash(), block_number = block.number(), "No reorg detected"); @@ -42,25 +43,27 @@ impl ReorgHandler { info!(block_hash = %block_hash, "Checking if block exists on-chain"); match self.provider.get_block_by_hash(block_hash).await { Ok(common_ancestor) => { - let common_ancestor = common_ancestor.header(); + let common_ancestor_header = common_ancestor.header(); let finalized = self.provider.get_block_by_number(BlockNumberOrTag::Finalized).await?; - let finalized = finalized.header(); + let finalized_header = finalized.header(); - let common_ancestor = if finalized.number() <= common_ancestor.number() { - info!(common_ancestor = %common_ancestor.hash(), block_number = common_ancestor.number(), "Common ancestor found"); + let common_ancestor = if finalized_header.number() <= + common_ancestor_header.number() + { + info!(common_ancestor = %common_ancestor_header.hash(), block_number = common_ancestor_header.number(), "Common ancestor found"); common_ancestor } else { warn!( - finalized_hash = %finalized.hash(), block_number = finalized.number(), "Possible deep reorg detected, using finalized block as common ancestor" + finalized_hash = %finalized_header.hash(), block_number = finalized_header.number(), "Possible deep reorg detected, using finalized block as common ancestor" ); // all buffered blocks are finalized, so no more need to track them self.buffer.clear(); finalized }; - return Ok(Some(common_ancestor.number())); + return Ok(Some(common_ancestor)); } Err(robust_provider::Error::BlockNotFound(_)) => { // block was reorged @@ -80,7 +83,7 @@ impl ReorgHandler { let header = finalized.header(); info!(finalized_hash = %header.hash(), block_number = header.number(), "Finalized block set as common ancestor"); - Ok(Some(header.number())) + Ok(Some(finalized)) } async fn reorg_detected(&self, block: &N::HeaderResponse) -> Result { From 9a6318270251ba291b24e0d0b4975c514de7fa3f Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 13 Nov 2025 11:46:04 +0100 Subject: [PATCH 17/63] ref: use get_latest_confirmed instead of manual calc --- src/block_range_scanner.rs | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index d4d0a862..58f40a86 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -300,14 +300,14 @@ impl Service { sender: mpsc::Sender, ) -> Result<(), ScannerError> { let max_block_range = self.max_block_range; - let latest = self.provider.get_block_number().await?; + let confirmed = self.provider.get_latest_confirmed(block_confirmations).await?; let provider = self.provider.clone(); let mut reorg_handler = self.reorg_handler.clone(); - // the next block returned by the underlying subscription will always be `latest + 1`, - // because `latest` was already mined and subscription by definition only streams after new - // blocks have been mined - let range_start = (latest + 1).saturating_sub(block_confirmations); + // the next block returned by the underlying subscription will always be `confirmed + 1`, + // because `confirmed` was already mined and subscription by definition only streams after + // new blocks have been mined + let stream_start = confirmed + 1; let subscription = 
self.provider.subscribe_blocks().await?; @@ -315,7 +315,7 @@ impl Service { tokio::spawn(async move { Self::stream_live_blocks( - range_start, + stream_start, subscription, sender, &provider, @@ -439,17 +439,16 @@ impl Service { ) .await; - let latest = match provider.get_block_by_number(BlockNumberOrTag::Latest).await { - Ok(block) => block.header().number(), + start_block = confirmed_tip + 1; + + confirmed_tip = match provider.get_latest_confirmed(block_confirmations).await { + Ok(number) => number, Err(e) => { error!(error = %e, "Error fetching latest block when calculating next historical batch, shutting down"); _ = sender.try_stream(e).await; return; } }; - - start_block = confirmed_tip + 1; - confirmed_tip = latest.saturating_sub(block_confirmations); } info!("Chain tip reached, switching to live"); From 60111c3357ad171a026afb22f928428343e55d5d Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 13 Nov 2025 11:55:29 +0100 Subject: [PATCH 18/63] Revert "ref: use get_latest_confirmed instead of manual calc" This reverts commit 9a6318270251ba291b24e0d0b4975c514de7fa3f. --- src/block_range_scanner.rs | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 58f40a86..d4d0a862 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -300,14 +300,14 @@ impl Service { sender: mpsc::Sender, ) -> Result<(), ScannerError> { let max_block_range = self.max_block_range; - let confirmed = self.provider.get_latest_confirmed(block_confirmations).await?; + let latest = self.provider.get_block_number().await?; let provider = self.provider.clone(); let mut reorg_handler = self.reorg_handler.clone(); - // the next block returned by the underlying subscription will always be `confirmed + 1`, - // because `confirmed` was already mined and subscription by definition only streams after - // new blocks have been mined - let stream_start = confirmed + 1; + // the next block returned by the underlying subscription will always be `latest + 1`, - // because `latest` was already mined and subscription by definition only streams after new + // blocks have been mined + let range_start = (latest + 1).saturating_sub(block_confirmations); let subscription = self.provider.subscribe_blocks().await?; @@ -315,7 +315,7 @@ impl Service { tokio::spawn(async move { Self::stream_live_blocks( - stream_start, + range_start, subscription, sender, &provider, @@ -439,16 +439,17 @@ impl Service { ) .await; - start_block = confirmed_tip + 1; - - confirmed_tip = match provider.get_latest_confirmed(block_confirmations).await { - Ok(number) => number, + let latest = match provider.get_block_by_number(BlockNumberOrTag::Latest).await { + Ok(block) => block.header().number(), Err(e) => { error!(error = %e, "Error fetching latest block when calculating next historical batch, shutting down"); _ = sender.try_stream(e).await; return; } }; + + start_block = confirmed_tip + 1; + confirmed_tip = latest.saturating_sub(block_confirmations); } info!("Chain tip reached, switching to live"); From 43d862101f1faab080dc9a10b1a6dcb5d1604cbe Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 13 Nov 2025 11:55:40 +0100 Subject: [PATCH 19/63] test: remove test_log from reorg_depth_one --- tests/live/reorg.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs index d566aca1..5a96d764 100644 --- a/tests/live/reorg.rs +++ b/tests/live/reorg.rs @@ -90,7 +90,7 @@ async fn reorg_rescans_events_with_ascending_blocks()
-> anyhow::Result<()> { Ok(()) } -#[test_log::test(tokio::test)] +#[tokio::test] async fn reorg_depth_one() -> anyhow::Result<()> { let LiveScannerSetup { provider, contract, scanner, mut stream, anvil: _anvil } = setup_live_scanner(None, None, 0).await?; From e07a6710acf3b9cfbb01ff16890ce374a93ab272 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 13 Nov 2025 12:02:35 +0100 Subject: [PATCH 20/63] test: add edge case live_with_block_confirmations_always_emits_genesis_block --- tests/block_range_scanner.rs | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index 5840895a..1cfc946c 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -55,6 +55,41 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh Ok(()) } +#[tokio::test] +async fn live_with_block_confirmations_always_emits_genesis_block() -> anyhow::Result<()> { + let anvil = Anvil::new().try_spawn()?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + + let client = BlockRangeScanner::new().connect(provider.clone()).await?.run()?; + + let mut stream = client.stream_live(3).await?; + + provider.anvil_mine(Some(1), None).await?; + + assert_next!(stream, 0..=0); + let stream = assert_empty!(stream); + + provider.anvil_mine(Some(2), None).await?; + + let mut stream = assert_empty!(stream); + + provider.anvil_mine(Some(5), None).await?; + + assert_next!(stream, 1..=1); + assert_next!(stream, 2..=2); + assert_next!(stream, 3..=3); + assert_next!(stream, 4..=4); + assert_next!(stream, 5..=5); + let mut stream = assert_empty!(stream); + + provider.anvil_mine(Some(1), None).await?; + + assert_next!(stream, 6..=6); + assert_empty!(stream); + + Ok(()) +} + #[tokio::test] async fn stream_from_latest_starts_at_tip_not_confirmed() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; From 7f3c963303cb9f697eee77e7be24d3543737dcb5 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 13 Nov 2025 12:11:18 +0100 Subject: [PATCH 21/63] ref: remove inner_batch_start --- src/block_range_scanner.rs | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index d4d0a862..bc01bca3 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -682,28 +682,30 @@ impl Service { // (e.g. 
in case max_block_range=2, and the range is 100..=106) let mut batch_start = stream_start; - let mut inner_batch_start = batch_start; - let mut batch_end; + let mut batch_end: Option<::BlockResponse>; loop { - let batch_end_num = - confirmed.min(inner_batch_start.saturating_add(max_block_range - 1)); + let batch_end_num = confirmed.min(batch_start.saturating_add(max_block_range - 1)); batch_end = match provider.get_block_by_number(batch_end_num.into()).await { Ok(block) => Some(block), Err(e) => { - error!(batch_start = inner_batch_start, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); + error!(batch_start = batch_start, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); _ = sender.try_stream(e).await; return; } }; - if !sender.try_stream(inner_batch_start..=batch_end_num).await { + if !sender.try_stream(batch_start..=batch_end_num).await { return; } if batch_end_num == confirmed { break; } - inner_batch_start = batch_end_num + 1; + batch_start = batch_end_num + 1; } + // reset batch start + let mut batch_start = stream_start; + // batch_end is now set + while let Some(incoming_block) = stream.next().await { let incoming_block_num = incoming_block.number(); info!(block_number = incoming_block_num, "Received block header"); @@ -738,7 +740,7 @@ impl Service { // - the first post-reorg block // - the previous range_start } else { - // no reorg happened, move the block range start + // no reorg happened, move the block range back to expected next start // // SAFETY: Overflow cannot realistically happen if let Some(batch_end) = batch_end.as_ref() { @@ -748,27 +750,29 @@ impl Service { let confirmed = incoming_block_num.saturating_sub(block_confirmations); if confirmed >= batch_start { - let mut inner_batch_start = batch_start; loop { // NOTE: Edge case when difference between range end and range start >= max // reads let batch_end_num = - confirmed.min(inner_batch_start.saturating_add(max_block_range - 1)); + confirmed.min(batch_start.saturating_add(max_block_range - 1)); batch_end = match provider.get_block_by_number(batch_end_num.into()).await { Ok(block) => Some(block), Err(e) => { - error!(batch_start = inner_batch_start, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); + error!(batch_start = batch_start, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); _ = sender.try_stream(e).await; return; } }; - if !sender.try_stream(inner_batch_start..=batch_end_num).await { + if !sender.try_stream(batch_start..=batch_end_num).await { return; } + + // SAFETY: Overflow cannot realistically happen + batch_start = batch_end_num + 1; + if batch_end_num == confirmed { break; } - inner_batch_start = batch_end_num + 1; } } } From dd247f9e8710dd7a84874a1e1d52cfaf560ca711 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Fri, 14 Nov 2025 09:58:56 +0100 Subject: [PATCH 22/63] test: fix wording in assert_next macro --- src/test_utils/macros.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test_utils/macros.rs b/src/test_utils/macros.rs index 3221f6af..17e698f9 100644 --- a/src/test_utils/macros.rs +++ b/src/test_utils/macros.rs @@ -13,7 +13,7 @@ macro_rules! 
assert_next { if let Some(msg) = message { assert_eq!(msg, $expected) } else { - panic!("Expected {:?}, got: {message:?}", $expected) + panic!("Expected {:?}, but channel is closed", $expected) } }; } From f892327b05ece0337008d205f2057573a5fd1c13 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Fri, 14 Nov 2025 11:36:10 +0100 Subject: [PATCH 23/63] fix: update rewind logic + major switchingtolive refactor --- src/block_range_scanner.rs | 23 ++-- src/event_scanner/scanner/common.rs | 7 +- src/event_scanner/scanner/sync/from_latest.rs | 41 +++--- src/test_utils/macros.rs | 24 +++- tests/block_range_scanner.rs | 3 +- tests/latest_events/basic.rs | 23 ++-- tests/live/basic.rs | 119 ++++-------------- tests/live/optional_fields.rs | 71 ++++++----- tests/sync/from_block.rs | 1 + tests/sync/from_latest.rs | 52 +++++--- 10 files changed, 162 insertions(+), 202 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index bc01bca3..16292117 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -322,6 +322,7 @@ impl Service { block_confirmations, max_block_range, &mut reorg_handler, + false, ) .await; }); @@ -403,10 +404,11 @@ impl Service { info!( start_block = start_block, confirmed_tip = confirmed_tip, - "Start block is beyond confirmed tip, starting live stream" + "Start block is at or beyond confirmed tip, starting live stream" ); - let subscription = self.provider.subscribe_blocks().await?; + let subscription: Subscription<::HeaderResponse> = + self.provider.subscribe_blocks().await?; tokio::spawn(async move { Self::stream_live_blocks( @@ -417,6 +419,7 @@ impl Service { block_confirmations, max_block_range, &mut reorg_handler, + true, ) .await; }); @@ -463,10 +466,6 @@ impl Service { } }; - if !sender.try_stream(ScannerStatus::SwitchingToLive).await { - return; - } - info!("Successfully transitioned from historical to live data"); Self::stream_live_blocks( @@ -477,6 +476,7 @@ impl Service { block_confirmations, max_block_range, &mut reorg_handler, + true, ) .await; }); @@ -662,9 +662,9 @@ impl Service { block_confirmations: u64, max_block_range: u64, reorg_handler: &mut ReorgHandler, + notify: bool, ) { // ensure we start streaming only after the specified starting block - let mut stream = subscription.into_stream().skip_while(|header| { header.number().saturating_sub(block_confirmations) < stream_start }); @@ -674,15 +674,20 @@ impl Service { return; }; + if notify { + if !sender.try_stream(ScannerStatus::SwitchingToLive).await { + return; + } + } + let incoming_block_num = incoming_block.number(); info!(block_number = incoming_block_num, "Received block header"); let confirmed = incoming_block_num.saturating_sub(block_confirmations); - // TODO: stream `batch_start..=batch_end` in batches of `max_block_range` - // (e.g. in case max_block_range=2, and the range is 100..=106) let mut batch_start = stream_start; let mut batch_end: Option<::BlockResponse>; + // TODO: include reorg handling here, maybe rely on historic handling fn loop { let batch_end_num = confirmed.min(batch_start.saturating_add(max_block_range - 1)); batch_end = match provider.get_block_by_number(batch_end_num.into()).await { diff --git a/src/event_scanner/scanner/common.rs b/src/event_scanner/scanner/common.rs index 6bae3d9f..49e0d099 100644 --- a/src/event_scanner/scanner/common.rs +++ b/src/event_scanner/scanner/common.rs @@ -150,10 +150,11 @@ pub fn spawn_log_consumers( if let ConsumerMode::CollectLatest { .. 
} = mode { if !collected.is_empty() { collected.reverse(); // restore chronological order + info!("Sending collected logs to consumer"); + _ = sender.try_stream(collected).await; + } else { + info!("No latest logs collected"); } - - info!("Sending collected logs to consumer"); - _ = sender.try_stream(collected).await; } }); diff --git a/src/event_scanner/scanner/sync/from_latest.rs b/src/event_scanner/scanner/sync/from_latest.rs index e5623f53..6db88d7b 100644 --- a/src/event_scanner/scanner/sync/from_latest.rs +++ b/src/event_scanner/scanner/sync/from_latest.rs @@ -4,13 +4,10 @@ use alloy::{ network::{BlockResponse, Network}, }; -use tokio::sync::mpsc; -use tokio_stream::{StreamExt, wrappers::ReceiverStream}; -use tracing::info; +use tracing::{error, info}; use crate::{ - EventScannerBuilder, ScannerError, ScannerStatus, - block_range_scanner::Message as BlockRangeMessage, + EventScannerBuilder, ScannerError, event_scanner::{ EventScanner, scanner::{ @@ -19,6 +16,7 @@ use crate::{ }, }, robust_provider::IntoRobustProvider, + types::TryStream, }; impl EventScannerBuilder { @@ -80,11 +78,6 @@ impl EventScanner { // Setup rewind and live streams to run in parallel. let rewind_stream = client.rewind(BlockNumberOrTag::Earliest, latest_block).await?; - // We actually rely on the sync mode for the live stream, to - // ensure that we don't miss any events in case a new block was minted while - // we were setting up the streams or a reorg happens. - let sync_stream = - client.stream_from(latest_block + 1, self.config.block_confirmations).await?; // Start streaming... tokio::spawn(async move { @@ -100,20 +93,20 @@ impl EventScanner { ) .await; - // Notify the client that we're now streaming live. - info!("Switching to live stream"); - - // Use a one-off channel for the notification. - let (tx, rx) = mpsc::channel::(1); - let stream = ReceiverStream::new(rx); - tx.send(BlockRangeMessage::Status(ScannerStatus::SwitchingToLive)) - .await - .expect("receiver exists"); - - // close the channel to stop the stream - drop(tx); - - let sync_stream = stream.chain(sync_stream); + // We actually rely on the sync mode for the live stream, as more blocks could have been + // minted while the scanner was collecting the latest `count` events. + // Note: Sync mode will notify the client when it switches to live streaming. + let sync_stream = + match client.stream_from(latest_block + 1, self.config.block_confirmations).await { + Ok(stream) => stream, + Err(e) => { + error!(error = %e, "Error during sync mode setup"); + for listener in listeners { + _ = listener.sender.try_stream(e.clone()).await; + } + return; + } + }; // Start the live (sync) stream. handle_stream(sync_stream, &provider, &listeners, ConsumerMode::Stream).await; diff --git a/src/test_utils/macros.rs b/src/test_utils/macros.rs index 17e698f9..9c8a1b18 100644 --- a/src/test_utils/macros.rs +++ b/src/test_utils/macros.rs @@ -13,7 +13,29 @@ macro_rules! assert_next { if let Some(msg) = message { assert_eq!(msg, $expected) } else { - panic!("Expected {:?}, but channel is closed", $expected) + panic!("Expected {:?}, but channel was closed", $expected) + } + }; +} + +#[macro_export] +macro_rules! 
assert_next_any { + ($stream: expr, $expected_options: expr) => { + assert_next_any!($stream, $expected_options, timeout = 5) + }; + ($stream: expr, $expected_options: expr, timeout = $secs: expr) => { + let message = tokio::time::timeout( + std::time::Duration::from_secs($secs), + tokio_stream::StreamExt::next(&mut $stream), + ) + .await + .expect("timed out"); + + if let Some(data) = message { + let matched = $expected_options.iter().any(|expected| data == *expected); + assert!(matched, "Expected one of:\n{:#?}\n\nGot:\n{:#?}", $expected_options, data); + } else { + panic!("Expected one of {:?}, but channel was closed", $expected_options) } }; } diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index 1cfc946c..6db30872 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -91,7 +91,7 @@ async fn live_with_block_confirmations_always_emits_genesis_block() -> anyhow::R } #[tokio::test] -async fn stream_from_latest_starts_at_tip_not_confirmed() -> anyhow::Result<()> { +async fn stream_from_starts_at_latest_once_it_has_enough_confirmations() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; @@ -107,6 +107,7 @@ async fn stream_from_latest_starts_at_tip_not_confirmed() -> anyhow::Result<()> let mut stream = assert_empty!(stream); provider.anvil_mine(Some(1), None).await?; + assert_next!(stream, ScannerStatus::SwitchingToLive); assert_next!(stream, 20..=20); let mut stream = assert_empty!(stream); diff --git a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs index 5d07f9ee..5230f2b2 100644 --- a/tests/latest_events/basic.rs +++ b/tests/latest_events/basic.rs @@ -64,7 +64,7 @@ async fn latest_scanner_fewer_available_than_count_returns_all() -> anyhow::Resu } #[tokio::test] -async fn latest_scanner_no_events_returns_empty() -> anyhow::Result<()> { +async fn latest_scanner_no_past_events_returns_empty() -> anyhow::Result<()> { let count = 5; let setup = setup_latest_scanner(None, None, count, None, None).await?; let scanner = setup.scanner; @@ -72,9 +72,6 @@ async fn latest_scanner_no_events_returns_empty() -> anyhow::Result<()> { scanner.start().await?; - let expected: &[TestCounter::CountIncreased] = &[]; - - assert_next!(stream, expected); assert_closed!(stream); Ok(()) @@ -218,7 +215,7 @@ async fn latest_scanner_different_filters_receive_different_results() -> anyhow: #[tokio::test] async fn latest_scanner_mixed_events_and_filters_return_correct_streams() -> anyhow::Result<()> { let count = 2; - let setup = setup_latest_scanner(None, None, count, None, None).await?; + let setup = setup_latest_scanner(Some(0.1), None, count, None, None).await?; let contract = setup.contract; let mut scanner = setup.scanner; let mut stream_inc = setup.stream; // CountIncreased by default @@ -229,17 +226,11 @@ async fn latest_scanner_mixed_events_and_filters_return_correct_streams() -> any .event(TestCounter::CountDecreased::SIGNATURE); let mut stream_dec = scanner.subscribe(filter_dec); - // Sequence: inc(1), inc(2), dec(1), inc(2), dec(1) - // inc -> 1 - contract.increase().send().await?.watch().await?; - // inc -> 2 - contract.increase().send().await?.watch().await?; - // dec -> 1 - contract.decrease().send().await?.watch().await?; - // inc -> 2 - contract.increase().send().await?.watch().await?; - // dec -> 1 - contract.decrease().send().await?.watch().await?; + contract.increase().send().await?.watch().await?; // inc(1) + 
contract.increase().send().await?.watch().await?; // inc(2) + contract.decrease().send().await?.watch().await?; // dec(1) + contract.increase().send().await?.watch().await?; // inc(2) + contract.decrease().send().await?.watch().await?; // dec(1) scanner.start().await?; diff --git a/tests/live/basic.rs b/tests/live/basic.rs index cab23c0d..2e34e5af 100644 --- a/tests/live/basic.rs +++ b/tests/live/basic.rs @@ -7,10 +7,10 @@ use std::{ }; use crate::common::{TestCounter, deploy_counter, setup_live_scanner}; -use alloy::sol_types::SolEvent; -use event_scanner::{EventFilter, Message}; +use alloy::{primitives::U256, sol_types::SolEvent}; +use event_scanner::{EventFilter, Message, assert_empty, assert_next}; use tokio::time::timeout; -use tokio_stream::{StreamExt, wrappers::ReceiverStream}; +use tokio_stream::StreamExt; #[tokio::test] async fn basic_single_event_scanning() -> anyhow::Result<()> { @@ -73,60 +73,29 @@ async fn multiple_contracts_same_event_isolate_callbacks() -> anyhow::Result<()> let b_filter = EventFilter::new() .contract_address(*b.address()) .event(TestCounter::CountIncreased::SIGNATURE.to_owned()); - let expected_events_a = 3; - let expected_events_b = 2; let mut scanner = setup.scanner; - let a_stream = scanner.subscribe(a_filter); - let b_stream = scanner.subscribe(b_filter); + let mut a_stream = scanner.subscribe(a_filter); + let mut b_stream = scanner.subscribe(b_filter); scanner.start().await?; - for _ in 0..expected_events_a { - a.increase().send().await?.watch().await?; - } + a.increase().send().await?.watch().await?; + a.increase().send().await?.watch().await?; + a.increase().send().await?.watch().await?; - for _ in 0..expected_events_b { - b.increase().send().await?.watch().await?; - } + b.increase().send().await?.watch().await?; + b.increase().send().await?.watch().await?; - let make_assertion = async |stream: ReceiverStream, expected_events| { - let mut stream = stream.take(expected_events); - - let count = Arc::new(AtomicUsize::new(0)); - let count_clone = Arc::clone(&count); - - let event_counting = async move { - let mut expected_new_count = 1; - while let Some(message) = stream.next().await { - match message { - Message::Data(logs) => { - count_clone.fetch_add(logs.len(), Ordering::SeqCst); - - for log in logs { - let TestCounter::CountIncreased { newCount } = - log.log_decode().unwrap().inner.data; - assert_eq!(newCount, expected_new_count); - expected_new_count += 1; - } - } - Message::Error(e) => { - panic!("panicked with error: {e}"); - } - Message::Status(_) => { - // Handle info if needed - } - } - } - }; + assert_next!(a_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(a_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); + assert_next!(a_stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); + assert_empty!(a_stream); - _ = timeout(Duration::from_secs(1), event_counting).await; - assert_eq!(count.load(Ordering::SeqCst), expected_events); - }; - - make_assertion(a_stream, expected_events_a).await; - make_assertion(b_stream, expected_events_b).await; + assert_next!(b_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(b_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); + assert_empty!(b_stream); Ok(()) } @@ -144,60 +113,26 @@ async fn multiple_events_same_contract() -> anyhow::Result<()> { .contract_address(contract_address) .event(TestCounter::CountDecreased::SIGNATURE.to_owned()); - let expected_incr_events = 6; - let expected_decr_events 
= 2; - let mut scanner = setup.scanner; - let mut incr_stream = scanner.subscribe(increase_filter).take(expected_incr_events); - let mut decr_stream = scanner.subscribe(decrease_filter).take(expected_decr_events); + let mut incr_stream = scanner.subscribe(increase_filter); + let mut decr_stream = scanner.subscribe(decrease_filter); scanner.start().await?; - for _ in 0..expected_incr_events { - contract.increase().send().await?.watch().await?; - } + contract.increase().send().await?.watch().await?; + contract.increase().send().await?.watch().await?; contract.decrease().send().await?.watch().await?; contract.decrease().send().await?.watch().await?; - let incr_count = Arc::new(AtomicUsize::new(0)); - let decr_count = Arc::new(AtomicUsize::new(0)); - let incr_count_clone = Arc::clone(&incr_count); - let decr_count_clone = Arc::clone(&decr_count); - - let event_counting = async move { - let mut expected_new_count = 0; - - // process CountIncreased - while let Some(Message::Data(logs)) = incr_stream.next().await { - incr_count_clone.fetch_add(logs.len(), Ordering::SeqCst); - - for log in logs { - expected_new_count += 1; - let TestCounter::CountIncreased { newCount } = log.log_decode().unwrap().inner.data; - assert_eq!(newCount, expected_new_count); - } - } - - expected_new_count -= 1; - - // process CountDecreased - while let Some(Message::Data(logs)) = decr_stream.next().await { - decr_count_clone.fetch_add(logs.len(), Ordering::SeqCst); - - for log in logs { - let TestCounter::CountDecreased { newCount } = log.log_decode().unwrap().inner.data; - assert_eq!(newCount, expected_new_count); - expected_new_count -= 1; - } - } - }; - - _ = timeout(Duration::from_secs(2), event_counting).await; + assert_next!(incr_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(incr_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); + assert_empty!(incr_stream); - assert_eq!(incr_count.load(Ordering::SeqCst), expected_incr_events); - assert_eq!(decr_count.load(Ordering::SeqCst), expected_decr_events); + assert_next!(decr_stream, &[TestCounter::CountDecreased { newCount: U256::from(1) }]); + assert_next!(decr_stream, &[TestCounter::CountDecreased { newCount: U256::from(0) }]); + assert_empty!(decr_stream); Ok(()) } diff --git a/tests/live/optional_fields.rs b/tests/live/optional_fields.rs index 02118cf2..60b4248a 100644 --- a/tests/live/optional_fields.rs +++ b/tests/live/optional_fields.rs @@ -6,9 +6,9 @@ use std::{ time::Duration, }; -use crate::common::{TestCounter, setup_live_scanner}; -use alloy::sol_types::SolEvent; -use event_scanner::{EventFilter, Message}; +use crate::common::{TestCounter, deploy_counter, setup_live_scanner}; +use alloy::{primitives::U256, sol_types::SolEvent}; +use event_scanner::{EventFilter, Message, assert_empty, assert_next}; use tokio::time::timeout; use tokio_stream::StreamExt; @@ -89,60 +89,59 @@ async fn track_all_events_in_block_range() -> anyhow::Result<()> { #[tokio::test] async fn mixed_optional_and_required_filters() -> anyhow::Result<()> { - let setup = setup_live_scanner(Some(0.1), None, 0).await?; - let contract = setup.contract.clone(); - let contract_address = *contract.address(); + let setup = setup_live_scanner(None, None, 0).await?; + let contract_1 = setup.contract.clone(); + let provider = setup.provider; + + let contract_2 = deploy_counter(provider.primary().clone()).await?; // Filter for specific event from specific contract let specific_filter = EventFilter::new() - .contract_address(contract_address) + 
.contract_address(*contract_2.address()) .event(TestCounter::CountIncreased::SIGNATURE); - let expected_specific_count = 2; // Filter for all events from all contracts let all_events_filter = EventFilter::new(); - let expected_all_count = 3; let mut scanner = setup.scanner; - let mut specific_stream = scanner.subscribe(specific_filter).take(expected_specific_count); - let mut all_stream = scanner.subscribe(all_events_filter).take(expected_all_count); + let specific_stream = scanner.subscribe(specific_filter); + let mut all_stream = scanner.subscribe(all_events_filter); scanner.start().await?; // First increase the counter to have some balance - for _ in 0..expected_all_count { - contract.increase().send().await?.watch().await?; - } + contract_1.increase().send().await?.watch().await?; + contract_1.increase().send().await?.watch().await?; + contract_1.increase().send().await?.watch().await?; + + assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); + assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); + + let mut all_stream = assert_empty!(all_stream); + let mut specific_stream = assert_empty!(specific_stream); // Generate specific events (CountIncreased) - for _ in 0..expected_specific_count { - contract.increase().send().await?.watch().await?; - } + contract_2.increase().send().await?.watch().await?; + contract_2.increase().send().await?.watch().await?; - // Generate additional events that should be caught by the all-events filter - for _ in 0..expected_all_count { - contract.decrease().send().await?.watch().await?; - } + assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); - let specific_event_count = Arc::new(AtomicUsize::new(0)); - let all_events_count = Arc::new(AtomicUsize::new(0)); - let specific_count_clone = Arc::clone(&specific_event_count); - let all_count_clone = Arc::clone(&all_events_count); + assert_next!(specific_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(specific_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); - let event_counting = async move { - while let Some(Message::Data(logs)) = all_stream.next().await { - all_count_clone.fetch_add(logs.len(), Ordering::SeqCst); - } - while let Some(Message::Data(logs)) = specific_stream.next().await { - specific_count_clone.fetch_add(logs.len(), Ordering::SeqCst); - } - }; + let mut all_stream = assert_empty!(all_stream); + let specific_stream = assert_empty!(specific_stream); + + // Generate additional events that should be caught by the all-events filter + contract_1.decrease().send().await?.watch().await?; - _ = timeout(Duration::from_secs(3), event_counting).await; + assert_next!(all_stream, &[TestCounter::CountDecreased { newCount: U256::from(2) }]); - assert_eq!(specific_event_count.load(Ordering::SeqCst), expected_specific_count); - assert_eq!(all_events_count.load(Ordering::SeqCst), expected_all_count); + assert_empty!(all_stream); + assert_empty!(specific_stream); Ok(()) } diff --git a/tests/sync/from_block.rs b/tests/sync/from_block.rs index d8b3decc..72f6c769 100644 --- a/tests/sync/from_block.rs +++ b/tests/sync/from_block.rs @@ -68,6 +68,7 @@ async fn sync_from_future_block_waits_until_minted() -> anyhow::Result<()> { // Act: emit an event that will be mined in block == future_start 
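// editor's note (hedged sketch, not part of the original patch): the wait below is driven by the confirmed tip, which the scanner derives as incoming_block_num.saturating_sub(block_confirmations); the stream only resumes once that value reaches future_start, at which point the SwitchingToLive status and the event batch asserted below are emitted.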
contract.increase().send().await?.watch().await?; + assert_next!(stream, ScannerStatus::SwitchingToLive); // Assert: the first streamed message arrives and contains the expected event assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); assert_empty!(stream); diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs index 1cafed26..50779b65 100644 --- a/tests/sync/from_latest.rs +++ b/tests/sync/from_latest.rs @@ -1,10 +1,10 @@ use alloy::{primitives::U256, providers::ext::AnvilApi}; use crate::common::{TestCounter, setup_sync_from_latest_scanner}; -use event_scanner::{ScannerStatus, assert_next}; +use event_scanner::{ScannerStatus, assert_empty, assert_next, assert_next_any}; #[tokio::test] -async fn scan_latest_then_live_happy_path_no_duplicates() -> anyhow::Result<()> { +async fn happy_path_no_duplicates() -> anyhow::Result<()> { let setup = setup_sync_from_latest_scanner(None, None, 3, 0).await?; let contract = setup.contract; let scanner = setup.scanner; @@ -30,13 +30,13 @@ async fn scan_latest_then_live_happy_path_no_duplicates() -> anyhow::Result<()> TestCounter::CountIncreased { newCount: U256::from(6) }, ] ); - // Transition to live - assert_next!(stream, ScannerStatus::SwitchingToLive); // Live phase: emit three more, should arrive in order without duplicating latest contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; + // Transition to live + assert_next!(stream, ScannerStatus::SwitchingToLive); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(7) }]); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(8) }]); @@ -44,7 +44,7 @@ async fn scan_latest_then_live_happy_path_no_duplicates() -> anyhow::Result<()> } #[tokio::test] -async fn scan_latest_then_live_fewer_historical_then_continues_live() -> anyhow::Result<()> { +async fn fewer_historical_then_continues_live() -> anyhow::Result<()> { let setup = setup_sync_from_latest_scanner(None, None, 5, 0).await?; let contract = setup.contract; let scanner = setup.scanner; @@ -64,11 +64,12 @@ async fn scan_latest_then_live_fewer_historical_then_continues_live() -> anyhow: TestCounter::CountIncreased { newCount: U256::from(2) }, ] ); - assert_next!(stream, ScannerStatus::SwitchingToLive); // Live: two more arrive contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; + + assert_next!(stream, ScannerStatus::SwitchingToLive); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); @@ -76,7 +77,7 @@ async fn scan_latest_then_live_fewer_historical_then_continues_live() -> anyhow: } #[tokio::test] -async fn scan_latest_then_live_exact_historical_count_then_live() -> anyhow::Result<()> { +async fn exact_historical_count_then_live() -> anyhow::Result<()> { let setup = setup_sync_from_latest_scanner(None, None, 4, 0).await?; let contract = setup.contract; let scanner = setup.scanner; @@ -99,40 +100,49 @@ async fn scan_latest_then_live_exact_historical_count_then_live() -> anyhow::Res TestCounter::CountIncreased { newCount: U256::from(4) }, ] ); - assert_next!(stream, ScannerStatus::SwitchingToLive); // Live continues contract.increase().send().await?.watch().await?; + + assert_next!(stream, ScannerStatus::SwitchingToLive); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(5) }]); Ok(()) } #[tokio::test] -async fn 
scan_latest_then_live_no_historical_only_live_streams() -> anyhow::Result<()> { - let setup = setup_sync_from_latest_scanner(None, None, 5, 0).await?; +async fn no_historical_only_live_streams() -> anyhow::Result<()> { + let setup = setup_sync_from_latest_scanner(Some(0.1), None, 5, 0).await?; let contract = setup.contract; let scanner = setup.scanner; - let mut stream = setup.stream; + let stream = setup.stream; scanner.start().await?; // Latest is empty - let expected: &[TestCounter::CountIncreased] = &[]; - assert_next!(stream, expected); - assert_next!(stream, ScannerStatus::SwitchingToLive); + let mut stream = assert_empty!(stream); // Live events arrive contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); + + assert_next!(stream, ScannerStatus::SwitchingToLive); + assert_next_any!( + stream, + [ + vec![TestCounter::CountIncreased { newCount: U256::from(1) }], + vec![ + TestCounter::CountIncreased { newCount: U256::from(1) }, + TestCounter::CountIncreased { newCount: U256::from(2) } + ] + ] + ); Ok(()) } #[tokio::test] -async fn scan_latest_then_live_boundary_no_duplication() -> anyhow::Result<()> { +async fn boundary_no_duplication() -> anyhow::Result<()> { let setup = setup_sync_from_latest_scanner(None, None, 3, 0).await?; let provider = setup.provider; let contract = setup.contract; @@ -160,17 +170,18 @@ async fn scan_latest_then_live_boundary_no_duplication() -> anyhow::Result<()> { TestCounter::CountIncreased { newCount: U256::from(3) }, ] ); - assert_next!(stream, ScannerStatus::SwitchingToLive); // Immediately produce a new live event in a new block contract.increase().send().await?.watch().await?; + + assert_next!(stream, ScannerStatus::SwitchingToLive); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); Ok(()) } #[tokio::test] -async fn scan_latest_then_live_waiting_on_live_logs_arriving() -> anyhow::Result<()> { +async fn waiting_on_live_logs_arriving() -> anyhow::Result<()> { let setup = setup_sync_from_latest_scanner(None, None, 3, 0).await?; let contract = setup.contract; let scanner = setup.scanner; @@ -192,10 +203,11 @@ async fn scan_latest_then_live_waiting_on_live_logs_arriving() -> anyhow::Result TestCounter::CountIncreased { newCount: U256::from(3) }, ] ); - assert_next!(stream, ScannerStatus::SwitchingToLive); let inner = stream.into_inner(); assert!(inner.is_empty()); + // `ScannerStatus::SwitchingToLive` arrives only on first live block received + Ok(()) } From 078c4c64e77ec2bc72f8a6ba8b7b0fded4f72932 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Fri, 14 Nov 2025 11:36:49 +0100 Subject: [PATCH 24/63] feat: SwitchingToLive -> StartingLiveStream --- src/block_range_scanner.rs | 2 +- src/types.rs | 2 +- tests/block_range_scanner.rs | 2 +- tests/sync/from_block.rs | 6 +++--- tests/sync/from_latest.rs | 10 +++++----- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 16292117..d37232c4 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -675,7 +675,7 @@ impl Service { }; if notify { - if !sender.try_stream(ScannerStatus::SwitchingToLive).await { + if !sender.try_stream(ScannerStatus::StartingLiveStream).await { return; } } diff --git a/src/types.rs b/src/types.rs index e498ab12..45cb459d 100644 --- a/src/types.rs +++ 
b/src/types.rs @@ -14,7 +14,7 @@ pub enum ScannerMessage { #[derive(Copy, Debug, Clone, PartialEq)] pub enum ScannerStatus { - SwitchingToLive, + StartingLiveStream, ReorgDetected, } diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index 6db30872..95905770 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -107,7 +107,7 @@ async fn stream_from_starts_at_latest_once_it_has_enough_confirmations() -> anyh let mut stream = assert_empty!(stream); provider.anvil_mine(Some(1), None).await?; - assert_next!(stream, ScannerStatus::SwitchingToLive); + assert_next!(stream, ScannerStatus::StartingLiveStream); assert_next!(stream, 20..=20); let mut stream = assert_empty!(stream); diff --git a/tests/sync/from_block.rs b/tests/sync/from_block.rs index 72f6c769..463e36d5 100644 --- a/tests/sync/from_block.rs +++ b/tests/sync/from_block.rs @@ -37,7 +37,7 @@ async fn replays_historical_then_switches_to_live() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; // chain tip reached - assert_next!(stream, ScannerStatus::SwitchingToLive); + assert_next!(stream, ScannerStatus::StartingLiveStream); // live events assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); @@ -68,7 +68,7 @@ async fn sync_from_future_block_waits_until_minted() -> anyhow::Result<()> { // Act: emit an event that will be mined in block == future_start contract.increase().send().await?.watch().await?; - assert_next!(stream, ScannerStatus::SwitchingToLive); + assert_next!(stream, ScannerStatus::StartingLiveStream); // Assert: the first streamed message arrives and contains the expected event assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); assert_empty!(stream); @@ -104,7 +104,7 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { } // switching to "live" phase - assert_next!(stream, ScannerStatus::SwitchingToLive); + assert_next!(stream, ScannerStatus::StartingLiveStream); // assert confirmed live events are streamed separately assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs index 50779b65..bde912e3 100644 --- a/tests/sync/from_latest.rs +++ b/tests/sync/from_latest.rs @@ -36,7 +36,7 @@ async fn happy_path_no_duplicates() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; // Transition to live - assert_next!(stream, ScannerStatus::SwitchingToLive); + assert_next!(stream, ScannerStatus::StartingLiveStream); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(7) }]); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(8) }]); @@ -69,7 +69,7 @@ async fn fewer_historical_then_continues_live() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; - assert_next!(stream, ScannerStatus::SwitchingToLive); + assert_next!(stream, ScannerStatus::StartingLiveStream); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); @@ -104,7 +104,7 @@ async fn exact_historical_count_then_live() -> anyhow::Result<()> { // Live continues contract.increase().send().await?.watch().await?; - assert_next!(stream, ScannerStatus::SwitchingToLive); + assert_next!(stream, 
ScannerStatus::StartingLiveStream); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(5) }]); Ok(()) @@ -126,7 +126,7 @@ async fn no_historical_only_live_streams() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; - assert_next!(stream, ScannerStatus::SwitchingToLive); + assert_next!(stream, ScannerStatus::StartingLiveStream); assert_next_any!( stream, [ @@ -174,7 +174,7 @@ async fn boundary_no_duplication() -> anyhow::Result<()> { // Immediately produce a new live event in a new block contract.increase().send().await?.watch().await?; - assert_next!(stream, ScannerStatus::SwitchingToLive); + assert_next!(stream, ScannerStatus::StartingLiveStream); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); Ok(()) From 8169ed7d6ace5aa40a4a1b7d1f9d8dc406048e29 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Fri, 14 Nov 2025 11:42:00 +0100 Subject: [PATCH 25/63] test: remove assert_next_any --- src/test_utils/macros.rs | 22 ---------------------- tests/sync/from_latest.rs | 14 +++----------- 2 files changed, 3 insertions(+), 33 deletions(-) diff --git a/src/test_utils/macros.rs b/src/test_utils/macros.rs index 9c8a1b18..a595e524 100644 --- a/src/test_utils/macros.rs +++ b/src/test_utils/macros.rs @@ -18,28 +18,6 @@ macro_rules! assert_next { }; } -#[macro_export] -macro_rules! assert_next_any { - ($stream: expr, $expected_options: expr) => { - assert_next_any!($stream, $expected_options, timeout = 5) - }; - ($stream: expr, $expected_options: expr, timeout = $secs: expr) => { - let message = tokio::time::timeout( - std::time::Duration::from_secs($secs), - tokio_stream::StreamExt::next(&mut $stream), - ) - .await - .expect("timed out"); - - if let Some(data) = message { - let matched = $expected_options.iter().any(|expected| data == *expected); - assert!(matched, "Expected one of:\n{:#?}\n\nGot:\n{:#?}", $expected_options, data); - } else { - panic!("Expected one of {:?}, but channel was closed", $expected_options) - } - }; -} - #[macro_export] macro_rules! assert_closed { ($stream: expr) => { diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs index bde912e3..f8d4925a 100644 --- a/tests/sync/from_latest.rs +++ b/tests/sync/from_latest.rs @@ -1,7 +1,7 @@ use alloy::{primitives::U256, providers::ext::AnvilApi}; use crate::common::{TestCounter, setup_sync_from_latest_scanner}; -use event_scanner::{ScannerStatus, assert_empty, assert_next, assert_next_any}; +use event_scanner::{ScannerStatus, assert_empty, assert_next}; #[tokio::test] async fn happy_path_no_duplicates() -> anyhow::Result<()> { @@ -127,16 +127,8 @@ async fn no_historical_only_live_streams() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; assert_next!(stream, ScannerStatus::StartingLiveStream); - assert_next_any!( - stream, - [ - vec![TestCounter::CountIncreased { newCount: U256::from(1) }], - vec![ - TestCounter::CountIncreased { newCount: U256::from(1) }, - TestCounter::CountIncreased { newCount: U256::from(2) } - ] - ] - ); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); Ok(()) } From 78cd3e6e6347e639798c45f5d09ffd4f0fb1b765 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Mon, 17 Nov 2025 12:16:40 +0100 Subject: [PATCH 26/63] Revert "test: remove assert_next_any" This reverts commit 8169ed7d6ace5aa40a4a1b7d1f9d8dc406048e29. 
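(editor's note, a hedged sketch of why the macro is worth keeping: when several events are mined in quick succession, the live stream may deliver them either as one combined batch or as separate single-event batches depending on mining timing, so a test can accept both shapes; the usage restored below in tests/sync/from_latest.rs reads: assert_next_any!(stream, [vec![TestCounter::CountIncreased { newCount: U256::from(1) }], vec![TestCounter::CountIncreased { newCount: U256::from(1) }, TestCounter::CountIncreased { newCount: U256::from(2) }]]);)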
--- src/test_utils/macros.rs | 22 ++++++++++++++++++++++ tests/sync/from_latest.rs | 14 +++++++++++--- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/src/test_utils/macros.rs b/src/test_utils/macros.rs index a595e524..9c8a1b18 100644 --- a/src/test_utils/macros.rs +++ b/src/test_utils/macros.rs @@ -18,6 +18,28 @@ macro_rules! assert_next { }; } +#[macro_export] +macro_rules! assert_next_any { + ($stream: expr, $expected_options: expr) => { + assert_next_any!($stream, $expected_options, timeout = 5) + }; + ($stream: expr, $expected_options: expr, timeout = $secs: expr) => { + let message = tokio::time::timeout( + std::time::Duration::from_secs($secs), + tokio_stream::StreamExt::next(&mut $stream), + ) + .await + .expect("timed out"); + + if let Some(data) = message { + let matched = $expected_options.iter().any(|expected| data == *expected); + assert!(matched, "Expected one of:\n{:#?}\n\nGot:\n{:#?}", $expected_options, data); + } else { + panic!("Expected one of {:?}, but channel was closed", $expected_options) + } + }; +} + #[macro_export] macro_rules! assert_closed { ($stream: expr) => { diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs index f8d4925a..bde912e3 100644 --- a/tests/sync/from_latest.rs +++ b/tests/sync/from_latest.rs @@ -1,7 +1,7 @@ use alloy::{primitives::U256, providers::ext::AnvilApi}; use crate::common::{TestCounter, setup_sync_from_latest_scanner}; -use event_scanner::{ScannerStatus, assert_empty, assert_next}; +use event_scanner::{ScannerStatus, assert_empty, assert_next, assert_next_any}; #[tokio::test] async fn happy_path_no_duplicates() -> anyhow::Result<()> { @@ -127,8 +127,16 @@ async fn no_historical_only_live_streams() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; assert_next!(stream, ScannerStatus::StartingLiveStream); - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); + assert_next_any!( + stream, + [ + vec![TestCounter::CountIncreased { newCount: U256::from(1) }], + vec![ + TestCounter::CountIncreased { newCount: U256::from(1) }, + TestCounter::CountIncreased { newCount: U256::from(2) } + ] + ] + ); Ok(()) } From 5c095cfcd0f909b3537bf03e4687ccd4e4eeb71f Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Mon, 17 Nov 2025 13:27:11 +0100 Subject: [PATCH 27/63] test: implement assert_event_sequence --- src/test_utils/macros.rs | 50 +++++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/src/test_utils/macros.rs b/src/test_utils/macros.rs index 9c8a1b18..93515328 100644 --- a/src/test_utils/macros.rs +++ b/src/test_utils/macros.rs @@ -19,23 +19,47 @@ macro_rules! assert_next { } #[macro_export] -macro_rules! assert_next_any { +macro_rules! assert_event_sequence { ($stream: expr, $expected_options: expr) => { - assert_next_any!($stream, $expected_options, timeout = 5) + assert_event_sequence!($stream, $expected_options, timeout = 5) }; ($stream: expr, $expected_options: expr, timeout = $secs: expr) => { - let message = tokio::time::timeout( - std::time::Duration::from_secs($secs), - tokio_stream::StreamExt::next(&mut $stream), - ) - .await - .expect("timed out"); + let expected_options = $expected_options; + if expected_options.is_empty() { + panic!("assert_event_sequence! called with empty array. Use assert_empty! 
macro instead to check for no pending messages."); + } - if let Some(data) = message { - let matched = $expected_options.iter().any(|expected| data == *expected); - assert!(matched, "Expected one of:\n{:#?}\n\nGot:\n{:#?}", $expected_options, data); - } else { - panic!("Expected one of {:?}, but channel was closed", $expected_options) + let mut remaining = expected_options.iter(); + let start = std::time::Instant::now(); + let timeout_duration = std::time::Duration::from_secs($secs); + + while let Some(expected) = remaining.next() { + let elapsed = start.elapsed(); + if elapsed >= timeout_duration { + panic!("Timed out waiting for events. Still expecting: {:#?}", remaining); + } + + let time_left = timeout_duration - elapsed; + let message = + tokio::time::timeout(time_left, tokio_stream::StreamExt::next(&mut $stream)) + .await + .expect("timed out waiting for next batch"); + + match message { + Some($crate::ScannerMessage::Data(batch)) => { + let mut batch = batch.iter(); + let event = batch.next().expect("Streamed batch should not be empty"); + assert_eq!(&alloy::sol_types::SolEvent::encode_log_data(expected), event.data(), "Unexpected event: {:#?}\nExpected: {:#?}\nRemaining: {:#?}", event, expected, remaining); + while let Some(event) = batch.next() { + let expected = remaining.next().unwrap_or_else(|| panic!("Received more events than expected, current: {:#?}\nStreamed batch: {:#?}", event, batch)); + assert_eq!(&alloy::sol_types::SolEvent::encode_log_data(expected), event.data(), "Unexpected event: {:#?}\nExpected: {:#?}\nRemaining: {:#?}", event, expected, remaining); + } + } + Some(other) => { + panic!("Expected ScannerMessage::Data, got: {:#?}", other); + } + None => {panic!("Stream closed while still expecting: {:#?}", remaining);} + } } }; } From cb8577e48912b9da9274357a8cefbc66e168095a Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Mon, 17 Nov 2025 13:32:17 +0100 Subject: [PATCH 28/63] docs: add todo in macros for similar macro for ranges --- src/test_utils/macros.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/test_utils/macros.rs b/src/test_utils/macros.rs index 93515328..0b4a920e 100644 --- a/src/test_utils/macros.rs +++ b/src/test_utils/macros.rs @@ -18,6 +18,8 @@ macro_rules! assert_next { }; } +// TODO: implement assert_range_coverage + #[macro_export] macro_rules! 
assert_event_sequence { ($stream: expr, $expected_options: expr) => { From 3cb37e290aa673d5ee89ef0f16235dced851bb42 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Mon, 17 Nov 2025 13:38:58 +0100 Subject: [PATCH 29/63] test: update all relevant test assertions to assert_event_sequence --- tests/live/basic.rs | 40 +++++++++++++----- tests/live/optional_fields.rs | 33 +++++++++++---- tests/live/performance.rs | 8 ++-- tests/live/reorg.rs | 80 +++++++++++++++++++++++------------ tests/sync/from_block.rs | 30 ++++++++----- tests/sync/from_latest.rs | 36 +++++++++------- 6 files changed, 153 insertions(+), 74 deletions(-) diff --git a/tests/live/basic.rs b/tests/live/basic.rs index 2e34e5af..a15f3887 100644 --- a/tests/live/basic.rs +++ b/tests/live/basic.rs @@ -8,7 +8,7 @@ use std::{ use crate::common::{TestCounter, deploy_counter, setup_live_scanner}; use alloy::{primitives::U256, sol_types::SolEvent}; -use event_scanner::{EventFilter, Message, assert_empty, assert_next}; +use event_scanner::{EventFilter, Message, assert_empty, assert_event_sequence}; use tokio::time::timeout; use tokio_stream::StreamExt; @@ -88,13 +88,23 @@ async fn multiple_contracts_same_event_isolate_callbacks() -> anyhow::Result<()> b.increase().send().await?.watch().await?; b.increase().send().await?.watch().await?; - assert_next!(a_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); - assert_next!(a_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); - assert_next!(a_stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); + assert_event_sequence!( + a_stream, + &[ + TestCounter::CountIncreased { newCount: U256::from(1) }, + TestCounter::CountIncreased { newCount: U256::from(2) }, + TestCounter::CountIncreased { newCount: U256::from(3) } + ] + ); assert_empty!(a_stream); - assert_next!(b_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); - assert_next!(b_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); + assert_event_sequence!( + b_stream, + &[ + TestCounter::CountIncreased { newCount: U256::from(1) }, + TestCounter::CountIncreased { newCount: U256::from(2) } + ] + ); assert_empty!(b_stream); Ok(()) @@ -126,12 +136,22 @@ async fn multiple_events_same_contract() -> anyhow::Result<()> { contract.decrease().send().await?.watch().await?; contract.decrease().send().await?.watch().await?; - assert_next!(incr_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); - assert_next!(incr_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); + assert_event_sequence!( + incr_stream, + &[ + TestCounter::CountIncreased { newCount: U256::from(1) }, + TestCounter::CountIncreased { newCount: U256::from(2) } + ] + ); assert_empty!(incr_stream); - assert_next!(decr_stream, &[TestCounter::CountDecreased { newCount: U256::from(1) }]); - assert_next!(decr_stream, &[TestCounter::CountDecreased { newCount: U256::from(0) }]); + assert_event_sequence!( + decr_stream, + &[ + TestCounter::CountDecreased { newCount: U256::from(1) }, + TestCounter::CountDecreased { newCount: U256::from(0) } + ] + ); assert_empty!(decr_stream); Ok(()) diff --git a/tests/live/optional_fields.rs b/tests/live/optional_fields.rs index 60b4248a..19bfd066 100644 --- a/tests/live/optional_fields.rs +++ b/tests/live/optional_fields.rs @@ -8,7 +8,7 @@ use std::{ use crate::common::{TestCounter, deploy_counter, setup_live_scanner}; use alloy::{primitives::U256, sol_types::SolEvent}; -use event_scanner::{EventFilter, Message, assert_empty, assert_next}; +use 
event_scanner::{EventFilter, Message, assert_empty, assert_event_sequence, assert_next}; use tokio::time::timeout; use tokio_stream::StreamExt; @@ -115,9 +115,14 @@ async fn mixed_optional_and_required_filters() -> anyhow::Result<()> { contract_1.increase().send().await?.watch().await?; contract_1.increase().send().await?.watch().await?; - assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); - assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); - assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); + assert_event_sequence!( + all_stream, + &[ + TestCounter::CountIncreased { newCount: U256::from(1) }, + TestCounter::CountIncreased { newCount: U256::from(2) }, + TestCounter::CountIncreased { newCount: U256::from(3) } + ] + ); let mut all_stream = assert_empty!(all_stream); let mut specific_stream = assert_empty!(specific_stream); @@ -126,11 +131,21 @@ async fn mixed_optional_and_required_filters() -> anyhow::Result<()> { contract_2.increase().send().await?.watch().await?; contract_2.increase().send().await?.watch().await?; - assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); - assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); - - assert_next!(specific_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); - assert_next!(specific_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); + assert_event_sequence!( + all_stream, + &[ + TestCounter::CountIncreased { newCount: U256::from(1) }, + TestCounter::CountIncreased { newCount: U256::from(2) } + ] + ); + + assert_event_sequence!( + specific_stream, + &[ + TestCounter::CountIncreased { newCount: U256::from(1) }, + TestCounter::CountIncreased { newCount: U256::from(2) } + ] + ); let mut all_stream = assert_empty!(all_stream); let specific_stream = assert_empty!(specific_stream); diff --git a/tests/live/performance.rs b/tests/live/performance.rs index 2961e140..2ecbfe89 100644 --- a/tests/live/performance.rs +++ b/tests/live/performance.rs @@ -1,5 +1,5 @@ use alloy::primitives::U256; -use event_scanner::{assert_empty, assert_next}; +use event_scanner::{assert_empty, assert_event_sequence}; use crate::common::{LiveScannerSetup, TestCounter::CountIncreased, setup_live_scanner}; @@ -23,9 +23,9 @@ async fn high_event_volume_no_loss() -> anyhow::Result<()> { } }); - for new_count in 1..=100 { - assert_next!(stream, &[CountIncreased { newCount: U256::from(new_count) }]); - } + let expected = + (1..=100).map(|n| CountIncreased { newCount: U256::from(n) }).collect::>(); + assert_event_sequence!(stream, expected); assert_empty!(stream); Ok(()) diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs index 5a96d764..6eaea5a1 100644 --- a/tests/live/reorg.rs +++ b/tests/live/reorg.rs @@ -6,7 +6,7 @@ use alloy::{ providers::ext::AnvilApi, rpc::types::anvil::{ReorgOptions, TransactionData}, }; -use event_scanner::{ScannerStatus, assert_empty, assert_next}; +use event_scanner::{ScannerStatus, assert_empty, assert_event_sequence, assert_next}; #[tokio::test] async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { @@ -21,11 +21,16 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { } // assert initial events are emitted as expected - assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); - assert_next!(stream, &[CountIncreased { newCount: 
U256::from(3) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(5) }]); + assert_event_sequence!( + stream, + &[ + CountIncreased { newCount: U256::from(1) }, + CountIncreased { newCount: U256::from(2) }, + CountIncreased { newCount: U256::from(3) }, + CountIncreased { newCount: U256::from(4) }, + CountIncreased { newCount: U256::from(5) } + ] + ); let mut stream = assert_empty!(stream); // reorg the chain @@ -64,11 +69,16 @@ async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { } // assert initial events are emitted as expected - assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(5) }]); + assert_event_sequence!( + stream, + &[ + CountIncreased { newCount: U256::from(1) }, + CountIncreased { newCount: U256::from(2) }, + CountIncreased { newCount: U256::from(3) }, + CountIncreased { newCount: U256::from(4) }, + CountIncreased { newCount: U256::from(5) } + ] + ); let mut stream = assert_empty!(stream); // reorg the chain @@ -82,9 +92,14 @@ async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); - assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); + assert_event_sequence!( + stream, + &[ + CountIncreased { newCount: U256::from(2) }, + CountIncreased { newCount: U256::from(3) }, + CountIncreased { newCount: U256::from(4) } + ] + ); assert_empty!(stream); Ok(()) @@ -103,10 +118,15 @@ async fn reorg_depth_one() -> anyhow::Result<()> { } // assert initial events are emitted as expected - assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); + assert_event_sequence!( + stream, + &[ + CountIncreased { newCount: U256::from(1) }, + CountIncreased { newCount: U256::from(2) }, + CountIncreased { newCount: U256::from(3) }, + CountIncreased { newCount: U256::from(4) } + ] + ); let mut stream = assert_empty!(stream); // reorg the chain @@ -136,10 +156,15 @@ async fn reorg_depth_two() -> anyhow::Result<()> { } // assert initial events are emitted as expected - assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); + assert_event_sequence!( + stream, + &[ + CountIncreased { newCount: U256::from(1) }, + CountIncreased { newCount: U256::from(2) }, + CountIncreased { newCount: U256::from(3) }, + CountIncreased { newCount: U256::from(4) } + ] + ); let mut stream = assert_empty!(stream); // reorg the chain @@ -192,11 +217,14 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { provider.primary().anvil_mine(Some(10), None).await?; // no `ReorgDetected` should be emitted - 
assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); - assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); - assert_next!( + assert_event_sequence!( stream, - &[CountIncreased { newCount: U256::from(3) }, CountIncreased { newCount: U256::from(4) }] + &[ + CountIncreased { newCount: U256::from(1) }, + CountIncreased { newCount: U256::from(2) }, + CountIncreased { newCount: U256::from(3) }, + CountIncreased { newCount: U256::from(4) } + ] ); assert_empty!(stream); diff --git a/tests/sync/from_block.rs b/tests/sync/from_block.rs index 463e36d5..9872fb6e 100644 --- a/tests/sync/from_block.rs +++ b/tests/sync/from_block.rs @@ -4,7 +4,7 @@ use alloy::{ providers::ext::AnvilApi, rpc::types::anvil::{ReorgOptions, TransactionData}, }; -use event_scanner::{ScannerStatus, assert_empty, assert_next}; +use event_scanner::{ScannerStatus, assert_empty, assert_event_sequence, assert_next}; use crate::common::{SyncScannerSetup, TestCounter, setup_sync_scanner}; @@ -40,8 +40,13 @@ async fn replays_historical_then_switches_to_live() -> anyhow::Result<()> { assert_next!(stream, ScannerStatus::StartingLiveStream); // live events - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(5) }]); + assert_event_sequence!( + stream, + &[ + TestCounter::CountIncreased { newCount: U256::from(4) }, + TestCounter::CountIncreased { newCount: U256::from(5) } + ] + ); assert_empty!(stream); Ok(()) @@ -106,8 +111,13 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { // switching to "live" phase assert_next!(stream, ScannerStatus::StartingLiveStream); // assert confirmed live events are streamed separately - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); + assert_event_sequence!( + stream, + &[ + TestCounter::CountIncreased { newCount: U256::from(3) }, + TestCounter::CountIncreased { newCount: U256::from(4) } + ] + ); let stream = assert_empty!(stream); // Perform a shallow reorg on the live tail @@ -126,14 +136,14 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { provider.primary().anvil_mine(Some(10), None).await?; // no `ReorgDetected` should be emitted - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(5) }]); - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(6) }]); - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(7) }]); - assert_next!( + assert_event_sequence!( stream, &[ + TestCounter::CountIncreased { newCount: U256::from(5) }, + TestCounter::CountIncreased { newCount: U256::from(6) }, + TestCounter::CountIncreased { newCount: U256::from(7) }, TestCounter::CountIncreased { newCount: U256::from(8) }, - TestCounter::CountIncreased { newCount: U256::from(9) } + TestCounter::CountIncreased { newCount: U256::from(9) }, ] ); assert_empty!(stream); diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs index bde912e3..b6e4bc8f 100644 --- a/tests/sync/from_latest.rs +++ b/tests/sync/from_latest.rs @@ -1,7 +1,7 @@ use alloy::{primitives::U256, providers::ext::AnvilApi}; use crate::common::{TestCounter, setup_sync_from_latest_scanner}; -use event_scanner::{ScannerStatus, assert_empty, assert_next, assert_next_any}; +use event_scanner::{ScannerStatus, assert_empty, assert_event_sequence, assert_next}; #[tokio::test] async fn 
happy_path_no_duplicates() -> anyhow::Result<()> { @@ -37,8 +37,13 @@ async fn happy_path_no_duplicates() -> anyhow::Result<()> { // Transition to live assert_next!(stream, ScannerStatus::StartingLiveStream); - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(7) }]); - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(8) }]); + assert_event_sequence!( + stream, + &[ + TestCounter::CountIncreased { newCount: U256::from(7) }, + TestCounter::CountIncreased { newCount: U256::from(8) } + ] + ); Ok(()) } @@ -70,8 +75,13 @@ async fn fewer_historical_then_continues_live() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; assert_next!(stream, ScannerStatus::StartingLiveStream); - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); - assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); + assert_event_sequence!( + stream, + &[ + TestCounter::CountIncreased { newCount: U256::from(3) }, + TestCounter::CountIncreased { newCount: U256::from(4) } + ] + ); Ok(()) } @@ -127,14 +137,11 @@ async fn no_historical_only_live_streams() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; assert_next!(stream, ScannerStatus::StartingLiveStream); - assert_next_any!( + assert_event_sequence!( stream, - [ - vec![TestCounter::CountIncreased { newCount: U256::from(1) }], - vec![ - TestCounter::CountIncreased { newCount: U256::from(1) }, - TestCounter::CountIncreased { newCount: U256::from(2) } - ] + &[ + TestCounter::CountIncreased { newCount: U256::from(1) }, + TestCounter::CountIncreased { newCount: U256::from(2) } ] ); @@ -187,7 +194,7 @@ async fn waiting_on_live_logs_arriving() -> anyhow::Result<()> { let scanner = setup.scanner; let mut stream = setup.stream; - // Historical: emit 3, mine 1 empty block to form a clear boundary + // Historical: emit 3 contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; @@ -204,8 +211,7 @@ async fn waiting_on_live_logs_arriving() -> anyhow::Result<()> { ] ); - let inner = stream.into_inner(); - assert!(inner.is_empty()); + assert_empty!(stream); // `ScannerStatus::SwitchingToLive` arrives only on first live block received From 678f36907ccac36b82381aed449a925820aac927 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Mon, 17 Nov 2025 22:09:38 +0100 Subject: [PATCH 30/63] Revert "test: update all relevant test assertions to assert_event_sequence" This reverts commit 3cb37e290aa673d5ee89ef0f16235dced851bb42. 
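For reference while reading the revert below, these are the two assertion styles being
swapped back (sketch only; event and macro names as used in this test suite):

    // Per-batch style restored by this revert: one assertion per streamed
    // message, so the batching must match exactly.
    assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]);
    assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]);

    // Flattened style from PATCH 27: asserts the same events in order while
    // walking across stream batches, regardless of how they are grouped.
    assert_event_sequence!(
        stream,
        &[
            CountIncreased { newCount: U256::from(1) },
            CountIncreased { newCount: U256::from(2) },
        ]
    );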
--- tests/live/basic.rs | 40 +++++------------- tests/live/optional_fields.rs | 33 ++++----------- tests/live/performance.rs | 8 ++-- tests/live/reorg.rs | 80 ++++++++++++----------------------- tests/sync/from_block.rs | 30 +++++-------- tests/sync/from_latest.rs | 36 +++++++--------- 6 files changed, 74 insertions(+), 153 deletions(-) diff --git a/tests/live/basic.rs b/tests/live/basic.rs index a15f3887..2e34e5af 100644 --- a/tests/live/basic.rs +++ b/tests/live/basic.rs @@ -8,7 +8,7 @@ use std::{ use crate::common::{TestCounter, deploy_counter, setup_live_scanner}; use alloy::{primitives::U256, sol_types::SolEvent}; -use event_scanner::{EventFilter, Message, assert_empty, assert_event_sequence}; +use event_scanner::{EventFilter, Message, assert_empty, assert_next}; use tokio::time::timeout; use tokio_stream::StreamExt; @@ -88,23 +88,13 @@ async fn multiple_contracts_same_event_isolate_callbacks() -> anyhow::Result<()> b.increase().send().await?.watch().await?; b.increase().send().await?.watch().await?; - assert_event_sequence!( - a_stream, - &[ - TestCounter::CountIncreased { newCount: U256::from(1) }, - TestCounter::CountIncreased { newCount: U256::from(2) }, - TestCounter::CountIncreased { newCount: U256::from(3) } - ] - ); + assert_next!(a_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(a_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); + assert_next!(a_stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); assert_empty!(a_stream); - assert_event_sequence!( - b_stream, - &[ - TestCounter::CountIncreased { newCount: U256::from(1) }, - TestCounter::CountIncreased { newCount: U256::from(2) } - ] - ); + assert_next!(b_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(b_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); assert_empty!(b_stream); Ok(()) @@ -136,22 +126,12 @@ async fn multiple_events_same_contract() -> anyhow::Result<()> { contract.decrease().send().await?.watch().await?; contract.decrease().send().await?.watch().await?; - assert_event_sequence!( - incr_stream, - &[ - TestCounter::CountIncreased { newCount: U256::from(1) }, - TestCounter::CountIncreased { newCount: U256::from(2) } - ] - ); + assert_next!(incr_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(incr_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); assert_empty!(incr_stream); - assert_event_sequence!( - decr_stream, - &[ - TestCounter::CountDecreased { newCount: U256::from(1) }, - TestCounter::CountDecreased { newCount: U256::from(0) } - ] - ); + assert_next!(decr_stream, &[TestCounter::CountDecreased { newCount: U256::from(1) }]); + assert_next!(decr_stream, &[TestCounter::CountDecreased { newCount: U256::from(0) }]); assert_empty!(decr_stream); Ok(()) diff --git a/tests/live/optional_fields.rs b/tests/live/optional_fields.rs index 19bfd066..60b4248a 100644 --- a/tests/live/optional_fields.rs +++ b/tests/live/optional_fields.rs @@ -8,7 +8,7 @@ use std::{ use crate::common::{TestCounter, deploy_counter, setup_live_scanner}; use alloy::{primitives::U256, sol_types::SolEvent}; -use event_scanner::{EventFilter, Message, assert_empty, assert_event_sequence, assert_next}; +use event_scanner::{EventFilter, Message, assert_empty, assert_next}; use tokio::time::timeout; use tokio_stream::StreamExt; @@ -115,14 +115,9 @@ async fn mixed_optional_and_required_filters() -> anyhow::Result<()> { contract_1.increase().send().await?.watch().await?; 
contract_1.increase().send().await?.watch().await?; - assert_event_sequence!( - all_stream, - &[ - TestCounter::CountIncreased { newCount: U256::from(1) }, - TestCounter::CountIncreased { newCount: U256::from(2) }, - TestCounter::CountIncreased { newCount: U256::from(3) } - ] - ); + assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); + assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); let mut all_stream = assert_empty!(all_stream); let mut specific_stream = assert_empty!(specific_stream); @@ -131,21 +126,11 @@ async fn mixed_optional_and_required_filters() -> anyhow::Result<()> { contract_2.increase().send().await?.watch().await?; contract_2.increase().send().await?.watch().await?; - assert_event_sequence!( - all_stream, - &[ - TestCounter::CountIncreased { newCount: U256::from(1) }, - TestCounter::CountIncreased { newCount: U256::from(2) } - ] - ); - - assert_event_sequence!( - specific_stream, - &[ - TestCounter::CountIncreased { newCount: U256::from(1) }, - TestCounter::CountIncreased { newCount: U256::from(2) } - ] - ); + assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(all_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); + + assert_next!(specific_stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(specific_stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); let mut all_stream = assert_empty!(all_stream); let specific_stream = assert_empty!(specific_stream); diff --git a/tests/live/performance.rs b/tests/live/performance.rs index 2ecbfe89..2961e140 100644 --- a/tests/live/performance.rs +++ b/tests/live/performance.rs @@ -1,5 +1,5 @@ use alloy::primitives::U256; -use event_scanner::{assert_empty, assert_event_sequence}; +use event_scanner::{assert_empty, assert_next}; use crate::common::{LiveScannerSetup, TestCounter::CountIncreased, setup_live_scanner}; @@ -23,9 +23,9 @@ async fn high_event_volume_no_loss() -> anyhow::Result<()> { } }); - let expected = - (1..=100).map(|n| CountIncreased { newCount: U256::from(n) }).collect::>(); - assert_event_sequence!(stream, expected); + for new_count in 1..=100 { + assert_next!(stream, &[CountIncreased { newCount: U256::from(new_count) }]); + } assert_empty!(stream); Ok(()) diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs index 6eaea5a1..5a96d764 100644 --- a/tests/live/reorg.rs +++ b/tests/live/reorg.rs @@ -6,7 +6,7 @@ use alloy::{ providers::ext::AnvilApi, rpc::types::anvil::{ReorgOptions, TransactionData}, }; -use event_scanner::{ScannerStatus, assert_empty, assert_event_sequence, assert_next}; +use event_scanner::{ScannerStatus, assert_empty, assert_next}; #[tokio::test] async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { @@ -21,16 +21,11 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { } // assert initial events are emitted as expected - assert_event_sequence!( - stream, - &[ - CountIncreased { newCount: U256::from(1) }, - CountIncreased { newCount: U256::from(2) }, - CountIncreased { newCount: U256::from(3) }, - CountIncreased { newCount: U256::from(4) }, - CountIncreased { newCount: U256::from(5) } - ] - ); + assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(3) 
}]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(5) }]); let mut stream = assert_empty!(stream); // reorg the chain @@ -69,16 +64,11 @@ async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { } // assert initial events are emitted as expected - assert_event_sequence!( - stream, - &[ - CountIncreased { newCount: U256::from(1) }, - CountIncreased { newCount: U256::from(2) }, - CountIncreased { newCount: U256::from(3) }, - CountIncreased { newCount: U256::from(4) }, - CountIncreased { newCount: U256::from(5) } - ] - ); + assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(5) }]); let mut stream = assert_empty!(stream); // reorg the chain @@ -92,14 +82,9 @@ async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); - assert_event_sequence!( - stream, - &[ - CountIncreased { newCount: U256::from(2) }, - CountIncreased { newCount: U256::from(3) }, - CountIncreased { newCount: U256::from(4) } - ] - ); + assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); assert_empty!(stream); Ok(()) @@ -118,15 +103,10 @@ async fn reorg_depth_one() -> anyhow::Result<()> { } // assert initial events are emitted as expected - assert_event_sequence!( - stream, - &[ - CountIncreased { newCount: U256::from(1) }, - CountIncreased { newCount: U256::from(2) }, - CountIncreased { newCount: U256::from(3) }, - CountIncreased { newCount: U256::from(4) } - ] - ); + assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); let mut stream = assert_empty!(stream); // reorg the chain @@ -156,15 +136,10 @@ async fn reorg_depth_two() -> anyhow::Result<()> { } // assert initial events are emitted as expected - assert_event_sequence!( - stream, - &[ - CountIncreased { newCount: U256::from(1) }, - CountIncreased { newCount: U256::from(2) }, - CountIncreased { newCount: U256::from(3) }, - CountIncreased { newCount: U256::from(4) } - ] - ); + assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); let mut stream = assert_empty!(stream); // reorg the chain @@ -217,14 +192,11 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { provider.primary().anvil_mine(Some(10), None).await?; // no `ReorgDetected` should be emitted - assert_event_sequence!( + assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); + assert_next!( stream, - &[ - CountIncreased { newCount: U256::from(1) }, - CountIncreased { newCount: U256::from(2) }, - 
CountIncreased { newCount: U256::from(3) }, - CountIncreased { newCount: U256::from(4) } - ] + &[CountIncreased { newCount: U256::from(3) }, CountIncreased { newCount: U256::from(4) }] ); assert_empty!(stream); diff --git a/tests/sync/from_block.rs b/tests/sync/from_block.rs index 9872fb6e..463e36d5 100644 --- a/tests/sync/from_block.rs +++ b/tests/sync/from_block.rs @@ -4,7 +4,7 @@ use alloy::{ providers::ext::AnvilApi, rpc::types::anvil::{ReorgOptions, TransactionData}, }; -use event_scanner::{ScannerStatus, assert_empty, assert_event_sequence, assert_next}; +use event_scanner::{ScannerStatus, assert_empty, assert_next}; use crate::common::{SyncScannerSetup, TestCounter, setup_sync_scanner}; @@ -40,13 +40,8 @@ async fn replays_historical_then_switches_to_live() -> anyhow::Result<()> { assert_next!(stream, ScannerStatus::StartingLiveStream); // live events - assert_event_sequence!( - stream, - &[ - TestCounter::CountIncreased { newCount: U256::from(4) }, - TestCounter::CountIncreased { newCount: U256::from(5) } - ] - ); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(5) }]); assert_empty!(stream); Ok(()) @@ -111,13 +106,8 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { // switching to "live" phase assert_next!(stream, ScannerStatus::StartingLiveStream); // assert confirmed live events are streamed separately - assert_event_sequence!( - stream, - &[ - TestCounter::CountIncreased { newCount: U256::from(3) }, - TestCounter::CountIncreased { newCount: U256::from(4) } - ] - ); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); let stream = assert_empty!(stream); // Perform a shallow reorg on the live tail @@ -136,14 +126,14 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { provider.primary().anvil_mine(Some(10), None).await?; // no `ReorgDetected` should be emitted - assert_event_sequence!( + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(5) }]); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(6) }]); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(7) }]); + assert_next!( stream, &[ - TestCounter::CountIncreased { newCount: U256::from(5) }, - TestCounter::CountIncreased { newCount: U256::from(6) }, - TestCounter::CountIncreased { newCount: U256::from(7) }, TestCounter::CountIncreased { newCount: U256::from(8) }, - TestCounter::CountIncreased { newCount: U256::from(9) }, + TestCounter::CountIncreased { newCount: U256::from(9) } ] ); assert_empty!(stream); diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs index b6e4bc8f..bde912e3 100644 --- a/tests/sync/from_latest.rs +++ b/tests/sync/from_latest.rs @@ -1,7 +1,7 @@ use alloy::{primitives::U256, providers::ext::AnvilApi}; use crate::common::{TestCounter, setup_sync_from_latest_scanner}; -use event_scanner::{ScannerStatus, assert_empty, assert_event_sequence, assert_next}; +use event_scanner::{ScannerStatus, assert_empty, assert_next, assert_next_any}; #[tokio::test] async fn happy_path_no_duplicates() -> anyhow::Result<()> { @@ -37,13 +37,8 @@ async fn happy_path_no_duplicates() -> anyhow::Result<()> { // Transition to live assert_next!(stream, ScannerStatus::StartingLiveStream); - assert_event_sequence!( - stream, - &[ - TestCounter::CountIncreased { newCount: U256::from(7) 
}, - TestCounter::CountIncreased { newCount: U256::from(8) } - ] - ); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(7) }]); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(8) }]); Ok(()) } @@ -75,13 +70,8 @@ async fn fewer_historical_then_continues_live() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; assert_next!(stream, ScannerStatus::StartingLiveStream); - assert_event_sequence!( - stream, - &[ - TestCounter::CountIncreased { newCount: U256::from(3) }, - TestCounter::CountIncreased { newCount: U256::from(4) } - ] - ); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); Ok(()) } @@ -137,11 +127,14 @@ async fn no_historical_only_live_streams() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; assert_next!(stream, ScannerStatus::StartingLiveStream); - assert_event_sequence!( + assert_next_any!( stream, - &[ - TestCounter::CountIncreased { newCount: U256::from(1) }, - TestCounter::CountIncreased { newCount: U256::from(2) } + [ + vec![TestCounter::CountIncreased { newCount: U256::from(1) }], + vec![ + TestCounter::CountIncreased { newCount: U256::from(1) }, + TestCounter::CountIncreased { newCount: U256::from(2) } + ] ] ); @@ -194,7 +187,7 @@ async fn waiting_on_live_logs_arriving() -> anyhow::Result<()> { let scanner = setup.scanner; let mut stream = setup.stream; - // Historical: emit 3 + // Historical: emit 3, mine 1 empty block to form a clear boundary contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; @@ -211,7 +204,8 @@ async fn waiting_on_live_logs_arriving() -> anyhow::Result<()> { ] ); - assert_empty!(stream); + let inner = stream.into_inner(); + assert!(inner.is_empty()); // `ScannerStatus::SwitchingToLive` arrives only on first live block received From 52dfb0ea722d42ce5e531175e59b3a8b44810ecd Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Mon, 17 Nov 2025 22:09:42 +0100 Subject: [PATCH 31/63] Revert "docs: add todo in macros for similar macro for ranges" This reverts commit cb8577e48912b9da9274357a8cefbc66e168095a. --- src/test_utils/macros.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/test_utils/macros.rs b/src/test_utils/macros.rs index 0b4a920e..93515328 100644 --- a/src/test_utils/macros.rs +++ b/src/test_utils/macros.rs @@ -18,8 +18,6 @@ macro_rules! assert_next { }; } -// TODO: implement assert_range_coverage - #[macro_export] macro_rules! assert_event_sequence { ($stream: expr, $expected_options: expr) => { From ec6258ff5fe1174ebdce04c37cb857e5c63549d2 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Mon, 17 Nov 2025 22:09:44 +0100 Subject: [PATCH 32/63] Revert "test: implement assert_event_sequence" This reverts commit 5c095cfcd0f909b3537bf03e4687ccd4e4eeb71f. --- src/test_utils/macros.rs | 50 +++++++++++----------------------------- 1 file changed, 13 insertions(+), 37 deletions(-) diff --git a/src/test_utils/macros.rs b/src/test_utils/macros.rs index 93515328..9c8a1b18 100644 --- a/src/test_utils/macros.rs +++ b/src/test_utils/macros.rs @@ -19,47 +19,23 @@ macro_rules! assert_next { } #[macro_export] -macro_rules! assert_event_sequence { +macro_rules! 
assert_next_any { ($stream: expr, $expected_options: expr) => { - assert_event_sequence!($stream, $expected_options, timeout = 5) + assert_next_any!($stream, $expected_options, timeout = 5) }; ($stream: expr, $expected_options: expr, timeout = $secs: expr) => { - let expected_options = $expected_options; - if expected_options.is_empty() { - panic!("assert_event_sequence! called with empty array. Use assert_empty! macro instead to check for no pending messages."); - } - - let mut remaining = expected_options.iter(); - let start = std::time::Instant::now(); - let timeout_duration = std::time::Duration::from_secs($secs); - - while let Some(expected) = remaining.next() { - let elapsed = start.elapsed(); - if elapsed >= timeout_duration { - panic!("Timed out waiting for events. Still expecting: {:#?}", remaining); - } - - let time_left = timeout_duration - elapsed; - let message = - tokio::time::timeout(time_left, tokio_stream::StreamExt::next(&mut $stream)) - .await - .expect("timed out waiting for next batch"); + let message = tokio::time::timeout( + std::time::Duration::from_secs($secs), + tokio_stream::StreamExt::next(&mut $stream), + ) + .await + .expect("timed out"); - match message { - Some($crate::ScannerMessage::Data(batch)) => { - let mut batch = batch.iter(); - let event = batch.next().expect("Streamed batch should not be empty"); - assert_eq!(&alloy::sol_types::SolEvent::encode_log_data(expected), event.data(), "Unexpected event: {:#?}\nExpected: {:#?}\nRemaining: {:#?}", event, expected, remaining); - while let Some(event) = batch.next() { - let expected = remaining.next().unwrap_or_else(|| panic!("Received more events than expected, current: {:#?}\nStreamed batch: {:#?}", event, batch)); - assert_eq!(&alloy::sol_types::SolEvent::encode_log_data(expected), event.data(), "Unexpected event: {:#?}\nExpected: {:#?}\nRemaining: {:#?}", event, expected, remaining); - } - } - Some(other) => { - panic!("Expected ScannerMessage::Data, got: {:#?}", other); - } - None => {panic!("Stream closed while still expecting: {:#?}", remaining);} - } + if let Some(data) = message { + let matched = $expected_options.iter().any(|expected| data == *expected); + assert!(matched, "Expected one of:\n{:#?}\n\nGot:\n{:#?}", $expected_options, data); + } else { + panic!("Expected one of {:?}, but channel was closed", $expected_options) } }; } From c108cc2b1e4e1f5c96dc026b9e9a793fc454ffb8 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Mon, 17 Nov 2025 22:10:10 +0100 Subject: [PATCH 33/63] Revert "Revert "test: remove assert_next_any"" This reverts commit 78cd3e6e6347e639798c45f5d09ffd4f0fb1b765. --- src/test_utils/macros.rs | 22 ---------------------- tests/sync/from_latest.rs | 14 +++----------- 2 files changed, 3 insertions(+), 33 deletions(-) diff --git a/src/test_utils/macros.rs b/src/test_utils/macros.rs index 9c8a1b18..a595e524 100644 --- a/src/test_utils/macros.rs +++ b/src/test_utils/macros.rs @@ -18,28 +18,6 @@ macro_rules! assert_next { }; } -#[macro_export] -macro_rules! 
assert_next_any { - ($stream: expr, $expected_options: expr) => { - assert_next_any!($stream, $expected_options, timeout = 5) - }; - ($stream: expr, $expected_options: expr, timeout = $secs: expr) => { - let message = tokio::time::timeout( - std::time::Duration::from_secs($secs), - tokio_stream::StreamExt::next(&mut $stream), - ) - .await - .expect("timed out"); - - if let Some(data) = message { - let matched = $expected_options.iter().any(|expected| data == *expected); - assert!(matched, "Expected one of:\n{:#?}\n\nGot:\n{:#?}", $expected_options, data); - } else { - panic!("Expected one of {:?}, but channel was closed", $expected_options) - } - }; -} - #[macro_export] macro_rules! assert_closed { ($stream: expr) => { diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs index bde912e3..f8d4925a 100644 --- a/tests/sync/from_latest.rs +++ b/tests/sync/from_latest.rs @@ -1,7 +1,7 @@ use alloy::{primitives::U256, providers::ext::AnvilApi}; use crate::common::{TestCounter, setup_sync_from_latest_scanner}; -use event_scanner::{ScannerStatus, assert_empty, assert_next, assert_next_any}; +use event_scanner::{ScannerStatus, assert_empty, assert_next}; #[tokio::test] async fn happy_path_no_duplicates() -> anyhow::Result<()> { @@ -127,16 +127,8 @@ async fn no_historical_only_live_streams() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; assert_next!(stream, ScannerStatus::StartingLiveStream); - assert_next_any!( - stream, - [ - vec![TestCounter::CountIncreased { newCount: U256::from(1) }], - vec![ - TestCounter::CountIncreased { newCount: U256::from(1) }, - TestCounter::CountIncreased { newCount: U256::from(2) } - ] - ] - ); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); Ok(()) } From 02343608aac575d5ef65c777788db030a62dc764 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Tue, 18 Nov 2025 10:04:43 +0100 Subject: [PATCH 34/63] ref: use historical mode fn for initial batch when live --- src/block_range_scanner.rs | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index d37232c4..265d71d7 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -685,31 +685,25 @@ impl Service { let confirmed = incoming_block_num.saturating_sub(block_confirmations); + Self::stream_historical_blocks( + stream_start, + confirmed, + max_block_range, + &sender, + provider, + reorg_handler, + ) + .await; + let mut batch_start = stream_start; - let mut batch_end: Option<::BlockResponse>; - // TODO: include reorg handling here, maybe rely on historic handling fn - loop { - let batch_end_num = confirmed.min(batch_start.saturating_add(max_block_range - 1)); - batch_end = match provider.get_block_by_number(batch_end_num.into()).await { - Ok(block) => Some(block), - Err(e) => { - error!(batch_start = batch_start, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); - _ = sender.try_stream(e).await; - return; - } - }; - if !sender.try_stream(batch_start..=batch_end_num).await { + let mut batch_end = match provider.get_block_by_number(confirmed.into()).await { + Ok(block) => Some(block), + Err(e) => { + error!(batch_start = batch_start, batch_end = confirmed, error = %e, "Failed to get initial batch end block"); + _ = sender.try_stream(e).await; return; } - if batch_end_num == confirmed { - break; - } - 
batch_start = batch_end_num + 1; - } - - // reset batch start - let mut batch_start = stream_start; - // batch_end is now set + }; while let Some(incoming_block) = stream.next().await { let incoming_block_num = incoming_block.number(); From aabd5e291cbeefd5427dea5b65e2eb852b55758e Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Tue, 18 Nov 2025 10:13:33 +0100 Subject: [PATCH 35/63] ref: live: batch_end -> previous_batch_end --- src/block_range_scanner.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 265d71d7..d94fab88 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -696,7 +696,7 @@ impl Service { .await; let mut batch_start = stream_start; - let mut batch_end = match provider.get_block_by_number(confirmed.into()).await { + let mut previous_batch_end = match provider.get_block_by_number(confirmed.into()).await { Ok(block) => Some(block), Err(e) => { error!(batch_start = batch_start, batch_end = confirmed, error = %e, "Failed to get initial batch end block"); @@ -709,7 +709,7 @@ impl Service { let incoming_block_num = incoming_block.number(); info!(block_number = incoming_block_num, "Received block header"); - let reorged_opt = match batch_end.as_ref() { + let reorged_opt = match previous_batch_end.as_ref() { None => None, Some(batch_end) => match reorg_handler.check(batch_end).await { Ok(opt) => opt, @@ -728,10 +728,10 @@ impl Service { // no need to stream blocks prior to the previously specified starting block if common_ancestor.header().number() < stream_start { batch_start = stream_start; - batch_end = None; + previous_batch_end = None; } else { batch_start = common_ancestor.header().number() + 1; - batch_end = Some(common_ancestor); + previous_batch_end = Some(common_ancestor); } // TODO: explain in docs that the returned block after a reorg will be the @@ -742,8 +742,8 @@ impl Service { // no reorg happened, move the block range back to expected next start // // SAFETY: Overflow cannot realistically happen - if let Some(batch_end) = batch_end.as_ref() { - batch_start = batch_end.header().number() + 1; + if let Some(prev_batch_end) = previous_batch_end.as_ref() { + batch_start = prev_batch_end.header().number() + 1; } } @@ -754,7 +754,10 @@ impl Service { // reads let batch_end_num = confirmed.min(batch_start.saturating_add(max_block_range - 1)); - batch_end = match provider.get_block_by_number(batch_end_num.into()).await { + previous_batch_end = match provider + .get_block_by_number(batch_end_num.into()) + .await + { Ok(block) => Some(block), Err(e) => { error!(batch_start = batch_start, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); From 667d08f847b08870986633ad85b24570bc10a294 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Tue, 18 Nov 2025 10:15:40 +0100 Subject: [PATCH 36/63] test: ref shallow_block_confirmation_does_not_mitigate_reorg --- tests/block_range_scanner.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index acd724bc..83a7ff0c 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -161,11 +161,17 @@ async fn shallow_block_confirmation_does_not_mitigate_reorg() -> anyhow::Result< // reorg more blocks than the block_confirmation config provider.anvil_reorg(ReorgOptions { depth: 8, tx_block_pairs: vec![] }).await?; - // mint additional blocks to allow the scanner to stream the pre-reorg blocks 
- provider.anvil_mine(Some(3), None).await?;
+    // mine 1 block to allow the scanner to process the reorged blocks (those previously streamed
+    // plus the block confirmed now)
+    provider.anvil_mine(Some(1), None).await?;
 
     assert_next!(stream, ScannerStatus::ReorgDetected);
-    assert_range_coverage!(stream, 3..=10);
+    assert_range_coverage!(stream, 3..=8);
+    let mut stream = assert_empty!(stream);
+
+    // mine additional blocks to allow the scanner to stream all of the pre-reorg blocks
+    provider.anvil_mine(Some(3), None).await?;
+    assert_range_coverage!(stream, 9..=10);
     assert_empty!(stream);
 
     Ok(())

From 05fc91ac840a4d5b994048e7864033dad4ded7ca Mon Sep 17 00:00:00 2001
From: 0xNeshi 
Date: Tue, 18 Nov 2025 10:43:34 +0100
Subject: [PATCH 37/63] feat: use historical handling when streaming live ranges larger than 1

---
 src/block_range_scanner.rs | 78 +++++++++++++++++---------------------
 1 file changed, 34 insertions(+), 44 deletions(-)

diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs
index d94fab88..dcf3d2fd 100644
--- a/src/block_range_scanner.rs
+++ b/src/block_range_scanner.rs
@@ -605,26 +605,26 @@ impl Service {
         sender: &mpsc::Sender,
         provider: &RobustProvider,
         reorg_handler: &mut ReorgHandler,
-    ) {
+    ) -> Option {
         let mut batch_count = 0;
         let mut next_start_block = start;
 
         // must be <= to include the edge case when start == end (i.e. return the single block
         // range)
-        while next_start_block <= end {
+        loop {
             let batch_end_num = next_start_block.saturating_add(max_block_range - 1).min(end);
 
             let batch_end = match provider.get_block_by_number(batch_end_num.into()).await {
                 Ok(block) => block,
                 Err(e) => {
                     error!(batch_start = next_start_block, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch");
                     _ = sender.try_stream(e).await;
-                    return;
+                    return None;
                 }
             };
 
             if !sender.try_stream(next_start_block..=batch_end_num).await {
-                break;
+                return Some(batch_end);
             }
 
             batch_count += 1;
@@ -637,21 +637,24 @@ impl Service {
                 Err(e) => {
                     error!(error = %e, "Failed to perform reorg check");
                     _ = sender.try_stream(e).await;
-                    return;
+                    return None;
                 }
             };
 
             next_start_block = if let Some(common_ancestor) = reorged_opt {
                 if !sender.try_stream(ScannerStatus::ReorgDetected).await {
-                    return;
+                    return None;
                 }
                 common_ancestor.header().number() + 1
             } else {
                 batch_end_num.saturating_add(1)
             };
-        }
 
-        info!(batch_count = batch_count, "Historical sync completed");
+            if next_start_block > end {
+                info!(batch_count = batch_count, "Historical sync completed");
+                return Some(batch_end);
+            }
+        }
     }
 
     async fn stream_live_blocks(
@@ -685,7 +688,7 @@ impl Service {
 
         let confirmed = incoming_block_num.saturating_sub(block_confirmations);
 
-        Self::stream_historical_blocks(
+        let mut previous_batch_end = Self::stream_historical_blocks(
             stream_start,
             confirmed,
             max_block_range,
@@ -695,15 +698,12 @@ impl Service {
         )
         .await;
 
+        if previous_batch_end.is_none() {
+            // the sender channel is closed
+            return;
+        }
+
         let mut batch_start = stream_start;
-        let mut previous_batch_end = match provider.get_block_by_number(confirmed.into()).await {
-            Ok(block) => Some(block),
-            Err(e) => {
-                error!(batch_start = batch_start, batch_end = confirmed, error = %e, "Failed to get initial batch end block");
-                _ = sender.try_stream(e).await;
-                return;
-            }
-        };
 
         while let Some(incoming_block) = stream.next().await {
             let incoming_block_num = incoming_block.number();
@@ -747,35 +747,25 @@ impl Service {
                 }
             }
 
-            let confirmed = incoming_block_num.saturating_sub(block_confirmations);
-            if confirmed >= batch_start {
-                loop {
-                    // 
NOTE: Edge case when difference between range end and range start >= max - // reads - let batch_end_num = - confirmed.min(batch_start.saturating_add(max_block_range - 1)); - previous_batch_end = match provider - .get_block_by_number(batch_end_num.into()) - .await - { - Ok(block) => Some(block), - Err(e) => { - error!(batch_start = batch_start, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); - _ = sender.try_stream(e).await; - return; - } - }; - if !sender.try_stream(batch_start..=batch_end_num).await { - return; - } - - // SAFETY: Overflow cannot realistically happen - batch_start = batch_end_num + 1; + let batch_end_num = incoming_block_num.saturating_sub(block_confirmations); + if batch_end_num >= batch_start { + previous_batch_end = Self::stream_historical_blocks( + batch_start, + batch_end_num, + max_block_range, + &sender, + provider, + reorg_handler, + ) + .await; - if batch_end_num == confirmed { - break; - } + if previous_batch_end.is_none() { + // the sender channel is closed + return; } + + // SAFETY: Overflow cannot realistically happen + batch_start = batch_end_num + 1; } } } From e17c72462b0091ff41ba3420af82e0d459796bfc Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Wed, 19 Nov 2025 17:22:58 +0100 Subject: [PATCH 38/63] ref: clippy --- src/block_range_scanner/ring_buffer.rs | 4 ++-- src/event_scanner/scanner/common.rs | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/block_range_scanner/ring_buffer.rs b/src/block_range_scanner/ring_buffer.rs index 22ca6cd8..f40d6e42 100644 --- a/src/block_range_scanner/ring_buffer.rs +++ b/src/block_range_scanner/ring_buffer.rs @@ -7,7 +7,7 @@ pub(crate) struct RingBuffer { } impl RingBuffer { - /// Creates an empty RingBuffer with a specific capacity. + /// Creates an empty [`RingBuffer`] with a specific capacity. pub fn new(capacity: usize) -> Self { Self { inner: VecDeque::with_capacity(capacity), capacity } } @@ -30,6 +30,6 @@ impl RingBuffer { } pub fn clear(&mut self) { - self.inner.clear() + self.inner.clear(); } } diff --git a/src/event_scanner/scanner/common.rs b/src/event_scanner/scanner/common.rs index 44c4f55c..9c446a1a 100644 --- a/src/event_scanner/scanner/common.rs +++ b/src/event_scanner/scanner/common.rs @@ -148,12 +148,12 @@ pub fn spawn_log_consumers( } if let ConsumerMode::CollectLatest { .. } = mode { - if !collected.is_empty() { - collected.reverse(); // restore chronological order + if collected.is_empty() { + info!("No latest logs collected"); + } else { info!("Sending collected logs to consumer"); + collected.reverse(); // restore chronological order _ = sender.try_stream(collected).await; - } else { - info!("No latest logs collected"); } } }); From 679bbf664ace20b7d4c31510b15e5955dc808c47 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Wed, 19 Nov 2025 18:01:00 +0100 Subject: [PATCH 39/63] ref: update when 'start block before conf. 
tip' log is shown --- src/block_range_scanner.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 3b8e5b9e..310cb620 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -425,11 +425,14 @@ impl Service { }); return Ok(()); + } else if start_block < confirmed_tip { + info!( + start_block = start_block, + confirmed_tip = confirmed_tip, + "Start block is before confirmed tip, syncing historical data" + ); } - info!(start_block = start_block, end_block = confirmed_tip, "Syncing historical data"); - - // This task runs independently, accumulating new blocks while wehistorical data is syncing tokio::spawn(async move { while start_block < confirmed_tip { Self::stream_historical_blocks( From d8607bdf38b8a0b01f0464b5b34aa440f6ae2ece Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 08:39:49 +0100 Subject: [PATCH 40/63] test: update error messages for assert_event_sequence --- src/test_utils/macros.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/test_utils/macros.rs b/src/test_utils/macros.rs index 78e60328..b2106f5f 100644 --- a/src/test_utils/macros.rs +++ b/src/test_utils/macros.rs @@ -176,14 +176,17 @@ pub async fn assert_event_sequence + Unpin>( assert!( elapsed < timeout_duration, - "Timed out waiting for events. Still expecting: {:#?}", + "Timed out waiting for events.\nNext Expected:\n{:#?}\nRemaining:\n{:#?}", + expected, remaining.collect::>() ); let time_left = timeout_duration - elapsed; let message = tokio::time::timeout(time_left, tokio_stream::StreamExt::next(stream)) .await - .expect("timed out waiting for next batch"); + .unwrap_or_else(|_| { + panic!("timed out waiting for next stream batch, expected event: {expected:#?}") + }); match message { Some(Message::Data(batch)) => { From bd77eaa244ad46c01001dc65bfaf816344bd4578 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 08:41:07 +0100 Subject: [PATCH 41/63] test: remove assert_empty from no_historical_only_live_streams --- tests/sync/from_latest.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs index ffabc8b4..004200b5 100644 --- a/tests/sync/from_latest.rs +++ b/tests/sync/from_latest.rs @@ -128,17 +128,15 @@ async fn no_historical_only_live_streams() -> anyhow::Result<()> { let setup = setup_sync_from_latest_scanner(None, None, 5, 0).await?; let contract = setup.contract; let scanner = setup.scanner; - let stream = setup.stream; + let mut stream = setup.stream; scanner.start().await?; - // Latest is empty - let mut stream = assert_empty!(stream); - // Live events arrive contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; + // Latest is empty assert_next!(stream, Notification::StartingLiveStream); assert_event_sequence_final!( stream, From 4dc736fcfaa040f89b650f7861b1624dc8f9fe42 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 08:41:33 +0100 Subject: [PATCH 42/63] fix: emit StartingLiveStream before receiving first event on live and sync --- src/block_range_scanner.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 310cb620..ead4d902 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -313,6 +313,11 @@ impl Service { info!("WebSocket connected for live blocks"); + if 
!sender.try_stream(Notification::StartingLiveStream).await {
+            // TODO: return a new "ReceiverDropped" error?
+            return Ok(());
+        }
+
         tokio::spawn(async move {
             Self::stream_live_blocks(
                 range_start,
@@ -469,6 +474,10 @@ impl Service {
                 }
             };
 
+            if !sender.try_stream(Notification::StartingLiveStream).await {
+                return;
+            }
+
             info!("Successfully transitioned from historical to live data");
 
             Self::stream_live_blocks(
@@ -479,7 +488,7 @@ impl Service {
                 block_confirmations,
                 max_block_range,
                 &mut reorg_handler,
-                true,
+                false,
             )
             .await;
         });

From d0afb65ccab0b491cfadd0cf8aafceb28aba1dc9 Mon Sep 17 00:00:00 2001
From: 0xNeshi 
Date: Thu, 20 Nov 2025 08:44:18 +0100
Subject: [PATCH 43/63] fix: stop emitting StartingLiveStream in live

---
 src/block_range_scanner.rs | 68 ++++++++++++++++++--------------------
 1 file changed, 33 insertions(+), 35 deletions(-)

diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs
index ead4d902..081cb25f 100644
--- a/src/block_range_scanner.rs
+++ b/src/block_range_scanner.rs
@@ -313,11 +313,6 @@ impl Service {
 
         info!("WebSocket connected for live blocks");
 
-        if !sender.try_stream(Notification::StartingLiveStream).await {
-            // TODO: return a new "ReceiverDropped" error?
-            return Ok(());
-        }
-
         tokio::spawn(async move {
             Self::stream_live_blocks(
                 range_start,
@@ -430,41 +425,44 @@ impl Service {
             });
 
             return Ok(());
-        } else if start_block < confirmed_tip {
-            info!(
-                start_block = start_block,
-                confirmed_tip = confirmed_tip,
-                "Start block is before confirmed tip, syncing historical data"
-            );
         }
 
         tokio::spawn(async move {
-            while start_block < confirmed_tip {
-                Self::stream_historical_blocks(
-                    start_block,
-                    confirmed_tip,
-                    max_block_range,
-                    &sender,
-                    &provider,
-                    &mut reorg_handler,
-                )
-                .await;
+            if start_block < confirmed_tip {
+                info!(
+                    start_block = start_block,
+                    confirmed_tip = confirmed_tip,
+                    "Start block is before confirmed tip, syncing historical data"
+                );
+
+                while start_block < confirmed_tip {
+                    Self::stream_historical_blocks(
+                        start_block,
+                        confirmed_tip,
+                        max_block_range,
+                        &sender,
+                        &provider,
+                        &mut reorg_handler,
+                    )
+                    .await;
+
+                    let latest = match provider.get_block_by_number(BlockNumberOrTag::Latest).await
+                    {
+                        Ok(block) => block.header().number(),
+                        Err(e) => {
+                            error!(error = %e, "Error getting latest block when calculating next historical batch, shutting down");
+                            _ = sender.try_stream(e).await;
+                            return;
+                        }
+                    };
 
-                let latest = match provider.get_block_by_number(BlockNumberOrTag::Latest).await {
-                    Ok(block) => block.header().number(),
-                    Err(e) => {
-                        error!(error = %e, "Error latest block when calculating next historical batch, shutting down");
-                        _ = sender.try_stream(e).await;
-                        return;
-                    }
-                };
+                    start_block = confirmed_tip + 1;
+                    confirmed_tip = latest.saturating_sub(block_confirmations);
+                }
 
-                start_block = confirmed_tip + 1;
-                confirmed_tip = latest.saturating_sub(block_confirmations);
+                info!("Chain tip reached, switching to live");
             }
 
-            info!("Chain tip reached, switching to live");
-
             let subscription = match provider.subscribe_blocks().await {
                 Ok(sub) => sub,
                 Err(e) => {
@@ -679,7 +677,7 @@ impl Service {
             block_confirmations: u64,
             max_block_range: u64,
             reorg_handler: &mut ReorgHandler,
-            notify: bool,
+            notify_after_first_block: bool,
         ) {
             // ensure we start streaming only after the specified starting block
             let mut stream = subscription.into_stream().skip_while(|header| {
@@ -691,7 +689,7 @@
                 return;
             };
 
-            if notify && !sender.try_stream(Notification::StartingLiveStream).await {
+            if notify_after_first_block && 
!sender.try_stream(Notification::StartingLiveStream).await { return; } From f0e8fbd90e01e5a2e8d740bb461a1f9fca614016 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 08:47:23 +0100 Subject: [PATCH 44/63] test:fix no_historical_only_live_streams: introduce lag --- tests/sync/from_latest.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs index 004200b5..31c7be51 100644 --- a/tests/sync/from_latest.rs +++ b/tests/sync/from_latest.rs @@ -1,4 +1,7 @@ +use std::time::Duration; + use alloy::{primitives::U256, providers::ext::AnvilApi}; +use tokio::time::sleep; use crate::common::{TestCounter, setup_sync_from_latest_scanner}; use event_scanner::{Notification, assert_empty, assert_event_sequence_final, assert_next}; @@ -132,6 +135,9 @@ async fn no_historical_only_live_streams() -> anyhow::Result<()> { scanner.start().await?; + // give scanner time to start + sleep(Duration::from_millis(10)).await; + // Live events arrive contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; From 377b2431e95c32735e55fa06bd25212542d8f6b7 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 08:54:56 +0100 Subject: [PATCH 45/63] remove unused partialeq impl --- src/event_scanner/message.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/event_scanner/message.rs b/src/event_scanner/message.rs index e5319711..ebd1081a 100644 --- a/src/event_scanner/message.rs +++ b/src/event_scanner/message.rs @@ -35,12 +35,6 @@ impl PartialEq<&[E; N]> for Message { } } -impl PartialEq<[E; N]> for Message { - fn eq(&self, other: &[E; N]) -> bool { - self.eq(&other) - } -} - impl PartialEq<&[E]> for Message { fn eq(&self, other: &&[E]) -> bool { if let Message::Data(logs) = self { From cf6f84cada8c842e16a0417f1f3349e7f48f2b79 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 09:05:06 +0100 Subject: [PATCH 46/63] test: ref from_latest --- tests/sync/from_latest.rs | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs index 31c7be51..ef2b9b9e 100644 --- a/tests/sync/from_latest.rs +++ b/tests/sync/from_latest.rs @@ -33,12 +33,15 @@ async fn happy_path_no_duplicates() -> anyhow::Result<()> { TestCounter::CountIncreased { newCount: U256::from(6) }, ] ); + let mut stream = assert_empty!(stream); // Live phase: emit three more, should arrive in order without duplicating latest contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; - // Transition to live + // Assert `StartingLiveStream` after emitting live events, because the test finishes the "latest + // events" phase before new events are emitted, thus the "live" phase actually starts from a + // future block. assert_next!(stream, Notification::StartingLiveStream); assert_event_sequence_final!( stream, @@ -78,6 +81,9 @@ async fn fewer_historical_then_continues_live() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; + // Assert `StartingLiveStream` after emitting live events, because the test finishes the "latest + // events" phase before new events are emitted, thus the "live" phase actually starts from a + // future block. 
assert_next!(stream, Notification::StartingLiveStream); assert_event_sequence_final!( stream, @@ -119,6 +125,9 @@ async fn exact_historical_count_then_live() -> anyhow::Result<()> { // Live continues contract.increase().send().await?.watch().await?; + // Assert `StartingLiveStream` after emitting live events, because the test finishes the "latest + // events" phase before new events are emitted, thus the "live" phase actually starts from a + // future block. assert_next!(stream, Notification::StartingLiveStream); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(5) }]); assert_empty!(stream); @@ -142,7 +151,11 @@ async fn no_historical_only_live_streams() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; - // Latest is empty + // Latest events are empty + + // Assert `StartingLiveStream` after emitting live events, because the test finishes the "latest + // events" phase before new events are emitted, thus the "live" phase actually starts from a + // future block. assert_next!(stream, Notification::StartingLiveStream); assert_event_sequence_final!( stream, @@ -189,6 +202,9 @@ async fn block_gaps_do_not_affect_number_of_events_streamed() -> anyhow::Result< // Immediately produce a new live event in a new block contract.increase().send().await?.watch().await?; + // Assert `StartingLiveStream` after emitting live events, because the test finishes the "latest + // events" phase before new events are emitted, thus the "live" phase actually starts from a + // future block. assert_next!(stream, Notification::StartingLiveStream); assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); assert_empty!(stream); From 5209b79b403e2f645498090cb68a40cb5ac3500e Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 09:07:35 +0100 Subject: [PATCH 47/63] test: ref: from_block --- tests/sync/from_block.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/sync/from_block.rs b/tests/sync/from_block.rs index c7bb914f..43f6091a 100644 --- a/tests/sync/from_block.rs +++ b/tests/sync/from_block.rs @@ -32,13 +32,13 @@ async fn replays_historical_then_switches_to_live() -> anyhow::Result<()> { ] ); + // chain tip reached + assert_next!(stream, Notification::StartingLiveStream); + // now emit live events contract.increase().send().await?.watch().await?; contract.increase().send().await?.watch().await?; - // chain tip reached - assert_next!(stream, Notification::StartingLiveStream); - // live events assert_event_sequence_final!( stream, @@ -72,6 +72,8 @@ async fn sync_from_future_block_waits_until_minted() -> anyhow::Result<()> { // Act: emit an event that will be mined in block == future_start contract.increase().send().await?.watch().await?; + // only after the live event at `future_start_block` is emitted, will `StartingLiveStream` be + // streamed assert_next!(stream, Notification::StartingLiveStream); // Assert: the first streamed message arrives and contains the expected event assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); @@ -101,14 +103,13 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { TestCounter::CountIncreased { newCount: U256::from(2) } ] ); + assert_next!(stream, Notification::StartingLiveStream); // emit "live" events for _ in 0..2 { contract.increase().send().await?.watch().await?; } - // switching to "live" phase - assert_next!(stream, Notification::StartingLiveStream); // assert 
confirmed live events are streamed separately let stream = assert_event_sequence_final!( stream, From f45d3c93c5508f64bc810b9f25353c0eb6654880 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 09:13:16 +0100 Subject: [PATCH 48/63] test: revert mixed_events_and_filters_return_correct_streams --- tests/latest_events/basic.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs index 008a4ec9..45aeeff8 100644 --- a/tests/latest_events/basic.rs +++ b/tests/latest_events/basic.rs @@ -214,7 +214,7 @@ async fn different_filters_receive_different_results() -> anyhow::Result<()> { #[tokio::test] async fn mixed_events_and_filters_return_correct_streams() -> anyhow::Result<()> { let count = 2; - let setup = setup_latest_scanner(Some(0.1), None, count, None, None).await?; + let setup = setup_latest_scanner(None, None, count, None, None).await?; let contract = setup.contract; let mut scanner = setup.scanner; let mut stream_inc = setup.stream; // CountIncreased by default From 85d146c4535516403000a73fde7533e4500a7686 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 09:15:36 +0100 Subject: [PATCH 49/63] test: ref: live_with_block_confirmations_always_emits_genesis_block --- tests/block_range_scanner.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index 16d0f6b7..22314fb0 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -51,27 +51,22 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh async fn live_with_block_confirmations_always_emits_genesis_block() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; - let client = BlockRangeScanner::new().connect(provider.clone()).await?.run()?; let mut stream = client.stream_live(3).await?; provider.anvil_mine(Some(1), None).await?; - assert_next!(stream, 0..=0); let stream = assert_empty!(stream); provider.anvil_mine(Some(2), None).await?; - let mut stream = assert_empty!(stream); provider.anvil_mine(Some(5), None).await?; - assert_range_coverage!(stream, 1..=5); let mut stream = assert_empty!(stream); provider.anvil_mine(Some(1), None).await?; - assert_next!(stream, 6..=6); assert_empty!(stream); From 13bd108fdaa0775885b5471b664982062eb18de0 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 09:54:25 +0100 Subject: [PATCH 50/63] ref: reorg_handler: store block hash only if not already stored --- src/block_range_scanner.rs | 23 +++++++++++++---------- src/block_range_scanner/reorg_handler.rs | 7 +++++-- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 081cb25f..0c98c132 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -502,6 +502,7 @@ impl Service { ) -> Result<(), ScannerError> { let max_block_range = self.max_block_range; let provider = self.provider.clone(); + let mut reorg_handler = self.reorg_handler.clone(); let (start_block, end_block) = try_join!( self.provider.get_block_by_number(start_height), @@ -515,7 +516,8 @@ impl Service { }; tokio::spawn(async move { - Self::stream_rewind(from, to, max_block_range, &sender, &provider).await; + Self::stream_rewind(from, to, max_block_range, &sender, &provider, &mut reorg_handler) + .await; }); Ok(()) @@ -534,11 +536,12 @@ impl Service { max_block_range: u64, sender: &mpsc::Sender, 
provider: &RobustProvider, + reorg_handler: &mut ReorgHandler, ) { let mut batch_count = 0; // for checking whether reorg occurred - let mut tip_hash = from.header().hash(); + let mut tip = from; let from = from.header().number(); let to = to.header().number(); @@ -565,10 +568,10 @@ impl Service { break; } - let reorged = match reorg_detected(provider, tip_hash).await { - Ok(detected) => { - info!(block_number = %from, hash = %tip_hash, "Reorg detected"); - detected + let reorged_opt = match reorg_handler.check(&tip).await { + Ok(opt) => { + info!(block_number = %from, hash = %tip.header().hash(), "Reorg detected"); + opt } Err(e) => { error!(error = %e, "Terminal RPC call error, shutting down"); @@ -577,8 +580,8 @@ impl Service { } }; - if reorged { - info!(block_number = %from, hash = %tip_hash, "Reorg detected"); + if let Some(common_ancestor) = reorged_opt { + info!(block_number = %from, hash = %tip.header().hash(), "Reorg detected"); if !sender.try_stream(Notification::ReorgDetected).await { break; @@ -587,8 +590,8 @@ impl Service { // restart rewind batch_from = from; // store the updated end block hash - tip_hash = match provider.get_block_by_number(from.into()).await { - Ok(block) => block.header().hash(), + tip = match provider.get_block_by_number(from.into()).await { + Ok(block) => block, Err(RobustProviderError::BlockNotFound(_)) => { panic!("Block with number '{from}' should exist post-reorg"); } diff --git a/src/block_range_scanner/reorg_handler.rs b/src/block_range_scanner/reorg_handler.rs index 367b1096..2a996868 100644 --- a/src/block_range_scanner/reorg_handler.rs +++ b/src/block_range_scanner/reorg_handler.rs @@ -31,9 +31,12 @@ impl ReorgHandler { let block = block.header(); info!(block_hash = %block.hash(), block_number = block.number(), "Checking if block was reorged"); if !self.reorg_detected(block).await? { - info!(block_hash = %block.hash(), block_number = block.number(), "No reorg detected"); + let hash = block.hash(); + info!(block_hash = %hash, block_number = block.number(), "No reorg detected"); // store the incoming block's hash for future reference - self.buffer.push(block.hash()); + if !matches!(self.buffer.back(), Some(&hash) if hash == hash) { + self.buffer.push(hash); + } return Ok(None); } From 8af930e1a0d82818bd1da973d51393a0621fa319 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 09:59:17 +0100 Subject: [PATCH 51/63] Revert "ref: reorg_handler: store block hash only if not already stored" This reverts commit 13bd108fdaa0775885b5471b664982062eb18de0. 
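The likely motivation for this revert: the guard bound the buffered hash under the same
name it compared against (`Some(&hash) if hash == hash`), so the comparison held trivially
whenever the buffer was non-empty, and after the first hash was stored no further hashes
would ever be pushed. A corrected form of the check, comparing against the incoming
block's hash, is reintroduced later in this series (see PATCH 57, "ref: reorg_handler:
store block hash only if not already stored"):

    if !matches!(self.buffer.back(), Some(&hash) if hash == block_hash) {
        self.buffer.push(block_hash);
    }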
--- src/block_range_scanner.rs | 23 ++++++++++------------- src/block_range_scanner/reorg_handler.rs | 7 ++----- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 0c98c132..081cb25f 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -502,7 +502,6 @@ impl Service { ) -> Result<(), ScannerError> { let max_block_range = self.max_block_range; let provider = self.provider.clone(); - let mut reorg_handler = self.reorg_handler.clone(); let (start_block, end_block) = try_join!( self.provider.get_block_by_number(start_height), @@ -516,8 +515,7 @@ impl Service { }; tokio::spawn(async move { - Self::stream_rewind(from, to, max_block_range, &sender, &provider, &mut reorg_handler) - .await; + Self::stream_rewind(from, to, max_block_range, &sender, &provider).await; }); Ok(()) @@ -536,12 +534,11 @@ impl Service { max_block_range: u64, sender: &mpsc::Sender, provider: &RobustProvider, - reorg_handler: &mut ReorgHandler, ) { let mut batch_count = 0; // for checking whether reorg occurred - let mut tip = from; + let mut tip_hash = from.header().hash(); let from = from.header().number(); let to = to.header().number(); @@ -568,10 +565,10 @@ impl Service { break; } - let reorged_opt = match reorg_handler.check(&tip).await { - Ok(opt) => { - info!(block_number = %from, hash = %tip.header().hash(), "Reorg detected"); - opt + let reorged = match reorg_detected(provider, tip_hash).await { + Ok(detected) => { + info!(block_number = %from, hash = %tip_hash, "Reorg detected"); + detected } Err(e) => { error!(error = %e, "Terminal RPC call error, shutting down"); @@ -580,8 +577,8 @@ impl Service { } }; - if let Some(common_ancestor) = reorged_opt { - info!(block_number = %from, hash = %tip.header().hash(), "Reorg detected"); + if reorged { + info!(block_number = %from, hash = %tip_hash, "Reorg detected"); if !sender.try_stream(Notification::ReorgDetected).await { break; @@ -590,8 +587,8 @@ impl Service { // restart rewind batch_from = from; // store the updated end block hash - tip = match provider.get_block_by_number(from.into()).await { - Ok(block) => block, + tip_hash = match provider.get_block_by_number(from.into()).await { + Ok(block) => block.header().hash(), Err(RobustProviderError::BlockNotFound(_)) => { panic!("Block with number '{from}' should exist post-reorg"); } diff --git a/src/block_range_scanner/reorg_handler.rs b/src/block_range_scanner/reorg_handler.rs index 2a996868..367b1096 100644 --- a/src/block_range_scanner/reorg_handler.rs +++ b/src/block_range_scanner/reorg_handler.rs @@ -31,12 +31,9 @@ impl ReorgHandler { let block = block.header(); info!(block_hash = %block.hash(), block_number = block.number(), "Checking if block was reorged"); if !self.reorg_detected(block).await? 
{ - let hash = block.hash(); - info!(block_hash = %hash, block_number = block.number(), "No reorg detected"); + info!(block_hash = %block.hash(), block_number = block.number(), "No reorg detected"); // store the incoming block's hash for future reference - if !matches!(self.buffer.back(), Some(&hash) if hash == hash) { - self.buffer.push(hash); - } + self.buffer.push(block.hash()); return Ok(None); } From d8cf028a6ddb026c81648051c6b43cd619945cfb Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 10:02:08 +0100 Subject: [PATCH 52/63] ref: use reorg_handler in rewind --- src/block_range_scanner.rs | 39 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 23 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 081cb25f..5ef92313 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -76,7 +76,7 @@ use alloy::{ consensus::BlockHeader, eips::BlockNumberOrTag, network::{BlockResponse, Network, primitives::HeaderResponse}, - primitives::{B256, BlockNumber}, + primitives::BlockNumber, pubsub::Subscription, transports::{RpcError, TransportErrorKind}, }; @@ -502,6 +502,7 @@ impl Service { ) -> Result<(), ScannerError> { let max_block_range = self.max_block_range; let provider = self.provider.clone(); + let mut reorg_handler = self.reorg_handler.clone(); let (start_block, end_block) = try_join!( self.provider.get_block_by_number(start_height), @@ -515,7 +516,8 @@ impl Service { }; tokio::spawn(async move { - Self::stream_rewind(from, to, max_block_range, &sender, &provider).await; + Self::stream_rewind(from, to, max_block_range, &sender, &provider, &mut reorg_handler) + .await; }); Ok(()) @@ -534,13 +536,14 @@ impl Service { max_block_range: u64, sender: &mpsc::Sender, provider: &RobustProvider, + reorg_handler: &mut ReorgHandler, ) { let mut batch_count = 0; // for checking whether reorg occurred - let mut tip_hash = from.header().hash(); + let mut tip = from; - let from = from.header().number(); + let from = tip.header().number(); let to = to.header().number(); // we're iterating in reverse @@ -565,10 +568,10 @@ impl Service { break; } - let reorged = match reorg_detected(provider, tip_hash).await { - Ok(detected) => { - info!(block_number = %from, hash = %tip_hash, "Reorg detected"); - detected + let reorged_opt = match reorg_handler.check(&tip).await { + Ok(opt) => { + info!(block_number = %from, hash = %tip.header().hash(), "Reorg detected"); + opt } Err(e) => { error!(error = %e, "Terminal RPC call error, shutting down"); @@ -577,8 +580,9 @@ impl Service { } }; - if reorged { - info!(block_number = %from, hash = %tip_hash, "Reorg detected"); + // for now we only care if a reorg occurred, not which block it was + if let Some(_) = reorged_opt { + info!(block_number = %from, hash = %tip.header().hash(), "Reorg detected"); if !sender.try_stream(Notification::ReorgDetected).await { break; @@ -587,8 +591,8 @@ impl Service { // restart rewind batch_from = from; // store the updated end block hash - tip_hash = match provider.get_block_by_number(from.into()).await { - Ok(block) => block.header().hash(), + tip = match provider.get_block_by_number(from.into()).await { + Ok(block) => block, Err(RobustProviderError::BlockNotFound(_)) => { panic!("Block with number '{from}' should exist post-reorg"); } @@ -781,17 +785,6 @@ impl Service { } } -async fn reorg_detected( - provider: &RobustProvider, - hash_to_check: B256, -) -> Result { - match provider.get_block_by_hash(hash_to_check).await { - Ok(_) => Ok(false), - 
Err(RobustProviderError::BlockNotFound(_)) => Ok(true), - Err(e) => Err(e.into()), - } -} - pub struct BlockRangeScannerClient { command_sender: mpsc::Sender, } From c06475a32412b05258771bc6b3e3e4b845b05f1b Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 10:09:23 +0100 Subject: [PATCH 53/63] ref: reorg_handler: get hash before logging & storing it --- src/block_range_scanner/reorg_handler.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/block_range_scanner/reorg_handler.rs b/src/block_range_scanner/reorg_handler.rs index 367b1096..1652c3a8 100644 --- a/src/block_range_scanner/reorg_handler.rs +++ b/src/block_range_scanner/reorg_handler.rs @@ -31,9 +31,10 @@ impl ReorgHandler { let block = block.header(); info!(block_hash = %block.hash(), block_number = block.number(), "Checking if block was reorged"); if !self.reorg_detected(block).await? { - info!(block_hash = %block.hash(), block_number = block.number(), "No reorg detected"); + let hash = block.hash(); + info!(block_hash = %hash, block_number = block.number(), "No reorg detected"); // store the incoming block's hash for future reference - self.buffer.push(block.hash()); + self.buffer.push(hash); return Ok(None); } From 32c06f729609ea05574a1b801ea5230efbbdde64 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 10:13:20 +0100 Subject: [PATCH 54/63] fix: limit stream_historical post-reorg next_start_block to 'start' --- src/block_range_scanner.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 5ef92313..28ba5a04 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -659,7 +659,11 @@ impl Service { if !sender.try_stream(Notification::ReorgDetected).await { return None; } - common_ancestor.header().number() + 1 + if common_ancestor.header().number() < start { + start + } else { + common_ancestor.header().number() + 1 + } } else { batch_end_num.saturating_add(1) }; From 6eb394148f8243529b22c3d5b93e4d060ec30976 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 10:20:10 +0100 Subject: [PATCH 55/63] ref: stream_historical_blocks: add stream_start field --- src/block_range_scanner.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 28ba5a04..1b26cbd9 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -358,6 +358,7 @@ impl Service { tokio::spawn(async move { Self::stream_historical_blocks( + start_block_num, start_block_num, end_block_num, max_block_range, @@ -437,6 +438,7 @@ impl Service { while start_block < confirmed_tip { Self::stream_historical_blocks( + start_block, start_block, confirmed_tip, max_block_range, @@ -613,7 +615,8 @@ impl Service { } async fn stream_historical_blocks( - start: BlockNumber, + stream_start: BlockNumber, + mut next_start_block: BlockNumber, end: BlockNumber, max_block_range: u64, sender: &mpsc::Sender, @@ -622,8 +625,6 @@ impl Service { ) -> Option { let mut batch_count = 0; - let mut next_start_block = start; - // must be <= to include the edge case when start == end (i.e. 
return the single block // range) loop { @@ -659,8 +660,8 @@ impl Service { if !sender.try_stream(Notification::ReorgDetected).await { return None; } - if common_ancestor.header().number() < start { - start + if common_ancestor.header().number() < stream_start { + stream_start } else { common_ancestor.header().number() + 1 } @@ -707,6 +708,7 @@ impl Service { let confirmed = incoming_block_num.saturating_sub(block_confirmations); let mut previous_batch_end = Self::stream_historical_blocks( + stream_start, stream_start, confirmed, max_block_range, @@ -768,6 +770,7 @@ impl Service { let batch_end_num = incoming_block_num.saturating_sub(block_confirmations); if batch_end_num >= batch_start { previous_batch_end = Self::stream_historical_blocks( + stream_start, batch_start, batch_end_num, max_block_range, From 46ff151102e08d20250573c4b17c5e6328e655c7 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 10:21:33 +0100 Subject: [PATCH 56/63] doc: stream_historical_blocks: add comment about one of assumptions --- src/block_range_scanner.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 1b26cbd9..b7cc53a3 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -614,6 +614,7 @@ impl Service { info!(batch_count = batch_count, "Rewind completed"); } + /// Assumes that `stream_start <= next_start_block <= end`. async fn stream_historical_blocks( stream_start: BlockNumber, mut next_start_block: BlockNumber, @@ -625,8 +626,6 @@ impl Service { ) -> Option { let mut batch_count = 0; - // must be <= to include the edge case when start == end (i.e. return the single block - // range) loop { let batch_end_num = next_start_block.saturating_add(max_block_range - 1).min(end); let batch_end = match provider.get_block_by_number(batch_end_num.into()).await { From c1b67a7d1f5ea55f1ed710cd2efc41b844e64cb5 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 10:26:22 +0100 Subject: [PATCH 57/63] ref: reorg_handler: store block hash only if not already stored --- src/block_range_scanner/reorg_handler.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/block_range_scanner/reorg_handler.rs b/src/block_range_scanner/reorg_handler.rs index 1652c3a8..243a3f28 100644 --- a/src/block_range_scanner/reorg_handler.rs +++ b/src/block_range_scanner/reorg_handler.rs @@ -31,10 +31,12 @@ impl ReorgHandler { let block = block.header(); info!(block_hash = %block.hash(), block_number = block.number(), "Checking if block was reorged"); if !self.reorg_detected(block).await? 
{ - let hash = block.hash(); - info!(block_hash = %hash, block_number = block.number(), "No reorg detected"); + let block_hash = block.hash(); + info!(block_hash = %block_hash, block_number = block.number(), "No reorg detected"); // store the incoming block's hash for future reference - self.buffer.push(hash); + if !matches!(self.buffer.back(), Some(&hash) if hash == block_hash) { + self.buffer.push(block_hash); + } return Ok(None); } From ca8b5094f50a438aaaefb41a794566a5d13dd62e Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 12:00:42 +0100 Subject: [PATCH 58/63] feat: impl get_block_number_by_id --- src/robust_provider/provider.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/robust_provider/provider.rs b/src/robust_provider/provider.rs index a5cbec2d..210865ee 100644 --- a/src/robust_provider/provider.rs +++ b/src/robust_provider/provider.rs @@ -98,6 +98,25 @@ impl RobustProvider { result } + /// Get the block number for a given block identifier. + /// + /// # Errors + /// + /// See [retry errors](#retry-errors). + pub async fn get_block_number_by_id(&self, id: BlockId) -> Result { + info!("get_block_number_by_id called"); + let result = self + .retry_with_total_timeout( + move |provider| async move { provider.get_block_number_by_id(id).await }, + false, + ) + .await; + if let Err(e) = &result { + error!(error = %e, "get_block_number_by_id failed"); + } + result?.ok_or_else(|| Error::BlockNotFound(id)) + } + /// Fetch the latest confirmed block number with retry and timeout. /// /// This method fetches the latest block number and subtracts the specified From e276d662b45535a12e9de13f71da4ad70f0376ed Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 12:40:16 +0100 Subject: [PATCH 59/63] ref: handle_sync --- src/block_range_scanner.rs | 318 ++---------------------- src/block_range_scanner/common.rs | 191 ++++++++++++++ src/block_range_scanner/sync_handler.rs | 222 +++++++++++++++++ 3 files changed, 431 insertions(+), 300 deletions(-) create mode 100644 src/block_range_scanner/common.rs create mode 100644 src/block_range_scanner/sync_handler.rs diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 05f06f37..bd491a73 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -64,26 +64,28 @@ use tokio::{ sync::{mpsc, oneshot}, try_join, }; -use tokio_stream::{StreamExt, wrappers::ReceiverStream}; +use tokio_stream::wrappers::ReceiverStream; use crate::{ ScannerMessage, + block_range_scanner::sync_handler::SyncHandler, error::ScannerError, robust_provider::{Error as RobustProviderError, IntoRobustProvider, RobustProvider}, types::{Notification, TryStream}, }; use alloy::{ consensus::BlockHeader, - eips::{BlockId, BlockNumberOrTag}, + eips::BlockId, network::{BlockResponse, Network, primitives::HeaderResponse}, primitives::BlockNumber, - pubsub::Subscription, transports::{RpcError, TransportErrorKind}, }; use tracing::{debug, error, info, warn}; +mod common; mod reorg_handler; mod ring_buffer; +mod sync_handler; use reorg_handler::ReorgHandler; @@ -314,15 +316,15 @@ impl Service { info!("WebSocket connected for live blocks"); tokio::spawn(async move { - Self::stream_live_blocks( + common::stream_live_blocks( range_start, subscription, - sender, + &sender, &provider, block_confirmations, max_block_range, &mut reorg_handler, - false, + false, // (notification unnecessary) ) .await; }); @@ -355,7 +357,7 @@ impl Service { let mut reorg_handler = self.reorg_handler.clone(); tokio::spawn(async move { - 
Self::stream_historical_blocks( + common::stream_historical_blocks( start_block_num, start_block_num, end_block_num, @@ -371,127 +373,19 @@ impl Service { } async fn handle_sync( - &mut self, + &self, start_id: BlockId, block_confirmations: u64, sender: mpsc::Sender, ) -> Result<(), ScannerError> { - let provider = self.provider.clone(); - let max_block_range = self.max_block_range; - let mut reorg_handler = self.reorg_handler.clone(); - - let get_start_block = async || -> Result { - let block = match start_id { - BlockId::Number(BlockNumberOrTag::Number(num)) => num, - _ => provider.get_block(start_id).await?.header().number(), - }; - Ok(block) - }; - - let get_confirmed_tip = async || -> Result { - let confirmed_block = provider.get_latest_confirmed(block_confirmations).await?; - Ok(confirmed_block) - }; - - // Step 1: - // Fetches the starting block and confirmed tip for historical sync in parallel - let (mut start_block, mut confirmed_tip) = - tokio::try_join!(get_start_block(), get_confirmed_tip())?; - - // If start is beyond confirmed tip, skip historical and go straight to live - if start_block > confirmed_tip { - info!( - start_block = start_block, - confirmed_tip = confirmed_tip, - "Start block is at or beyond confirmed tip, starting live stream" - ); - - let subscription: Subscription<::HeaderResponse> = - self.provider.subscribe_blocks().await?; - - tokio::spawn(async move { - Self::stream_live_blocks( - start_block, - subscription, - sender, - &provider, - block_confirmations, - max_block_range, - &mut reorg_handler, - true, - ) - .await; - }); - - return Ok(()); - } - - tokio::spawn(async move { - if start_block < confirmed_tip { - info!( - start_block = start_block, - confirmed_tip = confirmed_tip, - "Start block is before confirmed tip, syncing historical data" - ); - - while start_block < confirmed_tip { - Self::stream_historical_blocks( - start_block, - start_block, - confirmed_tip, - max_block_range, - &sender, - &provider, - &mut reorg_handler, - ) - .await; - - let latest = match provider.get_block_by_number(BlockNumberOrTag::Latest).await - { - Ok(block) => block.header().number(), - Err(e) => { - error!(error = %e, "Error latest block when calculating next historical batch, shutting down"); - _ = sender.try_stream(e).await; - return; - } - }; - - start_block = confirmed_tip + 1; - confirmed_tip = latest.saturating_sub(block_confirmations); - } - - info!("Chain tip reached, switching to live"); - } - - let subscription = match provider.subscribe_blocks().await { - Ok(sub) => sub, - Err(e) => { - error!(error = %e, "Error subscribing to live blocks, shutting down"); - _ = sender.try_stream(e).await; - return; - } - }; - - if !sender.try_stream(Notification::StartingLiveStream).await { - return; - } - - info!("Successfully transitioned from historical to live data"); - - Self::stream_live_blocks( - start_block, - subscription, - sender, - &provider, - block_confirmations, - max_block_range, - &mut reorg_handler, - false, - ) - .await; - }); - - Ok(()) + let sync_handler = SyncHandler::new( + self.provider.clone(), + self.max_block_range, + start_id, + block_confirmations, + sender, + ); + sync_handler.run().await } async fn handle_rewind( @@ -609,182 +503,6 @@ impl Service { info!(batch_count = batch_count, "Rewind completed"); } - - /// Assumes that `stream_start <= next_start_block <= end`. 
- async fn stream_historical_blocks( - stream_start: BlockNumber, - mut next_start_block: BlockNumber, - end: BlockNumber, - max_block_range: u64, - sender: &mpsc::Sender, - provider: &RobustProvider, - reorg_handler: &mut ReorgHandler, - ) -> Option { - let mut batch_count = 0; - - loop { - let batch_end_num = next_start_block.saturating_add(max_block_range - 1).min(end); - let batch_end = match provider.get_block_by_number(batch_end_num.into()).await { - Ok(block) => block, - Err(e) => { - error!(batch_start = next_start_block, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); - _ = sender.try_stream(e).await; - return None; - } - }; - - if !sender.try_stream(next_start_block..=batch_end_num).await { - return Some(batch_end); - } - - batch_count += 1; - if batch_count % 10 == 0 { - debug!(batch_count = batch_count, "Processed historical batches"); - } - - let reorged_opt = match reorg_handler.check(&batch_end).await { - Ok(opt) => opt, - Err(e) => { - error!(error = %e, "Failed to perform reorg check"); - _ = sender.try_stream(e).await; - return None; - } - }; - - next_start_block = if let Some(common_ancestor) = reorged_opt { - if !sender.try_stream(Notification::ReorgDetected).await { - return None; - } - if common_ancestor.header().number() < stream_start { - stream_start - } else { - common_ancestor.header().number() + 1 - } - } else { - batch_end_num.saturating_add(1) - }; - - if next_start_block > end { - info!(batch_count = batch_count, "Historical sync completed"); - return Some(batch_end); - } - } - } - - // TODO: refactor this function to reduce the number of arguments - #[allow(clippy::too_many_arguments)] - async fn stream_live_blocks( - stream_start: BlockNumber, - subscription: Subscription, - sender: mpsc::Sender, - provider: &RobustProvider, - block_confirmations: u64, - max_block_range: u64, - reorg_handler: &mut ReorgHandler, - notify_after_first_block: bool, - ) { - // ensure we start streaming only after the specified starting block - let mut stream = subscription.into_stream().skip_while(|header| { - header.number().saturating_sub(block_confirmations) < stream_start - }); - - let Some(incoming_block) = stream.next().await else { - warn!("Subscription channel closed"); - return; - }; - - if notify_after_first_block && !sender.try_stream(Notification::StartingLiveStream).await { - return; - } - - let incoming_block_num = incoming_block.number(); - info!(block_number = incoming_block_num, "Received block header"); - - let confirmed = incoming_block_num.saturating_sub(block_confirmations); - - let mut previous_batch_end = Self::stream_historical_blocks( - stream_start, - stream_start, - confirmed, - max_block_range, - &sender, - provider, - reorg_handler, - ) - .await; - - if previous_batch_end.is_none() { - // the sender channel is closed - return; - } - - let mut batch_start = stream_start; - - while let Some(incoming_block) = stream.next().await { - let incoming_block_num = incoming_block.number(); - info!(block_number = incoming_block_num, "Received block header"); - - let reorged_opt = match previous_batch_end.as_ref() { - None => None, - Some(batch_end) => match reorg_handler.check(batch_end).await { - Ok(opt) => opt, - Err(e) => { - error!(error = %e, "Failed to perform reorg check"); - _ = sender.try_stream(e).await; - return; - } - }, - }; - - if let Some(common_ancestor) = reorged_opt { - if !sender.try_stream(Notification::ReorgDetected).await { - return; - } - // no need to stream blocks prior to the previously 
specified starting block - if common_ancestor.header().number() < stream_start { - batch_start = stream_start; - previous_batch_end = None; - } else { - batch_start = common_ancestor.header().number() + 1; - previous_batch_end = Some(common_ancestor); - } - - // TODO: explain in docs that the returned block after a reorg will be the - // first confirmed block that is smaller between: - // - the first post-reorg block - // - the previous range_start - } else { - // no reorg happened, move the block range back to expected next start - // - // SAFETY: Overflow cannot realistically happen - if let Some(prev_batch_end) = previous_batch_end.as_ref() { - batch_start = prev_batch_end.header().number() + 1; - } - } - - let batch_end_num = incoming_block_num.saturating_sub(block_confirmations); - if batch_end_num >= batch_start { - previous_batch_end = Self::stream_historical_blocks( - stream_start, - batch_start, - batch_end_num, - max_block_range, - &sender, - provider, - reorg_handler, - ) - .await; - - if previous_batch_end.is_none() { - // the sender channel is closed - return; - } - - // SAFETY: Overflow cannot realistically happen - batch_start = batch_end_num + 1; - } - } - } } pub struct BlockRangeScannerClient { diff --git a/src/block_range_scanner/common.rs b/src/block_range_scanner/common.rs new file mode 100644 index 00000000..461ea1f5 --- /dev/null +++ b/src/block_range_scanner/common.rs @@ -0,0 +1,191 @@ +use tokio::sync::mpsc; +use tokio_stream::StreamExt; + +use crate::{ + block_range_scanner::{Message, reorg_handler::ReorgHandler}, + robust_provider::RobustProvider, + types::{Notification, TryStream}, +}; +use alloy::{ + consensus::BlockHeader, + network::{BlockResponse, Network}, + primitives::BlockNumber, + pubsub::Subscription, +}; +use tracing::{debug, error, info, warn}; + +// TODO: refactor this function to reduce the number of arguments +#[allow(clippy::too_many_arguments)] +pub(crate) async fn stream_live_blocks( + stream_start: BlockNumber, + subscription: Subscription, + sender: &mpsc::Sender, + provider: &RobustProvider, + block_confirmations: u64, + max_block_range: u64, + reorg_handler: &mut ReorgHandler, + notify_after_first_block: bool, +) { + // ensure we start streaming only after the specified starting block + let mut stream = subscription + .into_stream() + .skip_while(|header| header.number().saturating_sub(block_confirmations) < stream_start); + + let Some(incoming_block) = stream.next().await else { + warn!("Subscription channel closed"); + return; + }; + + if notify_after_first_block && !sender.try_stream(Notification::StartingLiveStream).await { + return; + } + + let incoming_block_num = incoming_block.number(); + info!(block_number = incoming_block_num, "Received block header"); + + let confirmed = incoming_block_num.saturating_sub(block_confirmations); + + let mut previous_batch_end = stream_historical_blocks( + stream_start, + stream_start, + confirmed, + max_block_range, + &sender, + provider, + reorg_handler, + ) + .await; + + if previous_batch_end.is_none() { + // the sender channel is closed + return; + } + + let mut batch_start = stream_start; + + while let Some(incoming_block) = stream.next().await { + let incoming_block_num = incoming_block.number(); + info!(block_number = incoming_block_num, "Received block header"); + + let reorged_opt = match previous_batch_end.as_ref() { + None => None, + Some(batch_end) => match reorg_handler.check(batch_end).await { + Ok(opt) => opt, + Err(e) => { + error!(error = %e, "Failed to perform reorg check"); + _ = 
sender.try_stream(e).await; + return; + } + }, + }; + + if let Some(common_ancestor) = reorged_opt { + if !sender.try_stream(Notification::ReorgDetected).await { + return; + } + // no need to stream blocks prior to the previously specified starting block + if common_ancestor.header().number() < stream_start { + batch_start = stream_start; + previous_batch_end = None; + } else { + batch_start = common_ancestor.header().number() + 1; + previous_batch_end = Some(common_ancestor); + } + + // TODO: explain in docs that the returned block after a reorg will be the + // first confirmed block that is smaller between: + // - the first post-reorg block + // - the previous range_start + } else { + // no reorg happened, move the block range back to expected next start + // + // SAFETY: Overflow cannot realistically happen + if let Some(prev_batch_end) = previous_batch_end.as_ref() { + batch_start = prev_batch_end.header().number() + 1; + } + } + + let batch_end_num = incoming_block_num.saturating_sub(block_confirmations); + if batch_end_num >= batch_start { + previous_batch_end = stream_historical_blocks( + stream_start, + batch_start, + batch_end_num, + max_block_range, + &sender, + provider, + reorg_handler, + ) + .await; + + if previous_batch_end.is_none() { + // the sender channel is closed + return; + } + + // SAFETY: Overflow cannot realistically happen + batch_start = batch_end_num + 1; + } + } +} + +/// Assumes that `stream_start <= next_start_block <= end`. +pub(crate) async fn stream_historical_blocks( + stream_start: BlockNumber, + mut next_start_block: BlockNumber, + end: BlockNumber, + max_block_range: u64, + sender: &mpsc::Sender, + provider: &RobustProvider, + reorg_handler: &mut ReorgHandler, +) -> Option { + let mut batch_count = 0; + + loop { + let batch_end_num = next_start_block.saturating_add(max_block_range - 1).min(end); + let batch_end = match provider.get_block_by_number(batch_end_num.into()).await { + Ok(block) => block, + Err(e) => { + error!(batch_start = next_start_block, batch_end = batch_end_num, error = %e, "Failed to get ending block of the current batch"); + _ = sender.try_stream(e).await; + return None; + } + }; + + if !sender.try_stream(next_start_block..=batch_end_num).await { + return Some(batch_end); + } + + batch_count += 1; + if batch_count % 10 == 0 { + debug!(batch_count = batch_count, "Processed historical batches"); + } + + let reorged_opt = match reorg_handler.check(&batch_end).await { + Ok(opt) => opt, + Err(e) => { + error!(error = %e, "Failed to perform reorg check"); + _ = sender.try_stream(e).await; + return None; + } + }; + + next_start_block = if let Some(common_ancestor) = reorged_opt { + if !sender.try_stream(Notification::ReorgDetected).await { + return None; + } + if common_ancestor.header().number() < stream_start { + stream_start + } else { + common_ancestor.header().number() + 1 + } + } else { + batch_end_num.saturating_add(1) + }; + + if next_start_block > end { + info!(batch_count = batch_count, "Historical sync completed"); + return Some(batch_end); + } + } +} diff --git a/src/block_range_scanner/sync_handler.rs b/src/block_range_scanner/sync_handler.rs new file mode 100644 index 00000000..f9db16b7 --- /dev/null +++ b/src/block_range_scanner/sync_handler.rs @@ -0,0 +1,222 @@ +use alloy::{eips::BlockId, network::Network, primitives::BlockNumber}; +use tokio::sync::mpsc; +use tracing::{error, info}; + +use crate::{ + Notification, ScannerError, + block_range_scanner::{Message, common, reorg_handler::ReorgHandler}, + 
robust_provider::RobustProvider, + types::TryStream, +}; + +/// Represents the initial state when starting a sync operation +enum SyncState { + /// Start block is already at or beyond the confirmed tip - go straight to live + AlreadyLive { start_block: BlockNumber }, + /// Start block is behind - need to catch up first, then go live + NeedsCatchup { start_block: BlockNumber, confirmed_tip: BlockNumber }, +} + +pub(crate) struct SyncHandler { + provider: RobustProvider, + max_block_range: u64, + start_id: BlockId, + block_confirmations: u64, + sender: mpsc::Sender, + reorg_handler: ReorgHandler, +} + +impl SyncHandler { + pub fn new( + provider: RobustProvider, + max_block_range: u64, + start_id: BlockId, + block_confirmations: u64, + sender: mpsc::Sender, + ) -> Self { + let reorg_handler = ReorgHandler::new(provider.clone()); + Self { provider, max_block_range, start_id, block_confirmations, sender, reorg_handler } + } + + pub async fn run(mut self) -> Result<(), ScannerError> { + let sync_state = self.determine_sync_state().await?; + + match sync_state { + SyncState::AlreadyLive { start_block } => { + info!( + start_block = start_block, + "Start block is beyond confirmed tip, waiting until starting block is confirmed before starting live stream" + ); + self.spawn_live_only(start_block).await + } + SyncState::NeedsCatchup { start_block, confirmed_tip } => { + info!( + start_block = start_block, + confirmed_tip = confirmed_tip, + "Start block is behind confirmed tip, catching up then transitioning to live" + ); + self.spawn_catchup_then_live(start_block, confirmed_tip).await + } + } + } + + /// Determines whether we need to catch up or can start live immediately + async fn determine_sync_state(&self) -> Result { + let (start_block, confirmed_tip) = tokio::try_join!( + self.provider.get_block_number_by_id(self.start_id), + self.provider.get_latest_confirmed(self.block_confirmations) + )?; + + if start_block > confirmed_tip { + Ok(SyncState::AlreadyLive { start_block }) + } else { + Ok(SyncState::NeedsCatchup { start_block, confirmed_tip }) + } + } + + /// Spawns a task that only streams live blocks (no historical catchup needed) + async fn spawn_live_only(&mut self, start_block: BlockNumber) -> Result<(), ScannerError> { + let max_block_range = self.max_block_range; + let block_confirmations = self.block_confirmations; + let provider = self.provider.clone(); + let sender = self.sender.clone(); + let mut reorg_handler = self.reorg_handler.clone(); + + let subscription = provider.subscribe_blocks().await?; + + tokio::spawn(async move { + common::stream_live_blocks( + start_block, + subscription, + &sender, + &provider, + block_confirmations, + max_block_range, + &mut reorg_handler, + true, + ) + .await; + }); + + Ok(()) + } + + /// Spawns a task that catches up on historical blocks, then transitions to live streaming + async fn spawn_catchup_then_live( + &self, + start_block: BlockNumber, + confirmed_tip: BlockNumber, + ) -> Result<(), ScannerError> { + let max_block_range = self.max_block_range; + let block_confirmations = self.block_confirmations; + let provider = self.provider.clone(); + let mut reorg_handler = self.reorg_handler.clone(); + let sender = self.sender.clone(); + + tokio::spawn(async move { + // Phase 1: Catch up on any blocks that have been minted during the historical sync + let start_block = match Self::catchup_historical_blocks( + start_block, + confirmed_tip, + block_confirmations, + max_block_range, + &sender, + &provider, + &mut reorg_handler, + ) + .await + { + 
Ok(start_block) => start_block, + Err(e) => { + error!(error = %e, "Error during historical catchup, shutting down"); + _ = sender.try_stream(e).await; + return; + } + }; + + // Phase 2: Transition to live streaming + Self::transition_to_live( + start_block, + block_confirmations, + max_block_range, + &sender, + &provider, + &mut reorg_handler, + ) + .await; + }); + + Ok(()) + } + + /// Catches up on historical blocks until we reach the chain tip + /// Returns the block number where live streaming should begin + async fn catchup_historical_blocks( + mut start_block: BlockNumber, + mut confirmed_tip: BlockNumber, + block_confirmations: u64, + max_block_range: u64, + sender: &mpsc::Sender, + provider: &RobustProvider, + reorg_handler: &mut ReorgHandler, + ) -> Result { + while start_block < confirmed_tip { + common::stream_historical_blocks( + start_block, + start_block, + confirmed_tip, + max_block_range, + sender, + provider, + reorg_handler, + ) + .await; + + let latest = provider.get_block_number().await?; + + start_block = confirmed_tip + 1; + confirmed_tip = latest.saturating_sub(block_confirmations); + } + + info!("Historical catchup complete, ready to transition to live"); + + Ok(start_block) + } + + /// Subscribes to live blocks and begins streaming + async fn transition_to_live( + start_block: BlockNumber, + block_confirmations: u64, + max_block_range: u64, + sender: &mpsc::Sender, + provider: &RobustProvider, + reorg_handler: &mut ReorgHandler, + ) { + let subscription = match provider.subscribe_blocks().await { + Ok(sub) => sub, + Err(e) => { + error!(error = %e, "Error subscribing to live blocks, shutting down"); + _ = sender.try_stream(e).await; + return; + } + }; + + if !sender.try_stream(Notification::StartingLiveStream).await { + return; + } + + info!("Successfully transitioned from historical to live streaming"); + + common::stream_live_blocks( + start_block, + subscription, + sender, + &provider, + block_confirmations, + max_block_range, + reorg_handler, + false, // (already notified above) + ) + .await; + } +} From 3dfad2c59bb101e09a0fef0d5930d821fdd755a1 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 12:41:30 +0100 Subject: [PATCH 60/63] test: remove commented out --- src/block_range_scanner.rs | 83 -------------------------------------- 1 file changed, 83 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index bd491a73..69dabefa 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -668,89 +668,6 @@ mod tests { assert_eq!(scanner.max_block_range, max_block_range); } - // TODO: update to valid handle_sync tests - // #[tokio::test] - // async fn buffered_messages_after_cutoff_are_all_passed() { - // let cutoff = 50; - // let (buffer_tx, buffer_rx) = mpsc::channel(8); - // buffer_tx.send(Message::Data(51..=55)).await.unwrap(); - // buffer_tx.send(Message::Data(56..=60)).await.unwrap(); - // buffer_tx.send(Message::Data(61..=70)).await.unwrap(); - // drop(buffer_tx); - - // let (out_tx, out_rx) = mpsc::channel(8); - // Service::::process_live_block_buffer(buffer_rx, out_tx, cutoff).await; - - // let mut stream = ReceiverStream::new(out_rx); - - // assert_next!(stream, 51..=55); - // assert_next!(stream, 56..=60); - // assert_next!(stream, 61..=70); - // assert_closed!(stream); - // } - - // #[tokio::test] - // async fn ranges_entirely_before_cutoff_are_discarded() { - // let cutoff = 100; - - // let (buffer_tx, buffer_rx) = mpsc::channel(8); - // buffer_tx.send(Message::Data(40..=50)).await.unwrap(); - // 
buffer_tx.send(Message::Data(51..=60)).await.unwrap(); - // buffer_tx.send(Message::Data(61..=70)).await.unwrap(); - // drop(buffer_tx); - - // let (out_tx, out_rx) = mpsc::channel(8); - // Service::::process_live_block_buffer(buffer_rx, out_tx, cutoff).await; - - // let mut stream = ReceiverStream::new(out_rx); - - // assert_closed!(stream); - // } - - // #[tokio::test] - // async fn ranges_overlapping_cutoff_are_trimmed() { - // let cutoff = 75; - - // let (buffer_tx, buffer_rx) = mpsc::channel(8); - // buffer_tx.send(Message::Data(60..=70)).await.unwrap(); - // buffer_tx.send(Message::Data(71..=80)).await.unwrap(); - // buffer_tx.send(Message::Data(81..=86)).await.unwrap(); - // drop(buffer_tx); - - // let (out_tx, out_rx) = mpsc::channel(8); - // Service::::process_live_block_buffer(buffer_rx, out_tx, cutoff).await; - - // let mut stream = ReceiverStream::new(out_rx); - - // assert_next!(stream, 75..=80); - // assert_next!(stream, 81..=86); - // assert_closed!(stream); - // } - - // #[tokio::test] - // async fn edge_case_range_exactly_at_cutoff() { - // let cutoff = 100; - - // let (buffer_tx, buffer_rx) = mpsc::channel(8); - // buffer_tx.send(Message::Data(98..=98)).await.unwrap(); // Just before: discard - // buffer_tx.send(Message::Data(99..=100)).await.unwrap(); // Includes cutoff: trim to - // 100..=100 buffer_tx.send(Message::Data(100..=100)).await.unwrap(); // Exactly at: - // forward buffer_tx.send(Message::Data(100..=101)).await.unwrap(); // Starts at cutoff: - // forward buffer_tx.send(Message::Data(102..=102)).await.unwrap(); // After cutoff: - // forward drop(buffer_tx); - - // let (out_tx, out_rx) = mpsc::channel(8); - // Service::::process_live_block_buffer(buffer_rx, out_tx, cutoff).await; - - // let mut stream = ReceiverStream::new(out_rx); - - // assert_next!(stream, 100..=100); - // assert_next!(stream, 100..=100); - // assert_next!(stream, 100..=101); - // assert_next!(stream, 102..=102); - // assert_closed!(stream); - // } - #[tokio::test] async fn try_send_forwards_errors_to_subscribers() { let (tx, mut rx) = mpsc::channel::(1); From 50a8a5a72c5dcabf1222f003a418b5b9a1618a44 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 12:54:36 +0100 Subject: [PATCH 61/63] test: exact_historical_count_then_live: add sleep to fix flakiness --- tests/sync/from_latest.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs index ef2b9b9e..e36cd771 100644 --- a/tests/sync/from_latest.rs +++ b/tests/sync/from_latest.rs @@ -122,6 +122,9 @@ async fn exact_historical_count_then_live() -> anyhow::Result<()> { ); let mut stream = assert_empty!(stream); + // give scanner time to subscribe to live events + sleep(Duration::from_millis(10)).await; + // Live continues contract.increase().send().await?.watch().await?; From d40c1471b9ad2b1dc84c9329aba0620af738bf00 Mon Sep 17 00:00:00 2001 From: 0xNeshi Date: Thu, 20 Nov 2025 13:16:26 +0100 Subject: [PATCH 62/63] ref: stream_live_blocks --- src/block_range_scanner/common.rs | 281 ++++++++++++++++++++++-------- 1 file changed, 212 insertions(+), 69 deletions(-) diff --git a/src/block_range_scanner/common.rs b/src/block_range_scanner/common.rs index 461ea1f5..9e3f2ff2 100644 --- a/src/block_range_scanner/common.rs +++ b/src/block_range_scanner/common.rs @@ -14,7 +14,6 @@ use alloy::{ }; use tracing::{debug, error, info, warn}; -// TODO: refactor this function to reduce the number of arguments #[allow(clippy::too_many_arguments)] pub(crate) async fn stream_live_blocks( stream_start: 
BlockNumber, @@ -26,13 +25,11 @@ pub(crate) async fn stream_live_blocks( reorg_handler: &mut ReorgHandler, notify_after_first_block: bool, ) { - // ensure we start streaming only after the specified starting block - let mut stream = subscription - .into_stream() - .skip_while(|header| header.number().saturating_sub(block_confirmations) < stream_start); + // Phase 1: Wait for first relevant block + let mut stream = skip_to_relevant_blocks::(subscription, stream_start, block_confirmations); - let Some(incoming_block) = stream.next().await else { - warn!("Subscription channel closed"); + let Some(first_block) = stream.next().await else { + warn!("Subscription channel closed before receiving any blocks"); return; }; @@ -40,95 +37,241 @@ pub(crate) async fn stream_live_blocks( return; } - let incoming_block_num = incoming_block.number(); - info!(block_number = incoming_block_num, "Received block header"); + // Phase 2: Initialize streaming state with first block + let mut state = match initialize_live_streaming_state( + first_block, + stream_start, + block_confirmations, + max_block_range, + sender, + provider, + reorg_handler, + ) + .await + { + Some(state) => state, + None => return, // Channel closed + }; + + // Phase 3: Continuously stream blocks with reorg handling + stream_blocks_continuously( + &mut stream, + &mut state, + stream_start, + block_confirmations, + max_block_range, + sender, + provider, + reorg_handler, + ) + .await; + + warn!("Live block subscription ended"); +} + +/// Skips blocks until we reach the first block that's relevant for streaming +fn skip_to_relevant_blocks( + subscription: Subscription, + stream_start: BlockNumber, + block_confirmations: u64, +) -> impl tokio_stream::Stream { + subscription.into_stream().skip_while(move |header| { + header.number().saturating_sub(block_confirmations) < stream_start + }) +} + +/// Initializes the streaming state after receiving the first block +/// Returns None if the channel is closed +async fn initialize_live_streaming_state( + first_block: N::HeaderResponse, + stream_start: BlockNumber, + block_confirmations: u64, + max_block_range: u64, + sender: &mpsc::Sender, + provider: &RobustProvider, + reorg_handler: &mut ReorgHandler, +) -> Option> { + let incoming_block_num = first_block.number(); + info!(block_number = incoming_block_num, "Received first block header"); let confirmed = incoming_block_num.saturating_sub(block_confirmations); - let mut previous_batch_end = stream_historical_blocks( + // Catch up on any confirmed blocks between stream_start and the confirmed tip + let previous_batch_end = stream_historical_blocks( stream_start, stream_start, confirmed, max_block_range, - &sender, + sender, provider, reorg_handler, ) - .await; + .await?; - if previous_batch_end.is_none() { - // the sender channel is closed - return; - } - - let mut batch_start = stream_start; + Some(LiveStreamingState { + batch_start: stream_start, + previous_batch_end: Some(previous_batch_end), + }) +} +/// Continuously streams blocks, handling reorgs as they occur +async fn stream_blocks_continuously< + N: Network, + S: tokio_stream::Stream + Unpin, +>( + stream: &mut S, + state: &mut LiveStreamingState, + stream_start: BlockNumber, + block_confirmations: u64, + max_block_range: u64, + sender: &mpsc::Sender, + provider: &RobustProvider, + reorg_handler: &mut ReorgHandler, +) { while let Some(incoming_block) = stream.next().await { let incoming_block_num = incoming_block.number(); info!(block_number = incoming_block_num, "Received block header"); - let 
+/// Continuously streams blocks, handling reorgs as they occur
+async fn stream_blocks_continuously<
+    N: Network,
+    S: tokio_stream::Stream<Item = N::HeaderResponse> + Unpin,
+>(
+    stream: &mut S,
+    state: &mut LiveStreamingState<N>,
+    stream_start: BlockNumber,
+    block_confirmations: u64,
+    max_block_range: u64,
+    sender: &mpsc::Sender,
+    provider: &RobustProvider<N>,
+    reorg_handler: &mut ReorgHandler<N>,
+) {
     while let Some(incoming_block) = stream.next().await {
         let incoming_block_num = incoming_block.number();
         info!(block_number = incoming_block_num, "Received block header");
 
-        let reorged_opt = match previous_batch_end.as_ref() {
-            None => None,
-            Some(batch_end) => match reorg_handler.check(batch_end).await {
-                Ok(opt) => opt,
-                Err(e) => {
-                    error!(error = %e, "Failed to perform reorg check");
-                    _ = sender.try_stream(e).await;
-                    return;
-                }
-            },
-        };
+        // Check for reorgs and update state accordingly
+        let common_ancestor =
+            match check_for_reorg(&state.previous_batch_end, reorg_handler, sender).await {
+                Some(common_ancestor_opt) => common_ancestor_opt,
+                None => return,
+            };
 
-        if let Some(common_ancestor) = reorged_opt {
-            if !sender.try_stream(Notification::ReorgDetected).await {
-                return;
-            }
-            // no need to stream blocks prior to the previously specified starting block
-            if common_ancestor.header().number() < stream_start {
-                batch_start = stream_start;
-                previous_batch_end = None;
-            } else {
-                batch_start = common_ancestor.header().number() + 1;
-                previous_batch_end = Some(common_ancestor);
+        if let Some(common_ancestor) = common_ancestor {
+            if !handle_reorg_detected(common_ancestor, stream_start, state, sender).await {
+                return; // Channel closed
             }
-
-            // TODO: explain in docs that the returned block after a reorg will be the
-            // first confirmed block that is smaller between:
-            // - the first post-reorg block
-            // - the previous range_start
         } else {
-            // no reorg happened, move the block range back to expected next start
-            //
-            // SAFETY: Overflow cannot realistically happen
-            if let Some(prev_batch_end) = previous_batch_end.as_ref() {
-                batch_start = prev_batch_end.header().number() + 1;
-            }
+            // No reorg: advance batch_start to after the previous batch
+            advance_batch_start_after_previous_end(state);
         }
 
+        // Stream the next batch of confirmed blocks
         let batch_end_num = incoming_block_num.saturating_sub(block_confirmations);
-        if batch_end_num >= batch_start {
-            previous_batch_end = stream_historical_blocks(
-                stream_start,
-                batch_start,
-                batch_end_num,
-                max_block_range,
-                &sender,
-                provider,
-                reorg_handler,
-            )
-            .await;
-
-            if previous_batch_end.is_none() {
-                // the sender channel is closed
-                return;
-            }
+        if !stream_next_batch(
+            batch_end_num,
+            state,
+            stream_start,
+            max_block_range,
+            sender,
+            provider,
+            reorg_handler,
+        )
+        .await
+        {
+            return; // Channel closed
+        }
+    }
+}
 
-            // SAFETY: Overflow cannot realistically happen
-            batch_start = batch_end_num + 1;
-        }
-    }
-}
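To pin down the loop's bookkeeping, here is a standalone simulation of the happy path (no reorgs, no channels) over plain integers; the function and names are illustrative, not the crate's API. Each incoming head confirms the range `batch_start..=head - block_confirmations`, which is emitted only when non-empty and then skipped past.

fn emit_batches(heads: &[u64], stream_start: u64, confirmations: u64) -> Vec<(u64, u64)> {
    let mut batch_start = stream_start;
    let mut batches = Vec::new();
    for &head in heads {
        let batch_end = head.saturating_sub(confirmations);
        if batch_end >= batch_start {
            batches.push((batch_start, batch_end));
            batch_start = batch_end + 1;
        }
        // Otherwise: no newly confirmed blocks yet; wait for the next header.
    }
    batches
}

fn main() {
    // stream_start = 10, 2 confirmations, heads arrive at 12, 13, 13, 20.
    // Head 12 confirms block 10; head 13 confirms 11; the duplicate 13
    // confirms nothing new; head 20 confirms 12..=18 in a single batch.
    assert_eq!(emit_batches(&[12, 13, 13, 20], 10, 2), vec![(10, 10), (11, 11), (12, 18)]);
}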
+/// Checks if a reorg occurred by verifying the previous batch end block.
+/// Returns `None` if the channel is closed.
+async fn check_for_reorg<N: Network>(
+    previous_batch_end: &Option<N::BlockResponse>,
+    reorg_handler: &mut ReorgHandler<N>,
+    sender: &mpsc::Sender,
+) -> Option<Option<N::BlockResponse>> {
+    let batch_end = previous_batch_end.as_ref()?;
+
+    match reorg_handler.check(batch_end).await {
+        Ok(reorg_opt) => Some(reorg_opt),
+        Err(e) => {
+            error!(error = %e, "Failed to perform reorg check");
+            _ = sender.try_stream(e).await;
+            None
+        }
+    }
+}
+
+/// Handles a detected reorg by notifying and adjusting the streaming state
+/// Returns false if the channel is closed
+async fn handle_reorg_detected<N: Network>(
+    common_ancestor: N::BlockResponse,
+    stream_start: BlockNumber,
+    state: &mut LiveStreamingState<N>,
+    sender: &mpsc::Sender,
+) -> bool {
+    if !sender.try_stream(Notification::ReorgDetected).await {
+        return false;
+    }
+
+    let ancestor_num = common_ancestor.header().number();
+
+    // Reset streaming position based on common ancestor
+    if ancestor_num < stream_start {
+        // Reorg went before our starting point - restart from stream_start
+        info!(
+            ancestor_block = ancestor_num,
+            stream_start = stream_start,
+            "Reorg detected before stream start, resetting to stream start"
+        );
+        state.batch_start = stream_start;
+        state.previous_batch_end = None;
+    } else {
+        // Resume from after the common ancestor
+        info!(ancestor_block = ancestor_num, "Reorg detected, resuming from common ancestor");
+        state.batch_start = ancestor_num + 1;
+        state.previous_batch_end = Some(common_ancestor);
+    }
+
+    true
+}
+
+/// Advances batch_start after processing a normal (non-reorg) block
+fn advance_batch_start_after_previous_end<N: Network>(state: &mut LiveStreamingState<N>) {
+    if let Some(prev_batch_end) = state.previous_batch_end.as_ref() {
+        state.batch_start = prev_batch_end.header().number() + 1;
+    }
+}
+
+/// Streams the next batch of blocks up to `batch_end_num`.
+/// Returns false if the channel is closed
+async fn stream_next_batch<N: Network>(
+    batch_end_num: BlockNumber,
+    state: &mut LiveStreamingState<N>,
+    stream_start: BlockNumber,
+    max_block_range: u64,
+    sender: &mpsc::Sender,
+    provider: &RobustProvider<N>,
+    reorg_handler: &mut ReorgHandler<N>,
+) -> bool {
+    if batch_end_num < state.batch_start {
+        // No new confirmed blocks to stream yet
+        return true;
+    }
+
+    state.previous_batch_end = stream_historical_blocks(
+        stream_start,
+        state.batch_start,
+        batch_end_num,
+        max_block_range,
+        sender,
+        provider,
+        reorg_handler,
+    )
+    .await;
+
+    if state.previous_batch_end.is_none() {
+        // Channel closed
+        return false;
+    }
+
+    // SAFETY: Overflow cannot realistically happen
+    state.batch_start = batch_end_num + 1;
+
+    true
+}
+
+/// Tracks the current state of live streaming
+struct LiveStreamingState<N: Network> {
+    /// The starting block number for the next batch to stream
+    batch_start: BlockNumber,
+    /// The last block from the previous batch (used for reorg detection)
+    previous_batch_end: Option<N::BlockResponse>,
+}
+
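The reset rule in `handle_reorg_detected` boils down to: resume from `max(stream_start, ancestor + 1)`, and drop `previous_batch_end` whenever the common ancestor predates the configured start, since there is no trusted block left to re-check against. A standalone sketch over plain `u64`s, not the crate's block types:

fn reset_after_reorg(ancestor: u64, stream_start: u64) -> (u64, Option<u64>) {
    if ancestor < stream_start {
        // Reorg reaches behind our window: rescan from stream_start with
        // nothing remembered for the next reorg check.
        (stream_start, None)
    } else {
        // Resume right after the last block both chains agree on.
        (ancestor + 1, Some(ancestor))
    }
}

fn main() {
    // Ancestor inside the streamed window: resume just after it.
    assert_eq!(reset_after_reorg(120, 100), (121, Some(120)));
    // Ancestor before stream_start: restart from the configured start.
    assert_eq!(reset_after_reorg(90, 100), (100, None));
}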
 /// Assumes that `stream_start <= next_start_block <= end`.
 pub(crate) async fn stream_historical_blocks<N: Network>(
     stream_start: BlockNumber,

From f49be2dd50b2a19ec254aeba0cae488d3758930c Mon Sep 17 00:00:00 2001
From: 0xNeshi
Date: Fri, 21 Nov 2025 08:50:17 +0100
Subject: [PATCH 63/63] ref: clippy

---
 src/block_range_scanner.rs              |  2 +-
 src/block_range_scanner/common.rs       | 24 ++++++++++++------------
 src/block_range_scanner/sync_handler.rs | 16 ++++++----------
 3 files changed, 19 insertions(+), 23 deletions(-)

diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs
index 69dabefa..6cd9f24c 100644
--- a/src/block_range_scanner.rs
+++ b/src/block_range_scanner.rs
@@ -473,7 +473,7 @@ impl Service {
         };
 
         // for now we only care if a reorg occurred, not which block it was
-        if let Some(_) = reorged_opt {
+        if reorged_opt.is_some() {
             info!(block_number = %from, hash = %tip.header().hash(), "Reorg detected");
 
             if !sender.try_stream(Notification::ReorgDetected).await {
diff --git a/src/block_range_scanner/common.rs b/src/block_range_scanner/common.rs
index 9e3f2ff2..16a1a9e2 100644
--- a/src/block_range_scanner/common.rs
+++ b/src/block_range_scanner/common.rs
@@ -38,7 +38,7 @@ pub(crate) async fn stream_live_blocks(
     }
 
     // Phase 2: Initialize streaming state with first block
-    let mut state = match initialize_live_streaming_state(
+    let Some(mut state) = initialize_live_streaming_state(
         first_block,
         stream_start,
         block_confirmations,
@@ -48,9 +48,8 @@
         reorg_handler,
     )
     .await
-    {
-        Some(state) => state,
-        None => return, // Channel closed
+    else {
+        return;
     };
 
@@ -115,6 +114,7 @@
 }
 
 /// Continuously streams blocks, handling reorgs as they occur
+#[allow(clippy::too_many_arguments)]
 async fn stream_blocks_continuously<
     N: Network,
     S: tokio_stream::Stream<Item = N::HeaderResponse> + Unpin,
@@ -133,11 +133,11 @@
         info!(block_number = incoming_block_num, "Received block header");
 
         // Check for reorgs and update state accordingly
-        let common_ancestor =
-            match check_for_reorg(&state.previous_batch_end, reorg_handler, sender).await {
-                Some(common_ancestor_opt) => common_ancestor_opt,
-                None => return,
-            };
+        let Some(common_ancestor) =
+            check_for_reorg(state.previous_batch_end.as_ref(), reorg_handler, sender).await
+        else {
+            return;
+        };
 
         if let Some(common_ancestor) = common_ancestor {
             if !handle_reorg_detected(common_ancestor, stream_start, state, sender).await {
@@ -169,11 +169,11 @@
 /// Checks if a reorg occurred by verifying the previous batch end block.
 /// Returns `None` if the channel is closed.
 async fn check_for_reorg<N: Network>(
-    previous_batch_end: &Option<N::BlockResponse>,
+    previous_batch_end: Option<&N::BlockResponse>,
     reorg_handler: &mut ReorgHandler<N>,
     sender: &mpsc::Sender,
 ) -> Option<Option<N::BlockResponse>> {
-    let batch_end = previous_batch_end.as_ref()?;
+    let batch_end = previous_batch_end?;
 
     match reorg_handler.check(batch_end).await {
         Ok(reorg_opt) => Some(reorg_opt),
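The `check_for_reorg` signature change above is the usual clippy rationale (likely its `ref_option` lint) for preferring `Option<&T>` over `&Option<T>`: callers can always produce the former with `.as_ref()`, while the callee drops a level of indirection and can apply `?` directly. A minimal illustration with a stand-in `Block` type rather than the crate's `N::BlockResponse`:

struct Block {
    number: u64,
}

// Before: the callee must peel the reference off the Option itself.
fn peek_old(prev: &Option<Block>) -> Option<u64> {
    prev.as_ref().map(|b| b.number)
}

// After: `?` works directly on the option, no `as_ref()` needed inside.
fn peek_new(prev: Option<&Block>) -> Option<u64> {
    let block = prev?;
    Some(block.number)
}

fn main() {
    let prev = Some(Block { number: 42 });
    assert_eq!(peek_old(&prev), Some(42));
    assert_eq!(peek_new(prev.as_ref()), Some(42));
    assert_eq!(peek_new(None), None);
}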
@@ -219,7 +219,7 @@
     true
 }
 
-/// Advances batch_start after processing a normal (non-reorg) block
+/// Advances `batch_start` after processing a normal (non-reorg) block
 fn advance_batch_start_after_previous_end<N: Network>(state: &mut LiveStreamingState<N>) {
     if let Some(prev_batch_end) = state.previous_batch_end.as_ref() {
         state.batch_start = prev_batch_end.header().number() + 1;
diff --git a/src/block_range_scanner/sync_handler.rs b/src/block_range_scanner/sync_handler.rs
index f9db16b7..74ee3159 100644
--- a/src/block_range_scanner/sync_handler.rs
+++ b/src/block_range_scanner/sync_handler.rs
@@ -47,7 +47,7 @@ impl SyncHandler {
                     start_block = start_block,
                     "Start block is beyond confirmed tip, waiting until starting block is confirmed before starting live stream"
                 );
-                self.spawn_live_only(start_block).await
+                self.spawn_live_only(start_block).await?;
             }
             SyncState::NeedsCatchup { start_block, confirmed_tip } => {
                 info!(
                     start_block = start_block,
                     confirmed_tip = confirmed_tip,
                     "Start block is behind confirmed tip, catching up then transitioning to live"
                 );
-                self.spawn_catchup_then_live(start_block, confirmed_tip).await
+                self.spawn_catchup_then_live(start_block, confirmed_tip);
             }
         }
+
+        Ok(())
     }
 
     /// Determines whether we need to catch up or can start live immediately
@@ -102,11 +104,7 @@
     }
 
     /// Spawns a task that catches up on historical blocks, then transitions to live streaming
-    async fn spawn_catchup_then_live(
-        &self,
-        start_block: BlockNumber,
-        confirmed_tip: BlockNumber,
-    ) -> Result<(), ScannerError> {
+    fn spawn_catchup_then_live(&self, start_block: BlockNumber, confirmed_tip: BlockNumber) {
         let max_block_range = self.max_block_range;
         let block_confirmations = self.block_confirmations;
         let provider = self.provider.clone();
@@ -145,8 +143,6 @@
             )
             .await;
         });
-
-        Ok(())
     }
 
     /// Catches up on historical blocks until we reach the chain tip
@@ -211,7 +207,7 @@
             start_block,
             subscription,
             sender,
-            &provider,
+            provider,
             block_confirmations,
             max_block_range,
             reorg_handler,
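Finally, the catch-up-versus-live decision `SyncHandler` makes before spawning fits in a few lines. Below is a standalone sketch over plain block numbers; only `SyncState::NeedsCatchup` appears verbatim in the diff, so the live-only variant name and the helper function are assumptions for illustration.

#[derive(Debug, PartialEq)]
enum SyncState {
    // Variant name assumed; the diff only shows the arm that waits for the
    // start block to be confirmed before going live.
    LiveOnly { start_block: u64 },
    NeedsCatchup { start_block: u64, confirmed_tip: u64 },
}

fn determine_sync_state(start_block: u64, latest: u64, confirmations: u64) -> SyncState {
    let confirmed_tip = latest.saturating_sub(confirmations);
    if start_block > confirmed_tip {
        // Start block is beyond the confirmed tip: stream live only.
        SyncState::LiveOnly { start_block }
    } else {
        // Start block is at or behind the confirmed tip: catch up first.
        SyncState::NeedsCatchup { start_block, confirmed_tip }
    }
}

fn main() {
    // Latest head 208 with 5 confirmations gives a confirmed tip of 203.
    assert_eq!(
        determine_sync_state(205, 208, 5),
        SyncState::LiveOnly { start_block: 205 }
    );
    assert_eq!(
        determine_sync_state(100, 208, 5),
        SyncState::NeedsCatchup { start_block: 100, confirmed_tip: 203 }
    );
}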