-
Notifications
You must be signed in to change notification settings - Fork 906
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
fix: use --syncmode=execution-layer
from op-node
for optimistic pipeline sync
#7552
Changes from 55 commits
458ee1a
1704948
12f5ee9
7df63db
1f02388
f56103a
42158aa
de8928a
1d51433
bd9a17b
72501de
2c96574
522d65c
9320432
e7a6aa6
d28dc31
57e530b
e4c11eb
e377c57
13a426c
4c35e9d
5703bbd
63df12d
3167f7f
93f0f64
3ad4cb2
c7ba89d
3dd46f7
0fcfb8d
7e05cc6
dab38a0
e22e187
f9c584e
ce6de02
0dcf372
b44d842
c2f7b39
c0c83c5
a3f08cd
0da350c
fd51d36
17f886c
982dd8b
89077c6
61e5898
89b9645
5fdadde
02d7d0a
963156a
e18a704
a2b9ac3
b3e354e
1a3939f
6119832
e0867c8
d620b13
643c420
53cacf4
42df97a
0419207
0dd1825
c17460b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -19,13 +19,14 @@ use reth_interfaces::{ | |
}; | ||
use reth_primitives::{ | ||
BlockHash, BlockNumHash, BlockNumber, ForkBlock, GotExpected, Hardfork, PruneModes, Receipt, | ||
SealedBlock, SealedBlockWithSenders, SealedHeader, U256, | ||
SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, B256, U256, | ||
}; | ||
use reth_provider::{ | ||
chain::{ChainSplit, ChainSplitTarget}, | ||
BlockExecutionWriter, BlockNumReader, BlockWriter, BundleStateWithReceipts, | ||
CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, Chain, | ||
ChainSpecProvider, DisplayBlocksChain, HeaderProvider, ProviderError, | ||
StaticFileProviderFactory, | ||
}; | ||
use reth_stages_api::{MetricEvent, MetricEventsSender}; | ||
use std::{ | ||
|
@@ -783,6 +784,11 @@ where | |
Ok(InsertPayloadOk::Inserted(status)) | ||
} | ||
|
||
/// Discard all blocks that precede block number from the buffer. | ||
pub fn remove_old_blocks(&mut self, block: BlockNumber) { | ||
self.state.buffered_blocks.remove_old_blocks(block); | ||
} | ||
|
||
/// Finalize blocks up until and including `finalized_block`, and remove them from the tree. | ||
pub fn finalize_block(&mut self, finalized_block: BlockNumber) { | ||
// remove blocks | ||
|
@@ -797,7 +803,7 @@ where | |
} | ||
} | ||
// clean block buffer. | ||
self.state.buffered_blocks.remove_old_blocks(finalized_block); | ||
self.remove_old_blocks(finalized_block); | ||
} | ||
|
||
/// Reads the last `N` canonical hashes from the database and updates the block indices of the | ||
|
@@ -817,6 +823,16 @@ where | |
) -> RethResult<()> { | ||
self.finalize_block(last_finalized_block); | ||
|
||
let last_canonical_hashes = self.update_block_hashes()?; | ||
|
||
self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; | ||
|
||
Ok(()) | ||
} | ||
|
||
/// Update all block hashes. iterate over present and new list of canonical hashes and compare | ||
/// them. Remove all mismatches, disconnect them and removes all chains. | ||
pub fn update_block_hashes(&mut self) -> RethResult<BTreeMap<BlockNumber, B256>> { | ||
let last_canonical_hashes = self | ||
.externals | ||
.fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; | ||
|
@@ -831,9 +847,7 @@ where | |
} | ||
} | ||
|
||
self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; | ||
|
||
Ok(()) | ||
Ok(last_canonical_hashes) | ||
} | ||
|
||
/// Reads the last `N` canonical hashes from the database and updates the block indices of the | ||
|
@@ -1220,6 +1234,28 @@ where | |
&self, | ||
revert_until: BlockNumber, | ||
) -> Result<Option<Chain>, CanonicalError> { | ||
if self | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. should be feature gated? feel free to introduce one if there isn't |
||
.externals | ||
.provider_factory | ||
.static_file_provider() | ||
.get_highest_static_file_block(StaticFileSegment::Headers) | ||
.unwrap_or_default() > | ||
joshieDo marked this conversation as resolved.
Show resolved
Hide resolved
|
||
revert_until | ||
{ | ||
trace!( | ||
target: "blockchain_tree", | ||
"Reverting optimistic canonical chain to block {}", | ||
revert_until | ||
); | ||
// This should only happen when an optimistic sync target was re-orged. | ||
mattsse marked this conversation as resolved.
Show resolved
Hide resolved
|
||
// | ||
// Static files generally contain finalized data. The blockchain tree only deals | ||
// with unfinalized data. The only scenario where canonical reverts go past the highest | ||
// static file is when an optimistic sync occurred and unfinalized data was written to | ||
// static files. | ||
return Err(CanonicalError::OptimisticTargetRevert(revert_until)) | ||
} | ||
|
||
// read data that is needed for new sidechain | ||
let provider_rw = self.externals.provider_factory.provider_rw()?; | ||
|
||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -15,8 +15,9 @@ use reth_interfaces::{ | |
use reth_payload_builder::PayloadBuilderHandle; | ||
use reth_payload_validator::ExecutionPayloadValidator; | ||
use reth_primitives::{ | ||
constants::EPOCH_SLOTS, stage::StageId, BlockNumHash, BlockNumber, Head, Header, SealedBlock, | ||
SealedHeader, B256, | ||
constants::EPOCH_SLOTS, | ||
stage::{PipelineTarget, StageId}, | ||
BlockNumHash, BlockNumber, Head, Header, SealedBlock, SealedHeader, B256, | ||
}; | ||
use reth_provider::{ | ||
BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, | ||
|
@@ -316,7 +317,7 @@ where | |
}; | ||
|
||
if let Some(target) = maybe_pipeline_target { | ||
this.sync.set_pipeline_sync_target(target); | ||
this.sync.set_pipeline_sync_target(target.into()); | ||
} | ||
|
||
Ok((this, handle)) | ||
|
@@ -668,6 +669,21 @@ where | |
// threshold | ||
return Some(state.finalized_block_hash) | ||
} | ||
|
||
// OPTIMISTIC SYNCING | ||
joshieDo marked this conversation as resolved.
Show resolved
Hide resolved
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. can we feature gate this? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. or should we do this only for OP (chainspec.is_optimism) for now? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. i mean, its unlikely that it can happen on mainnet, but it can happen on testnets/devnets l1 for sure. i can add the feature gate or check... but what would be the reason? |
||
// | ||
// It can happen when the node is doing an | ||
// optimistic sync, where the CL has no knowledge of the finalized hash, | ||
// but is expecting the EL to sync as high | ||
// as possible before finalizing. | ||
// | ||
// This usually doesn't happen on ETH mainnet since CLs use the more | ||
// secure checkpoint syncing. | ||
// | ||
// However, optimism chains will do this. The risk of a reorg is however | ||
// low. | ||
debug!(target: "consensus::engine", hash=?state.head_block_hash, "Setting head hash as an optimistic pipeline target."); | ||
return Some(state.head_block_hash) | ||
} | ||
Ok(Some(_)) => { | ||
// we're fully synced to the finalized block | ||
|
@@ -960,6 +976,10 @@ where | |
// so we should not warn the user, since this will result in us attempting to sync | ||
// to a new target and is considered normal operation during sync | ||
} | ||
CanonicalError::OptimisticTargetRevert(block_number) => { | ||
self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(*block_number)); | ||
return PayloadStatus::from_status(PayloadStatusEnum::Syncing) | ||
} | ||
_ => { | ||
warn!(target: "consensus::engine", %error, ?state, "Failed to canonicalize the head hash"); | ||
// TODO(mattsse) better error handling before attempting to sync (FCU could be | ||
|
@@ -990,7 +1010,7 @@ where | |
if self.pipeline_run_threshold == 0 { | ||
// use the pipeline to sync to the target | ||
trace!(target: "consensus::engine", %target, "Triggering pipeline run to sync missing ancestors of the new head"); | ||
self.sync.set_pipeline_sync_target(target); | ||
self.sync.set_pipeline_sync_target(target.into()); | ||
} else { | ||
// trigger a full block download for missing hash, or the parent of its lowest buffered | ||
// ancestor | ||
|
@@ -1340,7 +1360,7 @@ where | |
) { | ||
// we don't have the block yet and the distance exceeds the allowed | ||
// threshold | ||
self.sync.set_pipeline_sync_target(target); | ||
self.sync.set_pipeline_sync_target(target.into()); | ||
// we can exit early here because the pipeline will take care of syncing | ||
return | ||
} | ||
|
@@ -1424,6 +1444,8 @@ where | |
// TODO: do not ignore this | ||
let _ = self.blockchain.make_canonical(*target_hash.as_ref()); | ||
} | ||
} else if let Some(block_number) = err.optimistic_revert_block_number() { | ||
self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(block_number)); | ||
} | ||
|
||
Err((target.head_block_hash, err)) | ||
|
@@ -1485,13 +1507,7 @@ where | |
|
||
// update the canon chain if continuous is enabled | ||
if self.sync.run_pipeline_continuously() { | ||
let max_block = ctrl.block_number().unwrap_or_default(); | ||
let max_header = self.blockchain.sealed_header(max_block) | ||
.inspect_err(|error| { | ||
error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync"); | ||
})? | ||
.ok_or_else(|| ProviderError::HeaderNotFound(max_block.into()))?; | ||
self.blockchain.set_canonical_head(max_header); | ||
self.set_canonical_head(ctrl.block_number().unwrap_or_default())?; | ||
} | ||
|
||
let sync_target_state = match self.forkchoice_state_tracker.sync_target_state() { | ||
|
@@ -1504,6 +1520,14 @@ where | |
} | ||
}; | ||
|
||
if sync_target_state.finalized_block_hash.is_zero() { | ||
self.set_canonical_head(ctrl.block_number().unwrap_or_default())?; | ||
self.blockchain.update_block_hashes_and_clear_buffered()?; | ||
self.blockchain.connect_buffered_blocks_to_canonical_hashes()?; | ||
// We are on an optimistic syncing process, better to wait for the next FCU to handle | ||
joshieDo marked this conversation as resolved.
Show resolved
Hide resolved
|
||
return Ok(()) | ||
} | ||
|
||
// Next, we check if we need to schedule another pipeline run or transition | ||
// to live sync via tree. | ||
// This can arise if we buffer the forkchoice head, and if the head is an | ||
|
@@ -1559,7 +1583,7 @@ where | |
// the tree update from executing too many blocks and blocking. | ||
if let Some(target) = pipeline_target { | ||
// run the pipeline to the target since the distance is sufficient | ||
self.sync.set_pipeline_sync_target(target); | ||
self.sync.set_pipeline_sync_target(target.into()); | ||
} else if let Some(number) = | ||
self.blockchain.block_number(sync_target_state.finalized_block_hash)? | ||
{ | ||
|
@@ -1571,12 +1595,23 @@ where | |
} else { | ||
// We don't have the finalized block in the database, so we need to | ||
// trigger another pipeline run. | ||
self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash); | ||
self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash.into()); | ||
} | ||
|
||
Ok(()) | ||
} | ||
|
||
fn set_canonical_head(&self, max_block: BlockNumber) -> RethResult<()> { | ||
let max_header = self.blockchain.sealed_header(max_block) | ||
.inspect_err(|error| { | ||
error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync"); | ||
})? | ||
.ok_or_else(|| ProviderError::HeaderNotFound(max_block.into()))?; | ||
self.blockchain.set_canonical_head(max_header); | ||
|
||
Ok(()) | ||
} | ||
|
||
fn on_hook_result(&self, polled_hook: PolledHook) -> Result<(), BeaconConsensusEngineError> { | ||
if let EngineHookEvent::Finished(Err(error)) = &polled_hook.event { | ||
error!( | ||
|
@@ -1725,16 +1760,20 @@ where | |
Err(BeaconOnNewPayloadError::Internal(Box::new(error.clone()))); | ||
let _ = tx.send(response); | ||
return Err(RethError::Canonical(error)) | ||
} else if error.optimistic_revert_block_number().is_some() { | ||
// engine already set the pipeline unwind target on | ||
// `try_make_sync_target_canonical` | ||
PayloadStatus::from_status(PayloadStatusEnum::Syncing) | ||
Comment on lines
+1763
to
+1766
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. unsure here if returning Syncing is the best There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. this should be okay |
||
} else { | ||
// If we could not make the sync target block canonical, | ||
// we should return the error as an invalid payload status. | ||
PayloadStatus::new( | ||
PayloadStatusEnum::Invalid { validation_error: error.to_string() }, | ||
// TODO: return a proper latest valid hash | ||
// See: <https://github.com/paradigmxyz/reth/issues/7146> | ||
self.forkchoice_state_tracker.last_valid_head(), | ||
) | ||
} | ||
|
||
// If we could not make the sync target block canonical, | ||
// we should return the error as an invalid payload status. | ||
PayloadStatus::new( | ||
PayloadStatusEnum::Invalid { validation_error: error.to_string() }, | ||
// TODO: return a proper latest valid hash | ||
// See: <https://github.com/paradigmxyz/reth/issues/7146> | ||
self.forkchoice_state_tracker.last_valid_head(), | ||
) | ||
} | ||
}; | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I'm pretty sure this was problematic from the start