diff --git a/Cargo.lock b/Cargo.lock index 1d21a5f7df3..52f093bf17e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -215,11 +215,11 @@ dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "lighthouse_bootstrap 0.1.0", "lighthouse_metrics 0.1.0", - "lmd_ghost 0.1.0", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "merkle_proof 0.1.0", "operation_pool 0.1.0", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proto_array_fork_choice 0.1.0", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", @@ -515,7 +515,6 @@ dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "genesis 0.1.0", "lighthouse_bootstrap 0.1.0", - "lmd_ghost 0.1.0", "network 0.1.0", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "prometheus 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2395,27 +2394,6 @@ name = "linked-hash-map" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "lmd_ghost" -version = "0.1.0" -dependencies = [ - "beacon_chain 0.1.0", - "bls 0.1.0", - "criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz 0.1.2", - "eth2_ssz_derive 0.1.0", - "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "slot_clock 0.1.0", - "store 0.1.0", - "types 0.1.0", - "yaml-rust 0.4.3 
(registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "lock_api" version = "0.1.5" @@ -2461,6 +2439,8 @@ dependencies = [ name = "logging" version = "0.1.0" dependencies = [ + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lighthouse_metrics 0.1.0", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "slog-term 2.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3026,6 +3006,20 @@ dependencies = [ "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "proto_array_fork_choice" +version = "0.1.0" +dependencies = [ + "eth2_ssz 0.1.2", + "eth2_ssz_derive 0.1.0", + "itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.1.0", +] + [[package]] name = "protobuf" version = "2.8.1" @@ -3354,6 +3348,7 @@ dependencies = [ "eth2_ssz 0.1.2", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "proto_array_fork_choice 0.1.0", "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "rest_api 0.1.0", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3933,7 +3928,6 @@ dependencies = [ "integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lmd_ghost 0.1.0", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "merkle_proof 0.1.0", "rayon 1.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", diff --git a/Cargo.toml b/Cargo.toml index 716c4c5b47f..172fcb0365c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [workspace] members = [ - "eth2/lmd_ghost", + "eth2/proto_array_fork_choice", "eth2/operation_pool", "eth2/state_processing", "eth2/types", diff --git a/README.md b/README.md index 21e716db325..06a8cb0b3cf 100644 --- a/README.md +++ b/README.md @@ -49,11 +49,10 @@ Current development overview: - ~~**April 2019**: Inital single-client testnets.~~ - ~~**September 2019**: Inter-operability with other Ethereum 2.0 clients.~~ -- **Q4 2019**: `lighthouse-0.0.1` release: All major phase 0 - features implemented. -- **Q4 2019**: Public, multi-client testnet with user-facing functionality. -- **Q4 2019**: Third-party security review. -- **Q1 2020**: Production Beacon Chain testnet (tentative). +- ~~ **Q1 2020**: `lighthouse-0.1.0` release: All major phase 0 features implemented.~~ +- **Q1 2020**: Public, multi-client testnet with user-facing functionality. +- **Q2 2020**: Third-party security review. +- **Q3 2020**: Production Beacon Chain testnet (tentative). 
## Documentation diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index fcf9049c12a..7df36537b83 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -33,7 +33,6 @@ eth2_ssz_derive = "0.1.0" state_processing = { path = "../../eth2/state_processing" } tree_hash = "0.1.0" types = { path = "../../eth2/types" } -lmd_ghost = { path = "../../eth2/lmd_ghost" } eth1 = { path = "../eth1" } websocket_server = { path = "../websocket_server" } futures = "0.1.25" @@ -41,6 +40,7 @@ exit-future = "0.1.3" genesis = { path = "../genesis" } integer-sqrt = "0.1" rand = "0.7.2" +proto_array_fork_choice = { path = "../../eth2/proto_array_fork_choice" } [dev-dependencies] tempfile = "3.1.0" diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d8b3b547cbd..61840135256 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -8,7 +8,6 @@ use crate::head_tracker::HeadTracker; use crate::metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use crate::timeout_rw_lock::TimeoutRwLock; -use lmd_ghost::LmdGhost; use operation_pool::{OperationPool, PersistedOperationPool}; use slog::{debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; @@ -32,7 +31,7 @@ use std::time::{Duration, Instant}; use store::iter::{ BlockRootsIterator, ReverseBlockRootIterator, ReverseStateRootIterator, StateRootsIterator, }; -use store::{BlockRootTree, Error as DBError, Migrate, Store}; +use store::{Error as DBError, Migrate, Store}; use tree_hash::TreeHash; use types::*; @@ -59,8 +58,11 @@ const HEAD_LOCK_TIMEOUT: Duration = Duration::from_secs(1); pub enum BlockProcessingOutcome { /// Block was valid and imported into the block graph. Processed { block_root: Hash256 }, - /// The blocks parent_root is unknown. 
- ParentUnknown { parent: Hash256 }, + /// The parent block was unknown. + ParentUnknown { + parent: Hash256, + reference_location: &'static str, + }, /// The block slot is greater than the present slot. FutureSlot { present_slot: Slot, @@ -116,7 +118,6 @@ pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; type StoreMigrator: store::Migrate; type SlotClock: slot_clock::SlotClock; - type LmdGhost: LmdGhost; type Eth1Chain: Eth1ChainBackend; type EthSpec: types::EthSpec; type EventHandler: EventHandler; @@ -150,8 +151,6 @@ pub struct BeaconChain { pub(crate) head_tracker: HeadTracker, /// Provides a small cache of `BeaconState` and `BeaconBlock`. pub(crate) checkpoint_cache: CheckPointCache, - /// Cache of block roots for all known forks post-finalization. - pub block_root_tree: Arc, /// Logging to CLI, etc. pub(crate) log: Logger, } @@ -192,7 +191,6 @@ impl BeaconChain { ssz_head_tracker: self.head_tracker.to_ssz_container(), fork_choice: self.fork_choice.as_ssz_container(), eth1_cache: self.eth1_chain.as_ref().map(|x| x.as_ssz_container()), - block_root_tree: self.block_root_tree.as_ssz_container(), }; let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); @@ -1063,14 +1061,10 @@ impl BeaconChain { { // Provide the attestation to fork choice, updating the validator latest messages but // _without_ finding and updating the head. - if let Err(e) = self - .fork_choice - .process_attestation(&state, &attestation, block) - { + if let Err(e) = self.fork_choice.process_attestation(&state, &attestation) { error!( self.log, "Add attestation to fork choice failed"; - "fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()), "beacon_block_root" => format!("{}", attestation.data.beacon_block_root), "error" => format!("{:?}", e) ); @@ -1232,6 +1226,23 @@ impl BeaconChain { }); } + // Reject any block if its parent is not known to fork choice. 
+ // + // A block that is not in fork choice is either: + // + // - Not yet imported: we should reject this block because we should only import a child + // after its parent has been fully imported. + // - Pre-finalized: if the parent block is _prior_ to finalization, we should ignore it + // because it will revert finalization. Note that the finalized block is stored in fork + // choice, so we will not reject any child of the finalized block (this is relevant during + // genesis). + if !self.fork_choice.contains_block(&block.parent_root) { + return Ok(BlockProcessingOutcome::ParentUnknown { + parent: block.parent_root, + reference_location: "fork_choice", + }); + } + let block_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOCK_ROOT); let block_root = block.canonical_root(); @@ -1252,8 +1263,8 @@ impl BeaconChain { } // Check if the block is already known. We know it is post-finalization, so it is - // sufficient to check the block root tree. - if self.block_root_tree.is_known_block_root(&block_root) { + // sufficient to check the fork choice. + if self.fork_choice.contains_block(&block_root) { return Ok(BlockProcessingOutcome::BlockIsAlreadyKnown); } @@ -1269,6 +1280,7 @@ impl BeaconChain { None => { return Ok(BlockProcessingOutcome::ParentUnknown { parent: block.parent_root, + reference_location: "database", }); } }; @@ -1363,6 +1375,24 @@ impl BeaconChain { }); } + let fork_choice_register_timer = + metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_REGISTER); + + // Register the new block with the fork choice service. 
+ if let Err(e) = self + .fork_choice + .process_block(self, &state, &block, block_root) + { + error!( + self.log, + "Add block to fork choice failed"; + "block_root" => format!("{}", block_root), + "error" => format!("{:?}", e), + ) + } + + metrics::stop_timer(fork_choice_register_timer); + let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); // Store all the states between the parent block state and this blocks slot before storing @@ -1392,30 +1422,8 @@ impl BeaconChain { metrics::stop_timer(db_write_timer); - self.block_root_tree - .add_block_root(block_root, block.parent_root, block.slot)?; - self.head_tracker.register_block(block_root, &block); - let fork_choice_register_timer = - metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_REGISTER); - - // Register the new block with the fork choice service. - if let Err(e) = self - .fork_choice - .process_block(self, &state, &block, block_root) - { - error!( - self.log, - "Add block to fork choice failed"; - "fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()), - "block_root" => format!("{}", block_root), - "error" => format!("{:?}", e), - ) - } - - metrics::stop_timer(fork_choice_register_timer); - metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); metrics::observe( &metrics::OPERATIONS_PER_BLOCK_ATTESTATION, @@ -1706,8 +1714,7 @@ impl BeaconChain { new_epoch: new_finalized_epoch, }) } else { - self.fork_choice - .process_finalization(&finalized_block, finalized_block_root)?; + self.fork_choice.prune()?; let finalized_state = self .get_state_caching_only_with_committee_caches( @@ -1726,12 +1733,6 @@ impl BeaconChain { max_finality_distance, ); - // Prune in-memory block root tree. 
- self.block_root_tree.prune_to( - finalized_block_root, - self.heads().into_iter().map(|(block_root, _)| block_root), - ); - let _ = self.event_handler.register(EventKind::BeaconFinalization { epoch: new_finalized_epoch, root: finalized_block_root, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index fc951635fce..03cefefb061 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -9,54 +9,35 @@ use crate::{ ForkChoice, }; use eth1::Config as Eth1Config; -use lmd_ghost::{LmdGhost, ThreadSafeReducedTree}; use operation_pool::OperationPool; +use proto_array_fork_choice::ProtoArrayForkChoice; use slog::{info, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; -use store::{BlockRootTree, Store}; +use store::Store; use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot}; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing /// functionality and only exists to satisfy the type system. 
-pub struct Witness< - TStore, - TStoreMigrator, - TSlotClock, - TLmdGhost, - TEth1Backend, - TEthSpec, - TEventHandler, ->( +pub struct Witness( PhantomData<( TStore, TStoreMigrator, TSlotClock, - TLmdGhost, TEth1Backend, TEthSpec, TEventHandler, )>, ); -impl - BeaconChainTypes - for Witness< - TStore, - TStoreMigrator, - TSlotClock, - TLmdGhost, - TEth1Backend, - TEthSpec, - TEventHandler, - > +impl BeaconChainTypes + for Witness where TStore: Store + 'static, TStoreMigrator: store::Migrate + 'static, TSlotClock: SlotClock + 'static, - TLmdGhost: LmdGhost + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -64,7 +45,6 @@ where type Store = TStore; type StoreMigrator = TStoreMigrator; type SlotClock = TSlotClock; - type LmdGhost = TLmdGhost; type Eth1Chain = TEth1Backend; type EthSpec = TEthSpec; type EventHandler = TEventHandler; @@ -92,28 +72,18 @@ pub struct BeaconChainBuilder { slot_clock: Option, persisted_beacon_chain: Option>, head_tracker: Option, - block_root_tree: Option>, spec: ChainSpec, log: Option, } -impl +impl BeaconChainBuilder< - Witness< - TStore, - TStoreMigrator, - TSlotClock, - TLmdGhost, - TEth1Backend, - TEthSpec, - TEventHandler, - >, + Witness, > where TStore: Store + 'static, TStoreMigrator: store::Migrate + 'static, TSlotClock: SlotClock + 'static, - TLmdGhost: LmdGhost + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -135,7 +105,6 @@ where slot_clock: None, persisted_beacon_chain: None, head_tracker: None, - block_root_tree: None, spec: TEthSpec::default_spec(), log: None, } @@ -194,15 +163,7 @@ where let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); let p: PersistedBeaconChain< - Witness< - TStore, - TStoreMigrator, - TSlotClock, - TLmdGhost, - TEth1Backend, - TEthSpec, - TEventHandler, - >, + Witness, > = match store.get(&key) { Err(e) => { return Err(format!( @@ -230,7 
+191,6 @@ where Some(cache) => Some(Eth1Chain::from_ssz_container(cache, config, store, log)?), None => None, }; - self.block_root_tree = Some(Arc::new(p.block_root_tree.clone().into())); self.persisted_beacon_chain = Some(p); Ok(self) @@ -273,11 +233,6 @@ where ) })?; - self.block_root_tree = Some(Arc::new(BlockRootTree::new( - beacon_block_root, - beacon_block.slot, - ))); - self.finalized_checkpoint = Some(CheckPoint { beacon_block_root, beacon_block, @@ -327,15 +282,7 @@ where self, ) -> Result< BeaconChain< - Witness< - TStore, - TStoreMigrator, - TSlotClock, - TLmdGhost, - TEth1Backend, - TEthSpec, - TEventHandler, - >, + Witness, >, String, > { @@ -387,9 +334,6 @@ where .event_handler .ok_or_else(|| "Cannot build without an event handler".to_string())?, head_tracker: self.head_tracker.unwrap_or_default(), - block_root_tree: self - .block_root_tree - .ok_or_else(|| "Cannot build without a block root tree".to_string())?, checkpoint_cache: CheckPointCache::default(), log: log.clone(), }; @@ -412,15 +356,7 @@ where impl BeaconChainBuilder< - Witness< - TStore, - TStoreMigrator, - TSlotClock, - ThreadSafeReducedTree, - TEth1Backend, - TEthSpec, - TEventHandler, - >, + Witness, > where TStore: Store + 'static, @@ -435,23 +371,9 @@ where /// If this builder is being "resumed" from disk, then rebuild the last fork choice stored to /// the database. Otherwise, create a new, empty fork choice. pub fn reduced_tree_fork_choice(mut self) -> Result { - let store = self - .store - .clone() - .ok_or_else(|| "reduced_tree_fork_choice requires a store")?; - - let block_root_tree = self - .block_root_tree - .clone() - .ok_or_else(|| "reduced_tree_fork_choice requires a block root tree")?; - let fork_choice = if let Some(persisted_beacon_chain) = &self.persisted_beacon_chain { - ForkChoice::from_ssz_container( - persisted_beacon_chain.fork_choice.clone(), - store, - block_root_tree, - ) - .map_err(|e| format!("Unable to decode fork choice from db: {:?}", e))? 
+ ForkChoice::from_ssz_container(persisted_beacon_chain.fork_choice.clone()) + .map_err(|e| format!("Unable to decode fork choice from db: {:?}", e))? } else { let finalized_checkpoint = &self .finalized_checkpoint @@ -461,14 +383,22 @@ where .genesis_block_root .ok_or_else(|| "fork_choice_backend requires a genesis_block_root")?; - let backend = ThreadSafeReducedTree::new( - store, - block_root_tree, - &finalized_checkpoint.beacon_block, + let backend = ProtoArrayForkChoice::new( + finalized_checkpoint.beacon_block.slot, + // Note: here we set the `justified_epoch` to be the same as the epoch of the + // finalized checkpoint. Whilst this finalized checkpoint may actually point to + // a _later_ justified checkpoint, that checkpoint won't yet exist in the fork + // choice. + finalized_checkpoint.beacon_state.current_epoch(), + finalized_checkpoint.beacon_state.current_epoch(), finalized_checkpoint.beacon_block_root, - ); + )?; - ForkChoice::new(backend, genesis_block_root, self.spec.genesis_slot) + ForkChoice::new( + backend, + genesis_block_root, + &finalized_checkpoint.beacon_state, + ) }; self.fork_choice = Some(fork_choice); @@ -477,13 +407,12 @@ where } } -impl +impl BeaconChainBuilder< Witness< TStore, TStoreMigrator, TSlotClock, - TLmdGhost, CachingEth1Backend, TEthSpec, TEventHandler, @@ -493,7 +422,6 @@ where TStore: Store + 'static, TStoreMigrator: store::Migrate + 'static, TSlotClock: SlotClock + 'static, - TLmdGhost: LmdGhost + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { @@ -529,22 +457,13 @@ where } } -impl +impl BeaconChainBuilder< - Witness< - TStore, - TStoreMigrator, - TestingSlotClock, - TLmdGhost, - TEth1Backend, - TEthSpec, - TEventHandler, - >, + Witness, > where TStore: Store + 'static, TStoreMigrator: store::Migrate + 'static, - TLmdGhost: LmdGhost + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -570,13 +489,12 @@ where } } -impl 
+impl BeaconChainBuilder< Witness< TStore, TStoreMigrator, TSlotClock, - TLmdGhost, TEth1Backend, TEthSpec, NullEventHandler, @@ -586,7 +504,6 @@ where TStore: Store + 'static, TStoreMigrator: store::Migrate + 'static, TSlotClock: SlotClock + 'static, - TLmdGhost: LmdGhost + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, { diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 3f0fd7c0f8d..8aa66bc651d 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -6,7 +6,6 @@ use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::BlockProcessingError; use state_processing::SlotProcessingError; use std::time::Duration; -use store::block_root_tree::BlockRootTreeError; use types::*; macro_rules! easy_from_to { @@ -51,13 +50,11 @@ pub enum BeaconChainError { InvariantViolated(String), SszTypesError(SszTypesError), CanonicalHeadLockTimeout, - BlockRootTreeError(BlockRootTreeError), } easy_from_to!(SlotProcessingError, BeaconChainError); easy_from_to!(AttestationValidationError, BeaconChainError); easy_from_to!(SszTypesError, BeaconChainError); -easy_from_to!(BlockRootTreeError, BeaconChainError); #[derive(Debug, PartialEq)] pub enum BlockProductionError { diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 3a727a1f65a..6985a913b89 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -1,13 +1,14 @@ +mod checkpoint_manager; + use crate::{errors::BeaconChainError, metrics, BeaconChain, BeaconChainTypes}; -use lmd_ghost::LmdGhost; -use parking_lot::RwLock; +use checkpoint_manager::{get_effective_balances, CheckpointManager, CheckpointWithBalances}; +use parking_lot::{RwLock, RwLockReadGuard}; +use proto_array_fork_choice::{core::ProtoArray, ProtoArrayForkChoice}; use ssz_derive::{Decode, Encode}; -use 
state_processing::{common::get_attesting_indices, per_slot_processing}; -use std::sync::Arc; -use store::{BlockRootTree, Error as StoreError, Store}; -use types::{ - Attestation, BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, Slot, -}; +use state_processing::common::get_attesting_indices; +use std::marker::PhantomData; +use store::Error as StoreError; +use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, Hash256}; type Result = std::result::Result; @@ -19,27 +20,29 @@ pub enum Error { BeaconStateError(BeaconStateError), StoreError(StoreError), BeaconChainError(Box), + UnknownBlockSlot(Hash256), + UnknownJustifiedBlock(Hash256), + UnknownJustifiedState(Hash256), + UnableToJsonEncode(String), } pub struct ForkChoice { - backend: T::LmdGhost, + backend: ProtoArrayForkChoice, /// Used for resolving the `0x00..00` alias back to genesis. /// /// Does not necessarily need to be the _actual_ genesis, it suffices to be the finalized root /// whenever the struct was instantiated. genesis_block_root: Hash256, - /// The fork choice rule's current view of the justified checkpoint. - justified_checkpoint: RwLock, - /// The best justified checkpoint we've seen, which may be ahead of `justified_checkpoint`. - best_justified_checkpoint: RwLock, + checkpoint_manager: RwLock, + _phantom: PhantomData, } impl PartialEq for ForkChoice { + /// This implementation ignores the `store`. fn eq(&self, other: &Self) -> bool { self.backend == other.backend && self.genesis_block_root == other.genesis_block_root - && *self.justified_checkpoint.read() == *other.justified_checkpoint.read() - && *self.best_justified_checkpoint.read() == *other.best_justified_checkpoint.read() + && *self.checkpoint_manager.read() == *other.checkpoint_manager.read() } } @@ -48,122 +51,48 @@ impl ForkChoice { /// /// "Genesis" does not necessarily need to be the absolute genesis, it can be some finalized /// block. 
- pub fn new(backend: T::LmdGhost, genesis_block_root: Hash256, genesis_slot: Slot) -> Self { - let justified_checkpoint = Checkpoint { - epoch: genesis_slot.epoch(T::EthSpec::slots_per_epoch()), + pub fn new( + backend: ProtoArrayForkChoice, + genesis_block_root: Hash256, + genesis_state: &BeaconState, + ) -> Self { + let genesis_checkpoint = CheckpointWithBalances { + epoch: genesis_state.current_epoch(), root: genesis_block_root, + balances: get_effective_balances(genesis_state), }; + Self { backend, genesis_block_root, - justified_checkpoint: RwLock::new(justified_checkpoint.clone()), - best_justified_checkpoint: RwLock::new(justified_checkpoint), - } - } - - /// Determine whether the fork choice's view of the justified checkpoint should be updated. - /// - /// To prevent the bouncing attack, an update is allowed only in these conditions: - /// - /// * We're in the first SAFE_SLOTS_TO_UPDATE_JUSTIFIED slots of the epoch, or - /// * The new justified checkpoint is a descendant of the current justified checkpoint - fn should_update_justified_checkpoint( - &self, - chain: &BeaconChain, - new_justified_checkpoint: &Checkpoint, - ) -> Result { - if Self::compute_slots_since_epoch_start(chain.slot()?) - < chain.spec.safe_slots_to_update_justified - { - return Ok(true); + checkpoint_manager: RwLock::new(CheckpointManager::new(genesis_checkpoint)), + _phantom: PhantomData, } - - let justified_checkpoint = self.justified_checkpoint.read().clone(); - - let current_justified_block = chain - .get_block(&justified_checkpoint.root)? - .ok_or_else(|| Error::MissingBlock(justified_checkpoint.root))?; - - let new_justified_block = chain - .get_block(&new_justified_checkpoint.root)? 
- .ok_or_else(|| Error::MissingBlock(new_justified_checkpoint.root))?; - - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - - Ok( - new_justified_block.slot > justified_checkpoint.epoch.start_slot(slots_per_epoch) - && chain.get_ancestor_block_root( - new_justified_checkpoint.root, - current_justified_block.slot, - )? == Some(justified_checkpoint.root), - ) - } - - /// Calculate how far `slot` lies from the start of its epoch. - fn compute_slots_since_epoch_start(slot: Slot) -> u64 { - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - (slot - slot.epoch(slots_per_epoch).start_slot(slots_per_epoch)).as_u64() } /// Run the fork choice rule to determine the head. pub fn find_head(&self, chain: &BeaconChain) -> Result { let timer = metrics::start_timer(&metrics::FORK_CHOICE_FIND_HEAD_TIMES); - let (start_state, start_block_root, start_block_slot) = { - // Check if we should update our view of the justified checkpoint. - // Doing this check here should be quasi-equivalent to the update in the `on_tick` - // function of the spec, so long as `find_head` is called at least once during the first - // SAFE_SLOTS_TO_UPDATE_JUSTIFIED slots. - let best_justified_checkpoint = self.best_justified_checkpoint.read(); - if self.should_update_justified_checkpoint(chain, &best_justified_checkpoint)? { - *self.justified_checkpoint.write() = best_justified_checkpoint.clone(); - } - - let current_justified_checkpoint = self.justified_checkpoint.read().clone(); - - let (block_root, block_justified_slot) = ( - current_justified_checkpoint.root, - current_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - ); - - let block = chain - .store - .get::>(&block_root)? - .ok_or_else(|| Error::MissingBlock(block_root))?; - - // Resolve the `0x00.. 
00` alias back to genesis - let block_root = if block_root == Hash256::zero() { + let remove_alias = |root| { + if root == Hash256::zero() { self.genesis_block_root } else { - block_root - }; - - let mut state: BeaconState = chain - .get_state_caching_only_with_committee_caches(&block.state_root, Some(block.slot))? - .ok_or_else(|| Error::MissingState(block.state_root))?; - - // Fast-forward the state to the start slot of the epoch where it was justified. - for _ in block.slot.as_u64()..block_justified_slot.as_u64() { - per_slot_processing(&mut state, None, &chain.spec) - .map_err(BeaconChainError::SlotProcessingError)? + root } - - (state, block_root, block_justified_slot) }; - // A function that returns the weight for some validator index. - let weight = |validator_index: usize| -> Option { - start_state - .validators - .get(validator_index) - .map(|v| v.effective_balance) - }; + let mut manager = self.checkpoint_manager.write(); + manager.maybe_update(chain.slot()?, chain)?; let result = self .backend - .find_head(start_block_slot, start_block_root, weight) + .find_head( + manager.current.justified.epoch, + remove_alias(manager.current.justified.root), + manager.current.finalized.epoch, + &manager.current.justified.balances, + ) .map_err(Into::into); metrics::stop_timer(timer); @@ -171,6 +100,11 @@ impl ForkChoice { result } + /// Returns true if the given block is known to fork choice. + pub fn contains_block(&self, block_root: &Hash256) -> bool { + self.backend.contains_block(block_root) + } + /// Process all attestations in the given `block`. /// /// Assumes the block (and therefore its attestations) are valid. 
It is a logic error to @@ -183,36 +117,35 @@ impl ForkChoice { block_root: Hash256, ) -> Result<()> { let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES); + + self.checkpoint_manager + .write() + .process_state(block_root, state, chain, &self.backend)?; + self.checkpoint_manager + .write() + .maybe_update(chain.slot()?, chain)?; + // Note: we never count the block as a latest message, only attestations. - // - // I (Paul H) do not have an explicit reference to this, but I derive it from this - // document: - // - // https://github.com/ethereum/eth2.0-specs/blob/v0.7.0/specs/core/0_fork-choice.md for attestation in &block.body.attestations { - // If the `data.beacon_block_root` block is not known to us, simply ignore the latest - // vote. - if let Some(block) = chain.get_block_caching(&attestation.data.beacon_block_root)? { - self.process_attestation(state, attestation, &block)?; - } - } - - // Check if we should update our view of the justified checkpoint - if state.current_justified_checkpoint.epoch > self.justified_checkpoint.read().epoch { - *self.best_justified_checkpoint.write() = state.current_justified_checkpoint.clone(); + // If the `data.beacon_block_root` block is not known to the fork choice, simply ignore + // the vote. if self - .should_update_justified_checkpoint(chain, &state.current_justified_checkpoint)? + .backend + .contains_block(&attestation.data.beacon_block_root) { - *self.justified_checkpoint.write() = state.current_justified_checkpoint.clone(); + self.process_attestation(state, attestation)?; } } // This does not apply a vote to the block, it just makes fork choice aware of the block so // it can still be identified as the head even if it doesn't have any votes. - // - // A case where a block without any votes can be the head is where it is the only child of - // a block that has the majority of votes applied to it. 
- self.backend.process_block(block, block_root)?; + self.backend.process_block( + block.slot, + block_root, + block.parent_root, + state.current_justified_checkpoint.epoch, + state.finalized_checkpoint.epoch, + )?; metrics::stop_timer(timer); @@ -226,7 +159,6 @@ impl ForkChoice { &self, state: &BeaconState, attestation: &Attestation, - block: &BeaconBlock, ) -> Result<()> { let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); @@ -252,8 +184,11 @@ impl ForkChoice { get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; for validator_index in validator_indices { - self.backend - .process_attestation(validator_index, block_hash, block.slot)?; + self.backend.process_attestation( + validator_index, + block_hash, + attestation.data.target.epoch, + )?; } } @@ -265,38 +200,29 @@ impl ForkChoice { /// Returns the latest message for a given validator, if any. /// /// Returns `(block_root, block_slot)`. - pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { + pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { self.backend.latest_message(validator_index) } - /// Runs an integrity verification function on the underlying fork choice algorithm. - /// - /// Returns `Ok(())` if the underlying fork choice has maintained it's integrity, - /// `Err(description)` otherwise. - pub fn verify_integrity(&self) -> core::result::Result<(), String> { - self.backend.verify_integrity() + /// Trigger a prune on the underlying fork choice backend. + pub fn prune(&self) -> Result<()> { + let finalized_root = self.checkpoint_manager.read().current.finalized.root; + + self.backend.maybe_prune(finalized_root).map_err(Into::into) } - /// Inform the fork choice that the given block (and corresponding root) have been finalized so - /// it may prune it's storage. + /// Returns a read-lock to the core `ProtoArray` struct. 
/// - /// `finalized_block_root` must be the root of `finalized_block`. - pub fn process_finalization( - &self, - finalized_block: &BeaconBlock, - finalized_block_root: Hash256, - ) -> Result<()> { - self.backend - .update_finalized_root(finalized_block, finalized_block_root) - .map_err(Into::into) + /// Should only be used when encoding/decoding during troubleshooting. + pub fn core_proto_array(&self) -> RwLockReadGuard { + self.backend.core_proto_array() } /// Returns a `SszForkChoice` which contains the current state of `Self`. pub fn as_ssz_container(&self) -> SszForkChoice { SszForkChoice { - genesis_block_root: self.genesis_block_root, - justified_checkpoint: self.justified_checkpoint.read().clone(), - best_justified_checkpoint: self.best_justified_checkpoint.read().clone(), + genesis_block_root: self.genesis_block_root.clone(), + checkpoint_manager: self.checkpoint_manager.read().clone(), backend_bytes: self.backend.as_bytes(), } } @@ -304,18 +230,14 @@ impl ForkChoice { /// Instantiates `Self` from a prior `SszForkChoice`. /// /// The created `Self` will have the same state as the `Self` that created the `SszForkChoice`. 
- pub fn from_ssz_container( - ssz_container: SszForkChoice, - store: Arc, - block_root_tree: Arc, - ) -> Result { - let backend = LmdGhost::from_bytes(&ssz_container.backend_bytes, store, block_root_tree)?; + pub fn from_ssz_container(ssz_container: SszForkChoice) -> Result { + let backend = ProtoArrayForkChoice::from_bytes(&ssz_container.backend_bytes)?; Ok(Self { backend, genesis_block_root: ssz_container.genesis_block_root, - justified_checkpoint: RwLock::new(ssz_container.justified_checkpoint), - best_justified_checkpoint: RwLock::new(ssz_container.best_justified_checkpoint), + checkpoint_manager: RwLock::new(ssz_container.checkpoint_manager), + _phantom: PhantomData, }) } } @@ -326,8 +248,7 @@ impl ForkChoice { #[derive(Encode, Decode, Clone)] pub struct SszForkChoice { genesis_block_root: Hash256, - justified_checkpoint: Checkpoint, - best_justified_checkpoint: Checkpoint, + checkpoint_manager: CheckpointManager, backend_bytes: Vec, } diff --git a/beacon_node/beacon_chain/src/fork_choice/checkpoint_manager.rs b/beacon_node/beacon_chain/src/fork_choice/checkpoint_manager.rs new file mode 100644 index 00000000000..5441f4aa78e --- /dev/null +++ b/beacon_node/beacon_chain/src/fork_choice/checkpoint_manager.rs @@ -0,0 +1,340 @@ +use super::Error; +use crate::{metrics, BeaconChain, BeaconChainTypes}; +use proto_array_fork_choice::ProtoArrayForkChoice; +use ssz_derive::{Decode, Encode}; +use types::{BeaconState, Checkpoint, Epoch, EthSpec, Hash256, Slot}; + +const MAX_BALANCE_CACHE_SIZE: usize = 4; + +/// An item that is stored in the `BalancesCache`. +#[derive(PartialEq, Clone, Encode, Decode)] +struct CacheItem { + /// The block root at which `self.balances` are valid. + block_root: Hash256, + /// The `state.balances` list. + balances: Vec, +} + +/// Provides a cache to avoid reading `BeaconState` from disk when updating the current justified +/// checkpoint. +/// +/// It should store a mapping of `epoch_boundary_block_root -> state.balances`. 
+#[derive(PartialEq, Clone, Default, Encode, Decode)] +struct BalancesCache { + items: Vec, +} + +impl BalancesCache { + /// Inspect the given `state` and determine the root of the block at the first slot of + /// `state.current_epoch`. If there is not already some entry for the given block root, then + /// add `state.balances` to the cache. + pub fn process_state( + &mut self, + block_root: Hash256, + state: &BeaconState, + ) -> Result<(), Error> { + // We are only interested in balances from states that are at the start of an epoch, + // because this is where the `current_justified_checkpoint.root` will point. + if !Self::is_first_block_in_epoch(block_root, state)? { + return Ok(()); + } + + let epoch_boundary_slot = state.current_epoch().start_slot(E::slots_per_epoch()); + let epoch_boundary_root = if epoch_boundary_slot == state.slot { + block_root + } else { + // This call remains sensible as long as `state.block_roots` is larger than a single + // epoch. + *state.get_block_root(epoch_boundary_slot)? + }; + + if self.position(epoch_boundary_root).is_none() { + let item = CacheItem { + block_root: epoch_boundary_root, + balances: get_effective_balances(state), + }; + + if self.items.len() == MAX_BALANCE_CACHE_SIZE { + self.items.remove(0); + } + + self.items.push(item); + } + + Ok(()) + } + + /// Returns `true` if the given `block_root` is the first/only block to have been processed in + /// the epoch of the given `state`. + /// + /// We can determine if it is the first block by looking back through `state.block_roots` to + /// see if there is a block in the current epoch with a different root. + fn is_first_block_in_epoch( + block_root: Hash256, + state: &BeaconState, + ) -> Result { + let mut prior_block_found = false; + + for slot in state.current_epoch().slot_iter(E::slots_per_epoch()) { + if slot < state.slot { + if *state.get_block_root(slot)? 
!= block_root { + prior_block_found = true; + break; + } + } else { + break; + } + } + + Ok(!prior_block_found) + } + + fn position(&self, block_root: Hash256) -> Option { + self.items + .iter() + .position(|item| item.block_root == block_root) + } + + /// Get the balances for the given `block_root`, if any. + /// + /// If some balances are found, they are removed from the cache. + pub fn get(&mut self, block_root: Hash256) -> Option> { + let i = self.position(block_root)?; + Some(self.items.remove(i).balances) + } +} + +/// Returns the effective balances for every validator in the given `state`. +/// +/// Any validator who is not active in the epoch of the given `state` is assigned a balance of +/// zero. +pub fn get_effective_balances(state: &BeaconState) -> Vec { + state + .validators + .iter() + .map(|validator| { + if validator.is_active_at(state.current_epoch()) { + validator.effective_balance + } else { + 0 + } + }) + .collect() +} + +/// A `types::Checkpoint` that also stores the validator balances from a `BeaconState`. +/// +/// Useful because we need to track the justified checkpoint balances. +#[derive(PartialEq, Clone, Encode, Decode)] +pub struct CheckpointWithBalances { + pub epoch: Epoch, + pub root: Hash256, + /// These are the balances of the state with `self.root`. + /// + /// Importantly, these are _not_ the balances of the first state that we saw that has + /// `self.epoch` and `self.root` as `state.current_justified_checkpoint`. These are the + /// balances of the state from the block with `state.current_justified_checkpoint.root`. + pub balances: Vec, +} + +impl Into for CheckpointWithBalances { + fn into(self) -> Checkpoint { + Checkpoint { + epoch: self.epoch, + root: self.root, + } + } +} + +/// A pair of checkpoints, representing `state.current_justified_checkpoint` and +/// `state.finalized_checkpoint` for some `BeaconState`. 
+#[derive(PartialEq, Clone, Encode, Decode)] +pub struct FFGCheckpoints { + pub justified: CheckpointWithBalances, + pub finalized: Checkpoint, +} + +/// A struct to manage the justified and finalized checkpoints to be used for `ForkChoice`. +/// +/// This struct exists to manage the `should_update_justified_checkpoint` logic in the fork choice +/// section of the spec: +/// +/// https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/fork-choice.md#should_update_justified_checkpoint +#[derive(PartialEq, Clone, Encode, Decode)] +pub struct CheckpointManager { + /// The current FFG checkpoints that should be used for finding the head. + pub current: FFGCheckpoints, + /// The best-known checkpoints that should be moved to `self.current` when the time is right. + best: FFGCheckpoints, + /// The epoch at which `self.current` should become `self.best`, if any. + update_at: Option, + /// A cached used to try and avoid DB reads when updating `self.current` and `self.best`. + balances_cache: BalancesCache, +} + +impl CheckpointManager { + /// Create a new checkpoint cache from `genesis_checkpoint` derived from the genesis block. + pub fn new(genesis_checkpoint: CheckpointWithBalances) -> Self { + let ffg_checkpoint = FFGCheckpoints { + justified: genesis_checkpoint.clone(), + finalized: genesis_checkpoint.into(), + }; + Self { + current: ffg_checkpoint.clone(), + best: ffg_checkpoint, + update_at: None, + balances_cache: BalancesCache::default(), + } + } + + /// Potentially updates `self.current`, if the conditions are correct. + /// + /// Should be called before running the fork choice `find_head` function to ensure + /// `self.current` is up-to-date. 
+ pub fn maybe_update( + &mut self, + current_slot: Slot, + chain: &BeaconChain, + ) -> Result<(), Error> { + if self.best.justified.epoch > self.current.justified.epoch { + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + + match self.update_at { + None => { + if self.best.justified.epoch > self.current.justified.epoch { + if Self::compute_slots_since_epoch_start::(current_slot) + < chain.spec.safe_slots_to_update_justified + { + self.current = self.best.clone(); + } else { + self.update_at = Some(current_epoch + 1) + } + } + } + Some(epoch) if epoch <= current_epoch => { + self.current = self.best.clone(); + self.update_at = None + } + _ => {} + } + } + + Ok(()) + } + + /// Checks the given `state` (must correspond to the given `block_root`) to see if it contains + /// a `current_justified_checkpoint` that is better than `self.best_justified_checkpoint`. If + /// so, the value is updated. + /// + /// Note: this does not update `self.justified_checkpoint`. + pub fn process_state( + &mut self, + block_root: Hash256, + state: &BeaconState, + chain: &BeaconChain, + proto_array: &ProtoArrayForkChoice, + ) -> Result<(), Error> { + // Only proceed if the new checkpoint is better than our current checkpoint. + if state.current_justified_checkpoint.epoch > self.current.justified.epoch + && state.finalized_checkpoint.epoch >= self.current.finalized.epoch + { + let candidate = FFGCheckpoints { + justified: CheckpointWithBalances { + epoch: state.current_justified_checkpoint.epoch, + root: state.current_justified_checkpoint.root, + balances: self + .get_balances_for_block(state.current_justified_checkpoint.root, chain)?, + }, + finalized: state.finalized_checkpoint.clone(), + }; + + // Using the given `state`, determine its ancestor at the slot of our current justified + // epoch. Later, this will be compared to the root of the current justified checkpoint + // to determine if this state is descendant of our current justified state. 
+ let new_checkpoint_ancestor = Self::get_block_root_at_slot( + state, + chain, + candidate.justified.root, + self.current + .justified + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + )?; + + let candidate_justified_block_slot = proto_array + .block_slot(&candidate.justified.root) + .ok_or_else(|| Error::UnknownBlockSlot(candidate.justified.root))?; + + // If the new justified checkpoint is an ancestor of the current justified checkpoint, + // it is always safe to change it. + if new_checkpoint_ancestor == Some(self.current.justified.root) + && candidate_justified_block_slot + >= candidate + .justified + .epoch + .start_slot(T::EthSpec::slots_per_epoch()) + { + self.current = candidate.clone() + } + + if candidate.justified.epoch > self.best.justified.epoch { + // Always update the best checkpoint, if it's better. + self.best = candidate; + } + + // Add the state's balances to the balances cache to avoid a state read later. + self.balances_cache.process_state(block_root, state)?; + } + + Ok(()) + } + + fn get_balances_for_block( + &mut self, + block_root: Hash256, + chain: &BeaconChain, + ) -> Result, Error> { + if let Some(balances) = self.balances_cache.get(block_root) { + metrics::inc_counter(&metrics::BALANCES_CACHE_HITS); + + Ok(balances) + } else { + metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES); + + let block = chain + .get_block_caching(&block_root)? + .ok_or_else(|| Error::UnknownJustifiedBlock(block_root))?; + + let state = chain + .get_state_caching_only_with_committee_caches(&block.state_root, Some(block.slot))? + .ok_or_else(|| Error::UnknownJustifiedState(block.state_root))?; + + Ok(get_effective_balances(&state)) + } + } + + /// Attempts to get the block root for the given `slot`. + /// + /// First, the `state` is used to see if the slot is within the distance of its historical + /// lists. Then, the `chain` is used which will anchor the search at the given + /// `justified_root`. 
+ fn get_block_root_at_slot( + state: &BeaconState, + chain: &BeaconChain, + justified_root: Hash256, + slot: Slot, + ) -> Result, Error> { + match state.get_block_root(slot) { + Ok(root) => Ok(Some(*root)), + Err(_) => chain + .get_ancestor_block_root(justified_root, slot) + .map_err(Into::into), + } + } + + /// Calculate how far `slot` lies from the start of its epoch. + fn compute_slots_since_epoch_start(slot: Slot) -> u64 { + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + (slot - slot.epoch(slots_per_epoch).start_slot(slots_per_epoch)).as_u64() + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 3bede253cfb..3a02745b7b6 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -24,7 +24,6 @@ pub use self::errors::{BeaconChainError, BlockProductionError}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::EventHandler; pub use fork_choice::ForkChoice; -pub use lmd_ghost; pub use metrics::scrape_for_metrics; pub use parking_lot; pub use slot_clock; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 7ad92bf1514..72b1fdbc1c3 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -138,6 +138,10 @@ lazy_static! 
{ "beacon_fork_choice_process_attestation_seconds", "Time taken to add an attestation to fork choice" ); + pub static ref BALANCES_CACHE_HITS: Result = + try_create_int_counter("beacon_balances_cache_hits_total", "Count of times balances cache fulfils request"); + pub static ref BALANCES_CACHE_MISSES: Result = + try_create_int_counter("beacon_balances_cache_misses_total", "Count of times balances cache fulfils request"); /* * Persisting BeaconChain to disk diff --git a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs index a0e281a98ca..e8f619e92f3 100644 --- a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs +++ b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs @@ -5,7 +5,7 @@ use crate::{BeaconChainTypes, CheckPoint}; use operation_pool::PersistedOperationPool; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use store::{DBColumn, Error as StoreError, SimpleStoreItem, SszBlockRootTree}; +use store::{DBColumn, Error as StoreError, SimpleStoreItem}; use types::Hash256; /// 32-byte key for accessing the `PersistedBeaconChain`. 
@@ -20,7 +20,6 @@ pub struct PersistedBeaconChain { pub ssz_head_tracker: SszHeadTracker, pub fork_choice: SszForkChoice, pub eth1_cache: Option, - pub block_root_tree: SszBlockRootTree, } impl SimpleStoreItem for PersistedBeaconChain { diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 337878d4cd1..57f05cb79f7 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -6,7 +6,6 @@ use crate::{ }; use eth1::Config as Eth1Config; use genesis::interop_genesis_state; -use lmd_ghost::ThreadSafeReducedTree; use rayon::prelude::*; use sloggers::{terminal::TerminalLoggerBuilder, types::Severity, Build}; use slot_clock::TestingSlotClock; @@ -35,7 +34,6 @@ pub type BaseHarnessType = Witness< TStore, TStoreMigrator, TestingSlotClock, - ThreadSafeReducedTree, CachingEth1Backend, TEthSpec, NullEventHandler, diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index c88fcf4d14c..58bd10a4ec7 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -391,7 +391,7 @@ fn free_attestations_added_to_fork_choice_some_none() { if slot <= num_blocks_produced && slot != 0 { assert_eq!( latest_message.unwrap().1, - slot, + slot.epoch(MinimalEthSpec::slots_per_epoch()), "Latest message slot for {} should be equal to slot {}.", validator, slot @@ -483,7 +483,7 @@ fn free_attestations_added_to_fork_choice_all_updated() { assert_eq!( latest_message.unwrap().1, - slot, + slot.epoch(MinimalEthSpec::slots_per_epoch()), "Latest message slot should be equal to attester duty." 
); diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index e5293cd1968..8feed866e40 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -33,7 +33,6 @@ exit-future = "0.1.4" futures = "0.1.29" reqwest = "0.9.22" url = "2.1.0" -lmd_ghost = { path = "../../eth2/lmd_ghost" } eth1 = { path = "../eth1" } genesis = { path = "../genesis" } environment = { path = "../../lighthouse/environment" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 611763e9b61..6519906713f 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -4,7 +4,6 @@ use crate::Client; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::CachingEth1Backend, - lmd_ghost::ThreadSafeReducedTree, slot_clock::{SlotClock, SystemTimeSlotClock}, store::{ migrate::{BackgroundMigrator, Migrate, NullMigrator}, @@ -21,7 +20,6 @@ use genesis::{ generate_deterministic_keypairs, interop_genesis_state, state_from_ssz_file, Eth1GenesisService, }; use lighthouse_bootstrap::Bootstrapper; -use lmd_ghost::LmdGhost; use network::{NetworkConfig, NetworkMessage, Service as NetworkService}; use slog::info; use ssz::Decode; @@ -67,23 +65,14 @@ pub struct ClientBuilder { eth_spec_instance: T::EthSpec, } -impl +impl ClientBuilder< - Witness< - TStore, - TStoreMigrator, - TSlotClock, - TLmdGhost, - TEth1Backend, - TEthSpec, - TEventHandler, - >, + Witness, > where TStore: Store + 'static, TStoreMigrator: store::Migrate, TSlotClock: SlotClock + Clone + 'static, - TLmdGhost: LmdGhost + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -367,17 +356,8 @@ where /// If type inference errors are being raised, see the comment on the definition of `Self`. 
pub fn build( self, - ) -> Client< - Witness< - TStore, - TStoreMigrator, - TSlotClock, - TLmdGhost, - TEth1Backend, - TEthSpec, - TEventHandler, - >, - > { + ) -> Client> + { Client { beacon_chain: self.beacon_chain, libp2p_network: self.libp2p_network, @@ -390,15 +370,7 @@ where impl ClientBuilder< - Witness< - TStore, - TStoreMigrator, - TSlotClock, - ThreadSafeReducedTree, - TEth1Backend, - TEthSpec, - TEventHandler, - >, + Witness, > where TStore: Store + 'static, @@ -435,13 +407,12 @@ where } } -impl +impl ClientBuilder< Witness< TStore, TStoreMigrator, TSlotClock, - TLmdGhost, TEth1Backend, TEthSpec, WebSocketSender, @@ -451,7 +422,6 @@ where TStore: Store + 'static, TStoreMigrator: store::Migrate, TSlotClock: SlotClock + 'static, - TLmdGhost: LmdGhost + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, { @@ -485,13 +455,12 @@ where } } -impl +impl ClientBuilder< Witness< DiskStore, TStoreMigrator, TSlotClock, - TLmdGhost, TEth1Backend, TEthSpec, TEventHandler, @@ -500,7 +469,6 @@ impl, TEthSpec> + 'static, - TLmdGhost: LmdGhost, TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -535,13 +503,12 @@ where } } -impl +impl ClientBuilder< Witness< SimpleDiskStore, TStoreMigrator, TSlotClock, - TLmdGhost, TEth1Backend, TEthSpec, TEventHandler, @@ -550,7 +517,6 @@ impl, TEthSpec> + 'static, - TLmdGhost: LmdGhost, TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -564,13 +530,12 @@ where } } -impl +impl ClientBuilder< Witness< MemoryStore, NullMigrator, TSlotClock, - TLmdGhost, TEth1Backend, TEthSpec, TEventHandler, @@ -578,7 +543,6 @@ impl > where TSlotClock: SlotClock + 'static, - TLmdGhost: LmdGhost, TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -594,13 +558,12 @@ where } } -impl 
+impl ClientBuilder< Witness< DiskStore, BackgroundMigrator, TSlotClock, - TLmdGhost, TEth1Backend, TEthSpec, TEventHandler, @@ -608,7 +571,6 @@ impl > where TSlotClock: SlotClock + 'static, - TLmdGhost: LmdGhost, TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -622,13 +584,12 @@ where } } -impl +impl ClientBuilder< Witness< TStore, TStoreMigrator, TSlotClock, - TLmdGhost, CachingEth1Backend, TEthSpec, TEventHandler, @@ -638,7 +599,6 @@ where TStore: Store + 'static, TStoreMigrator: store::Migrate, TSlotClock: SlotClock + 'static, - TLmdGhost: LmdGhost + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { @@ -724,22 +684,13 @@ where } } -impl +impl ClientBuilder< - Witness< - TStore, - TStoreMigrator, - SystemTimeSlotClock, - TLmdGhost, - TEth1Backend, - TEthSpec, - TEventHandler, - >, + Witness, > where TStore: Store + 'static, TStoreMigrator: store::Migrate, - TLmdGhost: LmdGhost + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index d6424f0afec..ea597228500 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -12,7 +12,7 @@ use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeError}; use libp2p::swarm::protocols_handler::{ KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol, }; -use slog::{crit, debug, error}; +use slog::{crit, debug, error, warn}; use smallvec::SmallVec; use std::collections::hash_map::Entry; use std::time::{Duration, Instant}; @@ -319,8 +319,12 @@ where substream: out, request, }; - self.outbound_substreams - .insert(id, (awaiting_stream, delay_key)); + if let Some(_) = self + .outbound_substreams + .insert(id, (awaiting_stream, delay_key)) + { + 
warn!(self.log, "Duplicate outbound substream id"; "id" => format!("{:?}", id)); + } } _ => { // a response is not expected, drop the stream for all other requests } diff --git a/beacon_node/network/src/sync/range_sync/batch_processing.rs b/beacon_node/network/src/sync/range_sync/batch_processing.rs index 27c4fb295ea..a054b6b31af 100644 --- a/beacon_node/network/src/sync/range_sync/batch_processing.rs +++ b/beacon_node/network/src/sync/range_sync/batch_processing.rs @@ -70,7 +70,7 @@ fn process_batch( ); successful_block_import = true; } - BlockProcessingOutcome::ParentUnknown { parent } => { + BlockProcessingOutcome::ParentUnknown { parent, .. } => { // blocks should be sequential and all parents should exist warn!( log, "Parent block is unknown"; diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 2964ba0b695..beb39265a9e 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -307,8 +307,8 @@ impl ChainCollection { self.finalized_chains.retain(|chain| { if chain.target_head_slot <= local_finalized_slot || beacon_chain - .block_root_tree - .is_known_block_root(&chain.target_head_root) + .fork_choice + .contains_block(&chain.target_head_root) { debug!(log, "Purging out of finalized chain"; "start_slot" => chain.start_slot, "end_slot" => chain.target_head_slot); chain.status_peers(network); @@ -320,8 +320,8 @@ impl ChainCollection { self.head_chains.retain(|chain| { if chain.target_head_slot <= local_finalized_slot || beacon_chain - .block_root_tree - .is_known_block_root(&chain.target_head_root) + .fork_choice + .contains_block(&chain.target_head_root) { debug!(log, "Purging out of date head chain"; "start_slot" => chain.start_slot, "end_slot" => chain.target_head_slot); chain.status_peers(network); diff --git a/beacon_node/network/src/sync/range_sync/range.rs 
b/beacon_node/network/src/sync/range_sync/range.rs index ee7fb8ae722..ad61629363c 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -150,9 +150,7 @@ impl RangeSync { self.chains.purge_outdated_chains(network, &self.log); if remote_finalized_slot > local_info.head_slot - && !chain - .block_root_tree - .is_known_block_root(&remote.finalized_root) + && !chain.fork_choice.contains_block(&remote.finalized_root) { debug!(self.log, "Finalization sync peer joined"; "peer_id" => format!("{:?}", peer_id)); // Finalized chain search diff --git a/beacon_node/rest_api/src/advanced.rs b/beacon_node/rest_api/src/advanced.rs new file mode 100644 index 00000000000..d7ab80299aa --- /dev/null +++ b/beacon_node/rest_api/src/advanced.rs @@ -0,0 +1,15 @@ +use crate::response_builder::ResponseBuilder; +use crate::ApiResult; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use hyper::{Body, Request}; +use std::sync::Arc; + +/// Returns the `proto_array` fork choice struct, encoded as JSON. +/// +/// Useful for debugging or advanced inspection of the chain. 
+pub fn get_fork_choice( + req: Request, + beacon_chain: Arc>, +) -> ApiResult { + ResponseBuilder::new(&req)?.body_no_ssz(&*beacon_chain.fork_choice.core_proto_array()) +} diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 221d96e251b..f4f13babc81 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -4,6 +4,7 @@ mod macros; extern crate lazy_static; extern crate network as client_network; +mod advanced; mod beacon; pub mod config; mod consensus; diff --git a/beacon_node/rest_api/src/router.rs b/beacon_node/rest_api/src/router.rs index d9fd63ce551..02c4678c95b 100644 --- a/beacon_node/rest_api/src/router.rs +++ b/beacon_node/rest_api/src/router.rs @@ -1,6 +1,6 @@ use crate::{ - beacon, consensus, error::ApiError, helpers, metrics, network, node, spec, validator, BoxFut, - NetworkChannel, + advanced, beacon, consensus, error::ApiError, helpers, metrics, network, node, spec, validator, + BoxFut, NetworkChannel, }; use beacon_chain::{BeaconChain, BeaconChainTypes}; use client_network::Service as NetworkService; @@ -147,6 +147,11 @@ pub fn route( into_boxfut(spec::get_eth2_config::(req, eth2_config)) } + // Methods for advanced parameters + (&Method::GET, "/advanced/fork_choice") => { + into_boxfut(advanced::get_fork_choice::(req, beacon_chain)) + } + (&Method::GET, "/metrics") => into_boxfut(metrics::get_prometheus::( req, beacon_chain, diff --git a/beacon_node/rest_api/tests/test.rs b/beacon_node/rest_api/tests/test.rs index 1d438253a53..12eed8bbc5b 100644 --- a/beacon_node/rest_api/tests/test.rs +++ b/beacon_node/rest_api/tests/test.rs @@ -792,6 +792,30 @@ fn get_committees() { assert_eq!(result, expected, "result should be as expected"); } +#[test] +fn get_fork_choice() { + let mut env = build_env(); + + let node = build_node(&mut env, testing_client_config()); + let remote_node = node.remote_node().expect("should produce remote node"); + + let fork_choice = env + .runtime() + 
.block_on(remote_node.http.advanced().get_fork_choice()) + .expect("should not error when getting fork choice"); + + assert_eq!( + fork_choice, + *node + .client + .beacon_chain() + .expect("node should have beacon chain") + .fork_choice + .core_proto_array(), + "result should be as expected" + ); +} + fn compare_validator_response( state: &BeaconState, response: &ValidatorResponse, diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index eda78383c24..2baef229626 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -11,7 +11,7 @@ pub use eth2_config::Eth2Config; use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, events::WebSocketSender, - lmd_ghost::ThreadSafeReducedTree, slot_clock::SystemTimeSlotClock, + slot_clock::SystemTimeSlotClock, }; use clap::ArgMatches; use config::get_configs; @@ -28,7 +28,6 @@ pub type ProductionClient = Client< DiskStore, BackgroundMigrator, SystemTimeSlotClock, - ThreadSafeReducedTree, E>, CachingEth1Backend>, E, WebSocketSender, diff --git a/beacon_node/store/src/block_root_tree.rs b/beacon_node/store/src/block_root_tree.rs deleted file mode 100644 index dba1b1c24b9..00000000000 --- a/beacon_node/store/src/block_root_tree.rs +++ /dev/null @@ -1,363 +0,0 @@ -use itertools::Itertools; -use parking_lot::RwLock; -use ssz_derive::{Decode, Encode}; -use std::collections::{HashMap, HashSet}; -use std::iter::{self, FromIterator}; -use types::{Hash256, Slot}; - -/// In-memory cache of all block roots post-finalization. Includes short-lived forks. -/// -/// Used by fork choice to avoid reconstructing hot states just for their block roots. 
-// NOTE: could possibly be streamlined by combining with the head tracker and/or fork choice -#[derive(Debug)] -pub struct BlockRootTree { - nodes: RwLock>, -} - -impl Clone for BlockRootTree { - fn clone(&self) -> Self { - Self { - nodes: RwLock::new(self.nodes.read().clone()), - } - } -} - -#[derive(Debug, PartialEq)] -pub enum BlockRootTreeError { - PrevUnknown(Hash256), -} - -/// Data for a single `block_root` in the tree. -#[derive(Debug, Clone, Encode, Decode)] -struct Node { - /// Hash of the preceding block (should be the parent block). - /// - /// A `previous` of `Hash256::zero` indicates the root of the tree. - previous: Hash256, - /// Slot of this node's block. - slot: Slot, -} - -impl BlockRootTree { - /// Create a new block root tree where `(root_hash, root_slot)` is considered finalized. - /// - /// All subsequent blocks added should descend from the root block. - pub fn new(root_hash: Hash256, root_slot: Slot) -> Self { - Self { - nodes: RwLock::new(HashMap::from_iter(iter::once(( - root_hash, - Node { - previous: Hash256::zero(), - slot: root_slot, - }, - )))), - } - } - - /// Check if `block_root` exists in the tree. - pub fn is_known_block_root(&self, block_root: &Hash256) -> bool { - self.nodes.read().contains_key(block_root) - } - - /// Add a new `block_root` to the tree. - /// - /// Will return an error if `prev_block_root` doesn't exist in the tree. - pub fn add_block_root( - &self, - block_root: Hash256, - prev_block_root: Hash256, - block_slot: Slot, - ) -> Result<(), BlockRootTreeError> { - let mut nodes = self.nodes.write(); - if nodes.contains_key(&prev_block_root) { - nodes.insert( - block_root, - Node { - previous: prev_block_root, - slot: block_slot, - }, - ); - Ok(()) - } else { - Err(BlockRootTreeError::PrevUnknown(prev_block_root)) - } - } - - /// Create a reverse iterator from `block_root` (inclusive). - /// - /// Will skip slots, see `every_slot_iter_from` for a non-skipping variant. 
- pub fn iter_from(&self, block_root: Hash256) -> BlockRootTreeIter { - BlockRootTreeIter { - tree: self, - current_block_root: block_root, - } - } - - /// Create a reverse iterator that yields a block root for every slot. - /// - /// E.g. if slot 6 is skipped, this iterator will return the block root from slot 5 at slot 6. - pub fn every_slot_iter_from<'a>( - &'a self, - block_root: Hash256, - ) -> impl Iterator + 'a { - let mut block_roots = self.iter_from(block_root).peekable(); - - // Include the value for the first `block_root` if any, then fill in the skipped slots - // between each pair of previous block roots by duplicating the older root. - block_roots - .peek() - .cloned() - .into_iter() - .chain(block_roots.tuple_windows().flat_map( - |((_, high_slot), (low_hash, low_slot))| { - (low_slot.as_u64()..high_slot.as_u64()) - .rev() - .map(move |slot| (low_hash, Slot::new(slot))) - }, - )) - } - - /// Prune the tree. - /// - /// Only keep block roots descended from `finalized_root`, which lie on a chain leading - /// to one of the heads contained in `heads`. - pub fn prune_to(&self, finalized_root: Hash256, heads: impl IntoIterator) { - let mut keep = HashSet::new(); - keep.insert(finalized_root); - - for head_block_root in heads.into_iter() { - // Iterate backwards until we reach a portion of the chain that we've already decided - // to keep. This also discards the pre-finalization block roots. - let mut keep_head = false; - - let head_blocks = self - .iter_from(head_block_root) - .map(|(block_root, _)| block_root) - .inspect(|block_root| { - if block_root == &finalized_root { - keep_head = true; - } - }) - .take_while(|block_root| !keep.contains(&block_root)) - .collect::>(); - - // If the head descends from the finalized root, keep it. Else throw it out. 
- if keep_head { - keep.extend(head_blocks); - } - } - - self.nodes - .write() - .retain(|block_root, _| keep.contains(block_root)); - } - - pub fn as_ssz_container(&self) -> SszBlockRootTree { - SszBlockRootTree { - nodes: Vec::from_iter(self.nodes.read().clone()), - } - } -} - -/// Simple (skipping) iterator for `BlockRootTree`. -#[derive(Debug)] -pub struct BlockRootTreeIter<'a> { - tree: &'a BlockRootTree, - current_block_root: Hash256, -} - -impl<'a> Iterator for BlockRootTreeIter<'a> { - type Item = (Hash256, Slot); - - fn next(&mut self) -> Option { - // Genesis - if self.current_block_root.is_zero() { - None - } else { - let block_root = self.current_block_root; - self.tree.nodes.read().get(&block_root).map(|node| { - self.current_block_root = node.previous; - (block_root, node.slot) - }) - } - } -} - -/// Serializable version of `BlockRootTree` that can be persisted to disk. -#[derive(Debug, Clone, Encode, Decode)] -pub struct SszBlockRootTree { - nodes: Vec<(Hash256, Node)>, -} - -impl Into for SszBlockRootTree { - fn into(self) -> BlockRootTree { - BlockRootTree { - nodes: RwLock::new(HashMap::from_iter(self.nodes)), - } - } -} - -#[cfg(test)] -mod test { - use super::*; - - fn int_hash(x: u64) -> Hash256 { - Hash256::from_low_u64_be(x) - } - - fn check_iter_from( - block_tree: &BlockRootTree, - start_block_root: Hash256, - expected: &[(Hash256, Slot)], - ) { - assert_eq!( - &block_tree.iter_from(start_block_root).collect::>()[..], - expected - ); - } - - fn check_every_slot_iter_from( - block_tree: &BlockRootTree, - start_block_root: Hash256, - expected: &[(Hash256, Slot)], - ) { - assert_eq!( - &block_tree - .every_slot_iter_from(start_block_root) - .collect::>()[..], - expected - ); - } - - #[test] - fn single_chain() { - let block_tree = BlockRootTree::new(int_hash(1), Slot::new(1)); - for i in 2..100 { - block_tree - .add_block_root(int_hash(i), int_hash(i - 1), Slot::new(i)) - .expect("add_block_root ok"); - - let expected = (1..=i) - .rev() - 
.map(|j| (int_hash(j), Slot::new(j))) - .collect::>(); - - check_iter_from(&block_tree, int_hash(i), &expected); - check_every_slot_iter_from(&block_tree, int_hash(i), &expected); - - // Still OK after pruning. - block_tree.prune_to(int_hash(1), vec![int_hash(i)]); - - check_iter_from(&block_tree, int_hash(i), &expected); - check_every_slot_iter_from(&block_tree, int_hash(i), &expected); - } - } - - #[test] - fn skips_of_2() { - let block_tree = BlockRootTree::new(int_hash(1), Slot::new(1)); - let step_length = 2u64; - for i in (1 + step_length..100).step_by(step_length as usize) { - block_tree - .add_block_root(int_hash(i), int_hash(i - step_length), Slot::new(i)) - .expect("add_block_root ok"); - - let sparse_expected = (1..=i) - .rev() - .step_by(step_length as usize) - .map(|j| (int_hash(j), Slot::new(j))) - .collect_vec(); - let every_slot_expected = (1..=i) - .rev() - .map(|j| { - let nearest = 1 + (j - 1) / step_length * step_length; - (int_hash(nearest), Slot::new(j)) - }) - .collect_vec(); - - check_iter_from(&block_tree, int_hash(i), &sparse_expected); - check_every_slot_iter_from(&block_tree, int_hash(i), &every_slot_expected); - - // Still OK after pruning. 
- block_tree.prune_to(int_hash(1), vec![int_hash(i)]); - - check_iter_from(&block_tree, int_hash(i), &sparse_expected); - check_every_slot_iter_from(&block_tree, int_hash(i), &every_slot_expected); - } - } - - #[test] - fn prune_small_fork() { - let tree = BlockRootTree::new(int_hash(1), Slot::new(1)); - // Space between fork hash values - let offset = 1000; - let num_blocks = 50; - - let fork1_start = 2; - let fork2_start = 2 + offset; - - tree.add_block_root(int_hash(fork1_start), int_hash(1), Slot::new(2)) - .expect("add first block of left fork"); - tree.add_block_root(int_hash(fork2_start), int_hash(1), Slot::new(2)) - .expect("add first block of right fork"); - - for i in 3..num_blocks { - tree.add_block_root(int_hash(i), int_hash(i - 1), Slot::new(i)) - .expect("add block to left fork"); - tree.add_block_root(int_hash(i + offset), int_hash(i + offset - 1), Slot::new(i)) - .expect("add block to right fork"); - } - - let root = (int_hash(1), Slot::new(1)); - - let (all_fork1_blocks, all_fork2_blocks): (Vec<_>, Vec<_>) = (2..num_blocks) - .rev() - .map(|i| { - ( - (int_hash(i), Slot::new(i)), - (int_hash(i + offset), Slot::new(i)), - ) - }) - .chain(iter::once((root, root))) - .unzip(); - - let fork1_head = int_hash(num_blocks - 1); - let fork2_head = int_hash(num_blocks + offset - 1); - - // Check that pruning with both heads preserves both chains. - let both_tree = tree.clone(); - both_tree.prune_to(root.0, vec![fork1_head, fork2_head]); - check_iter_from(&both_tree, fork1_head, &all_fork1_blocks); - check_iter_from(&both_tree, fork2_head, &all_fork2_blocks); - - // Check that pruning to either of the single chains leaves just that chain in the tree. 
- let fork1_tree = tree.clone(); - fork1_tree.prune_to(root.0, vec![fork1_head]); - check_iter_from(&fork1_tree, fork1_head, &all_fork1_blocks); - check_iter_from(&fork1_tree, fork2_head, &[]); - - let fork2_tree = tree.clone(); - fork2_tree.prune_to(root.0, vec![fork2_head]); - check_iter_from(&fork2_tree, fork1_head, &[]); - check_iter_from(&fork2_tree, fork2_head, &all_fork2_blocks); - - // Check that advancing the finalized root onto one side completely removes the other - // side. - let fin_tree = tree; - let prune_point = num_blocks / 2; - let remaining_fork1_blocks = all_fork1_blocks - .into_iter() - .take_while(|(_, slot)| *slot >= prune_point) - .collect_vec(); - fin_tree.prune_to(int_hash(prune_point), vec![fork1_head, fork2_head]); - check_iter_from(&fin_tree, fork1_head, &remaining_fork1_blocks); - check_iter_from(&fin_tree, fork2_head, &[]); - } - - #[test] - fn iter_zero() { - let block_tree = BlockRootTree::new(int_hash(0), Slot::new(0)); - assert_eq!(block_tree.iter_from(int_hash(0)).count(), 0); - assert_eq!(block_tree.every_slot_iter_from(int_hash(0)).count(), 0); - } -} diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index c4f26704df7..e3b3e7926d2 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -11,7 +11,6 @@ extern crate lazy_static; mod block_at_slot; -pub mod block_root_tree; pub mod chunked_iter; pub mod chunked_vector; pub mod config; @@ -29,7 +28,6 @@ pub mod migrate; use std::sync::Arc; -pub use self::block_root_tree::{BlockRootTree, SszBlockRootTree}; pub use self::config::StoreConfig; pub use self::hot_cold_store::HotColdDB as DiskStore; pub use self::leveldb_store::LevelDB as SimpleDiskStore; diff --git a/eth2/lmd_ghost/Cargo.toml b/eth2/lmd_ghost/Cargo.toml deleted file mode 100644 index 1a24537869d..00000000000 --- a/eth2/lmd_ghost/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "lmd_ghost" -version = "0.1.0" -authors = ["Age Manning ", "Paul Hauner "] -edition = 
"2018" - -[dependencies] -parking_lot = "0.9.0" -store = { path = "../../beacon_node/store" } -types = { path = "../types" } -itertools = "0.8.1" -eth2_ssz = "0.1.2" -eth2_ssz_derive = "0.1.0" - -[dev-dependencies] -criterion = "0.3.0" -hex = "0.3" -yaml-rust = "0.4.3" -bls = { path = "../utils/bls" } -slot_clock = { path = "../utils/slot_clock" } -beacon_chain = { path = "../../beacon_node/beacon_chain" } -env_logger = "0.7.1" -lazy_static = "1.4.0" -rand = "0.7.2" diff --git a/eth2/lmd_ghost/src/lib.rs b/eth2/lmd_ghost/src/lib.rs deleted file mode 100644 index 08f74848489..00000000000 --- a/eth2/lmd_ghost/src/lib.rs +++ /dev/null @@ -1,69 +0,0 @@ -mod reduced_tree; - -use std::sync::Arc; -use store::{BlockRootTree, Store}; -use types::{BeaconBlock, EthSpec, Hash256, Slot}; - -pub use reduced_tree::ThreadSafeReducedTree; - -pub type Result = std::result::Result; - -// Note: the `PartialEq` bound is only required for testing. If it becomes a serious annoyance we -// can remove it. -pub trait LmdGhost, E: EthSpec>: PartialEq + Send + Sync + Sized { - /// Create a new instance, with the given `store` and `finalized_root`. - fn new( - store: Arc, - block_root_tree: Arc, - finalized_block: &BeaconBlock, - finalized_root: Hash256, - ) -> Self; - - /// Process an attestation message from some validator that attests to some `block_hash` - /// representing a block at some `block_slot`. - fn process_attestation( - &self, - validator_index: usize, - block_hash: Hash256, - block_slot: Slot, - ) -> Result<()>; - - /// Process a block that was seen on the network. - fn process_block(&self, block: &BeaconBlock, block_hash: Hash256) -> Result<()>; - - /// Returns the head of the chain, starting the search at `start_block_root` and moving upwards - /// (in block height). 
- fn find_head( - &self, - start_block_slot: Slot, - start_block_root: Hash256, - weight: F, - ) -> Result - where - F: Fn(usize) -> Option + Copy; - - /// Provide an indication that the blockchain has been finalized at the given `finalized_block`. - /// - /// `finalized_block_root` must be the root of `finalized_block`. - fn update_finalized_root( - &self, - finalized_block: &BeaconBlock, - finalized_block_root: Hash256, - ) -> Result<()>; - - /// Returns the latest message for a given validator index. - fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)>; - - /// Runs an integrity verification function on fork choice algorithm. - /// - /// Returns `Ok(())` if the underlying fork choice has maintained its integrity, - /// `Err(description)` otherwise. - fn verify_integrity(&self) -> Result<()>; - - /// Encode the `LmdGhost` instance to bytes. - fn as_bytes(&self) -> Vec; - - /// Create a new `LmdGhost` instance given a `store` and encoded bytes. - fn from_bytes(bytes: &[u8], store: Arc, block_root_tree: Arc) - -> Result; -} diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs deleted file mode 100644 index d0798afd20d..00000000000 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ /dev/null @@ -1,1027 +0,0 @@ -//! An implementation of "reduced tree" LMD GHOST fork choice. -//! -//! This algorithm was conceived at IC3 Cornell, 2019. -//! -//! This implementation is incomplete and has known bugs. Do not use in production. 
-use super::{LmdGhost, Result as SuperResult}; -use itertools::Itertools; -use parking_lot::RwLock; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::cmp::Ordering; -use std::collections::HashMap; -use std::fmt; -use std::marker::PhantomData; -use std::sync::Arc; -use store::{BlockRootTree, Error as StoreError, Store}; -use types::{BeaconBlock, EthSpec, Hash256, Slot}; - -type Result = std::result::Result; - -#[derive(Debug, PartialEq)] -pub enum Error { - MissingNode(Hash256), - MissingBlock(Hash256), - MissingState(Hash256), - MissingChild(Hash256), - MissingSuccessor(Hash256, Hash256), - NotInTree(Hash256), - NoCommonAncestor((Hash256, Hash256)), - StoreError(StoreError), - ValidatorWeightUnknown(usize), - SszDecodingError(ssz::DecodeError), - InvalidReducedTreeSsz(String), -} - -impl From for Error { - fn from(e: StoreError) -> Error { - Error::StoreError(e) - } -} - -impl From for Error { - fn from(e: ssz::DecodeError) -> Error { - Error::SszDecodingError(e) - } -} - -pub struct ThreadSafeReducedTree { - core: RwLock>, -} - -impl fmt::Debug for ThreadSafeReducedTree { - /// `Debug` just defers to the implementation of `self.core`. - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.core.fmt(f) - } -} - -impl PartialEq for ThreadSafeReducedTree { - /// This implementation ignores the `store`. 
- fn eq(&self, other: &Self) -> bool { - *self.core.read() == *other.core.read() - } -} - -impl LmdGhost for ThreadSafeReducedTree -where - T: Store, - E: EthSpec, -{ - fn new( - store: Arc, - block_root_tree: Arc, - genesis_block: &BeaconBlock, - genesis_root: Hash256, - ) -> Self { - ThreadSafeReducedTree { - core: RwLock::new(ReducedTree::new( - store, - block_root_tree, - genesis_block, - genesis_root, - )), - } - } - - fn process_attestation( - &self, - validator_index: usize, - block_hash: Hash256, - block_slot: Slot, - ) -> SuperResult<()> { - self.core - .write() - .process_message(validator_index, block_hash, block_slot) - .map_err(|e| format!("process_attestation failed: {:?}", e)) - } - - /// Process a block that was seen on the network. - fn process_block(&self, block: &BeaconBlock, block_hash: Hash256) -> SuperResult<()> { - self.core - .write() - .maybe_add_weightless_node(block.slot, block_hash) - .map_err(|e| format!("process_block failed: {:?}", e)) - } - - fn find_head( - &self, - start_block_slot: Slot, - start_block_root: Hash256, - weight_fn: F, - ) -> SuperResult - where - F: Fn(usize) -> Option + Copy, - { - self.core - .write() - .update_weights_and_find_head(start_block_slot, start_block_root, weight_fn) - .map_err(|e| format!("find_head failed: {:?}", e)) - } - - fn update_finalized_root( - &self, - new_block: &BeaconBlock, - new_root: Hash256, - ) -> SuperResult<()> { - self.core - .write() - .update_root(new_block.slot, new_root) - .map_err(|e| format!("update_finalized_root failed: {:?}", e)) - } - - fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { - self.core.read().latest_message(validator_index) - } - - fn verify_integrity(&self) -> SuperResult<()> { - self.core.read().verify_integrity() - } - - /// Consume the `ReducedTree` object and return its ssz encoded bytes representation. 
- fn as_bytes(&self) -> Vec { - self.core.read().as_bytes() - } - - /// Create a new `ThreadSafeReducedTree` instance from a `store` and the - /// encoded ssz bytes representation. - /// - /// Returns an error if ssz bytes are not a valid `ReducedTreeSsz` object. - fn from_bytes( - bytes: &[u8], - store: Arc, - block_root_tree: Arc, - ) -> SuperResult { - Ok(ThreadSafeReducedTree { - core: RwLock::new( - ReducedTree::from_bytes(bytes, store, block_root_tree) - .map_err(|e| format!("Cannot decode ssz bytes {:?}", e))?, - ), - }) - } -} - -/// Intermediate representation of a `ReducedTree` `LmdGhost` fork choice. -#[derive(Debug, PartialEq, Encode, Decode)] -struct ReducedTreeSsz { - pub node_hashes: Vec, - pub nodes: Vec, - pub latest_votes: Vec>, - pub root_hash: Hash256, - pub root_slot: Slot, -} - -impl ReducedTreeSsz { - pub fn from_reduced_tree(tree: &ReducedTree) -> Self { - let (node_hashes, nodes): (Vec<_>, Vec<_>) = tree.nodes.clone().into_iter().unzip(); - ReducedTreeSsz { - node_hashes, - nodes, - latest_votes: tree.latest_votes.0.clone(), - root_hash: tree.root.0, - root_slot: tree.root.1, - } - } - - pub fn into_reduced_tree( - self, - store: Arc, - block_root_tree: Arc, - ) -> Result> { - if self.node_hashes.len() != self.nodes.len() { - return Err(Error::InvalidReducedTreeSsz( - "node_hashes and nodes should have equal length".to_string(), - )); - } - let nodes: HashMap<_, _> = self - .node_hashes - .into_iter() - .zip(self.nodes.into_iter()) - .collect(); - let latest_votes = ElasticList(self.latest_votes); - let root = (self.root_hash, self.root_slot); - Ok(ReducedTree { - store, - block_root_tree, - nodes, - latest_votes, - root, - _phantom: PhantomData, - }) - } -} - -#[derive(Clone)] -struct ReducedTree { - store: Arc, - block_root_tree: Arc, - /// Stores all nodes of the tree, keyed by the block hash contained in the node. - nodes: HashMap, - /// Maps validator indices to their latest votes. 
- latest_votes: ElasticList>, - /// Stores the root of the tree, used for pruning. - root: (Hash256, Slot), - _phantom: PhantomData, -} - -impl fmt::Debug for ReducedTree { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.nodes.fmt(f) - } -} - -impl PartialEq for ReducedTree { - /// This implementation ignores the `store` field. - fn eq(&self, other: &Self) -> bool { - self.nodes == other.nodes - && self.latest_votes == other.latest_votes - && self.root == other.root - } -} - -impl ReducedTree -where - T: Store, - E: EthSpec, -{ - pub fn new( - store: Arc, - block_root_tree: Arc, - genesis_block: &BeaconBlock, - genesis_root: Hash256, - ) -> Self { - let mut nodes = HashMap::new(); - - // Insert the genesis node. - nodes.insert(genesis_root, Node::new(genesis_root)); - - Self { - store, - block_root_tree, - nodes, - latest_votes: ElasticList::default(), - root: (genesis_root, genesis_block.slot), - _phantom: PhantomData, - } - } - - /// Set the root node (the node without any parents) to the given `new_slot` and `new_root`. - /// - /// The given `new_root` must be in the block tree (but not necessarily in the reduced tree). - /// Any nodes which are not a descendant of `new_root` will be removed from the store. - pub fn update_root(&mut self, new_slot: Slot, new_root: Hash256) -> Result<()> { - self.maybe_add_weightless_node(new_slot, new_root)?; - - self.retain_subtree(self.root.0, new_root)?; - - self.root = (new_root, new_slot); - - let root_node = self.get_mut_node(new_root)?; - root_node.parent_hash = None; - - Ok(()) - } - - /// Removes `current_hash` and all descendants, except `subtree_hash` and all nodes - /// which have `subtree_hash` as an ancestor. - /// - /// In effect, prunes the tree so that only decendants of `subtree_hash` exist. 
- fn retain_subtree(&mut self, current_hash: Hash256, subtree_hash: Hash256) -> Result<()> { - if current_hash != subtree_hash { - let children = self.get_node(current_hash)?.children.clone(); - - for child in children { - self.retain_subtree(child.hash, subtree_hash)?; - } - - self.nodes.remove(¤t_hash); - } - - Ok(()) - } - - pub fn process_message( - &mut self, - validator_index: usize, - block_hash: Hash256, - slot: Slot, - ) -> Result<()> { - if slot >= self.root_slot() { - if let Some(previous_vote) = self.latest_votes.get(validator_index) { - // Note: it is possible to do a cheap equivocation check here: - // - // slashable = (previous_vote.slot == slot) && (previous_vote.hash != block_hash) - - if previous_vote.slot < slot { - self.remove_latest_message(validator_index)?; - } else { - return Ok(()); - } - } - - self.latest_votes.insert( - validator_index, - Some(Vote { - slot, - hash: block_hash, - }), - ); - - self.add_latest_message(validator_index, block_hash)?; - } - - Ok(()) - } - - pub fn update_weights_and_find_head( - &mut self, - start_block_slot: Slot, - start_block_root: Hash256, - weight_fn: F, - ) -> Result - where - F: Fn(usize) -> Option + Copy, - { - // It is possible that the given `start_block_root` is not in the reduced tree. - // - // In this case, we add a weightless node at `start_block_root`. - if !self.nodes.contains_key(&start_block_root) { - self.maybe_add_weightless_node(start_block_slot, start_block_root)?; - }; - - let _root_weight = self.update_weight(start_block_root, weight_fn)?; - - let start_node = self.get_node(start_block_root)?; - let head_node = self.find_head_from(start_node, start_block_slot)?; - - Ok(head_node.block_hash) - } - - pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { - match self.latest_votes.get_ref(validator_index) { - Some(Some(v)) => Some((v.hash, v.slot)), - _ => None, - } - } - - // Corresponds to the loop in `get_head` in the spec. 
- fn find_head_from<'a>( - &'a self, - start_node: &'a Node, - justified_slot: Slot, - ) -> Result<&'a Node> { - let children = start_node - .children - .iter() - // This check is primarily for the first iteration, where we must ensure that - // we only consider votes that were made after the last justified checkpoint. - .filter(|c| c.successor_slot > justified_slot) - .map(|c| self.get_node(c.hash)) - .collect::>>()?; - - if children.is_empty() { - Ok(start_node) - } else { - let best_child = children - .iter() - .max_by_key(|child| (child.weight, child.block_hash)) - // There can only be no maximum if there are no children. This code path is guarded - // against that condition. - .expect("There must be a maximally weighted node."); - - self.find_head_from(best_child, justified_slot) - } - } - - fn update_weight(&mut self, start_block_root: Hash256, weight_fn: F) -> Result - where - F: Fn(usize) -> Option + Copy, - { - let weight = { - let node = self.get_node(start_block_root)?.clone(); - - let mut weight = 0; - - for child in &node.children { - weight += self.update_weight(child.hash, weight_fn)?; - } - - for &voter in &node.voters { - weight += weight_fn(voter).ok_or_else(|| Error::ValidatorWeightUnknown(voter))?; - } - - weight - }; - - let node = self.get_mut_node(start_block_root)?; - node.weight = weight; - - Ok(weight) - } - - /// Removes the vote from `validator_index` from the reduced tree. - /// - /// If the validator had a vote in the tree, the removal of that vote may cause a node to - /// become redundant and removed from the reduced tree. 
- fn remove_latest_message(&mut self, validator_index: usize) -> Result<()> { - if let Some(vote) = *self.latest_votes.get(validator_index) { - if self.nodes.contains_key(&vote.hash) { - self.get_mut_node(vote.hash)?.remove_voter(validator_index); - let node = self.get_node(vote.hash)?.clone(); - - if let Some(parent_hash) = node.parent_hash { - if node.has_votes() || node.children.len() > 1 { - // A node with votes or more than one child is never removed. - } else if node.children.len() == 1 { - // A node which has only one child may be removed. - // - // Load the child of the node and set it's parent to be the parent of this - // node (viz., graft the node's child to the node's parent) - let child = self.get_mut_node(node.children[0].hash)?; - child.parent_hash = node.parent_hash; - - // Graft the parent of this node to it's child. - if let Some(parent_hash) = node.parent_hash { - let parent = self.get_mut_node(parent_hash)?; - parent.replace_child_hash(node.block_hash, node.children[0].hash)?; - } - - self.nodes.remove(&vote.hash); - } else if node.children.is_empty() { - // Remove the to-be-deleted node from it's parent. - if let Some(parent_hash) = node.parent_hash { - self.get_mut_node(parent_hash)? - .remove_child(node.block_hash)?; - } - - self.nodes.remove(&vote.hash); - - // A node which has no children may be deleted and potentially it's parent - // too. - self.maybe_delete_node(parent_hash)?; - } else { - // It is impossible for a node to have a number of children that is not 0, 1 or - // greater than one. - // - // This code is strictly unnecessary, however we keep it for readability. - unreachable!(); - } - } else { - // A node without a parent is the genesis/finalized node and should never be removed. - } - - self.latest_votes.insert(validator_index, Some(vote)); - } - } - - Ok(()) - } - - /// Deletes a node if it is unnecessary. - /// - /// Any node is unnecessary if all of the following are true: - /// - /// - it is not the root node. 
- /// - it only has one child. - /// - it does not have any votes. - fn maybe_delete_node(&mut self, hash: Hash256) -> Result<()> { - let should_delete = { - if let Ok(node) = self.get_node(hash) { - let node = node.clone(); - - if let Some(parent_hash) = node.parent_hash { - if node.children.len() == 1 && !node.has_votes() { - let child = &node.children[0]; - - // Graft the single descendant `node` to the `parent` of node. - self.get_mut_node(child.hash)?.parent_hash = Some(parent_hash); - - // Detach `node` from `parent`, replacing it with `child`. - // Preserve the parent's direct descendant slot. - self.get_mut_node(parent_hash)? - .replace_child_hash(hash, child.hash)?; - - true - } else { - false - } - } else { - // A node without a parent is the genesis node and should not be deleted. - false - } - } else { - // No need to delete a node that does not exist. - false - } - }; - - if should_delete { - self.nodes.remove(&hash); - } - - Ok(()) - } - - fn add_latest_message(&mut self, validator_index: usize, hash: Hash256) -> Result<()> { - if let Ok(node) = self.get_mut_node(hash) { - node.add_voter(validator_index); - } else { - let node = Node { - voters: vec![validator_index], - ..Node::new(hash) - }; - - self.add_node(node)?; - } - - Ok(()) - } - - fn maybe_add_weightless_node(&mut self, slot: Slot, hash: Hash256) -> Result<()> { - if slot > self.root_slot() && !self.nodes.contains_key(&hash) { - let node = Node::new(hash); - - self.add_node(node)?; - - // Read the `parent_hash` from the newly created node. If it has a parent (i.e., it's - // not the root), see if it is superfluous. - if let Some(parent_hash) = self.get_node(hash)?.parent_hash { - self.maybe_delete_node(parent_hash)?; - } - } - - Ok(()) - } - - /// Find the direct successor block of `ancestor` if `descendant` is a descendant. 
- fn find_ancestor_successor_opt( - &self, - ancestor: Hash256, - descendant: Hash256, - ) -> Result> { - Ok(self - .iter_ancestors(descendant, true) - .take_while(|(_, slot)| *slot >= self.root_slot()) - .map(|(block_hash, _)| block_hash) - .tuple_windows() - .find_map(|(successor, block_hash)| { - if block_hash == ancestor { - Some(successor) - } else { - None - } - })) - } - - /// Same as `find_ancestor_successor_opt` but will return an error instead of an option. - fn find_ancestor_successor(&self, ancestor: Hash256, descendant: Hash256) -> Result { - self.find_ancestor_successor_opt(ancestor, descendant)? - .ok_or_else(|| Error::MissingSuccessor(ancestor, descendant)) - } - - /// Look up the successor of the given `ancestor`, returning the slot of that block. - fn find_ancestor_successor_slot(&self, ancestor: Hash256, descendant: Hash256) -> Result { - let successor_hash = self.find_ancestor_successor(ancestor, descendant)?; - Ok(self.get_block(successor_hash)?.slot) - } - - /// Add `node` to the reduced tree, returning an error if `node` is not rooted in the tree. - fn add_node(&mut self, mut node: Node) -> Result<()> { - // Find the highest (by slot) ancestor of the given node in the reduced tree. - // - // If this node has no ancestor in the tree, exit early. - let mut prev_in_tree = self - .find_prev_in_tree(&node) - .ok_or_else(|| Error::NotInTree(node.block_hash)) - .and_then(|hash| self.get_node(hash))? - .clone(); - - // If the ancestor of `node` has children, there are three possible operations: - // - // 1. Graft the `node` between two existing nodes. - // 2. Create another node that will be grafted between two existing nodes, then graft - // `node` to it. - // 3. Graft `node` to an existing node. - if !prev_in_tree.children.is_empty() { - for child_link in &prev_in_tree.children { - let child_hash = child_link.hash; - - // 1. Graft the new node between two existing nodes. 
- // - // If `node` is a descendant of `prev_in_tree` but an ancestor of a child connected to - // `prev_in_tree`. - // - // This means that `node` can be grafted between `prev_in_tree` and the child that is a - // descendant of both `node` and `prev_in_tree`. - if let Some(successor) = - self.find_ancestor_successor_opt(node.block_hash, child_hash)? - { - let successor_slot = self.get_block(successor)?.slot; - let child = self.get_mut_node(child_hash)?; - - // Graft `child` to `node`. - child.parent_hash = Some(node.block_hash); - // Graft `node` to `child`. - node.children.push(ChildLink { - hash: child_hash, - successor_slot, - }); - // Detach `child` from `prev_in_tree`, replacing it with `node`. - prev_in_tree.replace_child_hash(child_hash, node.block_hash)?; - // Graft `node` to `prev_in_tree`. - node.parent_hash = Some(prev_in_tree.block_hash); - - break; - } - } - - // 2. Create another node that will be grafted between two existing nodes, then graft - // `node` to it. - // - // Note: given that `prev_in_tree` has children and that `node` is not an ancestor of - // any of the children of `prev_in_tree`, we know that `node` is on a different fork to - // all of the children of `prev_in_tree`. - if node.parent_hash.is_none() { - for child_link in &prev_in_tree.children { - let child_hash = child_link.hash; - // Find the highest (by slot) common ancestor between `node` and `child`. - // - // The common ancestor is the last block before `node` and `child` forked. - let ancestor_hash = - self.find_highest_common_ancestor(node.block_hash, child_hash)?; - - // If the block before `node` and `child` forked is _not_ `prev_in_tree` we - // must add this new block into the tree (because it is a decision node - // between two forks). - if ancestor_hash != prev_in_tree.block_hash { - // Create a new `common_ancestor` node which represents the `ancestor_hash` - // block, has `prev_in_tree` as the parent and has both `node` and `child` - // as children. 
- let common_ancestor = Node { - parent_hash: Some(prev_in_tree.block_hash), - children: vec![ - ChildLink { - hash: node.block_hash, - successor_slot: self.find_ancestor_successor_slot( - ancestor_hash, - node.block_hash, - )?, - }, - ChildLink { - hash: child_hash, - successor_slot: self - .find_ancestor_successor_slot(ancestor_hash, child_hash)?, - }, - ], - ..Node::new(ancestor_hash) - }; - - let child = self.get_mut_node(child_hash)?; - - // Graft `child` and `node` to `common_ancestor`. - child.parent_hash = Some(common_ancestor.block_hash); - node.parent_hash = Some(common_ancestor.block_hash); - - // Detach `child` from `prev_in_tree`, replacing it with `common_ancestor`. - prev_in_tree.replace_child_hash(child_hash, common_ancestor.block_hash)?; - - // Store the new `common_ancestor` node. - self.nodes - .insert(common_ancestor.block_hash, common_ancestor); - - break; - } - } - } - } - - if node.parent_hash.is_none() { - // 3. Graft `node` to an existing node. - // - // Graft `node` to `prev_in_tree` and `prev_in_tree` to `node` - node.parent_hash = Some(prev_in_tree.block_hash); - prev_in_tree.children.push(ChildLink { - hash: node.block_hash, - successor_slot: self - .find_ancestor_successor_slot(prev_in_tree.block_hash, node.block_hash)?, - }); - } - - // Update `prev_in_tree`. A mutable reference was not maintained to satisfy the borrow - // checker. Perhaps there's a better way? - self.nodes.insert(prev_in_tree.block_hash, prev_in_tree); - self.nodes.insert(node.block_hash, node); - - Ok(()) - } - - /// For the given block `hash`, find its highest (by slot) ancestor that exists in the reduced - /// tree. - fn find_prev_in_tree(&mut self, node: &Node) -> Option { - self.iter_ancestors(node.block_hash, false) - .take_while(|(_, slot)| *slot >= self.root_slot()) - .find(|(root, _)| self.nodes.contains_key(root)) - .map(|(root, _)| root) - } - - /// For the two given block roots (`a_root` and `b_root`), find the first block they share in - /// the tree. 
Viz, find the block that these two distinct blocks forked from. - fn find_highest_common_ancestor(&self, a_root: Hash256, b_root: Hash256) -> Result { - let mut a_iter = self - .iter_ancestors(a_root, false) - .take_while(|(_, slot)| *slot >= self.root_slot()); - let mut b_iter = self - .iter_ancestors(b_root, false) - .take_while(|(_, slot)| *slot >= self.root_slot()); - - // Combines the `next()` fns on the `a_iter` and `b_iter` and returns the roots of two - // blocks at the same slot, or `None` if we have gone past genesis or the root of this tree. - let mut iter_blocks_at_same_height = || -> Option<(Hash256, Hash256)> { - match (a_iter.next(), b_iter.next()) { - (Some((mut a_root, a_slot)), Some((mut b_root, b_slot))) => { - // If either of the slots are lower than the root of this tree, exit early. - if a_slot < self.root.1 || b_slot < self.root.1 { - None - } else { - match a_slot.cmp(&b_slot) { - Ordering::Less => { - for _ in a_slot.as_u64()..b_slot.as_u64() { - b_root = b_iter.next()?.0; - } - } - Ordering::Greater => { - for _ in b_slot.as_u64()..a_slot.as_u64() { - a_root = a_iter.next()?.0; - } - } - Ordering::Equal => (), - } - Some((a_root, b_root)) - } - } - _ => None, - } - }; - - loop { - match iter_blocks_at_same_height() { - Some((a_root, b_root)) if a_root == b_root => break Ok(a_root), - Some(_) => (), - None => break Err(Error::NoCommonAncestor((a_root, b_root))), - } - } - } - - /// Return an iterator from the given `block_root` back to finalization. - /// - /// If `include_latest` is true, then the hash and slot for `block_root` will be included. - pub fn iter_ancestors<'a>( - &'a self, - block_root: Hash256, - include_latest: bool, - ) -> impl Iterator + 'a { - self.block_root_tree - .every_slot_iter_from(block_root) - .skip(if include_latest { 0 } else { 1 }) - } - - /// Verify the integrity of `self`. Returns `Ok(())` if the tree has integrity, otherwise returns `Err(description)`. 
- /// - /// Tries to detect the following erroneous conditions: - /// - /// - Dangling references inside the tree. - /// - Any scenario where there's not exactly one root node. - /// - /// ## Notes - /// - /// Computationally intensive, likely only useful during testing. - pub fn verify_integrity(&self) -> std::result::Result<(), String> { - let num_root_nodes = self - .nodes - .iter() - .filter(|(_key, node)| node.parent_hash.is_none()) - .count(); - - if num_root_nodes != 1 { - return Err(format!( - "Tree has {} roots, should have exactly one.", - num_root_nodes - )); - } - - let verify_node_exists = |key: Hash256, msg: String| -> std::result::Result<(), String> { - if self.nodes.contains_key(&key) { - Ok(()) - } else { - Err(msg) - } - }; - - // Iterate through all the nodes and ensure all references they store are valid. - self.nodes - .iter() - .map(|(_key, node)| { - if let Some(parent_hash) = node.parent_hash { - verify_node_exists(parent_hash, "parent must exist".to_string())?; - } - - node.children - .iter() - .map(|child| { - verify_node_exists(child.hash, "child_must_exist".to_string())?; - - if self.find_ancestor_successor_slot(node.block_hash, child.hash)? - == child.successor_slot - { - Ok(()) - } else { - Err("successor slot on child link is incorrect".to_string()) - } - }) - .collect::>()?; - - verify_node_exists(node.block_hash, "block hash must exist".to_string())?; - - Ok(()) - }) - .collect::>()?; - - Ok(()) - } - - fn get_node(&self, hash: Hash256) -> Result<&Node> { - self.nodes - .get(&hash) - .ok_or_else(|| Error::MissingNode(hash)) - } - - fn get_mut_node(&mut self, hash: Hash256) -> Result<&mut Node> { - self.nodes - .get_mut(&hash) - .ok_or_else(|| Error::MissingNode(hash)) - } - - fn get_block(&self, block_root: Hash256) -> Result> { - self.store - .get::>(&block_root)? 
- .ok_or_else(|| Error::MissingBlock(block_root)) - } - - fn root_slot(&self) -> Slot { - self.root.1 - } - - fn as_bytes(&self) -> Vec { - let reduced_tree_ssz = ReducedTreeSsz::from_reduced_tree(&self); - reduced_tree_ssz.as_ssz_bytes() - } - - fn from_bytes( - bytes: &[u8], - store: Arc, - block_root_tree: Arc, - ) -> Result { - let reduced_tree_ssz = ReducedTreeSsz::from_ssz_bytes(bytes)?; - Ok(reduced_tree_ssz.into_reduced_tree(store, block_root_tree)?) - } -} - -#[derive(Debug, Clone, PartialEq, Encode, Decode)] -pub struct Node { - /// Hash of the parent node in the reduced tree (not necessarily parent block). - pub parent_hash: Option, - pub children: Vec, - pub weight: u64, - pub block_hash: Hash256, - pub voters: Vec, -} - -#[derive(Default, Clone, Debug, PartialEq, Encode, Decode)] -pub struct ChildLink { - /// Hash of the child block (may not be a direct descendant). - pub hash: Hash256, - /// Slot of the block which is a direct descendant on the chain leading to `hash`. - /// - /// Node <--- Successor <--- ... <--- Child - pub successor_slot: Slot, -} - -impl Node { - pub fn new(block_hash: Hash256) -> Self { - Self { - parent_hash: None, - children: vec![], - weight: 0, - block_hash, - voters: vec![], - } - } - - /// Replace a child with a new child, whilst preserving the successor slot. - /// - /// The new child should have the same ancestor successor block as the old one. 
- pub fn replace_child_hash(&mut self, old: Hash256, new: Hash256) -> Result<()> { - let i = self - .children - .iter() - .position(|c| c.hash == old) - .ok_or_else(|| Error::MissingChild(old))?; - self.children[i].hash = new; - - Ok(()) - } - - pub fn remove_child(&mut self, child: Hash256) -> Result<()> { - let i = self - .children - .iter() - .position(|c| c.hash == child) - .ok_or_else(|| Error::MissingChild(child))?; - - self.children.remove(i); - - Ok(()) - } - - pub fn remove_voter(&mut self, voter: usize) -> Option { - let i = self.voters.iter().position(|&v| v == voter)?; - Some(self.voters.remove(i)) - } - - pub fn add_voter(&mut self, voter: usize) { - self.voters.push(voter); - } - - pub fn has_votes(&self) -> bool { - !self.voters.is_empty() - } -} - -#[derive(Debug, Clone, Copy, PartialEq, Encode, Decode)] -pub struct Vote { - hash: Hash256, - slot: Slot, -} - -/// A Vec-wrapper which will grow to match any request. -/// -/// E.g., a `get` or `insert` to an out-of-bounds element will cause the Vec to grow (using -/// Default) to the smallest size required to fulfill the request. 
-#[derive(Default, Clone, Debug, PartialEq)] -pub struct ElasticList(Vec); - -impl ElasticList -where - T: Default, -{ - fn ensure(&mut self, i: usize) { - if self.0.len() <= i { - self.0.resize_with(i + 1, Default::default); - } - } - - pub fn get(&mut self, i: usize) -> &T { - self.ensure(i); - &self.0[i] - } - - pub fn get_ref(&self, i: usize) -> Option<&T> { - self.0.get(i) - } - - pub fn insert(&mut self, i: usize, element: T) { - self.ensure(i); - self.0[i] = element; - } -} - -impl From for String { - fn from(e: Error) -> String { - format!("{:?}", e) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use store::MemoryStore; - use types::eth_spec::MinimalEthSpec; - - #[test] - fn test_reduced_tree_ssz() { - let store = Arc::new(MemoryStore::::open()); - let block_root_tree = Arc::new(BlockRootTree::new(Hash256::zero(), Slot::new(0))); - let tree = ReducedTree::new( - store.clone(), - block_root_tree.clone(), - &BeaconBlock::empty(&MinimalEthSpec::default_spec()), - Hash256::zero(), - ); - let ssz_tree = ReducedTreeSsz::from_reduced_tree(&tree); - let bytes = tree.as_bytes(); - let recovered_tree = ReducedTree::from_bytes(&bytes, store, block_root_tree).unwrap(); - - let recovered_ssz = ReducedTreeSsz::from_reduced_tree(&recovered_tree); - assert_eq!(ssz_tree, recovered_ssz); - } -} diff --git a/eth2/lmd_ghost/tests/test.rs b/eth2/lmd_ghost/tests/test.rs deleted file mode 100644 index f0a952d70a6..00000000000 --- a/eth2/lmd_ghost/tests/test.rs +++ /dev/null @@ -1,400 +0,0 @@ -#![cfg(not(debug_assertions))] - -#[macro_use] -extern crate lazy_static; - -use beacon_chain::test_utils::{ - generate_deterministic_keypairs, AttestationStrategy, - BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy, HarnessType, -}; -use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree}; -use rand::{prelude::*, rngs::StdRng}; -use std::sync::Arc; -use store::{iter::AncestorIter, MemoryStore, Store}; -use types::{BeaconBlock, EthSpec, Hash256, 
MinimalEthSpec, Slot}; - -// Should ideally be divisible by 3. -pub const VALIDATOR_COUNT: usize = 3 * 8; - -type TestEthSpec = MinimalEthSpec; -type ThreadSafeReducedTree = BaseThreadSafeReducedTree, TestEthSpec>; -type BeaconChainHarness = BaseBeaconChainHarness>; -type RootAndSlot = (Hash256, Slot); - -lazy_static! { - /// A lazy-static instance of a `BeaconChainHarness` that contains two forks. - /// - /// Reduces test setup time by providing a common harness. - static ref FORKED_HARNESS: ForkedHarness = ForkedHarness::new(); -} - -/// Contains a `BeaconChainHarness` that has two forks, caused by a validator skipping a slot and -/// then some validators building on one head and some on the other. -/// -/// Care should be taken to ensure that the `ForkedHarness` does not expose any interior mutability -/// from it's fields. This would cause cross-contamination between tests when used with -/// `lazy_static`. -struct ForkedHarness { - /// Private (not `pub`) because the `BeaconChainHarness` has interior mutability. We - /// don't expose it to avoid contamination between tests. - harness: BeaconChainHarness, - pub genesis_block_root: Hash256, - pub genesis_block: BeaconBlock, - pub honest_head: RootAndSlot, - pub faulty_head: RootAndSlot, - /// Honest roots in reverse order (slot high to low) - pub honest_roots: Vec, - /// Faulty roots in reverse order (slot high to low) - pub faulty_roots: Vec, -} - -impl ForkedHarness { - /// A new standard instance of with constant parameters. - pub fn new() -> Self { - let harness = BeaconChainHarness::new( - MinimalEthSpec, - generate_deterministic_keypairs(VALIDATOR_COUNT), - ); - - // Move past the zero slot. - harness.advance_slot(); - - let delay = TestEthSpec::default_spec().min_attestation_inclusion_delay as usize; - - let initial_blocks = delay + 5; - - // Build an initial chain where all validators agree. 
- harness.extend_chain( - initial_blocks, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); - - let two_thirds = (VALIDATOR_COUNT / 3) * 2; - let honest_validators: Vec = (0..two_thirds).collect(); - let faulty_validators: Vec = (two_thirds..VALIDATOR_COUNT).collect(); - let honest_fork_blocks = delay + 5; - let faulty_fork_blocks = delay + 5; - - let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block( - &honest_validators, - &faulty_validators, - honest_fork_blocks, - faulty_fork_blocks, - ); - - let mut honest_roots = get_ancestor_roots(harness.chain.store.clone(), honest_head); - - honest_roots.insert( - 0, - (honest_head, get_slot_for_block_root(&harness, honest_head)), - ); - - let mut faulty_roots = get_ancestor_roots(harness.chain.store.clone(), faulty_head); - - faulty_roots.insert( - 0, - (faulty_head, get_slot_for_block_root(&harness, faulty_head)), - ); - - let genesis_block_root = harness.chain.genesis_block_root; - let genesis_block = harness - .chain - .store - .get::>(&genesis_block_root) - .expect("Genesis block should exist") - .expect("DB should not error"); - - Self { - harness, - genesis_block_root, - genesis_block, - honest_head: *honest_roots.last().expect("Chain cannot be empty"), - faulty_head: *faulty_roots.last().expect("Chain cannot be empty"), - honest_roots, - faulty_roots, - } - } - - pub fn store_clone(&self) -> MemoryStore { - (*self.harness.chain.store).clone() - } - - /// Return a brand-new, empty fork choice with a reference to `harness.store`. - pub fn new_fork_choice(&self) -> ThreadSafeReducedTree { - // Take a full clone of the store built by the harness. - // - // Taking a clone here ensures that each fork choice gets it's own store so there is no - // cross-contamination between tests. 
- let store: MemoryStore = self.store_clone(); - - ThreadSafeReducedTree::new( - Arc::new(store), - self.harness.chain.block_root_tree.clone(), - &self.genesis_block, - self.genesis_block_root, - ) - } - - pub fn all_block_roots(&self) -> Vec { - let mut all_roots = self.honest_roots.clone(); - all_roots.append(&mut self.faulty_roots.clone()); - - all_roots.dedup(); - - all_roots - } - - pub fn weight_function(_validator_index: usize) -> Option { - Some(1) - } -} - -/// Helper: returns all the ancestor roots and slots for a given block_root. -fn get_ancestor_roots>( - store: Arc, - block_root: Hash256, -) -> Vec<(Hash256, Slot)> { - let block = store - .get::>(&block_root) - .expect("block should exist") - .expect("store should not error"); - - as AncestorIter<_, _, _>>::try_iter_ancestor_roots(&block, store) - .expect("should be able to create ancestor iter") - .collect() -} - -/// Helper: returns the slot for some block_root. -fn get_slot_for_block_root(harness: &BeaconChainHarness, block_root: Hash256) -> Slot { - harness - .chain - .store - .get::>(&block_root) - .expect("head block should exist") - .expect("DB should not error") - .slot -} - -const RANDOM_ITERATIONS: usize = 50; -const RANDOM_ACTIONS_PER_ITERATION: usize = 100; - -/// Create a single LMD instance and have one validator vote in reverse (highest to lowest slot) -/// down the chain. -#[test] -fn random_scenario() { - let harness = &FORKED_HARNESS; - let block_roots = harness.all_block_roots(); - let validators: Vec = (0..VALIDATOR_COUNT).collect(); - let mut rng = StdRng::seed_from_u64(9375205782030385); // Keyboard mash. 
- - for _ in 0..RANDOM_ITERATIONS { - let lmd = harness.new_fork_choice(); - - for _ in 0..RANDOM_ACTIONS_PER_ITERATION { - let (root, slot) = block_roots[rng.next_u64() as usize % block_roots.len()]; - let validator_index = validators[rng.next_u64() as usize % validators.len()]; - - lmd.process_attestation(validator_index, root, slot) - .expect("fork choice should accept randomly-placed attestations"); - - assert_eq!( - lmd.verify_integrity(), - Ok(()), - "New tree should have integrity" - ); - } - } -} - -/// Create a single LMD instance and have one validator vote in reverse (highest to lowest slot) -/// down the chain. -#[test] -fn single_voter_persistent_instance_reverse_order() { - let harness = &FORKED_HARNESS; - - let lmd = harness.new_fork_choice(); - - assert_eq!( - lmd.verify_integrity(), - Ok(()), - "New tree should have integrity" - ); - - for (root, slot) in &harness.honest_roots { - lmd.process_attestation(0, *root, *slot) - .expect("fork choice should accept attestations to honest roots in reverse"); - - assert_eq!( - lmd.verify_integrity(), - Ok(()), - "Tree integrity should be maintained whilst processing attestations" - ); - } - - // The honest head should be selected. - let (head_root, _) = harness.honest_roots.first().unwrap(); - let (finalized_root, finalized_slot) = harness.honest_roots.last().unwrap(); - - assert_eq!( - lmd.find_head( - *finalized_slot, - *finalized_root, - ForkedHarness::weight_function - ), - Ok(*head_root), - "Honest head should be selected" - ); -} - -/// A single validator applies a single vote to each block in the honest fork, using a new tree -/// each time. 
-#[test] -fn single_voter_many_instance_honest_blocks_voting_forwards() { - let harness = &FORKED_HARNESS; - - for (root, slot) in harness.honest_roots.iter().rev() { - let lmd = harness.new_fork_choice(); - lmd.process_attestation(0, *root, *slot) - .expect("fork choice should accept attestations to honest roots"); - - assert_eq!( - lmd.verify_integrity(), - Ok(()), - "Tree integrity should be maintained whilst processing attestations" - ); - } -} - -/// Same as above, but in reverse order (votes on the highest honest block first). -#[test] -fn single_voter_many_instance_honest_blocks_voting_in_reverse() { - let harness = &FORKED_HARNESS; - - // Same as above, but in reverse order (votes on the highest honest block first). - for (root, slot) in &harness.honest_roots { - let lmd = harness.new_fork_choice(); - lmd.process_attestation(0, *root, *slot) - .expect("fork choice should accept attestations to honest roots in reverse"); - - assert_eq!( - lmd.verify_integrity(), - Ok(()), - "Tree integrity should be maintained whilst processing attestations" - ); - } -} - -/// A single validator applies a single vote to each block in the faulty fork, using a new tree -/// each time. -#[test] -fn single_voter_many_instance_faulty_blocks_voting_forwards() { - let harness = &FORKED_HARNESS; - - for (root, slot) in harness.faulty_roots.iter().rev() { - let lmd = harness.new_fork_choice(); - lmd.process_attestation(0, *root, *slot) - .expect("fork choice should accept attestations to faulty roots"); - - assert_eq!( - lmd.verify_integrity(), - Ok(()), - "Tree integrity should be maintained whilst processing attestations" - ); - } -} - -/// Same as above, but in reverse order (votes on the highest faulty block first). 
-#[test] -fn single_voter_many_instance_faulty_blocks_voting_in_reverse() { - let harness = &FORKED_HARNESS; - - for (root, slot) in &harness.faulty_roots { - let lmd = harness.new_fork_choice(); - lmd.process_attestation(0, *root, *slot) - .expect("fork choice should accept attestations to faulty roots in reverse"); - - assert_eq!( - lmd.verify_integrity(), - Ok(()), - "Tree integrity should be maintained whilst processing attestations" - ); - } -} - -/// Ensure that votes with slots before the justified slot are not counted. -#[test] -fn discard_votes_before_justified_slot() { - let harness = &FORKED_HARNESS; - - let lmd = harness.new_fork_choice(); - - let (genesis_root, genesis_slot) = *harness.honest_roots.last().unwrap(); - - // Add attestations from all validators for all honest blocks. - for (root, slot) in harness.honest_roots.iter().rev() { - for i in 0..VALIDATOR_COUNT { - lmd.process_attestation(i, *root, *slot) - .expect("should accept attestations in increasing order"); - } - - // Head starting from 0 checkpoint (genesis) should be current root - assert_eq!( - lmd.find_head(genesis_slot, genesis_root, ForkedHarness::weight_function), - Ok(*root), - "Honest head should be selected" - ); - - // Head from one slot after genesis should still be genesis, because the successor - // block of the genesis block has slot `genesis_slot + 1` which isn't greater than - // the slot we're starting from. This is a very artifical test, but one that's easy to - // describe. - assert_eq!( - lmd.find_head( - genesis_slot + 1, - genesis_root, - ForkedHarness::weight_function - ), - Ok(genesis_root) - ); - } -} - -/// Ensures that the finalized root can be set to all values in `roots`. 
-fn test_update_finalized_root(roots: &[(Hash256, Slot)]) { - let harness = &FORKED_HARNESS; - - let lmd = harness.new_fork_choice(); - - for (root, _slot) in roots.iter().rev() { - let block = harness - .store_clone() - .get::>(root) - .expect("block should exist") - .expect("db should not error"); - lmd.update_finalized_root(&block, *root) - .expect("finalized root should update for faulty fork"); - - assert_eq!( - lmd.verify_integrity(), - Ok(()), - "Tree integrity should be maintained after updating the finalized root" - ); - } -} - -/// Iterates from low-to-high slot through the faulty roots, updating the finalized root. -#[test] -fn update_finalized_root_faulty() { - let harness = &FORKED_HARNESS; - - test_update_finalized_root(&harness.faulty_roots) -} - -/// Iterates from low-to-high slot through the honest roots, updating the finalized root. -#[test] -fn update_finalized_root_honest() { - let harness = &FORKED_HARNESS; - - test_update_finalized_root(&harness.honest_roots) -} diff --git a/eth2/proto_array_fork_choice/.gitignore b/eth2/proto_array_fork_choice/.gitignore new file mode 100644 index 00000000000..1e82fc7debc --- /dev/null +++ b/eth2/proto_array_fork_choice/.gitignore @@ -0,0 +1 @@ +*.yaml diff --git a/eth2/proto_array_fork_choice/Cargo.toml b/eth2/proto_array_fork_choice/Cargo.toml new file mode 100644 index 00000000000..687b6aefcc4 --- /dev/null +++ b/eth2/proto_array_fork_choice/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "proto_array_fork_choice" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[[bin]] +name = "proto_array_fork_choice" +path = "src/bin.rs" + +[dependencies] +parking_lot = "0.9.0" +types = { path = "../types" } +itertools = "0.8.1" +eth2_ssz = "0.1.2" +eth2_ssz_derive = "0.1.0" +serde = "1.0.102" +serde_derive = "1.0.102" +serde_yaml = "0.8.11" diff --git a/eth2/proto_array_fork_choice/src/bin.rs b/eth2/proto_array_fork_choice/src/bin.rs new file mode 100644 index 00000000000..dec53a4ed52 --- /dev/null 
+++ b/eth2/proto_array_fork_choice/src/bin.rs @@ -0,0 +1,15 @@ +use proto_array_fork_choice::fork_choice_test_definition::*; +use serde_yaml; +use std::fs::File; + +fn main() { + write_test_def_to_yaml("votes.yaml", get_votes_test_definition()); + write_test_def_to_yaml("no_votes.yaml", get_no_votes_test_definition()); + write_test_def_to_yaml("ffg_01.yaml", get_ffg_case_01_test_definition()); + write_test_def_to_yaml("ffg_02.yaml", get_ffg_case_02_test_definition()); +} + +fn write_test_def_to_yaml(filename: &str, def: ForkChoiceTestDefinition) { + let file = File::create(filename).expect("Should be able to open file"); + serde_yaml::to_writer(file, &def).expect("Should be able to write YAML to file"); +} diff --git a/eth2/proto_array_fork_choice/src/error.rs b/eth2/proto_array_fork_choice/src/error.rs new file mode 100644 index 00000000000..11265aa3627 --- /dev/null +++ b/eth2/proto_array_fork_choice/src/error.rs @@ -0,0 +1,33 @@ +use types::{Epoch, Hash256}; + +#[derive(Clone, PartialEq, Debug)] +pub enum Error { + FinalizedNodeUnknown(Hash256), + JustifiedNodeUnknown(Hash256), + InvalidFinalizedRootChange, + InvalidNodeIndex(usize), + InvalidParentIndex(usize), + InvalidBestChildIndex(usize), + InvalidJustifiedIndex(usize), + InvalidBestDescendant(usize), + InvalidParentDelta(usize), + InvalidNodeDelta(usize), + DeltaOverflow(usize), + IndexOverflow(&'static str), + InvalidDeltaLen { + deltas: usize, + indices: usize, + }, + RevertedFinalizedEpoch { + current_finalized_epoch: Epoch, + new_finalized_epoch: Epoch, + }, + InvalidBestNode { + start_root: Hash256, + justified_epoch: Epoch, + finalized_epoch: Epoch, + head_root: Hash256, + head_justified_epoch: Epoch, + head_finalized_epoch: Epoch, + }, +} diff --git a/eth2/proto_array_fork_choice/src/fork_choice_test_definition.rs b/eth2/proto_array_fork_choice/src/fork_choice_test_definition.rs new file mode 100644 index 00000000000..b016ed04211 --- /dev/null +++ 
b/eth2/proto_array_fork_choice/src/fork_choice_test_definition.rs @@ -0,0 +1,181 @@ +mod ffg_updates; +mod no_votes; +mod votes; + +use crate::proto_array_fork_choice::ProtoArrayForkChoice; +use serde_derive::{Deserialize, Serialize}; +use types::{Epoch, Hash256, Slot}; + +pub use ffg_updates::*; +pub use no_votes::*; +pub use votes::*; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum Operation { + FindHead { + justified_epoch: Epoch, + justified_root: Hash256, + finalized_epoch: Epoch, + justified_state_balances: Vec, + expected_head: Hash256, + }, + InvalidFindHead { + justified_epoch: Epoch, + justified_root: Hash256, + finalized_epoch: Epoch, + justified_state_balances: Vec, + }, + ProcessBlock { + slot: Slot, + root: Hash256, + parent_root: Hash256, + justified_epoch: Epoch, + finalized_epoch: Epoch, + }, + ProcessAttestation { + validator_index: usize, + block_root: Hash256, + target_epoch: Epoch, + }, + Prune { + finalized_root: Hash256, + prune_threshold: usize, + expected_len: usize, + }, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ForkChoiceTestDefinition { + pub finalized_block_slot: Slot, + pub justified_epoch: Epoch, + pub finalized_epoch: Epoch, + pub finalized_root: Hash256, + pub operations: Vec, +} + +impl ForkChoiceTestDefinition { + pub fn run(self) { + let fork_choice = ProtoArrayForkChoice::new( + self.finalized_block_slot, + self.justified_epoch, + self.finalized_epoch, + self.finalized_root, + ) + .expect("should create fork choice struct"); + + for (op_index, op) in self.operations.into_iter().enumerate() { + match op.clone() { + Operation::FindHead { + justified_epoch, + justified_root, + finalized_epoch, + justified_state_balances, + expected_head, + } => { + let head = fork_choice + .find_head( + justified_epoch, + justified_root, + finalized_epoch, + &justified_state_balances, + ) + .expect(&format!( + "find_head op at index {} returned error", + op_index + )); + + assert_eq!( + head, expected_head, + 
"Operation at index {} failed checks. Operation: {:?}", + op_index, op + ); + check_bytes_round_trip(&fork_choice); + } + Operation::InvalidFindHead { + justified_epoch, + justified_root, + finalized_epoch, + justified_state_balances, + } => { + let result = fork_choice.find_head( + justified_epoch, + justified_root, + finalized_epoch, + &justified_state_balances, + ); + + assert!( + result.is_err(), + "Operation at index {} . Operation: {:?}", + op_index, + op + ); + check_bytes_round_trip(&fork_choice); + } + Operation::ProcessBlock { + slot, + root, + parent_root, + justified_epoch, + finalized_epoch, + } => { + fork_choice + .process_block(slot, root, parent_root, justified_epoch, finalized_epoch) + .expect(&format!( + "process_block op at index {} returned error", + op_index + )); + check_bytes_round_trip(&fork_choice); + } + Operation::ProcessAttestation { + validator_index, + block_root, + target_epoch, + } => { + fork_choice + .process_attestation(validator_index, block_root, target_epoch) + .expect(&format!( + "process_attestation op at index {} returned error", + op_index + )); + check_bytes_round_trip(&fork_choice); + } + Operation::Prune { + finalized_root, + prune_threshold, + expected_len, + } => { + fork_choice.set_prune_threshold(prune_threshold); + fork_choice + .maybe_prune(finalized_root) + .expect("update_finalized_root op at index {} returned error"); + + // Ensure that no pruning happened. + assert_eq!( + fork_choice.len(), + expected_len, + "Prune op at index {} failed with {} instead of {}", + op_index, + fork_choice.len(), + expected_len + ); + } + } + } + } +} + +/// Gives a hash that is not the zero hash (unless i is `usize::max_value)`. 
+fn get_hash(i: u64) -> Hash256 { + Hash256::from_low_u64_be(i) +} + +fn check_bytes_round_trip(original: &ProtoArrayForkChoice) { + let bytes = original.as_bytes(); + let decoded = + ProtoArrayForkChoice::from_bytes(&bytes).expect("fork choice should decode from bytes"); + assert!( + *original == decoded, + "fork choice should encode and decode without change" + ); +} diff --git a/eth2/proto_array_fork_choice/src/fork_choice_test_definition/ffg_updates.rs b/eth2/proto_array_fork_choice/src/fork_choice_test_definition/ffg_updates.rs new file mode 100644 index 00000000000..9dd9417f291 --- /dev/null +++ b/eth2/proto_array_fork_choice/src/fork_choice_test_definition/ffg_updates.rs @@ -0,0 +1,452 @@ +use super::*; + +pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { + let balances = vec![1; 2]; + let mut ops = vec![]; + + // Ensure that the head starts at the finalized block. + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(0), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + expected_head: get_hash(0), + }); + + // Build the following tree (stick? lol). 
+ // + // 0 <- just: 0, fin: 0 + // | + // 1 <- just: 0, fin: 0 + // | + // 2 <- just: 1, fin: 0 + // | + // 3 <- just: 2, fin: 1 + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_hash(1), + parent_root: get_hash(0), + justified_epoch: Epoch::new(0), + finalized_epoch: Epoch::new(0), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_hash(2), + parent_root: get_hash(1), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(0), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(3), + root: get_hash(3), + parent_root: get_hash(2), + justified_epoch: Epoch::new(2), + finalized_epoch: Epoch::new(1), + }); + + // Ensure that with justified epoch 0 we find 3 + // + // 0 <- start + // | + // 1 + // | + // 2 + // | + // 3 <- head + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(0), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + expected_head: get_hash(3), + }); + + // Ensure that with justified epoch 1 we find 2 + // + // 0 + // | + // 1 + // | + // 2 <- start + // | + // 3 <- head + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(2), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + expected_head: get_hash(2), + }); + + // Ensure that with justified epoch 2 we find 3 + // + // 0 + // | + // 1 + // | + // 2 + // | + // 3 <- start + head + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(3), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(3), + }); + + // END OF TESTS + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(1), + finalized_root: get_hash(0), + operations: ops, + } +} + +pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { + let balances = vec![1; 
2]; + let mut ops = vec![]; + + // Ensure that the head starts at the finalized block. + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(0), + }); + + // Build the following tree. + // + // 0 + // / \ + // just: 0, fin: 0 -> 1 2 <- just: 0, fin: 0 + // | | + // just: 1, fin: 0 -> 3 4 <- just: 0, fin: 0 + // | | + // just: 1, fin: 0 -> 5 6 <- just: 0, fin: 0 + // | | + // just: 1, fin: 0 -> 7 8 <- just: 1, fin: 0 + // | | + // just: 2, fin: 0 -> 9 10 <- just: 2, fin: 0 + + // Left branch + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_hash(1), + parent_root: get_hash(0), + justified_epoch: Epoch::new(0), + finalized_epoch: Epoch::new(0), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_hash(3), + parent_root: get_hash(1), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(0), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(3), + root: get_hash(5), + parent_root: get_hash(3), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(0), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(4), + root: get_hash(7), + parent_root: get_hash(5), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(0), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(4), + root: get_hash(9), + parent_root: get_hash(7), + justified_epoch: Epoch::new(2), + finalized_epoch: Epoch::new(0), + }); + + // Right branch + ops.push(Operation::ProcessBlock { + slot: Slot::new(1), + root: get_hash(2), + parent_root: get_hash(0), + justified_epoch: Epoch::new(0), + finalized_epoch: Epoch::new(0), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_hash(4), + parent_root: get_hash(2), + justified_epoch: Epoch::new(0), + finalized_epoch: Epoch::new(0), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(3), 
+ root: get_hash(6), + parent_root: get_hash(4), + justified_epoch: Epoch::new(0), + finalized_epoch: Epoch::new(0), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(4), + root: get_hash(8), + parent_root: get_hash(6), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(0), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(4), + root: get_hash(10), + parent_root: get_hash(8), + justified_epoch: Epoch::new(2), + finalized_epoch: Epoch::new(0), + }); + + // Ensure that if we start at 0 we find 10 (just: 0, fin: 0). + // + // 0 <-- start + // / \ + // 1 2 + // | | + // 3 4 + // | | + // 5 6 + // | | + // 7 8 + // | | + // 9 10 <-- head + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(0), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + expected_head: get_hash(10), + }); + // Same as above, but with justified epoch 2. + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + expected_head: get_hash(10), + }); + // Same as above, but with justified epoch 3 (should be invalid). + ops.push(Operation::InvalidFindHead { + justified_epoch: Epoch::new(3), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + }); + + // Add a vote to 1. + // + // 0 + // / \ + // +1 vote -> 1 2 + // | | + // 3 4 + // | | + // 5 6 + // | | + // 7 8 + // | | + // 9 10 + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_hash(1), + target_epoch: Epoch::new(0), + }); + + // Ensure that if we start at 0 we find 9 (just: 0, fin: 0). 
+ // + // 0 <-- start + // / \ + // 1 2 + // | | + // 3 4 + // | | + // 5 6 + // | | + // 7 8 + // | | + // head -> 9 10 + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(0), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + expected_head: get_hash(9), + }); + // Save as above but justified epoch 2. + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + expected_head: get_hash(9), + }); + // Save as above but justified epoch 3 (should fail). + ops.push(Operation::InvalidFindHead { + justified_epoch: Epoch::new(3), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + }); + + // Add a vote to 2. + // + // 0 + // / \ + // 1 2 <- +1 vote + // | | + // 3 4 + // | | + // 5 6 + // | | + // 7 8 + // | | + // 9 10 + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_hash(2), + target_epoch: Epoch::new(0), + }); + + // Ensure that if we start at 0 we find 10 (just: 0, fin: 0). + // + // 0 <-- start + // / \ + // 1 2 + // | | + // 3 4 + // | | + // 5 6 + // | | + // 7 8 + // | | + // 9 10 <-- head + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(0), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + expected_head: get_hash(10), + }); + // Same as above but justified epoch 2. + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + expected_head: get_hash(10), + }); + // Same as above but justified epoch 3 (should fail). 
+ ops.push(Operation::InvalidFindHead { + justified_epoch: Epoch::new(3), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + }); + + // Ensure that if we start at 1 we find 9 (just: 0, fin: 0). + // + // 0 + // / \ + // start-> 1 2 + // | | + // 3 4 + // | | + // 5 6 + // | | + // 7 8 + // | | + // head -> 9 10 + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(0), + justified_root: get_hash(1), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + expected_head: get_hash(9), + }); + // Same as above but justified epoch 2. + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(1), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + expected_head: get_hash(9), + }); + // Same as above but justified epoch 3 (should fail). + ops.push(Operation::InvalidFindHead { + justified_epoch: Epoch::new(3), + justified_root: get_hash(1), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + }); + + // Ensure that if we start at 2 we find 10 (just: 0, fin: 0). + // + // 0 + // / \ + // 1 2 <- start + // | | + // 3 4 + // | | + // 5 6 + // | | + // 7 8 + // | | + // 9 10 <- head + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(0), + justified_root: get_hash(2), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + expected_head: get_hash(10), + }); + // Same as above but justified epoch 2. + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(2), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + expected_head: get_hash(10), + }); + // Same as above but justified epoch 3 (should fail). 
+ ops.push(Operation::InvalidFindHead { + justified_epoch: Epoch::new(3), + justified_root: get_hash(2), + finalized_epoch: Epoch::new(0), + justified_state_balances: balances.clone(), + }); + + // END OF TESTS + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(1), + finalized_root: get_hash(0), + operations: ops, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn ffg_case_01() { + let test = get_ffg_case_01_test_definition(); + test.run(); + } + + #[test] + fn ffg_case_02() { + let test = get_ffg_case_02_test_definition(); + test.run(); + } +} diff --git a/eth2/proto_array_fork_choice/src/fork_choice_test_definition/no_votes.rs b/eth2/proto_array_fork_choice/src/fork_choice_test_definition/no_votes.rs new file mode 100644 index 00000000000..279cde52cdf --- /dev/null +++ b/eth2/proto_array_fork_choice/src/fork_choice_test_definition/no_votes.rs @@ -0,0 +1,237 @@ +use super::*; + +pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { + let balances = vec![0; 16]; + + let operations = vec![ + // Check that the head is the finalized block. 
+ Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: Hash256::zero(), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: Hash256::zero(), + }, + // Add block 2 + // + // 0 + // / + // 2 + Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(2), + parent_root: get_hash(0), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(1), + }, + // Ensure the head is 2 + // + // 0 + // / + // 2 <- head + Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: Hash256::zero(), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(2), + }, + // Add block 1 + // + // 0 + // / \ + // 2 1 + Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(1), + parent_root: get_hash(0), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(1), + }, + // Ensure the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: Hash256::zero(), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(2), + }, + // Add block 3 + // + // 0 + // / \ + // 2 1 + // | + // 3 + Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(3), + parent_root: get_hash(1), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(1), + }, + // Ensure 2 is still the head + // + // 0 + // / \ + // head-> 2 1 + // | + // 3 + Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: Hash256::zero(), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(2), + }, + // Add block 4 + // + // 0 + // / \ + // 2 1 + // | | + // 4 3 + Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(4), + parent_root: get_hash(2), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(1), + }, + // Ensure the head is 4. 
+ // + // 0 + // / \ + // 2 1 + // | | + // head-> 4 3 + Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: Hash256::zero(), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(4), + }, + // Add block 5 with a justified epoch of 2 + // + // 0 + // / \ + // 2 1 + // | | + // 4 3 + // | + // 5 <- justified epoch = 2 + Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(5), + parent_root: get_hash(4), + justified_epoch: Epoch::new(2), + finalized_epoch: Epoch::new(1), + }, + // Ensure the head is still 4 whilst the justified epoch is 0. + // + // 0 + // / \ + // 2 1 + // | | + // head-> 4 3 + // | + // 5 + Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: Hash256::zero(), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(4), + }, + // Ensure there is an error when starting from a block that has the wrong justified epoch. + // + // 0 + // / \ + // 2 1 + // | | + // 4 3 + // | + // 5 <- starting from 5 with justified epoch 0 should error. + Operation::InvalidFindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(5), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + }, + // Set the justified epoch to 2 and the start block to 5 and ensure 5 is the head. 
+ // + // 0 + // / \ + // 2 1 + // | | + // 4 3 + // | + // 5 <- head + Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(5), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(5), + }, + // Add block 6 + // + // 0 + // / \ + // 2 1 + // | | + // 4 3 + // | + // 5 + // | + // 6 + Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(6), + parent_root: get_hash(5), + justified_epoch: Epoch::new(2), + finalized_epoch: Epoch::new(1), + }, + // Ensure 6 is the head + // + // 0 + // / \ + // 2 1 + // | | + // 4 3 + // | + // 5 + // | + // 6 <- head + Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(5), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(6), + }, + ]; + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(1), + finalized_root: get_hash(0), + operations, + } +} + +#[cfg(test)] +mod tests { + use super::*; + #[test] + fn test() { + let test = get_no_votes_test_definition(); + test.run(); + } +} diff --git a/eth2/proto_array_fork_choice/src/fork_choice_test_definition/votes.rs b/eth2/proto_array_fork_choice/src/fork_choice_test_definition/votes.rs new file mode 100644 index 00000000000..4f80912699b --- /dev/null +++ b/eth2/proto_array_fork_choice/src/fork_choice_test_definition/votes.rs @@ -0,0 +1,698 @@ +use super::*; + +pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { + let mut balances = vec![1; 2]; + let mut ops = vec![]; + + // Ensure that the head starts at the finalized block. + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(0), + }); + + // Add a block with a hash of 2. 
+ // + // 0 + // / + // 2 + ops.push(Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(2), + parent_root: get_hash(0), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(1), + }); + + // Ensure that the head is 2 + // + // 0 + // / + // head-> 2 + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(2), + }); + + // Add a block with a hash of 1 that comes off the genesis block (this is a fork compared + // to the previous block). + // + // 0 + // / \ + // 2 1 + ops.push(Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(1), + parent_root: get_hash(0), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(1), + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(2), + }); + + // Add a vote to block 1 + // + // 0 + // / \ + // 2 1 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_hash(1), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is now 1, beacuse 1 has a vote. 
+ // + // 0 + // / \ + // 2 1 <- head + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(1), + }); + + // Add a vote to block 2 + // + // 0 + // / \ + // +vote-> 2 1 + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_hash(2), + target_epoch: Epoch::new(2), + }); + + // Ensure that the head is 2 since 1 and 2 both have a vote + // + // 0 + // / \ + // head-> 2 1 + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(2), + }); + + // Add block 3. + // + // 0 + // / \ + // 2 1 + // | + // 3 + ops.push(Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(3), + parent_root: get_hash(1), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(1), + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + // | + // 3 + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(2), + }); + + // Move validator #0 vote from 1 to 3 + // + // 0 + // / \ + // 2 1 <- -vote + // | + // 3 <- +vote + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_hash(3), + target_epoch: Epoch::new(3), + }); + + // Ensure that the head is still 2 + // + // 0 + // / \ + // head-> 2 1 + // | + // 3 + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(2), + }); + + // Move validator #1 vote from 2 to 1 (this is an equivocation, but fork choice doesn't + // care) + // + // 0 + // / \ + // -vote-> 2 1 <- +vote 
+ // | + // 3 + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_hash(1), + target_epoch: Epoch::new(3), + }); + + // Ensure that the head is now 3 + // + // 0 + // / \ + // 2 1 + // | + // 3 <- head + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(3), + }); + + // Add block 4. + // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 + ops.push(Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(4), + parent_root: get_hash(3), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(1), + }); + + // Ensure that the head is now 4 + // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 <- head + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(4), + }); + + // Add block 5, which has a justified epoch of 2. + // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 + // / + // 5 <- justified epoch = 2 + ops.push(Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(5), + parent_root: get_hash(4), + justified_epoch: Epoch::new(2), + finalized_epoch: Epoch::new(2), + }); + + // Ensure that 5 is filtered out and the head stays at 4. + // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 <- head + // / + // 5 + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(4), + }); + + // Add block 6, which has a justified epoch of 0. 
+ // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 + // / \ + // 5 6 <- justified epoch = 0 + ops.push(Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(6), + parent_root: get_hash(4), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(1), + }); + + // Move both votes to 5. + // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 + // / \ + // +2 vote-> 5 6 + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_hash(5), + target_epoch: Epoch::new(4), + }); + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_hash(5), + target_epoch: Epoch::new(4), + }); + + // Add blocks 7, 8 and 9. Adding these blocks helps test the `best_descendant` + // functionality. + // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 + // / \ + // 5 6 + // | + // 7 + // | + // 8 + // / + // 9 + ops.push(Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(7), + parent_root: get_hash(5), + justified_epoch: Epoch::new(2), + finalized_epoch: Epoch::new(2), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(8), + parent_root: get_hash(7), + justified_epoch: Epoch::new(2), + finalized_epoch: Epoch::new(2), + }); + ops.push(Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(9), + parent_root: get_hash(8), + justified_epoch: Epoch::new(2), + finalized_epoch: Epoch::new(2), + }); + + // Ensure that 6 is the head, even though 5 has all the votes. This is testing to ensure + // that 5 is filtered out due to a differing justified epoch. 
+ // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 + // / \ + // 5 6 <- head + // | + // 7 + // | + // 8 + // / + // 9 + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(1), + justified_root: get_hash(0), + finalized_epoch: Epoch::new(1), + justified_state_balances: balances.clone(), + expected_head: get_hash(6), + }); + + // Change fork-choice justified epoch to 1, and the start block to 5 and ensure that 9 is + // the head. + // + // << Change justified epoch to 1 >> + // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 + // / \ + // 5 6 + // | + // 7 + // | + // 8 + // / + // head-> 9 + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(5), + finalized_epoch: Epoch::new(2), + justified_state_balances: balances.clone(), + expected_head: get_hash(9), + }); + + // Change fork-choice justified epoch to 1, and the start block to 5 and ensure that 9 is + // the head. + // + // << Change justified epoch to 1 >> + // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 + // / \ + // 5 6 + // | + // 7 + // | + // 8 + // / + // 9 <- +2 votes + ops.push(Operation::ProcessAttestation { + validator_index: 0, + block_root: get_hash(9), + target_epoch: Epoch::new(5), + }); + ops.push(Operation::ProcessAttestation { + validator_index: 1, + block_root: get_hash(9), + target_epoch: Epoch::new(5), + }); + + // Add block 10 + // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 + // / \ + // 5 6 + // | + // 7 + // | + // 8 + // / \ + // 9 10 + ops.push(Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(10), + parent_root: get_hash(8), + justified_epoch: Epoch::new(2), + finalized_epoch: Epoch::new(2), + }); + + // Double-check the head is still 9 (no diagram this time) + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(5), + finalized_epoch: Epoch::new(2), + justified_state_balances: balances.clone(), + expected_head: get_hash(9), + }); + + // Introduce 2 more 
validators into the system + balances = vec![1; 4]; + + // Have the two new validators vote for 10 + // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 + // / \ + // 5 6 + // | + // 7 + // | + // 8 + // / \ + // 9 10 <- +2 votes + ops.push(Operation::ProcessAttestation { + validator_index: 2, + block_root: get_hash(10), + target_epoch: Epoch::new(5), + }); + ops.push(Operation::ProcessAttestation { + validator_index: 3, + block_root: get_hash(10), + target_epoch: Epoch::new(5), + }); + + // Check the head is now 10. + // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 + // / \ + // 5 6 + // | + // 7 + // | + // 8 + // / \ + // 9 10 <- head + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(5), + finalized_epoch: Epoch::new(2), + justified_state_balances: balances.clone(), + expected_head: get_hash(10), + }); + + // Set the balances of the last two validators to zero + balances = vec![1, 1, 0, 0]; + + // Check the head is 9 again. + // + // . + // . + // . + // | + // 8 + // / \ + // head-> 9 10 + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(5), + finalized_epoch: Epoch::new(2), + justified_state_balances: balances.clone(), + expected_head: get_hash(9), + }); + + // Set the balances of the last two validators back to 1 + balances = vec![1; 4]; + + // Check the head is 10. + // + // . + // . + // . + // | + // 8 + // / \ + // 9 10 <- head + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(5), + finalized_epoch: Epoch::new(2), + justified_state_balances: balances.clone(), + expected_head: get_hash(10), + }); + + // Remove the last two validators + balances = vec![1; 2]; + + // Check the head is 9 again. + // + // (prior blocks omitted for brevity) + // . + // . + // . 
+ // | + // 8 + // / \ + // head-> 9 10 + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(5), + finalized_epoch: Epoch::new(2), + justified_state_balances: balances.clone(), + expected_head: get_hash(9), + }); + + // Ensure that pruning below the prune threshold does not prune. + ops.push(Operation::Prune { + finalized_root: get_hash(5), + prune_threshold: usize::max_value(), + expected_len: 11, + }); + + // Run find-head, ensure the no-op prune didn't change the head. + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(5), + finalized_epoch: Epoch::new(2), + justified_state_balances: balances.clone(), + expected_head: get_hash(9), + }); + + // Ensure that pruning above the prune threshold does prune. + // + // + // 0 + // / \ + // 2 1 + // | + // 3 + // | + // 4 + // -------pruned here ------ + // 5 6 + // | + // 7 + // | + // 8 + // / \ + // 9 10 + ops.push(Operation::Prune { + finalized_root: get_hash(5), + prune_threshold: 1, + expected_len: 6, + }); + + // Run find-head, ensure the prune didn't change the head. 
+ ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(5), + finalized_epoch: Epoch::new(2), + justified_state_balances: balances.clone(), + expected_head: get_hash(9), + }); + + // Add block 11 + // + // 5 6 + // | + // 7 + // | + // 8 + // / \ + // 9 10 + // | + // 11 + ops.push(Operation::ProcessBlock { + slot: Slot::new(0), + root: get_hash(11), + parent_root: get_hash(9), + justified_epoch: Epoch::new(2), + finalized_epoch: Epoch::new(2), + }); + + // Ensure the head is now 11 + // + // 5 6 + // | + // 7 + // | + // 8 + // / \ + // 9 10 + // | + // head-> 11 + ops.push(Operation::FindHead { + justified_epoch: Epoch::new(2), + justified_root: get_hash(5), + finalized_epoch: Epoch::new(2), + justified_state_balances: balances.clone(), + expected_head: get_hash(11), + }); + + ForkChoiceTestDefinition { + finalized_block_slot: Slot::new(0), + justified_epoch: Epoch::new(1), + finalized_epoch: Epoch::new(1), + finalized_root: get_hash(0), + operations: ops, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test() { + let test = get_votes_test_definition(); + test.run(); + } +} diff --git a/eth2/proto_array_fork_choice/src/lib.rs b/eth2/proto_array_fork_choice/src/lib.rs new file mode 100644 index 00000000000..65134e3e66b --- /dev/null +++ b/eth2/proto_array_fork_choice/src/lib.rs @@ -0,0 +1,12 @@ +mod error; +pub mod fork_choice_test_definition; +mod proto_array; +mod proto_array_fork_choice; +mod ssz_container; + +pub use crate::proto_array_fork_choice::ProtoArrayForkChoice; +pub use error::Error; + +pub mod core { + pub use super::proto_array::ProtoArray; +} diff --git a/eth2/proto_array_fork_choice/src/proto_array.rs b/eth2/proto_array_fork_choice/src/proto_array.rs new file mode 100644 index 00000000000..85d47ede995 --- /dev/null +++ b/eth2/proto_array_fork_choice/src/proto_array.rs @@ -0,0 +1,405 @@ +use crate::error::Error; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; 
+use std::collections::HashMap;
+use types::{Epoch, Hash256, Slot};
+
+#[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)]
+pub struct ProtoNode {
+    /// The `slot` is not necessary for `ProtoArray`, it just exists so external components can
+    /// easily query the block slot. This is useful for upstream fork choice logic.
+    pub slot: Slot,
+    root: Hash256,
+    parent: Option<usize>,
+    justified_epoch: Epoch,
+    finalized_epoch: Epoch,
+    weight: u64,
+    best_child: Option<usize>,
+    best_descendant: Option<usize>,
+}
+
+#[derive(PartialEq, Debug, Serialize, Deserialize)]
+pub struct ProtoArray {
+    /// Do not attempt to prune the tree unless it has at least this many nodes. Small prunes
+    /// simply waste time.
+    pub prune_threshold: usize,
+    pub justified_epoch: Epoch,
+    pub finalized_epoch: Epoch,
+    pub nodes: Vec<ProtoNode>,
+    pub indices: HashMap<Hash256, usize>,
+}
+
+impl ProtoArray {
+    /// Iterate backwards through the array, touching all nodes and their parents and potentially
+    /// the best-child of each parent.
+    ///
+    /// The structure of the `self.nodes` array ensures that the child of each node is always
+    /// touched before its parent.
+    ///
+    /// For each node, the following is done:
+    ///
+    /// - Update the node's weight with the corresponding delta.
+    /// - Back-propagate each node's delta to its parents delta.
+    /// - Compare the current node with the parents best-child, updating it if the current node
+    /// should become the best child.
+    /// - If required, update the parents best-descendant with the current node or its best-descendant.
+    pub fn apply_score_changes(
+        &mut self,
+        mut deltas: Vec<i64>,
+        justified_epoch: Epoch,
+        finalized_epoch: Epoch,
+    ) -> Result<(), Error> {
+        if deltas.len() != self.indices.len() {
+            return Err(Error::InvalidDeltaLen {
+                deltas: deltas.len(),
+                indices: self.indices.len(),
+            });
+        }
+
+        if justified_epoch != self.justified_epoch || finalized_epoch != self.finalized_epoch {
+            self.justified_epoch = justified_epoch;
+            self.finalized_epoch = finalized_epoch;
+        }
+
+        // Iterate backwards through all indices in `self.nodes`.
+        for node_index in (0..self.nodes.len()).rev() {
+            let node = self
+                .nodes
+                .get_mut(node_index)
+                .ok_or_else(|| Error::InvalidNodeIndex(node_index))?;
+
+            // There is no need to adjust the balances or manage parent of the zero hash since it
+            // is an alias to the genesis block. The weight applied to the genesis block is
+            // irrelevant as we _always_ choose it and it's impossible for it to have a parent.
+            if node.root == Hash256::zero() {
+                continue;
+            }
+
+            let node_delta = deltas
+                .get(node_index)
+                .copied()
+                .ok_or_else(|| Error::InvalidNodeDelta(node_index))?;
+
+            // Apply the delta to the node.
+            if node_delta < 0 {
+                // Note: I am conflicted about whether to use `saturating_sub` or `checked_sub`
+                // here.
+                //
+                // I can't think of any valid reason why `node_delta.abs()` should be greater than
+                // `node.weight`, so I have chosen `checked_sub` to try and fail-fast if there is
+                // some error.
+                //
+                // However, I am not fully convinced that some valid case for `saturating_sub` does
+                // not exist.
+                node.weight = node
+                    .weight
+                    .checked_sub(node_delta.abs() as u64)
+                    .ok_or_else(|| Error::DeltaOverflow(node_index))?;
+            } else {
+                node.weight = node
+                    .weight
+                    .checked_add(node_delta as u64)
+                    .ok_or_else(|| Error::DeltaOverflow(node_index))?;
+            }
+
+            // If the node has a parent, try to update its best-child and best-descendant.
+            if let Some(parent_index) = node.parent {
+                let parent_delta = deltas
+                    .get_mut(parent_index)
+                    .ok_or_else(|| Error::InvalidParentDelta(parent_index))?;
+
+                // Back-propagate the nodes delta to its parent.
+                *parent_delta += node_delta;
+
+                self.maybe_update_best_child_and_descendant(parent_index, node_index)?;
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Register a block with the fork choice.
+    ///
+    /// It is only sane to supply a `None` parent for the genesis block.
+    pub fn on_block(
+        &mut self,
+        slot: Slot,
+        root: Hash256,
+        parent_opt: Option<Hash256>,
+        justified_epoch: Epoch,
+        finalized_epoch: Epoch,
+    ) -> Result<(), Error> {
+        // If the block is already known, simply ignore it.
+        if self.indices.contains_key(&root) {
+            return Ok(());
+        }
+
+        let node_index = self.nodes.len();
+
+        let node = ProtoNode {
+            slot,
+            root,
+            parent: parent_opt.and_then(|parent| self.indices.get(&parent).copied()),
+            justified_epoch,
+            finalized_epoch,
+            weight: 0,
+            best_child: None,
+            best_descendant: None,
+        };
+
+        self.indices.insert(node.root, node_index);
+        self.nodes.push(node.clone());
+
+        if let Some(parent_index) = node.parent {
+            self.maybe_update_best_child_and_descendant(parent_index, node_index)?;
+        }
+
+        Ok(())
+    }
+
+    /// Follows the best-descendant links to find the best-block (i.e., head-block).
+    ///
+    /// ## Notes
+    ///
+    /// The result of this function is not guaranteed to be accurate if `Self::on_new_block` has
+    /// been called without a subsequent `Self::apply_score_changes` call. This is because
+    /// `on_new_block` does not attempt to walk backwards through the tree and update the
+    /// best-child/best-descendant links.
+    pub fn find_head(&self, justified_root: &Hash256) -> Result<Hash256, Error> {
+        let justified_index = self
+            .indices
+            .get(justified_root)
+            .copied()
+            .ok_or_else(|| Error::JustifiedNodeUnknown(*justified_root))?;
+
+        let justified_node = self
+            .nodes
+            .get(justified_index)
+            .ok_or_else(|| Error::InvalidJustifiedIndex(justified_index))?;
+
+        let best_descendant_index = justified_node
+            .best_descendant
+            .unwrap_or_else(|| justified_index);
+
+        let best_node = self
+            .nodes
+            .get(best_descendant_index)
+            .ok_or_else(|| Error::InvalidBestDescendant(best_descendant_index))?;
+
+        // Perform a sanity check that the node is indeed valid to be the head.
+        if !self.node_is_viable_for_head(&best_node) {
+            return Err(Error::InvalidBestNode {
+                start_root: *justified_root,
+                justified_epoch: self.justified_epoch,
+                finalized_epoch: self.finalized_epoch,
+                head_root: justified_node.root,
+                head_justified_epoch: justified_node.justified_epoch,
+                head_finalized_epoch: justified_node.finalized_epoch,
+            });
+        }
+
+        Ok(best_node.root)
+    }
+
+    /// Update the tree with new finalization information. The tree is only actually pruned if both
+    /// of the two following criteria are met:
+    ///
+    /// - The supplied finalized epoch and root are different to the current values.
+    /// - The number of nodes in `self` is at least `self.prune_threshold`.
+    ///
+    /// # Errors
+    ///
+    /// Returns errors if:
+    ///
+    /// - The finalized epoch is less than the current one.
+    /// - The finalized epoch is equal to the current one, but the finalized root is different.
+    /// - There is some internal error relating to invalid indices inside `self`.
+    pub fn maybe_prune(&mut self, finalized_root: Hash256) -> Result<(), Error> {
+        let finalized_index = *self
+            .indices
+            .get(&finalized_root)
+            .ok_or_else(|| Error::FinalizedNodeUnknown(finalized_root))?;
+
+        if finalized_index < self.prune_threshold {
+            // Pruning at small numbers incurs more cost than benefit.
+ return Ok(()); + } + + // Remove the `self.indices` key/values for all the to-be-deleted nodes. + for node_index in 0..finalized_index { + let root = &self + .nodes + .get(node_index) + .ok_or_else(|| Error::InvalidNodeIndex(node_index))? + .root; + self.indices.remove(root); + } + + // Drop all the nodes prior to finalization. + self.nodes = self.nodes.split_off(finalized_index); + + // Adjust the indices map. + for (_root, index) in self.indices.iter_mut() { + *index = index + .checked_sub(finalized_index) + .ok_or_else(|| Error::IndexOverflow("indices"))?; + } + + // Iterate through all the existing nodes and adjust their indices to match the new layout + // of `self.nodes`. + for node in self.nodes.iter_mut() { + if let Some(parent) = node.parent { + // If `node.parent` is less than `finalized_index`, set it to `None`. + node.parent = parent.checked_sub(finalized_index); + } + if let Some(best_child) = node.best_child { + node.best_child = Some( + best_child + .checked_sub(finalized_index) + .ok_or_else(|| Error::IndexOverflow("best_child"))?, + ); + } + if let Some(best_descendant) = node.best_descendant { + node.best_descendant = Some( + best_descendant + .checked_sub(finalized_index) + .ok_or_else(|| Error::IndexOverflow("best_descendant"))?, + ); + } + } + + Ok(()) + } + + /// Observe the parent at `parent_index` with respect to the child at `child_index` and + /// potentially modify the `parent.best_child` and `parent.best_descendant` values. + /// + /// ## Detail + /// + /// There are four outcomes: + /// + /// - The child is already the best child but it's now invalid due to a FFG change and should be removed. + /// - The child is already the best child and the parent is updated with the new + /// best-descendant. + /// - The child is not the best child but becomes the best child. + /// - The child is not the best child and does not become the best child. 
+ fn maybe_update_best_child_and_descendant( + &mut self, + parent_index: usize, + child_index: usize, + ) -> Result<(), Error> { + let child = self + .nodes + .get(child_index) + .ok_or_else(|| Error::InvalidNodeIndex(child_index))?; + + let parent = self + .nodes + .get(parent_index) + .ok_or_else(|| Error::InvalidNodeIndex(parent_index))?; + + let child_leads_to_viable_head = self.node_leads_to_viable_head(&child)?; + + // These three variables are aliases to the three options that we may set the + // `parent.best_child` and `parent.best_descendant` to. + // + // I use the aliases to assist readability. + let change_to_none = (None, None); + let change_to_child = ( + Some(child_index), + child.best_descendant.or(Some(child_index)), + ); + let no_change = (parent.best_child, parent.best_descendant); + + let (new_best_child, new_best_descendant) = + if let Some(best_child_index) = parent.best_child { + if best_child_index == child_index && !child_leads_to_viable_head { + // If the child is already the best-child of the parent but it's not viable for + // the head, remove it. + change_to_none + } else if best_child_index == child_index { + // If the child is the best-child already, set it again to ensure that the + // best-descendant of the parent is updated. + change_to_child + } else { + let best_child = self + .nodes + .get(best_child_index) + .ok_or_else(|| Error::InvalidBestDescendant(best_child_index))?; + + let best_child_leads_to_viable_head = + self.node_leads_to_viable_head(&best_child)?; + + if child_leads_to_viable_head && !best_child_leads_to_viable_head { + // The child leads to a viable head, but the current best-child doesn't. + change_to_child + } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { + // The best child leads to a viable head, but the child doesn't. + no_change + } else if child.weight == best_child.weight { + // Tie-breaker of equal weights by root. 
+                        if child.root >= best_child.root {
+                            change_to_child
+                        } else {
+                            no_change
+                        }
+                    } else {
+                        // Choose the winner by weight.
+                        if child.weight >= best_child.weight {
+                            change_to_child
+                        } else {
+                            no_change
+                        }
+                    }
+                }
+            } else {
+                if child_leads_to_viable_head {
+                    // There is no current best-child and the child is viable.
+                    change_to_child
+                } else {
+                    // There is no current best-child but the child is not viable.
+                    no_change
+                }
+            };
+
+        let parent = self
+            .nodes
+            .get_mut(parent_index)
+            .ok_or_else(|| Error::InvalidNodeIndex(parent_index))?;
+
+        parent.best_child = new_best_child;
+        parent.best_descendant = new_best_descendant;
+
+        Ok(())
+    }
+
+    /// Indicates if the node itself is viable for the head, or if its best descendant is viable
+    /// for the head.
+    fn node_leads_to_viable_head(&self, node: &ProtoNode) -> Result<bool, Error> {
+        let best_descendant_is_viable_for_head =
+            if let Some(best_descendant_index) = node.best_descendant {
+                let best_descendant = self
+                    .nodes
+                    .get(best_descendant_index)
+                    .ok_or_else(|| Error::InvalidBestDescendant(best_descendant_index))?;
+
+                self.node_is_viable_for_head(best_descendant)
+            } else {
+                false
+            };
+
+        Ok(best_descendant_is_viable_for_head || self.node_is_viable_for_head(node))
+    }
+
+    /// This is equivalent to the `filter_block_tree` function in the eth2 spec:
+    ///
+    /// https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/fork-choice.md#filter_block_tree
+    ///
+    /// Any node that has a different finalized or justified epoch should not be viable for the
+    /// head.
+ fn node_is_viable_for_head(&self, node: &ProtoNode) -> bool { + (node.justified_epoch == self.justified_epoch || self.justified_epoch == Epoch::new(0)) + && (node.finalized_epoch == self.finalized_epoch + || self.finalized_epoch == Epoch::new(0)) + } +} diff --git a/eth2/proto_array_fork_choice/src/proto_array_fork_choice.rs b/eth2/proto_array_fork_choice/src/proto_array_fork_choice.rs new file mode 100644 index 00000000000..0112b690c60 --- /dev/null +++ b/eth2/proto_array_fork_choice/src/proto_array_fork_choice.rs @@ -0,0 +1,697 @@ +use crate::error::Error; +use crate::proto_array::ProtoArray; +use crate::ssz_container::SszContainer; +use parking_lot::{RwLock, RwLockReadGuard}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::collections::HashMap; +use types::{Epoch, Hash256, Slot}; + +pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; + +#[derive(Default, PartialEq, Clone, Encode, Decode)] +pub struct VoteTracker { + current_root: Hash256, + next_root: Hash256, + next_epoch: Epoch, +} + +/// A Vec-wrapper which will grow to match any request. +/// +/// E.g., a `get` or `insert` to an out-of-bounds element will cause the Vec to grow (using +/// Default) to the smallest size required to fulfill the request. 
+#[derive(Default, Clone, Debug, PartialEq)] +pub struct ElasticList(pub Vec); + +impl ElasticList +where + T: Default, +{ + fn ensure(&mut self, i: usize) { + if self.0.len() <= i { + self.0.resize_with(i + 1, Default::default); + } + } + + pub fn get_mut(&mut self, i: usize) -> &mut T { + self.ensure(i); + &mut self.0[i] + } + + pub fn iter_mut(&mut self) -> impl Iterator { + self.0.iter_mut() + } +} + +pub struct ProtoArrayForkChoice { + pub(crate) proto_array: RwLock, + pub(crate) votes: RwLock>, + pub(crate) balances: RwLock>, +} + +impl PartialEq for ProtoArrayForkChoice { + fn eq(&self, other: &Self) -> bool { + *self.proto_array.read() == *other.proto_array.read() + && *self.votes.read() == *other.votes.read() + && *self.balances.read() == *other.balances.read() + } +} + +impl ProtoArrayForkChoice { + pub fn new( + finalized_block_slot: Slot, + justified_epoch: Epoch, + finalized_epoch: Epoch, + finalized_root: Hash256, + ) -> Result { + let mut proto_array = ProtoArray { + prune_threshold: DEFAULT_PRUNE_THRESHOLD, + justified_epoch, + finalized_epoch, + nodes: Vec::with_capacity(1), + indices: HashMap::with_capacity(1), + }; + + proto_array + .on_block( + finalized_block_slot, + finalized_root, + None, + justified_epoch, + finalized_epoch, + ) + .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; + + Ok(Self { + proto_array: RwLock::new(proto_array), + votes: RwLock::new(ElasticList::default()), + balances: RwLock::new(vec![]), + }) + } + + pub fn process_attestation( + &self, + validator_index: usize, + block_root: Hash256, + target_epoch: Epoch, + ) -> Result<(), String> { + let mut votes = self.votes.write(); + let vote = votes.get_mut(validator_index); + + if target_epoch > vote.next_epoch || *vote == VoteTracker::default() { + vote.next_root = block_root; + vote.next_epoch = target_epoch; + } + + Ok(()) + } + + pub fn process_block( + &self, + slot: Slot, + block_root: Hash256, + parent_root: Hash256, + justified_epoch: 
Epoch, + finalized_epoch: Epoch, + ) -> Result<(), String> { + self.proto_array + .write() + .on_block( + slot, + block_root, + Some(parent_root), + justified_epoch, + finalized_epoch, + ) + .map_err(|e| format!("process_block_error: {:?}", e)) + } + + pub fn find_head( + &self, + justified_epoch: Epoch, + justified_root: Hash256, + finalized_epoch: Epoch, + justified_state_balances: &[u64], + ) -> Result { + let mut proto_array = self.proto_array.write(); + let mut votes = self.votes.write(); + let mut old_balances = self.balances.write(); + + let new_balances = justified_state_balances; + + let deltas = compute_deltas( + &proto_array.indices, + &mut votes, + &old_balances, + &new_balances, + ) + .map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?; + + proto_array + .apply_score_changes(deltas, justified_epoch, finalized_epoch) + .map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?; + + *old_balances = new_balances.to_vec(); + + proto_array + .find_head(&justified_root) + .map_err(|e| format!("find_head failed: {:?}", e)) + } + + pub fn maybe_prune(&self, finalized_root: Hash256) -> Result<(), String> { + self.proto_array + .write() + .maybe_prune(finalized_root) + .map_err(|e| format!("find_head maybe_prune failed: {:?}", e)) + } + + pub fn set_prune_threshold(&self, prune_threshold: usize) { + self.proto_array.write().prune_threshold = prune_threshold; + } + + pub fn len(&self) -> usize { + self.proto_array.read().nodes.len() + } + + pub fn contains_block(&self, block_root: &Hash256) -> bool { + self.proto_array.read().indices.contains_key(block_root) + } + + pub fn block_slot(&self, block_root: &Hash256) -> Option { + let proto_array = self.proto_array.read(); + + let i = proto_array.indices.get(block_root)?; + let block = proto_array.nodes.get(*i)?; + + Some(block.slot) + } + + pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { + let votes = self.votes.read(); + + if validator_index < 
votes.0.len() {
+            let vote = &votes.0[validator_index];
+
+            if *vote == VoteTracker::default() {
+                None
+            } else {
+                Some((vote.next_root, vote.next_epoch))
+            }
+        } else {
+            None
+        }
+    }
+
+    pub fn as_bytes(&self) -> Vec<u8> {
+        SszContainer::from(self).as_ssz_bytes()
+    }
+
+    pub fn from_bytes(bytes: &[u8]) -> Result<Self, String> {
+        SszContainer::from_ssz_bytes(bytes)
+            .map(Into::into)
+            .map_err(|e| format!("Failed to decode ProtoArrayForkChoice: {:?}", e))
+    }
+
+    /// Returns a read-lock to core `ProtoArray` struct.
+    ///
+    /// Should only be used when encoding/decoding during troubleshooting.
+    pub fn core_proto_array(&self) -> RwLockReadGuard<ProtoArray> {
+        self.proto_array.read()
+    }
+}
+
+/// Returns a list of `deltas`, where there is one delta for each of the indices in
+/// `0..indices.len()`.
+///
+/// The deltas are formed by a change between `old_balances` and `new_balances`, and/or a change of vote in `votes`.
+///
+/// ## Errors
+///
+/// - If a value in `indices` is greater than or equal to `indices.len()`.
+/// - If some `Hash256` in `votes` is not a key in `indices` (except for `Hash256::zero()`, this is
+/// always valid).
+fn compute_deltas(
+    indices: &HashMap<Hash256, usize>,
+    votes: &mut ElasticList<VoteTracker>,
+    old_balances: &[u64],
+    new_balances: &[u64],
+) -> Result<Vec<i64>, Error> {
+    let mut deltas = vec![0_i64; indices.len()];
+
+    for (val_index, vote) in votes.iter_mut().enumerate() {
+        // There is no need to create a score change if the validator has never voted or both their
+        // votes are for the zero hash (alias to the genesis block).
+        if vote.current_root == Hash256::zero() && vote.next_root == Hash256::zero() {
+            continue;
+        }
+
+        // If the validator was not included in the _old_ balances (i.e., it did not exist yet)
+        // then say its balance was zero.
+        let old_balance = old_balances.get(val_index).copied().unwrap_or_else(|| 0);
+
+        // If the validator's vote is not known in the _new_ balances, then use a balance of zero.
+        //
+        // It is possible that there is a vote for an unknown validator if we change our justified
+        // state to a new state with a higher epoch that is on a different fork because that fork may have
+        // on-boarded fewer validators than the prior fork.
+        let new_balance = new_balances.get(val_index).copied().unwrap_or_else(|| 0);
+
+        if vote.current_root != vote.next_root || old_balance != new_balance {
+            // We ignore the vote if it is not known in `indices`. We assume that it is outside
+            // of our tree (i.e., pre-finalization) and therefore not interesting.
+            if let Some(current_delta_index) = indices.get(&vote.current_root).copied() {
+                let delta = deltas
+                    .get(current_delta_index)
+                    .ok_or_else(|| Error::InvalidNodeDelta(current_delta_index))?
+                    .checked_sub(old_balance as i64)
+                    .ok_or_else(|| Error::DeltaOverflow(current_delta_index))?;
+
+                // Array access safe due to check on previous line.
+                deltas[current_delta_index] = delta;
+            }
+
+            // We ignore the vote if it is not known in `indices`. We assume that it is outside
+            // of our tree (i.e., pre-finalization) and therefore not interesting.
+            if let Some(next_delta_index) = indices.get(&vote.next_root).copied() {
+                let delta = deltas
+                    .get(next_delta_index)
+                    .ok_or_else(|| Error::InvalidNodeDelta(next_delta_index))?
+                    .checked_add(new_balance as i64)
+                    .ok_or_else(|| Error::DeltaOverflow(next_delta_index))?;
+
+                // Array access safe due to check on previous line.
+                deltas[next_delta_index] = delta;
+            }
+
+            vote.current_root = vote.next_root;
+        }
+    }
+
+    Ok(deltas)
+}
+
+#[cfg(test)]
+mod test_compute_deltas {
+    use super::*;
+
+    /// Gives a hash that is not the zero hash (unless i is `usize::max_value`).
+ fn hash_from_index(i: usize) -> Hash256 { + Hash256::from_low_u64_be(i as u64 + 1) + } + + #[test] + fn zero_hash() { + let validator_count: usize = 16; + + let mut indices = HashMap::new(); + let mut votes = ElasticList::default(); + let mut old_balances = vec![]; + let mut new_balances = vec![]; + + for i in 0..validator_count { + indices.insert(hash_from_index(i), i); + votes.0.push(VoteTracker { + current_root: Hash256::zero(), + next_root: Hash256::zero(), + next_epoch: Epoch::new(0), + }); + old_balances.push(0); + new_balances.push(0); + } + + let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) + .expect("should compute deltas"); + + assert_eq!( + deltas.len(), + validator_count, + "deltas should have expected length" + ); + assert_eq!( + deltas, + vec![0; validator_count], + "deltas should all be zero" + ); + + for vote in votes.0 { + assert_eq!( + vote.current_root, vote.next_root, + "the vote shoulds should have been updated" + ); + } + } + + #[test] + fn all_voted_the_same() { + const BALANCE: u64 = 42; + + let validator_count: usize = 16; + + let mut indices = HashMap::new(); + let mut votes = ElasticList::default(); + let mut old_balances = vec![]; + let mut new_balances = vec![]; + + for i in 0..validator_count { + indices.insert(hash_from_index(i), i); + votes.0.push(VoteTracker { + current_root: Hash256::zero(), + next_root: hash_from_index(0), + next_epoch: Epoch::new(0), + }); + old_balances.push(BALANCE); + new_balances.push(BALANCE); + } + + let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) + .expect("should compute deltas"); + + assert_eq!( + deltas.len(), + validator_count, + "deltas should have expected length" + ); + + for (i, delta) in deltas.into_iter().enumerate() { + if i == 0 { + assert_eq!( + delta, + BALANCE as i64 * validator_count as i64, + "zero'th root should have a delta" + ); + } else { + assert_eq!(delta, 0, "all other deltas should be zero"); + } + } + + for vote in 
votes.0 { + assert_eq!( + vote.current_root, vote.next_root, + "the vote shoulds should have been updated" + ); + } + } + + #[test] + fn different_votes() { + const BALANCE: u64 = 42; + + let validator_count: usize = 16; + + let mut indices = HashMap::new(); + let mut votes = ElasticList::default(); + let mut old_balances = vec![]; + let mut new_balances = vec![]; + + for i in 0..validator_count { + indices.insert(hash_from_index(i), i); + votes.0.push(VoteTracker { + current_root: Hash256::zero(), + next_root: hash_from_index(i), + next_epoch: Epoch::new(0), + }); + old_balances.push(BALANCE); + new_balances.push(BALANCE); + } + + let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) + .expect("should compute deltas"); + + assert_eq!( + deltas.len(), + validator_count, + "deltas should have expected length" + ); + + for delta in deltas.into_iter() { + assert_eq!( + delta, BALANCE as i64, + "each root should have the same delta" + ); + } + + for vote in votes.0 { + assert_eq!( + vote.current_root, vote.next_root, + "the vote shoulds should have been updated" + ); + } + } + + #[test] + fn moving_votes() { + const BALANCE: u64 = 42; + + let validator_count: usize = 16; + + let mut indices = HashMap::new(); + let mut votes = ElasticList::default(); + let mut old_balances = vec![]; + let mut new_balances = vec![]; + + for i in 0..validator_count { + indices.insert(hash_from_index(i), i); + votes.0.push(VoteTracker { + current_root: hash_from_index(0), + next_root: hash_from_index(1), + next_epoch: Epoch::new(0), + }); + old_balances.push(BALANCE); + new_balances.push(BALANCE); + } + + let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) + .expect("should compute deltas"); + + assert_eq!( + deltas.len(), + validator_count, + "deltas should have expected length" + ); + + let total_delta = BALANCE as i64 * validator_count as i64; + + for (i, delta) in deltas.into_iter().enumerate() { + if i == 0 { + assert_eq!( + delta, 
+ 0 - total_delta, + "zero'th root should have a negative delta" + ); + } else if i == 1 { + assert_eq!(delta, total_delta, "first root should have positive delta"); + } else { + assert_eq!(delta, 0, "all other deltas should be zero"); + } + } + + for vote in votes.0 { + assert_eq!( + vote.current_root, vote.next_root, + "the vote shoulds should have been updated" + ); + } + } + + #[test] + fn move_out_of_tree() { + const BALANCE: u64 = 42; + + let mut indices = HashMap::new(); + let mut votes = ElasticList::default(); + + // There is only one block. + indices.insert(hash_from_index(1), 0); + + // There are two validators. + let old_balances = vec![BALANCE; 2]; + let new_balances = vec![BALANCE; 2]; + + // One validator moves their vote from the block to the zero hash. + votes.0.push(VoteTracker { + current_root: hash_from_index(1), + next_root: Hash256::zero(), + next_epoch: Epoch::new(0), + }); + + // One validator moves their vote from the block to something outside the tree. + votes.0.push(VoteTracker { + current_root: hash_from_index(1), + next_root: Hash256::from_low_u64_be(1337), + next_epoch: Epoch::new(0), + }); + + let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) + .expect("should compute deltas"); + + assert_eq!(deltas.len(), 1, "deltas should have expected length"); + + assert_eq!( + deltas[0], + 0 - BALANCE as i64 * 2, + "the block should have lost both balances" + ); + + for vote in votes.0 { + assert_eq!( + vote.current_root, vote.next_root, + "the vote shoulds should have been updated" + ); + } + } + + #[test] + fn changing_balances() { + const OLD_BALANCE: u64 = 42; + const NEW_BALANCE: u64 = OLD_BALANCE * 2; + + let validator_count: usize = 16; + + let mut indices = HashMap::new(); + let mut votes = ElasticList::default(); + let mut old_balances = vec![]; + let mut new_balances = vec![]; + + for i in 0..validator_count { + indices.insert(hash_from_index(i), i); + votes.0.push(VoteTracker { + current_root: 
hash_from_index(0), + next_root: hash_from_index(1), + next_epoch: Epoch::new(0), + }); + old_balances.push(OLD_BALANCE); + new_balances.push(NEW_BALANCE); + } + + let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) + .expect("should compute deltas"); + + assert_eq!( + deltas.len(), + validator_count, + "deltas should have expected length" + ); + + for (i, delta) in deltas.into_iter().enumerate() { + if i == 0 { + assert_eq!( + delta, + 0 - OLD_BALANCE as i64 * validator_count as i64, + "zero'th root should have a negative delta" + ); + } else if i == 1 { + assert_eq!( + delta, + NEW_BALANCE as i64 * validator_count as i64, + "first root should have positive delta" + ); + } else { + assert_eq!(delta, 0, "all other deltas should be zero"); + } + } + + for vote in votes.0 { + assert_eq!( + vote.current_root, vote.next_root, + "the vote shoulds should have been updated" + ); + } + } + + #[test] + fn validator_appears() { + const BALANCE: u64 = 42; + + let mut indices = HashMap::new(); + let mut votes = ElasticList::default(); + + // There are two blocks. + indices.insert(hash_from_index(1), 0); + indices.insert(hash_from_index(2), 1); + + // There is only one validator in the old balances. + let old_balances = vec![BALANCE; 1]; + // There are two validators in the new balances. + let new_balances = vec![BALANCE; 2]; + + // Both validator move votes from block 1 to block 2. 
+ for _ in 0..2 { + votes.0.push(VoteTracker { + current_root: hash_from_index(1), + next_root: hash_from_index(2), + next_epoch: Epoch::new(0), + }); + } + + let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) + .expect("should compute deltas"); + + assert_eq!(deltas.len(), 2, "deltas should have expected length"); + + assert_eq!( + deltas[0], + 0 - BALANCE as i64, + "block 1 should have only lost one balance" + ); + assert_eq!( + deltas[1], + 2 * BALANCE as i64, + "block 2 should have gained two balances" + ); + + for vote in votes.0 { + assert_eq!( + vote.current_root, vote.next_root, + "the vote shoulds should have been updated" + ); + } + } + + #[test] + fn validator_disappears() { + const BALANCE: u64 = 42; + + let mut indices = HashMap::new(); + let mut votes = ElasticList::default(); + + // There are two blocks. + indices.insert(hash_from_index(1), 0); + indices.insert(hash_from_index(2), 1); + + // There are two validators in the old balances. + let old_balances = vec![BALANCE; 2]; + // There is only one validator in the new balances. + let new_balances = vec![BALANCE; 1]; + + // Both validator move votes from block 1 to block 2. 
+ for _ in 0..2 { + votes.0.push(VoteTracker { + current_root: hash_from_index(1), + next_root: hash_from_index(2), + next_epoch: Epoch::new(0), + }); + } + + let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) + .expect("should compute deltas"); + + assert_eq!(deltas.len(), 2, "deltas should have expected length"); + + assert_eq!( + deltas[0], + 0 - BALANCE as i64 * 2, + "block 1 should have lost both balances" + ); + assert_eq!( + deltas[1], BALANCE as i64, + "block 2 should have only gained one balance" + ); + + for vote in votes.0 { + assert_eq!( + vote.current_root, vote.next_root, + "the vote should have been updated" + ); + } + } +} diff --git a/eth2/proto_array_fork_choice/src/ssz_container.rs b/eth2/proto_array_fork_choice/src/ssz_container.rs new file mode 100644 index 00000000000..bd305ae72cf --- /dev/null +++ b/eth2/proto_array_fork_choice/src/ssz_container.rs @@ -0,0 +1,54 @@ +use crate::{ + proto_array::{ProtoArray, ProtoNode}, + proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, +}; +use parking_lot::RwLock; +use ssz_derive::{Decode, Encode}; +use std::collections::HashMap; +use std::iter::FromIterator; +use types::{Epoch, Hash256}; + +#[derive(Encode, Decode)] +pub struct SszContainer { + votes: Vec, + balances: Vec, + prune_threshold: usize, + justified_epoch: Epoch, + finalized_epoch: Epoch, + nodes: Vec, + indices: Vec<(Hash256, usize)>, +} + +impl From<&ProtoArrayForkChoice> for SszContainer { + fn from(from: &ProtoArrayForkChoice) -> Self { + let proto_array = from.proto_array.read(); + + Self { + votes: from.votes.read().0.clone(), + balances: from.balances.read().clone(), + prune_threshold: proto_array.prune_threshold, + justified_epoch: proto_array.justified_epoch, + finalized_epoch: proto_array.finalized_epoch, + nodes: proto_array.nodes.clone(), + indices: proto_array.indices.iter().map(|(k, v)| (*k, *v)).collect(), + } + } +} + +impl From for ProtoArrayForkChoice { + fn from(from: 
SszContainer) -> Self { + let proto_array = ProtoArray { + prune_threshold: from.prune_threshold, + justified_epoch: from.justified_epoch, + finalized_epoch: from.finalized_epoch, + nodes: from.nodes, + indices: HashMap::from_iter(from.indices.into_iter()), + }; + + Self { + proto_array: RwLock::new(proto_array), + votes: RwLock::new(ElasticList(from.votes)), + balances: RwLock::new(from.balances), + } + } +} diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index 96b8f201456..80b2525ef5b 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -18,7 +18,6 @@ serde_yaml = "0.8.11" eth2_ssz = "0.1.2" beacon_chain = { path = "../../beacon_node/beacon_chain" } store = { path = "../../beacon_node/store" } -lmd_ghost = { path = "../lmd_ghost" } [dependencies] diff --git a/eth2/utils/logging/Cargo.toml b/eth2/utils/logging/Cargo.toml index 9d9405429e4..0fe05666312 100644 --- a/eth2/utils/logging/Cargo.toml +++ b/eth2/utils/logging/Cargo.toml @@ -7,3 +7,5 @@ edition = "2018" [dependencies] slog = "2.5.2" slog-term = "2.4.2" +lighthouse_metrics = { path = "../lighthouse_metrics" } +lazy_static = "1.4.0" diff --git a/eth2/utils/logging/src/lib.rs b/eth2/utils/logging/src/lib.rs index cbd256f426d..5a2a0757f21 100644 --- a/eth2/utils/logging/src/lib.rs +++ b/eth2/utils/logging/src/lib.rs @@ -1,7 +1,24 @@ +#[macro_use] +extern crate lazy_static; + +use lighthouse_metrics::{ + inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult, +}; use std::io::{Result, Write}; pub const MAX_MESSAGE_WIDTH: usize = 40; +lazy_static! 
{ + pub static ref INFOS_TOTAL: MetricsResult = + try_create_int_counter("info_total", "Count of infos logged"); + pub static ref WARNS_TOTAL: MetricsResult = + try_create_int_counter("warn_total", "Count of warns logged"); + pub static ref ERRORS_TOTAL: MetricsResult = + try_create_int_counter("error_total", "Count of errors logged"); + pub static ref CRITS_TOTAL: MetricsResult = + try_create_int_counter("crit_total", "Count of crits logged"); +} + pub struct AlignedTermDecorator { wrapped: slog_term::TermDecorator, message_width: usize, @@ -19,14 +36,22 @@ impl AlignedTermDecorator { impl slog_term::Decorator for AlignedTermDecorator { fn with_record( &self, - _record: &slog::Record, + record: &slog::Record, _logger_values: &slog::OwnedKVList, f: F, ) -> Result<()> where F: FnOnce(&mut dyn slog_term::RecordDecorator) -> std::io::Result<()>, { - self.wrapped.with_record(_record, _logger_values, |deco| { + match record.level() { + slog::Level::Info => inc_counter(&INFOS_TOTAL), + slog::Level::Warning => inc_counter(&WARNS_TOTAL), + slog::Level::Error => inc_counter(&ERRORS_TOTAL), + slog::Level::Critical => inc_counter(&CRITS_TOTAL), + _ => (), + } + + self.wrapped.with_record(record, _logger_values, |deco| { f(&mut AlignedRecordDecorator::new(deco, self.message_width)) }) } diff --git a/eth2/utils/remote_beacon_node/Cargo.toml b/eth2/utils/remote_beacon_node/Cargo.toml index 68a5e5df0d1..f17109d8d9b 100644 --- a/eth2/utils/remote_beacon_node/Cargo.toml +++ b/eth2/utils/remote_beacon_node/Cargo.toml @@ -17,3 +17,4 @@ hex = "0.3" eth2_ssz = { path = "../../../eth2/utils/ssz" } serde_json = "^1.0" eth2_config = { path = "../../../eth2/utils/eth2_config" } +proto_array_fork_choice = { path = "../../../eth2/proto_array_fork_choice" } diff --git a/eth2/utils/remote_beacon_node/src/lib.rs b/eth2/utils/remote_beacon_node/src/lib.rs index 55d6d9cbf16..f01c9f1aa0d 100644 --- a/eth2/utils/remote_beacon_node/src/lib.rs +++ b/eth2/utils/remote_beacon_node/src/lib.rs @@ -5,6 
+5,7 @@ use eth2_config::Eth2Config; use futures::{future, Future, IntoFuture}; +use proto_array_fork_choice::core::ProtoArray; use reqwest::{ r#async::{Client, ClientBuilder, Response}, StatusCode, @@ -101,6 +102,10 @@ impl HttpClient { Node(self.clone()) } + pub fn advanced(&self) -> Advanced { + Advanced(self.clone()) + } + fn url(&self, path: &str) -> Result { self.url.join(path).map_err(|e| e.into()) } @@ -536,6 +541,27 @@ impl Node { } } +/// Provides the functions on the `/advanced` endpoint of the node. +#[derive(Clone)] +pub struct Advanced(HttpClient); + +impl Advanced { + fn url(&self, path: &str) -> Result { + self.0 + .url("advanced/") + .and_then(move |url| url.join(path).map_err(Error::from)) + .map_err(Into::into) + } + + /// Gets the core `ProtoArray` struct from the node. + pub fn get_fork_choice(&self) -> impl Future { + let client = self.0.clone(); + self.url("fork_choice") + .into_future() + .and_then(move |url| client.json_get(url, vec![])) + } +} + #[derive(Deserialize)] #[serde(bound = "T: EthSpec")] pub struct BlockResponse {