diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 919bdbac2aa..0b29751106b 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -190,6 +190,9 @@ jobs: needs: cargo-fmt steps: - uses: actions/checkout@v1 + - uses: actions/setup-go@v2 + with: + go-version: '1.17' - name: Get latest version of stable Rust run: rustup update stable - name: Run exec engine integration tests in release diff --git a/Cargo.lock b/Cargo.lock index 4c5ae0a007b..a7057004880 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -514,6 +514,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "serde_yaml", "slog", "slog-async", "slog-scope", @@ -730,6 +731,10 @@ dependencies = [ "eth2_ssz", "ethereum-types 0.12.1", "hex", + "serde", + "serde_json", + "serde_yaml", + "types", ] [[package]] @@ -3357,8 +3362,10 @@ dependencies = [ "lighthouse_network", "lighthouse_version", "malloc_utils", + "sensitive_url", "serde", "serde_json", + "serde_yaml", "slashing_protection", "slog", "sloggers", diff --git a/README.md b/README.md index 00900b8c3d7..acf5f5926de 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -# Lighthouse: Ethereum 2.0 +# Lighthouse: Ethereum consensus client -An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prime. +An open-source Ethereum consensus client, written in Rust and maintained by Sigma Prime. [![Build Status]][Build Link] [![Book Status]][Book Link] [![Chat Badge]][Chat Link] @@ -22,7 +22,7 @@ An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prim Lighthouse is: -- Ready for use on Eth2 mainnet. +- Ready for use on Ethereum consensus mainnet. - Fully open-source, licensed under Apache 2.0. - Security-focused. Fuzzing techniques have been continuously applied and several external security reviews have been performed. 
- Built in [Rust](https://www.rust-lang.org), a modern language providing unique safety guarantees and @@ -30,13 +30,13 @@ Lighthouse is: - Funded by various organisations, including Sigma Prime, the Ethereum Foundation, ConsenSys, the Decentralization Foundation and private individuals. - Actively involved in the specification and security analysis of the - Ethereum 2.0 specification. + Ethereum proof-of-stake consensus specification. -## Eth2 Deposit Contract +## Staking Deposit Contract The Lighthouse team acknowledges [`0x00000000219ab540356cBB839Cbe05303d7705Fa`](https://etherscan.io/address/0x00000000219ab540356cbb839cbe05303d7705fa) -as the canonical Eth2 deposit contract address. +as the canonical staking deposit contract address. ## Documentation @@ -66,7 +66,7 @@ of the Lighthouse book. ## Contact The best place for discussion is the [Lighthouse Discord -server](https://discord.gg/cyAszAh). +server](https://discord.gg/cyAszAh). Sign up to the [Lighthouse Development Updates](http://eepurl.com/dh9Lvb) mailing list for email notifications about releases, network status and other important information. diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 4ac587fd762..866e1e33831 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -312,6 +312,8 @@ pub enum ExecutionPayloadError { /// /// The peer is not necessarily invalid. PoWParentMissing(ExecutionBlockHash), + /// The execution node is syncing but we fail the conditions for optimistic sync + UnverifiedNonOptimisticCandidate, } impl From for ExecutionPayloadError { @@ -1128,6 +1130,29 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // `randao` may change. 
let payload_verification_status = notify_new_payload(chain, &state, block.message())?; + // If the payload did not validate or invalidate the block, check to see if this block is + // valid for optimistic import. + if payload_verification_status == PayloadVerificationStatus::NotVerified { + let current_slot = chain + .slot_clock + .now() + .ok_or(BeaconChainError::UnableToReadSlot)?; + + if !chain + .fork_choice + .read() + .is_optimistic_candidate_block( + current_slot, + block.slot(), + &block.parent_root(), + &chain.spec, + ) + .map_err(BeaconChainError::from)? + { + return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); + } + } + // If the block is sufficiently recent, notify the validator monitor. if let Some(slot) = chain.slot_clock.now() { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 30a0d2b1982..f947d8509bf 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -141,13 +141,33 @@ pub fn validate_merge_block( } .into()), None => { - debug!( - chain.log, - "Optimistically accepting terminal block"; - "block_hash" => ?execution_payload.parent_hash, - "msg" => "the terminal block/parent was unavailable" - ); - Ok(()) + let current_slot = chain + .slot_clock + .now() + .ok_or(BeaconChainError::UnableToReadSlot)?; + + // Ensure the block is a candidate for optimistic import. + if chain + .fork_choice + .read() + .is_optimistic_candidate_block( + current_slot, + block.slot(), + &block.parent_root(), + &chain.spec, + ) + .map_err(BeaconChainError::from)? 
+ { + debug!( + chain.log, + "Optimistically accepting terminal block"; + "block_hash" => ?execution_payload.parent_hash, + "msg" => "the terminal block/parent was unavailable" + ); + Ok(()) + } else { + Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()) + } } } } diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 198f6741570..6d3ffff1944 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -231,8 +231,10 @@ fn valid_invalid_syncing() { /// `latest_valid_hash`. #[test] fn invalid_payload_invalidates_parent() { - let mut rig = InvalidPayloadRig::new(); + let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); + rig.import_block(Payload::Valid); // Import a valid transition block. + rig.move_to_first_justification(Payload::Syncing); let roots = vec![ rig.import_block(Payload::Syncing), @@ -258,6 +260,7 @@ fn invalid_payload_invalidates_parent() { fn justified_checkpoint_becomes_invalid() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); + rig.import_block(Payload::Valid); // Import a valid transition block. rig.move_to_first_justification(Payload::Syncing); let justified_checkpoint = rig.head_info().current_justified_checkpoint; @@ -305,7 +308,9 @@ fn pre_finalized_latest_valid_hash() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + let mut blocks = vec![]; + blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. 
+ blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing)); assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); @@ -330,7 +335,11 @@ fn pre_finalized_latest_valid_hash() { for i in E::slots_per_epoch() * finalized_epoch..num_blocks { let slot = Slot::new(i); let root = rig.block_root_at_slot(slot).unwrap(); - assert!(rig.execution_status(root).is_not_verified()); + if slot == 1 { + assert!(rig.execution_status(root).is_valid()); + } else { + assert!(rig.execution_status(root).is_not_verified()); + } } } @@ -344,7 +353,10 @@ fn latest_valid_hash_will_validate() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - let blocks = rig.build_blocks(4, Payload::Syncing); + + let mut blocks = vec![]; + blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. + blocks.extend(rig.build_blocks(4, Payload::Syncing)); let latest_valid_root = rig .block_root_at_slot(Slot::new(LATEST_VALID_SLOT)) @@ -357,7 +369,7 @@ fn latest_valid_hash_will_validate() { assert_eq!(rig.head_info().slot, LATEST_VALID_SLOT); - for slot in 0..=4 { + for slot in 0..=5 { let slot = Slot::new(slot); let root = if slot > 0 { // If not the genesis slot, check the blocks we just produced. @@ -386,7 +398,9 @@ fn latest_valid_hash_is_junk() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + let mut blocks = vec![]; + blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. 
+ blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing)); assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); @@ -408,7 +422,11 @@ fn latest_valid_hash_is_junk() { for i in E::slots_per_epoch() * finalized_epoch..num_blocks { let slot = Slot::new(i); let root = rig.block_root_at_slot(slot).unwrap(); - assert!(rig.execution_status(root).is_not_verified()); + if slot == 1 { + assert!(rig.execution_status(root).is_valid()); + } else { + assert!(rig.execution_status(root).is_not_verified()); + } } } @@ -421,6 +439,7 @@ fn invalidates_all_descendants() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); + rig.import_block(Payload::Valid); // Import a valid transition block. let blocks = rig.build_blocks(num_blocks, Payload::Syncing); assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); @@ -493,6 +512,7 @@ fn switches_heads() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); + rig.import_block(Payload::Valid); // Import a valid transition block. let blocks = rig.build_blocks(num_blocks, Payload::Syncing); assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); @@ -571,8 +591,9 @@ fn invalid_during_processing() { #[test] fn invalid_after_optimistic_sync() { - let mut rig = InvalidPayloadRig::new(); + let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); + rig.import_block(Payload::Valid); // Import a valid transition block. 
let mut roots = vec![ rig.import_block(Payload::Syncing), diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index d1d4ac4d73f..334be3bfead 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -126,7 +126,7 @@ pub struct ExecutionBlock { #[derive(Clone, Copy, Debug)] pub struct PayloadAttributes { pub timestamp: u64, - pub random: Hash256, + pub prev_randao: Hash256, pub suggested_fee_recipient: Address, } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 0d86837243b..e443e3ed8fa 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -328,7 +328,7 @@ mod test { "stateRoot": HASH_01, "receiptsRoot": HASH_00, "logsBloom": LOGS_BLOOM_01, - "random": HASH_01, + "prevRandao": HASH_01, "blockNumber": "0x0", "gasLimit": "0x1", "gasUsed": "0x2", @@ -497,7 +497,7 @@ mod test { }, Some(PayloadAttributes { timestamp: 5, - random: Hash256::zero(), + prev_randao: Hash256::zero(), suggested_fee_recipient: Address::repeat_byte(0), }), ) @@ -514,7 +514,7 @@ mod test { }, { "timestamp":"0x5", - "random": HASH_00, + "prevRandao": HASH_00, "suggestedFeeRecipient": ADDRESS_00 }] }), @@ -576,7 +576,7 @@ mod test { state_root: Hash256::repeat_byte(1), receipts_root: Hash256::repeat_byte(0), logs_bloom: vec![1; 256].into(), - random: Hash256::repeat_byte(1), + prev_randao: Hash256::repeat_byte(1), block_number: 0, gas_limit: 1, gas_used: 2, @@ -598,7 +598,7 @@ mod test { "stateRoot": HASH_01, "receiptsRoot": HASH_00, "logsBloom": LOGS_BLOOM_01, - "random": HASH_01, + "prevRandao": HASH_01, "blockNumber": "0x0", "gasLimit": "0x1", "gasUsed": "0x2", @@ -715,7 +715,7 @@ mod test { }, Some(PayloadAttributes { timestamp: 5, - random: Hash256::zero(), + prev_randao: Hash256::zero(), suggested_fee_recipient: 
Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), }) ) @@ -732,7 +732,7 @@ mod test { }, { "timestamp":"0x5", - "random": HASH_00, + "prevRandao": HASH_00, "suggestedFeeRecipient":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" }] }) @@ -762,7 +762,7 @@ mod test { }, Some(PayloadAttributes { timestamp: 5, - random: Hash256::zero(), + prev_randao: Hash256::zero(), suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), }) ) @@ -806,7 +806,7 @@ mod test { "stateRoot":"0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45", "receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "logsBloom": LOGS_BLOOM_00, - "random": HASH_00, + "prevRandao": HASH_00, "blockNumber":"0x1", "gasLimit":"0x1c95111", "gasUsed":"0x0", @@ -829,7 +829,7 @@ mod test { state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), receipts_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), logs_bloom: vec![0; 256].into(), - random: Hash256::zero(), + prev_randao: Hash256::zero(), block_number: 1, gas_limit: u64::from_str_radix("1c95111",16).unwrap(), gas_used: 0, @@ -854,7 +854,7 @@ mod test { state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), receipts_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), logs_bloom: vec![0; 256].into(), - random: Hash256::zero(), + prev_randao: Hash256::zero(), block_number: 1, gas_limit: u64::from_str_radix("1c9c380",16).unwrap(), gas_used: 0, @@ -876,7 +876,7 @@ mod test { "stateRoot":"0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45", "receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "logsBloom": LOGS_BLOOM_00, - "random": HASH_00, + "prevRandao": HASH_00, "blockNumber":"0x1", 
"gasLimit":"0x1c9c380", "gasUsed":"0x0", diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index e9559e894cc..8febe451d33 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -64,7 +64,7 @@ pub struct JsonExecutionPayloadV1 { pub receipts_root: Hash256, #[serde(with = "serde_logs_bloom")] pub logs_bloom: FixedVector, - pub random: Hash256, + pub prev_randao: Hash256, #[serde(with = "eth2_serde_utils::u64_hex_be")] pub block_number: u64, #[serde(with = "eth2_serde_utils::u64_hex_be")] @@ -91,7 +91,7 @@ impl From> for JsonExecutionPayloadV1 { state_root, receipts_root, logs_bloom, - random, + prev_randao, block_number, gas_limit, gas_used, @@ -108,7 +108,7 @@ impl From> for JsonExecutionPayloadV1 { state_root, receipts_root, logs_bloom, - random, + prev_randao, block_number, gas_limit, gas_used, @@ -130,7 +130,7 @@ impl From> for ExecutionPayload { state_root, receipts_root, logs_bloom, - random, + prev_randao, block_number, gas_limit, gas_used, @@ -147,7 +147,7 @@ impl From> for ExecutionPayload { state_root, receipts_root, logs_bloom, - random, + prev_randao, block_number, gas_limit, gas_used, @@ -165,7 +165,7 @@ impl From> for ExecutionPayload { pub struct JsonPayloadAttributesV1 { #[serde(with = "eth2_serde_utils::u64_hex_be")] pub timestamp: u64, - pub random: Hash256, + pub prev_randao: Hash256, pub suggested_fee_recipient: Address, } @@ -174,13 +174,13 @@ impl From for JsonPayloadAttributesV1 { // Use this verbose deconstruction pattern to ensure no field is left unused. let PayloadAttributes { timestamp, - random, + prev_randao, suggested_fee_recipient, } = p; Self { timestamp, - random, + prev_randao, suggested_fee_recipient, } } @@ -191,13 +191,13 @@ impl From for PayloadAttributes { // Use this verbose deconstruction pattern to ensure no field is left unused. 
let JsonPayloadAttributesV1 { timestamp, - random, + prev_randao, suggested_fee_recipient, } = j; Self { timestamp, - random, + prev_randao, suggested_fee_recipient, } } diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index f51313299c6..550154cdf54 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -51,7 +51,7 @@ impl Logging { struct PayloadIdCacheKey { pub head_block_hash: ExecutionBlockHash, pub timestamp: u64, - pub random: Hash256, + pub prev_randao: Hash256, pub suggested_fee_recipient: Address, } @@ -78,7 +78,7 @@ impl Engine { &self, head_block_hash: ExecutionBlockHash, timestamp: u64, - random: Hash256, + prev_randao: Hash256, suggested_fee_recipient: Address, ) -> Option { self.payload_id_cache @@ -87,7 +87,7 @@ impl Engine { .get(&PayloadIdCacheKey { head_block_hash, timestamp, - random, + prev_randao, suggested_fee_recipient, }) .cloned() @@ -416,7 +416,7 @@ impl PayloadIdCacheKey { Self { head_block_hash: state.head_block_hash, timestamp: attributes.timestamp, - random: attributes.random, + prev_randao: attributes.prev_randao, suggested_fee_recipient: attributes.suggested_fee_recipient, } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 90d58c1ccaf..0138f15cf22 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -434,7 +434,7 @@ impl ExecutionLayer { &self, parent_hash: ExecutionBlockHash, timestamp: u64, - random: Hash256, + prev_randao: Hash256, finalized_block_hash: ExecutionBlockHash, proposer_index: u64, ) -> Result, Error> { @@ -444,14 +444,14 @@ impl ExecutionLayer { self.log(), "Issuing engine_getPayload"; "suggested_fee_recipient" => ?suggested_fee_recipient, - "random" => ?random, + "prev_randao" => ?prev_randao, "timestamp" => timestamp, "parent_hash" => ?parent_hash, ); self.engines() .first_success(|engine| async move { let 
payload_id = if let Some(id) = engine - .get_payload_id(parent_hash, timestamp, random, suggested_fee_recipient) + .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) .await { // The payload id has been cached for this engine. @@ -470,7 +470,7 @@ impl ExecutionLayer { }; let payload_attributes = PayloadAttributes { timestamp, - random, + prev_randao, suggested_fee_recipient, }; diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 52accad3a1f..b61092cf0e4 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -326,7 +326,7 @@ impl ExecutionBlockGenerator { receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), logs_bloom: vec![0; 256].into(), - random: attributes.random, + prev_randao: attributes.prev_randao, block_number: parent.block_number() + 1, gas_limit: GAS_LIMIT, gas_used: GAS_USED, diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index ba4a21300a0..d4ff5a2d78c 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -120,7 +120,7 @@ impl MockExecutionLayer { let parent_hash = latest_execution_block.block_hash(); let block_number = latest_execution_block.block_number() + 1; let timestamp = block_number; - let random = Hash256::from_low_u64_be(block_number); + let prev_randao = Hash256::from_low_u64_be(block_number); let finalized_block_hash = parent_hash; self.el @@ -129,7 +129,7 @@ impl MockExecutionLayer { ExecutionBlockHash::zero(), Some(PayloadAttributes { timestamp, - random, + prev_randao, suggested_fee_recipient: Address::repeat_byte(42), }), ) @@ -142,7 +142,7 @@ impl MockExecutionLayer 
{ .get_payload::( parent_hash, timestamp, - random, + prev_randao, finalized_block_hash, validator_index, ) @@ -152,7 +152,7 @@ impl MockExecutionLayer { assert_eq!(payload.parent_hash, parent_hash); assert_eq!(payload.block_number, block_number); assert_eq!(payload.timestamp, timestamp); - assert_eq!(payload.random, random); + assert_eq!(payload.prev_randao, prev_randao); let status = self.el.notify_new_payload(&payload).await.unwrap(); assert_eq!(status, PayloadStatus::Valid); diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 72cb3a7ee1f..bb85d063e98 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -772,6 +772,7 @@ impl Worker { } // TODO(merge): reconsider peer scoring for this event. Err(e @BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_))) + | Err(e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::UnverifiedNonOptimisticCandidate)) | Err(e @BlockError::ExecutionPayloadError(ExecutionPayloadError::NoExecutionConnection)) => { debug!(self.log, "Could not verify block for gossip, ignoring the block"; "error" => %e); diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 2ec764b3745..d1405cec3d3 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -93,7 +93,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("target-peers") .long("target-peers") .help("The target number of peers.") - .default_value("50") + .default_value("80") .takes_value(true), ) .arg( diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index f719a3a2b19..22d279d8b77 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -17,7 +17,7 @@ * [Create a validator](./validator-create.md) * [Key recovery](./key-recovery.md) * [Validator Management](./validator-management.md) - * [Importing from the Eth2 
Launchpad](./validator-import-launchpad.md) + * [Importing from the Staking Launchpad](./validator-import-launchpad.md) * [Slashing Protection](./slashing-protection.md) * [Voluntary Exits](./voluntary-exit.md) * [Validator Monitoring](./validator-monitoring.md) diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index 461f33f2202..c79ddab01ff 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -46,7 +46,7 @@ still function if it is behind a NAT without any port mappings. Although Lighthouse still functions, we recommend that some mechanism is used to ensure that your Lighthouse node is publicly accessible. This will typically improve your peer count, allow the scoring system to find the best/most favourable -peers for your node and overall improve the eth2 network. +peers for your node and overall improve the Ethereum consensus network. Lighthouse currently supports UPnP. If UPnP is enabled on your router, Lighthouse will automatically establish the port mappings for you (the beacon @@ -63,7 +63,7 @@ settings allow you construct your initial ENR. Their primary intention is for setting up boot-like nodes and having a contactable ENR on boot. On normal operation of a Lighthouse node, none of these flags need to be set. Setting these flags incorrectly can lead to your node being incorrectly added to the -global DHT which will degrades the discovery process for all Eth2 peers. +global DHT which will degrade the discovery process for all Ethereum consensus peers. The ENR of a Lighthouse node is initially set to be non-contactable. 
The in-built discovery mechanism can determine if you node is publicly accessible, diff --git a/book/src/api-bn.md b/book/src/api-bn.md index b82a8da1d03..f806f8783e9 100644 --- a/book/src/api-bn.md +++ b/book/src/api-bn.md @@ -1,6 +1,6 @@ # Beacon Node API -Lighthouse implements the standard [Eth2 Beacon Node API +Lighthouse implements the standard [Beacon Node API specification][OpenAPI]. Please follow that link for a full description of each API endpoint. ## Starting the server @@ -22,7 +22,7 @@ The following CLI flags control the HTTP server: - `--http-tls-cert`: specify the path to the certificate file for Lighthouse to use. - `--http-tls-key`: specify the path to the private key file for Lighthouse to use. -The schema of the API aligns with the standard Eth2 Beacon Node API as defined +The schema of the API aligns with the standard Beacon Node API as defined at [github.com/ethereum/beacon-APIs](https://github.com/ethereum/beacon-APIs). An interactive specification is available [here][OpenAPI]. @@ -64,7 +64,7 @@ lighthouse bn --http ## HTTP Request/Response Examples This section contains some simple examples of using the HTTP API via `curl`. -All endpoints are documented in the [Eth2 Beacon Node API +All endpoints are documented in the [Beacon Node API specification][OpenAPI]. ### View the head of the beacon chain diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 77800b5396e..ea282cf2bcb 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -199,23 +199,23 @@ See [Validator Inclusion APIs](./validator-inclusion.md). ### `/lighthouse/eth1/syncing` -Returns information regarding the Eth1 network, as it is required for use in -Eth2 +Returns information regarding the execution layer, as it is required for use in +the consensus layer #### Fields - `head_block_number`, `head_block_timestamp`: the block number and timestamp -from the very head of the Eth1 chain. 
Useful for understanding the immediate -health of the Eth1 node that the beacon node is connected to. +from the very head of the execution chain. Useful for understanding the immediate +health of the execution node that the beacon node is connected to. - `latest_cached_block_number` & `latest_cached_block_timestamp`: the block number and timestamp of the latest block we have in our block cache. - - For correct Eth1 voting this timestamp should be later than the + - For correct execution client voting this timestamp should be later than the `voting_period_start_timestamp`. -- `voting_target_timestamp`: The latest timestamp allowed for an eth1 block in this voting period. +- `voting_target_timestamp`: The latest timestamp allowed for an execution layer block in this voting period. - `eth1_node_sync_status_percentage` (float): An estimate of how far the head of the - Eth1 node is from the head of the Eth1 chain. - - `100.0` indicates a fully synced Eth1 node. - - `0.0` indicates an Eth1 node that has not verified any blocks past the + execution node is from the head of the execution chain. + - `100.0` indicates a fully synced execution node. + - `0.0` indicates an execution node that has not verified any blocks past the genesis block. - `lighthouse_is_cached_and_ready`: Is set to `true` if the caches in the beacon node are ready for block production. @@ -248,7 +248,7 @@ curl -X GET "http://localhost:5052/lighthouse/eth1/syncing" -H "accept: applica ### `/lighthouse/eth1/block_cache` -Returns a list of all the Eth1 blocks in the Eth1 voting cache. +Returns a list of all the execution layer blocks in the execution client voting cache. #### Example @@ -320,7 +320,7 @@ curl -X GET "http://localhost:5052/lighthouse/eth1/deposit_cache" -H "accept: a Obtains a `BeaconState` in SSZ bytes. Useful for obtaining a genesis state. 
-The `state_id` parameter is identical to that used in the [Standard Eth2.0 Beacon Node API +The `state_id` parameter is identical to that used in the [Standard Beacon Node API `beacon/state` routes](https://ethereum.github.io/beacon-APIs/#/Beacon/getStateRoot). diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 1066d5ef3a2..ae091130f3f 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -6,7 +6,7 @@ HTTP Path | Description | | --- | -- | [`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version. [`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine. -[`GET /lighthouse/spec`](#get-lighthousespec) | Get the Eth2 specification used by the validator. +[`GET /lighthouse/spec`](#get-lighthousespec) | Get the Ethereum proof-of-stake consensus specification used by the validator. [`GET /lighthouse/auth`](#get-lighthouseauth) | Get the location of the authorization token. [`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators. [`GET /lighthouse/validators/:voting_pubkey`](#get-lighthousevalidatorsvoting_pubkey) | Get a specific validator. @@ -79,7 +79,7 @@ Typical Responses | 200 ## `GET /lighthouse/spec` -Returns the Eth2 specification loaded for this validator. +Returns the Ethereum proof-of-stake consensus specification loaded for this validator. ### HTTP Specification diff --git a/book/src/api.md b/book/src/api.md index 56c1ff5ce00..f8c54ad9a91 100644 --- a/book/src/api.md +++ b/book/src/api.md @@ -1,6 +1,6 @@ # APIs -Lighthouse allows users to query the state of Eth2.0 using web-standard, +Lighthouse allows users to query the state of Ethereum consensus using web-standard, RESTful HTTP/JSON APIs. 
There are two APIs served by Lighthouse: diff --git a/book/src/cli.md b/book/src/cli.md index 60c87d5dcc3..6540d3fc3a0 100644 --- a/book/src/cli.md +++ b/book/src/cli.md @@ -1,6 +1,6 @@ # Command-Line Interface (CLI) -The `lighthouse` binary provides all necessary Ethereum 2.0 functionality. It +The `lighthouse` binary provides all necessary Ethereum consensus client functionality. It has two primary sub-commands: - `$ lighthouse beacon_node`: the largest and most fundamental component which connects to @@ -48,7 +48,7 @@ maintained by Sigma Prime. However, for developers, testnets can be created by following the instructions outlined in [testnets](./testnets.md). The steps listed here will create a -local database specified to a new testnet. +local database specified to a new testnet. ## Resuming from an existing database diff --git a/book/src/contributing.md b/book/src/contributing.md index da18439c866..9204ff84638 100644 --- a/book/src/contributing.md +++ b/book/src/contributing.md @@ -35,10 +35,10 @@ Lighthouse maintains two permanent branches: - [`unstable`][unstable]: Used for development, contains the latest PRs. - Developers should base thier PRs on this branch. -## Ethereum 2.0 +## Ethereum consensus client -Lighthouse is an implementation of the Ethereum 2.0 specification, as defined -in the [ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs) +Lighthouse is an implementation of the Ethereum proof-of-stake consensus specification, as defined +in the [ethereum/consensus-specs](https://github.com/ethereum/consensus-specs) repository. We recommend reading Danny Ryan's (incomplete) [Phase 0 for diff --git a/book/src/faq.md b/book/src/faq.md index 02a4bfea669..e14947fb053 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -12,68 +12,68 @@ ### Why does it take so long for a validator to be activated? 
-After validators create their Eth1 deposit transaction there are two waiting +After validators create their execution layer deposit transaction there are two waiting periods before they can start producing blocks and attestations: -1. Waiting for the beacon chain to recognise the Eth1 block containing the +1. Waiting for the beacon chain to recognise the execution layer block containing the deposit (generally 4 to 7.4 hours). 1. Waiting in the queue for validator activation (generally 6.4 minutes for every 4 validators in the queue). Detailed answers below: -#### 1. Waiting for the beacon chain to detect the Eth1 deposit +#### 1. Waiting for the beacon chain to detect the execution layer deposit -Since the beacon chain uses Eth1 for validator on-boarding, beacon chain +Since the beacon chain uses the execution layer for validator on-boarding, beacon chain validators must listen to event logs from the deposit contract. Since the -latest blocks of the Eth1 chain are vulnerable to re-orgs due to minor network -partitions, beacon nodes follow the Eth1 chain at a distance of 1,024 blocks +latest blocks of the execution chain are vulnerable to re-orgs due to minor network +partitions, beacon nodes follow the execution chain at a distance of 1,024 blocks (~4 hours) (see -[`ETH1_FOLLOW_DISTANCE`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#misc)). +[`ETH1_FOLLOW_DISTANCE`](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/validator.md#misc)). This follow distance protects the beacon chain from on-boarding validators that -are likely to be removed due to an Eth1 re-org. +are likely to be removed due to an execution chain re-org. Now we know there's a 4 hours delay before the beacon nodes even _consider_ an -Eth1 block. Once they _are_ considering these blocks, there's a voting period -where beacon validators vote on which Eth1 to include in the beacon chain. This +execution layer block. 
Once they _are_ considering these blocks, there's a voting period +where beacon validators vote on which execution block hash to include in the beacon chain. This period is defined as 32 epochs (~3.4 hours, see -[`ETH1_VOTING_PERIOD`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#time-parameters)). +[`ETH1_VOTING_PERIOD`](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#time-parameters)). During this voting period, each beacon block producer includes an -[`Eth1Data`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#eth1data) +[`Eth1Data`](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#eth1data) in their block which counts as a vote towards what that validator considers to -be the head of the Eth1 chain at the start of the voting period (with respect +be the head of the execution chain at the start of the voting period (with respect to `ETH1_FOLLOW_DISTANCE`, of course). You can see the exact voting logic -[here](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#eth1-data). +[here](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/validator.md#eth1-data). -These two delays combined represent the time between an Eth1 deposit being -included in an Eth1 data vote and that validator appearing in the beacon chain. +These two delays combined represent the time between an execution layer deposit being +included in an execution data vote and that validator appearing in the beacon chain. The `ETH1_FOLLOW_DISTANCE` delay causes a minimum delay of ~4 hours and `ETH1_VOTING_PERIOD` means that if a validator deposit happens just _before_ the start of a new voting period then they might not notice this delay at all. However, if the validator deposit happens just _after_ the start of the new voting period the validator might have to wait ~3.4 hours for next voting period. 
In times of very, very severe network issues, the network may even fail -to vote in new Eth1 blocks, stopping all new validator deposits! +to vote in new execution layer blocks, stopping all new validator deposits! #### 2. Waiting for a validator to be activated If a validator has provided an invalid public key or signature, they will _never_ be activated. They will simply be forgotten by the beacon chain! But, if those parameters were -correct, once the Eth1 delays have elapsed and the validator appears in the +correct, once the execution layer delays have elapsed and the validator appears in the beacon chain, there's _another_ delay before the validator becomes "active" (canonical definition -[here](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_active_validator)) and can start producing blocks and attestations. +[here](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_active_validator)) and can start producing blocks and attestations. Firstly, the validator won't become active until their beacon chain balance is equal to or greater than -[`MAX_EFFECTIVE_BALANCE`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#gwei-values) +[`MAX_EFFECTIVE_BALANCE`](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#gwei-values) (32 ETH on mainnet, usually 3.2 ETH on testnets). Once this balance is reached, the validator must wait until the start of the next epoch (up to 6.4 minutes) for the -[`process_registry_updates`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#registry-updates) +[`process_registry_updates`](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#registry-updates) routine to run. 
This routine activates validators with respect to a [churn -limit](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_validator_churn_limit); +limit](https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_validator_churn_limit); it will only allow the number of validators to increase (churn) by a certain amount. Up until there are about 330,000 validators this churn limit is set to 4 and it starts to very slowly increase as the number of validators increases @@ -161,7 +161,7 @@ Nov 30 21:04:28.268 WARN Syncing eth1 block cache est_blocks_remaining: initia ``` This log indicates that your beacon node is downloading blocks and deposits -from your eth1 node. When the `est_blocks_remaining` is +from your execution node. When the `est_blocks_remaining` is `initializing_deposits`, your node is downloading deposit logs. It may stay in this stage for several minutes. Once the deposits logs are finished downloading, the `est_blocks_remaining` value will start decreasing. @@ -170,7 +170,7 @@ It is perfectly normal to see this log when starting a node for the first time or after being off for more than several minutes. If this log continues appearing sporadically during operation, there may be an -issue with your eth1 endpoint. +issue with your execution client endpoint. ### Can I use redundancy in my staking setup? diff --git a/book/src/http.md b/book/src/http.md index 0f9cd124d69..82a688586b0 100644 --- a/book/src/http.md +++ b/book/src/http.md @@ -13,7 +13,7 @@ The following CLI flags control the HTTP server: - `--http-port`: specify the listen port of the server. - `--http-address`: specify the listen address of the server. -The schema of the API aligns with the standard Eth2 Beacon Node API as defined +The schema of the API aligns with the standard Ethereum Beacon Node API as defined at [github.com/ethereum/beacon-APIs](https://github.com/ethereum/beacon-APIs). 
It is an easy-to-use RESTful HTTP/JSON API. An interactive specification is available [here](https://ethereum.github.io/beacon-APIs/). diff --git a/book/src/intro.md b/book/src/intro.md index b31deeef884..fca075892b1 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -7,11 +7,11 @@ _Documentation for Lighthouse users and developers._ [Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da [Chat Link]: https://discord.gg/cyAszAh -Lighthouse is an **Ethereum 2.0 client** that connects to other Ethereum 2.0 +Lighthouse is an **Ethereum consensus client** that connects to other Ethereum consensus clients to form a resilient and decentralized proof-of-stake blockchain. We implement the specification as defined in the -[ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs) repository. +[ethereum/consensus-specs](https://github.com/ethereum/consensus-specs) repository. ## Topics diff --git a/book/src/key-management.md b/book/src/key-management.md index 23d11d550c0..30d649f3463 100644 --- a/book/src/key-management.md +++ b/book/src/key-management.md @@ -3,7 +3,7 @@ [launchpad]: https://launchpad.ethereum.org/ > -> **Note: we recommend using the [Eth2 launchpad][launchpad] to create validators.** +> **Note: we recommend using the [Staking launchpad][launchpad] to create validators.** Lighthouse uses a _hierarchical_ key management system for producing validator keys. It is hierarchical because each validator key can be _derived_ from a @@ -92,7 +92,7 @@ leaking private key data. ### Withdrawal Keypairs -In Eth2 Phase 0, withdrawal keypairs do not serve any immediate purpose. +In Ethereum consensus Phase 0, withdrawal keypairs do not serve any immediate purpose. However, they become very important _after_ Phase 0: they will provide the ultimate control of the ETH of withdrawn validators. 
diff --git a/book/src/key-recovery.md b/book/src/key-recovery.md index e45887c29c6..2474d123caf 100644 --- a/book/src/key-recovery.md +++ b/book/src/key-recovery.md @@ -48,7 +48,7 @@ which contains all the information necessary to run a validator using the `lighthouse vc` command. The password to this new keystore will be placed in the `--secrets-dir` (default `~/.lighthouse/{network}/secrets`). -where `network` is the name of the Eth2 network passed in the `--network` parameter (default is `mainnet`). +where `network` is the name of the consensus layer network passed in the `--network` parameter (default is `mainnet`). ## Recover a EIP-2386 wallet diff --git a/book/src/mainnet-validator.md b/book/src/mainnet-validator.md index c5881c60669..0f91b8e272b 100644 --- a/book/src/mainnet-validator.md +++ b/book/src/mainnet-validator.md @@ -1,4 +1,4 @@ -# Become an Eth2 Mainnet Validator +# Become an Ethereum Consensus Mainnet Validator [launchpad]: https://launchpad.ethereum.org/ [lh-book]: https://lighthouse-book.sigmaprime.io/ @@ -8,18 +8,18 @@ [slashing]: ./slashing-protection.md [discord]: https://discord.gg/cyAszAh -Becoming an Eth2 validator is rewarding, but it's not for the faint of heart. You'll need to be +Becoming an Ethereum consensus validator is rewarding, but it's not for the faint of heart. You'll need to be familiar with the rules of staking (e.g., rewards, penalties, etc.) and also configuring and managing servers. You'll also need at least 32 ETH! -For those with an understanding of Eth2 and server maintenance, you'll find that running Lighthouse +For those with an understanding of Ethereum consensus and server maintenance, you'll find that running Lighthouse is easy. Install it, start it, monitor it and keep it updated. You shouldn't need to interact with it on a day-to-day basis. Being educated is critical to validator success. 
Before submitting your mainnet deposit, we recommend: -- Thoroughly exploring the [Eth2 Launchpad][launchpad] website +- Thoroughly exploring the [Staking Launchpad][launchpad] website - Try running through the deposit process *without* actually submitting a deposit. - Reading through this documentation, especially the [Slashing Protection][slashing] section. - Running a [testnet validator][testnet-validator]. @@ -37,7 +37,7 @@ Remember, if you get stuck you can always reach out on our [Discord][discord]. > occured through the use of Lighthouse. We have an experienced internal security team and have > undergone multiple third-party security-reviews, however the possibility of bugs or malicious > interference remains a real and constant threat. Validators should be prepared to lose some rewards -> due to the actions of other actors on the Eth2 network or software bugs. See the +> due to the actions of other actors on the consensus layer or software bugs. See the > [software license][license] for more detail on liability. ## Using Lighthouse for Mainnet @@ -57,7 +57,7 @@ provide a `--network` flag instead of relying on the default. There are five primary steps to become a testnet validator: 1. Create validator keys and submit deposits. -1. Start an Eth1 client. +1. Start an execution client. 1. Install Lighthouse. 1. Import the validator keys into Lighthouse. 1. Start Lighthouse. @@ -68,10 +68,10 @@ setting aside one or two hours for this process. ### Step 1. Create validator keys -The Ethereum Foundation provides an "Eth2 launch pad" for creating validator keypairs and submitting +The Ethereum Foundation provides a "Staking Launchpad" for creating validator keypairs and submitting deposits: -- [Eth2 Launchpad][launchpad] +- [Staking Launchpad][launchpad] Please follow the steps on the launch pad site to generate validator keys and submit deposits. Make sure you select "Lighthouse" as your client. @@ -79,10 +79,10 @@ sure you select "Lighthouse" as your client. 
Move to the next step once you have completed the steps on the launch pad, including generating keys via the Python CLI and submitting gETH/ETH deposits. -### Step 2. Start an Eth1 client +### Step 2. Start an execution client -Since Eth2 relies upon the Eth1 chain for validator on-boarding, all Eth2 validators must have a -connection to an Eth1 node. +Since the consensus chain relies upon the execution chain for validator on-boarding, all consensus validators must have a +connection to an execution client. We provide instructions for using Geth, but you could use any client that implements the JSON RPC via HTTP. A fast-synced node is sufficient. @@ -95,7 +95,7 @@ geth. Otherwise [see here](https://github.com/ethereum/go-ethereum/wiki/Installi #### Starting Geth -Once you have geth installed, use this command to start your Eth1 node: +Once you have geth installed, use this command to start your execution node: ```bash geth --http @@ -116,7 +116,7 @@ its `--version` info. ### Step 4. Import validator keys to Lighthouse -When Lighthouse is installed, follow the [Importing from the Ethereum 2.0 Launch +When Lighthouse is installed, follow the [Importing from the Ethereum Staking Launch pad](./validator-import-launchpad.md) instructions so the validator client can perform your validator duties. diff --git a/book/src/redundancy.md b/book/src/redundancy.md index b01a01dd268..3409effb36e 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -5,7 +5,7 @@ There are three places in Lighthouse where redundancy is notable: 1. ✅ GOOD: Using a redundant Beacon node in `lighthouse vc --beacon-nodes` -1. ✅ GOOD: Using a redundant Eth1 node in `lighthouse bn --eth1-endpoints` +1. ✅ GOOD: Using a redundant execution node in `lighthouse bn --eth1-endpoints` 1. ☠️ BAD: Running redundant `lighthouse vc` instances with overlapping keypairs. 
I mention (3) since it is unsafe and should not be confused with the other two @@ -55,7 +55,7 @@ In our previous example we listed `http://192.168.1.1:5052` as a redundant node. Apart from having sufficient resources, the backup node should have the following flags: -- `--staking`: starts the HTTP API server and ensures the Eth1 chain is synced. +- `--staking`: starts the HTTP API server and ensures the execution chain is synced. - `--http-address 0.0.0.0`: this allows *any* external IP address to access the HTTP server (a firewall should be configured to deny unauthorized access to port `5052`). This is only required if your backup node is on a different host. @@ -92,23 +92,23 @@ There are 64 subnets and each validator will result in a subscription to *at least* one subnet. So, using the two aforementioned flags will result in resource consumption akin to running 64+ validators. -## Redundant Eth1 nodes +## Redundant execution nodes -Compared to redundancy in beacon nodes (see above), using redundant Eth1 nodes +Compared to redundancy in beacon nodes (see above), using redundant execution nodes is very straight-forward: 1. `lighthouse bn --eth1-endpoints http://localhost:8545` 1. `lighthouse bn --eth1-endpoints http://localhost:8545,http://192.168.0.1:8545` In the case of (1), any failure on `http://localhost:8545` will result in a -failure to update the Eth1 cache in the beacon node. Consistent failure over a +failure to update the execution client cache in the beacon node. Consistent failure over a period of hours may result in a failure in block production. -However, in the case of (2), the `http://192.168.0.1:8545` Eth1 endpoint will -be tried each time the first fails. Eth1 endpoints will be tried from first to +However, in the case of (2), the `http://192.168.0.1:8545` execution client endpoint will +be tried each time the first fails. Execution client endpoints will be tried from first to last in the list, until a successful response is obtained. 
-There is no need for special configuration on the Eth1 endpoint, all endpoints can (probably should) +There is no need for special configuration on the execution client endpoint, all endpoints can (probably should) be configured identically. > Note: When supplying multiple endpoints the `http://localhost:8545` address must be explicitly diff --git a/book/src/setup.md b/book/src/setup.md index e4e1d92ff54..cd9bce80366 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -10,7 +10,7 @@ base dependencies. The additional requirements for developers are: - [`ganache-cli`](https://github.com/trufflesuite/ganache-cli). This is used to - simulate the Eth1 chain during tests. You'll get failures during tests if you + simulate the execution chain during tests. You'll get failures during tests if you don't have `ganache-cli` available on your `PATH`. - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by some dependencies. See [`Installation Guide`](./installation.md) for more info. diff --git a/book/src/slashing-protection.md b/book/src/slashing-protection.md index a9bd3164537..9ae6c102e3f 100644 --- a/book/src/slashing-protection.md +++ b/book/src/slashing-protection.md @@ -1,6 +1,6 @@ # Slashing Protection -The security of Ethereum 2.0's proof of stake protocol depends on penalties for misbehaviour, known +The security of the Ethereum proof-of-stake protocol depends on penalties for misbehaviour, known as _slashings_. Validators that sign conflicting messages (blocks or attestations), can be slashed by other validators through the inclusion of a `ProposerSlashing` or `AttesterSlashing` on chain. 
diff --git a/book/src/testnet-validator.md b/book/src/testnet-validator.md index 0bcd58c9acd..98ba66c2445 100644 --- a/book/src/testnet-validator.md +++ b/book/src/testnet-validator.md @@ -3,17 +3,17 @@ [mainnet-validator]: ./mainnet-validator.md [prater-launchpad]: https://prater.launchpad.ethereum.org/ -Joining an Eth2 testnet is a great way to get familiar with staking in Phase 0. All users should +Joining an Ethereum consensus testnet is a great way to get familiar with staking in Phase 0. All users should experiment with a testnet prior to staking mainnet ETH. -To join a testnet, you can follow the [Become an Eth2 Mainnet Validator][mainnet-validator] +To join a testnet, you can follow the [Become an Ethereum consensus Mainnet Validator][mainnet-validator] instructions but with a few differences: -1. Use the appropriate Eth2 launchpad website: +1. Use the appropriate Staking launchpad website: - [Prater][prater-launchpad] 1. Instead of `--network mainnet`, use the appropriate network flag: - `--network prater`: Prater. -1. Use a Goerli Eth1 node instead of a mainnet one: +1. Use a Goerli execution node instead of a mainnet one: - For Geth, this means using `geth --goerli --http`. 1. Notice that Lighthouse will store its files in a different directory by default: - `~/.lighthouse/prater`: Prater. diff --git a/book/src/validator-create.md b/book/src/validator-create.md index e7c316a95f6..f13c449b9f8 100644 --- a/book/src/validator-create.md +++ b/book/src/validator-create.md @@ -3,7 +3,7 @@ [launchpad]: https://launchpad.ethereum.org/ > -> **Note: we recommend using the [Eth2 launchpad][launchpad] to create validators.** +> **Note: we recommend using the [Staking launchpad][launchpad] to create validators.** Validators are fundamentally represented by a BLS keypair. In Lighthouse, we use a [wallet](./wallet-create.md) to generate these keypairs. 
Once a wallet diff --git a/book/src/validator-doppelganger.md b/book/src/validator-doppelganger.md index 3fa29043d39..d880cce0ae4 100644 --- a/book/src/validator-doppelganger.md +++ b/book/src/validator-doppelganger.md @@ -6,7 +6,7 @@ From Lighthouse `v1.5.0`, the *Doppelganger Protection* feature is available for the Validator Client. Taken from the German *[doppelgänger]*, which translates literally to "double-walker", a -"doppelganger" in Eth2 refers to another instance of a validator running in a separate validator +"doppelganger" in the context of Ethereum proof-of-stake refers to another instance of a validator running in a separate validator process. As detailed in [Slashing Protection], running the same validator twice will inevitably result in slashing. diff --git a/book/src/validator-import-launchpad.md b/book/src/validator-import-launchpad.md index 6bde24732a1..aee9ac7b96c 100644 --- a/book/src/validator-import-launchpad.md +++ b/book/src/validator-import-launchpad.md @@ -1,9 +1,9 @@ -# Importing from the Ethereum 2.0 Launch pad +# Importing from the Ethereum Staking Launch pad -The [Eth2 Lauchpad](https://github.com/ethereum/eth2.0-deposit) is a website +The [Staking Launchpad](https://github.com/ethereum/eth2.0-deposit) is a website from the Ethereum Foundation which guides users how to use the [`eth2.0-deposit-cli`](https://github.com/ethereum/eth2.0-deposit-cli) -command-line program to generate Eth2 validator keys. +command-line program to generate consensus validator keys. The keys that are generated from `eth2.0-deposit-cli` can be easily loaded into a Lighthouse validator client (`lighthouse vc`). In fact, both of these @@ -20,7 +20,7 @@ Whilst following the steps on the website, users are instructed to download the repository. This `eth2-deposit-cli` script will generate the validator BLS keys into a `validator_keys` directory. 
We assume that the user's present-working-directory is the `eth2-deposit-cli` repository (this is where -you will be if you just ran the `./deposit.sh` script from the Eth2 Launch pad +you will be if you just ran the `./deposit.sh` script from the Staking Launch pad website). If this is not the case, simply change the `--directory` to point to the `validator_keys` directory. @@ -38,7 +38,7 @@ section, all other users can use: lighthouse --network mainnet account validator import --directory validator_keys ``` -Note: The user must specify the Eth2 network that they are importing the keys for using the `--network` flag. +Note: The user must specify the consensus client network that they are importing the keys for using the `--network` flag. After which they will be prompted for a password for each keystore discovered: diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index 67e17fecad9..e6fbc0f16f8 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -4,7 +4,7 @@ The `/lighthouse/validator_inclusion` API endpoints provide information on results of the proof-of-stake voting process used for finality/justification under Casper FFG. -These endpoints are not stable or included in the Eth2 standard API. As such, +These endpoints are not stable or included in the Ethereum consensus standard API. As such, they are subject to change or removal without a change in major release version. diff --git a/book/src/voluntary-exit.md b/book/src/voluntary-exit.md index 593bc9969b4..0a26cbac17e 100644 --- a/book/src/voluntary-exit.md +++ b/book/src/voluntary-exit.md @@ -15,7 +15,7 @@ This number can be much higher depending on how many other validators are queued Even though users can perform a voluntary exit in phase 0, they **cannot withdraw their exited funds at this point in time**. This implies that the staked funds are effectively **frozen** until withdrawals are enabled in future phases. 
-To understand the phased rollout strategy for Eth2, please visit . +To understand the phased rollout strategy for Ethereum upgrades, please visit . @@ -25,7 +25,7 @@ In order to initiate an exit, users can use the `lighthouse account validator ex - The `--keystore` flag is used to specify the path to the EIP-2335 voting keystore for the validator. -- The `--beacon-node` flag is used to specify a beacon chain HTTP endpoint that confirms to the [Eth2.0 Standard Beacon Node API](https://ethereum.github.io/beacon-APIs/) specifications. That beacon node will be used to validate and propagate the voluntary exit. The default value for this flag is `http://localhost:5052`. +- The `--beacon-node` flag is used to specify a beacon chain HTTP endpoint that conforms to the [Beacon Node API](https://ethereum.github.io/beacon-APIs/) specifications. That beacon node will be used to validate and propagate the voluntary exit. The default value for this flag is `http://localhost:5052`. - The `--network` flag is used to specify a particular Eth2 network (default is `mainnet`). diff --git a/book/src/wallet-create.md b/book/src/wallet-create.md index 17cac248b96..0ebb4491777 100644 --- a/book/src/wallet-create.md +++ b/book/src/wallet-create.md @@ -3,7 +3,7 @@ [launchpad]: https://launchpad.ethereum.org/ > -> **Note: we recommend using the [Eth2 launchpad][launchpad] to create validators.** +> **Note: we recommend using the [Staking launchpad][launchpad] to create validators.** A wallet allows for generating practically unlimited validators from an easy-to-remember 24-word string (a mnemonic). 
As long as that mnemonic is diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 8c89ab2e4e1..be58ae616c9 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -23,4 +23,5 @@ hex = "0.4.2" serde = "1.0.116" serde_derive = "1.0.116" serde_json = "1.0.66" +serde_yaml = "0.8.13" eth2_network_config = { path = "../common/eth2_network_config" } diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index 6b933013fc3..f4391f987a9 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -3,8 +3,6 @@ use clap::ArgMatches; use slog::{o, Drain, Level, Logger}; use eth2_network_config::Eth2NetworkConfig; -use std::fs::File; -use std::path::PathBuf; mod cli; pub mod config; mod server; @@ -86,15 +84,13 @@ fn main( // parse the CLI args into a useable config let config: BootNodeConfig = BootNodeConfig::new(bn_matches, eth2_network_config)?; - // Dump config if `dump-config` flag is set - let dump_config = clap_utils::parse_optional::(lh_matches, "dump-config")?; - if let Some(dump_path) = dump_config { - let config_sz = BootNodeConfigSerialization::from_config_ref(&config); - let mut file = File::create(dump_path) - .map_err(|e| format!("Failed to create dumped config: {:?}", e))?; - serde_json::to_writer(&mut file, &config_sz) - .map_err(|e| format!("Error serializing config: {:?}", e))?; - } + // Dump configs if `dump-config` or `dump-chain-config` flags are set + let config_sz = BootNodeConfigSerialization::from_config_ref(&config); + clap_utils::check_dump_configs::<_, T>( + lh_matches, + &config_sz, + ð2_network_config.chain_spec::()?, + )?; // Run the boot node if !lh_matches.is_present("immediate-shutdown") { diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index 0aa35b23337..b370eb0825b 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -13,3 +13,7 @@ dirs = "3.0.1" eth2_network_config = { path = "../eth2_network_config" } eth2_ssz = "0.4.1" ethereum-types = "0.12.1" +serde = "1.0.116" 
+serde_json = "1.0.59" +serde_yaml = "0.8.13" +types = { path = "../../consensus/types"} diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs index 3dd42f2a99f..1ebd2b1740f 100644 --- a/common/clap_utils/src/lib.rs +++ b/common/clap_utils/src/lib.rs @@ -6,6 +6,7 @@ use ethereum_types::U256 as Uint256; use ssz::Decode; use std::path::PathBuf; use std::str::FromStr; +use types::{ChainSpec, Config, EthSpec}; pub mod flags; @@ -52,6 +53,12 @@ pub fn get_eth2_network_config(cli_args: &ArgMatches) -> Result( }) .transpose() } + +/// Writes configs to file if `dump-config` or `dump-chain-config` flags are set +pub fn check_dump_configs( + matches: &ArgMatches, + config: S, + spec: &ChainSpec, +) -> Result<(), String> +where + S: serde::Serialize, + E: EthSpec, +{ + if let Some(dump_path) = parse_optional::(matches, "dump-config")? { + let mut file = std::fs::File::create(dump_path) + .map_err(|e| format!("Failed to open file for writing config: {:?}", e))?; + serde_json::to_writer(&mut file, &config) + .map_err(|e| format!("Error serializing config: {:?}", e))?; + } + if let Some(dump_path) = parse_optional::(matches, "dump-chain-config")? { + let chain_config = Config::from_chain_spec::(spec); + let mut file = std::fs::File::create(dump_path) + .map_err(|e| format!("Failed to open file for writing chain config: {:?}", e))?; + serde_yaml::to_writer(&mut file, &chain_config) + .map_err(|e| format!("Error serializing config: {:?}", e))?; + } + Ok(()) +} diff --git a/common/sensitive_url/src/lib.rs b/common/sensitive_url/src/lib.rs index b7e620485ad..7a3cbae20c6 100644 --- a/common/sensitive_url/src/lib.rs +++ b/common/sensitive_url/src/lib.rs @@ -10,7 +10,7 @@ pub enum SensitiveError { } // Wrapper around Url which provides a custom `Display` implementation to protect user secrets. 
-#[derive(Clone)] +#[derive(Clone, PartialEq)] pub struct SensitiveUrl { pub full: Url, pub redacted: String, diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 9f98dadf3b0..b153da4d534 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -928,6 +928,54 @@ where .is_descendant(self.fc_store.finalized_checkpoint().root, block_root) } + /// Returns `Ok(false)` if a block is not viable to be imported optimistically. + /// + /// ## Notes + /// + /// Equivalent to the function with the same name in the optimistic sync specs: + /// + /// https://github.com/ethereum/consensus-specs/blob/dev/sync/optimistic.md#helpers + pub fn is_optimistic_candidate_block( + &self, + current_slot: Slot, + block_slot: Slot, + block_parent_root: &Hash256, + spec: &ChainSpec, + ) -> Result> { + // If the block is sufficiently old, import it. + if block_slot + spec.safe_slots_to_import_optimistically <= current_slot { + return Ok(true); + } + + // If the justified block has execution enabled, then optimistically import any block. + if self + .get_justified_block()? + .execution_status + .is_execution_enabled() + { + return Ok(true); + } + + // If the parent block has execution enabled, always import the block. + // + // TODO(bellatrix): this condition has not yet been merged into the spec. + // + // See: + // + // https://github.com/ethereum/consensus-specs/pull/2844 + if self + .proto_array + .get_block(block_parent_root) + .map_or(false, |parent| { + parent.execution_status.is_execution_enabled() + }) + { + return Ok(true); + } + + Ok(false) + } + /// Return the current finalized checkpoint. 
pub fn finalized_checkpoint(&self) -> Checkpoint { *self.fc_store.finalized_checkpoint() diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index b0e8991a785..847235e0619 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -395,6 +395,12 @@ impl ProtoArray { // Collect all *ancestors* which were declared invalid since they reside between the // `head_block_root` and the `latest_valid_ancestor_root`. loop { + // If there is no latest valid block then exit this loop early and don't invalidate any + // blocks. + if !latest_valid_ancestor_is_descendant { + break; + } + let node = self .nodes .get_mut(index) @@ -404,17 +410,7 @@ impl ProtoArray { ExecutionStatus::Valid(hash) | ExecutionStatus::Invalid(hash) | ExecutionStatus::Unknown(hash) => { - // If we're no longer processing the `head_block_root` and the last valid - // ancestor is unknown, exit this loop and proceed to invalidate and - // descendants of `head_block_root`/`latest_valid_ancestor_root`. - // - // In effect, this means that if an unknown hash (junk or pre-finalization) is - // supplied, don't validate any ancestors. The alternative is to invalidate - // *all* ancestors, which would likely involve shutting down the client due to - // an invalid justified checkpoint. - if !latest_valid_ancestor_is_descendant && node.root != head_block_root { - break; - } else if Some(hash) == latest_valid_ancestor_hash { + if Some(hash) == latest_valid_ancestor_hash { // If the `best_child` or `best_descendant` of the latest valid hash was // invalidated, set those fields to `None`. 
// diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 1f5b997f670..2c1341be9e5 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1,5 +1,5 @@ use crate::error::Error; -use crate::proto_array::{ProposerBoost, ProtoArray}; +use crate::proto_array::{Iter, ProposerBoost, ProtoArray}; use crate::ssz_container::SszContainer; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; @@ -40,6 +40,10 @@ pub enum ExecutionStatus { } impl ExecutionStatus { + pub fn is_execution_enabled(&self) -> bool { + !matches!(self, ExecutionStatus::Irrelevant(_)) + } + pub fn irrelevant() -> Self { ExecutionStatus::Irrelevant(false) } @@ -341,6 +345,11 @@ impl ProtoArrayForkChoice { } } + /// See `ProtoArray::iter_nodes` + pub fn iter_nodes<'a>(&'a self, block_root: &Hash256) -> Iter<'a> { + self.proto_array.iter_nodes(block_root) + } + pub fn as_bytes(&self) -> Vec { SszContainer::from(self).as_ssz_bytes() } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index a874ce64284..f87756c122e 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -329,10 +329,10 @@ pub fn partially_verify_execution_payload( ); } block_verify!( - payload.random == *state.get_randao_mix(state.current_epoch())?, + payload.prev_randao == *state.get_randao_mix(state.current_epoch())?, BlockProcessingError::ExecutionRandaoMismatch { expected: *state.get_randao_mix(state.current_epoch())?, - found: payload.random, + found: payload.prev_randao, } ); @@ -368,7 +368,7 @@ pub fn process_execution_payload( state_root: payload.state_root, receipts_root: payload.receipts_root, logs_bloom: payload.logs_bloom.clone(), - random: payload.random, + prev_randao: payload.prev_randao, block_number: 
payload.block_number, gas_limit: payload.gas_limit, gas_used: payload.gas_used, diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 29c67808cc8..1ea34eafc4f 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -146,6 +146,7 @@ pub struct ChainSpec { pub terminal_total_difficulty: Uint256, pub terminal_block_hash: ExecutionBlockHash, pub terminal_block_hash_activation_epoch: Epoch, + pub safe_slots_to_import_optimistically: u64, /* * Networking @@ -551,6 +552,7 @@ impl ChainSpec { .expect("addition does not overflow"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), + safe_slots_to_import_optimistically: 128u64, /* * Network specific @@ -748,6 +750,7 @@ impl ChainSpec { .expect("addition does not overflow"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), + safe_slots_to_import_optimistically: 128u64, /* * Network specific @@ -791,6 +794,9 @@ pub struct Config { // TODO(merge): remove this default #[serde(default = "default_terminal_block_hash_activation_epoch")] pub terminal_block_hash_activation_epoch: Epoch, + // TODO(merge): remove this default + #[serde(default = "default_safe_slots_to_import_optimistically")] + pub safe_slots_to_import_optimistically: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] min_genesis_active_validator_count: u64, @@ -878,6 +884,10 @@ fn default_terminal_block_hash_activation_epoch() -> Epoch { Epoch::new(u64::MAX) } +fn default_safe_slots_to_import_optimistically() -> u64 { + 128u64 +} + impl Default for Config { fn default() -> Self { let chain_spec = MainnetEthSpec::default_spec(); @@ -935,6 +945,7 @@ impl Config { terminal_total_difficulty: spec.terminal_total_difficulty, terminal_block_hash: spec.terminal_block_hash, terminal_block_hash_activation_epoch: spec.terminal_block_hash_activation_epoch, + 
safe_slots_to_import_optimistically: spec.safe_slots_to_import_optimistically, min_genesis_active_validator_count: spec.min_genesis_active_validator_count, min_genesis_time: spec.min_genesis_time, @@ -985,6 +996,7 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, + safe_slots_to_import_optimistically, min_genesis_active_validator_count, min_genesis_time, genesis_fork_version, @@ -1040,6 +1052,7 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, + safe_slots_to_import_optimistically, ..chain_spec.clone() }) } @@ -1227,6 +1240,7 @@ mod yaml_tests { #TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638911 #TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000001 #TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551614 + #SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY: 2 MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 MIN_GENESIS_TIME: 1606824000 GENESIS_FORK_VERSION: 0x00000000 @@ -1266,6 +1280,10 @@ mod yaml_tests { chain_spec.terminal_block_hash_activation_epoch, default_terminal_block_hash_activation_epoch() ); + assert_eq!( + chain_spec.safe_slots_to_import_optimistically, + default_safe_slots_to_import_optimistically() + ); assert_eq!( chain_spec.bellatrix_fork_epoch, diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index fc37c1193bf..ab5e6b5aed3 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -21,7 +21,7 @@ pub struct ExecutionPayload { pub receipts_root: Hash256, #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub logs_bloom: FixedVector, - pub random: Hash256, + pub prev_randao: Hash256, #[serde(with = "eth2_serde_utils::quoted_u64")] pub block_number: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] diff --git a/consensus/types/src/execution_payload_header.rs 
b/consensus/types/src/execution_payload_header.rs index 1c173093a42..24390bcf4cb 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -15,7 +15,7 @@ pub struct ExecutionPayloadHeader { pub receipts_root: Hash256, #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub logs_bloom: FixedVector, - pub random: Hash256, + pub prev_randao: Hash256, #[serde(with = "eth2_serde_utils::quoted_u64")] pub block_number: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs index 04122d0e6b1..9e91f425a73 100644 --- a/lcli/src/create_payload_header.rs +++ b/lcli/src/create_payload_header.rs @@ -23,7 +23,7 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { base_fee_per_gas, timestamp: genesis_time, block_hash: eth1_block_hash, - random: eth1_block_hash.into_root(), + prev_randao: eth1_block_hash.into_root(), ..ExecutionPayloadHeader::default() }; let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 0b4b38b5896..dc9baafb02f 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -41,6 +41,7 @@ lighthouse_metrics = { path = "../common/lighthouse_metrics" } lazy_static = "1.4.0" serde = { version = "1.0.116", features = ["derive"] } serde_json = "1.0.59" +serde_yaml = "0.8.13" task_executor = { path = "../common/task_executor" } malloc_utils = { path = "../common/malloc_utils" } directory = { path = "../common/directory" } @@ -51,6 +52,7 @@ tempfile = "3.1.0" validator_dir = { path = "../common/validator_dir" } slashing_protection = { path = "../validator_client/slashing_protection" } lighthouse_network = { path = "../beacon_node/lighthouse_network" } +sensitive_url = { path = "../common/sensitive_url" } [[test]] name = "lighthouse_tests" diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 
2f04b95ca4a..b60f3404c8f 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -13,7 +13,6 @@ use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODE use lighthouse_version::VERSION; use malloc_utils::configure_memory_allocator; use slog::{crit, info, warn}; -use std::fs::File; use std::path::PathBuf; use std::process::exit; use task_executor::ShutdownReason; @@ -193,6 +192,14 @@ fn main() { .takes_value(true) .global(true) ) + .arg( + Arg::with_name("dump-chain-config") + .long("dump-chain-config") + .hidden(true) + .help("Dumps the chain config to a desired location. Used for testing only.") + .takes_value(true) + .global(true) + ) .arg( Arg::with_name("immediate-shutdown") .long("immediate-shutdown") @@ -251,6 +258,19 @@ fn main() { .takes_value(true) .global(true) ) + .arg( + Arg::with_name("safe-slots-to-import-optimistically") + .long("safe-slots-to-import-optimistically") + .value_name("INTEGER") + .help("Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY \ + parameter. This flag should only be used if the user has a clear understanding \ + that the broad Ethereum community has elected to override this parameter in the event \ + of an attack at the PoS transition block. Incorrect use of this flag can cause your \ + node to possibly accept an invalid chain or sync more slowly. Be extremely careful with \ + this flag.") + .takes_value(true) + .global(true) + ) .subcommand(beacon_node::cli_app()) .subcommand(boot_node::cli_app()) .subcommand(validator_client::cli_app()) @@ -481,14 +501,8 @@ fn run( let executor = context.executor.clone(); let config = beacon_node::get_config::(matches, &context)?; let shutdown_flag = matches.is_present("immediate-shutdown"); - if let Some(dump_path) = clap_utils::parse_optional::(matches, "dump-config")? 
- { - let mut file = File::create(dump_path) - .map_err(|e| format!("Failed to create dumped config: {:?}", e))?; - serde_json::to_writer(&mut file, &config) - .map_err(|e| format!("Error serializing config: {:?}", e))?; - }; - + // Dump configs if `dump-config` or `dump-chain-config` flags are set + clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; executor.clone().spawn( async move { if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await { @@ -514,13 +528,8 @@ fn run( let config = validator_client::Config::from_cli(matches, context.log()) .map_err(|e| format!("Unable to initialize validator config: {}", e))?; let shutdown_flag = matches.is_present("immediate-shutdown"); - if let Some(dump_path) = clap_utils::parse_optional::(matches, "dump-config")? - { - let mut file = File::create(dump_path) - .map_err(|e| format!("Failed to create dumped config: {:?}", e))?; - serde_json::to_writer(&mut file, &config) - .map_err(|e| format!("Error serializing config: {:?}", e))?; - }; + // Dump configs if `dump-config` or `dump-chain-config` flags are set + clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; if !shutdown_flag { executor.clone().spawn( async move { diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index c79a03eb5f5..721a06c93d5 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -10,7 +10,7 @@ use std::process::Command; use std::str::FromStr; use std::string::ToString; use tempfile::TempDir; -use types::{Address, Checkpoint, Epoch, Hash256}; +use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, Hash256, MainnetEthSpec}; use unused_port::{unused_tcp_port, unused_udp_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; @@ -206,7 +206,35 @@ fn eth1_purge_cache_flag() { .with_config(|config| assert!(config.eth1.purge_cache)); } -// Tests for Merge flags. +// Tests for Bellatrix flags. 
+#[test] +fn merge_flag() { + CommandLineTest::new() + .flag("merge", None) + .run_with_zero_port() + .with_config(|config| assert!(config.execution_endpoints.is_some())); +} +#[test] +fn merge_execution_endpoints_flag() { + use sensitive_url::SensitiveUrl; + let urls = vec!["http://sigp.io/no-way:1337", "http://infura.not_real:4242"]; + let endpoints = urls + .iter() + .map(|s| SensitiveUrl::parse(s).unwrap()) + .collect::>(); + let mut endpoint_arg = urls[0].to_string(); + for url in urls.into_iter().skip(1) { + endpoint_arg.push(','); + endpoint_arg.push_str(url); + } + // this is way better but intersperse is still a nightly feature :/ + // let endpoint_arg: String = urls.into_iter().intersperse(",").collect(); + CommandLineTest::new() + .flag("merge", None) + .flag("execution-endpoints", Some(&endpoint_arg)) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.execution_endpoints.as_ref(), Some(&endpoints))); +} #[test] fn merge_fee_recipient_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); @@ -236,6 +264,62 @@ fn merge_fee_recipient_flag() { ) }); } +#[test] +fn terminal_total_difficulty_override_flag() { + use beacon_node::beacon_chain::types::Uint256; + CommandLineTest::new() + .flag("terminal-total-difficulty-override", Some("1337424242")) + .run_with_zero_port() + .with_spec::(|spec| { + assert_eq!(spec.terminal_total_difficulty, Uint256::from(1337424242)) + }); +} +#[test] +fn terminal_block_hash_and_activation_epoch_override_flags() { + CommandLineTest::new() + .flag("terminal-block-hash-epoch-override", Some("1337")) + .flag( + "terminal-block-hash-override", + Some("0x4242424242424242424242424242424242424242424242424242424242424242"), + ) + .run_with_zero_port() + .with_spec::(|spec| { + assert_eq!( + spec.terminal_block_hash, + ExecutionBlockHash::from_str( + "0x4242424242424242424242424242424242424242424242424242424242424242" + ) + .unwrap() + ); + assert_eq!(spec.terminal_block_hash_activation_epoch, 
1337); + }); +} +#[test] +#[should_panic] +fn terminal_block_hash_missing_activation_epoch() { + CommandLineTest::new() + .flag( + "terminal-block-hash-override", + Some("0x4242424242424242424242424242424242424242424242424242424242424242"), + ) + .run_with_zero_port(); +} +#[test] +#[should_panic] +fn epoch_override_missing_terminal_block_hash() { + CommandLineTest::new() + .flag("terminal-block-hash-epoch-override", Some("1337")) + .run_with_zero_port(); +} +#[test] +fn safe_slots_to_import_optimistically_flag() { + CommandLineTest::new() + .flag("safe-slots-to-import-optimistically", Some("421337")) + .run_with_zero_port() + .with_spec::(|spec| { + assert_eq!(spec.safe_slots_to_import_optimistically, 421337) + }); +} // Tests for Network flags. #[test] @@ -423,6 +507,15 @@ fn zero_ports_flag() { assert_eq!(config.http_metrics.listen_port, 0); }); } +#[test] +fn network_load_flag() { + CommandLineTest::new() + .flag("network-load", Some("4")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.network.network_load, 4); + }); +} // Tests for ENR flags. 
#[test] @@ -540,6 +633,13 @@ fn http_allow_origin_all_flag() { .with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string()))); } #[test] +fn http_allow_sync_stalled_flag() { + CommandLineTest::new() + .flag("http-allow-sync-stalled", None) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.http_api.allow_sync_stalled, true)); +} +#[test] fn http_tls_flags() { let dir = TempDir::new().expect("Unable to create temporary directory"); CommandLineTest::new() diff --git a/lighthouse/tests/exec.rs b/lighthouse/tests/exec.rs index 9526a1caf8f..61e0677ca8c 100644 --- a/lighthouse/tests/exec.rs +++ b/lighthouse/tests/exec.rs @@ -5,6 +5,7 @@ use std::path::PathBuf; use std::process::{Command, Output}; use std::str::from_utf8; use tempfile::TempDir; +use types::{ChainSpec, Config, EthSpec}; pub trait CommandLineTestExec { type Config: DeserializeOwned; @@ -23,19 +24,22 @@ pub trait CommandLineTestExec { /// Executes the `Command` returned by `Self::cmd_mut` with temporary data directory, dumps /// the configuration and shuts down immediately. /// - /// Options `--datadir`, `--dump-config` and `--immediate-shutdown` must not be set on the - /// command. + /// Options `--datadir`, `--dump-config`, `--dump-chain-config` and `--immediate-shutdown` must + /// not be set on the command. fn run(&mut self) -> CompletedTest { // Setup temp directory. 
let tmp_dir = TempDir::new().expect("Unable to create temporary directory"); let tmp_config_path: PathBuf = tmp_dir.path().join("config.json"); + let tmp_chain_config_path: PathBuf = tmp_dir.path().join("chain_spec.yaml"); - // Add args --datadir --dump-config --immediate-shutdown + // Add args --datadir --dump-config --dump-chain-config --immediate-shutdown let cmd = self.cmd_mut(); cmd.arg("--datadir") .arg(tmp_dir.path().as_os_str()) - .arg(format!("--{}", "--dump-config")) + .arg(format!("--{}", "dump-config")) .arg(tmp_config_path.as_os_str()) + .arg(format!("--{}", "dump-chain-config")) + .arg(tmp_chain_config_path.as_os_str()) .arg("--immediate-shutdown"); // Run the command. @@ -47,23 +51,32 @@ pub trait CommandLineTestExec { // Grab the config. let config_file = File::open(tmp_config_path).expect("Unable to open dumped config"); let config: Self::Config = from_reader(config_file).expect("Unable to deserialize config"); + // Grab the chain config. + let spec_file = + File::open(tmp_chain_config_path).expect("Unable to open dumped chain spec"); + let chain_config: Config = + serde_yaml::from_reader(spec_file).expect("Unable to deserialize config"); - CompletedTest::new(config, tmp_dir) + CompletedTest::new(config, chain_config, tmp_dir) } /// Executes the `Command` returned by `Self::cmd_mut` dumps the configuration and /// shuts down immediately. /// - /// Options `--dump-config` and `--immediate-shutdown` must not be set on the command. + /// Options `--dump-config`, `--dump-chain-config` and `--immediate-shutdown` must not be set on + /// the command. fn run_with_no_datadir(&mut self) -> CompletedTest { // Setup temp directory. 
let tmp_dir = TempDir::new().expect("Unable to create temporary directory"); let tmp_config_path: PathBuf = tmp_dir.path().join("config.json"); + let tmp_chain_config_path: PathBuf = tmp_dir.path().join("chain_spec.yaml"); - // Add args --datadir --dump-config --immediate-shutdown + // Add args --datadir --dump-config --dump-chain-config --immediate-shutdown let cmd = self.cmd_mut(); - cmd.arg(format!("--{}", "--dump-config")) + cmd.arg(format!("--{}", "dump-config")) .arg(tmp_config_path.as_os_str()) + .arg(format!("--{}", "dump-chain-config")) + .arg(tmp_chain_config_path.as_os_str()) .arg("--immediate-shutdown"); // Run the command. @@ -75,8 +88,13 @@ pub trait CommandLineTestExec { // Grab the config. let config_file = File::open(tmp_config_path).expect("Unable to open dumped config"); let config: Self::Config = from_reader(config_file).expect("Unable to deserialize config"); + // Grab the chain config. + let spec_file = + File::open(tmp_chain_config_path).expect("Unable to open dumped chain spec"); + let chain_config: Config = + serde_yaml::from_reader(spec_file).expect("Unable to deserialize config"); - CompletedTest::new(config, tmp_dir) + CompletedTest::new(config, chain_config, tmp_dir) } } @@ -95,19 +113,35 @@ fn output_result(cmd: &mut Command) -> Result { pub struct CompletedTest { config: C, + chain_config: Config, dir: TempDir, } impl CompletedTest { - fn new(config: C, dir: TempDir) -> Self { - CompletedTest { config, dir } + fn new(config: C, chain_config: Config, dir: TempDir) -> Self { + CompletedTest { + config, + chain_config, + dir, + } } pub fn with_config(self, func: F) { func(&self.config); } + pub fn with_spec(self, func: F) { + let spec = ChainSpec::from_config::(&self.chain_config).unwrap(); + func(spec); + } + pub fn with_config_and_dir(self, func: F) { func(&self.config, &self.dir); } + + #[allow(dead_code)] + pub fn with_config_and_spec(self, func: F) { + let spec = ChainSpec::from_config::(&self.chain_config).unwrap(); + 
func(&self.config, spec); + } } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 816651bb45b..13d8f631cce 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.1.9 +TESTS_TAG := v1.1.10 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 3af06312927..c2e38de10e0 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -3,8 +3,6 @@ name = "execution_engine_integration" version = "0.1.0" edition = "2021" -build = "build.rs" - [dependencies] tempfile = "3.1.0" serde_json = "1.0.58" diff --git a/testing/execution_engine_integration/Makefile b/testing/execution_engine_integration/Makefile index 8bb2b592332..70620650666 100644 --- a/testing/execution_engine_integration/Makefile +++ b/testing/execution_engine_integration/Makefile @@ -1,5 +1,5 @@ test: - cargo test --release --locked + cargo run --release --locked clean: rm -rf execution_clients diff --git a/testing/execution_engine_integration/build.rs b/testing/execution_engine_integration/src/build_geth.rs similarity index 81% rename from testing/execution_engine_integration/build.rs rename to testing/execution_engine_integration/src/build_geth.rs index bedf74fbd15..772d3e3d85f 100644 --- a/testing/execution_engine_integration/build.rs +++ b/testing/execution_engine_integration/src/build_geth.rs @@ -3,10 +3,10 @@ use std::fs; use std::path::{Path, PathBuf}; use std::process::Command; -const GETH_BRANCH: &str = "merge-kiln"; +const GETH_BRANCH: &str = "merge-kiln-v2"; const GETH_REPO_URL: &str = "https://github.com/MariusVanDerWijden/go-ethereum"; -fn main() { +pub fn build() { let manifest_dir: PathBuf = env::var("CARGO_MANIFEST_DIR").unwrap().into(); let execution_clients_dir = manifest_dir.join("execution_clients"); @@ -52,11 +52,15 @@ 
fn build_geth(execution_clients_dir: &Path) { .success()); // Build geth - assert!(Command::new("make") + let make_result = Command::new("make") .arg("geth") .current_dir(&repo_dir) .output() - .expect("failed to make geth") - .status - .success()); + .expect("failed to make geth"); + + if !make_result.status.success() { + dbg!(String::from_utf8_lossy(&make_result.stdout)); + dbg!(String::from_utf8_lossy(&make_result.stderr)); + panic!("make failed"); + } } diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index cff36a025bd..84d72100848 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -9,7 +9,7 @@ use unused_port::unused_tcp_port; /// Defined for each EE type (e.g., Geth, Nethermind, etc). pub trait GenericExecutionEngine: Clone { fn init_datadir() -> TempDir; - fn start_client(datadir: &TempDir, http_port: u16) -> Child; + fn start_client(datadir: &TempDir, http_port: u16, http_auth_port: u16) -> Child; } /// Holds handle to a running EE process, plus some other metadata. @@ -19,6 +19,7 @@ pub struct ExecutionEngine { #[allow(dead_code)] datadir: TempDir, http_port: u16, + http_auth_port: u16, child: Child, } @@ -35,11 +36,13 @@ impl ExecutionEngine { pub fn new(engine: E) -> Self { let datadir = E::init_datadir(); let http_port = unused_tcp_port().unwrap(); - let child = E::start_client(&datadir, http_port); + let http_auth_port = unused_tcp_port().unwrap(); + let child = E::start_client(&datadir, http_port, http_auth_port); Self { engine, datadir, http_port, + http_auth_port, child, } } @@ -47,6 +50,11 @@ impl ExecutionEngine { pub fn http_url(&self) -> SensitiveUrl { SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap() } + + #[allow(dead_code)] // Future use. 
+ pub fn http_auth_url(&self) -> SensitiveUrl { + SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_auth_port)).unwrap() + } } /* @@ -90,7 +98,7 @@ impl GenericExecutionEngine for Geth { datadir } - fn start_client(datadir: &TempDir, http_port: u16) -> Child { + fn start_client(datadir: &TempDir, http_port: u16, http_auth_port: u16) -> Child { let network_port = unused_tcp_port().unwrap(); Command::new(Self::binary_path()) @@ -101,6 +109,8 @@ impl GenericExecutionEngine for Geth { .arg("engine,eth") .arg("--http.port") .arg(http_port.to_string()) + .arg("--http.authport") + .arg(http_auth_port.to_string()) .arg("--port") .arg(network_port.to_string()) .stdout(build_stdio()) diff --git a/testing/execution_engine_integration/src/lib.rs b/testing/execution_engine_integration/src/lib.rs deleted file mode 100644 index 19a73e6bf29..00000000000 --- a/testing/execution_engine_integration/src/lib.rs +++ /dev/null @@ -1,12 +0,0 @@ -/// This library provides integration testing between Lighthouse and other execution engines. -/// -/// See the `tests/tests.rs` file to run tests. -mod execution_engine; -mod genesis_json; -mod test_rig; - -pub use execution_engine::Geth; -pub use test_rig::TestRig; - -/// Set to `false` to send logs to the console during tests. Logs are useful when debugging. -const SUPPRESS_LOGS: bool = true; diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs new file mode 100644 index 00000000000..ef9cbd79483 --- /dev/null +++ b/testing/execution_engine_integration/src/main.rs @@ -0,0 +1,28 @@ +/// This binary runs integration tests between Lighthouse and execution engines. +/// +/// It will first attempt to build any supported integration clients, then it will run tests. +/// +/// A return code of `0` indicates the tests succeeded.
+mod build_geth; +mod execution_engine; +mod genesis_json; +mod test_rig; + +use execution_engine::Geth; +use test_rig::TestRig; + +/// Set to `false` to send logs to the console during tests. Logs are useful when debugging. +const SUPPRESS_LOGS: bool = false; + +fn main() { + if cfg!(windows) { + panic!("windows is not supported, only linux"); + } + + test_geth() +} + +fn test_geth() { + build_geth::build(); + TestRig::new(Geth).perform_tests_blocking(); +} diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index e7071278c7b..5edda525047 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -166,7 +166,7 @@ impl TestRig { let parent_hash = terminal_pow_block_hash; let timestamp = timestamp_now(); - let random = Hash256::zero(); + let prev_randao = Hash256::zero(); let finalized_block_hash = ExecutionBlockHash::zero(); let proposer_index = 0; let valid_payload = self @@ -175,7 +175,7 @@ impl TestRig { .get_payload::( parent_hash, timestamp, - random, + prev_randao, finalized_block_hash, proposer_index, ) @@ -238,7 +238,7 @@ impl TestRig { */ let mut invalid_payload = valid_payload.clone(); - invalid_payload.random = Hash256::from_low_u64_be(42); + invalid_payload.prev_randao = Hash256::from_low_u64_be(42); let status = self .ee_a .execution_layer @@ -255,7 +255,7 @@ impl TestRig { let parent_hash = valid_payload.block_hash; let timestamp = valid_payload.timestamp + 1; - let random = Hash256::zero(); + let prev_randao = Hash256::zero(); let finalized_block_hash = ExecutionBlockHash::zero(); let proposer_index = 0; let second_payload = self @@ -264,7 +264,7 @@ impl TestRig { .get_payload::( parent_hash, timestamp, - random, + prev_randao, finalized_block_hash, proposer_index, ) @@ -294,7 +294,7 @@ impl TestRig { let finalized_block_hash = ExecutionBlockHash::zero(); let payload_attributes = Some(PayloadAttributes { timestamp: 
second_payload.timestamp + 1, - random: Hash256::zero(), + prev_randao: Hash256::zero(), suggested_fee_recipient: Address::zero(), }); let status = self diff --git a/testing/execution_engine_integration/tests/tests.rs b/testing/execution_engine_integration/tests/tests.rs deleted file mode 100644 index d4fcb29dca8..00000000000 --- a/testing/execution_engine_integration/tests/tests.rs +++ /dev/null @@ -1,16 +0,0 @@ -#[cfg(not(target_family = "windows"))] -mod not_windows { - use execution_engine_integration::{Geth, TestRig}; - #[test] - fn geth() { - TestRig::new(Geth).perform_tests_blocking() - } -} - -#[cfg(target_family = "windows")] -mod windows { - #[test] - fn all_tests_skipped_on_windows() { - // - } -}