From c8b2324880ef3d3dd5f9233285cb59fc3701db71 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 4 Dec 2023 14:42:49 +1100 Subject: [PATCH 01/19] Enable progressive balances fast mode by default (#4971) * Enable progressive balances fast mode by default * Fix default in chain_config --- beacon_node/beacon_chain/src/chain_config.rs | 2 +- beacon_node/src/cli.rs | 12 ++++++------ lighthouse/tests/beacon_node.rs | 8 ++++---- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index bccc3732c3d..7bcb764ab0c 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -112,7 +112,7 @@ impl Default for ChainConfig { shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, genesis_backfill: false, always_prepare_payload: false, - progressive_balances_mode: ProgressiveBalancesMode::Checked, + progressive_balances_mode: ProgressiveBalancesMode::Fast, epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, } } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index d76f2f375f4..214accd3fde 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1228,12 +1228,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("progressive-balances") .long("progressive-balances") .value_name("MODE") - .help("Options to enable or disable the progressive balances cache for \ - unrealized FFG progression calculation. The default `checked` mode compares \ - the progressive balances from the cache against results from the existing \ - method. If there is a mismatch, it falls back to the existing method. The \ - optimized mode (`fast`) is faster but is still experimental, and is \ - not recommended for mainnet usage at this time.") + .help("Control the progressive balances cache mode. The default `fast` mode uses \ + the cache to speed up fork choice. A more conservative `checked` mode \ + compares the cache's results against results without the cache. If \ + there is a mismatch, it falls back to the cache-free result. Using the \ + default `fast` mode is recommended unless advised otherwise by the \ + Lighthouse team.") .takes_value(true) .possible_values(ProgressiveBalancesMode::VARIANTS) ) diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 5f75cb1acff..fd74b1b5b92 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2443,20 +2443,20 @@ fn progressive_balances_default() { .with_config(|config| { assert_eq!( config.chain.progressive_balances_mode, - ProgressiveBalancesMode::Checked + ProgressiveBalancesMode::Fast ) }); } #[test] -fn progressive_balances_fast() { +fn progressive_balances_checked() { CommandLineTest::new() - .flag("progressive-balances", Some("fast")) + .flag("progressive-balances", Some("checked")) .run_with_zero_port() .with_config(|config| { assert_eq!( config.chain.progressive_balances_mode, - ProgressiveBalancesMode::Fast + ProgressiveBalancesMode::Checked ) }); } From 4250385ae1bc1c29bf88d16510a144fac4a5a076 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 5 Dec 2023 10:32:10 +1100 Subject: [PATCH 02/19] Fix stuck linkcheck on CI (#4977) * Fix linkcheck CI job timeouts. Use linkcheck 3.0.0 without Docker. 
* Add sleep to wait for the mdbook server to start serving
---
 .github/workflows/linkcheck.yml | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml
index 19236691f63..7f5d3e0b602 100644
--- a/.github/workflows/linkcheck.yml
+++ b/.github/workflows/linkcheck.yml
@@ -22,14 +22,15 @@ jobs:
     - name: Checkout code
       uses: actions/checkout@v3

-    - name: Create docker network
-      run: docker network create book
-
     - name: Run mdbook server
-      run: docker run -v ${{ github.workspace }}/book:/book --network book --name book -p 3000:3000 -d peaceiris/mdbook:v0.4.20-rust serve --hostname 0.0.0.0
+      run: |
+        docker run -v ${{ github.workspace }}/book:/book --name book -p 3000:3000 -d peaceiris/mdbook:latest serve --hostname 0.0.0.0
+        sleep 5

     - name: Print logs
       run: docker logs book

     - name: Run linkcheck
-      run: docker run --network book tennox/linkcheck:latest book:3000
+      run: |
+        curl -sL https://github.com/filiph/linkcheck/releases/download/3.0.0/linkcheck-3.0.0-linux-x64.tar.gz | tar xvzf - linkcheck/linkcheck --strip 1
+        ./linkcheck localhost:3000 -d

From ec8edfb89a8358dc05fb2a6abfe0d315e2bed302 Mon Sep 17 00:00:00 2001
From: Manu NALEPA
Date: Tue, 5 Dec 2023 00:49:50 +0100
Subject: [PATCH 03/19] EIP-3076 tests - complete database (#4958)

* EIP-3076 interchange tests: Add `should_succeed_complete` boolean.

This newly added `should_succeed_complete` boolean means the test should
succeed using the complete anti-slashing DB, while the untouched
`should_succeed` boolean is still implicitly reserved for the minimal
anti-slashing DB.

Note: This commit only adds the new `should_succeed_complete` boolean and
copies the value from `should_succeed`, without attaching any meaning to the
value this boolean should take.

* `TestEIP3076SpecTests`: Modify two tests

These two tests are modified so that they comply with both the minimal AND
the complete anti-slashing DB. 
* Disallow false positives and differentiate more minimal vs complete cases * Fix my own typos * Update to v5.3.0 tag --------- Co-authored-by: Michael Sproul --- validator_client/slashing_protection/Makefile | 2 +- .../src/bin/test_generator.rs | 272 +++++++++--------- .../src/interchange_test.rs | 74 ++--- 3 files changed, 183 insertions(+), 165 deletions(-) diff --git a/validator_client/slashing_protection/Makefile b/validator_client/slashing_protection/Makefile index 0663b3cba2b..1b9729634e5 100644 --- a/validator_client/slashing_protection/Makefile +++ b/validator_client/slashing_protection/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v5.2.1 +TESTS_TAG := v5.3.0 GENERATE_DIR := generated-tests OUTPUT_DIR := interchange-tests TARBALL := $(OUTPUT_DIR)-$(TESTS_TAG).tar.gz diff --git a/validator_client/slashing_protection/src/bin/test_generator.rs b/validator_client/slashing_protection/src/bin/test_generator.rs index b96dd8eb796..c95cb6917c5 100644 --- a/validator_client/slashing_protection/src/bin/test_generator.rs +++ b/validator_client/slashing_protection/src/bin/test_generator.rs @@ -70,14 +70,18 @@ fn interchange_with_signing_roots( } fn main() { - let single_validator_blocks = - vec![(0, 32, false), (0, 33, true), (0, 31, false), (0, 1, false)]; + let single_validator_blocks = vec![ + (0, 32, false, false), + (0, 33, true, true), + (0, 31, false, false), + (0, 1, false, false), + ]; let single_validator_attestations = vec![ - (0, 3, 4, false), - (0, 14, 19, false), - (0, 15, 20, false), - (0, 16, 20, false), - (0, 15, 21, true), + (0, 3, 4, false, false), + (0, 14, 19, false, false), + (0, 15, 20, false, false), + (0, 16, 20, false, false), + (0, 15, 21, true, true), ]; let tests = vec![ @@ -104,7 +108,7 @@ fn main() { MultiTestCase::single( "single_validator_genesis_attestation", TestCase::new(interchange(vec![(0, vec![], vec![(0, 0)])])) - .with_attestations(vec![(0, 0, 0, false)]), + .with_attestations(vec![(0, 0, 0, false, false)]), ), MultiTestCase::single( "single_validator_multiple_blocks_and_attestations", @@ -114,23 +118,23 @@ fn main() { vec![(10, 11), (12, 13), (20, 24)], )])) .with_blocks(vec![ - (0, 1, false), - (0, 2, false), - (0, 3, false), - (0, 10, false), - (0, 1200, false), - (0, 4, true), - (0, 256, true), - (0, 1201, true), + (0, 1, false, false), + (0, 2, false, false), + (0, 3, false, false), + (0, 10, false, false), + (0, 1200, false, false), + (0, 4, false, true), + (0, 256, false, true), + (0, 1201, true, true), ]) .with_attestations(vec![ - (0, 9, 10, false), - (0, 12, 13, false), - (0, 11, 14, false), - (0, 21, 22, false), - (0, 10, 24, false), - (0, 11, 12, true), - (0, 20, 25, true), + (0, 9, 10, false, false), + (0, 12, 13, false, false), + (0, 11, 14, false, false), + (0, 21, 22, false, false), + (0, 10, 24, false, false), + (0, 11, 12, false, true), + (0, 20, 25, true, true), ]), ), MultiTestCase::single( @@ -157,30 +161,30 @@ fn main() { (2, vec![10, 15, 20], vec![(1, 2), (1, 3), (2, 4)]), ])) .with_blocks(vec![ - (0, 9, false), - (0, 10, false), - (0, 21, true), - (0, 11, true), - (1, 2, false), - (1, 3, false), - (1, 0, false), - (1, 101, true), - (2, 9, false), - (2, 10, false), - (2, 22, true), + (0, 9, false, false), + (0, 10, false, false), + (0, 21, true, true), + (0, 11, false, true), + (1, 2, false, false), + (1, 3, false, false), + (1, 0, false, false), + (1, 101, true, true), + (2, 9, false, false), + (2, 10, false, false), + (2, 22, true, true), ]) .with_attestations(vec![ - (0, 0, 5, false), - (0, 3, 6, false), - (0, 4, 6, true), - (0, 5, 7, 
true), - (0, 6, 8, true), - (1, 1, 7, false), - (1, 1, 4, true), - (1, 5, 7, true), - (2, 0, 0, false), - (2, 0, 1, false), - (2, 2, 5, true), + (0, 0, 5, false, false), + (0, 3, 6, false, false), + (0, 4, 6, true, true), + (0, 5, 7, true, true), + (0, 6, 8, true, true), + (1, 1, 7, false, false), + (1, 1, 4, false, true), + (1, 5, 7, true, true), + (2, 0, 0, false, false), + (2, 0, 1, false, false), + (2, 2, 5, true, true), ]), ), MultiTestCase::single( @@ -202,16 +206,16 @@ fn main() { TestCase::new(interchange(vec![(0, vec![40], vec![(2, 30)])])), TestCase::new(interchange(vec![(0, vec![50], vec![(10, 50)])])) .with_blocks(vec![ - (0, 41, false), - (0, 45, false), - (0, 49, false), - (0, 50, false), - (0, 51, true), + (0, 41, false, true), + (0, 45, false, true), + (0, 49, false, true), + (0, 50, false, false), + (0, 51, true, true), ]) .with_attestations(vec![ - (0, 3, 31, false), - (0, 9, 49, false), - (0, 10, 51, true), + (0, 3, 31, false, true), + (0, 9, 49, false, true), + (0, 10, 51, true, true), ]), ], ), @@ -221,20 +225,20 @@ fn main() { TestCase::new(interchange(vec![(0, vec![40], vec![])])), TestCase::new(interchange(vec![(0, vec![20], vec![])])) .contains_slashable_data() - .with_blocks(vec![(0, 20, false)]), + .with_blocks(vec![(0, 20, false, false)]), ], ), MultiTestCase::new( "multiple_interchanges_single_validator_multiple_blocks_out_of_order", vec![ TestCase::new(interchange(vec![(0, vec![0], vec![])])).with_blocks(vec![ - (0, 10, true), - (0, 20, true), - (0, 30, true), + (0, 10, true, true), + (0, 20, true, true), + (0, 30, true, true), ]), TestCase::new(interchange(vec![(0, vec![20], vec![])])) .contains_slashable_data() - .with_blocks(vec![(0, 29, false)]), + .with_blocks(vec![(0, 29, false, true)]), ], ), MultiTestCase::new( @@ -243,7 +247,7 @@ fn main() { TestCase::new(interchange(vec![(0, vec![40], vec![])])), TestCase::new(interchange(vec![(0, vec![20, 50], vec![])])) .contains_slashable_data() - .with_blocks(vec![(0, 20, false), (0, 50, false)]), + .with_blocks(vec![(0, 20, false, false), (0, 50, false, false)]), ], ), MultiTestCase::new( @@ -253,10 +257,10 @@ fn main() { TestCase::new(interchange(vec![(0, vec![], vec![(10, 11)])])) .contains_slashable_data() .with_attestations(vec![ - (0, 10, 14, false), - (0, 12, 13, false), - (0, 12, 14, true), - (0, 13, 15, true), + (0, 10, 14, false, false), + (0, 12, 13, false, false), + (0, 12, 14, true, true), + (0, 13, 15, true, true), ]), ], ), @@ -267,11 +271,11 @@ fn main() { TestCase::new(interchange(vec![(0, vec![], vec![(9, 21)])])) .contains_slashable_data() .with_attestations(vec![ - (0, 10, 20, false), - (0, 10, 21, false), - (0, 9, 21, false), - (0, 9, 22, false), - (0, 10, 22, true), + (0, 10, 20, false, false), + (0, 10, 21, false, false), + (0, 9, 21, false, false), + (0, 9, 22, false, false), + (0, 10, 22, true, true), ]), ], ), @@ -282,11 +286,11 @@ fn main() { TestCase::new(interchange(vec![(0, vec![], vec![(10, 20)])])) .contains_slashable_data() .with_attestations(vec![ - (0, 10, 20, false), - (0, 10, 21, false), - (0, 9, 21, false), - (0, 9, 22, false), - (0, 10, 22, true), + (0, 10, 20, false, false), + (0, 10, 21, false, false), + (0, 9, 21, false, false), + (0, 9, 22, false, false), + (0, 10, 22, true, true), ]), ], ), @@ -303,13 +307,13 @@ fn main() { ])) .contains_slashable_data() .with_blocks(vec![ - (0, 0, false), - (0, 3, true), - (0, 7, true), - (0, 3, true), - (1, 0, false), + (0, 0, false, false), + (0, 3, false, true), + (0, 7, true, true), + (0, 3, false, true), + (1, 0, false, false), ]) 
- .with_attestations(vec![(0, 0, 4, false), (1, 0, 4, true)]), + .with_attestations(vec![(0, 0, 4, false, false), (1, 0, 4, true, true)]), ], ), MultiTestCase::new( @@ -330,9 +334,9 @@ fn main() { ])) .contains_slashable_data() .with_attestations(vec![ - (0, 0, 4, false), - (1, 1, 2, false), - (2, 1, 2, false), + (0, 0, 4, false, false), + (1, 1, 2, false, false), + (2, 1, 2, false, false), ]), ], ), @@ -351,23 +355,23 @@ fn main() { ])) .contains_slashable_data() .with_blocks(vec![ - (0, 100, false), - (1, 101, false), - (2, 102, false), - (0, 103, true), - (1, 104, true), - (2, 105, true), + (0, 100, false, false), + (1, 101, false, false), + (2, 102, false, false), + (0, 103, true, true), + (1, 104, true, true), + (2, 105, true, true), ]) .with_attestations(vec![ - (0, 12, 13, false), - (0, 11, 14, false), - (1, 12, 13, false), - (1, 11, 14, false), - (2, 12, 13, false), - (2, 11, 14, false), - (0, 12, 14, true), - (1, 13, 14, true), - (2, 13, 14, true), + (0, 12, 13, false, false), + (0, 11, 14, false, false), + (1, 12, 13, false, false), + (1, 11, 14, false, false), + (2, 12, 13, false, false), + (2, 11, 14, false, false), + (0, 12, 14, true, true), + (1, 13, 14, true, true), + (2, 13, 14, true, true), ]), ], ), @@ -379,36 +383,36 @@ fn main() { "single_validator_source_greater_than_target_surrounding", TestCase::new(interchange(vec![(0, vec![], vec![(5, 2)])])) .contains_slashable_data() - .with_attestations(vec![(0, 3, 4, false)]), + .with_attestations(vec![(0, 3, 4, false, false)]), ), MultiTestCase::single( "single_validator_source_greater_than_target_surrounded", TestCase::new(interchange(vec![(0, vec![], vec![(5, 2)])])) .contains_slashable_data() - .with_attestations(vec![(0, 6, 1, false)]), + .with_attestations(vec![(0, 6, 1, false, false)]), ), MultiTestCase::single( "single_validator_source_greater_than_target_sensible_iff_minified", TestCase::new(interchange(vec![(0, vec![], vec![(5, 2), (6, 7)])])) .contains_slashable_data() - .with_attestations(vec![(0, 5, 8, false), (0, 6, 8, true)]), + .with_attestations(vec![(0, 5, 8, false, false), (0, 6, 8, true, true)]), ), MultiTestCase::single( "single_validator_out_of_order_blocks", TestCase::new(interchange(vec![(0, vec![6, 5], vec![])])).with_blocks(vec![ - (0, 5, false), - (0, 6, false), - (0, 7, true), + (0, 5, false, false), + (0, 6, false, false), + (0, 7, true, true), ]), ), MultiTestCase::single( "single_validator_out_of_order_attestations", TestCase::new(interchange(vec![(0, vec![], vec![(4, 5), (3, 4)])])).with_attestations( vec![ - (0, 3, 4, false), - (0, 4, 5, false), - (0, 1, 10, false), - (0, 3, 3, false), + (0, 3, 4, false, false), + (0, 4, 5, false, false), + (0, 1, 10, false, false), + (0, 3, 3, false, false), ], ), ), @@ -417,15 +421,15 @@ fn main() { MultiTestCase::single( "single_validator_two_blocks_no_signing_root", TestCase::new(interchange(vec![(0, vec![10, 20], vec![])])) - .with_blocks(vec![(0, 20, false)]), + .with_blocks(vec![(0, 20, false, false)]), ), MultiTestCase::single( "single_validator_multiple_block_attempts", TestCase::new(interchange(vec![(0, vec![15, 16, 17], vec![])])) .with_signing_root_blocks(vec![ - (0, 16, 0, false), - (0, 16, 1, false), - (0, 16, u64::MAX, false), + (0, 16, 0, false, false), + (0, 16, 1, false, false), + (0, 16, u64::MAX, false, false), ]), ), MultiTestCase::single( @@ -436,15 +440,15 @@ fn main() { vec![], )])) .with_signing_root_blocks(vec![ - (0, 15, 151, true), - (0, 16, 161, true), - (0, 17, 171, true), - (0, 15, 152, false), - (0, 15, 0, false), - (0, 16, 151, 
false), - (0, 17, 151, false), - (0, 18, 151, true), - (0, 14, 171, false), + (0, 15, 151, false, true), + (0, 16, 161, false, true), + (0, 17, 171, false, true), + (0, 15, 152, false, false), + (0, 15, 0, false, false), + (0, 16, 151, false, false), + (0, 17, 151, false, false), + (0, 18, 151, true, true), + (0, 14, 171, false, false), ]), ), MultiTestCase::single( @@ -455,11 +459,11 @@ fn main() { vec![(5, 15, Some(515))], )])) .with_signing_root_attestations(vec![ - (0, 5, 15, 0, false), - (0, 5, 15, 1, false), - (0, 5, 15, 515, true), - (0, 6, 15, 615, false), - (0, 5, 14, 515, false), + (0, 5, 15, 0, false, false), + (0, 5, 15, 1, false, false), + (0, 5, 15, 515, false, true), + (0, 6, 15, 615, false, false), + (0, 5, 14, 515, false, false), ]), ), MultiTestCase::single( @@ -500,8 +504,12 @@ fn main() { (0, vec![10, 11], vec![(0, 2)]), (0, vec![12, 13], vec![(1, 3)]), ])) - .with_blocks(vec![(0, 10, false), (0, 13, false), (0, 14, true)]) - .with_attestations(vec![(0, 0, 2, false), (0, 1, 3, false)]), + .with_blocks(vec![ + (0, 10, false, false), + (0, 13, false, false), + (0, 14, true, true), + ]) + .with_attestations(vec![(0, 0, 2, false, false), (0, 1, 3, false, false)]), ), MultiTestCase::single( "duplicate_pubkey_slashable_block", @@ -510,7 +518,7 @@ fn main() { (0, vec![10], vec![(1, 3)]), ])) .contains_slashable_data() - .with_blocks(vec![(0, 10, false), (0, 11, true)]), + .with_blocks(vec![(0, 10, false, false), (0, 11, true, true)]), ), MultiTestCase::single( "duplicate_pubkey_slashable_attestation", @@ -520,10 +528,10 @@ fn main() { ])) .contains_slashable_data() .with_attestations(vec![ - (0, 0, 1, false), - (0, 0, 2, false), - (0, 0, 4, false), - (0, 1, 4, true), + (0, 0, 1, false, false), + (0, 0, 2, false, false), + (0, 0, 4, false, false), + (0, 1, 4, true, true), ]), ), ]; diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index 1bb1fc550bf..d88bb93a0d5 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -33,6 +33,7 @@ pub struct TestBlock { pub slot: Slot, pub signing_root: Hash256, pub should_succeed: bool, + pub should_succeed_complete: bool, } #[derive(Debug, Clone, Deserialize, Serialize)] @@ -43,6 +44,7 @@ pub struct TestAttestation { pub target_epoch: Epoch, pub signing_root: Hash256, pub should_succeed: bool, + pub should_succeed_complete: bool, } impl MultiTestCase { @@ -68,10 +70,6 @@ impl MultiTestCase { let slashing_db_file = dir.path().join("slashing_protection.sqlite"); let slashing_db = SlashingDatabase::create(&slashing_db_file).unwrap(); - // Now that we are using implicit minification on import, we must always allow - // false positives. - let allow_false_positives = true; - for test_case in &self.steps { // If the test case is marked as containing slashable data, then the spec allows us to // fail to import the file. 
However, we minify on import and ignore slashable data, so
@@ -124,7 +122,7 @@ impl MultiTestCase {
                             i, self.name, safe
                         );
                     }
-                    Err(e) if block.should_succeed && !allow_false_positives => {
+                    Err(e) if block.should_succeed => {
                         panic!(
                             "block {} from `{}` failed when it should have succeeded: {:?}",
                             i, self.name, e
@@ -147,7 +145,7 @@ impl MultiTestCase {
                             i, self.name, safe
                         );
                     }
-                    Err(e) if att.should_succeed && !allow_false_positives => {
+                    Err(e) if att.should_succeed => {
                         panic!(
                             "attestation {} from `{}` failed when it should have succeeded: {:?}",
                             i, self.name, e
@@ -181,53 +179,65 @@ impl TestCase {
         self
     }

-    pub fn with_blocks(self, blocks: impl IntoIterator<Item = (usize, u64, bool)>) -> Self {
-        self.with_signing_root_blocks(
-            blocks
-                .into_iter()
-                .map(|(index, slot, should_succeed)| (index, slot, 0, should_succeed)),
-        )
+    pub fn with_blocks(self, blocks: impl IntoIterator<Item = (usize, u64, bool, bool)>) -> Self {
+        self.with_signing_root_blocks(blocks.into_iter().map(
+            |(index, slot, should_succeed, should_succeed_complete)| {
+                (index, slot, 0, should_succeed, should_succeed_complete)
+            },
+        ))
     }

     pub fn with_signing_root_blocks(
         mut self,
-        blocks: impl IntoIterator<Item = (usize, u64, u64, bool)>,
+        blocks: impl IntoIterator<Item = (usize, u64, u64, bool, bool)>,
     ) -> Self {
-        self.blocks.extend(
-            blocks
-                .into_iter()
-                .map(|(pk, slot, signing_root, should_succeed)| TestBlock {
+        self.blocks.extend(blocks.into_iter().map(
+            |(pk, slot, signing_root, should_succeed, should_succeed_complete)| {
+                assert!(
+                    !should_succeed || should_succeed_complete,
+                    "if should_succeed is true then should_succeed_complete must also be true"
+                );
+                TestBlock {
                     pubkey: pubkey(pk),
                     slot: Slot::new(slot),
                     signing_root: Hash256::from_low_u64_be(signing_root),
                     should_succeed,
-                }),
-        );
+                    should_succeed_complete,
+                }
+            },
+        ));
         self
     }

     pub fn with_attestations(
         self,
-        attestations: impl IntoIterator<Item = (usize, u64, u64, bool)>,
+        attestations: impl IntoIterator<Item = (usize, u64, u64, bool, bool)>,
     ) -> Self {
-        self.with_signing_root_attestations(
-            attestations
-                .into_iter()
-                .map(|(id, source, target, succeed)| (id, source, target, 0, succeed)),
-        )
+        self.with_signing_root_attestations(attestations.into_iter().map(
+            |(id, source, target, succeed, succeed_complete)| {
+                (id, source, target, 0, succeed, succeed_complete)
+            },
+        ))
     }

     pub fn with_signing_root_attestations(
         mut self,
-        attestations: impl IntoIterator<Item = (usize, u64, u64, u64, bool)>,
+        attestations: impl IntoIterator<Item = (usize, u64, u64, u64, bool, bool)>,
     ) -> Self {
         self.attestations.extend(attestations.into_iter().map(
-            |(pk, source, target, signing_root, should_succeed)| TestAttestation {
-                pubkey: pubkey(pk),
-                source_epoch: Epoch::new(source),
-                target_epoch: Epoch::new(target),
-                signing_root: Hash256::from_low_u64_be(signing_root),
-                should_succeed,
+            |(pk, source, target, signing_root, should_succeed, should_succeed_complete)| {
+                assert!(
+                    !should_succeed || should_succeed_complete,
+                    "if should_succeed is true then should_succeed_complete must also be true"
+                );
+                TestAttestation {
+                    pubkey: pubkey(pk),
+                    source_epoch: Epoch::new(source),
+                    target_epoch: Epoch::new(target),
+                    signing_root: Hash256::from_low_u64_be(signing_root),
+                    should_succeed,
+                    should_succeed_complete,
+                }
             },
        ));
        self
    }

From 31044402ee180ff937027ec842513bef90d7eec8 Mon Sep 17 00:00:00 2001
From: Pawan Dhananjay
Date: Tue, 5 Dec 2023 08:19:59 -0800
Subject: [PATCH 04/19] Sidecar inclusion proof (#4900)

* Refactor BlobSidecar to new type
* Fix some compile errors
* Gossip verification compiles
* Fix http api types take 1
* Fix another round of compile errors
* Beacon node crate compiles
* EF tests compile
* Remove all blob signing from VC
* fmt
* Tests compile
* Fix some tests
* Fix more http tests
* get 
compiling
* Fix gossip conditions and tests
* Add basic proof generation and verification
* remove unnecessary ssz decode
* add back build_sidecar
* remove default at fork for blobs
* fix beacon chain tests
* get release tests compiling
* fix lints
* fix existing spec tests
* add new ef tests
* fix gossip duplicate rule
* lints
* add back sidecar signature check in gossip
* add finalized descendant check to blob sidecar gossip
* fix error conversion
* fix release tests
* sidecar inclusion self review cleanup
* Add proof verification and computation metrics
* Remove accidentally committed file
* Unify some block and blob errors; add slashing conditions for sidecars
* Address review comment
* Clean up re-org tests (#4957)
* Address more review comments
* Add Comments & Eliminate Unnecessary Clones
* update names
* Update beacon_node/beacon_chain/src/metrics.rs

Co-authored-by: Jimmy Chen

* Update beacon_node/network/src/network_beacon_processor/tests.rs

Co-authored-by: Jimmy Chen

* pr feedback
* fix test compile
* Sidecar Inclusion proof small refactor and updates (#4967)
* Update some comments, variables and small cosmetic fixes.
* Couple blobs and proofs into a tuple in `PayloadAndBlobs` for simplicity and safety.
* Update function comment.
* Update testing/ef_tests/src/cases/merkle_proof_validity.rs

Co-authored-by: Jimmy Chen

* Rename the block and blob wrapper types used in the beacon API interfaces.
* make sure gossip invalid blobs are passed to the slasher (#4970)
* Add blob headers to slasher before adding to DA checker
* Replace Vec with HashSet in BlockQueue
* fmt
* Rename gindex -> index
* Simplify gossip condition

---------

Co-authored-by: realbigsean
Co-authored-by: realbigsean
Co-authored-by: Michael Sproul
Co-authored-by: Mark Mackey
Co-authored-by: Jimmy Chen
---
 Cargo.lock                                    |   1 +
 beacon_node/beacon_chain/src/beacon_chain.rs  | 149 ++++--
 .../beacon_chain/src/blob_verification.rs     | 397 ++++++++------
 .../beacon_chain/src/block_times_cache.rs     |  92 +++-
 .../beacon_chain/src/block_verification.rs    | 123 +++--
 .../src/block_verification_types.rs           |  13 +-
 .../src/data_availability_checker.rs          |  21 +-
 .../src/data_availability_checker/error.rs    |  13 +-
 .../overflow_lru_cache.rs                     |  81 +--
 beacon_node/beacon_chain/src/kzg_utils.rs     |   8 +-
 beacon_node/beacon_chain/src/lib.rs           |   2 +-
 beacon_node/beacon_chain/src/metrics.rs       |   8 +
 .../src/observed_blob_sidecars.rs             |  72 +--
 beacon_node/beacon_chain/src/test_utils.rs    | 180 ++-----
 .../beacon_chain/tests/block_verification.rs  | 185 +++----
 beacon_node/beacon_chain/tests/events.rs      |  22 +-
 beacon_node/beacon_processor/src/lib.rs       |  13 +-
 beacon_node/builder_client/src/lib.rs         |   8 +-
 beacon_node/execution_layer/src/lib.rs        |  61 +--
 .../test_utils/execution_block_generator.rs   |   6 +-
 .../src/test_utils/mock_builder.rs            |   8 +-
 .../http_api/src/build_block_contents.rs      |  72 ++-
 beacon_node/http_api/src/lib.rs               |  52 +-
 beacon_node/http_api/src/produce_block.rs     |  69 +--
 beacon_node/http_api/src/publish_blocks.rs    |  59 ++-
 .../tests/broadcast_validation_tests.rs       | 138 ++---
 .../http_api/tests/interactive_tests.rs       |  12 +-
 beacon_node/http_api/tests/tests.rs           | 238 ++++-----
 .../lighthouse_network/src/rpc/methods.rs     |   4 +-
 .../lighthouse_network/src/types/pubsub.rs    |  24 +-
 .../gossip_methods.rs                         |  45 +-
 .../src/network_beacon_processor/mod.rs       |   6 +-
 .../network_beacon_processor/sync_methods.rs  |   2 +-
 .../src/network_beacon_processor/tests.rs     |  38 +-
 beacon_node/network/src/router.rs             |   4 +-
 .../network/src/sync/block_lookups/common.rs  |   2 +-
.../network/src/sync/block_lookups/tests.rs   |   6 +-
 .../src/sync/block_sidecar_coupling.rs        |   2 +-
 beacon_node/network/src/sync/manager.rs       |   6 +-
 common/eth2/src/lib.rs                        |  98 ++--
 common/eth2/src/types.rs                      | 495 ++++++------------
 consensus/fork_choice/src/fork_choice.rs      |   2 +-
 consensus/fork_choice/src/lib.rs              |   4 +-
 consensus/merkle_proof/src/lib.rs             |   2 +-
 .../src/proto_array_fork_choice.rs            |   6 +-
 consensus/types/presets/gnosis/deneb.yaml     |   2 +
 consensus/types/presets/mainnet/deneb.yaml    |   2 +
 consensus/types/presets/minimal/deneb.yaml    |   2 +
 consensus/types/src/beacon_block_body.rs      |  78 +++
 consensus/types/src/beacon_block_header.rs    |  10 +
 consensus/types/src/blob_sidecar.rs           | 216 ++++----
 consensus/types/src/builder_bid.rs            |  15 +-
 consensus/types/src/chain_spec.rs             |   8 -
 consensus/types/src/config_and_preset.rs      |   1 -
 consensus/types/src/eth_spec.rs               |   9 +
 consensus/types/src/lib.rs                    |   9 +-
 consensus/types/src/payload.rs                |  53 +-
 consensus/types/src/sidecar.rs                | 221 --------
 consensus/types/src/signed_blob.rs            | 114 ----
 crypto/kzg/src/lib.rs                         |  40 +-
 slasher/src/block_queue.rs                    |   7 +-
 testing/ef_tests/Cargo.toml                   |   1 +
 testing/ef_tests/Makefile                     |   2 +-
 testing/ef_tests/src/cases/fork_choice.rs     | 154 +++++-
 .../src/cases/kzg_verify_blob_kzg_proof.rs    |  12 +-
 .../cases/kzg_verify_blob_kzg_proof_batch.rs  |  22 +-
 .../src/cases/merkle_proof_validity.rs        |  73 ++-
 testing/ef_tests/src/handler.rs               |  35 ++
 testing/ef_tests/src/type_name.rs             |   1 -
 testing/ef_tests/tests/tests.rs               |  28 +-
 validator_client/src/block_service.rs         | 207 ++++----
 validator_client/src/http_metrics/metrics.rs  |   5 -
 validator_client/src/signing_method.rs        |   6 -
 validator_client/src/validator_store.rs       |  44 +-
 74 files changed, 1953 insertions(+), 2273 deletions(-)
 delete mode 100644 consensus/types/src/sidecar.rs
 delete mode 100644 consensus/types/src/signed_blob.rs

diff --git a/Cargo.lock b/Cargo.lock
index 9c1b591349b..0a1af70bb15 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1857,6 +1857,7 @@ dependencies = [
  "fs2",
  "hex",
  "kzg",
+ "logging",
  "rayon",
  "serde",
  "serde_json",
diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index 53583390fa3..71270c197d6 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -7,7 +7,7 @@ use crate::attester_cache::{AttesterCache, AttesterCacheKey};
 use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache};
 use crate::beacon_proposer_cache::compute_proposer_duties_from_head;
 use crate::beacon_proposer_cache::BeaconProposerCache;
-use crate::blob_verification::{self, GossipBlobError, GossipVerifiedBlob};
+use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob};
 use crate::block_times_cache::BlockTimesCache;
 use crate::block_verification::POS_PANDA_BANNER;
 use crate::block_verification::{
@@ -121,7 +121,6 @@ use tree_hash::TreeHash;
 use types::beacon_state::CloneConfig;
 use types::blob_sidecar::{BlobSidecarList, FixedBlobSidecarList};
 use types::payload::BlockProductionVersion;
-use types::sidecar::BlobItems;
 use types::*;

 pub type ForkChoiceError = fork_choice::Error;
@@ -489,16 +488,49 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     pub block_production_state: Arc<Mutex<Option<(Hash256, BlockProductionPreState<T::EthSpec>)>>>,
 }

-pub enum BeaconBlockResponseType<T: EthSpec> {
+pub enum BeaconBlockResponseWrapper<T: EthSpec> {
     Full(BeaconBlockResponse<T, FullPayload<T>>),
     Blinded(BeaconBlockResponse<T, BlindedPayload<T>>),
 }

+impl<T: EthSpec> BeaconBlockResponseWrapper<T> {
+    pub fn fork_name(&self, spec: &ChainSpec) -> Result<ForkName, InconsistentFork> {
+        Ok(match self {
+            BeaconBlockResponseWrapper::Full(resp) =>
resp.block.to_ref().fork_name(spec)?,
+            BeaconBlockResponseWrapper::Blinded(resp) => resp.block.to_ref().fork_name(spec)?,
+        })
+    }
+
+    pub fn execution_payload_value(&self) -> Option<Uint256> {
+        match self {
+            BeaconBlockResponseWrapper::Full(resp) => resp.execution_payload_value,
+            BeaconBlockResponseWrapper::Blinded(resp) => resp.execution_payload_value,
+        }
+    }
+
+    pub fn consensus_block_value(&self) -> Option<u64> {
+        match self {
+            BeaconBlockResponseWrapper::Full(resp) => resp.consensus_block_value,
+            BeaconBlockResponseWrapper::Blinded(resp) => resp.consensus_block_value,
+        }
+    }
+
+    pub fn is_blinded(&self) -> bool {
+        matches!(self, BeaconBlockResponseWrapper::Blinded(_))
+    }
+}
+
+/// The components produced when the local beacon node creates a new block to extend the chain
 pub struct BeaconBlockResponse<T: EthSpec, Payload: AbstractExecPayload<T>> {
+    /// The newly produced beacon block
     pub block: BeaconBlock<T, Payload>,
+    /// The post-state after applying the new block
     pub state: BeaconState<T>,
-    pub maybe_side_car: Option<SidecarList<T, <Payload as AbstractExecPayload<T>>::Sidecar>>,
+    /// The Blobs / Proofs associated with the new block
+    pub blob_items: Option<(KzgProofs<T>, BlobsList<T>)>,
+    /// The execution layer reward for the block
     pub execution_payload_value: Option<Uint256>,
+    /// The consensus layer reward to the proposer
     pub consensus_block_value: Option<u64>,
 }
@@ -2022,17 +2054,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

     pub fn verify_blob_sidecar_for_gossip(
         self: &Arc<Self>,
-        blob_sidecar: SignedBlobSidecar<T::EthSpec>,
+        blob_sidecar: Arc<BlobSidecar<T::EthSpec>>,
         subnet_id: u64,
     ) -> Result<GossipVerifiedBlob<T>, GossipBlobError<T::EthSpec>> {
         metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_REQUESTS);
         let _timer = metrics::start_timer(&metrics::BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES);
-        blob_verification::validate_blob_sidecar_for_gossip(blob_sidecar, subnet_id, self).map(
-            |v| {
-                metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES);
-                v
-            },
-        )
+        GossipVerifiedBlob::new(blob_sidecar, subnet_id, self).map(|v| {
+            metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES);
+            v
+        })
     }

     /// Accepts some 'LightClientOptimisticUpdate' from the network and attempts to verify it
@@ -2832,7 +2862,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         }

         self.data_availability_checker
-            .notify_gossip_blob(blob.as_blob().slot, block_root, &blob);
+            .notify_gossip_blob(blob.slot(), block_root, &blob);
         let r = self.check_gossip_blob_availability_and_import(blob).await;
         self.remove_notified(&block_root, r)
     }
@@ -2942,6 +2972,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // Increment the Prometheus counter for block processing requests.
         metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS);

+        // Set observed time if not already set. Usually this should be set by gossip or RPC,
+        // but just in case we set it again here (useful for tests).
+        if let (Some(seen_timestamp), Some(current_slot)) =
+            (self.slot_clock.now_duration(), self.slot_clock.now())
+        {
+            self.block_times_cache.write().set_time_observed(
+                block_root,
+                current_slot,
+                seen_timestamp,
+                None,
+                None,
+            );
+        }
+
         let block_slot = unverified_block.block().slot();

         // A small closure to group the verification and import errors. 
@@ -3097,6 +3141,9 @@ impl BeaconChain { blob: GossipVerifiedBlob, ) -> Result> { let slot = blob.slot(); + if let Some(slasher) = self.slasher.as_ref() { + slasher.accept_block_header(blob.signed_block_header()); + } let availability = self.data_availability_checker.put_gossip_blob(blob)?; self.process_availability(slot, availability).await @@ -3110,6 +3157,11 @@ impl BeaconChain { block_root: Hash256, blobs: FixedBlobSidecarList, ) -> Result> { + if let Some(slasher) = self.slasher.as_ref() { + for blob_sidecar in blobs.iter().filter_map(|blob| blob.clone()) { + slasher.accept_block_header(blob_sidecar.signed_block_header.clone()); + } + } let availability = self .data_availability_checker .put_rpc_blobs(block_root, blobs)?; @@ -3968,7 +4020,7 @@ impl BeaconChain { validator_graffiti: Option, verification: ProduceBlockVerification, block_production_version: BlockProductionVersion, - ) -> Result, BlockProductionError> { + ) -> Result, BlockProductionError> { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); // Part 1/2 (blocking) @@ -4414,7 +4466,7 @@ impl BeaconChain { /// This function uses heuristics that align quite closely but not exactly with the re-org /// conditions set out in `get_state_for_re_org` and `get_proposer_head`. The differences are /// documented below. - fn overridden_forkchoice_update_params( + pub fn overridden_forkchoice_update_params( &self, canonical_forkchoice_params: ForkchoiceUpdateParameters, ) -> Result { @@ -4432,7 +4484,7 @@ impl BeaconChain { }) } - fn overridden_forkchoice_update_params_or_failure_reason( + pub fn overridden_forkchoice_update_params_or_failure_reason( &self, canonical_forkchoice_params: &ForkchoiceUpdateParameters, ) -> Result> { @@ -4573,7 +4625,7 @@ impl BeaconChain { .unwrap_or_else(|| Duration::from_secs(0)), ); block_delays.observed.map_or(false, |delay| { - delay > self.slot_clock.unagg_attestation_production_delay() + delay >= self.slot_clock.unagg_attestation_production_delay() }) } @@ -4599,7 +4651,7 @@ impl BeaconChain { validator_graffiti: Option, verification: ProduceBlockVerification, block_production_version: BlockProductionVersion, - ) -> Result, BlockProductionError> { + ) -> Result, BlockProductionError> { // Part 1/3 (blocking) // // Perform the state advance and block-packing functions. 
@@ -4658,7 +4710,7 @@ impl BeaconChain { .await .map_err(BlockProductionError::TokioJoin)??; - Ok(BeaconBlockResponseType::Full(beacon_block_response)) + Ok(BeaconBlockResponseWrapper::Full(beacon_block_response)) } BlockProposalContentsType::Blinded(block_contents) => { let chain = self.clone(); @@ -4678,7 +4730,7 @@ impl BeaconChain { .await .map_err(BlockProductionError::TokioJoin)??; - Ok(BeaconBlockResponseType::Blinded(beacon_block_response)) + Ok(BeaconBlockResponseWrapper::Blinded(beacon_block_response)) } } } else { @@ -4699,7 +4751,7 @@ impl BeaconChain { .await .map_err(BlockProductionError::TokioJoin)??; - Ok(BeaconBlockResponseType::Full(beacon_block_response)) + Ok(BeaconBlockResponseWrapper::Full(beacon_block_response)) } } @@ -4977,7 +5029,7 @@ impl BeaconChain { bls_to_execution_changes, } = partial_beacon_block; - let (inner_block, blobs_opt, proofs_opt, execution_payload_value) = match &state { + let (inner_block, maybe_blobs_and_proofs, execution_payload_value) = match &state { BeaconState::Base(_) => ( BeaconBlock::Base(BeaconBlockBase { slot, @@ -4997,7 +5049,6 @@ impl BeaconChain { }, }), None, - None, Uint256::zero(), ), BeaconState::Altair(_) => ( @@ -5021,7 +5072,6 @@ impl BeaconChain { }, }), None, - None, Uint256::zero(), ), BeaconState::Merge(_) => { @@ -5052,7 +5102,6 @@ impl BeaconChain { }, }), None, - None, execution_payload_value, ) } @@ -5086,12 +5135,11 @@ impl BeaconChain { }, }), None, - None, execution_payload_value, ) } BeaconState::Deneb(_) => { - let (payload, kzg_commitments, blobs, proofs, execution_payload_value) = + let (payload, kzg_commitments, maybe_blobs_and_proofs, execution_payload_value) = block_contents .ok_or(BlockProductionError::MissingExecutionPayload)? .deconstruct(); @@ -5121,8 +5169,7 @@ impl BeaconChain { .ok_or(BlockProductionError::InvalidPayloadFork)?, }, }), - blobs, - proofs, + maybe_blobs_and_proofs, execution_payload_value, ) } @@ -5181,8 +5228,8 @@ impl BeaconChain { let blobs_verification_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES); - let maybe_sidecar_list = match (blobs_opt, proofs_opt) { - (Some(blobs_or_blobs_roots), Some(proofs)) => { + let blob_items = match maybe_blobs_and_proofs { + Some((blobs, proofs)) => { let expected_kzg_commitments = block.body().blob_kzg_commitments().map_err(|_| { BlockProductionError::InvalidBlockVariant( @@ -5190,42 +5237,32 @@ impl BeaconChain { ) })?; - if expected_kzg_commitments.len() != blobs_or_blobs_roots.len() { + if expected_kzg_commitments.len() != blobs.len() { return Err(BlockProductionError::MissingKzgCommitment(format!( "Missing KZG commitment for slot {}. 
Expected {}, got: {}", block.slot(), - blobs_or_blobs_roots.len(), + blobs.len(), expected_kzg_commitments.len() ))); } let kzg_proofs = Vec::from(proofs); - if let Some(blobs) = blobs_or_blobs_roots.blobs() { - let kzg = self - .kzg - .as_ref() - .ok_or(BlockProductionError::TrustedSetupNotInitialized)?; - kzg_utils::validate_blobs::( - kzg, - expected_kzg_commitments, - blobs.iter().collect(), - &kzg_proofs, - ) - .map_err(BlockProductionError::KzgError)?; - } - - Some( - Sidecar::build_sidecar( - blobs_or_blobs_roots, - &block, - expected_kzg_commitments, - kzg_proofs, - ) - .map_err(BlockProductionError::FailedToBuildBlobSidecars)?, + let kzg = self + .kzg + .as_ref() + .ok_or(BlockProductionError::TrustedSetupNotInitialized)?; + kzg_utils::validate_blobs::( + kzg, + expected_kzg_commitments, + blobs.iter().collect(), + &kzg_proofs, ) + .map_err(BlockProductionError::KzgError)?; + + Some((kzg_proofs.into(), blobs)) } - _ => None, + None => None, }; drop(blobs_verification_timer); @@ -5243,7 +5280,7 @@ impl BeaconChain { Ok(BeaconBlockResponse { block, state, - maybe_side_car: maybe_sidecar_list, + blob_items, execution_payload_value: Some(execution_payload_value), consensus_block_value: Some(consensus_block_value), }) diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index ca69d2ab6f1..e2a1f0928f0 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -2,15 +2,15 @@ use derivative::Derivative; use slot_clock::SlotClock; use std::sync::Arc; -use crate::beacon_chain::{ - BeaconChain, BeaconChainTypes, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, - VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, +use crate::beacon_chain::{BeaconChain, BeaconChainTypes, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT}; +use crate::block_verification::{ + cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info, + BlockSlashInfo, }; -use crate::block_verification::cheap_state_advance_to_obtain_committees; -use crate::data_availability_checker::AvailabilityCheckError; use crate::kzg_utils::{validate_blob, validate_blobs}; use crate::{metrics, BeaconChainError}; -use kzg::{Kzg, KzgCommitment}; +use kzg::{Error as KzgError, Kzg, KzgCommitment}; +use merkle_proof::MerkleTreeError; use slog::{debug, warn}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; @@ -18,7 +18,7 @@ use tree_hash::TreeHash; use types::blob_sidecar::BlobIdentifier; use types::{ BeaconStateError, BlobSidecar, BlobSidecarList, CloneConfig, EthSpec, Hash256, - SignedBlobSidecar, Slot, + SignedBeaconBlockHeader, Slot, }; /// An error occurred while validating a gossip blob. @@ -75,7 +75,7 @@ pub enum GossipBlobError { /// ## Peer scoring /// /// The blob is invalid and the peer is faulty. - ProposerSignatureInvalid, + ProposalSignatureInvalid, /// The proposal_index corresponding to blob.beacon_block_root is not known. /// @@ -98,6 +98,12 @@ pub enum GossipBlobError { /// We cannot process the blob without validating its parent, the peer isn't necessarily faulty. BlobParentUnknown(Arc>), + /// Invalid kzg commitment inclusion proof + /// ## Peer scoring + /// + /// The blob sidecar is invalid and the peer is faulty + InvalidInclusionProof, + /// A blob has already been seen for the given `(sidecar.block_root, sidecar.index)` tuple /// over gossip or no gossip sources. /// @@ -109,6 +115,42 @@ pub enum GossipBlobError { slot: Slot, index: u64, }, + + /// `Kzg` struct hasn't been initialized. 
This is an internal error.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The peer isn't faulty; this is an internal error.
+    KzgNotInitialized,
+
+    /// The kzg verification failed.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The blob sidecar is invalid and the peer is faulty.
+    KzgError(kzg::Error),
+
+    /// The kzg commitment inclusion proof failed.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The blob sidecar is invalid.
+    InclusionProof(MerkleTreeError),
+
+    /// The pubkey cache timed out.
+    ///
+    /// ## Peer scoring
+    ///
+    /// The blob sidecar may be valid; this is an internal error.
+    PubkeyCacheTimeout,
+
+    /// The block conflicts with finalization, no need to propagate.
+    ///
+    /// ## Peer scoring
+    ///
+    /// It's unclear if this block is valid, but it conflicts with finality and shouldn't be
+    /// imported.
+    NotFinalizedDescendant { block_parent_root: Hash256 },
 }

 impl<T: EthSpec> std::fmt::Display for GossipBlobError<T> {
@@ -118,7 +160,7 @@ impl<T: EthSpec> std::fmt::Display for GossipBlobError<T> {
             write!(
                 f,
                 "BlobParentUnknown(parent_root:{})",
-                blob_sidecar.block_parent_root
+                blob_sidecar.block_parent_root()
             )
         }
         other => write!(f, "{:?}", other),
@@ -147,63 +189,168 @@ pub type GossipVerifiedBlobList<T> = VariableList<
 /// the p2p network.
 #[derive(Debug)]
 pub struct GossipVerifiedBlob<T: BeaconChainTypes> {
-    blob: SignedBlobSidecar<T::EthSpec>,
+    block_root: Hash256,
+    blob: KzgVerifiedBlob<T::EthSpec>,
 }

 impl<T: BeaconChainTypes> GossipVerifiedBlob<T> {
     pub fn new(
-        blob: SignedBlobSidecar<T::EthSpec>,
+        blob: Arc<BlobSidecar<T::EthSpec>>,
+        subnet_id: u64,
         chain: &BeaconChain<T>,
     ) -> Result<Self, GossipBlobError<T::EthSpec>> {
-        let blob_index = blob.message.index;
-        validate_blob_sidecar_for_gossip(blob, blob_index, chain)
+        let header = blob.signed_block_header.clone();
+        // We only process slashing info if the gossip verification failed
+        // since we do not process the blob any further in that case.
+        validate_blob_sidecar_for_gossip(blob, subnet_id, chain).map_err(|e| {
+            process_block_slash_info::<_, GossipBlobError<T::EthSpec>>(
+                chain,
+                BlockSlashInfo::from_early_error_blob(header, e),
+            )
+        })
     }
     /// Construct a `GossipVerifiedBlob` that is assumed to be valid.
     ///
     /// This should ONLY be used for testing.
-    pub fn __assumed_valid(blob: SignedBlobSidecar<T::EthSpec>) -> Self {
-        Self { blob }
+    pub fn __assumed_valid(blob: Arc<BlobSidecar<T::EthSpec>>) -> Self {
+        Self {
+            block_root: blob.block_root(),
+            blob: KzgVerifiedBlob { blob },
+        }
     }
     pub fn id(&self) -> BlobIdentifier {
-        self.blob.message.id()
+        BlobIdentifier {
+            block_root: self.block_root,
+            index: self.blob.blob_index(),
+        }
     }
     pub fn block_root(&self) -> Hash256 {
-        self.blob.message.block_root
+        self.block_root
     }
-    pub fn to_blob(self) -> Arc<BlobSidecar<T::EthSpec>> {
-        self.blob.message
+    pub fn slot(&self) -> Slot {
+        self.blob.blob.slot()
+    }
+    pub fn index(&self) -> u64 {
+        self.blob.blob.index
+    }
+    pub fn kzg_commitment(&self) -> KzgCommitment {
+        self.blob.blob.kzg_commitment
+    }
+    pub fn signed_block_header(&self) -> SignedBeaconBlockHeader {
+        self.blob.blob.signed_block_header.clone()
+    }
+    pub fn block_proposer_index(&self) -> u64 {
+        self.blob.blob.block_proposer_index()
+    }
+    pub fn into_inner(self) -> KzgVerifiedBlob<T::EthSpec> {
+        self.blob
     }
     pub fn as_blob(&self) -> &BlobSidecar<T::EthSpec> {
-        &self.blob.message
+        self.blob.as_blob()
     }
-    pub fn signed_blob(&self) -> SignedBlobSidecar<T::EthSpec> {
-        self.blob.clone()
+    /// This is cheap as we're calling clone on an Arc
+    pub fn clone_blob(&self) -> Arc<BlobSidecar<T::EthSpec>> {
+        self.blob.clone_blob()
     }
-    pub fn slot(&self) -> Slot {
-        self.blob.message.slot
+}
+
+/// Wrapper over a `BlobSidecar` for which we have completed kzg verification.
+/// i.e. `verify_blob_kzg_proof(blob, commitment, proof) == true`. 
+#[derive(Debug, Derivative, Clone, Encode, Decode)]
+#[derivative(PartialEq, Eq)]
+#[ssz(struct_behaviour = "transparent")]
+pub struct KzgVerifiedBlob<T: EthSpec> {
+    blob: Arc<BlobSidecar<T>>,
+}
+
+impl<T: EthSpec> PartialOrd for KzgVerifiedBlob<T> {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
     }
-    pub fn index(&self) -> u64 {
-        self.blob.message.index
+}
+
+impl<T: EthSpec> Ord for KzgVerifiedBlob<T> {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.blob.cmp(&other.blob)
     }
-    pub fn kzg_commitment(&self) -> KzgCommitment {
-        self.blob.message.kzg_commitment
+}
+
+impl<T: EthSpec> KzgVerifiedBlob<T> {
+    pub fn to_blob(self) -> Arc<BlobSidecar<T>> {
+        self.blob
+    }
+    pub fn as_blob(&self) -> &BlobSidecar<T> {
+        &self.blob
     }
-    pub fn proposer_index(&self) -> u64 {
-        self.blob.message.proposer_index
+    /// This is cheap as we're calling clone on an Arc
+    pub fn clone_blob(&self) -> Arc<BlobSidecar<T>> {
+        self.blob.clone()
+    }
+    pub fn blob_index(&self) -> u64 {
+        self.blob.index
     }
 }

+#[cfg(test)]
+impl<T: EthSpec> KzgVerifiedBlob<T> {
+    pub fn new(blob: BlobSidecar<T>) -> Self {
+        Self {
+            blob: Arc::new(blob),
+        }
+    }
+}
+
+/// Complete kzg verification for a `BlobSidecar`.
+///
+/// Returns an error if the kzg verification check fails.
+pub fn verify_kzg_for_blob<T: EthSpec>(
+    blob: Arc<BlobSidecar<T>>,
+    kzg: &Kzg,
+) -> Result<KzgVerifiedBlob<T>, KzgError> {
+    validate_blob::<T>(kzg, &blob.blob, blob.kzg_commitment, blob.kzg_proof)?;
+
+    Ok(KzgVerifiedBlob { blob })
+}
+
+/// Complete kzg verification for a list of `BlobSidecar`s.
+/// Returns an error if any of the `BlobSidecar`s fails kzg verification.
+///
+/// Note: This function should be preferred over calling `verify_kzg_for_blob`
+/// in a loop since this function kzg verifies a list of blobs more efficiently.
+pub fn verify_kzg_for_blob_list<T: EthSpec>(
+    blob_list: &BlobSidecarList<T>,
+    kzg: &Kzg,
+) -> Result<(), KzgError> {
+    let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list
+        .iter()
+        .map(|blob| (&blob.blob, (blob.kzg_commitment, blob.kzg_proof)))
+        .unzip();
+    validate_blobs::<T>(kzg, commitments.as_slice(), blobs, proofs.as_slice())
+}
+
 pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
-    signed_blob_sidecar: SignedBlobSidecar<T::EthSpec>,
+    blob_sidecar: Arc<BlobSidecar<T::EthSpec>>,
     subnet: u64,
     chain: &BeaconChain<T>,
 ) -> Result<GossipVerifiedBlob<T>, GossipBlobError<T::EthSpec>> {
-    let blob_slot = signed_blob_sidecar.message.slot;
-    let blob_index = signed_blob_sidecar.message.index;
-    let block_parent_root = signed_blob_sidecar.message.block_parent_root;
-    let blob_proposer_index = signed_blob_sidecar.message.proposer_index;
-    let block_root = signed_blob_sidecar.message.block_root;
+    let blob_slot = blob_sidecar.slot();
+    let blob_index = blob_sidecar.index;
+    let block_parent_root = blob_sidecar.block_parent_root();
+    let blob_proposer_index = blob_sidecar.block_proposer_index();
+    let block_root = blob_sidecar.block_root();
     let blob_epoch = blob_slot.epoch(T::EthSpec::slots_per_epoch());
+    let signed_block_header = &blob_sidecar.signed_block_header;
+
+    // This condition is not possible if we have received the blob from the network
+    // since we only subscribe to `MaxBlobsPerBlock` subnets over gossip network.
+    // We include this check only for completeness.
+    // Getting this error would imply something very wrong with our networking decoding logic.
+    if blob_index >= T::EthSpec::max_blobs_per_block() as u64 {
+        return Err(GossipBlobError::InvalidSubnet {
+            expected: subnet,
+            received: blob_index,
+        });
+    }

     // Verify that the blob_sidecar was received on the correct subnet. 
if blob_index != subnet { @@ -213,8 +360,6 @@ pub fn validate_blob_sidecar_for_gossip( }); } - let blob_root = get_blob_root(&signed_blob_sidecar); - // Verify that the sidecar is not from a future slot. let latest_permissible_slot = chain .slot_clock @@ -240,11 +385,12 @@ pub fn validate_blob_sidecar_for_gossip( }); } - // Verify that this is the first blob sidecar received for the (sidecar.block_root, sidecar.index) tuple + // Verify that this is the first blob sidecar received for the tuple: + // (block_header.slot, block_header.proposer_index, blob_sidecar.index) if chain .observed_blob_sidecars .read() - .is_known(&signed_blob_sidecar.message) + .is_known(&blob_sidecar) .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? { return Err(GossipBlobError::RepeatBlob { @@ -254,18 +400,31 @@ pub fn validate_blob_sidecar_for_gossip( }); } + // Verify the inclusion proof in the sidecar + let _timer = metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION); + if !blob_sidecar + .verify_blob_sidecar_inclusion_proof() + .map_err(GossipBlobError::InclusionProof)? + { + return Err(GossipBlobError::InvalidInclusionProof); + } + drop(_timer); + + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + // We have already verified that the blob is past finalization, so we can // just check fork choice for the block's parent. - let Some(parent_block) = chain - .canonical_head - .fork_choice_read_lock() - .get_block(&block_parent_root) - else { - return Err(GossipBlobError::BlobParentUnknown( - signed_blob_sidecar.message, - )); + let Some(parent_block) = fork_choice.get_block(&block_parent_root) else { + return Err(GossipBlobError::BlobParentUnknown(blob_sidecar)); }; + // Do not process a blob that does not descend from the finalized root. + // We just loaded the parent_block, so we can be sure that it exists in fork choice. + if !fork_choice.is_finalized_checkpoint_or_descendant(block_parent_root) { + return Err(GossipBlobError::NotFinalizedDescendant { block_parent_root }); + } + drop(fork_choice); + if parent_block.slot >= blob_slot { return Err(GossipBlobError::BlobIsNotLaterThanParent { blob_slot, @@ -273,8 +432,6 @@ pub fn validate_blob_sidecar_for_gossip( }); } - // Note: We check that the proposer_index matches against the shuffling first to avoid - // signature verification against an invalid proposer_index. let proposer_shuffling_root = if parent_block.slot.epoch(T::EthSpec::slots_per_epoch()) == blob_epoch { parent_block @@ -374,38 +531,26 @@ pub fn validate_blob_sidecar_for_gossip( .get(blob_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize) .ok_or_else(|| BeaconChainError::NoProposerForSlot(blob_slot))?; + let fork = state.fork(); // Prime the proposer shuffling cache with the newly-learned value. chain.beacon_proposer_cache.lock().insert( blob_epoch, proposer_shuffling_root, proposers, - state.fork(), + fork, )?; - (proposer_index, state.fork()) + (proposer_index, fork) } }; - if proposer_index != blob_proposer_index as usize { - return Err(GossipBlobError::ProposerIndexMismatch { - sidecar: blob_proposer_index as usize, - local: proposer_index, - }); - } - - // Signature verification + // Signature verify the signed block header. 
let signature_is_valid = { - let pubkey_cache = chain - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout) - .map_err(GossipBlobError::BeaconChainError)?; - + let pubkey_cache = + get_validator_pubkey_cache(chain).map_err(|_| GossipBlobError::PubkeyCacheTimeout)?; let pubkey = pubkey_cache .get(proposer_index) .ok_or_else(|| GossipBlobError::UnknownValidator(proposer_index as u64))?; - - signed_blob_sidecar.verify_signature( - Some(blob_root), + signed_block_header.verify_signature::( pubkey, &fork, chain.genesis_validators_root, @@ -414,7 +559,14 @@ pub fn validate_blob_sidecar_for_gossip( }; if !signature_is_valid { - return Err(GossipBlobError::ProposerSignatureInvalid); + return Err(GossipBlobError::ProposalSignatureInvalid); + } + + if proposer_index != blob_proposer_index as usize { + return Err(GossipBlobError::ProposerIndexMismatch { + sidecar: blob_proposer_index as usize, + local: proposer_index, + }); } // Now the signature is valid, store the proposal so we don't accept another blob sidecar @@ -431,7 +583,7 @@ pub fn validate_blob_sidecar_for_gossip( if chain .observed_blob_sidecars .write() - .observe_sidecar(&signed_blob_sidecar.message) + .observe_sidecar(&blob_sidecar) .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? { return Err(GossipBlobError::RepeatBlob { @@ -441,106 +593,27 @@ pub fn validate_blob_sidecar_for_gossip( }); } + // Kzg verification for gossip blob sidecar + let kzg = chain + .kzg + .as_ref() + .ok_or(GossipBlobError::KzgNotInitialized)?; + let kzg_verified_blob = + verify_kzg_for_blob(blob_sidecar, kzg).map_err(GossipBlobError::KzgError)?; + Ok(GossipVerifiedBlob { - blob: signed_blob_sidecar, + block_root, + blob: kzg_verified_blob, }) } -/// Wrapper over a `BlobSidecar` for which we have completed kzg verification. -/// i.e. `verify_blob_kzg_proof(blob, commitment, proof) == true`. -#[derive(Debug, Derivative, Clone, Encode, Decode)] -#[derivative(PartialEq, Eq)] -#[ssz(struct_behaviour = "transparent")] -pub struct KzgVerifiedBlob { - blob: Arc>, -} - -impl PartialOrd for KzgVerifiedBlob { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for KzgVerifiedBlob { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.blob.cmp(&other.blob) - } -} - -impl KzgVerifiedBlob { - pub fn to_blob(self) -> Arc> { - self.blob - } - pub fn as_blob(&self) -> &BlobSidecar { - &self.blob - } - pub fn clone_blob(&self) -> Arc> { - self.blob.clone() - } - pub fn block_root(&self) -> Hash256 { - self.blob.block_root - } - pub fn blob_index(&self) -> u64 { - self.blob.index - } -} - -#[cfg(test)] -impl KzgVerifiedBlob { - pub fn new(blob: BlobSidecar) -> Self { - Self { - blob: Arc::new(blob), - } - } -} - -/// Complete kzg verification for a `GossipVerifiedBlob`. -/// -/// Returns an error if the kzg verification check fails. -pub fn verify_kzg_for_blob( - blob: Arc>, - kzg: &Kzg, -) -> Result, AvailabilityCheckError> { - let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES); - if validate_blob::(kzg, &blob.blob, blob.kzg_commitment, blob.kzg_proof) - .map_err(AvailabilityCheckError::Kzg)? - { - Ok(KzgVerifiedBlob { blob }) - } else { - Err(AvailabilityCheckError::KzgVerificationFailed) - } -} - -/// Complete kzg verification for a list of `BlobSidecar`s. -/// Returns an error if any of the `BlobSidecar`s fails kzg verification. 
-/// -/// Note: This function should be preferred over calling `verify_kzg_for_blob` -/// in a loop since this function kzg verifies a list of blobs more efficiently. -pub fn verify_kzg_for_blob_list( - blob_list: &BlobSidecarList, - kzg: &Kzg, -) -> Result<(), AvailabilityCheckError> { - let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES); - let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list - .iter() - .map(|blob| (&blob.blob, (blob.kzg_commitment, blob.kzg_proof))) - .unzip(); - if validate_blobs::(kzg, commitments.as_slice(), blobs, proofs.as_slice()) - .map_err(AvailabilityCheckError::Kzg)? - { - Ok(()) - } else { - Err(AvailabilityCheckError::KzgVerificationFailed) - } -} - /// Returns the canonical root of the given `blob`. /// /// Use this function to ensure that we report the blob hashing time Prometheus metric. -pub fn get_blob_root(blob: &SignedBlobSidecar) -> Hash256 { +pub fn get_blob_root(blob: &BlobSidecar) -> Hash256 { let blob_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOB_ROOT); - let blob_root = blob.message.tree_hash_root(); + let blob_root = blob.tree_hash_root(); metrics::stop_timer(blob_root_timer); diff --git a/beacon_node/beacon_chain/src/block_times_cache.rs b/beacon_node/beacon_chain/src/block_times_cache.rs index 484de841de5..c5293bcb0ee 100644 --- a/beacon_node/beacon_chain/src/block_times_cache.rs +++ b/beacon_node/beacon_chain/src/block_times_cache.rs @@ -23,7 +23,7 @@ pub struct Timestamps { } // Helps arrange delay data so it is more relevant to metrics. -#[derive(Default)] +#[derive(Debug, Default)] pub struct BlockDelays { pub observed: Option, pub imported: Option, @@ -51,7 +51,7 @@ impl BlockDelays { // If the block was received via gossip, we can record the client type of the peer which sent us // the block. -#[derive(Clone, Default)] +#[derive(Debug, Clone, Default, PartialEq)] pub struct BlockPeerInfo { pub id: Option, pub client: Option, @@ -80,6 +80,8 @@ pub struct BlockTimesCache { /// Helper methods to read from and write to the cache. impl BlockTimesCache { + /// Set the observation time for `block_root` to `timestamp` if `timestamp` is less than + /// any previous timestamp at which this block was observed. pub fn set_time_observed( &mut self, block_root: BlockRoot, @@ -92,11 +94,19 @@ impl BlockTimesCache { .cache .entry(block_root) .or_insert_with(|| BlockTimesCacheValue::new(slot)); - block_times.timestamps.observed = Some(timestamp); - block_times.peer_info = BlockPeerInfo { - id: peer_id, - client: peer_client, - }; + match block_times.timestamps.observed { + Some(existing_observation_time) if existing_observation_time <= timestamp => { + // Existing timestamp is earlier, do nothing. + } + _ => { + // No existing timestamp, or new timestamp is earlier. 
+ block_times.timestamps.observed = Some(timestamp); + block_times.peer_info = BlockPeerInfo { + id: peer_id, + client: peer_client, + }; + } + } } pub fn set_time_imported(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { @@ -141,3 +151,71 @@ impl BlockTimesCache { .retain(|_, cache| cache.slot > current_slot.saturating_sub(64_u64)); } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn observed_time_uses_minimum() { + let mut cache = BlockTimesCache::default(); + + let block_root = Hash256::zero(); + let slot = Slot::new(100); + + let slot_start_time = Duration::from_secs(0); + + let ts1 = Duration::from_secs(5); + let ts2 = Duration::from_secs(6); + let ts3 = Duration::from_secs(4); + + let peer_info2 = BlockPeerInfo { + id: Some("peer2".to_string()), + client: Some("lighthouse".to_string()), + }; + + let peer_info3 = BlockPeerInfo { + id: Some("peer3".to_string()), + client: Some("prysm".to_string()), + }; + + cache.set_time_observed(block_root, slot, ts1, None, None); + + assert_eq!( + cache.get_block_delays(block_root, slot_start_time).observed, + Some(ts1) + ); + assert_eq!(cache.get_peer_info(block_root), BlockPeerInfo::default()); + + // Second observation with higher timestamp should not override anything, even though it has + // superior peer info. + cache.set_time_observed( + block_root, + slot, + ts2, + peer_info2.id.clone(), + peer_info2.client.clone(), + ); + + assert_eq!( + cache.get_block_delays(block_root, slot_start_time).observed, + Some(ts1) + ); + assert_eq!(cache.get_peer_info(block_root), BlockPeerInfo::default()); + + // Third observation with lower timestamp should override everything. + cache.set_time_observed( + block_root, + slot, + ts3, + peer_info3.id.clone(), + peer_info3.client.clone(), + ); + + assert_eq!( + cache.get_block_delays(block_root, slot_start_time).observed, + Some(ts3) + ); + assert_eq!(cache.get_peer_info(block_root), peer_info3); + } +} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 65cf7a728bc..e86ca85bbfa 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -70,7 +70,7 @@ use crate::{ metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; -use eth2::types::{EventKind, SignedBlockContents}; +use eth2::types::{EventKind, PublishBlockRequest}; use execution_layer::PayloadStatus; pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; @@ -95,15 +95,15 @@ use std::fs; use std::io::Write; use std::sync::Arc; use std::time::Duration; -use store::{Error as DBError, HotStateSummary, KeyValueStore, SignedBlobSidecarList, StoreOp}; +use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use tree_hash::TreeHash; -use types::ExecPayload; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; +use types::{BlobSidecar, ExecPayload}; pub const POS_PANDA_BANNER: &str = r#" ,,, ,,, ,,, ,,, @@ -507,7 +507,7 @@ pub enum BlockSlashInfo { } impl BlockSlashInfo> { - pub fn from_early_error(header: SignedBeaconBlockHeader, e: BlockError) -> Self { + pub fn from_early_error_block(header: SignedBeaconBlockHeader, e: BlockError) -> Self { match e { 
BlockError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e), // `InvalidSignature` could indicate any signature in the block, so we want @@ -517,17 +517,28 @@ impl BlockSlashInfo> { } } +impl BlockSlashInfo> { + pub fn from_early_error_blob(header: SignedBeaconBlockHeader, e: GossipBlobError) -> Self { + match e { + GossipBlobError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e), + // `InvalidSignature` could indicate any signature in the block, so we want + // to recheck the proposer signature alone. + _ => BlockSlashInfo::SignatureNotChecked(header, e), + } + } +} + /// Process invalid blocks to see if they are suitable for the slasher. /// /// If no slasher is configured, this is a no-op. -fn process_block_slash_info( +pub(crate) fn process_block_slash_info( chain: &BeaconChain, - slash_info: BlockSlashInfo>, -) -> BlockError { + slash_info: BlockSlashInfo, +) -> TErr { if let Some(slasher) = chain.slasher.as_ref() { let (verified_header, error) = match slash_info { BlockSlashInfo::SignatureNotChecked(header, e) => { - if verify_header_signature(chain, &header).is_ok() { + if verify_header_signature::<_, TErr>(chain, &header).is_ok() { (header, e) } else { return e; @@ -673,7 +684,6 @@ pub trait IntoGossipVerifiedBlockContents: Sized { chain: &BeaconChain, ) -> Result, BlockContentsError>; fn inner_block(&self) -> &SignedBeaconBlock; - fn inner_blobs(&self) -> Option>; } impl IntoGossipVerifiedBlockContents for GossipVerifiedBlockContents { @@ -686,45 +696,40 @@ impl IntoGossipVerifiedBlockContents for GossipVerifiedB fn inner_block(&self) -> &SignedBeaconBlock { self.0.block.as_block() } - fn inner_blobs(&self) -> Option> { - self.1.as_ref().map(|blobs| { - VariableList::from( - blobs - .into_iter() - .map(GossipVerifiedBlob::signed_blob) - .collect::>(), - ) - }) - } } -impl IntoGossipVerifiedBlockContents for SignedBlockContents { +impl IntoGossipVerifiedBlockContents for PublishBlockRequest { fn into_gossip_verified_block( self, chain: &BeaconChain, ) -> Result, BlockContentsError> { let (block, blobs) = self.deconstruct(); - let gossip_verified_block = GossipVerifiedBlock::new(Arc::new(block), chain)?; + let gossip_verified_blobs = blobs - .map(|blobs| { - Ok::<_, GossipBlobError>(VariableList::from( - blobs - .into_iter() - .map(|blob| GossipVerifiedBlob::new(blob, chain)) - .collect::, GossipBlobError>>()?, - )) + .map(|(kzg_proofs, blobs)| { + let mut gossip_verified_blobs = vec![]; + for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() { + let _timer = + metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION); + let blob = BlobSidecar::new(i, blob, &block, *kzg_proof) + .map_err(BlockContentsError::SidecarError)?; + drop(_timer); + let gossip_verified_blob = + GossipVerifiedBlob::new(Arc::new(blob), i as u64, chain)?; + gossip_verified_blobs.push(gossip_verified_blob); + } + let gossip_verified_blobs = VariableList::from(gossip_verified_blobs); + Ok::<_, BlockContentsError>(gossip_verified_blobs) }) .transpose()?; + let gossip_verified_block = GossipVerifiedBlock::new(Arc::new(block), chain)?; + Ok((gossip_verified_block, gossip_verified_blobs)) } fn inner_block(&self) -> &SignedBeaconBlock { self.signed_block() } - - fn inner_blobs(&self) -> Option> { - self.blobs_cloned() - } } /// Implemented on types that can be converted into a `ExecutionPendingBlock`. 
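In the `PublishBlockRequest` path above, blob data arrives as parallel lists of KZG proofs and blobs; each pair is assembled into a `BlobSidecar` (which also computes the inclusion proof) and gossip-verified, and the block itself is only gossip-verified afterwards. A sketch of the pairing step with hypothetical stand-in types; `make_sidecar` plays the role of `BlobSidecar::new`:

```rust
struct Blob;
struct KzgProof;
struct Block;
struct Sidecar {
    index: u64,
}

#[derive(Debug)]
struct SidecarError;

// Stand-in for `BlobSidecar::new(index, blob, block, proof)`, which in the
// real code also computes the KZG commitment inclusion proof from the block body.
fn make_sidecar(i: usize, _b: Blob, _blk: &Block, _p: &KzgProof) -> Result<Sidecar, SidecarError> {
    Ok(Sidecar { index: i as u64 })
}

fn build_sidecars(
    block: &Block,
    kzg_proofs: Vec<KzgProof>,
    blobs: Vec<Blob>,
) -> Result<Vec<Sidecar>, SidecarError> {
    kzg_proofs
        .iter()
        .zip(blobs)
        .enumerate()
        .map(|(i, (proof, blob))| make_sidecar(i, blob, block, proof))
        .collect()
}

fn main() {
    let sidecars = build_sidecars(&Block, vec![KzgProof, KzgProof], vec![Blob, Blob]).unwrap();
    assert_eq!(sidecars.len(), 2);
    assert_eq!(sidecars[1].index, 1);
}
```

Note the ordering choice visible in the hunk: constructing `gossip_verified_block` after the blobs means a request with malformed blob data fails before any block-level verification work is done.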
@@ -745,7 +750,9 @@ pub trait IntoExecutionPendingBlock: Sized { } execution_pending }) - .map_err(|slash_info| process_block_slash_info(chain, slash_info)) + .map_err(|slash_info| { + process_block_slash_info::<_, BlockError>(chain, slash_info) + }) } /// Convert the block to fully-verified form while producing data to aid checking slashability. @@ -774,7 +781,10 @@ impl GossipVerifiedBlock { // and it could be a repeat proposal (a likely cause for slashing!). let header = block.signed_block_header(); Self::new_without_slasher_checks(block, chain).map_err(|e| { - process_block_slash_info(chain, BlockSlashInfo::from_early_error(header, e)) + process_block_slash_info::<_, BlockError>( + chain, + BlockSlashInfo::from_early_error_block(header, e), + ) }) } @@ -1055,7 +1065,8 @@ impl SignatureVerifiedBlock { chain: &BeaconChain, ) -> Result>> { let header = block.signed_block_header(); - Self::new(block, block_root, chain).map_err(|e| BlockSlashInfo::from_early_error(header, e)) + Self::new(block, block_root, chain) + .map_err(|e| BlockSlashInfo::from_early_error_block(header, e)) } /// Finishes signature verification on the provided `GossipVerifedBlock`. Does not re-verify @@ -1109,7 +1120,7 @@ impl SignatureVerifiedBlock { ) -> Result>> { let header = from.block.signed_block_header(); Self::from_gossip_verified_block(from, chain) - .map_err(|e| BlockSlashInfo::from_early_error(header, e)) + .map_err(|e| BlockSlashInfo::from_early_error_block(header, e)) } pub fn block_root(&self) -> Hash256 { @@ -1908,28 +1919,45 @@ fn load_parent>( result } -/// This trait is used to unify `BlockError` and `BlobError` so -/// `cheap_state_advance_to_obtain_committees` can be re-used in gossip blob validation. -pub trait CheapStateAdvanceError: From + From + Debug { +/// This trait is used to unify `BlockError` and `GossipBlobError`. +pub trait BlockBlobError: From + From + Debug { fn not_later_than_parent_error(block_slot: Slot, state_slot: Slot) -> Self; + fn unknown_validator_error(validator_index: u64) -> Self; + fn proposer_signature_invalid() -> Self; } -impl CheapStateAdvanceError for BlockError { +impl BlockBlobError for BlockError { fn not_later_than_parent_error(block_slot: Slot, parent_slot: Slot) -> Self { BlockError::BlockIsNotLaterThanParent { block_slot, parent_slot, } } + + fn unknown_validator_error(validator_index: u64) -> Self { + BlockError::UnknownValidator(validator_index) + } + + fn proposer_signature_invalid() -> Self { + BlockError::ProposalSignatureInvalid + } } -impl CheapStateAdvanceError for GossipBlobError { +impl BlockBlobError for GossipBlobError { fn not_later_than_parent_error(blob_slot: Slot, parent_slot: Slot) -> Self { GossipBlobError::BlobIsNotLaterThanParent { blob_slot, parent_slot, } } + + fn unknown_validator_error(validator_index: u64) -> Self { + GossipBlobError::UnknownValidator(validator_index) + } + + fn proposer_signature_invalid() -> Self { + GossipBlobError::ProposalSignatureInvalid + } } /// Performs a cheap (time-efficient) state advancement so the committees and proposer shuffling for @@ -1943,7 +1971,7 @@ impl CheapStateAdvanceError for GossipBlobError { /// and `Cow::Borrowed(state)` will be returned. Otherwise, the state will be cloned, cheaply /// advanced and then returned as a `Cow::Owned`. The end result is that the given `state` is never /// mutated to be invalid (in fact, it is never changed beyond a simple committee cache build). 
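The `BlockBlobError` trait above exists so shared helpers such as `verify_header_signature` and `cheap_state_advance_to_obtain_committees` can be written once and instantiated for either error enum. A self-contained sketch of the pattern, with illustrative names rather than the real Lighthouse definitions:

```rust
// A small constructor trait lets one helper serve both the block and the
// blob error enums.
trait VerifyError: std::fmt::Debug {
    fn unknown_validator(index: u64) -> Self;
    fn signature_invalid() -> Self;
}

#[derive(Debug)]
enum BlockErr {
    UnknownValidator(u64),
    ProposalSignatureInvalid,
}

#[derive(Debug)]
enum BlobErr {
    UnknownValidator(u64),
    ProposalSignatureInvalid,
}

impl VerifyError for BlockErr {
    fn unknown_validator(i: u64) -> Self {
        BlockErr::UnknownValidator(i)
    }
    fn signature_invalid() -> Self {
        BlockErr::ProposalSignatureInvalid
    }
}

impl VerifyError for BlobErr {
    fn unknown_validator(i: u64) -> Self {
        BlobErr::UnknownValidator(i)
    }
    fn signature_invalid() -> Self {
        BlobErr::ProposalSignatureInvalid
    }
}

// Written once, instantiated per error type, mirroring how
// `verify_header_signature::<_, Err>` is shared between the two gossip paths.
fn verify<E: VerifyError>(pubkey: Option<&str>, sig_ok: bool, index: u64) -> Result<(), E> {
    let _pk = pubkey.ok_or_else(|| E::unknown_validator(index))?;
    if sig_ok {
        Ok(())
    } else {
        Err(E::signature_invalid())
    }
}

fn main() {
    assert!(matches!(
        verify::<BlockErr>(None, true, 9),
        Err(BlockErr::UnknownValidator(9))
    ));
    assert!(verify::<BlobErr>(Some("0x93a1..."), true, 9).is_ok());
}
```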
-pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: CheapStateAdvanceError>( +pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: BlockBlobError>( state: &'a mut BeaconState, state_root_opt: Option, block_slot: Slot, @@ -1979,12 +2007,11 @@ pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: CheapStateA /// Obtains a read-locked `ValidatorPubkeyCache` from the `chain`. pub fn get_validator_pubkey_cache( chain: &BeaconChain, -) -> Result>, BlockError> { +) -> Result>, BeaconChainError> { chain .validator_pubkey_cache .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout) - .map_err(BlockError::BeaconChainError) } /// Produces an _empty_ `BlockSignatureVerifier`. @@ -2025,14 +2052,14 @@ fn get_signature_verifier<'a, T: BeaconChainTypes>( /// Verify that `header` was signed with a valid signature from its proposer. /// /// Return `Ok(())` if the signature is valid, and an `Err` otherwise. -fn verify_header_signature( +fn verify_header_signature( chain: &BeaconChain, header: &SignedBeaconBlockHeader, -) -> Result<(), BlockError> { +) -> Result<(), Err> { let proposer_pubkey = get_validator_pubkey_cache(chain)? .get(header.message.proposer_index as usize) .cloned() - .ok_or(BlockError::UnknownValidator(header.message.proposer_index))?; + .ok_or(Err::unknown_validator_error(header.message.proposer_index))?; let head_fork = chain.canonical_head.cached_head().head_fork(); if header.verify_signature::( @@ -2043,7 +2070,7 @@ fn verify_header_signature( ) { Ok(()) } else { - Err(BlockError::ProposalSignatureInvalid) + Err(Err::proposer_signature_invalid()) } } diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 9cd853ba8c5..a6840ed7648 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -8,7 +8,7 @@ use derivative::Derivative; use ssz_types::VariableList; use state_processing::ConsensusContext; use std::sync::Arc; -use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; +use types::blob_sidecar::{BlobIdentifier, BlobSidecarError, FixedBlobSidecarList}; use types::{ BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, @@ -98,13 +98,6 @@ impl RpcBlock { return Err(AvailabilityCheckError::MissingBlobs); } for (blob, &block_commitment) in blobs.iter().zip(block_commitments.iter()) { - let blob_block_root = blob.block_root; - if blob_block_root != block_root { - return Err(AvailabilityCheckError::InconsistentBlobBlockRoots { - block_root, - blob_block_root, - }); - } let blob_commitment = blob.kzg_commitment; if blob_commitment != block_commitment { return Err(AvailabilityCheckError::KzgCommitmentMismatch { @@ -309,6 +302,7 @@ pub type GossipVerifiedBlockContents = pub enum BlockContentsError { BlockError(BlockError), BlobError(GossipBlobError), + SidecarError(BlobSidecarError), } impl From> for BlockContentsError { @@ -332,6 +326,9 @@ impl std::fmt::Display for BlockContentsError { BlockContentsError::BlobError(err) => { write!(f, "BlobError({})", err) } + BlockContentsError::SidecarError(err) => { + write!(f, "SidecarError({:?})", err) + } } } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index ad328077d0e..2fcb3b7a9e8 100644 --- 
a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -200,7 +200,9 @@ impl DataAvailabilityChecker { let mut verified_blobs = vec![]; if let Some(kzg) = self.kzg.as_ref() { for blob in blobs.iter().flatten() { - verified_blobs.push(verify_kzg_for_blob(blob.clone(), kzg)?) + verified_blobs.push( + verify_kzg_for_blob(blob.clone(), kzg).map_err(AvailabilityCheckError::Kzg)?, + ); } } else { return Err(AvailabilityCheckError::KzgNotInitialized); @@ -209,7 +211,6 @@ impl DataAvailabilityChecker { .put_kzg_verified_blobs(block_root, verified_blobs) } - /// This first validates the KZG commitments included in the blob sidecar. /// Check if we've cached other blobs for this block. If it completes a set and we also /// have a block cached, return the `Availability` variant triggering block import. /// Otherwise cache the blob sidecar. @@ -219,15 +220,8 @@ impl DataAvailabilityChecker { &self, gossip_blob: GossipVerifiedBlob, ) -> Result, AvailabilityCheckError> { - // Verify the KZG commitments. - let kzg_verified_blob = if let Some(kzg) = self.kzg.as_ref() { - verify_kzg_for_blob(gossip_blob.to_blob(), kzg)? - } else { - return Err(AvailabilityCheckError::KzgNotInitialized); - }; - self.availability_cache - .put_kzg_verified_blobs(kzg_verified_blob.block_root(), vec![kzg_verified_blob]) + .put_kzg_verified_blobs(gossip_blob.block_root(), vec![gossip_blob.into_inner()]) } /// Check if we have all the blobs for a block. Returns `Availability` which has information @@ -268,7 +262,8 @@ impl DataAvailabilityChecker { .kzg .as_ref() .ok_or(AvailabilityCheckError::KzgNotInitialized)?; - verify_kzg_for_blob_list(&blob_list, kzg)?; + verify_kzg_for_blob_list(&blob_list, kzg) + .map_err(AvailabilityCheckError::Kzg)?; Some(blob_list) } else { None @@ -375,8 +370,8 @@ impl DataAvailabilityChecker { block_root: Hash256, blob: &GossipVerifiedBlob, ) { - let index = blob.as_blob().index; - let commitment = blob.as_blob().kzg_commitment; + let index = blob.index(); + let commitment = blob.kzg_commitment(); self.processing_cache .write() .entry(block_root) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index b2979f2bf04..0804fe3b9ab 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -16,10 +16,6 @@ pub enum Error { BlobIndexInvalid(u64), StoreError(store::Error), DecodeError(ssz::DecodeError), - InconsistentBlobBlockRoots { - block_root: Hash256, - blob_block_root: Hash256, - }, ParentStateMissing(Hash256), BlockReplayError(state_processing::BlockReplayError), RebuildingStateCaches(BeaconStateError), @@ -47,8 +43,7 @@ impl Error { Error::Kzg(_) | Error::BlobIndexInvalid(_) | Error::KzgCommitmentMismatch { .. } - | Error::KzgVerificationFailed - | Error::InconsistentBlobBlockRoots { .. 
} => ErrorCategory::Malicious, + | Error::KzgVerificationFailed => ErrorCategory::Malicious, } } } @@ -76,3 +71,9 @@ impl From for Error { Self::BlockReplayError(value) } } + +impl From for Error { + fn from(value: KzgError) -> Self { + Self::Kzg(value) + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 6033293b825..36d7c2acad8 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -125,7 +125,10 @@ impl PendingComponents { for maybe_blob in self.verified_blobs.iter() { if maybe_blob.is_some() { return maybe_blob.as_ref().map(|kzg_verified_blob| { - kzg_verified_blob.as_blob().slot.epoch(T::slots_per_epoch()) + kzg_verified_blob + .as_blob() + .slot() + .epoch(T::slots_per_epoch()) }); } } @@ -418,15 +421,7 @@ impl OverflowLRUCache { ) -> Result, AvailabilityCheckError> { let mut fixed_blobs = FixedVector::default(); - // Initial check to ensure all provided blobs have a consistent block root. for blob in kzg_verified_blobs { - let blob_block_root = blob.block_root(); - if blob_block_root != block_root { - return Err(AvailabilityCheckError::InconsistentBlobBlockRoots { - block_root, - blob_block_root, - }); - } if let Some(blob_opt) = fixed_blobs.get_mut(blob.blob_index() as usize) { *blob_opt = Some(blob); } @@ -651,7 +646,7 @@ impl OverflowLRUCache { OverflowKey::Blob(_, _) => { KzgVerifiedBlob::::from_ssz_bytes(value_bytes.as_slice())? .as_blob() - .slot + .slot() .epoch(T::EthSpec::slots_per_epoch()) } }; @@ -743,9 +738,7 @@ impl ssz::Decode for OverflowKey { mod test { use super::*; use crate::{ - blob_verification::{ - validate_blob_sidecar_for_gossip, verify_kzg_for_blob, GossipVerifiedBlob, - }, + blob_verification::{validate_blob_sidecar_for_gossip, GossipVerifiedBlob}, block_verification::PayloadVerificationOutcome, block_verification_types::{AsBlock, BlockImportData}, data_availability_checker::STATE_LRU_CAPACITY, @@ -926,12 +919,13 @@ mod test { } info!(log, "done printing kzg commitments"); - let gossip_verified_blobs = if let Some(blobs) = maybe_blobs { - Vec::from(blobs) + let gossip_verified_blobs = if let Some((kzg_proofs, blobs)) = maybe_blobs { + let sidecars = BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap(); + Vec::from(sidecars) .into_iter() - .map(|signed_blob| { - let subnet = signed_blob.message.index; - validate_blob_sidecar_for_gossip(signed_blob, subnet, &harness.chain) + .map(|sidecar| { + let subnet = sidecar.index; + validate_blob_sidecar_for_gossip(sidecar, subnet, &harness.chain) .expect("should validate blob") }) .collect() @@ -1036,17 +1030,9 @@ mod test { ); } - let kzg = harness - .chain - .kzg - .as_ref() - .cloned() - .expect("kzg should exist"); let mut kzg_verified_blobs = Vec::new(); for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { - let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref()) - .expect("kzg should verify"); - kzg_verified_blobs.push(kzg_verified_blob); + kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) .expect("should put blob"); @@ -1072,9 +1058,7 @@ mod test { let root = pending_block.import_data.block_root; let mut kzg_verified_blobs = vec![]; for gossip_blob in blobs { - let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), 
kzg.as_ref()) - .expect("kzg should verify"); - kzg_verified_blobs.push(kzg_verified_blob); + kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) .expect("should put blob"); @@ -1198,20 +1182,11 @@ mod test { assert!(cache.critical.read().store_keys.contains(&roots[0])); assert!(cache.critical.read().store_keys.contains(&roots[1])); - let kzg = harness - .chain - .kzg - .as_ref() - .cloned() - .expect("kzg should exist"); - let blobs_0 = pending_blobs.pop_front().expect("should have blobs"); let expected_blobs = blobs_0.len(); let mut kzg_verified_blobs = vec![]; for (blob_index, gossip_blob) in blobs_0.into_iter().enumerate() { - let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref()) - .expect("kzg should verify"); - kzg_verified_blobs.push(kzg_verified_blob); + kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache .put_kzg_verified_blobs(roots[0], kzg_verified_blobs.clone()) .expect("should put blob"); @@ -1278,13 +1253,6 @@ mod test { pending_blobs.push_back(blobs); } - let kzg = harness - .chain - .kzg - .as_ref() - .cloned() - .expect("kzg should exist"); - for _ in 0..(n_epochs * capacity) { let pending_block = pending_blocks.pop_front().expect("should have block"); let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs"); @@ -1295,9 +1263,7 @@ mod test { let one_blob = pending_block_blobs .pop() .expect("should have at least one blob"); - let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref()) - .expect("kzg should verify"); - let kzg_verified_blobs = vec![kzg_verified_blob]; + let kzg_verified_blobs = vec![one_blob.into_inner()]; // generate random boolean let block_first = (rand::random::() % 2) == 0; if block_first { @@ -1418,13 +1384,6 @@ mod test { pending_blobs.push_back(blobs); } - let kzg = harness - .chain - .kzg - .as_ref() - .cloned() - .expect("kzg should exist"); - let mut remaining_blobs = HashMap::new(); for _ in 0..(n_epochs * capacity) { let pending_block = pending_blocks.pop_front().expect("should have block"); @@ -1436,9 +1395,7 @@ mod test { let one_blob = pending_block_blobs .pop() .expect("should have at least one blob"); - let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref()) - .expect("kzg should verify"); - let kzg_verified_blobs = vec![kzg_verified_blob]; + let kzg_verified_blobs = vec![one_blob.into_inner()]; // generate random boolean let block_first = (rand::random::() % 2) == 0; if block_first { @@ -1551,9 +1508,7 @@ mod test { let additional_blobs = blobs.len(); let mut kzg_verified_blobs = vec![]; for (i, gossip_blob) in blobs.into_iter().enumerate() { - let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref()) - .expect("kzg should verify"); - kzg_verified_blobs.push(kzg_verified_blob); + kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = recovered_cache .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) .expect("should put blob"); diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 9f5186f3104..924cc26520a 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -4,7 +4,7 @@ use types::{Blob, EthSpec, Hash256, KzgCommitment, KzgProof}; /// Converts a blob ssz List object to an array to be used with the kzg /// crypto library. 
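Note the signature change in `kzg_utils`: `validate_blob` and `validate_blobs` previously returned a boolean inside a `Result`, and every caller had to remember to branch on it; they now return `Result<(), KzgError>` (and start their own metric timers), so an invalid proof propagates with `?`. A toy sketch of why the new shape is safer, with a stand-in `KzgError` in place of the real crypto library's type:

```rust
#[derive(Debug)]
enum KzgError {
    VerificationFailed,
}

// Before: validity travels in the Ok value, which the caller can silently drop.
fn verify_old(ok: bool) -> Result<bool, KzgError> {
    Ok(ok)
}

// After: an invalid proof is an error, so `?` alone is sufficient.
fn verify_new(ok: bool) -> Result<(), KzgError> {
    if ok {
        Ok(())
    } else {
        Err(KzgError::VerificationFailed)
    }
}

fn caller() -> Result<(), KzgError> {
    // With the old shape this compiles even though the `false` result is
    // discarded, which is exactly the footgun being removed:
    let _ = verify_old(false)?;
    // With the new shape, failure cannot be ignored by accident:
    verify_new(true)?;
    Ok(())
}

fn main() {
    caller().unwrap();
}
```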
fn ssz_blob_to_crypto_blob(blob: &Blob) -> Result { - KzgBlob::from_bytes(blob.as_ref()) + KzgBlob::from_bytes(blob.as_ref()).map_err(Into::into) } /// Validate a single blob-commitment-proof triplet from a `BlobSidecar`. @@ -13,7 +13,8 @@ pub fn validate_blob( blob: &Blob, kzg_commitment: KzgCommitment, kzg_proof: KzgProof, -) -> Result { +) -> Result<(), KzgError> { + let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES); let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) } @@ -24,7 +25,8 @@ pub fn validate_blobs( expected_kzg_commitments: &[KzgCommitment], blobs: Vec<&Blob>, kzg_proofs: &[KzgProof], -) -> Result { +) -> Result<(), KzgError> { + let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES); let blobs = blobs .into_iter() .map(|blob| ssz_blob_to_crypto_blob::(blob)) diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index e2d37078ac5..8edb7b4fc80 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -57,7 +57,7 @@ pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse, - BeaconBlockResponseType, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, + BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 0fe68ba19ea..ca04366b01e 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1004,6 +1004,14 @@ lazy_static! { "beacon_blobs_sidecar_gossip_verification_seconds", "Full runtime of blob sidecars gossip verification" ); + pub static ref BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION: Result = try_create_histogram( + "blob_sidecar_inclusion_proof_verification_seconds", + "Time taken to verify blob sidecar inclusion proof" + ); + pub static ref BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: Result = try_create_histogram( + "blob_sidecar_inclusion_proof_computation_seconds", + "Time taken to compute blob sidecar inclusion proof" + ); } // Fifth lazy-static block is used to account for macro recursion limit. diff --git a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs b/beacon_node/beacon_chain/src/observed_blob_sidecars.rs index f16f38bad55..4f849614490 100644 --- a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_blob_sidecars.rs @@ -5,8 +5,7 @@ use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use std::sync::Arc; -use types::{BlobSidecar, EthSpec, Hash256, Slot}; +use types::{BlobSidecar, EthSpec, Slot}; #[derive(Debug, PartialEq)] pub enum Error { @@ -29,8 +28,8 @@ pub enum Error { /// like checking the proposer signature. pub struct ObservedBlobSidecars { finalized_slot: Slot, - /// Stores all received blob indices for a given `(Root, Slot)` tuple. - items: HashMap<(Hash256, Slot), HashSet>, + /// Stores all received blob indices for a given `(ValidatorIndex, Slot)` tuple. 
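Keying `ObservedBlobSidecars` by `(proposer_index, slot)` instead of `(block_root, slot)` means two sidecars with the same index from the same proposer at the same slot collide even when they commit to different blocks, which is exactly the equivocation case a block-root key misses. A minimal sketch with simplified types:

```rust
use std::collections::{HashMap, HashSet};

#[derive(Default)]
struct Observed {
    // (proposer_index, slot) -> observed blob indices
    items: HashMap<(u64, u64), HashSet<u64>>,
}

impl Observed {
    /// Returns `true` if this (proposer, slot, index) was not seen before.
    fn observe(&mut self, proposer: u64, slot: u64, index: u64) -> bool {
        self.items.entry((proposer, slot)).or_default().insert(index)
    }
}

fn main() {
    let mut cache = Observed::default();
    assert!(cache.observe(420, 7, 0));
    // Same proposer, slot, and blob index, but (hypothetically) committing to
    // a different block root: still flagged as a repeat, which the old
    // (block_root, slot) key would have accepted as new.
    assert!(!cache.observe(420, 7, 0));
}
```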
+ items: HashMap<(u64, Slot), HashSet>, _phantom: PhantomData, } @@ -46,16 +45,16 @@ impl Default for ObservedBlobSidecars { } impl ObservedBlobSidecars { - /// Observe the `blob_sidecar` at (`blob_sidecar.block_root, blob_sidecar.slot`). + /// Observe the `blob_sidecar` at (`blob_sidecar.block_proposer_index, blob_sidecar.slot`). /// This will update `self` so future calls to it indicate that this `blob_sidecar` is known. /// /// The supplied `blob_sidecar` **MUST** have completed proposer signature verification. - pub fn observe_sidecar(&mut self, blob_sidecar: &Arc>) -> Result { + pub fn observe_sidecar(&mut self, blob_sidecar: &BlobSidecar) -> Result { self.sanitize_blob_sidecar(blob_sidecar)?; let did_not_exist = self .items - .entry((blob_sidecar.block_root, blob_sidecar.slot)) + .entry((blob_sidecar.block_proposer_index(), blob_sidecar.slot())) .or_insert_with(|| HashSet::with_capacity(T::max_blobs_per_block())) .insert(blob_sidecar.index); @@ -63,23 +62,23 @@ impl ObservedBlobSidecars { } /// Returns `true` if the `blob_sidecar` has already been observed in the cache within the prune window. - pub fn is_known(&self, blob_sidecar: &Arc>) -> Result { + pub fn is_known(&self, blob_sidecar: &BlobSidecar) -> Result { self.sanitize_blob_sidecar(blob_sidecar)?; let is_known = self .items - .get(&(blob_sidecar.block_root, blob_sidecar.slot)) + .get(&(blob_sidecar.block_proposer_index(), blob_sidecar.slot())) .map_or(false, |set| set.contains(&blob_sidecar.index)); Ok(is_known) } - fn sanitize_blob_sidecar(&self, blob_sidecar: &Arc>) -> Result<(), Error> { + fn sanitize_blob_sidecar(&self, blob_sidecar: &BlobSidecar) -> Result<(), Error> { if blob_sidecar.index >= T::max_blobs_per_block() as u64 { return Err(Error::InvalidBlobIndex(blob_sidecar.index)); } let finalized_slot = self.finalized_slot; - if finalized_slot > 0 && blob_sidecar.slot <= finalized_slot { + if finalized_slot > 0 && blob_sidecar.slot() <= finalized_slot { return Err(Error::FinalizedBlob { - slot: blob_sidecar.slot, + slot: blob_sidecar.slot(), finalized_slot, }); } @@ -101,14 +100,15 @@ impl ObservedBlobSidecars { #[cfg(test)] mod tests { use super::*; - use types::{BlobSidecar, Hash256, MainnetEthSpec}; + use std::sync::Arc; + use types::{BlobSidecar, MainnetEthSpec}; type E = MainnetEthSpec; - fn get_blob_sidecar(slot: u64, block_root: Hash256, index: u64) -> Arc> { + fn get_blob_sidecar(slot: u64, proposer_index: u64, index: u64) -> Arc> { let mut blob_sidecar = BlobSidecar::empty(); - blob_sidecar.block_root = block_root; - blob_sidecar.slot = slot.into(); + blob_sidecar.signed_block_header.message.slot = slot.into(); + blob_sidecar.signed_block_header.message.proposer_index = proposer_index; blob_sidecar.index = index; Arc::new(blob_sidecar) } @@ -121,8 +121,8 @@ mod tests { assert_eq!(cache.items.len(), 0, "no slots should be present"); // Slot 0, index 0 - let block_root_a = Hash256::random(); - let sidecar_a = get_blob_sidecar(0, block_root_a, 0); + let proposer_index_a = 420; + let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0); assert_eq!( cache.observe_sidecar(&sidecar_a), @@ -138,12 +138,12 @@ mod tests { assert_eq!( cache.items.len(), 1, - "only one (slot, root) tuple should be present" + "only one (validator_index, slot) tuple should be present" ); assert_eq!( cache .items - .get(&(block_root_a, Slot::new(0))) + .get(&(proposer_index_a, Slot::new(0))) .expect("slot zero should be present") .len(), 1, @@ -161,7 +161,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_a, Slot::new(0))) + 
.get(&(proposer_index_a, Slot::new(0))) .expect("slot zero should be present") .len(), 1, @@ -185,7 +185,7 @@ mod tests { */ // First slot of finalized epoch - let block_b = get_blob_sidecar(E::slots_per_epoch(), Hash256::random(), 0); + let block_b = get_blob_sidecar(E::slots_per_epoch(), 419, 0); assert_eq!( cache.observe_sidecar(&block_b), @@ -205,8 +205,8 @@ mod tests { let three_epochs = E::slots_per_epoch() * 3; // First slot of finalized epoch - let block_root_b = Hash256::random(); - let block_b = get_blob_sidecar(three_epochs, block_root_b, 0); + let proposer_index_b = 421; + let block_b = get_blob_sidecar(three_epochs, proposer_index_b, 0); assert_eq!( cache.observe_sidecar(&block_b), @@ -218,7 +218,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_b, Slot::new(three_epochs))) + .get(&(proposer_index_b, Slot::new(three_epochs))) .expect("the three epochs slot should be present") .len(), 1, @@ -242,7 +242,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_b, Slot::new(three_epochs))) + .get(&(proposer_index_b, Slot::new(three_epochs))) .expect("the three epochs slot should be present") .len(), 1, @@ -255,8 +255,8 @@ mod tests { let mut cache = ObservedBlobSidecars::default(); // Slot 0, index 0 - let block_root_a = Hash256::random(); - let sidecar_a = get_blob_sidecar(0, block_root_a, 0); + let proposer_index_a = 420; + let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0); assert_eq!( cache.is_known(&sidecar_a), @@ -287,7 +287,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_a, Slot::new(0))) + .get(&(proposer_index_a, Slot::new(0))) .expect("slot zero should be present") .len(), 1, @@ -296,8 +296,8 @@ mod tests { // Slot 1, proposer 0 - let block_root_b = Hash256::random(); - let sidecar_b = get_blob_sidecar(1, block_root_b, 0); + let proposer_index_b = 421; + let sidecar_b = get_blob_sidecar(1, proposer_index_b, 0); assert_eq!( cache.is_known(&sidecar_b), @@ -325,7 +325,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_a, Slot::new(0))) + .get(&(proposer_index_a, Slot::new(0))) .expect("slot zero should be present") .len(), 1, @@ -334,7 +334,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_b, Slot::new(1))) + .get(&(proposer_index_b, Slot::new(1))) .expect("slot zero should be present") .len(), 1, @@ -342,7 +342,7 @@ mod tests { ); // Slot 0, index 1 - let sidecar_c = get_blob_sidecar(0, block_root_a, 1); + let sidecar_c = get_blob_sidecar(0, proposer_index_a, 1); assert_eq!( cache.is_known(&sidecar_c), @@ -370,7 +370,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_a, Slot::new(0))) + .get(&(proposer_index_a, Slot::new(0))) .expect("slot zero should be present") .len(), 2, @@ -379,7 +379,7 @@ mod tests { // Try adding an out of bounds index let invalid_index = E::max_blobs_per_block() as u64; - let sidecar_d = get_blob_sidecar(0, block_root_a, invalid_index); + let sidecar_d = get_blob_sidecar(0, proposer_index_a, invalid_index); assert_eq!( cache.observe_sidecar(&sidecar_d), Err(Error::InvalidBlobIndex(invalid_index)), diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 23af0c81261..eb73478dee9 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,7 +1,7 @@ use crate::block_verification_types::{AsBlock, RpcBlock}; use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; -use crate::BeaconBlockResponseType; +use 
crate::BeaconBlockResponseWrapper; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, @@ -33,8 +33,8 @@ use int_to_bytes::int_to_bytes32; use kzg::{Kzg, TrustedSetup}; use merkle_proof::MerkleTree; use operation_pool::ReceivedPreCapella; +use parking_lot::Mutex; use parking_lot::RwLockWriteGuard; -use parking_lot::{Mutex, RwLock}; use rand::rngs::StdRng; use rand::Rng; use rand::SeedableRng; @@ -52,7 +52,6 @@ use state_processing::{ use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; -use std::marker::PhantomData; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; @@ -567,7 +566,6 @@ where runtime: self.runtime, mock_execution_layer: self.mock_execution_layer, mock_builder: None, - blob_signature_cache: <_>::default(), rng: make_rng(), } } @@ -623,29 +621,9 @@ pub struct BeaconChainHarness { pub mock_execution_layer: Option>, pub mock_builder: Option>>, - /// Cache for blob signature because we don't need them for import, but we do need them - /// to test gossip validation. We always make them during block production but drop them - /// before storing them in the db. - pub blob_signature_cache: Arc>>, - pub rng: Mutex, } -#[derive(Clone, Debug, Hash, Eq, PartialEq)] -pub struct BlobSignatureKey { - block_root: Hash256, - blob_index: u64, -} - -impl BlobSignatureKey { - pub fn new(block_root: Hash256, blob_index: u64) -> Self { - Self { - block_root, - blob_index, - } - } -} - pub type CommitteeAttestations = Vec<(Attestation, SubnetId)>; pub type HarnessAttestations = Vec<(CommitteeAttestations, Option>)>; @@ -845,28 +823,9 @@ where &self, state: BeaconState, slot: Slot, - ) -> ( - SignedBlockContentsTuple>, - BeaconState, - ) { + ) -> (SignedBlindedBeaconBlock, BeaconState) { let (unblinded, new_state) = self.make_block(state, slot).await; - let maybe_blinded_blob_sidecars = unblinded.1.map(|blob_sidecar_list| { - VariableList::new( - blob_sidecar_list - .into_iter() - .map(|blob_sidecar| { - let blinded_sidecar: BlindedBlobSidecar = blob_sidecar.message.into(); - SignedSidecar { - message: Arc::new(blinded_sidecar), - signature: blob_sidecar.signature, - _phantom: PhantomData, - } - }) - .collect(), - ) - .unwrap() - }); - ((unblinded.0.into(), maybe_blinded_blob_sidecars), new_state) + (unblinded.0.into(), new_state) } /// Returns a newly created block, signed by the proposer for the given slot. 
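Throughout the harness changes below, blob data travels as a raw `(kzg_proofs, blobs)` pair next to the block and is only assembled into sidecars (with their inclusion proofs) at the point of import, via the `Option`/`Result` `transpose` idiom. A self-contained sketch of that flow, with stand-in types and a `build_sidecars` stub standing in for `BlobSidecar::build_sidecars`:

```rust
struct Block;
struct Blob;
struct Proof;
struct Sidecar;

#[derive(Debug)]
struct SidecarError;

// Stand-in for `BlobSidecar::build_sidecars(blobs, &block, proofs)`.
fn build_sidecars(blobs: Vec<Blob>, _block: &Block, proofs: Vec<Proof>)
    -> Result<Vec<Sidecar>, SidecarError>
{
    if blobs.len() != proofs.len() {
        return Err(SidecarError);
    }
    Ok(blobs.into_iter().zip(proofs).map(|_| Sidecar).collect())
}

fn import(block: Block, blob_items: Option<(Vec<Proof>, Vec<Blob>)>) -> Result<(), SidecarError> {
    // `.transpose()` turns Option<Result<_, E>> into Result<Option<_>, E>,
    // the same shape used by `process_block` / `process_block_result` below.
    let _sidecars = blob_items
        .map(|(proofs, blobs)| build_sidecars(blobs, &block, proofs))
        .transpose()?;
    Ok(())
}

fn main() {
    import(Block, Some((vec![Proof], vec![Blob]))).unwrap();
    // Pre-Deneb blocks carry no blob items and skip sidecar building entirely.
    import(Block, None).unwrap();
}
```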
@@ -874,7 +833,7 @@ where &self, mut state: BeaconState, slot: Slot, - ) -> (SignedBlockContentsTuple>, BeaconState) { + ) -> (SignedBlockContentsTuple, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); @@ -892,7 +851,7 @@ where let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); - let BeaconBlockResponseType::Full(block_response) = self + let BeaconBlockResponseWrapper::Full(block_response) = self .chain .produce_block_on_state( state, @@ -916,17 +875,12 @@ where &self.spec, ); - let block_contents: SignedBlockContentsTuple> = match &signed_block { + let block_contents: SignedBlockContentsTuple = match &signed_block { SignedBeaconBlock::Base(_) | SignedBeaconBlock::Altair(_) | SignedBeaconBlock::Merge(_) | SignedBeaconBlock::Capella(_) => (signed_block, None), - SignedBeaconBlock::Deneb(_) => ( - signed_block, - block_response - .maybe_side_car - .map(|blobs| self.sign_blobs(blobs, &block_response.state, proposer_index)), - ), + SignedBeaconBlock::Deneb(_) => (signed_block, block_response.blob_items), }; (block_contents, block_response.state) @@ -938,7 +892,7 @@ where &self, mut state: BeaconState, slot: Slot, - ) -> (SignedBlockContentsTuple>, BeaconState) { + ) -> (SignedBlockContentsTuple, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); @@ -958,7 +912,7 @@ where let pre_state = state.clone(); - let BeaconBlockResponseType::Full(block_response) = self + let BeaconBlockResponseWrapper::Full(block_response) = self .chain .produce_block_on_state( state, @@ -982,37 +936,12 @@ where &self.spec, ); - let block_contents: SignedBlockContentsTuple> = match &signed_block { + let block_contents: SignedBlockContentsTuple = match &signed_block { SignedBeaconBlock::Base(_) | SignedBeaconBlock::Altair(_) | SignedBeaconBlock::Merge(_) | SignedBeaconBlock::Capella(_) => (signed_block, None), - SignedBeaconBlock::Deneb(_) => { - if let Some(blobs) = block_response.maybe_side_car { - let signed_blobs: SignedSidecarList> = Vec::from(blobs) - .into_iter() - .map(|blob| { - blob.sign( - &self.validator_keypairs[proposer_index].sk, - &block_response.state.fork(), - block_response.state.genesis_validators_root(), - &self.spec, - ) - }) - .collect::>() - .into(); - let mut guard = self.blob_signature_cache.write(); - for blob in &signed_blobs { - guard.insert( - BlobSignatureKey::new(blob.message.block_root, blob.message.index), - blob.signature.clone(), - ); - } - (signed_block, Some(signed_blobs)) - } else { - (signed_block, None) - } - } + SignedBeaconBlock::Deneb(_) => (signed_block, block_response.blob_items), }; (block_contents, pre_state) } @@ -1051,35 +980,6 @@ where ) } - /// Sign blobs, and cache their signatures. - pub fn sign_blobs( - &self, - blobs: BlobSidecarList, - state: &BeaconState, - proposer_index: usize, - ) -> SignedSidecarList> { - let signed_blobs: SignedSidecarList> = Vec::from(blobs) - .into_iter() - .map(|blob| { - blob.sign( - &self.validator_keypairs[proposer_index].sk, - &state.fork(), - state.genesis_validators_root(), - &self.spec, - ) - }) - .collect::>() - .into(); - let mut guard = self.blob_signature_cache.write(); - for blob in &signed_blobs { - guard.insert( - BlobSignatureKey::new(blob.message.block_root, blob.message.index), - blob.signature.clone(), - ); - } - signed_blobs - } - /// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to /// `beacon_block_root`. 
The provided `state` should match the `block.state_root` for the /// `block` identified by `beacon_block_root`. @@ -1837,7 +1737,7 @@ where state: BeaconState, slot: Slot, block_modifier: impl FnOnce(&mut BeaconBlock), - ) -> (SignedBlockContentsTuple>, BeaconState) { + ) -> (SignedBlockContentsTuple, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); @@ -1935,24 +1835,20 @@ where &self, slot: Slot, block_root: Hash256, - block_contents: SignedBlockContentsTuple>, + block_contents: SignedBlockContentsTuple, ) -> Result> { self.set_current_slot(slot); - let (block, blobs) = block_contents; - // Note: we are just dropping signatures here and skipping signature verification. - let blobs_without_signatures = blobs.map(|blobs| { - VariableList::from( - blobs - .into_iter() - .map(|blob| blob.message) - .collect::>(), - ) - }); + let (block, blob_items) = block_contents; + + let sidecars = blob_items + .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs)) + .transpose() + .unwrap(); let block_hash: SignedBeaconBlockHash = self .chain .process_block( block_root, - RpcBlock::new(Some(block_root), Arc::new(block), blobs_without_signatures).unwrap(), + RpcBlock::new(Some(block_root), Arc::new(block), sidecars).unwrap(), NotifyExecutionLayer::Yes, || Ok(()), ) @@ -1965,24 +1861,20 @@ where pub async fn process_block_result( &self, - block_contents: SignedBlockContentsTuple>, + block_contents: SignedBlockContentsTuple, ) -> Result> { - let (block, blobs) = block_contents; - // Note: we are just dropping signatures here and skipping signature verification. - let blobs_without_signatures = blobs.map(|blobs| { - VariableList::from( - blobs - .into_iter() - .map(|blob| blob.message) - .collect::>(), - ) - }); + let (block, blob_items) = block_contents; + + let sidecars = blob_items + .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs)) + .transpose() + .unwrap(); let block_root = block.canonical_root(); let block_hash: SignedBeaconBlockHash = self .chain .process_block( block_root, - RpcBlock::new(Some(block_root), Arc::new(block), blobs_without_signatures).unwrap(), + RpcBlock::new(Some(block_root), Arc::new(block), sidecars).unwrap(), NotifyExecutionLayer::Yes, || Ok(()), ) @@ -2051,7 +1943,7 @@ where ) -> Result< ( SignedBeaconBlockHash, - SignedBlockContentsTuple>, + SignedBlockContentsTuple, BeaconState, ), BlockError, @@ -2603,8 +2495,6 @@ pub fn generate_rand_block_and_blobs( blobs, } = bundle; - let block_root = block.canonical_root(); - for (index, ((blob, kzg_commitment), kzg_proof)) in blobs .into_iter() .zip(commitments.into_iter()) @@ -2612,14 +2502,16 @@ pub fn generate_rand_block_and_blobs( .enumerate() { blob_sidecars.push(BlobSidecar { - block_root, index: index as u64, - slot: block.slot(), - block_parent_root: block.parent_root(), - proposer_index: block.message().proposer_index(), blob: blob.clone(), kzg_commitment, kzg_proof, + signed_block_header: block.signed_block_header(), + kzg_commitment_inclusion_proof: block + .message() + .body() + .kzg_commitment_merkle_proof(index) + .unwrap(), }); } } diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 3ac39807146..4344013b3ce 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -1,7 +1,6 @@ -#![cfg(not(debug_assertions))] +// #![cfg(not(debug_assertions))] use 
beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock}; -use beacon_chain::test_utils::BlobSignatureKey; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock, @@ -77,10 +76,8 @@ async fn get_chain_segment() -> (Vec>, Vec ( - Vec>, - Vec, ::MaxBlobsPerBlock>>>, -) { +async fn get_chain_segment_with_blob_sidecars( +) -> (Vec>, Vec>>) { let harness = get_harness(VALIDATOR_COUNT); harness @@ -111,27 +108,11 @@ async fn get_chain_segment_with_signed_blobs() -> ( beacon_block: Arc::new(full_block), beacon_state: snapshot.beacon_state, }); - let signed_blobs = harness + let blob_sidecars = harness .chain .get_blobs(&snapshot.beacon_block_root) - .unwrap() - .into_iter() - .map(|blob| { - let block_root = blob.block_root; - let blob_index = blob.index; - SignedBlobSidecar { - message: blob, - signature: harness - .blob_signature_cache - .read() - .get(&BlobSignatureKey::new(block_root, blob_index)) - .unwrap() - .clone(), - _phantom: PhantomData, - } - }) - .collect::>(); - segment_blobs.push(Some(VariableList::from(signed_blobs))) + .unwrap(); + segment_blobs.push(Some(blob_sidecars)) } (segment, segment_blobs) } @@ -159,7 +140,7 @@ fn chain_segment_blocks( ) -> Vec> { chain_segment .iter() - .zip(blobs.into_iter()) + .zip(blobs.iter()) .map(|(snapshot, blobs)| { RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() }) @@ -214,34 +195,30 @@ fn update_parent_roots( let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct(); *block.parent_root_mut() = root; let new_child = Arc::new(SignedBeaconBlock::from_block(block, signature)); - let new_child_root = new_child.canonical_root(); - child.beacon_block = new_child; if let Some(blobs) = child_blobs { - update_blob_roots(new_child_root, blobs); + update_blob_signed_header(&new_child, blobs); } + child.beacon_block = new_child; } } } -fn update_blob_roots(block_root: Hash256, blobs: &mut BlobSidecarList) { +fn update_blob_signed_header( + signed_block: &SignedBeaconBlock, + blobs: &mut BlobSidecarList, +) { for old_blob_sidecar in blobs.iter_mut() { - let index = old_blob_sidecar.index; - let slot = old_blob_sidecar.slot; - let block_parent_root = old_blob_sidecar.block_parent_root; - let proposer_index = old_blob_sidecar.proposer_index; - let blob = old_blob_sidecar.blob.clone(); - let kzg_commitment = old_blob_sidecar.kzg_commitment; - let kzg_proof = old_blob_sidecar.kzg_proof; - let new_blob = Arc::new(BlobSidecar:: { - block_root, - index, - slot, - block_parent_root, - proposer_index, - blob, - kzg_commitment, - kzg_proof, + index: old_blob_sidecar.index, + blob: old_blob_sidecar.blob.clone(), + kzg_commitment: old_blob_sidecar.kzg_commitment, + kzg_proof: old_blob_sidecar.kzg_proof, + signed_block_header: signed_block.signed_block_header(), + kzg_commitment_inclusion_proof: signed_block + .message() + .body() + .kzg_commitment_merkle_proof(old_blob_sidecar.index as usize) + .unwrap(), }); *old_blob_sidecar = new_blob; } @@ -253,7 +230,6 @@ async fn chain_segment_full_segment() { let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() - .map(|block| block.into()) .collect(); harness @@ -292,7 +268,6 @@ async fn chain_segment_varying_chunk_size() { let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let blocks: Vec> = 
chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() - .map(|block| block.into()) .collect(); harness @@ -334,7 +309,6 @@ async fn chain_segment_non_linear_parent_roots() { */ let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() - .map(|block| block.into()) .collect(); blocks.remove(2); @@ -355,7 +329,6 @@ async fn chain_segment_non_linear_parent_roots() { */ let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() - .map(|block| block.into()) .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); @@ -393,7 +366,6 @@ async fn chain_segment_non_linear_slots() { let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() - .map(|block| block.into()) .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = Slot::new(0); @@ -420,7 +392,6 @@ async fn chain_segment_non_linear_slots() { let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() - .map(|block| block.into()) .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = blocks[2].slot(); @@ -879,7 +850,7 @@ fn unwrap_err(result: Result) -> E { #[tokio::test] async fn block_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); - let (chain_segment, chain_segment_blobs) = get_chain_segment_with_signed_blobs().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment_with_blob_sidecars().await; let block_index = CHAIN_SEGMENT_LENGTH - 2; @@ -909,12 +880,12 @@ async fn block_gossip_verification() { ) .await .expect("should import valid gossip verified block"); - if let Some(blobs) = blobs_opt { - for blob in blobs { - let blob_index = blob.message.index; + if let Some(blob_sidecars) = blobs_opt { + for blob_sidecar in blob_sidecars { + let blob_index = blob_sidecar.index; let gossip_verified = harness .chain - .verify_blob_sidecar_for_gossip(blob.clone(), blob_index) + .verify_blob_sidecar_for_gossip(blob_sidecar.clone(), blob_index) .expect("should obtain gossip verified blob"); harness @@ -948,7 +919,7 @@ async fn block_gossip_verification() { *block.slot_mut() = expected_block_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::FutureSlot { present_slot, block_slot, @@ -982,7 +953,7 @@ async fn block_gossip_verification() { *block.slot_mut() = expected_finalized_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::WouldRevertFinalizedSlot { block_slot, finalized_slot, @@ -1012,9 +983,10 @@ async fn block_gossip_verification() { unwrap_err( harness .chain - .verify_block_for_gossip( - Arc::new(SignedBeaconBlock::from_block(block, junk_signature())).into() - ) + .verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block( + block, + junk_signature() + ))) .await ), BlockError::ProposalSignatureInvalid @@ -1039,7 +1011,7 @@ async fn block_gossip_verification() { *block.parent_root_mut() = parent_root; assert!( matches!( - 
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::ParentUnknown(block) if block.parent_root() == parent_root ), @@ -1065,7 +1037,7 @@ async fn block_gossip_verification() { *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::NotFinalizedDescendant { block_parent_root } if block_parent_root == parent_root ), @@ -1091,7 +1063,6 @@ async fn block_gossip_verification() { .0; let expected_proposer = block.proposer_index(); let other_proposer = (0..VALIDATOR_COUNT as u64) - .into_iter() .find(|i| *i != block.proposer_index()) .expect("there must be more than one validator in this test"); *block.proposer_index_mut() = other_proposer; @@ -1103,7 +1074,7 @@ async fn block_gossip_verification() { ); assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::IncorrectBlockProposer { block, local_shuffling, @@ -1115,7 +1086,7 @@ async fn block_gossip_verification() { // Check to ensure that we registered this is a valid block from this proposer. assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::BlockIsAlreadyKnown, ), "should register any valid signature against the proposer, even if the block failed later verification" @@ -1141,10 +1112,9 @@ async fn block_gossip_verification() { matches!( harness .chain - .verify_block_for_gossip(block.clone().into()) + .verify_block_for_gossip(block.clone()) .await - .err() - .expect("should error when processing known block"), + .expect_err("should error when processing known block"), BlockError::BlockIsAlreadyKnown ), "the second proposal by this validator should be rejected" @@ -1178,12 +1148,14 @@ async fn verify_block_for_gossip_slashing_detection() { .await .unwrap(); - if let Some(blobs) = blobs1 { - for blob in blobs { - let blob_index = blob.message.index; + if let Some((kzg_proofs, blobs)) = blobs1 { + let sidecars = + BlobSidecar::build_sidecars(blobs, verified_block.block(), kzg_proofs).unwrap(); + for sidecar in sidecars { + let blob_index = sidecar.index; let verified_blob = harness .chain - .verify_blob_sidecar_for_gossip(blob, blob_index) + .verify_blob_sidecar_for_gossip(sidecar, blob_index) .unwrap(); harness .chain @@ -1368,10 +1340,9 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(Arc::new(base_block.clone()).into()) + .verify_block_for_gossip(Arc::new(base_block.clone())) .await - .err() - .expect("should error when processing base block"), + .expect_err("should error when processing base block"), BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Altair, object_fork: ForkName::Base, @@ -1389,8 +1360,7 @@ async fn add_base_block_to_altair_chain() { || Ok(()), ) .await - .err() - .expect("should error when processing base block"), + .expect_err("should error when processing base block"), 
BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Altair, object_fork: ForkName::Base, @@ -1506,10 +1476,9 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(Arc::new(altair_block.clone()).into()) + .verify_block_for_gossip(Arc::new(altair_block.clone())) .await - .err() - .expect("should error when processing altair block"), + .expect_err("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Base, object_fork: ForkName::Altair, @@ -1527,8 +1496,7 @@ async fn add_altair_block_to_base_chain() { || Ok(()), ) .await - .err() - .expect("should error when processing altair block"), + .expect_err("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Base, object_fork: ForkName::Altair, @@ -1584,10 +1552,12 @@ async fn import_duplicate_block_unrealized_justification() { // The store's justified checkpoint must still be at epoch 0, while unrealized justification // must be at epoch 1. - let fc = chain.canonical_head.fork_choice_read_lock(); - assert_eq!(fc.justified_checkpoint().epoch, 0); - assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1); - drop(fc); + { + let fc = chain.canonical_head.fork_choice_read_lock(); + assert_eq!(fc.justified_checkpoint().epoch, 0); + assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1); + drop(fc); + } // Produce a block to justify epoch 2. let state = harness.get_current_state(); @@ -1602,10 +1572,10 @@ async fn import_duplicate_block_unrealized_justification() { let notify_execution_layer = NotifyExecutionLayer::Yes; let verified_block1 = block .clone() - .into_execution_pending_block(block_root, &chain, notify_execution_layer) + .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); let verified_block2 = block - .into_execution_pending_block(block_root, &chain, notify_execution_layer) + .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); // Import the first block, simulating a block processed via a finalized chain segment. @@ -1614,18 +1584,20 @@ async fn import_duplicate_block_unrealized_justification() { .unwrap(); // Unrealized justification should NOT have updated. - let fc = chain.canonical_head.fork_choice_read_lock(); - assert_eq!(fc.justified_checkpoint().epoch, 0); - let unrealized_justification = fc.unrealized_justified_checkpoint(); - assert_eq!(unrealized_justification.epoch, 2); - - // The fork choice node for the block should have unrealized justification. - let fc_block = fc.get_block(&block_root).unwrap(); - assert_eq!( - fc_block.unrealized_justified_checkpoint, - Some(unrealized_justification) - ); - drop(fc); + let unrealized_justification = { + let fc = chain.canonical_head.fork_choice_read_lock(); + assert_eq!(fc.justified_checkpoint().epoch, 0); + let unrealized_justification = fc.unrealized_justified_checkpoint(); + assert_eq!(unrealized_justification.epoch, 2); + // The fork choice node for the block should have unrealized justification. + let fc_block = fc.get_block(&block_root).unwrap(); + assert_eq!( + fc_block.unrealized_justified_checkpoint, + Some(unrealized_justification) + ); + drop(fc); + unrealized_justification + }; // Import the second verified block, simulating a block processed via RPC. 
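The test refactor above scopes each fork-choice read guard to its own block so the lock is released before the next import, rather than relying on a trailing `drop(fc)`. The same pattern in isolation, using `parking_lot::RwLock` (the lock type this codebase already uses) on a toy struct:

```rust
use parking_lot::RwLock;

struct ForkChoice {
    justified_epoch: u64,
}

fn read_justified(fc: &RwLock<ForkChoice>) -> u64 {
    // Take the read lock inside a block, extract only what is needed, and let
    // the guard drop at the closing brace.
    let justified = {
        let guard = fc.read();
        guard.justified_epoch
        // guard dropped here, releasing the read lock
    };
    justified
}

fn main() {
    let fc = RwLock::new(ForkChoice { justified_epoch: 2 });
    assert_eq!(read_justified(&fc), 2);
    // Because the read guard was scoped, acquiring the write lock here cannot
    // deadlock on a guard someone forgot to drop.
    fc.write().justified_epoch = 3;
}
```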
import_execution_pending_block(chain.clone(), verified_block2) @@ -1633,15 +1605,16 @@ async fn import_duplicate_block_unrealized_justification() { .unwrap(); // Unrealized justification should still be updated. - let fc = chain.canonical_head.fork_choice_read_lock(); - assert_eq!(fc.justified_checkpoint().epoch, 0); + let fc3 = chain.canonical_head.fork_choice_read_lock(); + assert_eq!(fc3.justified_checkpoint().epoch, 0); assert_eq!( - fc.unrealized_justified_checkpoint(), + fc3.unrealized_justified_checkpoint(), unrealized_justification ); // The fork choice node for the block should still have the unrealized justified checkpoint. - let fc_block = fc.get_block(&block_root).unwrap(); + let fc_block = fc3.get_block(&block_root).unwrap(); + drop(fc3); assert_eq!( fc_block.unrealized_justified_checkpoint, Some(unrealized_justification) diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index c48cf310a2a..d54543e4f6f 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -1,17 +1,15 @@ use beacon_chain::blob_verification::GossipVerifiedBlob; use beacon_chain::test_utils::BeaconChainHarness; -use bls::Signature; use eth2::types::{EventKind, SseBlobSidecar}; use rand::rngs::StdRng; use rand::SeedableRng; -use std::marker::PhantomData; use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; -use types::{BlobSidecar, EthSpec, ForkName, MinimalEthSpec, SignedBlobSidecar}; +use types::{BlobSidecar, EthSpec, ForkName, MinimalEthSpec}; type E = MinimalEthSpec; -/// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API. +/// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API. #[tokio::test] async fn blob_sidecar_event_on_process_gossip_blob() { let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); @@ -29,14 +27,10 @@ async fn blob_sidecar_event_on_process_gossip_blob() { // build and process a gossip verified blob let kzg = harness.chain.kzg.as_ref().unwrap(); let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); - let signed_sidecar = SignedBlobSidecar { - message: BlobSidecar::random_valid(&mut rng, kzg) - .map(Arc::new) - .unwrap(), - signature: Signature::empty(), - _phantom: PhantomData, - }; - let gossip_verified_blob = GossipVerifiedBlob::__assumed_valid(signed_sidecar); + let sidecar = BlobSidecar::random_valid(&mut rng, kzg) + .map(Arc::new) + .unwrap(); + let gossip_verified_blob = GossipVerifiedBlob::__assumed_valid(sidecar); let expected_sse_blobs = SseBlobSidecar::from_blob_sidecar(gossip_verified_blob.as_blob()); let _ = harness @@ -49,7 +43,7 @@ async fn blob_sidecar_event_on_process_gossip_blob() { assert_eq!(sidecar_event, EventKind::BlobSidecar(expected_sse_blobs)); } -/// Verifies that a blob event is emitted when blobs are received via RPC. +/// Verifies that a blob event is emitted when blobs are received via RPC. 
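// A standalone sketch of the test setup used in the events test above: after
// this series blob sidecars are unsigned, so the test wraps a bare
// Arc<BlobSidecar> via a test-only constructor (mirroring `__assumed_valid`).
// The types below are illustrative stand-ins, not Lighthouse's real ones.
use std::sync::Arc;

struct BlobSidecar {
    index: u64,
}

// Newtype that, in real code, proves gossip verification has run.
struct GossipVerifiedBlob(Arc<BlobSidecar>);

impl GossipVerifiedBlob {
    // Test-only escape hatch that skips verification.
    fn assumed_valid(sidecar: Arc<BlobSidecar>) -> Self {
        Self(sidecar)
    }

    fn as_blob(&self) -> &BlobSidecar {
        &self.0
    }
}

fn main() {
    let verified = GossipVerifiedBlob::assumed_valid(Arc::new(BlobSidecar { index: 0 }));
    assert_eq!(verified.as_blob().index, 0);
}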
#[tokio::test] async fn blob_sidecar_event_on_process_rpc_blobs() { let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); @@ -83,7 +77,7 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { let _ = harness .chain - .process_rpc_blobs(blob_1.slot, blob_1.block_root, blobs) + .process_rpc_blobs(blob_1.slot(), blob_1.block_root(), blobs) .await .unwrap(); diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 31d4e4aac71..1c675d280f8 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -591,7 +591,7 @@ pub enum Work { process_batch: Box>) + Send + Sync>, }, GossipBlock(AsyncFn), - GossipSignedBlobSidecar(AsyncFn), + GossipBlobSidecar(AsyncFn), DelayedImportBlock { beacon_block_slot: Slot, beacon_block_root: Hash256, @@ -641,7 +641,7 @@ impl Work { Work::GossipAggregate { .. } => GOSSIP_AGGREGATE, Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH, Work::GossipBlock(_) => GOSSIP_BLOCK, - Work::GossipSignedBlobSidecar(_) => GOSSIP_BLOBS_SIDECAR, + Work::GossipBlobSidecar(_) => GOSSIP_BLOBS_SIDECAR, Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK, Work::GossipVoluntaryExit(_) => GOSSIP_VOLUNTARY_EXIT, Work::GossipProposerSlashing(_) => GOSSIP_PROPOSER_SLASHING, @@ -1205,7 +1205,7 @@ impl BeaconProcessor { Work::GossipBlock { .. } => { gossip_block_queue.push(work, work_id, &self.log) } - Work::GossipSignedBlobSidecar { .. } => { + Work::GossipBlobSidecar { .. } => { gossip_blob_queue.push(work, work_id, &self.log) } Work::DelayedImportBlock { .. } => { @@ -1457,10 +1457,11 @@ impl BeaconProcessor { task_spawner.spawn_async(process_fn) } Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), - Work::GossipBlock(work) | Work::GossipSignedBlobSidecar(work) => task_spawner - .spawn_async(async move { + Work::GossipBlock(work) | Work::GossipBlobSidecar(work) => { + task_spawner.spawn_async(async move { work.await; - }), + }) + } Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 28cd1fe4869..934ef059d5b 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,9 +1,9 @@ use eth2::types::builder_bid::SignedBuilderBid; -use eth2::types::FullPayloadContents; use eth2::types::{ - BlindedPayload, EthSpec, ExecutionBlockHash, ForkVersionedResponse, PublicKeyBytes, - SignedBlockContents, SignedValidatorRegistrationData, Slot, + EthSpec, ExecutionBlockHash, ForkVersionedResponse, PublicKeyBytes, + SignedValidatorRegistrationData, Slot, }; +use eth2::types::{FullPayloadContents, SignedBlindedBeaconBlock}; pub use eth2::Error; use eth2::{ok_or_error, StatusCode}; use reqwest::{IntoUrl, Response}; @@ -140,7 +140,7 @@ impl BuilderHttpClient { /// `POST /eth/v1/builder/blinded_blocks` pub async fn post_builder_blinded_blocks( &self, - blinded_block: &SignedBlockContents>, + blinded_block: &SignedBlindedBeaconBlock, ) -> Result>, Error> { let mut path = self.server.full.clone(); diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 07fdf6414c1..6b0277ff318 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -14,8 +14,8 @@ pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; pub use 
engines::{EngineState, ForkchoiceState}; +use eth2::types::FullPayloadContents; use eth2::types::{builder_bid::SignedBuilderBid, BlobsBundle, ForkVersionedResponse}; -use eth2::types::{FullPayloadContents, SignedBlockContents}; use ethers_core::types::Transaction as EthersTransaction; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; @@ -43,8 +43,9 @@ use tree_hash::TreeHash; use types::beacon_block_body::KzgCommitments; use types::builder_bid::BuilderBid; use types::payload::BlockProductionVersion; -use types::sidecar::{BlobItems, Sidecar}; -use types::{AbstractExecPayload, ExecutionPayloadDeneb, KzgProofs}; +use types::{ + AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, KzgProofs, SignedBlindedBeaconBlock, +}; use types::{ BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadCapella, ExecutionPayloadMerge, FullPayload, ProposerPreparationData, PublicKeyBytes, Signature, Slot, @@ -103,12 +104,8 @@ impl TryFrom> for ProvenancedPayload BlockProposalContents::PayloadAndBlobs { payload: ExecutionPayloadHeader::Deneb(builder_bid.header).into(), block_value: builder_bid.value, - kzg_commitments: builder_bid.blinded_blobs_bundle.commitments, - blobs: BlobItems::::try_from_blob_roots( - builder_bid.blinded_blobs_bundle.blob_roots, - ) - .map_err(Error::InvalidBlobConversion)?, - proofs: builder_bid.blinded_blobs_bundle.proofs, + kzg_commitments: builder_bid.blob_kzg_commitments, + blobs_and_proofs: None, }, }; Ok(ProvenancedPayload::Builder( @@ -170,8 +167,8 @@ pub enum BlockProposalContents> { payload: Payload, block_value: Uint256, kzg_commitments: KzgCommitments, - blobs: >::BlobItems, - proofs: KzgProofs, + /// `None` for blinded `PayloadAndBlobs`. + blobs_and_proofs: Option<(BlobsList, KzgProofs)>, }, } @@ -203,9 +200,7 @@ impl> TryFrom> payload: execution_payload.into(), block_value, kzg_commitments: bundle.commitments, - blobs: BlobItems::try_from_blobs(bundle.blobs) - .map_err(Error::InvalidBlobConversion)?, - proofs: bundle.proofs, + blobs_and_proofs: Some((bundle.blobs, bundle.proofs)), }), None => Ok(Self::Payload { payload: execution_payload.into(), @@ -233,26 +228,23 @@ impl> BlockProposalContents ( Payload, Option>, - Option<>::BlobItems>, - Option>, + Option<(BlobsList, KzgProofs)>, Uint256, ) { match self { Self::Payload { payload, block_value, - } => (payload, None, None, None, block_value), + } => (payload, None, None, block_value), Self::PayloadAndBlobs { payload, block_value, kzg_commitments, - blobs, - proofs, + blobs_and_proofs, } => ( payload, Some(kzg_commitments), - Some(blobs), - Some(proofs), + blobs_and_proofs, block_value, ), } @@ -276,23 +268,6 @@ impl> BlockProposalContents block_value, } } - pub fn default_at_fork(fork_name: ForkName) -> Result { - Ok(match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - BlockProposalContents::Payload { - payload: Payload::default_at_fork(fork_name)?, - block_value: Uint256::zero(), - } - } - ForkName::Deneb => BlockProposalContents::PayloadAndBlobs { - payload: Payload::default_at_fork(fork_name)?, - block_value: Uint256::zero(), - blobs: Payload::default_blobs_at_fork(fork_name)?, - kzg_commitments: VariableList::default(), - proofs: VariableList::default(), - }, - }) - } } #[derive(Clone, PartialEq)] @@ -753,6 +728,13 @@ impl ExecutionLayer { } } + /// Delete proposer preparation data for `proposer_index`. This is only useful in tests. 
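// A minimal sketch of the `BlockProposalContents` change above: the two parallel
// `blobs` and `proofs` fields collapse into one `Option<(blobs, proofs)>`, so
// "blobs without proofs" (or vice versa) becomes unrepresentable. Toy types
// stand in for `BlobsList` and `KzgProofs`.
type Blob = Vec<u8>;
type Proof = [u8; 48];

enum BlockProposalContents {
    Payload {
        block_value: u64,
    },
    PayloadAndBlobs {
        block_value: u64,
        // `None` for blinded payloads, where only KZG commitments are known.
        blobs_and_proofs: Option<(Vec<Blob>, Vec<Proof>)>,
    },
}

impl BlockProposalContents {
    fn deconstruct(self) -> (Option<(Vec<Blob>, Vec<Proof>)>, u64) {
        match self {
            Self::Payload { block_value } => (None, block_value),
            Self::PayloadAndBlobs {
                block_value,
                blobs_and_proofs,
            } => (blobs_and_proofs, block_value),
        }
    }
}

fn main() {
    let contents = BlockProposalContents::PayloadAndBlobs {
        block_value: 42,
        blobs_and_proofs: Some((vec![vec![0u8; 8]], vec![[0u8; 48]])),
    };
    let (blobs_and_proofs, value) = contents.deconstruct();
    assert!(blobs_and_proofs.is_some());
    assert_eq!(value, 42);
}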
+ pub async fn clear_proposer_preparation(&self, proposer_index: u64) { + self.proposer_preparation_data() + .await + .remove(&proposer_index); + } + /// Removes expired entries from proposer_preparation_data and proposers caches async fn clean_proposer_caches(&self, current_epoch: Epoch) -> Result<(), Error> { let mut proposer_preparation_data = self.proposer_preparation_data().await; @@ -2003,7 +1985,7 @@ impl ExecutionLayer { pub async fn propose_blinded_beacon_block( &self, block_root: Hash256, - block: &SignedBlockContents>, + block: &SignedBlindedBeaconBlock, ) -> Result, Error> { debug!( self.log(), @@ -2052,7 +2034,6 @@ impl ExecutionLayer { "relay_response_ms" => duration.as_millis(), "block_root" => ?block_root, "parent_hash" => ?block - .signed_block() .message() .execution_payload() .map(|payload| format!("{}", payload.parent_hash())) diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 713ebb670c3..182cad50faf 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -881,16 +881,16 @@ mod test { #[test] fn valid_test_blobs() { assert!( - validate_blob::().unwrap(), + validate_blob::().is_ok(), "Mainnet preset test blobs bundle should contain valid proofs" ); assert!( - validate_blob::().unwrap(), + validate_blob::().is_ok(), "Minimal preset test blobs bundle should contain valid proofs" ); } - fn validate_blob() -> Result { + fn validate_blob() -> Result<(), String> { let kzg = load_kzg()?; let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; let kzg_blob = kzg::Blob::from_bytes(blob.as_ref()) diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 32b352b6aee..7da2022d588 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -533,8 +533,8 @@ pub fn serve( .as_deneb() .map_err(|_| reject("incorrect payload variant"))? .into(), - blinded_blobs_bundle: maybe_blobs_bundle - .map(Into::into) + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) .unwrap_or_default(), value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), pubkey: builder.builder_sk.public_key().compress(), @@ -572,8 +572,8 @@ pub fn serve( .as_deneb() .map_err(|_| reject("incorrect payload variant"))? 
.into(), - blinded_blobs_bundle: maybe_blobs_bundle - .map(Into::into) + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) .unwrap_or_default(), value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), pubkey: builder.builder_sk.public_key().compress(), diff --git a/beacon_node/http_api/src/build_block_contents.rs b/beacon_node/http_api/src/build_block_contents.rs index f59a4b52152..37b4049c0c6 100644 --- a/beacon_node/http_api/src/build_block_contents.rs +++ b/beacon_node/http_api/src/build_block_contents.rs @@ -1,50 +1,42 @@ -use beacon_chain::BlockProductionError; -use eth2::types::{BeaconBlockAndBlobSidecars, BlindedBeaconBlockAndBlobSidecars, BlockContents}; -use types::{AbstractExecPayload, BeaconBlock, EthSpec, ForkName, SidecarList}; +use beacon_chain::{BeaconBlockResponse, BeaconBlockResponseWrapper, BlockProductionError}; +use eth2::types::{BlockContents, FullBlockContents, ProduceBlockV3Response}; +use types::{EthSpec, ForkName}; type Error = warp::reject::Rejection; -pub fn build_block_contents>( +pub fn build_block_contents( fork_name: ForkName, - block: BeaconBlock, - maybe_blobs: Option>::Sidecar>>, -) -> Result, Error> { - match Payload::block_type() { - types::BlockType::Blinded => match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - Ok(BlockContents::Block(block)) - } + block_response: BeaconBlockResponseWrapper, +) -> Result, Error> { + match block_response { + BeaconBlockResponseWrapper::Blinded(block) => { + Ok(ProduceBlockV3Response::Blinded(block.block)) + } + BeaconBlockResponseWrapper::Full(block) => match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => Ok( + ProduceBlockV3Response::Full(FullBlockContents::Block(block.block)), + ), ForkName::Deneb => { - if let Some(blinded_blob_sidecars) = maybe_blobs { - let block_and_blobs = BlindedBeaconBlockAndBlobSidecars { - blinded_block: block, - blinded_blob_sidecars, - }; + let BeaconBlockResponse { + block, + state: _, + blob_items, + execution_payload_value: _, + consensus_block_value: _, + } = block; - Ok(BlockContents::BlindedBlockAndBlobSidecars(block_and_blobs)) - } else { - Err(warp_utils::reject::block_production_error( + let Some((kzg_proofs, blobs)) = blob_items else { + return Err(warp_utils::reject::block_production_error( BlockProductionError::MissingBlobs, - )) - } - } - }, - types::BlockType::Full => match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - Ok(BlockContents::Block(block)) - } - ForkName::Deneb => { - if let Some(blob_sidecars) = maybe_blobs { - let block_and_blobs = BeaconBlockAndBlobSidecars { - block, - blob_sidecars, - }; + )); + }; - Ok(BlockContents::BlockAndBlobSidecars(block_and_blobs)) - } else { - Err(warp_utils::reject::block_production_error( - BlockProductionError::MissingBlobs, - )) - } + Ok(ProduceBlockV3Response::Full( + FullBlockContents::BlockContents(BlockContents { + block, + kzg_proofs, + blobs, + }), + )) } }, } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5b00a80bdf0..08c67a00bf7 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -41,7 +41,7 @@ use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, - SignedBlindedBlockContents, SignedBlockContents, ValidatorId, ValidatorStatus, + PublishBlockRequest, ValidatorId, ValidatorStatus, }; use 
lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; @@ -76,9 +76,9 @@ use tokio_stream::{ }; use types::{ Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, - BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, - ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, RelativeEpoch, - SignedAggregateAndProof, SignedBlsToExecutionChange, SignedContributionAndProof, + CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, ForkVersionedResponse, Hash256, + ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, + SignedBlindedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; @@ -1306,7 +1306,7 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - move |block_contents: SignedBlockContents, + move |block_contents: PublishBlockRequest, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, @@ -1342,7 +1342,7 @@ pub fn serve( network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block_contents = SignedBlockContents::::from_ssz_bytes( + let block_contents = PublishBlockRequest::::from_ssz_bytes( &block_bytes, &chain.spec, ) @@ -1375,7 +1375,7 @@ pub fn serve( .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, - block_contents: SignedBlockContents, + block_contents: PublishBlockRequest, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, @@ -1413,7 +1413,7 @@ pub fn serve( network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block_contents = SignedBlockContents::::from_ssz_bytes( + let block_contents = PublishBlockRequest::::from_ssz_bytes( &block_bytes, &chain.spec, ) @@ -1449,7 +1449,7 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - move |block_contents: SignedBlindedBlockContents, + move |block_contents: SignedBlindedBeaconBlock, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, @@ -1485,14 +1485,13 @@ pub fn serve( network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = - SignedBlockContents::>::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) - })?; + let block = SignedBlindedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; publish_blocks::publish_blinded_block( block, chain, @@ -1518,14 +1517,14 @@ pub fn serve( .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, - block_contents: SignedBlindedBlockContents, + blinded_block: SignedBlindedBeaconBlock, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_blinded_block( - block_contents, + blinded_block, chain, &network_tx, log, @@ -1555,14 +1554,13 @@ pub fn serve( network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = - 
SignedBlockContents::>::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) - })?; + let block = SignedBlindedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; publish_blocks::publish_blinded_block( block, chain, diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 73da4853e60..09b95136b57 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -3,8 +3,7 @@ use std::sync::Arc; use types::{payload::BlockProductionVersion, *}; use beacon_chain::{ - BeaconBlockResponse, BeaconBlockResponseType, BeaconChain, BeaconChainTypes, - ProduceBlockVerification, + BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification, }; use eth2::types::{self as api_types, EndpointVersion, SkipRandaoVerification}; use ssz::Encode; @@ -69,35 +68,23 @@ pub async fn produce_block_v3( warp_utils::reject::custom_bad_request(format!("failed to fetch a block: {:?}", e)) })?; - match block_response_type { - BeaconBlockResponseType::Full(block_response) => { - build_response_v3(chain, block_response, endpoint_version, accept_header) - } - BeaconBlockResponseType::Blinded(block_response) => { - build_response_v3(chain, block_response, endpoint_version, accept_header) - } - } + build_response_v3(chain, block_response_type, endpoint_version, accept_header) } -pub fn build_response_v3>( +pub fn build_response_v3( chain: Arc>, - block_response: BeaconBlockResponse, + block_response: BeaconBlockResponseWrapper, endpoint_version: EndpointVersion, accept_header: Option, ) -> Result, warp::Rejection> { let fork_name = block_response - .block - .to_ref() .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; + let execution_payload_value = block_response.execution_payload_value(); + let consensus_block_value = block_response.consensus_block_value(); + let execution_payload_blinded = block_response.is_blinded(); - let block_contents = build_block_contents::build_block_contents( - fork_name, - block_response.block, - block_response.maybe_side_car, - )?; - - let execution_payload_blinded = Payload::block_type() == BlockType::Blinded; + let block_contents = build_block_contents::build_block_contents(fork_name, block_response)?; match accept_header { Some(api_types::Accept::Ssz) => Response::builder() @@ -107,9 +94,9 @@ pub fn build_response_v3| add_consensus_version_header(res, fork_name)) .map(|res| add_execution_payload_blinded_header(res, execution_payload_blinded)) .map(|res: Response| { - add_execution_payload_value_header(res, block_response.execution_payload_value) + add_execution_payload_value_header(res, execution_payload_value) }) - .map(|res| add_consensus_block_value_header(res, block_response.consensus_block_value)) + .map(|res| add_consensus_block_value_header(res, consensus_block_value)) .map_err(|e| -> warp::Rejection { warp_utils::reject::custom_server_error(format!("failed to create response: {}", e)) }), @@ -117,10 +104,8 @@ pub fn build_response_v3( .await .map_err(warp_utils::reject::block_production_error)?; - match block_response_type { - BeaconBlockResponseType::Full(block_response) => { - build_response_v2(chain, block_response, endpoint_version, accept_header) - } - BeaconBlockResponseType::Blinded(block_response) => { - build_response_v2(chain, block_response, endpoint_version, 
accept_header) - } - } + build_response_v2(chain, block_response_type, endpoint_version, accept_header) } pub async fn produce_block_v2( @@ -187,33 +165,20 @@ pub async fn produce_block_v2( .await .map_err(warp_utils::reject::block_production_error)?; - match block_response_type { - BeaconBlockResponseType::Full(block_response) => { - build_response_v2(chain, block_response, endpoint_version, accept_header) - } - BeaconBlockResponseType::Blinded(block_response) => { - build_response_v2(chain, block_response, endpoint_version, accept_header) - } - } + build_response_v2(chain, block_response_type, endpoint_version, accept_header) } -pub fn build_response_v2>( +pub fn build_response_v2( chain: Arc>, - block_response: BeaconBlockResponse, + block_response: BeaconBlockResponseWrapper, endpoint_version: EndpointVersion, accept_header: Option, ) -> Result, warp::Rejection> { let fork_name = block_response - .block - .to_ref() .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; - let block_contents = build_block_contents::build_block_contents( - fork_name, - block_response.block, - block_response.maybe_side_car, - )?; + let block_contents = build_block_contents::build_block_contents(fork_name, block_response)?; match accept_header { Some(api_types::Accept::Ssz) => Response::builder() diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index e41cf51ec3b..432d91b7234 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -6,8 +6,8 @@ use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlockContents, NotifyExecutionLayer, }; -use eth2::types::{BroadcastValidation, ErrorMessage}; -use eth2::types::{FullPayloadContents, SignedBlockContents}; +use eth2::types::{into_full_block_and_blobs, BroadcastValidation, ErrorMessage}; +use eth2::types::{FullPayloadContents, PublishBlockRequest}; use execution_layer::ProvenancedPayload; use lighthouse_network::PubsubMessage; use network::NetworkMessage; @@ -19,8 +19,9 @@ use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, - ForkName, FullPayload, FullPayloadMerge, Hash256, SignedBeaconBlock, SignedBlobSidecarList, + AbstractExecPayload, BeaconBlockRef, BlobSidecarList, EthSpec, ExecPayload, ExecutionBlockHash, + ForkName, FullPayload, FullPayloadMerge, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, + VariableList, }; use warp::http::StatusCode; use warp::{reply::Response, Rejection, Reply}; @@ -65,7 +66,7 @@ pub async fn publish_block>, - blobs_opt: Option>, + blobs_opt: Option>, sender, log, seen_timestamp| { @@ -86,8 +87,8 @@ pub async fn publish_block { let mut pubsub_messages = vec![PubsubMessage::BeaconBlock(block.clone())]; - if let Some(signed_blobs) = blobs_opt { - for (blob_index, blob) in signed_blobs.into_iter().enumerate() { + if let Some(blob_sidecars) = blobs_opt { + for (blob_index, blob) in blob_sidecars.into_iter().enumerate() { pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new(( blob_index as u64, blob, @@ -108,10 +109,6 @@ pub async fn publish_block>(); + VariableList::from(blobs) + }); let block_root = block_root.unwrap_or(gossip_verified_block.block_root); @@ -292,16 +296,16 @@ pub async fn publish_block( - block_contents: SignedBlockContents>, + blinded_block: SignedBlindedBeaconBlock, chain: Arc>, 
network_tx: &UnboundedSender>, log: Logger, validation_level: BroadcastValidation, duplicate_status_code: StatusCode, ) -> Result { - let block_root = block_contents.signed_block().canonical_root(); - let full_block: ProvenancedBlock> = - reconstruct_block(chain.clone(), block_root, block_contents, log.clone()).await?; + let block_root = blinded_block.canonical_root(); + let full_block: ProvenancedBlock> = + reconstruct_block(chain.clone(), block_root, blinded_block, log.clone()).await?; publish_block::( Some(block_root), full_block, @@ -320,10 +324,9 @@ pub async fn publish_blinded_block( pub async fn reconstruct_block( chain: Arc>, block_root: Hash256, - block_contents: SignedBlockContents>, + block: SignedBlindedBeaconBlock, log: Logger, -) -> Result>, Rejection> { - let block = block_contents.signed_block(); +) -> Result>, Rejection> { let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { let el = chain.execution_layer.as_ref().ok_or_else(|| { warp_utils::reject::custom_server_error("Missing execution layer".to_string()) @@ -365,7 +368,7 @@ pub async fn reconstruct_block( ); let full_payload = el - .propose_blinded_beacon_block(block_root, &block_contents) + .propose_blinded_beacon_block(block_root, &block) .await .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -385,15 +388,15 @@ pub async fn reconstruct_block( match full_payload_opt { // A block without a payload is pre-merge and we consider it locally // built. - None => block_contents - .try_into_full_block_and_blobs(None) - .map(ProvenancedBlock::local), - Some(ProvenancedPayload::Local(full_payload_contents)) => block_contents - .try_into_full_block_and_blobs(Some(full_payload_contents)) - .map(ProvenancedBlock::local), - Some(ProvenancedPayload::Builder(full_payload_contents)) => block_contents - .try_into_full_block_and_blobs(Some(full_payload_contents)) - .map(ProvenancedBlock::builder), + None => into_full_block_and_blobs(block, None).map(ProvenancedBlock::local), + Some(ProvenancedPayload::Local(full_payload_contents)) => { + into_full_block_and_blobs(block, Some(full_payload_contents)) + .map(ProvenancedBlock::local) + } + Some(ProvenancedPayload::Builder(full_payload_contents)) => { + into_full_block_and_blobs(block, Some(full_payload_contents)) + .map(ProvenancedBlock::builder) + } } .map_err(|e| { warp_utils::reject::custom_server_error(format!("Unable to add payload to block: {e:?}")) diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index fe300ae5e1d..7961b32c578 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -2,18 +2,12 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy}, GossipVerifiedBlock, IntoGossipVerifiedBlockContents, }; -use eth2::types::{ - BroadcastValidation, SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlockContents, - SignedBlockContentsTuple, -}; +use eth2::types::{BroadcastValidation, PublishBlockRequest, SignedBeaconBlock}; use http_api::test_utils::InteractiveTester; use http_api::{publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock}; use std::sync::Arc; use tree_hash::TreeHash; -use types::{ - BlindedBlobSidecar, BlindedPayload, BlobSidecar, FullPayload, Hash256, MainnetEthSpec, - SignedSidecarList, Slot, -}; +use types::{Hash256, MainnetEthSpec, Slot}; use warp::Rejection; use warp_utils::reject::CustomBadRequest; @@ -80,7 +74,7 
@@ pub async fn gossip_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -131,7 +125,7 @@ pub async fn gossip_partial_pass() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -174,7 +168,7 @@ pub async fn gossip_full_pass() { let response: Result<(), eth2::Error> = tester .client .post_beacon_blocks_v2( - &SignedBlockContents::new(block.clone(), blobs), + &PublishBlockRequest::new(block.clone(), blobs), validation_level, ) .await; @@ -266,7 +260,7 @@ pub async fn consensus_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -315,7 +309,7 @@ pub async fn consensus_gossip() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -358,10 +352,8 @@ pub async fn consensus_partial_pass_only_consensus() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let ((block_a, _), state_after_a): ((SignedBeaconBlock, _), _) = - tester.harness.make_block(state_a.clone(), slot_b).await; - let ((block_b, blobs_b), state_after_b): ((SignedBeaconBlock, _), _) = - tester.harness.make_block(state_a, slot_b).await; + let ((block_a, _), state_after_a) = tester.harness.make_block(state_a.clone(), slot_b).await; + let ((block_b, blobs_b), state_after_b) = tester.harness.make_block(state_a, slot_b).await; let block_b_root = block_b.canonical_root(); /* check for `make_block` curios */ @@ -369,7 +361,7 @@ pub async fn consensus_partial_pass_only_consensus() { assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_contents_b = SignedBlockContents::new(block_b, blobs_b) + let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b) .into_gossip_verified_block(&tester.harness.chain); assert!(gossip_block_contents_b.is_ok()); let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); @@ -430,7 +422,7 @@ pub async fn consensus_full_pass() { let response: Result<(), eth2::Error> = tester .client .post_beacon_blocks_v2( - &SignedBlockContents::new(block.clone(), blobs), + &PublishBlockRequest::new(block.clone(), blobs), validation_level, ) .await; @@ -481,7 +473,7 @@ pub async fn equivocation_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -538,7 +530,7 @@ pub async fn equivocation_consensus_early_equivocation() { assert!(tester .client .post_beacon_blocks_v2( - &SignedBlockContents::new(block_a.clone(), blobs_a), + &PublishBlockRequest::new(block_a.clone(), blobs_a), validation_level ) .await 
@@ -552,7 +544,7 @@ pub async fn equivocation_consensus_early_equivocation() { let response: Result<(), eth2::Error> = tester .client .post_beacon_blocks_v2( - &SignedBlockContents::new(block_b.clone(), blobs_b), + &PublishBlockRequest::new(block_b.clone(), blobs_b), validation_level, ) .await; @@ -603,7 +595,7 @@ pub async fn equivocation_gossip() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -661,10 +653,10 @@ pub async fn equivocation_consensus_late_equivocation() { assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_contents_b = SignedBlockContents::new(block_b, blobs_b) + let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b) .into_gossip_verified_block(&tester.harness.chain); assert!(gossip_block_contents_b.is_ok()); - let gossip_block_contents_a = SignedBlockContents::new(block_a, blobs_a) + let gossip_block_contents_a = PublishBlockRequest::new(block_a, blobs_a) .into_gossip_verified_block(&tester.harness.chain); assert!(gossip_block_contents_a.is_err()); @@ -728,7 +720,7 @@ pub async fn equivocation_full_pass() { let response: Result<(), eth2::Error> = tester .client .post_beacon_blocks_v2( - &SignedBlockContents::new(block.clone(), blobs), + &PublishBlockRequest::new(block.clone(), blobs), validation_level, ) .await; @@ -776,11 +768,9 @@ pub async fn blinded_gossip_invalid() { }) .await; - let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -829,11 +819,9 @@ pub async fn blinded_gossip_partial_pass() { }) .await; - let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -870,18 +858,17 @@ pub async fn blinded_gossip_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await; - let block_contents = block_contents_tuple.into(); + let (blinded_block, _) = tester.harness.make_blinded_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root())); + .block_is_known_to_fork_choice(&blinded_block.canonical_root())); } // This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. 
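// A toy sketch of the `clone_as_blinded()` calls the tests above switch to:
// a full block can always be projected to its blinded form by replacing the
// payload with a commitment, so the blinded publish body is derived from the
// full block instead of built separately. The "root" below is a fake hash;
// the real code uses tree hashing.
struct FullBlock {
    slot: u64,
    payload: Vec<u8>,
}

#[derive(Debug, PartialEq)]
struct BlindedBlock {
    slot: u64,
    payload_root: u64,
}

impl FullBlock {
    fn clone_as_blinded(&self) -> BlindedBlock {
        BlindedBlock {
            slot: self.slot,
            // Stand-in commitment: sum of payload bytes instead of a real root.
            payload_root: self.payload.iter().map(|b| *b as u64).sum(),
        }
    }
}

fn main() {
    let full = FullBlock {
        slot: 7,
        payload: vec![1, 2, 3],
    };
    assert_eq!(
        full.clone_as_blinded(),
        BlindedBlock {
            slot: 7,
            payload_root: 6
        }
    );
}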
@@ -912,19 +899,18 @@ pub async fn blinded_gossip_full_pass_ssz() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await; - let block_contents = block_contents_tuple.into(); + let (blinded_block, _) = tester.harness.make_blinded_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2_ssz(&block_contents, validation_level) + .post_beacon_blinded_blocks_v2_ssz(&blinded_block, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root())); + .block_is_known_to_fork_choice(&blinded_block.canonical_root())); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. @@ -963,11 +949,9 @@ pub async fn blinded_consensus_invalid() { }) .await; - let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -1014,11 +998,9 @@ pub async fn blinded_consensus_gossip() { .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) .await; - let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -1060,19 +1042,18 @@ pub async fn blinded_consensus_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await; + let (blinded_block, _) = tester.harness.make_blinded_block(state_a, slot_b).await; - let block_contents = block_contents_tuple.into(); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root())); + .block_is_known_to_fork_choice(&blinded_block.canonical_root())); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`. 
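// A before/after sketch of the return-type tightening visible in the hunks
// above: `make_blinded_block` now returns the blinded block directly rather
// than a (block, blobs) tuple, since blinded blocks never carry blobs, and the
// call-site destructuring shrinks accordingly. Toy stand-in types only.
struct BlindedBlock {
    slot: u64,
}
struct State {
    slot: u64,
}

// Before (shape only): fn make_blinded_block(..) -> ((BlindedBlock, Option<()>), State)
// After: the dead Option is gone from the return type.
fn make_blinded_block(slot: u64) -> (BlindedBlock, State) {
    (BlindedBlock { slot }, State { slot: slot + 1 })
}

fn main() {
    let (block, state_after) = make_blinded_block(3);
    assert_eq!(block.slot, 3);
    assert_eq!(state_after.slot, 4);
}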
@@ -1112,11 +1093,9 @@ pub async fn blinded_equivocation_invalid() { }) .await; - let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -1159,18 +1138,13 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_contents_tuple_a, state_after_a) = tester + let (block_a, state_after_a) = tester .harness .make_blinded_block(state_a.clone(), slot_b) .await; - let (block_contents_tuple_b, state_after_b) = - tester.harness.make_blinded_block(state_a, slot_b).await; + let (block_b, state_after_b) = tester.harness.make_blinded_block(state_a, slot_b).await; /* check for `make_blinded_block` curios */ - let block_contents_a: SignedBlockContents> = block_contents_tuple_a.into(); - let block_contents_b: SignedBlockContents> = block_contents_tuple_b.into(); - let block_a = block_contents_a.signed_block(); - let block_b = block_contents_b.signed_block(); assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); assert_ne!(block_a.state_root(), block_b.state_root()); @@ -1178,7 +1152,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { /* submit `block_a` as valid */ assert!(tester .client - .post_beacon_blinded_blocks_v2(&block_contents_a, validation_level) + .post_beacon_blinded_blocks_v2(&block_a, validation_level) .await .is_ok()); assert!(tester @@ -1189,7 +1163,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { /* submit `block_b` which should induce equivocation */ let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&block_contents_b, validation_level) + .post_beacon_blinded_blocks_v2(&block_b, validation_level) .await; assert!(response.is_err()); @@ -1236,11 +1210,9 @@ pub async fn blinded_equivocation_gossip() { .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) .await; - let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -1286,12 +1258,11 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let ((block_a, blobs_a), state_after_a): ((SignedBlindedBeaconBlock, _), _) = tester + let (block_a, state_after_a) = tester .harness .make_blinded_block(state_a.clone(), slot_b) .await; - let ((block_b, blobs_b), state_after_b): ((SignedBlindedBeaconBlock, _), _) = - tester.harness.make_blinded_block(state_a, slot_b).await; + let (block_b, state_after_b) = tester.harness.make_blinded_block(state_a, slot_b).await; /* check for `make_blinded_block` curios */ assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); @@ -1301,7 +1272,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let unblinded_block_a = reconstruct_block( tester.harness.chain.clone(), block_a.canonical_root(), - SignedBlockContents::new(block_a, 
blobs_a), + block_a, test_logger.clone(), ) .await @@ -1309,7 +1280,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let unblinded_block_b = reconstruct_block( tester.harness.chain.clone(), block_b.canonical_root(), - SignedBlockContents::new(block_b.clone(), blobs_b.clone()), + block_b.clone(), test_logger.clone(), ) .await @@ -1338,7 +1309,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let channel = tokio::sync::mpsc::unbounded_channel(); let publication_result = publish_blinded_block( - SignedBlockContents::new(block_b, blobs_b), + block_b, tester.harness.chain, &channel.0, test_logger, @@ -1383,15 +1354,11 @@ pub async fn blinded_equivocation_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let ((block, blobs), _): ((SignedBlindedBeaconBlock, _), _) = - tester.harness.make_blinded_block(state_a, slot_b).await; + let (block, _) = tester.harness.make_blinded_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( - &SignedBlockContents::new(block.clone(), blobs), - validation_level, - ) + .post_beacon_blinded_blocks_v2(&block, validation_level) .await; assert!(response.is_ok()); @@ -1400,20 +1367,3 @@ pub async fn blinded_equivocation_full_pass() { .chain .block_is_known_to_fork_choice(&block.canonical_root())); } - -fn into_signed_blinded_block_contents( - block_contents_tuple: SignedBlockContentsTuple>, -) -> SignedBlockContents> { - let (block, maybe_blobs) = block_contents_tuple; - SignedBlockContents::new(block.into(), maybe_blobs.map(into_blinded_blob_sidecars)) -} - -fn into_blinded_blob_sidecars( - blobs: SignedSidecarList>, -) -> SignedSidecarList { - blobs - .into_iter() - .map(|blob| blob.into()) - .collect::>() - .into() -} diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 327215209f2..48a2f450e21 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -17,8 +17,8 @@ use std::sync::Arc; use std::time::Duration; use tree_hash::TreeHash; use types::{ - Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, - MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot, + Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, MainnetEthSpec, + MinimalEthSpec, ProposerPreparationData, Slot, }; use eth2::types::ForkVersionedBeaconBlockType::{Blinded, Full}; @@ -641,13 +641,9 @@ pub async fn proposer_boost_re_org_test( assert_eq!(block_c.parent_root(), block_b_root); } - // Sign blobs. - let block_c_signed_blobs = - block_c_blobs.map(|blobs| harness.sign_blobs(blobs, &state_b, proposer_index)); - // Applying block C should cause it to become head regardless (re-org or continuation). 
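// A minimal stand-in for the provenance bookkeeping around `reconstruct_block`
// as used above: when a blinded block is inflated back to a full block, the
// result records whether the payload came from the local execution layer or
// from a builder, so later broadcast logic can treat the two differently.
// Types and the boolean flag are illustrative, not Lighthouse's real API.
enum ProvenancedBlock<B> {
    Local(B),
    Builder(B),
}

fn reconstruct(blinded: &str, payload: Option<(&str, bool)>) -> ProvenancedBlock<String> {
    match payload {
        // Pre-merge blocks have no payload and count as locally built.
        None => ProvenancedBlock::Local(blinded.to_string()),
        // (payload, from_builder): provenance decides the variant.
        Some((p, false)) => ProvenancedBlock::Local(format!("{blinded}+{p}")),
        Some((p, true)) => ProvenancedBlock::Builder(format!("{blinded}+{p}")),
    }
}

fn main() {
    match reconstruct("block", Some(("payload", true))) {
        ProvenancedBlock::Builder(full) => assert_eq!(full, "block+payload"),
        ProvenancedBlock::Local(_) => unreachable!("builder payloads keep their provenance"),
    }
}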
let block_root_c = harness - .process_block_result((block_c.clone(), block_c_signed_blobs)) + .process_block_result((block_c.clone(), block_c_blobs)) .await .unwrap() .into(); @@ -828,7 +824,7 @@ pub async fn fork_choice_before_proposal() { .into(); let block_d = tester .client - .get_validator_blocks::>(slot_d, &randao_reveal, None) + .get_validator_blocks::(slot_d, &randao_reveal, None) .await .unwrap() .data diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index d5fa50ba219..7b769009cff 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -64,8 +64,8 @@ struct ApiTester { harness: Arc>>, chain: Arc>>, client: BeaconNodeHttpClient, - next_block: SignedBlockContents, - reorg_block: SignedBlockContents, + next_block: PublishBlockRequest, + reorg_block: PublishBlockRequest, attestations: Vec>, contribution_and_proofs: Vec>, attester_slashing: AttesterSlashing, @@ -173,13 +173,13 @@ impl ApiTester { let (next_block, _next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; - let next_block = SignedBlockContents::from(next_block); + let next_block = PublishBlockRequest::from(next_block); // `make_block` adds random graffiti, so this will produce an alternate block let (reorg_block, _reorg_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap() + 1) .await; - let reorg_block = SignedBlockContents::from(reorg_block); + let reorg_block = PublishBlockRequest::from(reorg_block); let head_state_root = head.beacon_state_root(); let attestations = harness @@ -314,13 +314,13 @@ impl ApiTester { let (next_block, _next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; - let next_block = SignedBlockContents::from(next_block); + let next_block = PublishBlockRequest::from(next_block); // `make_block` adds random graffiti, so this will produce an alternate block let (reorg_block, _reorg_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; - let reorg_block = SignedBlockContents::from(reorg_block); + let reorg_block = PublishBlockRequest::from(reorg_block); let head_state_root = head.beacon_state_root(); let attestations = harness @@ -1301,7 +1301,7 @@ impl ApiTester { assert!(self .client - .post_beacon_blocks(&SignedBlockContents::from(block)) + .post_beacon_blocks(&PublishBlockRequest::from(block)) .await .is_err()); @@ -1328,7 +1328,7 @@ impl ApiTester { assert!(self .client - .post_beacon_blocks_ssz(&SignedBlockContents::from(block)) + .post_beacon_blocks_ssz(&PublishBlockRequest::from(block)) .await .is_err()); @@ -1357,7 +1357,8 @@ impl ApiTester { .await .is_ok()); - let blinded_block_contents = block_contents.clone_as_blinded(); + // Blinded deneb block contents is just the blinded block + let blinded_block_contents = block_contents.signed_block().clone_as_blinded(); // Test all the POST methods in sequence, they should all behave the same. 
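// A sketch of the shape `ApiTester` relies on above when it builds fixtures
// with `PublishBlockRequest::from(...)`: a signed block paired with optional
// (proofs, blobs), where a plain `From<SignedBlock>` covers the blob-less
// case. Stand-in types, not the real eth2 types.
struct SignedBlock;

struct PublishBlockRequest {
    block: SignedBlock,
    blobs: Option<(Vec<[u8; 48]>, Vec<Vec<u8>>)>, // (kzg_proofs, blobs)
}

impl PublishBlockRequest {
    fn new(block: SignedBlock, blobs: Option<(Vec<[u8; 48]>, Vec<Vec<u8>>)>) -> Self {
        Self { block, blobs }
    }
}

impl From<SignedBlock> for PublishBlockRequest {
    fn from(block: SignedBlock) -> Self {
        Self::new(block, None)
    }
}

fn main() {
    let request = PublishBlockRequest::from(SignedBlock);
    assert!(request.blobs.is_none());
    let _ = request.block;
}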
let responses = vec![ @@ -2567,7 +2568,7 @@ impl ApiTester { let block = self .client - .get_validator_blocks::>(slot, &randao_reveal, None) + .get_validator_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -2576,7 +2577,7 @@ impl ApiTester { let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); let signed_block_contents = - SignedBlockContents::try_from(signed_block.clone()).unwrap(); + PublishBlockRequest::try_from(signed_block.clone()).unwrap(); self.client .post_beacon_blocks(&signed_block_contents) @@ -2631,13 +2632,13 @@ impl ApiTester { let block_bytes = self .client - .get_validator_blocks_ssz::>(slot, &randao_reveal, None) + .get_validator_blocks_ssz::(slot, &randao_reveal, None) .await .unwrap() .expect("block bytes"); let block_contents = - BlockContents::>::from_ssz_bytes(&block_bytes, &self.chain.spec) + FullBlockContents::::from_ssz_bytes(&block_bytes, &self.chain.spec) .expect("block contents bytes can be decoded"); let signed_block_contents = @@ -2704,28 +2705,26 @@ impl ApiTester { .unwrap(); if is_blinded_payload { - let block_contents = >>::from_ssz_bytes( + let blinded_block = >::from_ssz_bytes( &fork_version_response_bytes.unwrap(), &self.chain.spec, ) .expect("block contents bytes can be decoded"); - let signed_block_contents = - block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + let signed_blinded_block = + blinded_block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); self.client - .post_beacon_blocks_ssz(&signed_block_contents) + .post_beacon_blinded_blocks_ssz(&signed_blinded_block) .await .unwrap(); - // This converts the generic `Payload` to a concrete type for comparison. - let signed_block = signed_block_contents.deconstruct().0; - let head_block = SignedBeaconBlock::from(signed_block.clone()); - assert_eq!(head_block, signed_block); + let head_block = self.chain.head_beacon_block().clone_as_blinded(); + assert_eq!(head_block, signed_blinded_block); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } else { - let block_contents = >>::from_ssz_bytes( + let block_contents = >::from_ssz_bytes( &fork_version_response_bytes.unwrap(), &self.chain.spec, ) @@ -2757,7 +2756,7 @@ impl ApiTester { let block = self .client - .get_validator_blocks_modular::>( + .get_validator_blocks_modular::( slot, &Signature::infinity().unwrap().into(), None, @@ -2815,13 +2814,13 @@ impl ApiTester { // Check failure with no `skip_randao_verification` passed. self.client - .get_validator_blocks::>(slot, &bad_randao_reveal, None) + .get_validator_blocks::(slot, &bad_randao_reveal, None) .await .unwrap_err(); // Check failure with `skip_randao_verification` (requires infinity sig). 
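// A toy analogue of the fork-aware SSZ decoding in the round-trips above:
// block containers are fork-dependent, so `from_ssz_bytes` takes the chain
// spec as context instead of decoding context-free. Here the "spec" simply
// fixes the expected encoding length; the real logic selects a per-fork schema.
struct ChainSpec {
    payload_len: usize,
}

struct BlockContents(Vec<u8>);

impl BlockContents {
    // Unlike plain SSZ types, decoding needs the spec as context.
    fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result<Self, String> {
        if bytes.len() != spec.payload_len {
            return Err(format!(
                "expected {} bytes, got {}",
                spec.payload_len,
                bytes.len()
            ));
        }
        Ok(Self(bytes.to_vec()))
    }
}

fn main() {
    let spec = ChainSpec { payload_len: 4 };
    assert!(BlockContents::from_ssz_bytes(&[0u8; 4], &spec).is_ok());
    assert!(BlockContents::from_ssz_bytes(&[0u8; 3], &spec).is_err());
}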
self.client - .get_validator_blocks_modular::>( + .get_validator_blocks_modular::( slot, &bad_randao_reveal, None, @@ -2836,7 +2835,7 @@ impl ApiTester { self } - pub async fn test_blinded_block_production>(&self) { + pub async fn test_blinded_block_production(&self) { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -2876,29 +2875,33 @@ impl ApiTester { let block = self .client - .get_validator_blinded_blocks::(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data; - let signed_block_contents = - block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); self.client - .post_beacon_blinded_blocks(&signed_block_contents) + .post_beacon_blinded_blocks(&signed_block) .await .unwrap(); - // This converts the generic `Payload` to a concrete type for comparison. - let signed_block = signed_block_contents.deconstruct().0; - let head_block = SignedBeaconBlock::from(signed_block.clone()); - assert_eq!(head_block, signed_block); + let head_block = self + .client + .get_beacon_blocks(CoreBlockId::Head) + .await + .unwrap() + .unwrap() + .data; + + assert_eq!(head_block.clone_as_blinded(), signed_block); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } } - pub async fn test_blinded_block_production_ssz>(&self) { + pub async fn test_blinded_block_production_ssz(&self) { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -2938,43 +2941,47 @@ impl ApiTester { let block_contents_bytes = self .client - .get_validator_blinded_blocks_ssz::(slot, &randao_reveal, None) + .get_validator_blinded_blocks_ssz::(slot, &randao_reveal, None) .await .unwrap() .expect("block bytes"); - let block_contents = BlockContents::::from_ssz_bytes( - &block_contents_bytes, - &self.chain.spec, - ) - .expect("block contents bytes can be decoded"); + let block_contents = + FullBlockContents::::from_ssz_bytes(&block_contents_bytes, &self.chain.spec) + .expect("block contents bytes can be decoded"); let signed_block_contents = block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); self.client - .post_beacon_blinded_blocks_ssz(&signed_block_contents) + .post_beacon_blinded_blocks_ssz( + &signed_block_contents.signed_block().clone_as_blinded(), + ) .await .unwrap(); - // This converts the generic `Payload` to a concrete type for comparison. 
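// The recurring change in these tests, in miniature: the
// `Payload: AbstractExecPayload<E>` type parameter is dropped because the
// blinded endpoints now have exactly one payload shape. Toy types below; the
// point is only the generic-to-concrete refactor.
trait Payload {
    fn kind() -> &'static str;
}

struct Blinded;
impl Payload for Blinded {
    fn kind() -> &'static str {
        "blinded"
    }
}

// Before: callers had to spell out the payload type at every call site.
fn produce_block_generic<P: Payload>() -> &'static str {
    P::kind()
}

// After: the endpoint fixes the payload shape, so the parameter disappears.
fn produce_block() -> &'static str {
    "blinded"
}

fn main() {
    assert_eq!(produce_block_generic::<Blinded>(), produce_block());
}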
- let signed_block = signed_block_contents.deconstruct().0; - let head_block = SignedBeaconBlock::from(signed_block.clone()); - assert_eq!(head_block, signed_block); + let head_block = self + .client + .get_beacon_blocks(CoreBlockId::Head) + .await + .unwrap() + .unwrap() + .data; + + let signed_block = signed_block_contents.signed_block(); + assert_eq!(&head_block, signed_block); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } } - pub async fn test_blinded_block_production_no_verify_randao>( - self, - ) -> Self { + pub async fn test_blinded_block_production_no_verify_randao(self) -> Self { for _ in 0..E::slots_per_epoch() { let slot = self.chain.slot().unwrap(); - let block_contents = self + let blinded_block = self .client - .get_validator_blinded_blocks_modular::( + .get_validator_blinded_blocks_modular::( slot, &Signature::infinity().unwrap().into(), None, @@ -2983,18 +2990,14 @@ impl ApiTester { .await .unwrap() .data; - assert_eq!(block_contents.block().slot(), slot); + assert_eq!(blinded_block.slot(), slot); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } self } - pub async fn test_blinded_block_production_verify_randao_invalid< - Payload: AbstractExecPayload, - >( - self, - ) -> Self { + pub async fn test_blinded_block_production_verify_randao_invalid(self) -> Self { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -3034,13 +3037,13 @@ impl ApiTester { // Check failure with full randao verification enabled. self.client - .get_validator_blinded_blocks::(slot, &bad_randao_reveal, None) + .get_validator_blinded_blocks::(slot, &bad_randao_reveal, None) .await .unwrap_err(); // Check failure with `skip_randao_verification` (requires infinity sig). self.client - .get_validator_blinded_blocks_modular::( + .get_validator_blinded_blocks_modular::( slot, &bad_randao_reveal, None, @@ -3520,13 +3523,7 @@ impl ApiTester { .unwrap(); let payload: BlindedPayload = match payload_type { - Blinded(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), + Blinded(payload) => payload.data.body().execution_payload().unwrap().into(), Full(_) => panic!("Expecting a blinded payload"), }; @@ -3545,11 +3542,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -3586,11 +3582,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -3630,13 +3625,7 @@ impl ApiTester { .unwrap(); let payload: BlindedPayload = match payload_type { - Blinded(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), + Blinded(payload) => payload.data.body().execution_payload().unwrap().into(), Full(_) => panic!("Expecting a blinded payload"), }; @@ -3665,11 +3654,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -3711,13 +3699,7 @@ impl ApiTester { .unwrap(); let payload: BlindedPayload = match payload_type { - Blinded(payload) => 
payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), + Blinded(payload) => payload.data.body().execution_payload().unwrap().into(), Full(_) => panic!("Expecting a blinded payload"), }; @@ -3752,11 +3734,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -3845,11 +3826,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -3936,11 +3916,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4026,11 +4005,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4102,11 +4080,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4162,11 +4139,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4235,11 +4211,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4265,11 +4240,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4370,11 +4344,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4410,11 +4383,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4524,11 +4496,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4608,11 +4579,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, 
&randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4673,11 +4643,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4738,11 +4707,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4803,11 +4771,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4867,11 +4834,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4907,16 +4873,11 @@ impl ApiTester { .await .unwrap(); - let block_contents = match payload_type { + let _block_contents = match payload_type { Blinded(payload) => payload.data, Full(_) => panic!("Expecting a blinded payload"), }; - let (_, maybe_sidecars) = block_contents.deconstruct(); - - // Response should contain blob sidecars - assert!(maybe_sidecars.is_some()); - self } @@ -4940,11 +4901,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -5892,17 +5852,14 @@ async fn block_production_v3_ssz_with_skip_slots() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_full_payload_premerge() { - ApiTester::new() - .await - .test_blinded_block_production::>() - .await; + ApiTester::new().await.test_blinded_block_production().await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_ssz_full_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_ssz::>() + .test_blinded_block_production_ssz() .await; } @@ -5911,7 +5868,7 @@ async fn blinded_block_production_with_skip_slots_full_payload_premerge() { ApiTester::new() .await .skip_slots(E::slots_per_epoch() * 2) - .test_blinded_block_production::>() + .test_blinded_block_production() .await; } @@ -5920,7 +5877,7 @@ async fn blinded_block_production_ssz_with_skip_slots_full_payload_premerge() { ApiTester::new() .await .skip_slots(E::slots_per_epoch() * 2) - .test_blinded_block_production_ssz::>() + .test_blinded_block_production_ssz() .await; } @@ -5928,7 +5885,7 @@ async fn blinded_block_production_ssz_with_skip_slots_full_payload_premerge() { async fn blinded_block_production_no_verify_randao_full_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_no_verify_randao::>() + .test_blinded_block_production_no_verify_randao() .await; } @@ -5936,16 +5893,13 @@ async fn blinded_block_production_no_verify_randao_full_payload_premerge() { async fn blinded_block_production_verify_randao_invalid_full_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_verify_randao_invalid::>() + 
.test_blinded_block_production_verify_randao_invalid() .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_blinded_payload_premerge() { - ApiTester::new() - .await - .test_blinded_block_production::>() - .await; + ApiTester::new().await.test_blinded_block_production().await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -5953,7 +5907,7 @@ async fn blinded_block_production_with_skip_slots_blinded_payload_premerge() { ApiTester::new() .await .skip_slots(E::slots_per_epoch() * 2) - .test_blinded_block_production::>() + .test_blinded_block_production() .await; } @@ -5961,7 +5915,7 @@ async fn blinded_block_production_with_skip_slots_blinded_payload_premerge() { async fn blinded_block_production_no_verify_randao_blinded_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_no_verify_randao::>() + .test_blinded_block_production_no_verify_randao() .await; } @@ -5969,7 +5923,7 @@ async fn blinded_block_production_no_verify_randao_blinded_payload_premerge() { async fn blinded_block_production_verify_randao_invalid_blinded_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_verify_randao_invalid::>() + .test_blinded_block_production_verify_randao_invalid() .await; } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 627c871c471..9a6ad19ac58 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -563,10 +563,10 @@ impl std::fmt::Display for RPCResponse { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } RPCResponse::BlobsByRange(blob) => { - write!(f, "BlobsByRange: Blob slot: {}", blob.slot) + write!(f, "BlobsByRange: Blob slot: {}", blob.slot()) } RPCResponse::BlobsByRoot(sidecar) => { - write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot) + write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot()) } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 9efe44f75d8..60fe3748265 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -9,19 +9,20 @@ use std::boxed::Box; use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ - Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, - LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockMerge, SignedBlobSidecar, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, BlobSidecar, EthSpec, ForkContext, ForkName, + LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockMerge, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. 
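The type-level shape of the gossip change is visible here: `PubsubMessage::BlobSidecar` now pairs the subnet index taken from the topic with an `Arc<BlobSidecar<T>>` rather than a `SignedBlobSidecar`, so one decoded sidecar can be shared cheaply between the router, the beacon processor and the chain. A publish-side sketch with assumed bindings (`blob_index`, `blob_sidecar`):

    // The router forwards blobs in exactly this shape; see the
    // `router.rs` hunk later in this patch.
    let message: PubsubMessage<E> =
        PubsubMessage::BlobSidecar(Box::new((blob_index, blob_sidecar)));

The unchanged `BeaconBlock` variant and the reworked blob variant of the enum follow below.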
BeaconBlock(Arc>), - /// Gossipsub message providing notification of a [`SignedBlobSidecar`] along with the subnet id where it was received. - BlobSidecar(Box<(u64, SignedBlobSidecar)>), + /// Gossipsub message providing notification of a [`BlobSidecar`] along with the subnet id where it was received. + BlobSidecar(Box<(u64, Arc>)>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. @@ -204,8 +205,10 @@ impl PubsubMessage { GossipKind::BlobSidecar(blob_index) => { match fork_context.from_context_bytes(gossip_topic.fork_digest) { Some(ForkName::Deneb) => { - let blob_sidecar = SignedBlobSidecar::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?; + let blob_sidecar = Arc::new( + BlobSidecar::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ); Ok(PubsubMessage::BlobSidecar(Box::new(( *blob_index, blob_sidecar, @@ -318,7 +321,8 @@ impl std::fmt::Display for PubsubMessage { PubsubMessage::BlobSidecar(data) => write!( f, "BlobSidecar: slot: {}, blob index: {}", - data.1.message.slot, data.1.message.index, + data.1.slot(), + data.1.index, ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 82daf74efe0..5d98039a819 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -33,10 +33,11 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, - LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedBlobSidecar, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, - Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, BlobSidecar, EthSpec, Hash256, IndexedAttestation, + LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, + SyncSubnetId, }; use beacon_processor::{ @@ -607,20 +608,20 @@ impl NetworkBeaconProcessor { peer_id: PeerId, _peer_client: Client, blob_index: u64, - signed_blob: SignedBlobSidecar, + blob_sidecar: Arc>, seen_duration: Duration, ) { - let slot = signed_blob.message.slot; - let root = signed_blob.message.block_root; - let index = signed_blob.message.index; - let commitment = signed_blob.message.kzg_commitment; + let slot = blob_sidecar.slot(); + let root = blob_sidecar.block_root(); + let index = blob_sidecar.index; + let commitment = blob_sidecar.kzg_commitment; let delay = get_slot_delay_ms(seen_duration, slot, &self.chain.slot_clock); // Log metrics to track delay from other nodes on the network. 
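With the outer signature gone, the Deneb blob topic carries a bare SSZ-encoded `BlobSidecar` whose authenticity comes from the embedded signed block header. A minimal sketch of the decode path added above, assuming the `types` and `ssz` crates from this tree:

    use ssz::Decode;
    use std::sync::Arc;
    use types::{BlobSidecar, EthSpec};

    // Decode a payload from the blob gossip topic into the new message
    // shape: (subnet index, Arc'd sidecar). The index comes from the
    // topic name, not from the SSZ payload itself.
    fn decode_blob_gossip<E: EthSpec>(
        blob_index: u64,
        data: &[u8],
    ) -> Result<(u64, Arc<BlobSidecar<E>>), String> {
        let blob_sidecar =
            Arc::new(BlobSidecar::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?);
        Ok((blob_index, blob_sidecar))
    }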
metrics::observe_duration(&metrics::BEACON_BLOB_GOSSIP_SLOT_START_DELAY_TIME, delay); metrics::set_gauge(&metrics::BEACON_BLOB_LAST_DELAY, delay.as_millis() as i64); match self .chain - .verify_blob_sidecar_for_gossip(signed_blob, blob_index) + .verify_blob_sidecar_for_gossip(blob_sidecar, blob_index) { Ok(gossip_verified_blob) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL); @@ -631,7 +632,7 @@ impl NetworkBeaconProcessor { self.log, "Gossip blob arrived late"; "block_root" => ?gossip_verified_blob.block_root(), - "proposer_index" => gossip_verified_blob.proposer_index(), + "proposer_index" => gossip_verified_blob.block_proposer_index(), "slot" => gossip_verified_blob.slot(), "delay" => ?delay, "commitment" => %gossip_verified_blob.kzg_commitment(), @@ -670,17 +671,30 @@ impl NetworkBeaconProcessor { self.log, "Unknown parent hash for blob"; "action" => "requesting parent", - "block_root" => %blob.block_root, - "parent_root" => %blob.block_parent_root, + "block_root" => %blob.block_root(), + "parent_root" => %blob.block_parent_root(), "commitment" => %commitment, ); self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob)); } - GossipBlobError::ProposerSignatureInvalid + GossipBlobError::KzgNotInitialized + | GossipBlobError::PubkeyCacheTimeout + | GossipBlobError::BeaconChainError(_) => { + crit!( + self.log, + "Internal error when verifying blob sidecar"; + "error" => ?err, + ) + } + GossipBlobError::ProposalSignatureInvalid | GossipBlobError::UnknownValidator(_) | GossipBlobError::ProposerIndexMismatch { .. } | GossipBlobError::BlobIsNotLaterThanParent { .. } - | GossipBlobError::InvalidSubnet { .. } => { + | GossipBlobError::InvalidSubnet { .. } + | GossipBlobError::InvalidInclusionProof + | GossipBlobError::KzgError(_) + | GossipBlobError::InclusionProof(_) + | GossipBlobError::NotFinalizedDescendant { .. } => { warn!( self.log, "Could not verify blob sidecar for gossip. Rejecting the blob sidecar"; @@ -703,7 +717,6 @@ impl NetworkBeaconProcessor { ); } GossipBlobError::FutureSlot { .. } - | GossipBlobError::BeaconChainError(_) | GossipBlobError::RepeatBlob { .. } | GossipBlobError::PastFinalizedSlot { .. 
} => { warn!( diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 8094d4677c4..2356a197cc2 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -212,7 +212,7 @@ impl NetworkBeaconProcessor { peer_id: PeerId, peer_client: Client, blob_index: u64, - blob: SignedBlobSidecar, + blob_sidecar: Arc>, seen_timestamp: Duration, ) -> Result<(), Error> { let processor = self.clone(); @@ -223,7 +223,7 @@ impl NetworkBeaconProcessor { peer_id, peer_client, blob_index, - blob, + blob_sidecar, seen_timestamp, ) .await @@ -231,7 +231,7 @@ impl NetworkBeaconProcessor { self.try_send(BeaconWorkEvent { drop_during_sync: false, - work: Work::GossipSignedBlobSidecar(Box::pin(process_fn)), + work: Work::GossipBlobSidecar(Box::pin(process_fn)), }) } diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index d76ce5aadde..acfa069d355 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -292,7 +292,7 @@ impl NetworkBeaconProcessor { ) { let Some(slot) = blobs .iter() - .find_map(|blob| blob.as_ref().map(|blob| blob.slot)) + .find_map(|blob| blob.as_ref().map(|blob| blob.slot())) else { return; }; diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 0945aa74319..503d2f12618 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -33,8 +33,8 @@ use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; use types::{ - Attestation, AttesterSlashing, Epoch, Hash256, MainnetEthSpec, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedBlobSidecarList, SignedVoluntaryExit, Slot, + Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, Epoch, Hash256, MainnetEthSpec, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, SubnetId, }; @@ -55,7 +55,7 @@ const STANDARD_TIMEOUT: Duration = Duration::from_secs(10); struct TestRig { chain: Arc>, next_block: Arc>, - next_blobs: Option>, + next_blobs: Option>, attestations: Vec<(Attestation, SubnetId)>, next_block_attestations: Vec<(Attestation, SubnetId)>, next_block_aggregate_attestations: Vec>, @@ -186,8 +186,10 @@ impl TestRig { let log = harness.logger().clone(); - let mut beacon_processor_config = BeaconProcessorConfig::default(); - beacon_processor_config.enable_backfill_rate_limiting = enable_backfill_rate_limiting; + let beacon_processor_config = BeaconProcessorConfig { + enable_backfill_rate_limiting, + ..Default::default() + }; let BeaconProcessorChannels { beacon_processor_tx, beacon_processor_rx, @@ -243,12 +245,17 @@ impl TestRig { chain.spec.maximum_gossip_clock_disparity(), ); - assert!(!beacon_processor.is_err()); - + assert!(beacon_processor.is_ok()); + let block = next_block_tuple.0; + let blob_sidecars = if let Some((kzg_proofs, blobs)) = next_block_tuple.1 { + Some(BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap()) + } else { + None + }; Self { chain, - next_block: Arc::new(next_block_tuple.0), - next_blobs: next_block_tuple.1, + next_block: Arc::new(block), + next_blobs: blob_sidecars, attestations, next_block_attestations, 
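The rig change above mirrors the new production flow: block production returns raw blobs plus KZG proofs, and the sidecars are derived afterwards from the signed block, which supplies the signed header and the commitment inclusion proofs. A sketch of that derivation, with the same bindings as the surrounding code (`next_block_tuple`, `block`):

    // Couple each blob with its proof, the block's signed header and an
    // inclusion proof; this replaces the old `SignedBlobSidecarList`.
    let blob_sidecars: Option<BlobSidecarList<E>> = next_block_tuple.1
        .map(|(kzg_proofs, blobs)| {
            BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap()
        });

The remaining `TestRig` fields continue uninterrupted below.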
next_block_aggregate_attestations, @@ -293,7 +300,7 @@ impl TestRig { junk_message_id(), junk_peer_id(), Client::default(), - blob.message.index, + blob.index, blob.clone(), Duration::from_secs(0), ) @@ -306,7 +313,7 @@ impl TestRig { self.network_beacon_processor .send_rpc_beacon_block( block_root, - RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone().into()), + RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), std::time::Duration::default(), BlockProcessType::ParentLookup { chain_hash: Hash256::random(), @@ -320,7 +327,7 @@ impl TestRig { self.network_beacon_processor .send_rpc_beacon_block( block_root, - RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone().into()), + RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 1 }, ) @@ -328,12 +335,7 @@ impl TestRig { } pub fn enqueue_single_lookup_rpc_blobs(&self) { if let Some(blobs) = self.next_blobs.clone() { - let blobs = FixedBlobSidecarList::from( - blobs - .into_iter() - .map(|b| Some(b.message)) - .collect::>(), - ); + let blobs = FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::>()); self.network_beacon_processor .send_rpc_blobs( self.next_block.canonical_root(), diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 4df940a3b79..5d3dde90ce0 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -302,14 +302,14 @@ impl Router { ), ), PubsubMessage::BlobSidecar(data) => { - let (blob_index, signed_blob) = *data; + let (blob_index, blob_sidecar) = *data; self.handle_beacon_processor_send_result( self.network_beacon_processor.send_gossip_blob_sidecar( message_id, peer_id, self.network_globals.client(&peer_id), blob_index, - signed_blob, + blob_sidecar, timestamp_now(), ), ) diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index 7f141edb5b1..e089ef4fef3 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -428,7 +428,7 @@ impl RequestState for BlobRequestState (SignedBeaconBlock, Vec>) { let (mut block, mut blobs) = self.rand_block_and_blobs(fork_name, num_blobs); *block.message_mut().parent_root_mut() = parent_root; - let block_root = block.canonical_root(); blobs.iter_mut().for_each(|blob| { - blob.block_parent_root = parent_root; - blob.block_root = block_root; + blob.signed_block_header = block.signed_block_header(); }); (block, blobs) } @@ -1293,7 +1291,7 @@ mod deneb_only { let child_blob = blobs.first().cloned().unwrap(); let parent_root = block_root; - let child_root = child_blob.block_root; + let child_root = child_blob.block_root(); block_root = child_root; let mut blobs = FixedBlobSidecarList::default(); diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 819ea8e30bd..f9ed45fcd8b 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -46,7 +46,7 @@ impl BlocksAndBlobsRequestInfo { while { let pair_next_blob = blob_iter .peek() - .map(|sidecar| sidecar.slot == block.slot()) + .map(|sidecar| sidecar.slot() == block.slot()) .unwrap_or(false); pair_next_blob } { diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index c1e9cde3fe0..3bd32308ae8 100644 --- 
a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -637,9 +637,9 @@ impl SyncManager { ); } SyncMessage::UnknownParentBlob(peer_id, blob) => { - let blob_slot = blob.slot; - let block_root = blob.block_root; - let parent_root = blob.block_parent_root; + let blob_slot = blob.slot(); + let block_root = blob.block_root(); + let parent_root = blob.block_parent_root(); let blob_index = blob.index; if blob_index >= T::EthSpec::max_blobs_per_block() as u64 { warn!(self.log, "Peer sent blob with invalid index"; "index" => blob_index, "peer_id" => %peer_id); diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index da5c1a5a122..50f18074459 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -769,9 +769,9 @@ impl BeaconNodeHttpClient { /// `POST beacon/blocks` /// /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blocks>( + pub async fn post_beacon_blocks( &self, - block_contents: &SignedBlockContents, + block_contents: &PublishBlockRequest, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -789,9 +789,9 @@ impl BeaconNodeHttpClient { /// `POST beacon/blocks` /// /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blocks_ssz>( + pub async fn post_beacon_blocks_ssz( &self, - block_contents: &SignedBlockContents, + block_contents: &PublishBlockRequest, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -813,9 +813,9 @@ impl BeaconNodeHttpClient { /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blinded_blocks>( + pub async fn post_beacon_blinded_blocks( &self, - block: &SignedBlockContents, + block: &SignedBlindedBeaconBlock, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -833,9 +833,9 @@ impl BeaconNodeHttpClient { /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. 
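On the client, the publish endpoints are now monomorphic: full blocks travel as a `PublishBlockRequest` (block plus proofs and blobs after Deneb), while the blinded endpoints take a bare `SignedBlindedBeaconBlock`. A usage sketch inside an async fn returning `Result<(), eth2::Error>`, with `client`, `publish_request` and `signed_blinded_block` as assumed bindings; the SSZ variant whose diff continues below takes the same blinded type:

    // JSON and SSZ variants now take the same argument types.
    client.post_beacon_blocks(&publish_request).await?;
    client.post_beacon_blocks_ssz(&publish_request).await?;
    client.post_beacon_blinded_blocks(&signed_blinded_block).await?;
    client.post_beacon_blinded_blocks_ssz(&signed_blinded_block).await?;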
- pub async fn post_beacon_blinded_blocks_ssz>( + pub async fn post_beacon_blinded_blocks_ssz( &self, - block: &SignedBlockContents, + block: &SignedBlindedBeaconBlock, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -887,9 +887,9 @@ impl BeaconNodeHttpClient { } /// `POST v2/beacon/blocks` - pub async fn post_beacon_blocks_v2>( + pub async fn post_beacon_blocks_v2( &self, - block_contents: &SignedBlockContents, + block_contents: &PublishBlockRequest, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version( @@ -904,9 +904,9 @@ impl BeaconNodeHttpClient { } /// `POST v2/beacon/blocks` - pub async fn post_beacon_blocks_v2_ssz>( + pub async fn post_beacon_blocks_v2_ssz( &self, - block_contents: &SignedBlockContents, + block_contents: &PublishBlockRequest, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version_and_ssz_body( @@ -921,16 +921,16 @@ impl BeaconNodeHttpClient { } /// `POST v2/beacon/blinded_blocks` - pub async fn post_beacon_blinded_blocks_v2>( + pub async fn post_beacon_blinded_blocks_v2( &self, - block_contents: &SignedBlockContents, + signed_block: &SignedBlindedBeaconBlock, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version( self.post_beacon_blinded_blocks_v2_path(validation_level)?, - block_contents, + signed_block, Some(self.timeouts.proposal), - block_contents.signed_block().message().body().fork_name(), + signed_block.message().body().fork_name(), ) .await?; @@ -940,14 +940,14 @@ impl BeaconNodeHttpClient { /// `POST v2/beacon/blinded_blocks` pub async fn post_beacon_blinded_blocks_v2_ssz( &self, - block_contents: &SignedBlindedBlockContents, + signed_block: &SignedBlindedBeaconBlock, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version_and_ssz_body( self.post_beacon_blinded_blocks_v2_path(validation_level)?, - block_contents.as_ssz_bytes(), + signed_block.as_ssz_bytes(), Some(self.timeouts.proposal), - block_contents.signed_block().message().body().fork_name(), + signed_block.message().body().fork_name(), ) .await?; @@ -1700,38 +1700,33 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks>( + pub async fn get_validator_blocks( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result>, Error> { + ) -> Result>, Error> { self.get_validator_blocks_modular(slot, randao_reveal, graffiti, SkipRandaoVerification::No) .await } /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks_modular>( + pub async fn get_validator_blocks_modular( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result>, Error> { + ) -> Result>, Error> { let path = self - .get_validator_blocks_path::( - slot, - randao_reveal, - graffiti, - skip_randao_verification, - ) + .get_validator_blocks_path::(slot, randao_reveal, graffiti, skip_randao_verification) .await?; self.get(path).await } /// returns `GET v2/validator/blocks/{slot}` URL path - pub async fn get_validator_blocks_path>( + pub async fn get_validator_blocks_path( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1837,12 +1832,12 @@ impl BeaconNodeHttpClient { if is_blinded_payload { let blinded_payload = response - .json::>>>() + .json::>>() .await?; Ok(ForkVersionedBeaconBlockType::Blinded(blinded_payload)) } else { let full_payload = response - .json::>>>() + .json::>>() .await?; 
Ok(ForkVersionedBeaconBlockType::Full(full_payload)) } @@ -1901,13 +1896,13 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` in ssz format - pub async fn get_validator_blocks_ssz>( + pub async fn get_validator_blocks_ssz( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, ) -> Result>, Error> { - self.get_validator_blocks_modular_ssz::( + self.get_validator_blocks_modular_ssz::( slot, randao_reveal, graffiti, @@ -1917,7 +1912,7 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` in ssz format - pub async fn get_validator_blocks_modular_ssz>( + pub async fn get_validator_blocks_modular_ssz( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1925,12 +1920,7 @@ impl BeaconNodeHttpClient { skip_randao_verification: SkipRandaoVerification, ) -> Result>, Error> { let path = self - .get_validator_blocks_path::( - slot, - randao_reveal, - graffiti, - skip_randao_verification, - ) + .get_validator_blocks_path::(slot, randao_reveal, graffiti, skip_randao_verification) .await?; self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block_ssz) @@ -1938,12 +1928,12 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blinded_blocks/{slot}` - pub async fn get_validator_blinded_blocks>( + pub async fn get_validator_blinded_blocks( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result>, Error> { + ) -> Result>, Error> { self.get_validator_blinded_blocks_modular( slot, randao_reveal, @@ -1954,7 +1944,7 @@ impl BeaconNodeHttpClient { } /// returns `GET v1/validator/blinded_blocks/{slot}` URL path - pub async fn get_validator_blinded_blocks_path>( + pub async fn get_validator_blinded_blocks_path( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1986,18 +1976,15 @@ impl BeaconNodeHttpClient { } /// `GET v1/validator/blinded_blocks/{slot}` - pub async fn get_validator_blinded_blocks_modular< - T: EthSpec, - Payload: AbstractExecPayload, - >( + pub async fn get_validator_blinded_blocks_modular( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result>, Error> { + ) -> Result>, Error> { let path = self - .get_validator_blinded_blocks_path::( + .get_validator_blinded_blocks_path::( slot, randao_reveal, graffiti, @@ -2009,13 +1996,13 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blinded_blocks/{slot}` in ssz format - pub async fn get_validator_blinded_blocks_ssz>( + pub async fn get_validator_blinded_blocks_ssz( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, ) -> Result>, Error> { - self.get_validator_blinded_blocks_modular_ssz::( + self.get_validator_blinded_blocks_modular_ssz::( slot, randao_reveal, graffiti, @@ -2024,10 +2011,7 @@ impl BeaconNodeHttpClient { .await } - pub async fn get_validator_blinded_blocks_modular_ssz< - T: EthSpec, - Payload: AbstractExecPayload, - >( + pub async fn get_validator_blinded_blocks_modular_ssz( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -2035,7 +2019,7 @@ impl BeaconNodeHttpClient { skip_randao_verification: SkipRandaoVerification, ) -> Result>, Error> { let path = self - .get_validator_blinded_blocks_path::( + .get_validator_blinded_blocks_path::( slot, randao_reveal, graffiti, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index dea8b2bf568..7007138d8e8 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -12,9 +12,7 @@ use std::convert::TryFrom; use 
std::fmt::{self, Display}; use std::str::{from_utf8, FromStr}; use std::time::Duration; -use tree_hash::TreeHash; use types::beacon_block_body::KzgCommitments; -use types::builder_bid::BlindedBlobsBundle; pub use types::*; #[cfg(feature = "lighthouse")] @@ -901,9 +899,9 @@ pub struct SseBlobSidecar { impl SseBlobSidecar { pub fn from_blob_sidecar(blob_sidecar: &BlobSidecar) -> SseBlobSidecar { SseBlobSidecar { - block_root: blob_sidecar.block_root, + block_root: blob_sidecar.block_root(), index: blob_sidecar.index, - slot: blob_sidecar.slot, + slot: blob_sidecar.slot(), kzg_commitment: blob_sidecar.kzg_commitment, versioned_hash: blob_sidecar.kzg_commitment.calculate_versioned_hash(), } @@ -1411,15 +1409,14 @@ pub mod serde_status_code { } pub enum ForkVersionedBeaconBlockType { - Full(ForkVersionedResponse>>), - Blinded(ForkVersionedResponse>>), + Full(ForkVersionedResponse>), + Blinded(ForkVersionedResponse>), } #[cfg(test)] mod tests { use super::*; use ssz::Encode; - use std::sync::Arc; #[test] fn query_vec() { @@ -1460,17 +1457,17 @@ mod tests { type E = MainnetEthSpec; let spec = ForkName::Capella.make_genesis_spec(E::default_spec()); - let block: SignedBlockContents> = SignedBeaconBlock::from_block( + let block: PublishBlockRequest = SignedBeaconBlock::from_block( BeaconBlock::::Capella(BeaconBlockCapella::empty(&spec)), Signature::empty(), ) .try_into() .expect("should convert into signed block contents"); - let decoded: SignedBlockContents = - SignedBlockContents::from_ssz_bytes(&block.as_ssz_bytes(), &spec) + let decoded: PublishBlockRequest = + PublishBlockRequest::from_ssz_bytes(&block.as_ssz_bytes(), &spec) .expect("should decode Block"); - assert!(matches!(decoded, SignedBlockContents::Block(_))); + assert!(matches!(decoded, PublishBlockRequest::Block(_))); } #[test] @@ -1482,87 +1479,49 @@ mod tests { BeaconBlock::::Deneb(BeaconBlockDeneb::empty(&spec)), Signature::empty(), ); - let blobs = SignedSidecarList::from(vec![SignedSidecar { - message: Arc::new(BlobSidecar::empty()), - signature: Signature::empty(), - _phantom: Default::default(), - }]); - let signed_block_contents = SignedBlockContents::new(block, Some(blobs)); - - let decoded: SignedBlockContents> = - SignedBlockContents::from_ssz_bytes(&signed_block_contents.as_ssz_bytes(), &spec) - .expect("should decode BlockAndBlobSidecars"); - assert!(matches!( - decoded, - SignedBlockContents::BlockAndBlobSidecars(_) - )); - } - - #[test] - fn ssz_signed_blinded_block_contents_with_blobs() { - type E = MainnetEthSpec; - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); - spec.bellatrix_fork_epoch = Some(Epoch::new(0)); - spec.capella_fork_epoch = Some(Epoch::new(0)); - spec.deneb_fork_epoch = Some(Epoch::new(0)); - - let blinded_block = SignedBeaconBlock::from_block( - BeaconBlock::>::Deneb(BeaconBlockDeneb::empty(&spec)), - Signature::empty(), - ); - let blinded_blobs = SignedSidecarList::from(vec![SignedSidecar { - message: Arc::new(BlindedBlobSidecar::empty()), - signature: Signature::empty(), - _phantom: Default::default(), - }]); - let signed_block_contents = SignedBlockContents::new(blinded_block, Some(blinded_blobs)); + let blobs = BlobsList::::from(vec![Blob::::default()]); + let kzg_proofs = KzgProofs::::from(vec![KzgProof::empty()]); + let signed_block_contents = PublishBlockRequest::new(block, Some((kzg_proofs, blobs))); - let decoded: SignedBlockContents> = - SignedBlockContents::from_ssz_bytes(&signed_block_contents.as_ssz_bytes(), &spec) - .expect("should decode 
BlindedBlockAndBlobSidecars"); - assert!(matches!( - decoded, - SignedBlockContents::BlindedBlockAndBlobSidecars(_) - )); + let decoded: PublishBlockRequest = + PublishBlockRequest::from_ssz_bytes(&signed_block_contents.as_ssz_bytes(), &spec) + .expect("should decode BlockAndBlobSidecars"); + assert!(matches!(decoded, PublishBlockRequest::BlockContents(_))); } } -/// A wrapper over a [`BeaconBlock`] or a [`BeaconBlockAndBlobSidecars`]. #[derive(Debug, Encode, Serialize, Deserialize)] #[serde(untagged)] -#[serde(bound = "T: EthSpec")] +#[serde(bound = "E: EthSpec")] #[ssz(enum_behaviour = "transparent")] -pub enum BlockContents> { - BlockAndBlobSidecars(BeaconBlockAndBlobSidecars), - BlindedBlockAndBlobSidecars(BlindedBeaconBlockAndBlobSidecars), - Block(BeaconBlock), +pub enum ProduceBlockV3Response { + Full(FullBlockContents), + Blinded(BlindedBeaconBlock), } -pub type BlockContentsTuple = ( - BeaconBlock, - Option>::Sidecar>>, -); - -impl> BlockContents { - pub fn new( - block: BeaconBlock, - blobs: Option>, - ) -> Self { - match (Payload::block_type(), blobs) { - (BlockType::Full, Some(blobs)) => { - Self::BlockAndBlobSidecars(BeaconBlockAndBlobSidecars { - block, - blob_sidecars: blobs, - }) - } - (BlockType::Blinded, Some(blobs)) => { - Self::BlindedBlockAndBlobSidecars(BlindedBeaconBlockAndBlobSidecars { - blinded_block: block, - blinded_blob_sidecars: blobs, - }) - } - (_, None) => Self::Block(block), +/// A wrapper over a [`BeaconBlock`] or a [`BlockContents`]. +#[derive(Debug, Encode, Serialize, Deserialize)] +#[serde(untagged)] +#[serde(bound = "T: EthSpec")] +#[ssz(enum_behaviour = "transparent")] +pub enum FullBlockContents { + /// This is a full deneb variant with block and blobs. + BlockContents(BlockContents), + /// This variant is for all pre-deneb full blocks. 
+ Block(BeaconBlock), +} + +pub type BlockContentsTuple = (BeaconBlock, Option<(KzgProofs, BlobsList)>); + +impl FullBlockContents { + pub fn new(block: BeaconBlock, blob_data: Option<(KzgProofs, BlobsList)>) -> Self { + match blob_data { + Some((kzg_proofs, blobs)) => Self::BlockContents(BlockContents { + block, + kzg_proofs, + blobs, + }), + None => Self::Block(block), } } @@ -1581,43 +1540,41 @@ impl> BlockContents { match fork_at_slot { ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - BeaconBlock::from_ssz_bytes(bytes, spec).map(|block| BlockContents::Block(block)) + BeaconBlock::from_ssz_bytes(bytes, spec) + .map(|block| FullBlockContents::Block(block)) } ForkName::Deneb => { let mut builder = ssz::SszDecoderBuilder::new(bytes); + builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; + builder.register_type::>()?; + builder.register_type::>()?; let mut decoder = builder.build()?; let block = decoder.decode_next_with(|bytes| BeaconBlock::from_ssz_bytes(bytes, spec))?; + let kzg_proofs = decoder.decode_next()?; let blobs = decoder.decode_next()?; - Ok(BlockContents::new(block, Some(blobs))) + + Ok(FullBlockContents::new(block, Some((kzg_proofs, blobs)))) } } } - pub fn block(&self) -> &BeaconBlock { + pub fn block(&self) -> &BeaconBlock { match self { - BlockContents::BlockAndBlobSidecars(block_and_sidecars) => &block_and_sidecars.block, - BlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => { - &block_and_sidecars.blinded_block - } - BlockContents::Block(block) => block, + FullBlockContents::BlockContents(block_and_sidecars) => &block_and_sidecars.block, + FullBlockContents::Block(block) => block, } } - pub fn deconstruct(self) -> BlockContentsTuple { + pub fn deconstruct(self) -> BlockContentsTuple { match self { - BlockContents::BlockAndBlobSidecars(block_and_sidecars) => ( + FullBlockContents::BlockContents(block_and_sidecars) => ( block_and_sidecars.block, - Some(block_and_sidecars.blob_sidecars), + Some((block_and_sidecars.kzg_proofs, block_and_sidecars.blobs)), ), - BlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => ( - block_and_sidecars.blinded_block, - Some(block_and_sidecars.blinded_blob_sidecars), - ), - BlockContents::Block(block) => (block, None), + FullBlockContents::Block(block) => (block, None), } } @@ -1628,104 +1585,64 @@ impl> BlockContents { fork: &Fork, genesis_validators_root: Hash256, spec: &ChainSpec, - ) -> SignedBlockContents { + ) -> PublishBlockRequest { let (block, maybe_blobs) = self.deconstruct(); let signed_block = block.sign(secret_key, fork, genesis_validators_root, spec); - let signed_blobs = maybe_blobs.map(|blobs| { - blobs - .into_iter() - .map(|blob| blob.sign(secret_key, fork, genesis_validators_root, spec)) - .collect::>() - .into() - }); - SignedBlockContents::new(signed_block, signed_blobs) + PublishBlockRequest::new(signed_block, maybe_blobs) } } -impl> ForkVersionDeserialize - for BlockContents -{ +impl ForkVersionDeserialize for FullBlockContents { fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( value: serde_json::value::Value, fork_name: ForkName, ) -> Result { match fork_name { ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - Ok(BlockContents::Block(BeaconBlock::deserialize_by_fork::< - 'de, - D, - >(value, fork_name)?)) - } - ForkName::Deneb => { - let block_contents = match Payload::block_type() { - BlockType::Blinded => BlockContents::BlindedBlockAndBlobSidecars( - 
BlindedBeaconBlockAndBlobSidecars::deserialize_by_fork::<'de, D>( - value, fork_name, - )?, - ), - BlockType::Full => BlockContents::BlockAndBlobSidecars( - BeaconBlockAndBlobSidecars::deserialize_by_fork::<'de, D>( - value, fork_name, - )?, - ), - }; - Ok(block_contents) + Ok(FullBlockContents::Block( + BeaconBlock::deserialize_by_fork::<'de, D>(value, fork_name)?, + )) } + ForkName::Deneb => Ok(FullBlockContents::BlockContents( + BlockContents::deserialize_by_fork::<'de, D>(value, fork_name)?, + )), } } } -impl> Into> - for BlockContents -{ - fn into(self) -> BeaconBlock { +impl Into> for FullBlockContents { + fn into(self) -> BeaconBlock { match self { - Self::BlockAndBlobSidecars(block_and_sidecars) => block_and_sidecars.block, - Self::BlindedBlockAndBlobSidecars(block_and_sidecars) => { - block_and_sidecars.blinded_block - } + Self::BlockContents(block_and_sidecars) => block_and_sidecars.block, Self::Block(block) => block, } } } -pub type SignedBlockContentsTuple = ( - SignedBeaconBlock, - Option>::Sidecar>>, -); - -pub type SignedBlindedBlockContents = SignedBlockContents>; +pub type SignedBlockContentsTuple = (SignedBeaconBlock, Option<(KzgProofs, BlobsList)>); -/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobSidecars`]. +/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBlockContents`]. #[derive(Clone, Debug, Encode, Serialize, Deserialize)] #[serde(untagged)] #[serde(bound = "T: EthSpec")] #[ssz(enum_behaviour = "transparent")] -pub enum SignedBlockContents = FullPayload> { - BlockAndBlobSidecars(SignedBeaconBlockAndBlobSidecars), - BlindedBlockAndBlobSidecars(SignedBlindedBeaconBlockAndBlobSidecars), - Block(SignedBeaconBlock), +pub enum PublishBlockRequest { + BlockContents(SignedBlockContents), + Block(SignedBeaconBlock), } -impl> SignedBlockContents { +impl PublishBlockRequest { pub fn new( - block: SignedBeaconBlock, - blobs: Option>, + block: SignedBeaconBlock, + blob_items: Option<(KzgProofs, BlobsList)>, ) -> Self { - match (Payload::block_type(), blobs) { - (BlockType::Full, Some(blobs)) => { - Self::BlockAndBlobSidecars(SignedBeaconBlockAndBlobSidecars { - signed_block: block, - signed_blob_sidecars: blobs, - }) - } - (BlockType::Blinded, Some(blobs)) => { - Self::BlindedBlockAndBlobSidecars(SignedBlindedBeaconBlockAndBlobSidecars { - signed_blinded_block: block, - signed_blinded_blob_sidecars: blobs, - }) - } - (_, None) => Self::Block(block), + match blob_items { + Some((kzg_proofs, blobs)) => Self::BlockContents(SignedBlockContents { + signed_block: block, + kzg_proofs, + blobs, + }), + None => Self::Block(block), } } @@ -1745,133 +1662,88 @@ impl> SignedBlockContents { SignedBeaconBlock::from_ssz_bytes(bytes, spec) - .map(|block| SignedBlockContents::Block(block)) + .map(|block| PublishBlockRequest::Block(block)) } ForkName::Deneb => { let mut builder = ssz::SszDecoderBuilder::new(bytes); builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; + builder.register_type::>()?; + builder.register_type::>()?; let mut decoder = builder.build()?; let block = decoder .decode_next_with(|bytes| SignedBeaconBlock::from_ssz_bytes(bytes, spec))?; + let kzg_proofs = decoder.decode_next()?; let blobs = decoder.decode_next()?; - Ok(SignedBlockContents::new(block, Some(blobs))) + Ok(PublishBlockRequest::new(block, Some((kzg_proofs, blobs)))) } } } - pub fn signed_block(&self) -> &SignedBeaconBlock { + pub fn signed_block(&self) -> &SignedBeaconBlock { match self { - 
SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => { + PublishBlockRequest::BlockContents(block_and_sidecars) => { &block_and_sidecars.signed_block } - SignedBlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => { - &block_and_sidecars.signed_blinded_block - } - SignedBlockContents::Block(block) => block, + PublishBlockRequest::Block(block) => block, } } - pub fn blobs_cloned(&self) -> Option> { + pub fn deconstruct(self) -> SignedBlockContentsTuple { match self { - SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => { - Some(block_and_sidecars.signed_blob_sidecars.clone()) - } - SignedBlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => { - Some(block_and_sidecars.signed_blinded_blob_sidecars.clone()) - } - SignedBlockContents::Block(_block) => None, - } - } - - pub fn deconstruct(self) -> SignedBlockContentsTuple { - match self { - SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => ( + PublishBlockRequest::BlockContents(block_and_sidecars) => ( block_and_sidecars.signed_block, - Some(block_and_sidecars.signed_blob_sidecars), - ), - SignedBlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => ( - block_and_sidecars.signed_blinded_block, - Some(block_and_sidecars.signed_blinded_blob_sidecars), + Some((block_and_sidecars.kzg_proofs, block_and_sidecars.blobs)), ), - SignedBlockContents::Block(block) => (block, None), + PublishBlockRequest::Block(block) => (block, None), } } } -impl SignedBlockContents> { - pub fn try_into_full_block_and_blobs( - self, - maybe_full_payload_contents: Option>, - ) -> Result>, String> { - match self { - SignedBlockContents::BlindedBlockAndBlobSidecars(blinded_block_and_blob_sidecars) => { - match maybe_full_payload_contents { - None | Some(FullPayloadContents::Payload(_)) => { - Err("Can't build full block contents without payload and blobs".to_string()) - } - Some(FullPayloadContents::PayloadAndBlobs(payload_and_blobs)) => { - let signed_block = blinded_block_and_blob_sidecars - .signed_blinded_block - .try_into_full_block(Some(payload_and_blobs.execution_payload)) - .ok_or("Failed to build full block with payload".to_string())?; - let signed_blob_sidecars: SignedBlobSidecarList = - blinded_block_and_blob_sidecars - .signed_blinded_blob_sidecars - .into_iter() - .zip(payload_and_blobs.blobs_bundle.blobs) - .map(|(blinded_blob_sidecar, blob)| { - blinded_blob_sidecar.into_full_blob_sidecars(blob) - }) - .collect::>() - .into(); - - Ok(SignedBlockContents::new( - signed_block, - Some(signed_blob_sidecars), - )) - } - } - } - SignedBlockContents::Block(blinded_block) => { - let full_payload_opt = maybe_full_payload_contents.map(|o| o.deconstruct().0); - blinded_block - .try_into_full_block(full_payload_opt) - .map(SignedBlockContents::Block) - .ok_or("Can't build full block without payload".to_string()) - } - SignedBlockContents::BlockAndBlobSidecars(_) => Err( - "BlockAndBlobSidecars variant not expected when constructing full block" - .to_string(), - ), +/// Converting from a `SignedBlindedBeaconBlock` into a full `SignedBlockContents`. 
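`PublishBlockRequest` keeps the same two-variant shape on the signed side. Post-Deneb its SSZ stream carries three variable-length fields in order (block, KZG proofs, blobs), which is why decoding takes `spec`: the block variant can only be selected by fork. A round-trip sketch using the constructor and decoder shown above (`signed_block`, `kzg_proofs`, `blobs` and `spec` assumed in scope):

    use ssz::Encode;

    let request = PublishBlockRequest::new(signed_block, Some((kzg_proofs, blobs)));
    let bytes = request.as_ssz_bytes();
    let decoded = PublishBlockRequest::<E>::from_ssz_bytes(&bytes, &spec)
        .expect("request should round-trip through SSZ");
    assert!(matches!(decoded, PublishBlockRequest::BlockContents(_)));

The helper that follows performs the reverse of blinding, rebuilding such a request from a `SignedBlindedBeaconBlock` and whatever the builder revealed.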
+pub fn into_full_block_and_blobs( + blinded_block: SignedBlindedBeaconBlock, + maybe_full_payload_contents: Option>, +) -> Result, String> { + match maybe_full_payload_contents { + None => { + let signed_block = blinded_block + .try_into_full_block(None) + .ok_or("Failed to build full block with payload".to_string())?; + Ok(PublishBlockRequest::new(signed_block, None)) + } + // This variant implies a pre-deneb block + Some(FullPayloadContents::Payload(execution_payload)) => { + let signed_block = blinded_block + .try_into_full_block(Some(execution_payload)) + .ok_or("Failed to build full block with payload".to_string())?; + Ok(PublishBlockRequest::new(signed_block, None)) + } + // This variant implies a post-deneb block + Some(FullPayloadContents::PayloadAndBlobs(payload_and_blobs)) => { + let signed_block = blinded_block + .try_into_full_block(Some(payload_and_blobs.execution_payload)) + .ok_or("Failed to build full block with payload".to_string())?; + + Ok(PublishBlockRequest::new( + signed_block, + Some(( + payload_and_blobs.blobs_bundle.proofs, + payload_and_blobs.blobs_bundle.blobs, + )), + )) } } } -impl SignedBlockContents { - pub fn clone_as_blinded(&self) -> SignedBlindedBlockContents { - let blinded_blobs = self.blobs_cloned().map(|blob_sidecars| { - blob_sidecars - .into_iter() - .map(|blob| blob.into()) - .collect::>() - .into() - }); - SignedBlockContents::new(self.signed_block().clone_as_blinded(), blinded_blobs) - } -} - -impl> TryFrom> - for SignedBlockContents -{ +impl TryFrom> for PublishBlockRequest { type Error = &'static str; - fn try_from(block: SignedBeaconBlock) -> Result { + fn try_from(block: SignedBeaconBlock) -> Result { match block { SignedBeaconBlock::Base(_) | SignedBeaconBlock::Altair(_) | SignedBeaconBlock::Merge(_) - | SignedBeaconBlock::Capella(_) => Ok(SignedBlockContents::Block(block)), + | SignedBeaconBlock::Capella(_) => Ok(PublishBlockRequest::Block(block)), SignedBeaconBlock::Deneb(_) => { Err("deneb block contents cannot be fully constructed from just the signed block") } @@ -1879,93 +1751,49 @@ impl> TryFrom> From> - for SignedBlockContents -{ - fn from(block_contents_tuple: SignedBlockContentsTuple) -> Self { - SignedBlockContents::new(block_contents_tuple.0, block_contents_tuple.1) +impl From> for PublishBlockRequest { + fn from(block_contents_tuple: SignedBlockContentsTuple) -> Self { + PublishBlockRequest::new(block_contents_tuple.0, block_contents_tuple.1) } } #[derive(Debug, Clone, Serialize, Deserialize, Encode)] #[serde(bound = "T: EthSpec")] -pub struct SignedBeaconBlockAndBlobSidecars> { - pub signed_block: SignedBeaconBlock, - pub signed_blob_sidecars: SignedSidecarList, +pub struct SignedBlockContents { + pub signed_block: SignedBeaconBlock, + pub kzg_proofs: KzgProofs, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + pub blobs: BlobsList, } #[derive(Debug, Clone, Serialize, Deserialize, Encode)] -#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload")] -pub struct BeaconBlockAndBlobSidecars> { - pub block: BeaconBlock, - pub blob_sidecars: SidecarList, +#[serde(bound = "T: EthSpec")] +pub struct BlockContents { + pub block: BeaconBlock, + pub kzg_proofs: KzgProofs, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + pub blobs: BlobsList, } -impl> ForkVersionDeserialize - for BeaconBlockAndBlobSidecars -{ +impl ForkVersionDeserialize for BlockContents { fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( value: serde_json::value::Value, fork_name: ForkName, ) -> Result { 
#[derive(Deserialize)] - #[serde(bound = "T: EthSpec, S: Sidecar")] - struct Helper> { + #[serde(bound = "T: EthSpec")] + struct Helper { block: serde_json::Value, - blob_sidecars: SidecarList, + kzg_proofs: KzgProofs, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + blobs: BlobsList, } - let helper: Helper = - serde_json::from_value(value).map_err(serde::de::Error::custom)?; + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; Ok(Self { block: BeaconBlock::deserialize_by_fork::<'de, D>(helper.block, fork_name)?, - blob_sidecars: helper.blob_sidecars, - }) - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, Encode)] -#[serde(bound = "T: EthSpec")] -pub struct SignedBlindedBeaconBlockAndBlobSidecars< - T: EthSpec, - Payload: AbstractExecPayload = BlindedPayload, -> { - pub signed_blinded_block: SignedBeaconBlock, - pub signed_blinded_blob_sidecars: SignedSidecarList, -} - -#[derive(Debug, Clone, Serialize, Deserialize, Encode)] -#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload")] -pub struct BlindedBeaconBlockAndBlobSidecars< - T: EthSpec, - Payload: AbstractExecPayload = BlindedPayload, -> { - pub blinded_block: BeaconBlock, - pub blinded_blob_sidecars: SidecarList, -} - -impl> ForkVersionDeserialize - for BlindedBeaconBlockAndBlobSidecars -{ - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { - #[derive(Deserialize)] - #[serde(bound = "T: EthSpec, S: Sidecar")] - struct Helper> { - blinded_block: serde_json::Value, - blinded_blob_sidecars: SidecarList, - } - let helper: Helper = - serde_json::from_value(value).map_err(serde::de::Error::custom)?; - - Ok(Self { - blinded_block: BeaconBlock::deserialize_by_fork::<'de, D>( - helper.blinded_block, - fork_name, - )?, - blinded_blob_sidecars: helper.blinded_blob_sidecars, + kzg_proofs: helper.kzg_proofs, + blobs: helper.blobs, }) } } @@ -2051,18 +1879,3 @@ pub struct BlobsBundle { #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] pub blobs: BlobsList, } - -impl Into> for BlobsBundle { - fn into(self) -> BlindedBlobsBundle { - BlindedBlobsBundle { - commitments: self.commitments, - proofs: self.proofs, - blob_roots: self - .blobs - .into_iter() - .map(|blob| blob.tree_hash_root()) - .collect::>() - .into(), - } - } -} diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index bdd74c1a2aa..865a5affbb9 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -291,7 +291,7 @@ pub enum AttestationFromBlock { } /// Parameters which are cached between calls to `ForkChoice::get_head`. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct ForkchoiceUpdateParameters { /// The most recent result of running `ForkChoice::get_head`. 
 pub head_root: Hash256,
diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs
index e7ca84efb35..5e8cfb1ee49 100644
--- a/consensus/fork_choice/src/lib.rs
+++ b/consensus/fork_choice/src/lib.rs
@@ -7,4 +7,6 @@ pub use crate::fork_choice::{
     QueuedAttestation, ResetPayloadStatuses,
 };
 pub use fork_choice_store::ForkChoiceStore;
-pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation};
+pub use proto_array::{
+    Block as ProtoBlock, ExecutionStatus, InvalidationOperation, ProposerHeadError,
+};
diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs
index 595de86e862..2d2d2afddab 100644
--- a/consensus/merkle_proof/src/lib.rs
+++ b/consensus/merkle_proof/src/lib.rs
@@ -369,7 +369,7 @@ pub fn verify_merkle_proof(
 }
 
 /// Compute a root hash from a leaf and a Merkle proof.
-fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usize) -> H256 {
+pub fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usize) -> H256 {
     assert_eq!(branch.len(), depth, "proof length should equal depth");
 
     let mut merkle_root = leaf.as_bytes().to_vec();
diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs
index 6fc677073ed..1c41b1855b7 100644
--- a/consensus/proto_array/src/proto_array_fork_choice.rs
+++ b/consensus/proto_array/src/proto_array_fork_choice.rs
@@ -188,7 +188,7 @@ where
 }
 
 /// Information about the proposer head used for opportunistic re-orgs.
-#[derive(Clone)]
+#[derive(Debug, Clone)]
 pub struct ProposerHeadInfo {
     /// Information about the *current* head block, which may be re-orged.
     pub head_node: ProtoNode,
@@ -206,7 +206,7 @@ pub struct ProposerHeadInfo {
 ///
 /// This type intentionally does not implement `Debug` so that callers are forced to handle the
 /// enum.
-#[derive(Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq)]
 pub enum ProposerHeadError<E> {
     DoNotReOrg(DoNotReOrg),
     Error(E),
@@ -243,7 +243,7 @@ impl<E> ProposerHeadError<E> {
 /// Reasons why a re-org should not be attempted.
 ///
 /// This type intentionally does not implement `Debug` so that the `Display` impl must be used.
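Deriving `Debug` here (and re-exporting `ProposerHeadError` from `fork_choice` above) lets callers and tests assert on re-org decisions directly; note that the pre-existing doc comments claiming `Debug` is intentionally omitted are now stale. A sketch, where `result` is a hypothetical `Result<ProposerHeadInfo, ProposerHeadError<E>>`:

    // `unwrap_err` needs `Debug` on the Ok type (`ProposerHeadInfo`) and
    // `assert_eq!` needs `Debug + PartialEq` on the error, all of which
    // this patch derives.
    assert_eq!(
        result.unwrap_err(),
        ProposerHeadError::DoNotReOrg(DoNotReOrg::MissingHeadOrParentNode),
    );

The matching derive for `DoNotReOrg` follows below.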
-#[derive(Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub enum DoNotReOrg { MissingHeadOrParentNode, MissingHeadFinalizedCheckpoint, diff --git a/consensus/types/presets/gnosis/deneb.yaml b/consensus/types/presets/gnosis/deneb.yaml index b78a9502757..d2d7d0abed3 100644 --- a/consensus/types/presets/gnosis/deneb.yaml +++ b/consensus/types/presets/gnosis/deneb.yaml @@ -10,3 +10,5 @@ FIELD_ELEMENTS_PER_BLOB: 4096 MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096 # `uint64(6)` MAX_BLOBS_PER_BLOCK: 6 +# `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17 +KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 17 diff --git a/consensus/types/presets/mainnet/deneb.yaml b/consensus/types/presets/mainnet/deneb.yaml index 23889fd18e4..6d2fb4abde9 100644 --- a/consensus/types/presets/mainnet/deneb.yaml +++ b/consensus/types/presets/mainnet/deneb.yaml @@ -8,3 +8,5 @@ FIELD_ELEMENTS_PER_BLOB: 4096 MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096 # `uint64(6)` MAX_BLOBS_PER_BLOCK: 6 +# `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17 +KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 17 diff --git a/consensus/types/presets/minimal/deneb.yaml b/consensus/types/presets/minimal/deneb.yaml index 3da2f80a74b..be2b9fadfa5 100644 --- a/consensus/types/presets/minimal/deneb.yaml +++ b/consensus/types/presets/minimal/deneb.yaml @@ -8,3 +8,5 @@ FIELD_ELEMENTS_PER_BLOB: 4096 MAX_BLOB_COMMITMENTS_PER_BLOCK: 16 # `uint64(6)` MAX_BLOBS_PER_BLOCK: 6 +# [customized] `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 4 = 9 +KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 9 diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 2f7c6891e4c..146dff895c8 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,12 +1,14 @@ use crate::test_utils::TestRandom; use crate::*; use derivative::Derivative; +use merkle_proof::{MerkleTree, MerkleTreeError}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use std::marker::PhantomData; use superstruct::superstruct; use test_random_derive::TestRandom; +use tree_hash::{TreeHash, BYTES_PER_CHUNK}; use tree_hash_derive::TreeHash; pub type KzgCommitments = @@ -14,6 +16,9 @@ pub type KzgCommitments = pub type KzgCommitmentOpts = FixedVector, ::MaxBlobsPerBlock>; +/// Index of the `blob_kzg_commitments` leaf in the `BeaconBlockBody` tree post-deneb. +pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; + /// The body of a `BeaconChain` block, containing operations. /// /// This *superstruct* abstracts over the hard-fork. @@ -98,6 +103,79 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, Self::Deneb(body) => Ok(Payload::Ref::from(&body.execution_payload)), } } + + /// Produces the proof of inclusion for a `KzgCommitment` in `self.blob_kzg_commitments` + /// at `index`. + pub fn kzg_commitment_merkle_proof( + &self, + index: usize, + ) -> Result, Error> { + match self { + Self::Base(_) | Self::Altair(_) | Self::Merge(_) | Self::Capella(_) => { + Err(Error::IncorrectStateVariant) + } + Self::Deneb(body) => { + // We compute the branches by generating 2 merkle trees: + // 1. Merkle tree for the `blob_kzg_commitments` List object + // 2. Merkle tree for the `BeaconBlockBody` container + // We then merge the branches for both the trees all the way up to the root. 
+ + // Part1 (Branches for the subtree rooted at `blob_kzg_commitments`) + // + // Branches for `blob_kzg_commitments` without length mix-in + let depth = T::max_blob_commitments_per_block() + .next_power_of_two() + .ilog2(); + let leaves: Vec<_> = body + .blob_kzg_commitments + .iter() + .map(|commitment| commitment.tree_hash_root()) + .collect(); + let tree = MerkleTree::create(&leaves, depth as usize); + let (_, mut proof) = tree + .generate_proof(index, depth as usize) + .map_err(Error::MerkleTreeError)?; + + // Add the branch corresponding to the length mix-in. + let length = body.blob_kzg_commitments.len(); + let usize_len = std::mem::size_of::(); + let mut length_bytes = [0; BYTES_PER_CHUNK]; + length_bytes + .get_mut(0..usize_len) + .ok_or(Error::MerkleTreeError(MerkleTreeError::PleaseNotifyTheDevs))? + .copy_from_slice(&length.to_le_bytes()); + let length_root = Hash256::from_slice(length_bytes.as_slice()); + proof.push(length_root); + + // Part 2 + // Branches for `BeaconBlockBody` container + let leaves = [ + body.randao_reveal.tree_hash_root(), + body.eth1_data.tree_hash_root(), + body.graffiti.tree_hash_root(), + body.proposer_slashings.tree_hash_root(), + body.attester_slashings.tree_hash_root(), + body.attestations.tree_hash_root(), + body.deposits.tree_hash_root(), + body.voluntary_exits.tree_hash_root(), + body.sync_aggregate.tree_hash_root(), + body.execution_payload.tree_hash_root(), + body.bls_to_execution_changes.tree_hash_root(), + body.blob_kzg_commitments.tree_hash_root(), + ]; + let beacon_block_body_depth = leaves.len().next_power_of_two().ilog2() as usize; + let tree = MerkleTree::create(&leaves, beacon_block_body_depth); + let (_, mut proof_body) = tree + .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) + .map_err(Error::MerkleTreeError)?; + // Join the proofs for the subtree and the main tree + proof.append(&mut proof_body); + + debug_assert_eq!(proof.len(), T::kzg_proof_inclusion_proof_depth()); + Ok(proof.into()) + } + } + } } impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, Payload> { diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index 689f1a28b08..b382359313c 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -60,6 +60,16 @@ impl BeaconBlockHeader { signature, } } + + pub fn empty() -> Self { + Self { + body_root: Default::default(), + parent_root: Default::default(), + proposer_index: Default::default(), + slot: Default::default(), + state_root: Default::default(), + } + } } #[cfg(test)] diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 8637e538d0b..c249d8b4d83 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,11 +1,18 @@ use crate::test_utils::TestRandom; -use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot}; +use crate::{ + beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob, + EthSpec, Hash256, SignedBeaconBlockHeader, Slot, +}; +use crate::{KzgProofs, SignedBeaconBlock}; +use bls::Signature; use derivative::Derivative; use kzg::{ Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, FIELD_ELEMENTS_PER_BLOB, }; +use merkle_proof::{merkle_root_from_branch, verify_merkle_proof, MerkleTreeError}; use rand::Rng; +use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; 
@@ -67,47 +74,14 @@ impl Ord for BlobIdentifier { #[arbitrary(bound = "T: EthSpec")] #[derivative(PartialEq, Eq, Hash(bound = "T: EthSpec"))] pub struct BlobSidecar { - pub block_root: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub index: u64, - pub slot: Slot, - pub block_parent_root: Hash256, - #[serde(with = "serde_utils::quoted_u64")] - pub proposer_index: u64, #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub blob: Blob, pub kzg_commitment: KzgCommitment, pub kzg_proof: KzgProof, -} - -impl From>> for BlindedBlobSidecar { - fn from(blob_sidecar: Arc>) -> Self { - BlindedBlobSidecar { - block_root: blob_sidecar.block_root, - index: blob_sidecar.index, - slot: blob_sidecar.slot, - block_parent_root: blob_sidecar.block_parent_root, - proposer_index: blob_sidecar.proposer_index, - blob_root: blob_sidecar.blob.tree_hash_root(), - kzg_commitment: blob_sidecar.kzg_commitment, - kzg_proof: blob_sidecar.kzg_proof, - } - } -} - -impl From> for BlindedBlobSidecar { - fn from(blob_sidecar: BlobSidecar) -> Self { - BlindedBlobSidecar { - block_root: blob_sidecar.block_root, - index: blob_sidecar.index, - slot: blob_sidecar.slot, - block_parent_root: blob_sidecar.block_parent_root, - proposer_index: blob_sidecar.proposer_index, - blob_root: blob_sidecar.blob.tree_hash_root(), - kzg_commitment: blob_sidecar.kzg_commitment, - kzg_proof: blob_sidecar.kzg_proof, - } - } + pub signed_block_header: SignedBeaconBlockHeader, + pub kzg_commitment_inclusion_proof: FixedVector, } impl PartialOrd for BlobSidecar { @@ -122,29 +96,130 @@ impl Ord for BlobSidecar { } } -impl SignedRoot for BlobSidecar {} +#[derive(Debug)] +pub enum BlobSidecarError { + PreDeneb, + MissingKzgCommitment, + BeaconState(BeaconStateError), + MerkleTree(MerkleTreeError), + ArithError(ArithError), +} + +impl From for BlobSidecarError { + fn from(e: BeaconStateError) -> Self { + BlobSidecarError::BeaconState(e) + } +} + +impl From for BlobSidecarError { + fn from(e: MerkleTreeError) -> Self { + BlobSidecarError::MerkleTree(e) + } +} + +impl From for BlobSidecarError { + fn from(e: ArithError) -> Self { + BlobSidecarError::ArithError(e) + } +} impl BlobSidecar { + pub fn new( + index: usize, + blob: Blob, + signed_block: &SignedBeaconBlock, + kzg_proof: KzgProof, + ) -> Result { + let expected_kzg_commitments = signed_block + .message() + .body() + .blob_kzg_commitments() + .map_err(|_e| BlobSidecarError::PreDeneb)?; + let kzg_commitment = *expected_kzg_commitments + .get(index) + .ok_or(BlobSidecarError::MissingKzgCommitment)?; + let kzg_commitment_inclusion_proof = signed_block + .message() + .body() + .kzg_commitment_merkle_proof(index)?; + + Ok(Self { + index: index as u64, + blob, + kzg_commitment, + kzg_proof, + signed_block_header: signed_block.signed_block_header(), + kzg_commitment_inclusion_proof, + }) + } + pub fn id(&self) -> BlobIdentifier { BlobIdentifier { - block_root: self.block_root, + block_root: self.block_root(), index: self.index, } } + pub fn slot(&self) -> Slot { + self.signed_block_header.message.slot + } + + pub fn block_root(&self) -> Hash256 { + self.signed_block_header.message.tree_hash_root() + } + + pub fn block_parent_root(&self) -> Hash256 { + self.signed_block_header.message.parent_root + } + + pub fn block_proposer_index(&self) -> u64 { + self.signed_block_header.message.proposer_index + } + pub fn empty() -> Self { Self { - block_root: Hash256::zero(), index: 0, - slot: Slot::new(0), - block_parent_root: Hash256::zero(), - proposer_index: 0, blob: Blob::::default(), kzg_commitment: 
KzgCommitment::empty_for_testing(), kzg_proof: KzgProof::empty(), + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader::empty(), + signature: Signature::empty(), + }, + kzg_commitment_inclusion_proof: Default::default(), } } + /// Verifies the kzg commitment inclusion merkle proof. + pub fn verify_blob_sidecar_inclusion_proof(&self) -> Result { + // Depth of the subtree rooted at `blob_kzg_commitments` in the `BeaconBlockBody` + // is equal to depth of the ssz List max size + 1 for the length mixin + let kzg_commitments_tree_depth = (T::max_blob_commitments_per_block() + .next_power_of_two() + .ilog2() + .safe_add(1))? as usize; + // Compute the `tree_hash_root` of the `blob_kzg_commitments` subtree using the + // inclusion proof branches + let blob_kzg_commitments_root = merkle_root_from_branch( + self.kzg_commitment.tree_hash_root(), + self.kzg_commitment_inclusion_proof + .get(0..kzg_commitments_tree_depth) + .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?, + kzg_commitments_tree_depth, + self.index as usize, + ); + // The remaining inclusion proof branches are for the top level `BeaconBlockBody` tree + Ok(verify_merkle_proof( + blob_kzg_commitments_root, + self.kzg_commitment_inclusion_proof + .get(kzg_commitments_tree_depth..T::kzg_proof_inclusion_proof_depth()) + .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?, + T::kzg_proof_inclusion_proof_depth().safe_sub(kzg_commitments_tree_depth)?, + BLOB_KZG_COMMITMENTS_INDEX, + self.signed_block_header.message.body_root, + )) + } + pub fn random_valid(rng: &mut R, kzg: &Kzg) -> Result { let mut blob_bytes = vec![0u8; BYTES_PER_BLOB]; rng.fill_bytes(&mut blob_bytes); @@ -185,57 +260,22 @@ impl BlobSidecar { // Fixed part Self::empty().as_ssz_bytes().len() } -} -#[derive( - Debug, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, - Derivative, - arbitrary::Arbitrary, -)] -#[derivative(PartialEq, Eq, Hash)] -pub struct BlindedBlobSidecar { - pub block_root: Hash256, - #[serde(with = "serde_utils::quoted_u64")] - pub index: u64, - pub slot: Slot, - pub block_parent_root: Hash256, - #[serde(with = "serde_utils::quoted_u64")] - pub proposer_index: u64, - pub blob_root: Hash256, - pub kzg_commitment: KzgCommitment, - pub kzg_proof: KzgProof, -} - -impl BlindedBlobSidecar { - pub fn empty() -> Self { - Self { - block_root: Hash256::zero(), - index: 0, - slot: Slot::new(0), - block_parent_root: Hash256::zero(), - proposer_index: 0, - blob_root: Hash256::zero(), - kzg_commitment: KzgCommitment::empty_for_testing(), - kzg_proof: KzgProof::empty(), + pub fn build_sidecars( + blobs: BlobsList, + block: &SignedBeaconBlock, + kzg_proofs: KzgProofs, + ) -> Result, BlobSidecarError> { + let mut blob_sidecars = vec![]; + for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() { + let blob_sidecar = BlobSidecar::new(i, blob, block, *kzg_proof)?; + blob_sidecars.push(Arc::new(blob_sidecar)); } + Ok(VariableList::from(blob_sidecars)) } } -impl SignedRoot for BlindedBlobSidecar {} - -pub type SidecarList = VariableList, ::MaxBlobsPerBlock>; -pub type BlobSidecarList = SidecarList>; -pub type BlindedBlobSidecarList = SidecarList; - +pub type BlobSidecarList = VariableList>, ::MaxBlobsPerBlock>; pub type FixedBlobSidecarList = FixedVector>>, ::MaxBlobsPerBlock>; - pub type BlobsList = VariableList, ::MaxBlobCommitmentsPerBlock>; -pub type BlobRootsList = VariableList::MaxBlobCommitmentsPerBlock>; diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 
910ef97c71c..f43585000a5 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,24 +1,15 @@ use crate::beacon_block_body::KzgCommitments; use crate::{ - BlobRootsList, ChainSpec, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, + ChainSpec, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ForkName, - ForkVersionDeserialize, KzgProofs, SignedRoot, Uint256, + ForkVersionDeserialize, SignedRoot, Uint256, }; use bls::PublicKeyBytes; use bls::Signature; use serde::{Deserialize, Deserializer, Serialize}; -use ssz_derive::Encode; use superstruct::superstruct; use tree_hash_derive::TreeHash; -#[derive(PartialEq, Debug, Default, Serialize, Deserialize, TreeHash, Clone, Encode)] -#[serde(bound = "E: EthSpec")] -pub struct BlindedBlobsBundle { - pub commitments: KzgCommitments, - pub proofs: KzgProofs, - pub blob_roots: BlobRootsList, -} - #[superstruct( variants(Merge, Capella, Deneb), variant_attributes( @@ -39,7 +30,7 @@ pub struct BuilderBid { #[superstruct(only(Deneb), partial_getter(rename = "header_deneb"))] pub header: ExecutionPayloadHeaderDeneb, #[superstruct(only(Deneb))] - pub blinded_blobs_bundle: BlindedBlobsBundle, + pub blob_kzg_commitments: KzgCommitments, #[serde(with = "serde_utils::quoted_u256")] pub value: Uint256, pub pubkey: PublicKeyBytes, diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index ed002978580..784d98c1397 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -15,7 +15,6 @@ pub enum Domain { BlsToExecutionChange, BeaconProposer, BeaconAttester, - BlobSidecar, Randao, Deposit, VoluntaryExit, @@ -102,7 +101,6 @@ pub struct ChainSpec { */ pub(crate) domain_beacon_proposer: u32, pub(crate) domain_beacon_attester: u32, - pub(crate) domain_blob_sidecar: u32, pub(crate) domain_randao: u32, pub(crate) domain_deposit: u32, pub(crate) domain_voluntary_exit: u32, @@ -374,7 +372,6 @@ impl ChainSpec { match domain { Domain::BeaconProposer => self.domain_beacon_proposer, Domain::BeaconAttester => self.domain_beacon_attester, - Domain::BlobSidecar => self.domain_blob_sidecar, Domain::Randao => self.domain_randao, Domain::Deposit => self.domain_deposit, Domain::VoluntaryExit => self.domain_voluntary_exit, @@ -579,7 +576,6 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, - domain_blob_sidecar: 11, // 0x0B000000 /* * Fork choice @@ -822,7 +818,6 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, - domain_blob_sidecar: 11, /* * Fork choice @@ -1416,7 +1411,6 @@ mod tests { test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec); test_domain(Domain::BeaconAttester, spec.domain_beacon_attester, &spec); - test_domain(Domain::BlobSidecar, spec.domain_blob_sidecar, &spec); test_domain(Domain::Randao, spec.domain_randao, &spec); test_domain(Domain::Deposit, spec.domain_deposit, &spec); test_domain(Domain::VoluntaryExit, spec.domain_voluntary_exit, &spec); @@ -1441,8 +1435,6 @@ mod tests { spec.domain_bls_to_execution_change, &spec, ); - - test_domain(Domain::BlobSidecar, spec.domain_blob_sidecar, &spec); } fn apply_bit_mask(domain_bytes: [u8; 4], spec: &ChainSpec) -> u32 { diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index bd2efd3d9ee..b651d34af36 100644 --- 
a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -82,7 +82,6 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "bls_withdrawal_prefix".to_uppercase() => u8_hex(spec.bls_withdrawal_prefix_byte), "domain_beacon_proposer".to_uppercase() => u32_hex(spec.domain_beacon_proposer), "domain_beacon_attester".to_uppercase() => u32_hex(spec.domain_beacon_attester), - "domain_blob_sidecar".to_uppercase() => u32_hex(spec.domain_blob_sidecar), "domain_randao".to_uppercase()=> u32_hex(spec.domain_randao), "domain_deposit".to_uppercase()=> u32_hex(spec.domain_deposit), "domain_voluntary_exit".to_uppercase() => u32_hex(spec.domain_voluntary_exit), diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 70982e8d56b..17baad9c4c7 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -6,6 +6,7 @@ use ssz_types::typenum::{ bit::B0, UInt, Unsigned, U0, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U16, U16777216, U2, U2048, U256, U32, U4, U4096, U512, U6, U625, U64, U65536, U8, U8192, }; +use ssz_types::typenum::{U17, U9}; use std::fmt::{self, Debug}; use std::str::FromStr; @@ -109,6 +110,7 @@ pub trait EthSpec: type MaxBlobCommitmentsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin; type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; type BytesPerFieldElement: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type KzgCommitmentInclusionProofDepth: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -271,6 +273,10 @@ pub trait EthSpec: fn bytes_per_blob() -> usize { Self::BytesPerBlob::to_usize() } + /// Returns the `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` preset for this specification. + fn kzg_proof_inclusion_proof_depth() -> usize { + Self::KzgCommitmentInclusionProofDepth::to_usize() + } } /// Macro to inherit some type values from another EthSpec. 
@@ -315,6 +321,7 @@ impl EthSpec for MainnetEthSpec { type BytesPerFieldElement = U32; type FieldElementsPerBlob = U4096; type BytesPerBlob = U131072; + type KzgCommitmentInclusionProofDepth = U17; type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch @@ -348,6 +355,7 @@ impl EthSpec for MinimalEthSpec { type FieldElementsPerBlob = U4096; type BytesPerBlob = U131072; type MaxBlobCommitmentsPerBlock = U16; + type KzgCommitmentInclusionProofDepth = U9; params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, @@ -421,6 +429,7 @@ impl EthSpec for GnosisEthSpec { type FieldElementsPerBlob = U4096; type BytesPerFieldElement = U32; type BytesPerBlob = U131072; + type KzgCommitmentInclusionProofDepth = U17; fn default_spec() -> ChainSpec { ChainSpec::gnosis() diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 0f284bde9d2..2322a67a626 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -100,8 +100,6 @@ pub mod sqlite; pub mod blob_sidecar; pub mod light_client_header; -pub mod sidecar; -pub mod signed_blob; use ethereum_types::{H160, H256}; @@ -121,10 +119,7 @@ pub use crate::beacon_block_body::{ pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; -pub use crate::blob_sidecar::{ - BlindedBlobSidecar, BlindedBlobSidecarList, BlobRootsList, BlobSidecar, BlobSidecarList, - BlobsList, SidecarList, -}; +pub use crate::blob_sidecar::{BlobSidecar, BlobSidecarList, BlobsList}; pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; @@ -182,7 +177,6 @@ pub use crate::signed_beacon_block::{ SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; -pub use crate::signed_blob::*; pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; @@ -223,6 +217,5 @@ pub use bls::{ pub use kzg::{KzgCommitment, KzgProof}; -pub use sidecar::Sidecar; pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; pub use superstruct::superstruct; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 6d584fc1eb6..fa7745ad977 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -83,8 +83,6 @@ pub trait AbstractExecPayload: + TryInto + TryInto { - type Sidecar: Sidecar; - type Ref<'a>: ExecPayload + Copy + From<&'a Self::Merge> @@ -103,11 +101,6 @@ pub trait AbstractExecPayload: + Into + for<'a> From>> + TryFrom>; - - fn default_at_fork(fork_name: ForkName) -> Result; - fn default_blobs_at_fork( - fork_name: ForkName, - ) -> Result<>::BlobItems, Error>; } #[superstruct( @@ -280,6 +273,15 @@ impl FullPayload { cons(inner.execution_payload) }) } + + pub fn default_at_fork(fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Merge => Ok(FullPayloadMerge::default().into()), + ForkName::Capella => Ok(FullPayloadCapella::default().into()), + ForkName::Deneb => 
Ok(FullPayloadDeneb::default().into()), + } + } } impl<'a, T: EthSpec> FullPayloadRef<'a, T> { @@ -384,28 +386,10 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { } impl AbstractExecPayload for FullPayload { - type Sidecar = BlobSidecar; type Ref<'a> = FullPayloadRef<'a, T>; type Merge = FullPayloadMerge; type Capella = FullPayloadCapella; type Deneb = FullPayloadDeneb; - - fn default_at_fork(fork_name: ForkName) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), - ForkName::Merge => Ok(FullPayloadMerge::default().into()), - ForkName::Capella => Ok(FullPayloadCapella::default().into()), - ForkName::Deneb => Ok(FullPayloadDeneb::default().into()), - } - } - fn default_blobs_at_fork(fork_name: ForkName) -> Result, Error> { - match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - Err(Error::IncorrectStateVariant) - } - ForkName::Deneb => Ok(VariableList::default()), - } - } } impl From> for FullPayload { @@ -910,25 +894,6 @@ impl AbstractExecPayload for BlindedPayload { type Merge = BlindedPayloadMerge; type Capella = BlindedPayloadCapella; type Deneb = BlindedPayloadDeneb; - - type Sidecar = BlindedBlobSidecar; - - fn default_at_fork(fork_name: ForkName) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), - ForkName::Merge => Ok(BlindedPayloadMerge::default().into()), - ForkName::Capella => Ok(BlindedPayloadCapella::default().into()), - ForkName::Deneb => Ok(BlindedPayloadDeneb::default().into()), - } - } - fn default_blobs_at_fork(fork_name: ForkName) -> Result, Error> { - match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - Err(Error::IncorrectStateVariant) - } - ForkName::Deneb => Ok(VariableList::default()), - } - } } impl From> for BlindedPayload { diff --git a/consensus/types/src/sidecar.rs b/consensus/types/src/sidecar.rs deleted file mode 100644 index e784cc57f1f..00000000000 --- a/consensus/types/src/sidecar.rs +++ /dev/null @@ -1,221 +0,0 @@ -use crate::beacon_block_body::KzgCommitments; -use crate::test_utils::TestRandom; -use crate::{ - AbstractExecPayload, BeaconBlock, BlindedBlobSidecar, BlindedBlobSidecarList, BlobRootsList, - BlobSidecar, BlobSidecarList, BlobsList, ChainSpec, Domain, EthSpec, Fork, Hash256, - SidecarList, SignedRoot, SignedSidecar, Slot, -}; -use bls::SecretKey; -use kzg::KzgProof; -use serde::de::DeserializeOwned; -use ssz::{Decode, Encode}; -use ssz_types::VariableList; -use std::fmt::Debug; -use std::hash::Hash; -use std::marker::PhantomData; -use std::sync::Arc; -use tree_hash::TreeHash; - -pub trait Sidecar: - serde::Serialize - + Clone - + DeserializeOwned - + Encode - + Decode - + Hash - + TreeHash - + TestRandom - + Debug - + SignedRoot - + Sync - + Send - + for<'a> arbitrary::Arbitrary<'a> -{ - type BlobItems: BlobItems; - - fn slot(&self) -> Slot; - - fn build_sidecar>( - blob_items: Self::BlobItems, - block: &BeaconBlock, - expected_kzg_commitments: &KzgCommitments, - kzg_proofs: Vec, - ) -> Result, String>; - - // this is mostly not used except for in testing - fn sign( - self: Arc, - secret_key: &SecretKey, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> SignedSidecar { - let signing_epoch = self.slot().epoch(E::slots_per_epoch()); - let domain = spec.get_domain( - signing_epoch, - Domain::BlobSidecar, - fork, - genesis_validators_root, - ); - let message = self.signing_root(domain); - let signature = 
secret_key.sign(message); - - SignedSidecar { - message: self, - signature, - _phantom: PhantomData, - } - } -} - -pub trait BlobItems: Sync + Send + Sized { - fn try_from_blob_roots(roots: BlobRootsList) -> Result; - fn try_from_blobs(blobs: BlobsList) -> Result; - fn len(&self) -> usize; - fn is_empty(&self) -> bool; - fn blobs(&self) -> Option<&BlobsList>; -} - -impl BlobItems for BlobsList { - fn try_from_blob_roots(_roots: BlobRootsList) -> Result { - Err("Unexpected conversion from blob roots to blobs".to_string()) - } - - fn try_from_blobs(blobs: BlobsList) -> Result { - Ok(blobs) - } - - fn len(&self) -> usize { - VariableList::len(self) - } - - fn is_empty(&self) -> bool { - VariableList::is_empty(self) - } - - fn blobs(&self) -> Option<&BlobsList> { - Some(self) - } -} - -impl BlobItems for BlobRootsList { - fn try_from_blob_roots(roots: BlobRootsList) -> Result { - Ok(roots) - } - - fn try_from_blobs(blobs: BlobsList) -> Result { - VariableList::new( - blobs - .into_iter() - .map(|blob| blob.tree_hash_root()) - .collect(), - ) - .map_err(|e| format!("{e:?}")) - } - - fn len(&self) -> usize { - VariableList::len(self) - } - - fn is_empty(&self) -> bool { - VariableList::is_empty(self) - } - - fn blobs(&self) -> Option<&BlobsList> { - None - } -} - -impl Sidecar for BlobSidecar { - type BlobItems = BlobsList; - - fn slot(&self) -> Slot { - self.slot - } - - fn build_sidecar>( - blobs: BlobsList, - block: &BeaconBlock, - expected_kzg_commitments: &KzgCommitments, - kzg_proofs: Vec, - ) -> Result, String> { - let beacon_block_root = block.canonical_root(); - let slot = block.slot(); - let blob_sidecars = BlobSidecarList::from( - blobs - .into_iter() - .enumerate() - .map(|(blob_index, blob)| { - let kzg_commitment = expected_kzg_commitments - .get(blob_index) - .ok_or("KZG commitment should exist for blob")?; - - let kzg_proof = kzg_proofs - .get(blob_index) - .ok_or("KZG proof should exist for blob")?; - - Ok(Arc::new(BlobSidecar { - block_root: beacon_block_root, - index: blob_index as u64, - slot, - block_parent_root: block.parent_root(), - proposer_index: block.proposer_index(), - blob, - kzg_commitment: *kzg_commitment, - kzg_proof: *kzg_proof, - })) - }) - .collect::, String>>()?, - ); - - Ok(blob_sidecars) - } -} - -impl Sidecar for BlindedBlobSidecar { - type BlobItems = BlobRootsList; - - fn slot(&self) -> Slot { - self.slot - } - - fn build_sidecar>( - blob_roots: BlobRootsList, - block: &BeaconBlock, - expected_kzg_commitments: &KzgCommitments, - kzg_proofs: Vec, - ) -> Result, String> { - let beacon_block_root = block.canonical_root(); - let slot = block.slot(); - - let blob_sidecars = BlindedBlobSidecarList::::from( - blob_roots - .into_iter() - .enumerate() - .map(|(blob_index, blob_root)| { - let kzg_commitment = expected_kzg_commitments - .get(blob_index) - .ok_or("KZG commitment should exist for blob")?; - - let kzg_proof = kzg_proofs.get(blob_index).ok_or(format!( - "Missing KZG proof for slot {} blob index: {}", - slot, blob_index - ))?; - - Ok(Arc::new(BlindedBlobSidecar { - block_root: beacon_block_root, - index: blob_index as u64, - slot, - block_parent_root: block.parent_root(), - proposer_index: block.proposer_index(), - blob_root, - kzg_commitment: *kzg_commitment, - kzg_proof: *kzg_proof, - })) - }) - .collect::, String>>()?, - ); - - Ok(blob_sidecars) - } -} diff --git a/consensus/types/src/signed_blob.rs b/consensus/types/src/signed_blob.rs deleted file mode 100644 index 3c560823cea..00000000000 --- a/consensus/types/src/signed_blob.rs +++ /dev/null @@ 
-1,114 +0,0 @@ -use crate::sidecar::Sidecar; -use crate::{ - test_utils::TestRandom, BlindedBlobSidecar, Blob, BlobSidecar, ChainSpec, Domain, EthSpec, - Fork, Hash256, Signature, SignedRoot, SigningData, -}; -use bls::PublicKey; -use derivative::Derivative; -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use ssz_types::VariableList; -use std::marker::PhantomData; -use std::sync::Arc; -use test_random_derive::TestRandom; -use tree_hash::TreeHash; -use tree_hash_derive::TreeHash; - -#[derive( - Debug, - Clone, - PartialEq, - Serialize, - Deserialize, - Encode, - Decode, - TestRandom, - TreeHash, - Derivative, - arbitrary::Arbitrary, -)] -#[serde(bound = "T: EthSpec, S: Sidecar")] -#[arbitrary(bound = "T: EthSpec, S: Sidecar")] -#[derivative(Hash(bound = "T: EthSpec, S: Sidecar"))] -pub struct SignedSidecar> { - pub message: Arc, - pub signature: Signature, - #[ssz(skip_serializing, skip_deserializing)] - #[tree_hash(skip_hashing)] - #[serde(skip)] - #[arbitrary(default)] - pub _phantom: PhantomData, -} - -impl SignedSidecar { - pub fn into_full_blob_sidecars(self, blob: Blob) -> SignedSidecar> { - let blinded_sidecar = self.message; - SignedSidecar { - message: Arc::new(BlobSidecar { - block_root: blinded_sidecar.block_root, - index: blinded_sidecar.index, - slot: blinded_sidecar.slot, - block_parent_root: blinded_sidecar.block_parent_root, - proposer_index: blinded_sidecar.proposer_index, - blob, - kzg_commitment: blinded_sidecar.kzg_commitment, - kzg_proof: blinded_sidecar.kzg_proof, - }), - signature: self.signature, - _phantom: PhantomData, - } - } -} - -impl SignedBlobSidecar { - /// Verify `self.signature`. - /// - /// If the root of `block.message` is already known it can be passed in via `object_root_opt`. - /// Otherwise, it will be computed locally. - pub fn verify_signature( - &self, - object_root_opt: Option, - pubkey: &PublicKey, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> bool { - let domain = spec.get_domain( - self.message.slot.epoch(T::slots_per_epoch()), - Domain::BlobSidecar, - fork, - genesis_validators_root, - ); - - let message = if let Some(object_root) = object_root_opt { - SigningData { - object_root, - domain, - } - .tree_hash_root() - } else { - self.message.signing_root(domain) - }; - - self.signature.verify(pubkey, message) - } -} - -impl From> for SignedBlindedBlobSidecar { - fn from(signed: SignedBlobSidecar) -> Self { - SignedBlindedBlobSidecar { - message: Arc::new(signed.message.into()), - signature: signed.signature, - _phantom: PhantomData, - } - } -} - -pub type SignedBlobSidecar = SignedSidecar>; -pub type SignedBlindedBlobSidecar = SignedSidecar; - -/// List of Signed Sidecars that implements `Sidecar`. -pub type SignedSidecarList = - VariableList, ::MaxBlobsPerBlock>; -pub type SignedBlobSidecarList = SignedSidecarList>; -pub type SignedBlindedBlobSidecarList = SignedSidecarList; diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index fb2a6d394f9..cb5212aeaa8 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -6,10 +6,24 @@ use std::fmt::Debug; pub use crate::{kzg_commitment::KzgCommitment, kzg_proof::KzgProof, trusted_setup::TrustedSetup}; pub use c_kzg::{ - Blob, Bytes32, Bytes48, Error, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, + Blob, Bytes32, Bytes48, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT, BYTES_PER_PROOF, FIELD_ELEMENTS_PER_BLOB, }; +#[derive(Debug)] +pub enum Error { + /// An error from the underlying kzg library. 
+ Kzg(c_kzg::Error), + /// The kzg verification failed + KzgVerificationFailed, +} + +impl From for Error { + fn from(value: c_kzg::Error) -> Self { + Error::Kzg(value) + } +} + /// A wrapper over a kzg library that holds the trusted setup parameters. #[derive(Debug)] pub struct Kzg { @@ -35,6 +49,7 @@ impl Kzg { ) -> Result { c_kzg::KzgProof::compute_blob_kzg_proof(blob, &kzg_commitment.into(), &self.trusted_setup) .map(|proof| KzgProof(proof.to_bytes().into_inner())) + .map_err(Into::into) } /// Verify a kzg proof given the blob, kzg commitment and kzg proof. @@ -43,13 +58,17 @@ impl Kzg { blob: &Blob, kzg_commitment: KzgCommitment, kzg_proof: KzgProof, - ) -> Result { - c_kzg::KzgProof::verify_blob_kzg_proof( + ) -> Result<(), Error> { + if !c_kzg::KzgProof::verify_blob_kzg_proof( blob, &kzg_commitment.into(), &kzg_proof.into(), &self.trusted_setup, - ) + )? { + Err(Error::KzgVerificationFailed) + } else { + Ok(()) + } } /// Verify a batch of blob commitment proof triplets. @@ -61,7 +80,7 @@ impl Kzg { blobs: &[Blob], kzg_commitments: &[KzgCommitment], kzg_proofs: &[KzgProof], - ) -> Result { + ) -> Result<(), Error> { let commitments_bytes = kzg_commitments .iter() .map(|comm| Bytes48::from(*comm)) @@ -72,18 +91,23 @@ impl Kzg { .map(|proof| Bytes48::from(*proof)) .collect::>(); - c_kzg::KzgProof::verify_blob_kzg_proof_batch( + if !c_kzg::KzgProof::verify_blob_kzg_proof_batch( blobs, &commitments_bytes, &proofs_bytes, &self.trusted_setup, - ) + )? { + Err(Error::KzgVerificationFailed) + } else { + Ok(()) + } } /// Converts a blob to a kzg commitment. pub fn blob_to_kzg_commitment(&self, blob: &Blob) -> Result { c_kzg::KzgCommitment::blob_to_kzg_commitment(blob, &self.trusted_setup) .map(|commitment| KzgCommitment(commitment.to_bytes().into_inner())) + .map_err(Into::into) } /// Computes the kzg proof for a given `blob` and an evaluation point `z` @@ -94,6 +118,7 @@ impl Kzg { ) -> Result<(KzgProof, Bytes32), Error> { c_kzg::KzgProof::compute_kzg_proof(blob, z, &self.trusted_setup) .map(|(proof, y)| (KzgProof(proof.to_bytes().into_inner()), y)) + .map_err(Into::into) } /// Verifies a `kzg_proof` for a `kzg_commitment` that evaluating a polynomial at `z` results in `y` @@ -111,5 +136,6 @@ impl Kzg { &kzg_proof.into(), &self.trusted_setup, ) + .map_err(Into::into) } } diff --git a/slasher/src/block_queue.rs b/slasher/src/block_queue.rs index 3d2472c18ad..b91ceba89a4 100644 --- a/slasher/src/block_queue.rs +++ b/slasher/src/block_queue.rs @@ -1,17 +1,18 @@ use parking_lot::Mutex; +use std::collections::HashSet; use types::SignedBeaconBlockHeader; #[derive(Debug, Default)] pub struct BlockQueue { - blocks: Mutex>, + blocks: Mutex>, } impl BlockQueue { pub fn queue(&self, block_header: SignedBeaconBlockHeader) { - self.blocks.lock().push(block_header) + self.blocks.lock().insert(block_header); } - pub fn dequeue(&self) -> Vec { + pub fn dequeue(&self) -> HashSet { let mut blocks = self.blocks.lock(); std::mem::take(&mut *blocks) } diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index dffed7647bf..8bc36d008b1 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -40,3 +40,4 @@ beacon_chain = { workspace = true } store = { workspace = true } fork_choice = { workspace = true } execution_layer = { workspace = true } +logging = { workspace = true } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 452d805ceca..e42db1801df 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := 
v1.4.0-beta.3 +TESTS_TAG := v1.4.0-beta.4 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index db94106975e..9884a709eb9 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -1,6 +1,11 @@ use super::*; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; -use ::fork_choice::PayloadVerificationStatus; +use ::fork_choice::{PayloadVerificationStatus, ProposerHeadError}; +use beacon_chain::beacon_proposer_cache::compute_proposer_duties_from_head; +use beacon_chain::blob_verification::GossipBlobError; +use beacon_chain::chain_config::{ + DisallowedReOrgOffsets, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, +}; use beacon_chain::slot_clock::SlotClock; use beacon_chain::{ attestation_verification::{ @@ -20,7 +25,7 @@ use std::time::Duration; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlobSidecar, BlobsList, Checkpoint, EthSpec, ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, KzgProof, - ProgressiveBalancesMode, Signature, SignedBeaconBlock, SignedBlobSidecar, Slot, Uint256, + ProgressiveBalancesMode, ProposerPreparationData, SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] @@ -38,6 +43,13 @@ pub struct Head { root: Hash256, } +#[derive(Debug, Clone, Copy, PartialEq, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ShouldOverrideFcu { + validator_is_connected: bool, + result: bool, +} + #[derive(Debug, Clone, Deserialize)] #[serde(deny_unknown_fields)] pub struct Checks { @@ -50,6 +62,8 @@ pub struct Checks { u_justified_checkpoint: Option, u_finalized_checkpoint: Option, proposer_boost_root: Option, + get_proposer_head: Option, + should_override_forkchoice_update: Option, } #[derive(Debug, Clone, Deserialize)] @@ -256,6 +270,8 @@ impl Case for ForkChoiceTest { u_justified_checkpoint, u_finalized_checkpoint, proposer_boost_root, + get_proposer_head, + should_override_forkchoice_update: should_override_fcu, } = checks.as_ref(); if let Some(expected_head) = head { @@ -294,6 +310,14 @@ impl Case for ForkChoiceTest { if let Some(expected_proposer_boost_root) = proposer_boost_root { tester.check_expected_proposer_boost_root(*expected_proposer_boost_root)?; } + + if let Some(should_override_fcu) = should_override_fcu { + tester.check_should_override_fcu(*should_override_fcu)?; + } + + if let Some(expected_proposer_head) = get_proposer_head { + tester.check_expected_proposer_head(*expected_proposer_head)?; + } } } } @@ -325,6 +349,7 @@ impl Tester { } let harness = BeaconChainHarness::>::builder(E::default()) + .logger(logging::test_logger()) .spec(spec.clone()) .keypairs(vec![]) .chain_config(ChainConfig { @@ -413,6 +438,8 @@ impl Tester { ) -> Result<(), Error> { let block_root = block.canonical_root(); + let mut blob_success = true; + // Convert blobs and kzg_proofs into sidecars, then plumb them into the availability tracker if let Some(blobs) = blobs.clone() { let proofs = kzg_proofs.unwrap(); @@ -432,25 +459,32 @@ impl Tester { .zip(commitments.into_iter()) .enumerate() { - let signed_sidecar = SignedBlobSidecar { - message: Arc::new(BlobSidecar { - block_root, - index: i as u64, - slot: block.slot(), - block_parent_root: block.parent_root(), - proposer_index: block.message().proposer_index(), - blob, - kzg_commitment, - kzg_proof, - }), - 
signature: Signature::empty(), - _phantom: Default::default(), - }; - let result = self.block_on_dangerous( - self.harness - .chain - .process_gossip_blob(GossipVerifiedBlob::__assumed_valid(signed_sidecar)), - )?; + let blob_sidecar = Arc::new(BlobSidecar { + index: i as u64, + blob, + kzg_commitment, + kzg_proof, + signed_block_header: block.signed_block_header(), + kzg_commitment_inclusion_proof: block + .message() + .body() + .kzg_commitment_merkle_proof(i) + .unwrap(), + }); + + let chain = self.harness.chain.clone(); + let blob = + match GossipVerifiedBlob::new(blob_sidecar.clone(), blob_sidecar.index, &chain) + { + Ok(gossip_verified_blob) => gossip_verified_blob, + Err(GossipBlobError::KzgError(_)) => { + blob_success = false; + GossipVerifiedBlob::__assumed_valid(blob_sidecar) + } + Err(_) => GossipVerifiedBlob::__assumed_valid(blob_sidecar), + }; + let result = + self.block_on_dangerous(self.harness.chain.process_gossip_blob(blob))?; if valid { assert!(result.is_ok()); } @@ -466,7 +500,7 @@ impl Tester { || Ok(()), ))? .map(|avail: AvailabilityProcessingStatus| avail.try_into()); - let success = result.as_ref().map_or(false, |inner| inner.is_ok()); + let success = blob_success && result.as_ref().map_or(false, |inner| inner.is_ok()); if success != valid { return Err(Error::DidntFail(format!( "block with root {} was valid={} whilst test expects valid={}. result: {:?}", @@ -703,6 +737,82 @@ impl Tester { expected_proposer_boost_root, ) } + + pub fn check_expected_proposer_head( + &self, + expected_proposer_head: Hash256, + ) -> Result<(), Error> { + let mut fc = self.harness.chain.canonical_head.fork_choice_write_lock(); + let slot = self.harness.chain.slot().unwrap(); + let canonical_head = fc.get_head(slot, &self.harness.spec).unwrap(); + let proposer_head_result = fc.get_proposer_head( + slot, + canonical_head, + DEFAULT_RE_ORG_THRESHOLD, + &DisallowedReOrgOffsets::default(), + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + ); + let proposer_head = match proposer_head_result { + Ok(head) => head.parent_node.root, + Err(ProposerHeadError::DoNotReOrg(_)) => canonical_head, + _ => panic!("Unexpected error in get proposer head"), + }; + + check_equal("proposer_head", proposer_head, expected_proposer_head) + } + + pub fn check_should_override_fcu( + &self, + expected_should_override_fcu: ShouldOverrideFcu, + ) -> Result<(), Error> { + // Determine proposer. + let cached_head = self.harness.chain.canonical_head.cached_head(); + let next_slot = cached_head.snapshot.beacon_block.slot() + 1; + let next_slot_epoch = next_slot.epoch(E::slots_per_epoch()); + let (proposer_indices, decision_root, _, fork) = + compute_proposer_duties_from_head(next_slot_epoch, &self.harness.chain).unwrap(); + let proposer_index = proposer_indices[next_slot.as_usize() % E::slots_per_epoch() as usize]; + + // Ensure the proposer index cache is primed. + self.harness + .chain + .beacon_proposer_cache + .lock() + .insert(next_slot_epoch, decision_root, proposer_indices, fork) + .unwrap(); + + // Update the execution layer proposer preparation to match the test config. 
+ let el = self.harness.chain.execution_layer.clone().unwrap(); + self.block_on_dangerous(async { + if expected_should_override_fcu.validator_is_connected { + el.update_proposer_preparation( + next_slot_epoch, + &[ProposerPreparationData { + validator_index: dbg!(proposer_index) as u64, + fee_recipient: Default::default(), + }], + ) + .await; + } else { + el.clear_proposer_preparation(proposer_index as u64).await; + } + }) + .unwrap(); + + // Check forkchoice override. + let canonical_fcu_params = cached_head.forkchoice_update_parameters(); + let fcu_params = self + .harness + .chain + .overridden_forkchoice_update_params(canonical_fcu_params) + .unwrap(); + + check_equal( + "should_override_forkchoice_update", + fcu_params != canonical_fcu_params, + expected_should_override_fcu.result, + ) + } } /// Checks that the `head` checkpoint from the beacon chain head matches the `fc` checkpoint gleaned diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs index 226d162b957..04d1b8d5dc6 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use beacon_chain::kzg_utils::validate_blob; use eth2_network_config::TRUSTED_SETUP_BYTES; -use kzg::{Kzg, KzgCommitment, KzgProof, TrustedSetup}; +use kzg::{Error as KzgError, Kzg, KzgCommitment, KzgProof, TrustedSetup}; use serde::Deserialize; use std::convert::TryInto; use std::marker::PhantomData; @@ -91,8 +91,14 @@ impl Case for KZGVerifyBlobKZGProof { let kzg = get_kzg()?; let result = parse_input(&self.input).and_then(|(blob, commitment, proof)| { - validate_blob::(&kzg, &blob, commitment, proof) - .map_err(|e| Error::InternalError(format!("Failed to validate blob: {:?}", e))) + match validate_blob::(&kzg, &blob, commitment, proof) { + Ok(_) => Ok(true), + Err(KzgError::KzgVerificationFailed) => Ok(false), + Err(e) => Err(Error::InternalError(format!( + "Failed to validate blob: {:?}", + e + ))), + } }); compare_result::(&result, &self.output) diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs index 24182b69f9f..ae5caedf069 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs @@ -1,6 +1,7 @@ use super::*; use crate::case_result::compare_result; use beacon_chain::kzg_utils::validate_blobs; +use kzg::Error as KzgError; use serde::Deserialize; use std::marker::PhantomData; @@ -53,10 +54,23 @@ impl Case for KZGVerifyBlobKZGProofBatch { }; let kzg = get_kzg()?; - let result = parse_input(&self.input).and_then(|(commitments, blobs, proofs)| { - validate_blobs::(&kzg, &commitments, blobs.iter().collect(), &proofs) - .map_err(|e| Error::InternalError(format!("Failed to validate blobs: {:?}", e))) - }); + + let result = + parse_input(&self.input).and_then( + |(commitments, blobs, proofs)| match validate_blobs::( + &kzg, + &commitments, + blobs.iter().collect(), + &proofs, + ) { + Ok(_) => Ok(true), + Err(KzgError::KzgVerificationFailed) => Ok(false), + Err(e) => Err(Error::InternalError(format!( + "Failed to validate blobs: {:?}", + e + ))), + }, + ); compare_result::(&result, &self.output) } diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index 0ba2c926633..d9deda81232 100644 --- 
a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -1,9 +1,9 @@ use super::*; -use crate::decode::{ssz_decode_state, yaml_decode_file}; +use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use std::path::Path; use tree_hash::Hash256; -use types::{BeaconState, EthSpec, ForkName}; +use types::{BeaconBlockBody, BeaconBlockBodyDeneb, BeaconState, EthSpec, ForkName}; #[derive(Debug, Clone, Deserialize)] pub struct Metadata { @@ -82,3 +82,72 @@ impl Case for MerkleProofValidity { Ok(()) } } + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct KzgInclusionMerkleProofValidity { + pub metadata: Option, + pub block: BeaconBlockBody, + pub merkle_proof: MerkleProof, +} + +impl LoadCase for KzgInclusionMerkleProofValidity { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let block = match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + return Err(Error::InternalError(format!( + "KZG inclusion merkle proof validity test skipped for {:?}", + fork_name + ))) + } + ForkName::Deneb => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))? + } + }; + let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; + // Metadata does not exist in these tests but it is left like this just in case. + let meta_path = path.join("meta.yaml"); + let metadata = if meta_path.exists() { + Some(yaml_decode_file(&meta_path)?) + } else { + None + }; + + Ok(Self { + metadata, + block: block.into(), + merkle_proof, + }) + } +} + +impl Case for KzgInclusionMerkleProofValidity { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let Ok(proof) = self.block.to_ref().kzg_commitment_merkle_proof(0) else { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof".to_string(), + )); + }; + let proof_len = proof.len(); + let branch_len = self.merkle_proof.branch.len(); + if proof_len != branch_len { + return Err(Error::NotEqual(format!( + "Branches not equal in length computed: {}, expected {}", + proof_len, branch_len + ))); + } + + for (i, proof_leaf) in proof.iter().enumerate().take(proof_len) { + let expected_leaf = self.merkle_proof.branch[i]; + if *proof_leaf != expected_leaf { + return Err(Error::NotEqual(format!( + "Leaves not equal in merkle proof computed: {}, expected: {}", + hex::encode(proof_leaf), + hex::encode(expected_leaf) + ))); + } + } + + Ok(()) + } +} diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 6dec9346291..0295ff1bd49 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -560,6 +560,13 @@ impl Handler for ForkChoiceHandler { return false; } + // No FCU override tests prior to bellatrix. + if self.handler_name == "should_override_forkchoice_update" + && (fork_name == ForkName::Base || fork_name == ForkName::Altair) + { + return false; + } + // These tests check block validity (which may include signatures) and there is no need to // run them with fake crypto. 
cfg!(not(feature = "fake_crypto")) @@ -786,6 +793,34 @@ impl Handler for MerkleProofValidityHandler { } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KzgInclusionMerkleProofValidityHandler(PhantomData); + +impl Handler for KzgInclusionMerkleProofValidityHandler { + type Case = cases::KzgInclusionMerkleProofValidity; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "merkle_proof" + } + + fn handler_name(&self) -> String { + "single_merkle_proof".into() + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + // Enabled in Deneb + fork_name != ForkName::Base + && fork_name != ForkName::Altair + && fork_name != ForkName::Merge + && fork_name != ForkName::Capella + } +} + #[derive(Derivative)] #[derivative(Default(bound = ""))] pub struct OperationsHandler(PhantomData<(E, O)>); diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index ef128440301..13121854acc 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -78,7 +78,6 @@ type_name!(ProposerSlashing); type_name_generic!(SignedAggregateAndProof); type_name_generic!(SignedBeaconBlock); type_name!(SignedBeaconBlockHeader); -type_name_generic!(SignedBlobSidecar); type_name_generic!(SignedContributionAndProof); type_name!(SignedVoluntaryExit); type_name!(SigningData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index d2d30b596cc..dd25dba8b60 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -1,7 +1,7 @@ #![cfg(feature = "ef_tests")] -use ef_tests::*; -use types::*; +use ef_tests::{KzgInclusionMerkleProofValidityHandler, *}; +use types::{MainnetEthSpec, MinimalEthSpec, *}; // Check that the hand-computed multiplications on EthSpec are correctly computed. // This test lives here because one is most likely to muck these up during a spec update. @@ -378,12 +378,6 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::deneb_only().run(); } - #[test] - fn signed_blob_sidecar() { - SszStaticHandler::, MinimalEthSpec>::deneb_only().run(); - SszStaticHandler::, MainnetEthSpec>::deneb_only().run(); - } - #[test] fn blob_identifier() { SszStaticHandler::::deneb_only().run(); @@ -546,6 +540,18 @@ fn fork_choice_withholding() { // There is no mainnet variant for this test. 
} +#[test] +fn fork_choice_should_override_forkchoice_update() { + ForkChoiceHandler::::new("should_override_forkchoice_update").run(); + ForkChoiceHandler::::new("should_override_forkchoice_update").run(); +} + +#[test] +fn fork_choice_get_proposer_head() { + ForkChoiceHandler::::new("get_proposer_head").run(); + ForkChoiceHandler::::new("get_proposer_head").run(); +} + #[test] fn optimistic_sync() { OptimisticSyncHandler::::default().run(); @@ -598,6 +604,12 @@ fn merkle_proof_validity() { MerkleProofValidityHandler::::default().run(); } +#[test] +fn kzg_inclusion_merkle_proof_validity() { + KzgInclusionMerkleProofValidityHandler::::default().run(); + KzgInclusionMerkleProofValidityHandler::::default().run(); +} + #[test] fn rewards() { for handler in &["basic", "leak", "random"] { diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 47f16fc4156..00d9b2e86db 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -11,7 +11,7 @@ use crate::{ }; use bls::SignatureBytes; use environment::RuntimeContext; -use eth2::types::{BlockContents, SignedBlockContents}; +use eth2::types::{FullBlockContents, PublishBlockRequest}; use eth2::{BeaconNodeHttpClient, StatusCode}; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; @@ -22,7 +22,7 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use types::{ - AbstractExecPayload, BlindedPayload, BlockType, EthSpec, FullPayload, Graffiti, PublicKeyBytes, + BlindedBeaconBlock, BlockType, EthSpec, Graffiti, PublicKeyBytes, SignedBlindedBeaconBlock, Slot, }; @@ -329,10 +329,7 @@ impl BlockService { self.inner.context.executor.spawn( async move { if builder_proposals { - let result = service - .clone() - .publish_block::>(slot, validator_pubkey) - .await; + let result = service.publish_block(slot, validator_pubkey, true).await; match result { Err(BlockError::Recoverable(e)) => { error!( @@ -342,9 +339,8 @@ impl BlockService { "block_slot" => ?slot, "info" => "blinded proposal failed, attempting full block" ); - if let Err(e) = service - .publish_block::>(slot, validator_pubkey) - .await + if let Err(e) = + service.publish_block(slot, validator_pubkey, false).await { // Log a `crit` since a full block // (non-builder) proposal failed. @@ -371,9 +367,8 @@ impl BlockService { } Ok(_) => {} }; - } else if let Err(e) = service - .publish_block::>(slot, validator_pubkey) - .await + } else if let Err(e) = + service.publish_block(slot, validator_pubkey, false).await { // Log a `crit` since a full block (non-builder) // proposal failed. @@ -394,10 +389,11 @@ impl BlockService { } /// Produce a block at the given slot for validator_pubkey - async fn publish_block>( - self, + async fn publish_block( + &self, slot: Slot, validator_pubkey: PublicKeyBytes, + builder_proposal: bool, ) -> Result<(), BlockError> { let log = self.context.log(); let _timer = @@ -460,7 +456,7 @@ impl BlockService { // // Try the proposer nodes last, since it's likely that they don't have a // great view of attestations on the network. 
- let block_contents = proposer_fallback + let unsigned_block = proposer_fallback .request_proposers_last( RequireSynced::No, OfflineOnFailure::Yes, @@ -471,20 +467,32 @@ impl BlockService { randao_reveal_ref, graffiti, proposer_index, + builder_proposal, log, ) }, ) .await?; - let (block, maybe_blob_sidecars) = block_contents.deconstruct(); let signing_timer = metrics::start_timer(&metrics::BLOCK_SIGNING_TIMES); - let signed_block = match self_ref - .validator_store - .sign_block::(*validator_pubkey_ref, block, current_slot) - .await - { + let res = match unsigned_block { + UnsignedBlock::Full(block_contents) => { + let (block, maybe_blobs) = block_contents.deconstruct(); + self_ref + .validator_store + .sign_block(*validator_pubkey_ref, block, current_slot) + .await + .map(|b| SignedBlock::Full(PublishBlockRequest::new(b, maybe_blobs))) + } + UnsignedBlock::Blinded(block) => self_ref + .validator_store + .sign_block(*validator_pubkey_ref, block, current_slot) + .await + .map(SignedBlock::Blinded), + }; + + let signed_block = match res { Ok(block) => block, Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { // A pubkey can be missing when a validator was recently removed @@ -506,36 +514,6 @@ impl BlockService { } }; - let maybe_signed_blobs = match maybe_blob_sidecars { - Some(blob_sidecars) => { - match self_ref - .validator_store - .sign_blobs::(*validator_pubkey_ref, blob_sidecars) - .await - { - Ok(signed_blobs) => Some(signed_blobs), - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - // A pubkey can be missing when a validator was recently removed - // via the API. - warn!( - log, - "Missing pubkey for blobs"; - "info" => "a validator may have recently been removed from this VC", - "pubkey" => ?pubkey, - "slot" => ?slot - ); - return Ok(()); - } - Err(e) => { - return Err(BlockError::Recoverable(format!( - "Unable to sign blobs: {:?}", - e - ))) - } - } - } - None => None, - }; let signing_time_ms = Duration::from_secs_f64(signing_timer.map_or(0.0, |t| t.stop_and_record())).as_millis(); @@ -546,8 +524,6 @@ impl BlockService { "signing_time_ms" => signing_time_ms, ); - let signed_block_contents = SignedBlockContents::from((signed_block, maybe_signed_blobs)); - // Publish block with first available beacon node. 
// // Try the proposer nodes first, since we've likely gone to efforts to @@ -558,11 +534,8 @@ impl BlockService { RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async { - self.publish_signed_block_contents::( - &signed_block_contents, - beacon_node, - ) - .await + self.publish_signed_block_contents(&signed_block, beacon_node) + .await }, ) .await?; @@ -570,41 +543,41 @@ impl BlockService { info!( log, "Successfully published block"; - "block_type" => ?Payload::block_type(), - "deposits" => signed_block_contents.signed_block().message().body().deposits().len(), - "attestations" => signed_block_contents.signed_block().message().body().attestations().len(), + "block_type" => ?signed_block.block_type(), + "deposits" => signed_block.num_deposits(), + "attestations" => signed_block.num_attestations(), "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => signed_block_contents.signed_block().slot().as_u64(), + "slot" => signed_block.slot().as_u64(), ); Ok(()) } - async fn publish_signed_block_contents>( + async fn publish_signed_block_contents( &self, - signed_block_contents: &SignedBlockContents, + signed_block: &SignedBlock, beacon_node: &BeaconNodeHttpClient, ) -> Result<(), BlockError> { let log = self.context.log(); - let slot = signed_block_contents.signed_block().slot(); - match Payload::block_type() { - BlockType::Full => { + let slot = signed_block.slot(); + match signed_block { + SignedBlock::Full(signed_block) => { let _post_timer = metrics::start_timer_vec( &metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK_HTTP_POST], ); beacon_node - .post_beacon_blocks(signed_block_contents) + .post_beacon_blocks(signed_block) .await .or_else(|e| handle_block_post_error(e, slot, log))? } - BlockType::Blinded => { + SignedBlock::Blinded(signed_block) => { let _post_timer = metrics::start_timer_vec( &metrics::BLOCK_SERVICE_TIMES, &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], ); beacon_node - .post_beacon_blinded_blocks(signed_block_contents) + .post_beacon_blinded_blocks(signed_block) .await .or_else(|e| handle_block_post_error(e, slot, log))? } @@ -612,22 +585,23 @@ impl BlockService { Ok::<_, BlockError>(()) } - async fn get_validator_block>( + async fn get_validator_block( beacon_node: &BeaconNodeHttpClient, slot: Slot, randao_reveal_ref: &SignatureBytes, graffiti: Option, proposer_index: Option, + builder_proposal: bool, log: &Logger, - ) -> Result, BlockError> { - let block_contents: BlockContents = match Payload::block_type() { - BlockType::Full => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); + ) -> Result, BlockError> { + let unsigned_block = if !builder_proposal { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); + UnsignedBlock::Full( beacon_node - .get_validator_blocks::(slot, randao_reveal_ref, graffiti.as_ref()) + .get_validator_blocks::(slot, randao_reveal_ref, graffiti.as_ref()) .await .map_err(|e| { BlockError::Recoverable(format!( @@ -635,19 +609,16 @@ impl BlockService { e )) })? 
- .data - } - BlockType::Blinded => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], - ); + .data, + ) + } else { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], + ); + UnsignedBlock::Blinded( beacon_node - .get_validator_blinded_blocks::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) + .get_validator_blinded_blocks::(slot, randao_reveal_ref, graffiti.as_ref()) .await .map_err(|e| { BlockError::Recoverable(format!( @@ -655,8 +626,8 @@ impl BlockService { e )) })? - .data - } + .data, + ) }; info!( @@ -664,13 +635,59 @@ impl BlockService { "Received unsigned block"; "slot" => slot.as_u64(), ); - if proposer_index != Some(block_contents.block().proposer_index()) { + if proposer_index != Some(unsigned_block.proposer_index()) { return Err(BlockError::Recoverable( "Proposer index does not match block proposer. Beacon chain re-orged".to_string(), )); } - Ok::<_, BlockError>(block_contents) + Ok::<_, BlockError>(unsigned_block) + } +} + +pub enum UnsignedBlock { + Full(FullBlockContents), + Blinded(BlindedBeaconBlock), +} + +impl UnsignedBlock { + pub fn proposer_index(&self) -> u64 { + match self { + UnsignedBlock::Full(block) => block.block().proposer_index(), + UnsignedBlock::Blinded(block) => block.proposer_index(), + } + } +} + +pub enum SignedBlock { + Full(PublishBlockRequest), + Blinded(SignedBlindedBeaconBlock), +} + +impl SignedBlock { + pub fn block_type(&self) -> BlockType { + match self { + SignedBlock::Full(_) => BlockType::Full, + SignedBlock::Blinded(_) => BlockType::Blinded, + } + } + pub fn slot(&self) -> Slot { + match self { + SignedBlock::Full(block) => block.signed_block().message().slot(), + SignedBlock::Blinded(block) => block.message().slot(), + } + } + pub fn num_deposits(&self) -> usize { + match self { + SignedBlock::Full(block) => block.signed_block().message().body().deposits().len(), + SignedBlock::Blinded(block) => block.message().body().deposits().len(), + } + } + pub fn num_attestations(&self) -> usize { + match self { + SignedBlock::Full(block) => block.signed_block().message().body().attestations().len(), + SignedBlock::Blinded(block) => block.message().body().attestations().len(), + } } } diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index ed16f52d280..52b52126bd6 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -59,11 +59,6 @@ lazy_static::lazy_static! 
{ "Total count of attempted block signings", &["status"] ); - pub static ref SIGNED_BLOBS_TOTAL: Result = try_create_int_counter_vec( - "vc_signed_beacon_blobs_total", - "Total count of attempted blob signings", - &["status"] - ); pub static ref SIGNED_ATTESTATIONS_TOTAL: Result = try_create_int_counter_vec( "vc_signed_attestations_total", "Total count of attempted Attestation signings", diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index 96bfd2511f1..0de2f2f54fa 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -37,7 +37,6 @@ pub enum Error { pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload = FullPayload> { RandaoReveal(Epoch), BeaconBlock(&'a BeaconBlock), - BlobSidecar(&'a Payload::Sidecar), AttestationData(&'a AttestationData), SignedAggregateAndProof(&'a AggregateAndProof), SelectionProof(Slot), @@ -60,7 +59,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignableMessage<'a, T, Pay match self { SignableMessage::RandaoReveal(epoch) => epoch.signing_root(domain), SignableMessage::BeaconBlock(b) => b.signing_root(domain), - SignableMessage::BlobSidecar(b) => b.signing_root(domain), SignableMessage::AttestationData(a) => a.signing_root(domain), SignableMessage::SignedAggregateAndProof(a) => a.signing_root(domain), SignableMessage::SelectionProof(slot) => slot.signing_root(domain), @@ -184,10 +182,6 @@ impl SigningMethod { Web3SignerObject::RandaoReveal { epoch } } SignableMessage::BeaconBlock(block) => Web3SignerObject::beacon_block(block)?, - SignableMessage::BlobSidecar(_) => { - // https://github.com/ConsenSys/web3signer/issues/726 - unimplemented!("Web3Signer blob signing not implemented.") - } SignableMessage::AttestationData(a) => Web3SignerObject::Attestation(a), SignableMessage::SignedAggregateAndProof(a) => { Web3SignerObject::AggregateAndProof(a) diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 612dd96bcd1..60155d8efb7 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -6,7 +6,6 @@ use crate::{ Config, }; use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; -use eth2::types::VariableList; use parking_lot::{Mutex, RwLock}; use slashing_protection::{ interchange::Interchange, InterchangeError, NotSafe, Safe, SlashingDatabase, @@ -18,16 +17,14 @@ use std::marker::PhantomData; use std::path::Path; use std::sync::Arc; use task_executor::TaskExecutor; -use types::sidecar::Sidecar; use types::{ attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, ForkName, Graffiti, Hash256, Keypair, PublicKeyBytes, - SelectionProof, SidecarList, Signature, SignedAggregateAndProof, SignedBeaconBlock, - SignedContributionAndProof, SignedRoot, SignedSidecar, SignedSidecarList, - SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData, - SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, - ValidatorRegistrationData, VoluntaryExit, + SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, + SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit, + Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, + SyncSelectionProof, SyncSubnetId, 
ValidatorRegistrationData, VoluntaryExit, }; use validator_dir::ValidatorDir; @@ -567,39 +564,6 @@ impl ValidatorStore { } } - pub async fn sign_blobs>( - &self, - validator_pubkey: PublicKeyBytes, - blob_sidecars: SidecarList, - ) -> Result, Error> { - let mut signed_blob_sidecars = Vec::new(); - for blob_sidecar in blob_sidecars.into_iter() { - let slot = blob_sidecar.slot(); - let signing_epoch = slot.epoch(E::slots_per_epoch()); - let signing_context = self.signing_context(Domain::BlobSidecar, signing_epoch); - let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; - - let signature = signing_method - .get_signature::( - SignableMessage::BlobSidecar(blob_sidecar.as_ref()), - signing_context, - &self.spec, - &self.task_executor, - ) - .await?; - - metrics::inc_counter_vec(&metrics::SIGNED_BLOBS_TOTAL, &[metrics::SUCCESS]); - - signed_blob_sidecars.push(SignedSidecar { - message: blob_sidecar, - signature, - _phantom: PhantomData, - }); - } - - Ok(VariableList::from(signed_blob_sidecars)) - } - pub async fn sign_attestation( &self, validator_pubkey: PublicKeyBytes, From 52117f43ba276df30d8e7d4eecc864f211885f81 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Wed, 6 Dec 2023 14:51:40 -0600 Subject: [PATCH 05/19] Small Improvements (#4980) * initial changes * use arc in vector * Utilize new pattern for KzgVerifiedBlob * fmt * Update beacon_node/beacon_chain/src/blob_verification.rs Co-authored-by: realbigsean * forgot to save.. * lint * fmt.. again --------- Co-authored-by: realbigsean --- .../beacon_chain/src/blob_verification.rs | 20 ++++---- .../src/data_availability_checker.rs | 9 ++-- .../availability_view.rs | 47 ++++++++----------- .../overflow_lru_cache.rs | 4 +- 4 files changed, 36 insertions(+), 44 deletions(-) diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index e2a1f0928f0..05457adab32 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -276,6 +276,10 @@ impl Ord for KzgVerifiedBlob { } impl KzgVerifiedBlob { + pub fn new(blob: Arc>, kzg: &Kzg) -> Result { + verify_kzg_for_blob(blob, kzg) + } + pub fn to_blob(self) -> Arc> { self.blob } @@ -289,14 +293,12 @@ impl KzgVerifiedBlob { pub fn blob_index(&self) -> u64 { self.blob.index } -} - -#[cfg(test)] -impl KzgVerifiedBlob { - pub fn new(blob: BlobSidecar) -> Self { - Self { - blob: Arc::new(blob), - } + /// Construct a `KzgVerifiedBlob` that is assumed to be valid. + /// + /// This should ONLY be used for testing. 
+ #[cfg(test)] + pub fn __assumed_valid(blob: Arc>) -> Self { + Self { blob } } } @@ -599,7 +601,7 @@ pub fn validate_blob_sidecar_for_gossip( .as_ref() .ok_or(GossipBlobError::KzgNotInitialized)?; let kzg_verified_blob = - verify_kzg_for_blob(blob_sidecar, kzg).map_err(GossipBlobError::KzgError)?; + KzgVerifiedBlob::new(blob_sidecar, kzg).map_err(GossipBlobError::KzgError)?; Ok(GossipVerifiedBlob { block_root, diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 2fcb3b7a9e8..6b327246a2e 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -1,4 +1,4 @@ -use crate::blob_verification::{verify_kzg_for_blob, verify_kzg_for_blob_list, GossipVerifiedBlob}; +use crate::blob_verification::{verify_kzg_for_blob_list, GossipVerifiedBlob, KzgVerifiedBlob}; use crate::block_verification_types::{ AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock, }; @@ -199,10 +199,9 @@ impl DataAvailabilityChecker { ) -> Result, AvailabilityCheckError> { let mut verified_blobs = vec![]; if let Some(kzg) = self.kzg.as_ref() { - for blob in blobs.iter().flatten() { - verified_blobs.push( - verify_kzg_for_blob(blob.clone(), kzg).map_err(AvailabilityCheckError::Kzg)?, - ); + for blob in Vec::from(blobs).into_iter().flatten() { + verified_blobs + .push(KzgVerifiedBlob::new(blob, kzg).map_err(AvailabilityCheckError::Kzg)?); } } else { return Err(AvailabilityCheckError::KzgNotInitialized); diff --git a/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs b/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs index f013cf649a8..776f81ee545 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs @@ -278,8 +278,8 @@ pub mod tests { type Setup = ( SignedBeaconBlock, - FixedVector>, ::MaxBlobsPerBlock>, - FixedVector>, ::MaxBlobsPerBlock>, + FixedVector>>, ::MaxBlobsPerBlock>, + FixedVector>>, ::MaxBlobsPerBlock>, ); pub fn pre_setup() -> Setup { @@ -290,20 +290,20 @@ pub mod tests { for blob in blobs_vec { if let Some(b) = blobs.get_mut(blob.index as usize) { - *b = Some(blob); + *b = Some(Arc::new(blob)); } } let mut invalid_blobs: FixedVector< - Option>, + Option>>, ::MaxBlobsPerBlock, > = FixedVector::default(); for (index, blob) in blobs.iter().enumerate() { - let mut invalid_blob_opt = blob.clone(); - if let Some(invalid_blob) = invalid_blob_opt.as_mut() { - invalid_blob.kzg_commitment = KzgCommitment::random_for_test(&mut rng); + if let Some(invalid_blob) = blob { + let mut blob_copy = invalid_blob.as_ref().clone(); + blob_copy.kzg_commitment = KzgCommitment::random_for_test(&mut rng); + *invalid_blobs.get_mut(index).unwrap() = Some(Arc::new(blob_copy)); } - *invalid_blobs.get_mut(index).unwrap() = invalid_blob_opt; } (block, blobs, invalid_blobs) @@ -317,8 +317,8 @@ pub mod tests { pub fn setup_processing_components( block: SignedBeaconBlock, - valid_blobs: FixedVector>, ::MaxBlobsPerBlock>, - invalid_blobs: FixedVector>, ::MaxBlobsPerBlock>, + valid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + invalid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, ) -> ProcessingViewSetup { let commitments = block .message() @@ -349,8 +349,8 @@ pub mod tests { pub fn setup_pending_components( block: SignedBeaconBlock, - valid_blobs: FixedVector>, ::MaxBlobsPerBlock>, - invalid_blobs: FixedVector>, 
::MaxBlobsPerBlock>, + valid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + invalid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, ) -> PendingComponentsSetup { let blobs = FixedVector::from( valid_blobs @@ -358,7 +358,7 @@ pub mod tests { .map(|blob_opt| { blob_opt .as_ref() - .map(|blob| KzgVerifiedBlob::new(blob.clone())) + .map(|blob| KzgVerifiedBlob::__assumed_valid(blob.clone())) }) .collect::>(), ); @@ -368,7 +368,7 @@ pub mod tests { .map(|blob_opt| { blob_opt .as_ref() - .map(|blob| KzgVerifiedBlob::new(blob.clone())) + .map(|blob| KzgVerifiedBlob::__assumed_valid(blob.clone())) }) .collect::>(), ); @@ -402,21 +402,12 @@ pub mod tests { pub fn setup_child_components( block: SignedBeaconBlock, - valid_blobs: FixedVector>, ::MaxBlobsPerBlock>, - invalid_blobs: FixedVector>, ::MaxBlobsPerBlock>, + valid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + invalid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, ) -> ChildComponentsSetup { - let blobs = FixedVector::from( - valid_blobs - .into_iter() - .map(|blob_opt| blob_opt.clone().map(Arc::new)) - .collect::>(), - ); - let invalid_blobs = FixedVector::from( - invalid_blobs - .into_iter() - .map(|blob_opt| blob_opt.clone().map(Arc::new)) - .collect::>(), - ); + let blobs = FixedVector::from(valid_blobs.into_iter().cloned().collect::>()); + let invalid_blobs = + FixedVector::from(invalid_blobs.into_iter().cloned().collect::>()); (Arc::new(block), blobs, invalid_blobs) } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 36d7c2acad8..7997a2e5e36 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -738,7 +738,7 @@ impl ssz::Decode for OverflowKey { mod test { use super::*; use crate::{ - blob_verification::{validate_blob_sidecar_for_gossip, GossipVerifiedBlob}, + blob_verification::GossipVerifiedBlob, block_verification::PayloadVerificationOutcome, block_verification_types::{AsBlock, BlockImportData}, data_availability_checker::STATE_LRU_CAPACITY, @@ -925,7 +925,7 @@ mod test { .into_iter() .map(|sidecar| { let subnet = sidecar.index; - validate_blob_sidecar_for_gossip(sidecar, subnet, &harness.chain) + GossipVerifiedBlob::new(sidecar, subnet, &harness.chain) .expect("should validate blob") }) .collect() From d9d84242a70138816b35a5a2cc32e1c3e9746e19 Mon Sep 17 00:00:00 2001 From: chonghe <44791194+chong-he@users.noreply.github.com> Date: Thu, 7 Dec 2023 07:39:22 +0800 Subject: [PATCH 06/19] CLI in Lighthouse Book (#4571) * Add cli.sh file * update bash script * update Makefile * update * modified test-suite * fix path * Fix cli.sh permissions * update cmd * cli_manual * Revise to update * Update directory in Github * Correct cli.txt directory * test old cli_manual * change exit 1 * Update cli and makefile * Move cli.sh * remove files * fix permission * Indentation and revision * Fixed permission * Create new cli folder * remove dummy * put a dummy file * Revise cli.sh * comment * function * remove vm.md * test make cli * test * testing * testing * update * update * test * test * add vm * change back non-debug mode * add exist and update for future debug * revise * remove troubleshooting part * update * add summary.md * test * test * Update Makefile Co-authored-by: Mac L * Update Makefile Co-authored-by: Mac L * Remove help-cli.md and rearrange * Remove help-cli.md * Update scripts/cli.sh Co-authored-by: Mac L * 
Update scripts/cli.sh Co-authored-by: Mac L * remove maxperf * move then to same line as if * Fix indent and echo file not found * To be explicit in replacing the old file * Add logging when there are changes * Add local variables * spacing * remove cargo fmt * update .md files * Edit exit message to avoid confusion * Remove am and add vm subcommands * Add cargo-fmt * Revise test-suite.yml * Update SUMMARY.md * Add set -e * Add vm * Fix * Add vm * set -e * Remove return 1 and add : * Small revision * Fix typo * Update scripts/cli.sh Co-authored-by: Mac L * Indent * Update scripts/cli.sh Co-authored-by: Mac L * Remove .exe in Windows * Fix period with \. * test * check diff * linux commit * Revert "Merge branch 'book-cli' of https://github.com/chong-he/lighthouse into book-cli" This reverts commit 314005d3f8bc0c13ecfa663ac712b1a2bae17540, reversing changes made to a007f613786221211051394fad76ee1f5d0fe0f5.
* update * update * Remove echo diff * Dockerize * Remove `-ti` * take ownership inside container * fix mistake * proper escaping, restore ownership afterwards * try without taking ownership of repo * update * add diff for troubleshooting * binary * update using linux * binary * make file * remove diff * add diff * update progressive balance help text * Remove diff --------- Co-authored-by: Mac L Co-authored-by: antondlr --- .github/workflows/test-suite.yml | 12 + Makefile | 8 +- book/src/SUMMARY.md | 7 + book/src/help_bn.md | 514 +++++++++++++++++++++++++++++++ book/src/help_general.md | 107 +++++++ book/src/help_vc.md | 202 ++++++++++++ book/src/help_vm.md | 97 ++++++ book/src/help_vm_create.md | 129 ++++++++ book/src/help_vm_import.md | 101 ++++++ book/src/help_vm_move.md | 112 +++++++ scripts/cli.sh | 98 ++++++ 11 files changed, 1386 insertions(+), 1 deletion(-) create mode 100644 book/src/help_bn.md create mode 100644 book/src/help_general.md create mode 100644 book/src/help_vc.md create mode 100644 book/src/help_vm.md create mode 100644 book/src/help_vm_create.md create mode 100644 book/src/help_vm_import.md create mode 100644 book/src/help_vm_move.md create mode 100755 scripts/cli.sh diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 69fb0969c27..0a1499340d0 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -411,3 +411,15 @@ jobs: run: rustup override set beta - name: Run make run: make + cli-check: + name: cli-check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + - name: Run Makefile to trigger the bash script + run: make cli diff --git a/Makefile b/Makefile index 19988fc2c64..c1190ac98cf 100644 --- a/Makefile +++ b/Makefile @@ -175,7 +175,7 @@ test-network-%: env FORK_NAME=$* cargo nextest run --release \ --features "fork_from_env,$(TEST_FEATURES)" \ -p network - + # Run the tests in the `slasher` crate for all supported database backends. test-slasher: cargo nextest run --release -p slasher --features "lmdb,$(TEST_FEATURES)" @@ -200,6 +200,12 @@ test-exec-engine: # test vectors. test: test-release +# Updates the CLI help text pages in the Lighthouse book. +cli: + docker run --rm --user=root \ + -v ${PWD}:/home/runner/actions-runner/lighthouse sigmaprime/github-runner \ + bash -c 'cd lighthouse && make && ./scripts/cli.sh' + # Runs the entire test suite, downloading test vectors if required. 
test-full: cargo-fmt test-release test-debug test-ef test-exec-engine diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 3f58d8aa457..e3236591099 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -53,6 +53,13 @@ * [MEV](./builders.md) * [Merge Migration](./merge-migration.md) * [Late Block Re-orgs](./late-block-re-orgs.md) +* [Built-In Documentation](./help_general.md) + * [Beacon Node](./help_bn.md) + * [Validator Client](./help_vc.md) + * [Validator Manager](./help_vm.md) + * [Create](./help_vm_create.md) + * [Import](./help_vm_import.md) + * [Move](./help_vm_move.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/help_bn.md b/book/src/help_bn.md new file mode 100644 index 00000000000..2a08f901744 --- /dev/null +++ b/book/src/help_bn.md @@ -0,0 +1,514 @@ +# Beacon Node + +``` +Sigma Prime +The primary component which connects to the Ethereum 2.0 P2P network and downloads, verifies and stores blocks. Provides +a HTTP API for querying the beacon chain and publishing messages to the network. + +USAGE: + lighthouse beacon_node [FLAGS] [OPTIONS] + +FLAGS: + --always-prefer-builder-payload If set, the beacon node always uses the payload from the builder instead + of the local payload. + --always-prepare-payload Send payload attributes with every fork choice update. This is intended + for use by block builders, relays and developers. You should set a fee + recipient on this BN and also consider adjusting the --prepare-payload- + lookahead flag. + --builder-fallback-disable-checks This flag disables all checks related to chain health. This means the + builder API will always be used for payload construction, regardless of + recent chain conditions. + --compact-db If present, apply compaction to the database on start-up. Use with + caution. It is generally not recommended unless auto-compaction is + disabled. + --disable-backfill-rate-limiting Disable the backfill sync rate-limiting. This allows users to just sync + the entire chain as fast as possible, however it can result in resource + contention which degrades staking performance. Stakers should generally + choose to avoid this flag since backfill sync is not required for + staking. + --disable-deposit-contract-sync Explicitly disables syncing of deposit logs from the execution node. This + overrides any previous option that depends on it. Useful if you intend to + run a non-validating beacon node. + -x, --disable-enr-auto-update Discovery automatically updates the node's local ENR with an external IP + address and port as seen by other peers on the network. This disables + this feature, fixing the ENR's IP/PORT to those specified on boot. + --disable-lock-timeouts Disable the timeouts applied to some internal locks by default. This can + lead to less spurious failures on slow hardware but is considered + experimental as it may obscure performance issues. + --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + --disable-optimistic-finalized-sync Force Lighthouse to verify every execution block hash with the execution + client during finalized sync. By default block hashes will be checked in + Lighthouse and only passed to the EL if initial verification fails.
+ --disable-packet-filter Disables the discovery packet filter. Useful for testing in smaller + networks + --disable-proposer-reorgs Do not attempt to reorg late blocks from other validators when proposing. + --disable-quic Disables the quic transport. The node will rely solely on the TCP + transport for libp2p connections. + --disable-upnp Disables UPnP support. Setting this will prevent Lighthouse from + attempting to automatically establish external port mappings. + --dummy-eth1 If present, uses an eth1 backend that generates static dummy + data. Identical to the method used at the 2019 Canada interop. + --enable-private-discovery Lighthouse by default does not discover private IP addresses. Set this + flag to enable connection attempts to local addresses. + -e, --enr-match Sets the local ENR IP address and port to match those set for lighthouse. + Specifically, the IP address will be the value of --listen-address and + the UDP port will be --discovery-port. + --eth1 If present the node will connect to an eth1 node. This is required for + block production, you must use this flag if you wish to serve a + validator. + --eth1-purge-cache Purges the eth1 block and deposit caches + --genesis-backfill Attempts to download blocks all the way back to genesis when checkpoint + syncing. + --gui Enable the graphical user interface and all its requirements. This + enables --http and --validator-monitor-auto and enables SSE logging. + -h, --help Prints help information + --http Enable the RESTful HTTP API server. Disabled by default. + --http-allow-sync-stalled Forces the HTTP API to indicate that the node is synced when sync is actually + stalled. This is useful for very small testnets. TESTING ONLY. DO NOT USE + ON MAINNET. + --http-enable-tls Serves the RESTful HTTP API server over TLS. This feature is currently + experimental. + --import-all-attestations Import and aggregate all attestations, regardless of validator + subscriptions. This will only import attestations from already-subscribed + subnets, use with --subscribe-all-subnets to ensure all attestations are + received for import. + --light-client-server Act as a full node supporting light clients on the p2p network + [experimental] + --log-color Force outputting colors when emitting logs to the terminal. + --logfile-compress If present, compress old log files. This can help reduce the space needed + to store old logs. + --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often contain + sensitive information about your validator and so this flag should be + used with caution. For Windows users, the log file permissions will be + inherited from the parent folder. + --metrics Enable the Prometheus metrics HTTP server. Disabled by default. + --private Prevents sending various client identification information. + --proposer-only Sets this beacon node to be a block proposer-only node. This will run the + beacon node in a minimal configuration that is sufficient for block + publishing only. This flag should be used for a beacon node being + referenced by a validator client using the --proposer-node flag. This + configuration is for enabling more secure setups. + --purge-db If present, the chain database will be deleted. Use with caution. + --reconstruct-historic-states After a checkpoint sync, reconstruct historic states in the database. + This requires syncing all the way back to genesis.
+ --reset-payload-statuses When present, Lighthouse will forget the payload statuses of any already- + imported blocks. This can assist in the recovery from a consensus + failure caused by the execution layer. + --shutdown-after-sync Shutdown beacon node as soon as sync is completed. Backfill sync will not + be performed before shutdown. + --slasher Run a slasher alongside the beacon node. It is currently only recommended + for expert users because of the immaturity of the slasher UX and the + extra resources required. + --staking Standard option for a staking beacon node. This will enable the HTTP + server on localhost:5052 and import deposit logs from the execution node. + This is equivalent to `--http` on merge-ready networks, or `--http + --eth1` pre-merge + --subscribe-all-subnets Subscribe to all subnets regardless of validator count. This will also + advertise the beacon node as being long-lived subscribed to all subnets. + --validator-monitor-auto Enables the automatic detection and monitoring of validators connected to + the HTTP API and using the subnet subscription endpoint. This generally + has the effect of providing additional logging and metrics for locally + controlled validators. + -V, --version Prints version information + -z, --zero-ports Sets all listening TCP/UDP ports to 0, allowing the OS to choose some + arbitrary free ports. + +OPTIONS: + --auto-compact-db + Enable or disable automatic compaction of the database on finalization. [default: true] + + --beacon-processor-aggregate-batch-size + Specifies the number of gossip aggregate attestations in a signature verification batch. Higher values may + reduce CPU usage in a healthy network while lower values may increase CPU usage in an unhealthy or hostile + network. [default: 64] + --beacon-processor-attestation-batch-size + Specifies the number of gossip attestations in a signature verification batch. Higher values may reduce CPU + usage in a healthy network whilst lower values may increase CPU usage in an unhealthy or hostile network. + [default: 64] + --beacon-processor-max-workers + Specifies the maximum concurrent tasks for the task scheduler. Increasing this value may increase resource + consumption. Reducing the value may result in decreased resource usage and diminished performance. The + default value is the number of logical CPU cores on the host. + --beacon-processor-reprocess-queue-len + Specifies the length of the queue for messages requiring delayed processing. Higher values may prevent + messages from being dropped while lower values may help protect the node from becoming overwhelmed. + [default: 12288] + --beacon-processor-work-queue-len + Specifies the length of the inbound event queue. Higher values may prevent messages from being dropped while + lower values may help protect the node from becoming overwhelmed. [default: 16384] + --blob-prune-margin-epochs + The margin for blob pruning in epochs. The oldest blobs are pruned up until data_availability_boundary - + blob_prune_margin_epochs. [default: 0] + --blobs-dir + Data directory for the blobs database. + + --block-cache-size + Specifies how many blocks the database should cache in memory. [default: 5] + + --boot-nodes + One or more comma-delimited base64-encoded ENRs to bootstrap the p2p network. Multiaddr is also supported. + + --builder + The URL of a service compatible with the MEV-boost API.
+ + --builder-fallback-epochs-since-finalization + If this node is proposing a block and the chain has not finalized within this number of epochs, it will NOT + query any connected builders, and will use the local execution engine for payload construction. Setting this + value to anything less than 2 will cause the node to NEVER query connected builders. Setting it to 2 will + cause this condition to be hit if there are skipped slots at the start of an epoch, right before this node is + set to propose. [default: 3] + --builder-fallback-skips + If this node is proposing a block and has seen this number of skip slots on the canonical chain in a row, it + will NOT query any connected builders, and will use the local execution engine for payload construction. + [default: 3] + --builder-fallback-skips-per-epoch + If this node is proposing a block and has seen this number of skip slots on the canonical chain in the past + `SLOTS_PER_EPOCH`, it will NOT query any connected builders, and will use the local execution engine for + payload construction. [default: 8] + --builder-profit-threshold + The minimum reward in wei provided to the proposer by a block builder for an external payload to be + considered for inclusion in a proposal. If this threshold is not met, the local EE's payload will be used. + This is currently *NOT* in comparison to the value of the local EE's payload. It simply checks whether the + total proposer reward from an external payload is equal to or greater than this value. In the future, a + comparison to a local payload is likely to be added. Example: Use 250000000000000000 to set the threshold to + 0.25 ETH. [default: 0] + --builder-user-agent + The HTTP user agent to send alongside requests to the builder URL. The default is Lighthouse's version + string. + --checkpoint-block + Set a checkpoint block to start syncing from. Must be aligned and match --checkpoint-state. Using + --checkpoint-sync-url instead is recommended. + --checkpoint-state + Set a checkpoint state to start syncing from. Must be aligned and match --checkpoint-block. Using + --checkpoint-sync-url instead is recommended. + --checkpoint-sync-url + Set the remote beacon node HTTP endpoint to use for checkpoint sync. + + --checkpoint-sync-url-timeout + Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint. [default: 180] + + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag. Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --discovery-port + The UDP port that discovery will listen on. Defaults to `port` + + --discovery-port6 + The UDP port that discovery will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to + `port6` + --enr-address
... + The IP address/DNS address to broadcast to other peers on how to reach this node. If a DNS address is + provided, the enr-address is set to the IP address it resolves to and does not auto-update based on PONG + responses in discovery. Set this only if you are sure other nodes can connect to your local node on this + address. This will update the `ip4` or `ip6` ENR fields accordingly. To update both, set this flag twice + with the different values. + --enr-quic-port + The quic UDP4 port that will be set on the local ENR. Set this only if you are sure other nodes can connect + to your local node on this port over IPv4. + --enr-quic6-port + The quic UDP6 port that will be set on the local ENR. Set this only if you are sure other nodes can connect + to your local node on this port over IPv6. + --enr-tcp-port + The TCP4 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on + this port over IPv4. The --port flag is used if this is not set. + --enr-tcp6-port + The TCP6 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on + this port over IPv6. The --port6 flag is used if this is not set. + --enr-udp-port + The UDP4 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on + this port over IPv4. + --enr-udp6-port + The UDP6 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on + this port over IPv6. + --epochs-per-blob-prune + The epoch interval with which to prune blobs from Lighthouse's database when they are older than the data + availability boundary relative to the current epoch. [default: 1] + --epochs-per-migration + The number of epochs to wait between running the migration of data from the hot DB to the cold DB. Less + frequent runs can be useful for minimizing disk writes. [default: 1] + --eth1-blocks-per-log-query + Specifies the number of blocks that a deposit log query should span. This will reduce the size of responses + from the Eth1 endpoint. [default: 1000] + --eth1-cache-follow-distance + Specifies the distance between the Eth1 chain head and the last block which should be imported into the + cache. Setting this value lower can help compensate for irregular Proof-of-Work block times, but setting it + too low can make the node vulnerable to re-orgs. + --execution-endpoint + Server endpoint for an execution layer JWT-authenticated HTTP JSON-RPC connection. Uses the same endpoint to + populate the deposit cache. + --execution-jwt + File path which contains the hex-encoded JWT secret for the execution endpoint provided in the --execution- + endpoint flag. + --execution-jwt-id + Used by the beacon node to communicate a unique identifier to execution nodes during JWT authentication. It + corresponds to the 'id' field in the JWT claims object. Set to empty by default. + --execution-jwt-secret-key + Hex-encoded JWT secret for the execution endpoint provided in the --execution-endpoint flag. + + --execution-jwt-version + Used by the beacon node to communicate a client version to execution nodes during JWT authentication. It + corresponds to the 'clv' field in the JWT claims object. Set to empty by default. + --execution-timeout-multiplier + Unsigned integer to multiply the default execution timeouts by. [default: 1] + + --fork-choice-before-proposal-timeout + Set the maximum number of milliseconds to wait for fork choice before proposing a block.
You can prevent + waiting at all by setting the timeout to 0; however, you risk proposing atop the wrong parent block. + [default: 250] + --freezer-dir + Data directory for the freezer database. + + --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. [default: 180] + + --graffiti + Specify your custom graffiti to be included in blocks. Defaults to the current version and commit, truncated + to fit in 32 bytes. + --historic-state-cache-size + Specifies how many states from the freezer database should be cached in memory. [default: 1] + + --http-address
+ Set the listen address for the RESTful HTTP API server. + + --http-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not + recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5052). + --http-duplicate-block-status + Status code to send when a block that is already known is POSTed to the HTTP API. + + --http-enable-beacon-processor + The beacon processor is a scheduler which provides quality-of-service and DoS protection. When set to + "true", HTTP API requests will be queued and scheduled alongside other tasks. When set to "false", HTTP API + responses will be executed immediately. + --http-port + Set the listen TCP port for the RESTful HTTP API server. + + --http-spec-fork + Serve the spec for a specific hard fork on /eth/v1/config/spec. It should not be necessary to set this flag. + + --http-sse-capacity-multiplier + Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. Increasing this value can + prevent messages from being dropped. + --http-tls-cert + The path of the certificate to be used when serving the HTTP API server over TLS. + + --http-tls-key + The path of the private key to be used when serving the HTTP API server over TLS. Must not be password- + protected. + --ignore-builder-override-suggestion-threshold + When the EE advises Lighthouse to ignore the builder payload, this flag specifies a percentage threshold for + the difference between the reward from the builder payload and the local EE's payload. This threshold must + be met for Lighthouse to consider ignoring the EE's suggestion. If the reward from the builder's payload + doesn't exceed the local payload by at least this percentage, the local payload will be used. The conditions + under which the EE may make this suggestion depend on the EE's implementation, with the primary intent being + to safeguard against potential censorship attacks from builders. Setting this flag to 0 will cause + Lighthouse to always ignore the EE's suggestion. Default: 10.0 (equivalent to 10%). [default: 10.0] + --invalid-gossip-verified-blocks-path + If a block succeeds gossip validation whilst failing full validation, store the block SSZ as a file at this + path. This feature is only recommended for developers. This directory is not pruned, users should be careful + to avoid filling up their disks. + --libp2p-addresses + One or more comma-delimited multiaddrs to manually connect to a libp2p peer without an ENR. + + --listen-address
... + The address lighthouse will listen on for UDP and TCP connections. To listen over IPv4 and IPv6 set this flag + twice with the different values. + Examples: + - --listen-address '0.0.0.0' will listen over IPv4. + - --listen-address '::' will listen over IPv6. + - --listen-address '0.0.0.0' --listen-address '::' will listen over both IPv4 and IPv6. The order of the + given addresses is not relevant. However, multiple IPv4, or multiple IPv6 addresses will not be accepted. + [default: 0.0.0.0] + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --max-skip-slots + Refuse to skip more than this many slots when processing an attestation. This prevents nodes on minority + forks from wasting our time and disk space, but could also cause unnecessary consensus failures, so is + disabled by default. + --metrics-address
+ Set the listen address for the Prometheus metrics HTTP server. + + --metrics-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not + recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5054). + --metrics-port + Set the listen TCP port for the Prometheus metrics HTTP server. + + --monitoring-endpoint
+ Enables the monitoring service for sending system metrics to a remote endpoint. This can be used to monitor + your setup on certain services (e.g. beaconcha.in). This flag sets the endpoint where the beacon node + metrics will be sent. Note: This will send information to a remote server which may identify and associate + your validators, IP address and other personal information. Always use a HTTPS connection and never provide + an untrusted URL. + --monitoring-endpoint-period + Defines how many seconds to wait between each message sent to the monitoring-endpoint. Default: 60s + + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, + chiado, sepolia, holesky] + --network-dir + Data directory for network keys. Defaults to network/ inside the beacon node dir. + + --port + The TCP/UDP ports to listen on. There are two UDP ports. The discovery UDP port will be set to this value + and the Quic UDP port will be set to this value + 1. The discovery port can be modified by the --discovery- + port flag and the quic port can be modified by the --quic-port flag. If listening over both IPv4 + and IPv6 the --port flag will apply to the IPv4 address and --port6 to the IPv6 address. [default: 9000] + --port6 + The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 and IPv6. Defaults to 9090 when + required. The Quic UDP port will be set to this value + 1. [default: 9090] + --prepare-payload-lookahead + The time before the start of a proposal slot at which payload attributes should be sent. Low values are + useful for execution nodes which don't improve their payload after the first call, and high values are + useful for ensuring the EL is given ample notice. Default: 1/3 of a slot. + --progressive-balances + Control the progressive balances cache mode. The default `fast` mode uses the cache to speed up fork choice. + A more conservative `checked` mode compares the cache's results against results without the cache. If there + is a mismatch, it falls back to the cache-free result. Using the default `fast` mode is recommended unless + advised otherwise by the Lighthouse team. [possible values: disabled, checked, strict, fast] + --proposer-reorg-cutoff + Maximum delay after the start of the slot at which to propose a reorging block. Lower values can prevent + failed reorgs by ensuring the block has ample time to propagate and be processed by the network. The default + is 1/12th of a slot (1 second on mainnet) + --proposer-reorg-disallowed-offsets + Comma-separated list of integer offsets which can be used to avoid proposing reorging blocks at certain + slots. An offset of N means that reorging proposals will not be attempted at any slot such that `slot % + SLOTS_PER_EPOCH == N`. By default only re-orgs at offset 0 will be avoided. Any offsets supplied with this + flag will impose additional restrictions. + --proposer-reorg-epochs-since-finalization + Maximum number of epochs since finalization at which proposer reorgs are allowed. Default: 2 + + --proposer-reorg-threshold + Percentage of vote weight below which to attempt a proposer reorg. Default: 20% + + --prune-blobs + Prune blobs from Lighthouse's database when they are older than the data availability boundary relative + to the current epoch. [default: true] + --prune-payloads + Prune execution payloads from Lighthouse's database. This saves space but imposes load on the execution + client, as payloads need to be reconstructed and sent to syncing peers.
[default: true] + --quic-port + The UDP port that quic will listen on. Defaults to `port` + 1 + + --quic-port6 + The UDP port that quic will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to `port6` + + 1 + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause + your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. + --shuffling-cache-size + Some HTTP API requests can be optimised by caching the shufflings at each epoch. This flag allows the user + to set the shuffling cache size in epochs. Shufflings are dependent on validator count and setting this + value to a large number can consume a large amount of memory. + --slasher-att-cache-size + Set the maximum number of attestation roots for the slasher to cache + + --slasher-backend + Set the database backend to be used by the slasher. [possible values: lmdb, disabled] + + --slasher-broadcast + Broadcast slashings found by the slasher to the rest of the network [Enabled by default]. [default: true] + + --slasher-chunk-size + Number of epochs per validator per chunk stored on disk. + + --slasher-dir + Set the slasher's database directory. + + --slasher-history-length + Configure how many epochs of history the slasher keeps. Immutable after initialization. + + --slasher-max-db-size + Maximum size of the MDBX database used by the slasher. + + --slasher-slot-offset + Set the delay from the start of the slot at which the slasher should ingest attestations. Only effective if + the slasher-update-period is a multiple of the slot duration. + --slasher-update-period + Configure how often the slasher runs batch processing. + + --slasher-validator-chunk-size + Number of validators per chunk stored on disk. + + --slots-per-restore-point + Specifies how often a freezer DB restore point should be stored. Cannot be changed after initialization. + [default: 8192 (mainnet) or 64 (minimal)] + --suggested-fee-recipient + Emergency fallback fee recipient for use in case the validator client does not have one configured. You + should set this flag on the validator client instead of (or in addition to) setting it here. + --target-peers + The target number of peers. + + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. + Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if + the user has a clear understanding that the broad Ethereum community has elected to override the terminal + PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely + careful with this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal + integer (not a hex value). 
This flag should only be used if the user has a clear understanding that the + broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will + cause your node to experience a consensus failure. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective + if there is no existing database. + --trusted-peers + One or more comma-delimited trusted peer ids which always have the highest score according to the peer + scoring system. + --trusted-setup-file-override + Path to a json file containing the trusted setup params. NOTE: This will override the trusted setup that is + generated from the mainnet kzg ceremony. Use with caution + --validator-monitor-file + As per --validator-monitor-pubkeys, but the comma-separated list is contained within a file at the given + path. + --validator-monitor-individual-tracking-threshold + Once the validator monitor reaches this number of local validators it will stop collecting per-validator + Prometheus metrics and issuing per-validator logs. Instead, it will provide aggregate metrics and logs. This + avoids infeasibly high cardinality in the Prometheus database and high log volume when using many + validators. Defaults to 64. + --validator-monitor-pubkeys + A comma-separated list of 0x-prefixed validator public keys. These validators will receive special + monitoring and additional logging. + --wss-checkpoint + Specify a weak subjectivity checkpoint in `block_root:epoch` format to verify the node's sync against. The + block root should be 0x-prefixed. Note that this flag is for verification only, to perform a checkpoint sync + from a recent state use --checkpoint-sync-url. +``` \ No newline at end of file diff --git a/book/src/help_general.md b/book/src/help_general.md new file mode 100644 index 00000000000..7169884cdc5 --- /dev/null +++ b/book/src/help_general.md @@ -0,0 +1,107 @@ +# Lighthouse General Commands + +``` +Sigma Prime +Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon node, a validator client and utilities for managing +validator accounts. + +USAGE: + lighthouse [FLAGS] [OPTIONS] [SUBCOMMAND] + +FLAGS: + --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will + generally increase memory usage, it should only be provided when debugging + specific memory allocation issues. + -l Enables environment logging giving access to sub-protocol logs such as discv5 + and libp2p + -h, --help Prints help information + --log-color Force outputting colors when emitting logs to the terminal. + --logfile-compress If present, compress old log files. This can help reduce the space needed to + store old logs. + --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be + read by any user on the machine. Note that logs can often contain sensitive + information about your validator and so this flag should be used with caution. + For Windows users, the log file permissions will be inherited from the parent + folder. + -V, --version Prints version information + +OPTIONS: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. 
Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag. Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. [default: 180] + + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, + chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause + your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. + Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if + the user has a clear understanding that the broad Ethereum community has elected to override the terminal + PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely + careful with this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal + integer (not a hex value). This flag should only be used if the user has a clear understanding that the + broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will
Incorrect use of this flag will + cause your node to experience a consensus failure. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective + if there is no existing database. + +SUBCOMMANDS: + account_manager Utilities for generating and managing Ethereum 2.0 accounts. [aliases: a, am, account, + account_manager] + beacon_node The primary component which connects to the Ethereum 2.0 P2P network and downloads, + verifies and stores blocks. Provides a HTTP API for querying the beacon chain and + publishing messages to the network. [aliases: b, bn, beacon] + boot_node Start a special Lighthouse process that only serves as a discv5 boot-node. This process + will *not* import blocks or perform most typical beacon node functions. Instead, it will + simply run the discv5 service and assist nodes on the network to discover each other. This + is the recommended way to provide a network boot-node since it has a reduced attack surface + compared to a full beacon node. + database_manager Manage a beacon node database [aliases: db] + help Prints this message or the help of the given subcommand(s) + validator_client When connected to a beacon node, performs the duties of a staked validator (e.g., proposing + blocks and attestations). [aliases: v, vc, validator] + validator_manager Utilities for managing a Lighthouse validator client via the HTTP API. [aliases: vm, + validator-manager, validator_manager] +``` \ No newline at end of file diff --git a/book/src/help_vc.md b/book/src/help_vc.md new file mode 100644 index 00000000000..4471b0e1044 --- /dev/null +++ b/book/src/help_vc.md @@ -0,0 +1,202 @@ +# Validator Client + +``` +When connected to a beacon node, performs the duties of a staked validator (e.g., proposing blocks and attestations). + +USAGE: + lighthouse validator_client [FLAGS] [OPTIONS] + +FLAGS: + --builder-proposals + If this flag is set, Lighthouse will query the Beacon Node for only block headers during proposals and will + sign over headers. Useful for outsourcing execution payload construction during proposals. + --disable-auto-discover + If present, do not attempt to discover new validators in the validators-dir. Validators will need to be + manually added to the validator_definitions.yml file. + --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag will generally increase memory usage, + it should only be provided when debugging specific memory allocation issues. + --disable-run-on-all + DEPRECATED. Use --broadcast. By default, Lighthouse publishes attestation, sync committee subscriptions and + proposer preparation messages to all beacon nodes provided in the `--beacon-nodes flag`. This option changes + that behaviour such that these api calls only go out to the first available and synced beacon node + --enable-doppelganger-protection + If this flag is set, Lighthouse will delay startup for three epochs and monitor for messages on the network + by any of the validators managed by this client. This will result in three (possibly four) epochs worth of + missed attestations. If an attestation is detected during this period, it means it is very likely that you + are running a second validator client with the same keys. This validator client will immediately shutdown if + this is detected in order to avoid potentially committing a slashable offense. 
Use this flag in order to + ENABLE this functionality, without this flag Lighthouse will begin attesting immediately. + --enable-high-validator-count-metrics + Enable per validator metrics for > 64 validators. Note: This flag is automatically enabled for <= 64 + validators. Enabling this metric for higher validator counts will lead to higher volume of prometheus + metrics being collected. + -h, --help Prints help information + --http Enable the RESTful HTTP API server. Disabled by default. + --http-allow-keystore-export + If present, allow access to the DELETE /lighthouse/keystores HTTP API method, which allows exporting + keystores and passwords to HTTP API consumers who have access to the API token. This method is useful for + exporting validators, however it should be used with caution since it exposes private key data to authorized + users. + --http-store-passwords-in-secrets-dir + If present, any validators created via the HTTP will have keystore passwords stored in the secrets-dir + rather than the validator definitions file. + --init-slashing-protection + If present, do not require the slashing protection database to exist before running. You SHOULD NOT use this + flag unless you're certain that a new slashing protection database is required. Usually, your database will + have been initialized when you imported your validator keys. If you misplace your database and then run with + this flag you risk being slashed. + --log-color Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space needed to store old logs. + + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they can be read by any user on the + machine. Note that logs can often contain sensitive information about your validator and so this flag should + be used with caution. For Windows users, the log file permissions will be inherited from the parent folder. + --metrics Enable the Prometheus metrics HTTP server. Disabled by default. + --unencrypted-http-transport + This is a safety flag to ensure that the user is aware that the http transport is unencrypted and using a + custom HTTP address is unsafe. + --use-long-timeouts + If present, the validator client will use longer timeouts for requests made to the beacon node. This flag is + generally not recommended, longer timeouts can cause missed duties when fallbacks are used. + -V, --version Prints version information + +OPTIONS: + --beacon-nodes + Comma-separated addresses to one or more beacon node HTTP APIs. Default is http://localhost:5052. + + --beacon-nodes-tls-certs + Comma-separated paths to custom TLS certificates to use when connecting to a beacon node (and/or proposer + node). These certificates must be in PEM format and are used in addition to the OS trust store. Commas must + only be used as a delimiter, and must not be part of the certificate path. + --broadcast + Comma-separated list of beacon API topics to broadcast to all beacon nodes. Possible values are: none, + attestations, blocks, subscriptions, sync-committee. Default (when flag is omitted) is to broadcast + subscriptions only. + --builder-registration-timestamp-override + This flag takes a unix timestamp value that will be used to override the timestamp used in the builder api + registration + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. 
Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --gas-limit + The gas limit to be used in all builder proposals for all validators managed by this validator client. Note + this will not necessarily be used if the gas limit set here moves too far from the previous block's gas + limit. [default: 30,000,000] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. [default: 180] + + --graffiti + Specify your custom graffiti to be included in blocks. + + --graffiti-file + Specify a graffiti file to load validator graffitis from. + + --http-address
+ Set the address for the HTTP address. The HTTP server is not encrypted and therefore it is unsafe to publish + on a public network. When this flag is used, it additionally requires the explicit use of the + `--unencrypted-http-transport` flag to ensure the user is aware of the risks involved. For access via the + Internet, users should apply transport-layer security like a HTTPS reverse-proxy or SSH tunnelling. + --http-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not + recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5062). + --http-port + Set the listen TCP port for the RESTful HTTP API server. + + --latency-measurement-service + Set to 'true' to enable a service that periodically attempts to measure latency to BNs. Set to 'false' to + disable. [default: true] + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --metrics-address
+ Set the listen address for the Prometheus metrics HTTP server. + + --metrics-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not + recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5064). + --metrics-port + Set the listen TCP port for the Prometheus metrics HTTP server. + + --monitoring-endpoint
+ Enables the monitoring service for sending system metrics to a remote endpoint. This can be used to monitor
+ your setup on certain services (e.g. beaconcha.in). This flag sets the endpoint where the beacon node
+ metrics will be sent. Note: This will send information to a remote server which may identify and associate
+ your validators, IP address and other personal information. Always use an HTTPS connection and never provide
+ an untrusted URL.
+ --monitoring-endpoint-period
+ Defines how many seconds to wait between each message sent to the monitoring-endpoint. Default: 60s
+
+ --network
+ Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis,
+ chiado, sepolia, holesky]
+ --proposer-nodes
+ Comma-separated addresses to one or more beacon node HTTP APIs. These specify nodes that are used to send
+ beacon block proposals. A failure will revert back to the standard beacon nodes specified in --beacon-nodes.
+ --safe-slots-to-import-optimistically
+ Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should
+ only be used if the user has a clear understanding that the broad Ethereum community has elected to override
+ this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause
+ your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag.
+ --secrets-dir
+ The directory which contains the password to unlock the validator voting keypairs. Each password should be
+ contained in a file where the name is the 0x-prefixed hex representation of the validator's voting public
+ key. Defaults to ~/.lighthouse/{network}/secrets.
+ --suggested-fee-recipient
+ Once the merge has happened, this address will receive transaction fees from blocks proposed by this
+ validator client. If a fee recipient is configured in the validator definitions it takes priority over this
+ value.
+ --terminal-block-hash-epoch-override
+ Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should
+ only be used if the user has a clear understanding that the broad Ethereum community has elected to override
+ the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure.
+ Be extremely careful with this flag.
+ --terminal-block-hash-override
+ Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if
+ the user has a clear understanding that the broad Ethereum community has elected to override the terminal
+ PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely
+ careful with this flag.
+ --terminal-total-difficulty-override
+ Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal
+ integer (not a hex value). This flag should only be used if the user has a clear understanding that the
+ broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will
+ cause your node to experience a consensus failure. Be extremely careful with this flag.
+ -t, --testnet-dir
+ Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective
+ if there is no existing database.
+ --validator-registration-batch-size
+ Defines the number of validators per validator/register_validator request sent to the BN.
This value can be + reduced to avoid timeouts from builders. [default: 500] + --validators-dir + The directory which contains the validator keystores, deposit data for each validator along with the common + slashing protection database and the validator_definitions.yml +``` \ No newline at end of file diff --git a/book/src/help_vm.md b/book/src/help_vm.md new file mode 100644 index 00000000000..fa08aa4f65f --- /dev/null +++ b/book/src/help_vm.md @@ -0,0 +1,97 @@ +# Validator Manager + +``` +Utilities for managing a Lighthouse validator client via the HTTP API. + +USAGE: + lighthouse validator_manager [FLAGS] [OPTIONS] [SUBCOMMAND] + +FLAGS: + --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will + generally increase memory usage, it should only be provided when debugging + specific memory allocation issues. + -h, --help Prints help information + --log-color Force outputting colors when emitting logs to the terminal. + --logfile-compress If present, compress old log files. This can help reduce the space needed to + store old logs. + --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be + read by any user on the machine. Note that logs can often contain sensitive + information about your validator and so this flag should be used with caution. + For Windows users, the log file permissions will be inherited from the parent + folder. + -V, --version Prints version information + +OPTIONS: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. [default: 180] + + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. 
[possible values: mainnet, prater, goerli, gnosis, + chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause + your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. + Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if + the user has a clear understanding that the broad Ethereum community has elected to override the terminal + PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely + careful with this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal + integer (not a hex value). This flag should only be used if the user has a clear understanding that the + broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will + cause your node to experience a consensus failure. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective + if there is no existing database. + +SUBCOMMANDS: + create Creates new validators from BIP-39 mnemonic. A JSON file will be created which contains all the + validator keystores and other validator data. This file can then be imported to a validator client + using the "import-validators" command. Another, optional JSON file is created which contains a list of + validator deposits in the same format as the "ethereum/staking-deposit-cli" tool. + help Prints this message or the help of the given subcommand(s) + import Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file + which can be generated using the "create-validators" command. + move Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file + which can be generated using the "create-validators" command. This command only supports validators + signing via a keystore on the local file system (i.e., not Web3Signer validators). +``` \ No newline at end of file diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md new file mode 100644 index 00000000000..505ea8638f9 --- /dev/null +++ b/book/src/help_vm_create.md @@ -0,0 +1,129 @@ +# Validator Manager Create + +``` +Creates new validators from BIP-39 mnemonic. A JSON file will be created which contains all the validator keystores and +other validator data. This file can then be imported to a validator client using the "import-validators" command. +Another, optional JSON file is created which contains a list of validator deposits in the same format as the +"ethereum/staking-deposit-cli" tool. 
+ +USAGE: + lighthouse validator_manager create [FLAGS] [OPTIONS] --output-path + +FLAGS: + --disable-deposits When provided don't generate the deposits JSON file that is commonly used + for submitting validator deposits via a web UI. Using this flag will save + several seconds per validator if the user has an alternate strategy for + submitting deposits. + --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + --force-bls-withdrawal-credentials If present, allows BLS withdrawal credentials rather than an execution + address. This is not recommended. + -h, --help Prints help information + --log-color Force outputting colors when emitting logs to the terminal. + --logfile-compress If present, compress old log files. This can help reduce the space needed + to store old logs. + --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can + be read by any user on the machine. Note that logs can often contain + sensitive information about your validator and so this flag should be used + with caution. For Windows users, the log file permissions will be + inherited from the parent folder. + --specify-voting-keystore-password If present, the user will be prompted to enter the voting keystore + password that will be used to encrypt the voting keystores. If this flag + is not provided, a random password will be used. It is not necessary to + keep backups of voting keystore passwords if the mnemonic is safely backed + up. + --stdin-inputs If present, read all user inputs from stdin instead of tty. + -V, --version Prints version information + +OPTIONS: + --beacon-node + A HTTP(S) address of a beacon node using the beacon-API. If this value is provided, an error will be raised + if any validator key here is already known as a validator by that beacon node. This helps prevent the same + validator being created twice and therefore slashable conditions. + --builder-proposals + When provided, all created validators will attempt to create blocks via builder rather than the local EL. + [possible values: true, false] + --count + The number of validators to create, regardless of how many already exist + + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --deposit-gwei + The GWEI value of the deposit amount. Defaults to the minimum amount required for an active validator + (MAX_EFFECTIVE_BALANCE) + --eth1-withdrawal-address + If this field is set, the given eth1 address will be used to create the withdrawal credentials. Otherwise, + it will generate withdrawal credentials with the mnemonic-derived withdrawal public key in EIP-2334 format. + --first-index + The first of consecutive key indexes you wish to create. [default: 0] + + --gas-limit + All created validators will use this gas limit. It is recommended to leave this as the default value by not + specifying this flag. 
+ --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. [default: 180] + + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --mnemonic-path If present, the mnemonic will be read in from this file. + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, + chiado, sepolia, holesky] + --output-path + The path to a directory where the validator and (optionally) deposits files will be created. The directory + will be created if it does not exist. + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause + your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. + --suggested-fee-recipient + All created validators will use this value for the suggested fee recipient. Omit this flag to use the + default value from the VC. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. + Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if + the user has a clear understanding that the broad Ethereum community has elected to override the terminal + PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely + careful with this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal + integer (not a hex value). 
This flag should only be used if the user has a clear understanding that the + broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will + cause your node to experience a consensus failure. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective + if there is no existing database. +``` \ No newline at end of file diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md new file mode 100644 index 00000000000..3960a55f1a2 --- /dev/null +++ b/book/src/help_vm_import.md @@ -0,0 +1,101 @@ +# Validator Manager Import + +``` +Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be +generated using the "create-validators" command. + +USAGE: + lighthouse validator_manager import [FLAGS] [OPTIONS] --validators-file + +FLAGS: + --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will + generally increase memory usage, it should only be provided when debugging + specific memory allocation issues. + -h, --help Prints help information + --ignore-duplicates If present, ignore any validators which already exist on the VC. Without this + flag, the process will terminate without making any changes. This flag should + be used with caution, whilst it does not directly cause slashable conditions, + it might be an indicator that something is amiss. Users should also be careful + to avoid submitting duplicate deposits for validators that already exist on the + VC. + --log-color Force outputting colors when emitting logs to the terminal. + --logfile-compress If present, compress old log files. This can help reduce the space needed to + store old logs. + --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be + read by any user on the machine. Note that logs can often contain sensitive + information about your validator and so this flag should be used with caution. + For Windows users, the log file permissions will be inherited from the parent + folder. + -V, --version Prints version information + +OPTIONS: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. [default: 180] + + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. 
Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, + chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause + your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. + Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if + the user has a clear understanding that the broad Ethereum community has elected to override the terminal + PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely + careful with this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal + integer (not a hex value). This flag should only be used if the user has a clear understanding that the + broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will + cause your node to experience a consensus failure. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective + if there is no existing database. + --validators-file + The path to a JSON file containing a list of validators to be imported to the validator client. This file is + usually named "validators.json". + --vc-token + The file containing a token required by the validator client. + + --vc-url + A HTTP(S) address of a validator client using the keymanager-API. If this value is not supplied then a 'dry + run' will be conducted where no changes are made to the validator client. [default: http://localhost:5062] +``` \ No newline at end of file diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md new file mode 100644 index 00000000000..dea440dca9e --- /dev/null +++ b/book/src/help_vm_move.md @@ -0,0 +1,112 @@ +# Validator Manager Move + +``` +Uploads validators to a validator client using the HTTP API. 
The validators are defined in a JSON file which can be +generated using the "create-validators" command. This command only supports validators signing via a keystore on the +local file system (i.e., not Web3Signer validators). + +USAGE: + lighthouse validator_manager move [FLAGS] [OPTIONS] --dest-vc-token --dest-vc-url --src-vc-token --src-vc-url + +FLAGS: + --disable-log-timestamp If present, do not include timestamps in logging output. + --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will + generally increase memory usage, it should only be provided when debugging + specific memory allocation issues. + -h, --help Prints help information + --log-color Force outputting colors when emitting logs to the terminal. + --logfile-compress If present, compress old log files. This can help reduce the space needed to + store old logs. + --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be + read by any user on the machine. Note that logs can often contain sensitive + information about your validator and so this flag should be used with caution. + For Windows users, the log file permissions will be inherited from the parent + folder. + --stdin-inputs If present, read all user inputs from stdin instead of tty. + -V, --version Prints version information + +OPTIONS: + --builder-proposals + When provided, all created validators will attempt to create blocks via builder rather than the local EL. + [possible values: true, false] + --count The number of validators to move. + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and databases. Defaults to + $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify + separate custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: + info, debug, trace, warn, error, crit] + --dest-vc-token + The file containing a token required by the destination validator client. + + --dest-vc-url + A HTTP(S) address of a validator client using the keymanager-API. This validator client is the "destination" + and will have new validators added as they are removed from the "source" validator client. + --gas-limit + All created validators will use this gas limit. It is recommended to leave this as the default value by not + specifying this flag. + --genesis-state-url + A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server + URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may + be used. If the genesis state is already included in this binary then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. [default: 180] + + --log-format + Specifies the log format used when emitting logs to the terminal. [possible values: JSON] + + --logfile + File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a + new log file is generated where future logs are stored. Once the number of log files exceeds the value + specified in `--logfile-max-number` the oldest log file will be overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. 
[default: debug] [possible values: info, + debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] + + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, background file logging is disabled. + [default: 5] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is + disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, + chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause + your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. + --src-vc-token + The file containing a token required by the source validator client. + + --src-vc-url + A HTTP(S) address of a validator client using the keymanager-API. This validator client is the "source" and + contains the validators that are to be moved. + --suggested-fee-recipient + All created validators will use this value for the suggested fee recipient. Omit this flag to use the + default value from the VC. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should + only be used if the user has a clear understanding that the broad Ethereum community has elected to override + the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. + Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if + the user has a clear understanding that the broad Ethereum community has elected to override the terminal + PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely + careful with this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal + integer (not a hex value). This flag should only be used if the user has a clear understanding that the + broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will + cause your node to experience a consensus failure. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective + if there is no existing database. + --validators + The validators to be moved. Either a list of 0x-prefixed validator pubkeys or the keyword "all". +``` \ No newline at end of file diff --git a/scripts/cli.sh b/scripts/cli.sh new file mode 100755 index 00000000000..d9def7624fb --- /dev/null +++ b/scripts/cli.sh @@ -0,0 +1,98 @@ +#! /usr/bin/env bash + +# IMPORTANT +# This script should NOT be run directly. +# Run `make cli` from the root of the repository instead. 
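+#
+# What the script does: regenerate each CLI help page below via `--help`,
+# compare it with the committed book/src/help_*.md file, refresh any file
+# that differs, and exit non-zero when something changed so CI fails on
+# stale documentation.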
+ +set -e + +# A function to generate formatted .md files +write_to_file() { + local cmd="$1" + local file="$2" + local program="$3" + + # Remove first line of cmd to get rid of commit specific numbers. + cmd=${cmd#*$'\n'} + + # We need to add the header and the backticks to create the code block. + printf "# %s\n\n\`\`\`\n%s\n\`\`\`" "$program" "$cmd" > "$file" +} + +CMD=./target/release/lighthouse + +# Store all help strings in variables. +general_cli=$($CMD --help) +bn_cli=$($CMD bn --help) +vc_cli=$($CMD vc --help) +vm_cli=$($CMD vm --help) +vm_cli_create=$($CMD vm create --help) +vm_cli_import=$($CMD vm import --help) +vm_cli_move=$($CMD vm move --help) + +general=./help_general.md +bn=./help_bn.md +vc=./help_vc.md +am=./help_am.md +vm=./help_vm.md +vm_create=./help_vm_create.md +vm_import=./help_vm_import.md +vm_move=./help_vm_move.md + +# create .md files +write_to_file "$general_cli" "$general" "Lighthouse General Commands" +write_to_file "$bn_cli" "$bn" "Beacon Node" +write_to_file "$vc_cli" "$vc" "Validator Client" +write_to_file "$vm_cli" "$vm" "Validator Manager" +write_to_file "$vm_cli_create" "$vm_create" "Validator Manager Create" +write_to_file "$vm_cli_import" "$vm_import" "Validator Manager Import" +write_to_file "$vm_cli_move" "$vm_move" "Validator Manager Move" + +#input 1 = $1 = files; input 2 = $2 = new files +files=(./book/src/help_general.md ./book/src/help_bn.md ./book/src/help_vc.md ./book/src/help_vm.md ./book/src/help_vm_create.md ./book/src/help_vm_import.md ./book/src/help_vm_move.md) +new_files=($general $bn $vc $vm $vm_create $vm_import $vm_move) + +# function to check +check() { + local file="$1" + local new_file="$2" + + if [[ -f $file ]]; then # check for existence of file + diff=$(diff $file $new_file || :) + else + cp $new_file $file + changes=true + echo "$file is not found, it has just been created" + fi + + if [[ -z $diff ]]; then # check for difference + : # do nothing + else + cp $new_file $file + changes=true + echo "$file has been updated" + fi +} + +# define changes as false +changes=false +# call check function to check for each help file +check ${files[0]} ${new_files[0]} +check ${files[1]} ${new_files[1]} +check ${files[2]} ${new_files[2]} +check ${files[3]} ${new_files[3]} +check ${files[4]} ${new_files[4]} +check ${files[5]} ${new_files[5]} +check ${files[6]} ${new_files[6]} + +# remove help files +rm -f help_general.md help_bn.md help_vc.md help_am.md help_vm.md help_vm_create.md help_vm_import.md help_vm_move.md + +# only exit at the very end +if [[ $changes == true ]]; then + echo "Exiting with error to indicate changes occurred..." + exit 1 +else + echo "CLI help texts are up to date." 
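+  # Generated output matches the committed docs; nothing to update.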
+ exit 0 +fi From 8ba39cbf2c2c1f4c261f9eb0c66341b8920d01b6 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 6 Dec 2023 17:02:46 -0800 Subject: [PATCH 07/19] Implement graffiti management API (#4951) * implement get graffiti * add set graffiti * add set graffiti * delete graffiti * set graffiti * set graffiti * fmt * added tests * add graffiti file check * update * fixed delete req * remove unused code * changes based on feedback * changes based on feedback * invalid auth test plus lint * fmt * remove unneeded async --- common/eth2/src/lighthouse_vc/http_client.rs | 61 ++++++++++++ common/eth2/src/lighthouse_vc/std_types.rs | 8 +- common/eth2/src/lighthouse_vc/types.rs | 5 + validator_client/src/http_api/graffiti.rs | 80 ++++++++++++++++ validator_client/src/http_api/mod.rs | 93 ++++++++++++++++++- validator_client/src/http_api/tests.rs | 82 ++++++++++++++++ .../src/initialized_validators.rs | 68 ++++++++++++++ 7 files changed, 394 insertions(+), 3 deletions(-) create mode 100644 validator_client/src/http_api/graffiti.rs diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index b2d53c5e08d..2e6756c63e8 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -226,11 +226,32 @@ impl ValidatorClientHttpClient { ok_or_error(response).await } + /// Perform a HTTP DELETE request, returning the `Response` for further processing. + async fn delete_response(&self, url: U) -> Result { + let response = self + .client + .delete(url) + .headers(self.headers()?) + .send() + .await + .map_err(Error::from)?; + ok_or_error(response).await + } + async fn get(&self, url: U) -> Result { let response = self.get_response(url).await?; self.signed_json(response).await } + async fn delete(&self, url: U) -> Result<(), Error> { + let response = self.delete_response(url).await?; + if response.status().is_success() { + Ok(()) + } else { + Err(Error::StatusCode(response.status())) + } + } + async fn get_unsigned(&self, url: U) -> Result { self.get_response(url) .await? @@ -537,6 +558,18 @@ impl ValidatorClientHttpClient { Ok(url) } + fn make_graffiti_url(&self, pubkey: &PublicKeyBytes) -> Result { + let mut url = self.server.full.clone(); + url.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
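+            // Build the keymanager endpoint path: /eth/v1/validator/{pubkey}/graffiti.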
+ .push("eth") + .push("v1") + .push("validator") + .push(&pubkey.to_string()) + .push("graffiti"); + Ok(url) + } + fn make_gas_limit_url(&self, pubkey: &PublicKeyBytes) -> Result { let mut url = self.server.full.clone(); url.path_segments_mut() @@ -684,6 +717,34 @@ impl ValidatorClientHttpClient { self.post(path, &()).await } + + /// `GET /eth/v1/validator/{pubkey}/graffiti` + pub async fn get_graffiti( + &self, + pubkey: &PublicKeyBytes, + ) -> Result { + let url = self.make_graffiti_url(pubkey)?; + self.get(url) + .await + .map(|generic: GenericResponse| generic.data) + } + + /// `POST /eth/v1/validator/{pubkey}/graffiti` + pub async fn set_graffiti( + &self, + pubkey: &PublicKeyBytes, + graffiti: GraffitiString, + ) -> Result<(), Error> { + let url = self.make_graffiti_url(pubkey)?; + let set_graffiti_request = SetGraffitiRequest { graffiti }; + self.post(url, &set_graffiti_request).await + } + + /// `DELETE /eth/v1/validator/{pubkey}/graffiti` + pub async fn delete_graffiti(&self, pubkey: &PublicKeyBytes) -> Result<(), Error> { + let url = self.make_graffiti_url(pubkey)?; + self.delete(url).await + } } /// Returns `Ok(response)` if the response is a `200 OK` response or a diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index 33e2f764efe..ab90d336fa4 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -1,7 +1,7 @@ use account_utils::ZeroizeString; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; -use types::{Address, PublicKeyBytes}; +use types::{Address, Graffiti, PublicKeyBytes}; pub use slashing_protection::interchange::Interchange; @@ -172,3 +172,9 @@ pub enum DeleteRemotekeyStatus { pub struct DeleteRemotekeysResponse { pub data: Vec>, } + +#[derive(Debug, Deserialize, Serialize)] +pub struct GetGraffitiResponse { + pub pubkey: PublicKeyBytes, + pub graffiti: Graffiti, +} diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index f1a91b4ef1e..230293f1b81 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -168,3 +168,8 @@ pub struct SingleExportKeystoresResponse { #[serde(skip_serializing_if = "Option::is_none")] pub validating_keystore_password: Option, } + +#[derive(Serialize, Deserialize, Debug)] +pub struct SetGraffitiRequest { + pub graffiti: GraffitiString, +} diff --git a/validator_client/src/http_api/graffiti.rs b/validator_client/src/http_api/graffiti.rs new file mode 100644 index 00000000000..79d4fd61f3a --- /dev/null +++ b/validator_client/src/http_api/graffiti.rs @@ -0,0 +1,80 @@ +use crate::validator_store::ValidatorStore; +use bls::PublicKey; +use slot_clock::SlotClock; +use std::sync::Arc; +use types::{graffiti::GraffitiString, EthSpec, Graffiti}; + +pub fn get_graffiti( + validator_pubkey: PublicKey, + validator_store: Arc>, + graffiti_flag: Option, +) -> Result { + let initialized_validators_rw_lock = validator_store.initialized_validators(); + let initialized_validators = initialized_validators_rw_lock.read(); + match initialized_validators.validator(&validator_pubkey.compress()) { + None => Err(warp_utils::reject::custom_not_found( + "The key was not found on the server".to_string(), + )), + Some(_) => { + let Some(graffiti) = initialized_validators.graffiti(&validator_pubkey.into()) else { + return graffiti_flag.ok_or(warp_utils::reject::custom_server_error( + "No graffiti found, unable to return the process-wide default".to_string(), + )); + }; + 
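+            // A graffiti configured for this validator takes precedence over
+            // the process-wide `--graffiti` flag, which is only the fallback.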
Ok(graffiti) + } + } +} + +pub fn set_graffiti( + validator_pubkey: PublicKey, + graffiti: GraffitiString, + validator_store: Arc>, +) -> Result<(), warp::Rejection> { + let initialized_validators_rw_lock = validator_store.initialized_validators(); + let mut initialized_validators = initialized_validators_rw_lock.write(); + match initialized_validators.validator(&validator_pubkey.compress()) { + None => Err(warp_utils::reject::custom_not_found( + "The key was not found on the server, nothing to update".to_string(), + )), + Some(initialized_validator) => { + if initialized_validator.get_graffiti() == Some(graffiti.clone().into()) { + Ok(()) + } else { + initialized_validators + .set_graffiti(&validator_pubkey, graffiti) + .map_err(|_| { + warp_utils::reject::custom_server_error( + "A graffiti was found, but failed to be updated.".to_string(), + ) + }) + } + } + } +} + +pub fn delete_graffiti( + validator_pubkey: PublicKey, + validator_store: Arc>, +) -> Result<(), warp::Rejection> { + let initialized_validators_rw_lock = validator_store.initialized_validators(); + let mut initialized_validators = initialized_validators_rw_lock.write(); + match initialized_validators.validator(&validator_pubkey.compress()) { + None => Err(warp_utils::reject::custom_not_found( + "The key was not found on the server, nothing to delete".to_string(), + )), + Some(initialized_validator) => { + if initialized_validator.get_graffiti().is_none() { + Ok(()) + } else { + initialized_validators + .delete_graffiti(&validator_pubkey) + .map_err(|_| { + warp_utils::reject::custom_server_error( + "A graffiti was found, but failed to be removed.".to_string(), + ) + }) + } + } + } +} diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 669edc67186..c65beb7390a 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -1,12 +1,15 @@ mod api_secret; mod create_signed_voluntary_exit; mod create_validator; +mod graffiti; mod keystores; mod remotekeys; mod tests; pub mod test_utils; +use crate::http_api::graffiti::{delete_graffiti, get_graffiti, set_graffiti}; + use crate::http_api::create_signed_voluntary_exit::create_signed_voluntary_exit; use crate::{determine_graffiti, GraffitiFile, ValidatorStore}; use account_utils::{ @@ -19,7 +22,10 @@ use create_validator::{ }; use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, - types::{self as api_types, GenericResponse, Graffiti, PublicKey, PublicKeyBytes}, + types::{ + self as api_types, GenericResponse, GetGraffitiResponse, Graffiti, PublicKey, + PublicKeyBytes, SetGraffitiRequest, + }, }; use lighthouse_version::version_with_platform; use logging::SSELoggingComponents; @@ -653,7 +659,7 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(validator_store_filter.clone()) - .and(graffiti_file_filter) + .and(graffiti_file_filter.clone()) .and(signer.clone()) .and(task_executor_filter.clone()) .and_then( @@ -1028,6 +1034,86 @@ pub fn serve( }, ); + // GET /eth/v1/validator/{pubkey}/graffiti + let get_graffiti = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::()) + .and(warp::path("graffiti")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(graffiti_flag_filter) + .and(signer.clone()) + .and_then( + |pubkey: PublicKey, + validator_store: Arc>, + graffiti_flag: Option, + signer| { + blocking_signed_json_task(signer, move || { + let graffiti = get_graffiti(pubkey.clone(), validator_store, graffiti_flag)?; 
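+                        // Return the value in the `{ "data": ... }` envelope
+                        // shared by the other keymanager endpoints.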
+ Ok(GenericResponse::from(GetGraffitiResponse { + pubkey: pubkey.into(), + graffiti, + })) + }) + }, + ); + + // POST /eth/v1/validator/{pubkey}/graffiti + let post_graffiti = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::()) + .and(warp::path("graffiti")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(graffiti_file_filter.clone()) + .and(signer.clone()) + .and_then( + |pubkey: PublicKey, + query: SetGraffitiRequest, + validator_store: Arc>, + graffiti_file: Option, + signer| { + blocking_signed_json_task(signer, move || { + if graffiti_file.is_some() { + return Err(warp_utils::reject::invalid_auth( + "Unable to update graffiti as the \"--graffiti-file\" flag is set" + .to_string(), + )); + } + set_graffiti(pubkey.clone(), query.graffiti, validator_store) + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::ACCEPTED)); + + // DELETE /eth/v1/validator/{pubkey}/graffiti + let delete_graffiti = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::()) + .and(warp::path("graffiti")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(graffiti_file_filter.clone()) + .and(signer.clone()) + .and_then( + |pubkey: PublicKey, + validator_store: Arc>, + graffiti_file: Option, + signer| { + blocking_signed_json_task(signer, move || { + if graffiti_file.is_some() { + return Err(warp_utils::reject::invalid_auth( + "Unable to delete graffiti as the \"--graffiti-file\" flag is set" + .to_string(), + )); + } + delete_graffiti(pubkey.clone(), validator_store) + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT)); + // GET /eth/v1/keystores let get_std_keystores = std_keystores .and(signer.clone()) @@ -1175,6 +1261,7 @@ pub fn serve( .or(get_lighthouse_ui_graffiti) .or(get_fee_recipient) .or(get_gas_limit) + .or(get_graffiti) .or(get_std_keystores) .or(get_std_remotekeys) .recover(warp_utils::reject::handle_rejection), @@ -1189,6 +1276,7 @@ pub fn serve( .or(post_gas_limit) .or(post_std_keystores) .or(post_std_remotekeys) + .or(post_graffiti) .recover(warp_utils::reject::handle_rejection), )) .or(warp::patch() @@ -1199,6 +1287,7 @@ pub fn serve( .or(delete_gas_limit) .or(delete_std_keystores) .or(delete_std_remotekeys) + .or(delete_graffiti) .recover(warp_utils::reject::handle_rejection), )), ) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 5f59e35c765..7de3cea21f2 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -640,6 +640,49 @@ impl ApiTester { self } + + pub async fn test_set_graffiti(self, index: usize, graffiti: &str) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + let graffiti_str = GraffitiString::from_str(graffiti).unwrap(); + let resp = self + .client + .set_graffiti(&validator.voting_pubkey, graffiti_str) + .await; + + assert!(resp.is_ok()); + + self + } + + pub async fn test_delete_graffiti(self, index: usize) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + let resp = self.client.get_graffiti(&validator.voting_pubkey).await; + + assert!(resp.is_ok()); + let old_graffiti = resp.unwrap().graffiti; + + let resp = self.client.delete_graffiti(&validator.voting_pubkey).await; + + assert!(resp.is_ok()); + + let resp = self.client.get_graffiti(&validator.voting_pubkey).await; + + assert!(resp.is_ok()); + 
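+        // Deletion removes the per-validator graffiti, so the endpoint should
+        // now serve the fallback value rather than the graffiti set earlier.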
assert_ne!(old_graffiti, resp.unwrap().graffiti); + + self + } + + pub async fn test_get_graffiti(self, index: usize, expected_graffiti: &str) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + let expected_graffiti_str = GraffitiString::from_str(expected_graffiti).unwrap(); + let resp = self.client.get_graffiti(&validator.voting_pubkey).await; + + assert!(resp.is_ok()); + assert_eq!(&resp.unwrap().graffiti, &expected_graffiti_str.into()); + + self + } } struct HdValidatorScenario { @@ -771,6 +814,20 @@ async fn routes_with_invalid_auth() { }) .await }) + .await + .test_with_invalid_auth(|client| async move { + client.delete_graffiti(&PublicKeyBytes::empty()).await + }) + .await + .test_with_invalid_auth(|client| async move { + client.get_graffiti(&PublicKeyBytes::empty()).await + }) + .await + .test_with_invalid_auth(|client| async move { + client + .set_graffiti(&PublicKeyBytes::empty(), GraffitiString::default()) + .await + }) .await; } @@ -954,6 +1011,31 @@ async fn validator_graffiti() { .await; } +#[tokio::test] +async fn validator_graffiti_api() { + ApiTester::new() + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_graffiti(0, "Mr F was here") + .await + .test_get_graffiti(0, "Mr F was here") + .await + .test_set_graffiti(0, "Uncle Bill was here") + .await + .test_get_graffiti(0, "Uncle Bill was here") + .await + .test_delete_graffiti(0) + .await; +} + #[tokio::test] async fn keystore_validator_creation() { ApiTester::new() diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index f15ea27c9b2..b65dad4c477 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -716,6 +716,74 @@ impl InitializedValidators { self.validators.get(public_key).and_then(|v| v.graffiti) } + /// Sets the `InitializedValidator` and `ValidatorDefinition` `graffiti` values. + /// + /// ## Notes + /// + /// Setting a validator `graffiti` will cause `self.definitions` to be updated and saved to + /// disk. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. + pub fn set_graffiti( + &mut self, + voting_public_key: &PublicKey, + graffiti: GraffitiString, + ) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.graffiti = Some(graffiti.clone()); + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.graffiti = Some(graffiti.into()); + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + Ok(()) + } + + /// Removes the `InitializedValidator` and `ValidatorDefinition` `graffiti` values. + /// + /// ## Notes + /// + /// Removing a validator `graffiti` will cause `self.definitions` to be updated and saved to + /// disk. The graffiti for the validator will then fall back to the process level default if + /// it is set. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. 
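+    /// Mirrors `set_graffiti` above, clearing the stored value instead of setting it.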
+ pub fn delete_graffiti(&mut self, voting_public_key: &PublicKey) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.graffiti = None; + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.graffiti = None; + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(()) + } + /// Returns a `HashMap` of `public_key` -> `graffiti` for all initialized validators. pub fn get_all_validators_graffiti(&self) -> HashMap<&PublicKeyBytes, Option> { let mut result = HashMap::new(); From 67e0569d9b15ab0c659ac2b515fa12229398d4f4 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 7 Dec 2023 15:12:06 +1100 Subject: [PATCH 08/19] Fix corrupted DB on networks where the first slot is skipped (Holesky) (#4985) * Fix zero block roots on skip slots. * Remove temporary comment, println code and unused imports. * Remove `println!` in test. --- .../src/schema_change/migration_schema_v18.rs | 3 +- beacon_node/beacon_chain/tests/store_tests.rs | 95 ++++++++++++++++--- beacon_node/store/src/hot_cold_store.rs | 51 +++++++++- 3 files changed, 135 insertions(+), 14 deletions(-) diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v18.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v18.rs index e7b68eb4184..04a9da84128 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v18.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v18.rs @@ -46,7 +46,8 @@ pub fn upgrade_to_v18( db: Arc>, log: Logger, ) -> Result, Error> { - db.heal_freezer_block_roots()?; + db.heal_freezer_block_roots_at_split()?; + db.heal_freezer_block_roots_at_genesis()?; info!(log, "Healed freezer block roots"); // No-op, even if Deneb has already occurred. The database is probably borked in this case, but diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 9f7199cf3cf..8ba099ec73c 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -26,6 +26,7 @@ use std::collections::HashSet; use std::convert::TryInto; use std::sync::Arc; use std::time::Duration; +use store::chunked_vector::Chunk; use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION, STATE_UPPER_LIMIT_NO_RETAIN}; use store::{ chunked_vector::{chunk_key, Field}, @@ -106,10 +107,10 @@ fn get_harness_generic( harness } -/// Tests that `store.heal_freezer_block_roots` inserts block roots between last restore point +/// Tests that `store.heal_freezer_block_roots_at_split` inserts block roots between last restore point /// slot and the split slot. #[tokio::test] -async fn heal_freezer_block_roots() { +async fn heal_freezer_block_roots_at_split() { // chunk_size is hard-coded to 128 let num_blocks_produced = E::slots_per_epoch() * 20; let db_path = tempdir().unwrap(); @@ -136,7 +137,7 @@ async fn heal_freezer_block_roots() { // Do a heal before deleting to make sure that it doesn't break. let last_restore_point_slot = Slot::new(16 * E::slots_per_epoch()); - store.heal_freezer_block_roots().unwrap(); + store.heal_freezer_block_roots_at_split().unwrap(); check_freezer_block_roots(&harness, last_restore_point_slot, split_slot); // Delete block roots between `last_restore_point_slot` and `split_slot`. 
@@ -164,7 +165,7 @@ async fn heal_freezer_block_roots() { assert!(matches!(block_root_err, store::Error::NoContinuationData)); // Re-insert block roots - store.heal_freezer_block_roots().unwrap(); + store.heal_freezer_block_roots_at_split().unwrap(); check_freezer_block_roots(&harness, last_restore_point_slot, split_slot); // Run for another two epochs to check that the invariant is maintained. @@ -243,7 +244,7 @@ async fn heal_freezer_block_roots_with_skip_slots() { assert!(matches!(block_root_err, store::Error::NoContinuationData)); // heal function - store.heal_freezer_block_roots().unwrap(); + store.heal_freezer_block_roots_at_split().unwrap(); check_freezer_block_roots(&harness, last_restore_point_slot, split_slot); // Run for another two epochs to check that the invariant is maintained. @@ -257,12 +258,84 @@ async fn heal_freezer_block_roots_with_skip_slots() { check_iterators(&harness); } -fn check_freezer_block_roots( - harness: &TestHarness, - last_restore_point_slot: Slot, - split_slot: Slot, -) { - for slot in (last_restore_point_slot.as_u64()..split_slot.as_u64()).map(Slot::new) { +/// Tests that `store.heal_freezer_block_roots_at_genesis` replaces 0x0 block roots between slot +/// 0 and the first non-skip slot with genesis block root. +#[tokio::test] +async fn heal_freezer_block_roots_at_genesis() { + // Run for a few epochs to ensure we're past finalization. + let num_blocks_produced = E::slots_per_epoch() * 4; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // Start with 2 skip slots. + harness.advance_slot(); + harness.advance_slot(); + + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Do a heal before deleting to make sure that it doesn't break. + store.heal_freezer_block_roots_at_genesis().unwrap(); + check_freezer_block_roots( + &harness, + Slot::new(0), + Epoch::new(1).end_slot(E::slots_per_epoch()), + ); + + // Write 0x0 block roots at slot 1 and slot 2. 
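+    // Freezer block roots are stored in fixed-size chunks (128 roots per chunk, per the
+    // note in the tests above), so the roots for slots 1 and 2 both live in chunk 0.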
+ let chunk_index = 0; + let chunk_db_key = chunk_key(chunk_index); + let mut chunk = + Chunk::::load(&store.cold_db, DBColumn::BeaconBlockRoots, &chunk_db_key) + .unwrap() + .unwrap(); + + chunk.values[1] = Hash256::zero(); + chunk.values[2] = Hash256::zero(); + + let mut ops = vec![]; + chunk + .store(DBColumn::BeaconBlockRoots, &chunk_db_key, &mut ops) + .unwrap(); + store.cold_db.do_atomically(ops).unwrap(); + + // Ensure the DB is corrupted + let block_roots = store + .forwards_block_roots_iterator_until( + Slot::new(1), + Slot::new(2), + || unreachable!(), + &harness.chain.spec, + ) + .unwrap() + .map(Result::unwrap) + .take(2) + .collect::>(); + assert_eq!( + block_roots, + vec![ + (Hash256::zero(), Slot::new(1)), + (Hash256::zero(), Slot::new(2)) + ] + ); + + // Insert genesis block roots at skip slots before first block slot + store.heal_freezer_block_roots_at_genesis().unwrap(); + check_freezer_block_roots( + &harness, + Slot::new(0), + Epoch::new(1).end_slot(E::slots_per_epoch()), + ); +} + +fn check_freezer_block_roots(harness: &TestHarness, start_slot: Slot, end_slot: Slot) { + for slot in (start_slot.as_u64()..end_slot.as_u64()).map(Slot::new) { let (block_root, result_slot) = harness .chain .store diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 43e14c30970..208dcfdb045 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -2216,7 +2216,7 @@ impl, Cold: ItemStore> HotColdDB /// This function fills in missing block roots between last restore point slot and split /// slot, if any. - pub fn heal_freezer_block_roots(&self) -> Result<(), Error> { + pub fn heal_freezer_block_roots_at_split(&self) -> Result<(), Error> { let split = self.get_split_info(); let last_restore_point_slot = (split.slot - 1) / self.config.slots_per_restore_point * self.config.slots_per_restore_point; @@ -2245,6 +2245,53 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } + pub fn heal_freezer_block_roots_at_genesis(&self) -> Result<(), Error> { + let oldest_block_slot = self.get_oldest_block_slot(); + let split_slot = self.get_split_slot(); + + // Check if backfill has been completed AND the freezer db has data in it + if oldest_block_slot != 0 || split_slot == 0 { + return Ok(()); + } + + let mut block_root_iter = self.forwards_block_roots_iterator_until( + Slot::new(0), + split_slot - 1, + || { + Err(Error::DBError { + message: "Should not require end state".to_string(), + }) + }, + &self.spec, + )?; + + let (genesis_block_root, _) = block_root_iter.next().ok_or_else(|| Error::DBError { + message: "Genesis block root missing".to_string(), + })??; + + let slots_to_fix = itertools::process_results(block_root_iter, |iter| { + iter.take_while(|(block_root, _)| block_root.is_zero()) + .map(|(_, slot)| slot) + .collect::>() + })?; + + let Some(first_slot) = slots_to_fix.first() else { + return Ok(()); + }; + + let mut chunk_writer = + ChunkWriter::::new(&self.cold_db, first_slot.as_usize())?; + let mut ops = vec![]; + for slot in slots_to_fix { + chunk_writer.set(slot.as_usize(), genesis_block_root, &mut ops)?; + } + + chunk_writer.write(&mut ops)?; + self.cold_db.do_atomically(ops)?; + + Ok(()) + } + /// Delete *all* states from the freezer database and update the anchor accordingly. 
/// /// WARNING: this method deletes the genesis state and replaces it with the provided @@ -2257,7 +2304,7 @@ impl, Cold: ItemStore> HotColdDB genesis_state: &BeaconState, ) -> Result<(), Error> { // Make sure there is no missing block roots before pruning - self.heal_freezer_block_roots()?; + self.heal_freezer_block_roots_at_split()?; // Update the anchor to use the dummy state upper limit and disable historic state storage. let old_anchor = self.get_anchor_info(); From 6c0c41c7acecbc4d149a3715eccae24d3d3de933 Mon Sep 17 00:00:00 2001 From: Divma <26765164+divagant-martian@users.noreply.github.com> Date: Thu, 7 Dec 2023 04:39:59 -0500 Subject: [PATCH 09/19] upgrade libp2p to v0.53.* (#4935) * update libp2p and address compiler errors * remove bandwidth logging from transport * use libp2p registry * make clippy happy * use rust 1.73 * correct rpc keep alive * remove comments and obsolte code * remove libp2p prefix * make clippy happy * use quic under facade * remove fast msg id * bubble up close statements * fix wrong comment --- Cargo.lock | 527 +++++++++--------- Dockerfile | 4 +- beacon_node/client/src/builder.rs | 14 +- beacon_node/lighthouse_network/Cargo.toml | 9 +- beacon_node/lighthouse_network/src/config.rs | 7 - .../lighthouse_network/src/discovery/mod.rs | 22 +- beacon_node/lighthouse_network/src/lib.rs | 1 - beacon_node/lighthouse_network/src/metrics.rs | 46 -- .../src/peer_manager/network_behaviour.rs | 22 +- .../lighthouse_network/src/rpc/handler.rs | 210 +++---- beacon_node/lighthouse_network/src/rpc/mod.rs | 88 ++- .../lighthouse_network/src/service/mod.rs | 91 +-- .../lighthouse_network/src/service/utils.rs | 48 +- .../lighthouse_network/tests/common.rs | 2 +- beacon_node/network/src/metrics.rs | 29 +- beacon_node/network/src/service.rs | 9 +- lighthouse/Cargo.toml | 2 +- 17 files changed, 526 insertions(+), 605 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0a1af70bb15..fba036d7086 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -336,6 +336,19 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "asynchronous-codec" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" +dependencies = [ + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite", +] + [[package]] name = "attohttpc" version = "0.16.3" @@ -478,9 +491,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.4" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" [[package]] name = "base64ct" @@ -1748,7 +1761,7 @@ dependencies = [ "hex", "hkdf", "lazy_static", - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", "lru 0.7.8", "more-asserts", @@ -1932,7 +1945,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "bytes", "ed25519-dalek", "hex", @@ -1945,18 +1958,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "enum-as-inner" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" -dependencies = [ - "heck", - "proc-macro2", - 
"quote", - "syn 1.0.109", -] - [[package]] name = "enum-as-inner" version = "0.6.0" @@ -2115,7 +2116,7 @@ dependencies = [ "proto_array", "psutil", "reqwest", - "ring", + "ring 0.16.20", "sensitive_url", "serde", "serde_json", @@ -2156,7 +2157,7 @@ dependencies = [ "bls", "hex", "num-bigint-dig", - "ring", + "ring 0.16.20", "sha2 0.9.9", "zeroize", ] @@ -2329,7 +2330,7 @@ checksum = "233dc6f434ce680dbabf4451ee3380cec46cb3c45d66660445a435619710dd35" dependencies = [ "cpufeatures", "lazy_static", - "ring", + "ring 0.16.20", "sha2 0.10.8", ] @@ -2780,9 +2781,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" dependencies = [ "futures-channel", "futures-core", @@ -2795,9 +2796,9 @@ dependencies = [ [[package]] name = "futures-bounded" -version = "0.1.0" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b07bbbe7d7e78809544c6f718d875627addc73a7c3582447abc052cd3dc67e0" +checksum = "e1e2774cc104e198ef3d3e1ff4ab40f86fa3245d6cb6a3a46174f21463cee173" dependencies = [ "futures-timer", "futures-util", @@ -2805,9 +2806,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" dependencies = [ "futures-core", "futures-sink", @@ -2815,15 +2816,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" dependencies = [ "futures-core", "futures-task", @@ -2833,9 +2834,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" [[package]] name = "futures-lite" @@ -2854,9 +2855,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", @@ -2875,15 +2876,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" [[package]] name = 
"futures-task" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" [[package]] name = "futures-ticker" @@ -2904,9 +2905,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" dependencies = [ "futures-channel", "futures-core", @@ -3141,7 +3142,7 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "bytes", "headers-core", "http", @@ -3192,6 +3193,52 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" +[[package]] +name = "hickory-proto" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "091a6fbccf4860009355e3efc52ff4acf37a63489aad7435372d44ceeb6fbbcf" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand", + "socket2 0.5.5", + "thiserror", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35b8f021164e6a984c9030023544c57789c51760065cd510572fedcfb04164e8" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot 0.12.1", + "rand", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "hkdf" version = "0.12.3" @@ -3449,17 +3496,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "0.4.0" @@ -3690,7 +3726,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.4", + "socket2 0.5.5", "widestring 1.0.2", "windows-sys 0.48.0", "winreg", @@ -3772,9 +3808,9 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.4", - "pem", - "ring", + "base64 0.21.5", + "pem 1.1.1", + "ring 0.16.20", "serde", "serde_json", "simple_asn1", @@ -3848,7 +3884,7 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" dependencies = [ - "spin", + "spin 0.5.2", ] [[package]] @@ -3979,9 +4015,9 @@ dependencies 
= [ [[package]] name = "libp2p" -version = "0.52.4" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94495eb319a85b70a68b85e2389a95bb3555c71c49025b78c691a854a7e6464" +checksum = "1252a34c693386829c34d44ccfbce86679d2a9a2c61f582863649bbf57f26260" dependencies = [ "bytes", "either", @@ -3991,7 +4027,7 @@ dependencies = [ "instant", "libp2p-allow-block-list", "libp2p-connection-limits", - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -4013,11 +4049,11 @@ dependencies = [ [[package]] name = "libp2p-allow-block-list" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311" +checksum = "107b238b794cb83ab53b74ad5dcf7cca3200899b72fe662840cfb52f5b0a32e6" dependencies = [ - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-identity", "libp2p-swarm", "void", @@ -4025,11 +4061,11 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58" +checksum = "f2af4b1e1a1d6c5005a59b42287c0a526bcce94d8d688e2e9233b18eb843ceb4" dependencies = [ - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-identity", "libp2p-swarm", "void", @@ -4063,30 +4099,58 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-core" +version = "0.41.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59c61b924474cf2c7edccca306693e798d797b85d004f4fef5689a7a3e6e8fe5" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "instant", + "libp2p-identity", + "multiaddr", + "multihash", + "multistream-select", + "once_cell", + "parking_lot 0.12.1", + "pin-project", + "quick-protobuf", + "rand", + "rw-stream-sink", + "smallvec", + "thiserror", + "tracing", + "unsigned-varint 0.7.2", + "void", +] + [[package]] name = "libp2p-dns" -version = "0.40.1" +version = "0.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6a18db73084b4da2871438f6239fef35190b05023de7656e877c18a00541a3b" +checksum = "d17cbcf7160ff35c3e8e560de4a068fe9d6cb777ea72840e48eb76ff9576c4b6" dependencies = [ "async-trait", "futures", - "libp2p-core", + "hickory-resolver", + "libp2p-core 0.41.1", "libp2p-identity", - "log", "parking_lot 0.12.1", "smallvec", - "trust-dns-resolver", + "tracing", ] [[package]] name = "libp2p-gossipsub" -version = "0.45.2" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f9624e2a843b655f1c1b8262b8d5de6f309413fca4d66f01bb0662429f84dc" +checksum = "201f0626acd8985fae7fdd318e86c954574b9eef2e5dec433936a19a0338393d" dependencies = [ - "asynchronous-codec", - "base64 0.21.4", + "asynchronous-codec 0.6.2", + "base64 0.21.5", "byteorder", "bytes", "either", @@ -4096,10 +4160,9 @@ dependencies = [ "getrandom", "hex_fmt", "instant", - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-identity", "libp2p-swarm", - "log", "prometheus-client", "quick-protobuf", "quick-protobuf-codec", @@ -4107,30 +4170,31 @@ dependencies = [ "regex", "sha2 0.10.8", "smallvec", + "tracing", "unsigned-varint 0.7.2", "void", ] [[package]] name = "libp2p-identify" -version = "0.43.1" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a96638a0a176bec0a4bcaebc1afa8cf909b114477209d7456ade52c61cd9cd" +checksum = 
"0544703553921214556f7567278b4f00cdd5052d29b0555ab88290cbfe54d81c" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.2", "either", "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-identity", "libp2p-swarm", - "log", "lru 0.12.0", "quick-protobuf", "quick-protobuf-codec", "smallvec", "thiserror", + "tracing", "void", ] @@ -4159,72 +4223,73 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.44.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42a2567c305232f5ef54185e9604579a894fd0674819402bb0ac0246da82f52a" +checksum = "68f273a551ee9d0a79695f75afaeafb1371459dec69c29555e8a73a35608e96a" dependencies = [ "data-encoding", "futures", + "hickory-proto", "if-watch", - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-identity", "libp2p-swarm", - "log", "rand", "smallvec", - "socket2 0.5.4", + "socket2 0.5.5", "tokio", - "trust-dns-proto 0.22.0", + "tracing", "void", ] [[package]] name = "libp2p-metrics" -version = "0.13.1" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239ba7d28f8d0b5d77760dc6619c05c7e88e74ec8fbbe97f856f20a56745e620" +checksum = "fdac91ae4f291046a3b2660c039a2830c931f84df2ee227989af92f7692d3357" dependencies = [ + "futures", "instant", - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-gossipsub", "libp2p-identify", "libp2p-identity", "libp2p-swarm", - "once_cell", + "pin-project", "prometheus-client", ] [[package]] name = "libp2p-mplex" -version = "0.40.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93959ed08b6caf9810e067655e25f1362098797fef7c44d3103e63dcb6f0fabe" +checksum = "a5e895765e27e30217b25f7cb7ac4686dad1ff80bf2fdeffd1d898566900a924" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.2", "bytes", "futures", - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-identity", - "log", "nohash-hasher", "parking_lot 0.12.1", "rand", "smallvec", + "tracing", "unsigned-varint 0.7.2", ] [[package]] name = "libp2p-noise" -version = "0.43.2" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2eeec39ad3ad0677551907dd304b2f13f17208ccebe333bef194076cd2e8921" +checksum = "8ecd0545ce077f6ea5434bcb76e8d0fe942693b4380aaad0d34a358c2bd05793" dependencies = [ + "asynchronous-codec 0.7.0", "bytes", "curve25519-dalek", "futures", - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-identity", - "log", "multiaddr", "multihash", "once_cell", @@ -4234,81 +4299,81 @@ dependencies = [ "snow", "static_assertions", "thiserror", + "tracing", "x25519-dalek", "zeroize", ] [[package]] name = "libp2p-plaintext" -version = "0.40.1" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53cc5390cc2f77b7de2452fb6105892d0bb64e3cafa3bb346abb603f4cc93a09" +checksum = "67330af40b67217e746d42551913cfb7ad04c74fa300fb329660a56318590b3f" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.2", "bytes", "futures", - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-identity", - "log", "quick-protobuf", - "unsigned-varint 0.7.2", + "quick-protobuf-codec", + "tracing", ] [[package]] name = "libp2p-quic" -version = "0.9.3" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "130d451d83f21b81eb7b35b360bc7972aeafb15177784adc56528db082e6b927" +checksum = "c02570b9effbc7c33331803104a8e9e53af7f2bdb4a2b61be420d6667545a0f5" dependencies = [ "bytes", 
"futures", "futures-timer", "if-watch", - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-identity", "libp2p-tls", - "log", "parking_lot 0.12.1", "quinn", "rand", - "ring", + "ring 0.16.20", "rustls", - "socket2 0.5.4", + "socket2 0.5.5", "thiserror", "tokio", + "tracing", ] [[package]] name = "libp2p-swarm" -version = "0.43.6" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48ff0e918a45fec0b6f27b30b0547a57c6c214aa8b13be3647b7701bfd8b8797" +checksum = "643ce11d87db56387631c9757b61b83435b434f94dc52ec267c1666e560e78b0" dependencies = [ "either", "fnv", "futures", "futures-timer", "instant", - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-identity", "libp2p-swarm-derive", - "log", "multistream-select", "once_cell", "rand", "smallvec", "tokio", + "tracing", "void", ] [[package]] name = "libp2p-swarm-derive" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" +checksum = "9b27d257436d01433a21da8da7688c83dba35826726161a328ff0989cd7af2dd" dependencies = [ "heck", - "proc-macro-warning", "proc-macro2", "quote", "syn 2.0.38", @@ -4316,33 +4381,33 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.40.1" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b558dd40d1bcd1aaaed9de898e9ec6a436019ecc2420dd0016e712fbb61c5508" +checksum = "8b2460fc2748919adff99ecbc1aab296e4579e41f374fb164149bd2c9e529d4c" dependencies = [ "futures", "futures-timer", "if-watch", "libc", - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-identity", - "log", - "socket2 0.5.4", + "socket2 0.5.5", "tokio", + "tracing", ] [[package]] name = "libp2p-tls" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8218d1d5482b122ccae396bbf38abdcb283ecc96fa54760e1dfd251f0546ac61" +checksum = "93ce7e3c2e7569d685d08ec795157981722ff96e9e9f9eae75df3c29d02b07a5" dependencies = [ "futures", "futures-rustls", - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-identity", "rcgen", - "ring", + "ring 0.16.20", "rustls", "rustls-webpki", "thiserror", @@ -4352,30 +4417,30 @@ dependencies = [ [[package]] name = "libp2p-upnp" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82775a47b34f10f787ad3e2a22e2c1541e6ebef4fe9f28f3ac553921554c94c1" +checksum = "963eb8a174f828f6a51927999a9ab5e45dfa9aa2aa5fed99aa65f79de6229464" dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core", + "libp2p-core 0.41.1", "libp2p-swarm", - "log", "tokio", + "tracing", "void", ] [[package]] name = "libp2p-yamux" -version = "0.44.1" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eedcb62824c4300efb9cfd4e2a6edaf3ca097b9e68b36dabe45a44469fd6a85" +checksum = "751f4778f71bc3db1ccf2451e7f4484463fec7f00c1ac2680e39c8368c23aae8" dependencies = [ "futures", - "libp2p-core", - "log", + "libp2p-core 0.41.1", "thiserror", + "tracing", "yamux", ] @@ -4518,7 +4583,6 @@ dependencies = [ "lazy_static", "libp2p", "libp2p-mplex", - "libp2p-quic", "lighthouse_metrics", "lighthouse_version", "lru 0.7.8", @@ -5604,6 +5668,16 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923" +dependencies = [ + 
"base64 0.21.5", + "serde", +] + [[package]] name = "pem-rfc7468" version = "0.7.0" @@ -5790,7 +5864,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "byteorder", "bytes", "fallible-iterator", @@ -5928,17 +6002,6 @@ version = "0.5.20+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" -[[package]] -name = "proc-macro-warning" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.38", -] - [[package]] name = "proc-macro2" version = "1.0.69" @@ -5980,9 +6043,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" +checksum = "510c4f1c9d81d556458f94c98f857748130ea9737bbd6053da497503b26ea63c" dependencies = [ "dtoa", "itoa", @@ -6060,7 +6123,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.2", "bytes", "quick-protobuf", "thiserror", @@ -6115,7 +6178,7 @@ checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" dependencies = [ "bytes", "rand", - "ring", + "ring 0.16.20", "rustc-hash", "rustls", "slab", @@ -6132,7 +6195,7 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2 0.5.4", + "socket2 0.5.5", "tracing", "windows-sys 0.48.0", ] @@ -6240,12 +6303,12 @@ dependencies = [ [[package]] name = "rcgen" -version = "0.10.0" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" +checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" dependencies = [ - "pem", - "ring", + "pem 3.0.2", + "ring 0.16.20", "time", "yasna", ] @@ -6338,7 +6401,7 @@ version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "bytes", "encoding_rs", "futures-core", @@ -6417,12 +6480,26 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", - "untrusted", + "spin 0.5.2", + "untrusted 0.7.1", "web-sys", "winapi", ] +[[package]] +name = "ring" +version = "0.17.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" +dependencies = [ + "cc", + "getrandom", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + [[package]] name = "rle-decode-fast" version = "1.0.3" @@ -6568,12 +6645,12 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", - "ring", + "ring 0.17.5", 
"rustls-webpki", "sct", ] @@ -6584,17 +6661,17 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", ] [[package]] name = "rustls-webpki" -version = "0.101.6" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.5", + "untrusted 0.9.0", ] [[package]] @@ -6714,8 +6791,8 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -7267,7 +7344,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek", "rand_core", - "ring", + "ring 0.16.20", "rustc_version", "sha2 0.10.8", "subtle", @@ -7285,9 +7362,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys 0.48.0", @@ -7299,6 +7376,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "spki" version = "0.6.0" @@ -7682,18 +7765,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.49" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.49" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", @@ -7827,7 +7910,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.4", + "socket2 0.5.5", "tokio-macros", "windows-sys 0.48.0", ] @@ -7883,7 +7966,7 @@ dependencies = [ "postgres-protocol", "postgres-types", "rand", - "socket2 0.5.4", + "socket2 0.5.5", "tokio", "tokio-util 0.7.9", "whoami", @@ -8136,78 +8219,6 @@ dependencies = [ "rlp", ] -[[package]] -name = "trust-dns-proto" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner 0.5.1", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.2.3", - "ipnet", - "lazy_static", - "rand", - "smallvec", - "socket2 0.4.9", - "thiserror", - "tinyvec", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "trust-dns-proto" -version = 
"0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "559ac980345f7f5020883dd3bcacf176355225e01916f8c2efecad7534f682c6" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner 0.6.0", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.4.0", - "ipnet", - "once_cell", - "rand", - "smallvec", - "thiserror", - "tinyvec", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c723b0e608b24ad04c73b2607e0241b2c98fd79795a95e98b068b6966138a29d" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lru-cache", - "once_cell", - "parking_lot 0.12.1", - "rand", - "resolv-conf", - "smallvec", - "thiserror", - "tokio", - "tracing", - "trust-dns-proto 0.23.1", -] - [[package]] name = "try-lock" version = "0.2.4" @@ -8359,7 +8370,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.2", "bytes", ] @@ -8369,6 +8380,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "unused_port" version = "0.1.0" @@ -8385,7 +8402,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna", "percent-encoding", ] @@ -8432,7 +8449,7 @@ dependencies = [ "parking_lot 0.12.1", "rand", "reqwest", - "ring", + "ring 0.16.20", "safe_arith", "sensitive_url", "serde", diff --git a/Dockerfile b/Dockerfile index 878a3602bd2..a8dadf2ad57 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.69.0-bullseye AS builder +FROM rust:1.73.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . 
lighthouse ARG FEATURES @@ -15,4 +15,4 @@ RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-reco ca-certificates \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse \ No newline at end of file +COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index cedf347b9a8..bfd55c3beb0 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -68,7 +68,7 @@ pub struct ClientBuilder { eth1_service: Option, network_globals: Option>>, network_senders: Option>, - gossipsub_registry: Option, + libp2p_registry: Option, db_path: Option, freezer_db_path: Option, http_api_config: http_api::Config, @@ -102,7 +102,7 @@ where eth1_service: None, network_globals: None, network_senders: None, - gossipsub_registry: None, + libp2p_registry: None, db_path: None, freezer_db_path: None, http_api_config: <_>::default(), @@ -531,7 +531,7 @@ where .ok_or("network requires beacon_processor_channels")?; // If gossipsub metrics are required we build a registry to record them - let mut gossipsub_registry = if config.metrics_enabled { + let mut libp2p_registry = if config.metrics_enabled { Some(Registry::default()) } else { None @@ -541,9 +541,7 @@ where beacon_chain, config, context.executor, - gossipsub_registry - .as_mut() - .map(|registry| registry.sub_registry_with_prefix("gossipsub")), + libp2p_registry.as_mut(), beacon_processor_channels.beacon_processor_tx.clone(), beacon_processor_channels.work_reprocessing_tx.clone(), ) @@ -552,7 +550,7 @@ where self.network_globals = Some(network_globals); self.network_senders = Some(network_senders); - self.gossipsub_registry = gossipsub_registry; + self.libp2p_registry = libp2p_registry; Ok(self) } @@ -718,7 +716,7 @@ where chain: self.beacon_chain.clone(), db_path: self.db_path.clone(), freezer_db_path: self.freezer_db_path.clone(), - gossipsub_registry: self.gossipsub_registry.take().map(std::sync::Mutex::new), + gossipsub_registry: self.libp2p_registry.take().map(std::sync::Mutex::new), log: log.clone(), }); diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 125bbe9bc2f..356a6a203b6 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -39,17 +39,16 @@ directory = { workspace = true } regex = { workspace = true } strum = { workspace = true } superstruct = { workspace = true } -prometheus-client = "0.21.0" +prometheus-client = "0.22.0" unused_port = { workspace = true } delay_map = { workspace = true } void = "1" -libp2p-quic= { version = "0.9.2", features=["tokio"]} -libp2p-mplex = "0.40.0" +libp2p-mplex = "0.41.0" [dependencies.libp2p] -version = "0.52" +version = "0.53" default-features = false -features = ["identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"] +features = ["identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic"] [dev-dependencies] slog-term = { workspace = true } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 47bd7b86679..f24b94c9ecb 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -455,12 +455,6 @@ pub fn gossipsub_config( fork_context: Arc, 
gossipsub_config_params: GossipsubConfigParams, ) -> gossipsub::Config { - // The function used to generate a gossipsub message id - // We use the first 8 bytes of SHA256(topic, data) for content addressing - let fast_gossip_message_id = |message: &gossipsub::RawMessage| { - let data = [message.topic.as_str().as_bytes(), &message.data].concat(); - gossipsub::FastMessageId::from(&Sha256::digest(&data)[..8]) - }; fn prefix( prefix: [u8; 4], message: &gossipsub::Message, @@ -518,7 +512,6 @@ pub fn gossipsub_config( .validation_mode(gossipsub::ValidationMode::Anonymous) .duplicate_cache_time(DUPLICATE_CACHE_TIME) .message_id_fn(gossip_message_id) - .fast_message_id_fn(fast_gossip_message_id) .allow_self_origin(true) .build() .expect("valid gossipsub configuration") diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 388790568f0..0894dc65bd6 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -29,7 +29,7 @@ pub use libp2p::{ identity::PeerId, swarm::{ dummy::ConnectionHandler, ConnectionId, DialError, NetworkBehaviour, NotifyHandler, - PollParameters, SubstreamProtocol, ToSwarm, + SubstreamProtocol, ToSwarm, }, }; use lru::LruCache; @@ -955,11 +955,7 @@ impl NetworkBehaviour for Discovery { } // Main execution loop to drive the behaviour - fn poll( - &mut self, - cx: &mut Context, - _: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, cx: &mut Context) -> Poll>> { if !self.started { return Poll::Pending; } @@ -1041,7 +1037,7 @@ impl NetworkBehaviour for Discovery { Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::DialFailure(DialFailure { peer_id, error, .. }) => { self.on_dial_failure(peer_id, error) @@ -1114,17 +1110,7 @@ impl NetworkBehaviour for Discovery { Err(e) => warn!(self.log, "Failed to update ENR"; "error" => ?e), } } - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => { + _ => { // Ignore events not relevant to discovery } } diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 7467fb7f067..ea1ab07e3e3 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -115,7 +115,6 @@ pub use config::Config as NetworkConfig; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discv5; pub use libp2p; -pub use libp2p::bandwidth::BandwidthSinks; pub use libp2p::gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash}; pub use libp2p::{core::ConnectedPoint, PeerId, Swarm}; pub use libp2p::{multiaddr, Multiaddr}; diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 4650553d89d..ae02b689d81 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -1,6 +1,3 @@ -use libp2p::bandwidth::BandwidthSinks; -use std::sync::Arc; - pub use lighthouse_metrics::*; lazy_static! 
{ @@ -187,46 +184,3 @@ pub fn scrape_discovery_metrics() { set_gauge(&DISCOVERY_SENT_BYTES, metrics.bytes_sent as i64); set_gauge(&DISCOVERY_RECV_BYTES, metrics.bytes_recv as i64); } - -/// Aggregated `BandwidthSinks` of tcp and quic transports -/// used in libp2p. -pub struct AggregatedBandwidthSinks { - tcp_sinks: Arc, - quic_sinks: Option>, -} - -impl AggregatedBandwidthSinks { - /// Create a new `AggregatedBandwidthSinks`. - pub fn new(tcp_sinks: Arc, quic_sinks: Option>) -> Self { - AggregatedBandwidthSinks { - tcp_sinks, - quic_sinks, - } - } - - /// Total QUIC inbound bandwidth. - pub fn total_quic_inbound(&self) -> u64 { - self.quic_sinks - .as_ref() - .map(|q| q.total_inbound()) - .unwrap_or_default() - } - - /// Total TCP inbound bandwidth. - pub fn total_tcp_inbound(&self) -> u64 { - self.tcp_sinks.total_inbound() - } - - /// Total QUIC outbound bandwidth. - pub fn total_quic_outbound(&self) -> u64 { - self.quic_sinks - .as_ref() - .map(|q| q.total_outbound()) - .unwrap_or_default() - } - - /// Total TCP outbound bandwidth. - pub fn total_tcp_outbound(&self) -> u64 { - self.tcp_sinks.total_outbound() - } -} diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 0617c8fa372..da205d169ac 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -9,7 +9,7 @@ use libp2p::identity::PeerId; use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::dummy::ConnectionHandler; -use libp2p::swarm::{ConnectionDenied, ConnectionId, NetworkBehaviour, PollParameters, ToSwarm}; +use libp2p::swarm::{ConnectionDenied, ConnectionId, NetworkBehaviour, ToSwarm}; use slog::{debug, error, trace}; use types::EthSpec; @@ -36,11 +36,7 @@ impl NetworkBehaviour for PeerManager { // no events from the dummy handler } - fn poll( - &mut self, - cx: &mut Context<'_>, - _params: &mut impl PollParameters, - ) -> Poll> { + fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { // perform the heartbeat when necessary while self.heartbeat.poll_tick(cx).is_ready() { self.heartbeat(); @@ -121,7 +117,7 @@ impl NetworkBehaviour for PeerManager { Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, @@ -155,15 +151,9 @@ impl NetworkBehaviour for PeerManager { // TODO: we likely want to check this against our assumed external tcp // address } - FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) => { + _ => { + // NOTE: FromSwarm is a non exhaustive enum so updates should be based on release + // notes more than compiler feedback // The rest of the events we ignore since they are handled in their associated // `SwarmEvent` } diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 8d1563fafa8..03f4761ff08 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -12,8 +12,7 @@ use 
futures::prelude::*; use futures::{Sink, SinkExt}; use libp2p::swarm::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, KeepAlive, StreamUpgradeError, - SubstreamProtocol, + FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, }; use libp2p::swarm::Stream; use slog::{crit, debug, trace, warn}; @@ -51,7 +50,12 @@ impl SubstreamId { type InboundSubstream = InboundFramed; /// Events the handler emits to the behaviour. -pub type HandlerEvent = Result, HandlerErr>; +#[derive(Debug)] +pub enum HandlerEvent { + Ok(RPCReceived), + Err(HandlerErr), + Close(RPCError), +} /// An error encountered by the handler. #[derive(Debug)] @@ -249,11 +253,12 @@ where } // We now drive to completion communications already dialed/established while let Some((id, req)) = self.dial_queue.pop() { - self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::Disconnected, - proto: req.versioned_protocol().protocol(), - id, - })); + self.events_out + .push(HandlerEvent::Err(HandlerErr::Outbound { + error: RPCError::Disconnected, + proto: req.versioned_protocol().protocol(), + id, + })); } // Queue our goodbye message. @@ -273,11 +278,13 @@ where HandlerState::Active => { self.dial_queue.push((id, req)); } - _ => self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::Disconnected, - proto: req.versioned_protocol().protocol(), - id, - })), + _ => self + .events_out + .push(HandlerEvent::Err(HandlerErr::Outbound { + error: RPCError::Disconnected, + proto: req.versioned_protocol().protocol(), + id, + })), } } @@ -296,7 +303,7 @@ where }; // If the response we are sending is an error, report back for handling if let RPCCodedResponse::Error(ref code, ref reason) = response { - self.events_out.push(Err(HandlerErr::Inbound { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { error: RPCError::ErrorResponse(*code, reason.to_string()), proto: inbound_info.protocol, id: inbound_id, @@ -320,7 +327,6 @@ where { type FromBehaviour = RPCSend; type ToBehaviour = HandlerEvent; - type Error = RPCError; type InboundProtocol = RPCProtocol; type OutboundProtocol = OutboundRequestContainer; type OutboundOpenInfo = (Id, OutboundRequest); // Keep track of the id and the request @@ -342,28 +348,23 @@ where } } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { // Check that we don't have outbound items pending for dialing, nor dialing, nor // established. Also check that there are no established inbound substreams. // Errors and events need to be reported back, so check those too. - let should_shutdown = match self.state { + match self.state { HandlerState::ShuttingDown(_) => { - self.dial_queue.is_empty() - && self.outbound_substreams.is_empty() - && self.inbound_substreams.is_empty() - && self.events_out.is_empty() - && self.dial_negotiated == 0 + !self.dial_queue.is_empty() + || !self.outbound_substreams.is_empty() + || !self.inbound_substreams.is_empty() + || !self.events_out.is_empty() + || !self.dial_negotiated != 0 } HandlerState::Deactivated => { // Regardless of events, the timeout has expired. Force the disconnect. 
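+                // In libp2p 0.53, `connection_keep_alive` returns a plain bool instead of
+                // the old `KeepAlive` enum: `true` keeps the connection alive and `false`
+                // allows the swarm to close it once idle.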
- true + false } - _ => false, - }; - if should_shutdown { - KeepAlive::No - } else { - KeepAlive::Yes + _ => true, } } @@ -371,12 +372,7 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { if let Some(waker) = &self.waker { if waker.will_wake(cx.waker()) { @@ -400,7 +396,9 @@ where Poll::Ready(_) => { self.state = HandlerState::Deactivated; debug!(self.log, "Handler deactivated"); - return Poll::Ready(ConnectionHandlerEvent::Close(RPCError::Disconnected)); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Close(RPCError::Disconnected), + )); } Poll::Pending => {} }; @@ -414,7 +412,7 @@ where if let Some(info) = self.inbound_substreams.get_mut(inbound_id.get_ref()) { // the delay has been removed info.delay_key = None; - self.events_out.push(Err(HandlerErr::Inbound { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { error: RPCError::StreamTimeout, proto: info.protocol, id: *inbound_id.get_ref(), @@ -432,9 +430,11 @@ where Poll::Ready(Some(Err(e))) => { warn!(self.log, "Inbound substream poll failed"; "error" => ?e); // drops the peer if we cannot read the delay queue - return Poll::Ready(ConnectionHandlerEvent::Close(RPCError::InternalError( - "Could not poll inbound stream timer", - ))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Close(RPCError::InternalError( + "Could not poll inbound stream timer", + )), + )); } Poll::Pending | Poll::Ready(None) => break, } @@ -453,18 +453,20 @@ where error: RPCError::StreamTimeout, }; // notify the user - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err( - outbound_err, - ))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Err(outbound_err), + )); } else { crit!(self.log, "timed out substream not in the books"; "stream_id" => outbound_id.get_ref()); } } Poll::Ready(Some(Err(e))) => { warn!(self.log, "Outbound substream poll failed"; "error" => ?e); - return Poll::Ready(ConnectionHandlerEvent::Close(RPCError::InternalError( - "Could not poll outbound stream timer", - ))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Close(RPCError::InternalError( + "Could not poll outbound stream timer", + )), + )); } Poll::Pending | Poll::Ready(None) => break, } @@ -516,7 +518,7 @@ where // If there was an error in shutting down the substream report the // error if let Err(error) = res { - self.events_out.push(Err(HandlerErr::Inbound { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { error, proto: info.protocol, id: *id, @@ -528,7 +530,7 @@ where if info.pending_items.back().map(|l| l.close_after()) == Some(false) { // if the request was still active, report back to cancel it - self.events_out.push(Err(HandlerErr::Inbound { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { error: RPCError::Disconnected, proto: info.protocol, id: *id, @@ -613,7 +615,7 @@ where self.inbound_substreams_delay.remove(delay_key); } // Report the error that occurred during the send process - self.events_out.push(Err(HandlerErr::Inbound { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { error, proto: info.protocol, id: *id, @@ -666,11 +668,12 @@ where } if deactivated => { // the handler is deactivated. 
Close the stream entry.get_mut().state = OutboundSubstreamState::Closing(substream); - self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::Disconnected, - proto: entry.get().proto, - id: entry.get().req_id, - })) + self.events_out + .push(HandlerEvent::Err(HandlerErr::Outbound { + error: RPCError::Disconnected, + proto: entry.get().proto, + id: entry.get().req_id, + })) } OutboundSubstreamState::RequestPendingResponse { mut substream, @@ -711,14 +714,18 @@ where let received = match response { RPCCodedResponse::StreamTermination(t) => { - Ok(RPCReceived::EndOfStream(id, t)) + HandlerEvent::Ok(RPCReceived::EndOfStream(id, t)) + } + RPCCodedResponse::Success(resp) => { + HandlerEvent::Ok(RPCReceived::Response(id, resp)) + } + RPCCodedResponse::Error(ref code, ref r) => { + HandlerEvent::Err(HandlerErr::Outbound { + id, + proto, + error: RPCError::ErrorResponse(*code, r.to_string()), + }) } - RPCCodedResponse::Success(resp) => Ok(RPCReceived::Response(id, resp)), - RPCCodedResponse::Error(ref code, ref r) => Err(HandlerErr::Outbound { - id, - proto, - error: RPCError::ErrorResponse(*code, r.to_string()), - }), }; return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(received)); @@ -736,9 +743,12 @@ where // notify the application error if request.expected_responses() > 1 { // return an end of stream result - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok( - RPCReceived::EndOfStream(request_id, request.stream_termination()), - ))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Ok(RPCReceived::EndOfStream( + request_id, + request.stream_termination(), + )), + )); } // else we return an error, stream should not have closed early. @@ -747,9 +757,9 @@ where proto: request.versioned_protocol().protocol(), error: RPCError::IncompleteStream, }; - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err( - outbound_err, - ))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Err(outbound_err), + )); } Poll::Pending => { entry.get_mut().state = @@ -765,9 +775,9 @@ where error: e, }; entry.remove_entry(); - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err( - outbound_err, - ))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Err(outbound_err), + )); } }, OutboundSubstreamState::Closing(mut substream) => { @@ -788,9 +798,12 @@ where // termination to the application if let Some(termination) = protocol.terminator() { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok( - RPCReceived::EndOfStream(request_id, termination), - ))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Ok(RPCReceived::EndOfStream( + request_id, + termination, + )), + )); } } Poll::Pending => { @@ -831,7 +844,9 @@ where && self.events_out.is_empty() && self.dial_negotiated == 0 { - return Poll::Ready(ConnectionHandlerEvent::Close(RPCError::Disconnected)); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::Close(RPCError::Disconnected), + )); } } @@ -859,24 +874,9 @@ where ConnectionEvent::DialUpgradeError(DialUpgradeError { info, error }) => { self.on_dial_upgrade_error(info, error) } - ConnectionEvent::ListenUpgradeError(libp2p::swarm::handler::ListenUpgradeError { - info: _, - error: _, /* RPCError */ - }) => { - // This is going to be removed in the next libp2p release. I think its fine to do - // nothing. 
- } - ConnectionEvent::LocalProtocolsChange(_) => { - // This shouldn't effect this handler, we will still negotiate streams if we support - // the protocol as usual. - } - ConnectionEvent::RemoteProtocolsChange(_) => { - // This shouldn't effect this handler, we will still negotiate streams if we support - // the protocol as usual. - } - ConnectionEvent::AddressChange(_) => { - // We dont care about these changes as they have no bearing on our RPC internal - // logic. + _ => { + // NOTE: ConnectionEvent is a non exhaustive enum so updates should be based on + // release notes more than compiler feedback } } } @@ -919,7 +919,7 @@ where }, ); } else { - self.events_out.push(Err(HandlerErr::Inbound { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { id: self.current_inbound_substream_id, proto: req.versioned_protocol().protocol(), error: RPCError::HandlerRejected, @@ -933,7 +933,7 @@ where self.shutdown(None); } - self.events_out.push(Ok(RPCReceived::Request( + self.events_out.push(HandlerEvent::Ok(RPCReceived::Request( self.current_inbound_substream_id, req, ))); @@ -953,11 +953,12 @@ where // accept outbound connections only if the handler is not deactivated if matches!(self.state, HandlerState::Deactivated) { - self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::Disconnected, - proto, - id, - })); + self.events_out + .push(HandlerEvent::Err(HandlerErr::Outbound { + error: RPCError::Disconnected, + proto, + id, + })); } // add the stream to substreams if we expect a response, otherwise drop the stream. @@ -1030,11 +1031,12 @@ where self.dial_negotiated -= 1; self.outbound_io_error_retries = 0; - self.events_out.push(Err(HandlerErr::Outbound { - error, - proto: req.versioned_protocol().protocol(), - id, - })); + self.events_out + .push(HandlerEvent::Err(HandlerErr::Outbound { + error, + proto: req.versioned_protocol().protocol(), + id, + })); } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index ab87a533d69..d6686ff1b11 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -5,10 +5,9 @@ //! syncing. use futures::future::FutureExt; -use handler::{HandlerEvent, RPCHandler}; +use handler::RPCHandler; use libp2p::swarm::{ - handler::ConnectionHandler, ConnectionId, NetworkBehaviour, NotifyHandler, PollParameters, - ToSwarm, + handler::ConnectionHandler, ConnectionId, NetworkBehaviour, NotifyHandler, ToSwarm, }; use libp2p::swarm::{FromSwarm, SubstreamProtocol, THandlerInEvent}; use libp2p::PeerId; @@ -20,7 +19,7 @@ use std::task::{Context, Poll}; use std::time::Duration; use types::{EthSpec, ForkContext}; -pub(crate) use handler::HandlerErr; +pub(crate) use handler::{HandlerErr, HandlerEvent}; pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse}; pub(crate) use protocol::InboundRequest; @@ -282,25 +281,9 @@ where Ok(handler) } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionClosed(_) - | FromSwarm::ConnectionEstablished(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => { - // Rpc Behaviour does not act on these swarm events. 
We use a comprehensive match - // statement to ensure future events are dealt with appropriately. - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) { + // NOTE: FromSwarm is a non exhaustive enum so updates should be based on release notes more + // than compiler feedback } fn on_connection_handler_event( @@ -309,7 +292,7 @@ where conn_id: ConnectionId, event: ::ToBehaviour, ) { - if let Ok(RPCReceived::Request(ref id, ref req)) = event { + if let HandlerEvent::Ok(RPCReceived::Request(ref id, ref req)) = event { if let Some(limiter) = self.limiter.as_mut() { // check if the request is conformant to the quota match limiter.allows(&peer_id, req) { @@ -374,11 +357,7 @@ where } } - fn poll( - &mut self, - cx: &mut Context, - _: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, cx: &mut Context) -> Poll>> { // let the rate limiter prune. if let Some(limiter) = self.limiter.as_mut() { let _ = limiter.poll_unpin(cx); @@ -409,27 +388,38 @@ where serializer: &mut dyn slog::Serializer, ) -> slog::Result { serializer.emit_arguments("peer_id", &format_args!("{}", self.peer_id))?; - let (msg_kind, protocol) = match &self.event { - Ok(received) => match received { - RPCReceived::Request(_, req) => ("request", req.versioned_protocol().protocol()), - RPCReceived::Response(_, res) => ("response", res.protocol()), - RPCReceived::EndOfStream(_, end) => ( - "end_of_stream", - match end { - ResponseTermination::BlocksByRange => Protocol::BlocksByRange, - ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, - ResponseTermination::BlobsByRange => Protocol::BlobsByRange, - ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, - }, - ), - }, - Err(error) => match &error { - HandlerErr::Inbound { proto, .. } => ("inbound_err", *proto), - HandlerErr::Outbound { proto, .. } => ("outbound_err", *proto), - }, + match &self.event { + HandlerEvent::Ok(received) => { + let (msg_kind, protocol) = match received { + RPCReceived::Request(_, req) => { + ("request", req.versioned_protocol().protocol()) + } + RPCReceived::Response(_, res) => ("response", res.protocol()), + RPCReceived::EndOfStream(_, end) => ( + "end_of_stream", + match end { + ResponseTermination::BlocksByRange => Protocol::BlocksByRange, + ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, + ResponseTermination::BlobsByRange => Protocol::BlobsByRange, + ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, + }, + ), + }; + serializer.emit_str("msg_kind", msg_kind)?; + serializer.emit_arguments("protocol", &format_args!("{}", protocol))?; + } + HandlerEvent::Err(error) => { + let (msg_kind, protocol) = match &error { + HandlerErr::Inbound { proto, .. } => ("inbound_err", *proto), + HandlerErr::Outbound { proto, .. 
} => ("outbound_err", *proto), + }; + serializer.emit_str("msg_kind", msg_kind)?; + serializer.emit_arguments("protocol", &format_args!("{}", protocol))?; + } + HandlerEvent::Close(err) => { + serializer.emit_arguments("handler_close", &format_args!("{}", err))?; + } }; - serializer.emit_str("msg_kind", msg_kind)?; - serializer.emit_arguments("protocol", &format_args!("{}", protocol))?; slog::Result::Ok(()) } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index a38b7b2f2ef..3c2a3f5a95f 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -4,7 +4,6 @@ use crate::config::{gossipsub_config, GossipsubConfigParams, NetworkLoad}; use crate::discovery::{ subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, }; -use crate::metrics::AggregatedBandwidthSinks; use crate::peer_manager::{ config::Config as PeerManagerCfg, peerdb::score::PeerAction, peerdb::score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent, @@ -127,8 +126,6 @@ pub struct Network { /// The interval for updating gossipsub scores update_gossipsub_scores: tokio::time::Interval, gossip_cache: GossipCache, - /// The bandwidth logger for the underlying libp2p transport. - pub bandwidth: AggregatedBandwidthSinks, /// This node's PeerId. pub local_peer_id: PeerId, /// Logger for behaviour actions. @@ -139,10 +136,11 @@ pub struct Network { impl Network { pub async fn new( executor: task_executor::TaskExecutor, - ctx: ServiceContext<'_>, + mut ctx: ServiceContext<'_>, log: &slog::Logger, ) -> error::Result<(Self, Arc>)> { let log = log.new(o!("service"=> "libp2p")); + let mut config = ctx.config.clone(); trace!(log, "Libp2p Service starting"); // initialise the node's ID @@ -257,10 +255,13 @@ impl Network { gossipsub_config_params, ); - // If metrics are enabled for gossipsub build the configuration - let gossipsub_metrics = ctx - .gossipsub_registry - .map(|registry| (registry, Default::default())); + // If metrics are enabled for libp2p build the configuration + let gossipsub_metrics = ctx.libp2p_registry.as_mut().map(|registry| { + ( + registry.sub_registry_with_prefix("gossipsub"), + Default::default(), + ) + }); let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( @@ -366,9 +367,8 @@ impl Network { }; // Set up the transport - tcp/quic with noise and mplex - let (transport, bandwidth) = - build_transport(local_keypair.clone(), !config.disable_quic_support) - .map_err(|e| format!("Failed to build transport: {:?}", e))?; + let transport = build_transport(local_keypair.clone(), !config.disable_quic_support) + .map_err(|e| format!("Failed to build transport: {:?}", e))?; // use the executor for libp2p struct Executor(task_executor::TaskExecutor); @@ -379,20 +379,41 @@ impl Network { } // sets up the libp2p swarm. 
- let swarm = SwarmBuilder::with_existing_identity(local_keypair) - .with_tokio() - .with_other_transport(|_key| transport) - .expect("infalible") - .with_behaviour(|_| behaviour) - .expect("infalible") - .with_swarm_config(|_| { - libp2p::swarm::Config::with_executor(Executor(executor)) - .with_notify_handler_buffer_size( - std::num::NonZeroUsize::new(7).expect("Not zero"), - ) - .with_per_connection_event_buffer_size(4) - }) - .build(); + + let swarm = { + let builder = SwarmBuilder::with_existing_identity(local_keypair) + .with_tokio() + .with_other_transport(|_key| transport) + .expect("infalible"); + + // NOTE: adding bandwidth metrics changes the generics of the swarm, so types diverge + if let Some(libp2p_registry) = ctx.libp2p_registry { + builder + .with_bandwidth_metrics(libp2p_registry) + .with_behaviour(|_| behaviour) + .expect("infalible") + .with_swarm_config(|_| { + libp2p::swarm::Config::with_executor(Executor(executor)) + .with_notify_handler_buffer_size( + std::num::NonZeroUsize::new(7).expect("Not zero"), + ) + .with_per_connection_event_buffer_size(4) + }) + .build() + } else { + builder + .with_behaviour(|_| behaviour) + .expect("infalible") + .with_swarm_config(|_| { + libp2p::swarm::Config::with_executor(Executor(executor)) + .with_notify_handler_buffer_size( + std::num::NonZeroUsize::new(7).expect("Not zero"), + ) + .with_per_connection_event_buffer_size(4) + }) + .build() + } + }; let mut network = Network { swarm, @@ -403,7 +424,6 @@ impl Network { score_settings, update_gossipsub_scores, gossip_cache, - bandwidth, local_peer_id, log, }; @@ -1251,7 +1271,7 @@ impl Network { let handler_id = event.conn_id; // The METADATA and PING RPC responses are handled within the behaviour and not propagated match event.event { - Err(handler_err) => { + HandlerEvent::Err(handler_err) => { match handler_err { HandlerErr::Inbound { id: _, @@ -1286,7 +1306,7 @@ impl Network { } } } - Ok(RPCReceived::Request(id, request)) => { + HandlerEvent::Ok(RPCReceived::Request(id, request)) => { let peer_request_id = (handler_id, id); match request { /* Behaviour managed protocols: Ping and Metadata */ @@ -1385,7 +1405,7 @@ impl Network { } } } - Ok(RPCReceived::Response(id, resp)) => { + HandlerEvent::Ok(RPCReceived::Response(id, resp)) => { match resp { /* Behaviour managed protocols */ RPCResponse::Pong(ping) => { @@ -1422,7 +1442,7 @@ impl Network { } } } - Ok(RPCReceived::EndOfStream(id, termination)) => { + HandlerEvent::Ok(RPCReceived::EndOfStream(id, termination)) => { let response = match termination { ResponseTermination::BlocksByRange => Response::BlocksByRange(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), @@ -1431,6 +1451,11 @@ impl Network { }; self.build_response(id, peer_id, response) } + HandlerEvent::Close(_) => { + let _ = self.swarm.disconnect_peer_id(peer_id); + // NOTE: we wait for the swarm to report the connection as actually closed + None + } } } @@ -1624,7 +1649,11 @@ impl Network { None } } - SwarmEvent::Dialing { .. 
} => None, + _ => { + // NOTE: SwarmEvent is a non exhaustive enum so updates should be based on + // release notes more than compiler feedback + None + } }; if let Some(ev) = maybe_event { diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index ab6b3a771bd..5fe5946ce29 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -1,4 +1,3 @@ -use crate::metrics::AggregatedBandwidthSinks; use crate::multiaddr::Protocol; use crate::rpc::{MetaData, MetaDataV1, MetaDataV2}; use crate::types::{ @@ -9,8 +8,8 @@ use futures::future::Either; use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed}; use libp2p::gossipsub; use libp2p::identity::{secp256k1, Keypair}; -use libp2p::{core, noise, yamux, PeerId, Transport, TransportExt}; -use libp2p_quic; +use libp2p::quic; +use libp2p::{core, noise, yamux, PeerId, Transport}; use prometheus_client::registry::Registry; use slog::{debug, warn}; use ssz::Decode; @@ -34,7 +33,7 @@ pub struct Context<'a> { pub enr_fork_id: EnrForkId, pub fork_context: Arc, pub chain_spec: &'a ChainSpec, - pub gossipsub_registry: Option<&'a mut Registry>, + pub libp2p_registry: Option<&'a mut Registry>, } type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; @@ -44,7 +43,7 @@ type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; pub fn build_transport( local_private_key: Keypair, quic_support: bool, -) -> std::io::Result<(BoxedTransport, AggregatedBandwidthSinks)> { +) -> std::io::Result { // mplex config let mut mplex_config = libp2p_mplex::MplexConfig::new(); mplex_config.set_max_buffer_size(256); @@ -53,44 +52,35 @@ pub fn build_transport( // yamux config let mut yamux_config = yamux::Config::default(); yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read()); - // Creates the TCP transport layer - let (tcp, tcp_bandwidth) = - libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::default().nodelay(true)) - .upgrade(core::upgrade::Version::V1) - .authenticate(generate_noise_config(&local_private_key)) - .multiplex(core::upgrade::SelectUpgrade::new( - yamux_config, - mplex_config, - )) - .timeout(Duration::from_secs(10)) - .with_bandwidth_logging(); - - let (transport, bandwidth) = if quic_support { + let tcp = libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::default().nodelay(true)) + .upgrade(core::upgrade::Version::V1) + .authenticate(generate_noise_config(&local_private_key)) + .multiplex(core::upgrade::SelectUpgrade::new( + yamux_config, + mplex_config, + )) + .timeout(Duration::from_secs(10)); + let transport = if quic_support { // Enables Quic // The default quic configuration suits us for now. - let quic_config = libp2p_quic::Config::new(&local_private_key); - let (quic, quic_bandwidth) = - libp2p_quic::tokio::Transport::new(quic_config).with_bandwidth_logging(); + let quic_config = quic::Config::new(&local_private_key); + let quic = quic::tokio::Transport::new(quic_config); let transport = tcp .or_transport(quic) .map(|either_output, _| match either_output { Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - }) - .boxed(); - ( - transport, - AggregatedBandwidthSinks::new(tcp_bandwidth, Some(quic_bandwidth)), - ) + }); + transport.boxed() } else { - (tcp, AggregatedBandwidthSinks::new(tcp_bandwidth, None)) + tcp.boxed() }; // Enables DNS over the transport. 
let transport = libp2p::dns::tokio::Transport::system(transport)?.boxed(); - Ok((transport, bandwidth)) + Ok(transport) } // Useful helper functions for debugging. Currently not used in the client. diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index dc77e3efe21..9585dcf5af1 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -113,7 +113,7 @@ pub async fn build_libp2p_instance( enr_fork_id: EnrForkId::default(), fork_context: Arc::new(fork_context(fork_name)), chain_spec: spec, - gossipsub_registry: None, + libp2p_registry: None, }; Libp2pInstance( LibP2PService::new(executor, libp2p_context, &log) diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 799953538a4..0509ed1ea7d 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -7,8 +7,8 @@ use beacon_chain::{ use fnv::FnvHashMap; pub use lighthouse_metrics::*; use lighthouse_network::{ - metrics::AggregatedBandwidthSinks, peer_manager::peerdb::client::ClientKind, types::GossipKind, - GossipTopic, Gossipsub, NetworkGlobals, + peer_manager::peerdb::client::ClientKind, types::GossipKind, GossipTopic, Gossipsub, + NetworkGlobals, }; use std::sync::Arc; use strum::IntoEnumIterator; @@ -223,12 +223,6 @@ lazy_static! { lazy_static! { - /* - * Bandwidth metrics - */ - pub static ref LIBP2P_BYTES: Result = - try_create_int_counter_vec("libp2p_inbound_bytes", "The bandwidth over libp2p", &["direction", "transport"]); - /* * Sync related metrics */ @@ -327,25 +321,6 @@ lazy_static! { ); } -pub fn update_bandwidth_metrics(bandwidth: &AggregatedBandwidthSinks) { - if let Some(tcp_in_bandwidth) = get_int_counter(&LIBP2P_BYTES, &["inbound", "tcp"]) { - tcp_in_bandwidth.reset(); - tcp_in_bandwidth.inc_by(bandwidth.total_tcp_inbound()); - } - if let Some(tcp_out_bandwidth) = get_int_counter(&LIBP2P_BYTES, &["outbound", "tcp"]) { - tcp_out_bandwidth.reset(); - tcp_out_bandwidth.inc_by(bandwidth.total_tcp_outbound()); - } - if let Some(quic_in_bandwidth) = get_int_counter(&LIBP2P_BYTES, &["inbound", "quic"]) { - quic_in_bandwidth.reset(); - quic_in_bandwidth.inc_by(bandwidth.total_quic_inbound()); - } - if let Some(quic_out_bandwidth) = get_int_counter(&LIBP2P_BYTES, &["outbound", "quic"]) { - quic_out_bandwidth.reset(); - quic_out_bandwidth.inc_by(bandwidth.total_quic_outbound()); - } -} - pub fn register_finality_update_error(error: &LightClientFinalityUpdateError) { inc_counter_vec(&GOSSIP_FINALITY_UPDATE_ERRORS_PER_TYPE, &[error.as_ref()]); } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 03715dd99f2..17760cef592 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -219,7 +219,7 @@ impl NetworkService { beacon_chain: Arc>, config: &NetworkConfig, executor: task_executor::TaskExecutor, - gossipsub_registry: Option<&'_ mut Registry>, + libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, beacon_processor_reprocess_tx: mpsc::Sender, ) -> error::Result<( @@ -285,7 +285,7 @@ impl NetworkService { enr_fork_id, fork_context: fork_context.clone(), chain_spec: &beacon_chain.spec, - gossipsub_registry, + libp2p_registry, }; // launch libp2p service @@ -380,7 +380,7 @@ impl NetworkService { beacon_chain: Arc>, config: &NetworkConfig, executor: task_executor::TaskExecutor, - gossipsub_registry: Option<&'_ mut Registry>, + libp2p_registry: Option<&'_ 
mut Registry>, beacon_processor_send: BeaconProcessorSend, beacon_processor_reprocess_tx: mpsc::Sender, ) -> error::Result<(Arc>, NetworkSenders)> { @@ -388,7 +388,7 @@ impl NetworkService { beacon_chain, config, executor.clone(), - gossipsub_registry, + libp2p_registry, beacon_processor_send, beacon_processor_reprocess_tx, ) @@ -497,7 +497,6 @@ impl NetworkService { } } } - metrics::update_bandwidth_metrics(&self.libp2p.bandwidth); } }; executor.spawn(service_fut, "network"); diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 48b4eb037ab..42c8bea038a 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -4,7 +4,7 @@ version = "4.5.0" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false -rust-version = "1.69.0" +rust-version = "1.73.0" [features] default = ["slasher-lmdb"] From e02adbf7bf3081b653509cb0fc40958845a67ebe Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 8 Dec 2023 10:45:05 +1100 Subject: [PATCH 10/19] Update docs for v4.6.0 (#4982) * Update DB migration docs * Document VC broadcast modes * Update downgrade example (#6) * update downgrade example * Add period * Add v4.1.0 --------- Co-authored-by: chonghe <44791194+chong-he@users.noreply.github.com> --- book/src/database-migrations.md | 80 ++++++++++++++++++++------------- book/src/redundancy.md | 29 +++++++++++- 2 files changed, 77 insertions(+), 32 deletions(-) diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index ed7f7d72f52..5b7b4d49371 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -16,27 +16,24 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|----------------------| -| v2.0.0 | Oct 2021 | v5 | no | -| v2.1.0 | Jan 2022 | v8 | no | -| v2.2.0 | Apr 2022 | v8 | no | -| v2.3.0 | May 2022 | v9 | yes from <= v3.3.0 | -| v2.4.0 | Jul 2022 | v9 | yes from <= v3.3.0 | -| v2.5.0 | Aug 2022 | v11 | yes | -| v3.0.0 | Aug 2022 | v11 | yes | -| v3.1.0 | Sep 2022 | v12 | yes | -| v3.2.0 | Oct 2022 | v12 | yes | -| v3.3.0 | Nov 2022 | v13 | yes | -| v3.4.0 | Jan 2023 | v13 | yes | -| v3.5.0 | Feb 2023 | v15 | yes before Capella | -| v4.0.1 | Mar 2023 | v16 | yes before Capella | +| v4.6.0 | Dec 2023 | v18 | yes before Deneb | +| v4.5.0 | Sep 2023 | v17 | yes | +| v4.4.0 | Aug 2023 | v17 | yes | +| v4.3.0 | Jul 2023 | v17 | yes | | v4.2.0 | May 2023 | v17 | yes | +| v4.1.0 | Apr 2023 | v16 | no | +| v4.0.1 | Mar 2023 | v16 | no | -> **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release -> (e.g. v2.3.0). +> **Note**: All point releases (e.g. v4.4.1) are schema-compatible with the prior minor release +> (e.g. v4.4.0). > **Note**: Support for old schemas is gradually removed from newer versions of Lighthouse. We -usually do this after a major version has been out for a while and everyone has upgraded. In this -case the above table will continue to record the deprecated schema changes for reference. +usually do this after a major version has been out for a while and everyone has upgraded. Deprecated +schema versions for previous releases are archived under +[Full list of schema versions](#full-list-of-schema-versions). If you get stuck and are unable +to upgrade a **testnet** node to the latest version, sometimes it is possible to upgrade via an +intermediate version (e.g. upgrade from v3.5.0 to v4.6.0 via v4.0.1). This is never necessary +on mainnet. 
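Before attempting such an intermediate hop, it is worth confirming which schema version your
database is currently on. The following is an illustrative sketch (an editorial aside, not part of
this patch), assuming the same `$LH_USER`, `$LH_DATADIR` and `$NET` placeholders used by the
commands below:

```bash
# Print the on-disk schema version so you can pick the right release to run.
# Flags mirror the `lighthouse db migrate` invocations elsewhere in this guide.
sudo -u "$LH_USER" lighthouse db version --datadir "$LH_DATADIR" --network "$NET"
```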

## How to apply a database downgrade

To apply a downgrade you need to use the `lighthouse db migrate` command with th

1. Make sure you have a copy of the latest version of Lighthouse. This will be the version that
   knows about the latest schema change, and has the ability to revert it.
-2. Work out the schema version you would like to downgrade to by checking the table above, or the
-   Lighthouse release notes. E.g. if you want to downgrade from v2.3.0, which upgraded the version
-   from v8 to v9, then you'll want to _downgrade_ to v8 in order to run v2.2.x or earlier.
+2. Work out the schema version you would like to downgrade to by checking the table above, or the [Full list of schema versions](#full-list-of-schema-versions) below. E.g. if you want to downgrade from v4.2.0, which upgraded the version from v16 to v17, then you'll want to downgrade to v16 in order to run v4.0.1.
3. **Ensure that downgrading is feasible**. Not all schema upgrades can be reverted, and some of
   them are time-sensitive. The release notes will state whether a downgrade is available and
   whether any caveats apply to it.
@@ -59,14 +54,13 @@ To apply a downgrade you need to use the `lighthouse db migrate` command with th
   sudo -u "$LH_USER" lighthouse db migrate --to "$VERSION" --datadir "$LH_DATADIR" --network "$NET"
   ```

-For example if you want to downgrade to Lighthouse v2.1 or v2.2 from v2.3 and you followed Somer
-Esat's guide, you would run:
+For example if you want to downgrade to Lighthouse v4.0.1 from v4.2.0 and you followed Somer Esat's guide, you would run:

```
-sudo -u lighthousebeacon lighthouse db migrate --to 8 --datadir /var/lib/lighthouse --network mainnet
+sudo -u lighthousebeacon lighthouse db migrate --to 16 --datadir /var/lib/lighthouse --network mainnet
```

-Where `lighthouse` is Lighthouse v2.3.0+. After the downgrade succeeds you can then replace your
+Where `lighthouse` is Lighthouse v4.2.0+. After the downgrade succeeds you can then replace your
 global `lighthouse` binary with the older version and start your node again.

## How to apply a database upgrade
@@ -161,27 +155,27 @@ lighthouse db version --network mainnet

## How to prune historic states

-Pruning historic states helps in managing the disk space used by the Lighthouse beacon node by removing old beacon 
-states from the freezer database. This can be especially useful when the database has accumulated a significant amount 
-of historic data. This command is intended for nodes synced before 4.4.1, as newly synced node no longer store 
+Pruning historic states helps in managing the disk space used by the Lighthouse beacon node by removing old beacon
+states from the freezer database. This can be especially useful when the database has accumulated a significant amount
+of historic data. This command is intended for nodes synced before 4.4.1, as newly synced nodes no longer store
 historic states by default.

Here are the steps to prune historic states:

1. Before running the prune command, make sure that the Lighthouse beacon node is not running. If you are using
   systemd, you might stop the Lighthouse beacon node with a command like:
-   
+
   ```bash
   sudo systemctl stop lighthousebeacon
   ```

2. Use the `prune-states` command to prune the historic states. You can do a test run without the `--confirm` flag to
   check that the database can be pruned:
-   
+
   ```bash
   sudo -u "$LH_USER" lighthouse db prune-states --datadir "$LH_DATADIR" --network "$NET"
   ```

3. 
If you are ready to prune the states irreversibly, add the `--confirm` flag to commit the changes: - + ```bash sudo -u "$LH_USER" lighthouse db prune-states --confirm --datadir "$LH_DATADIR" --network "$NET" ``` @@ -189,7 +183,31 @@ Here are the steps to prune historic states: The `--confirm` flag ensures that you are aware the action is irreversible, and historic states will be permanently removed. 4. After successfully pruning the historic states, you can restart the Lighthouse beacon node: - + ```bash sudo systemctl start lighthousebeacon ``` + +## Full list of schema versions + +| Lighthouse version | Release date | Schema version | Downgrade available? | +|--------------------|--------------|----------------|-------------------------------------| +| v4.6.0 | Dec 2023 | v18 | yes before Deneb | +| v4.5.0 | Sep 2023 | v17 | yes | +| v4.4.0 | Aug 2023 | v17 | yes | +| v4.3.0 | Jul 2023 | v17 | yes | +| v4.2.0 | May 2023 | v17 | yes | +| v4.1.0 | Apr 2023 | v16 | yes before Capella using <= v4.5.0 | +| v4.0.1 | Mar 2023 | v16 | yes before Capella using <= v4.5.0 | +| v3.5.0 | Feb 2023 | v15 | yes before Capella using <= v4.5.0 | +| v3.4.0 | Jan 2023 | v13 | yes using <= 4.5.0 | +| v3.3.0 | Nov 2022 | v13 | yes using <= 4.5.0 | +| v3.2.0 | Oct 2022 | v12 | yes using <= 4.5.0 | +| v3.1.0 | Sep 2022 | v12 | yes using <= 4.5.0 | +| v3.0.0 | Aug 2022 | v11 | yes using <= 4.5.0 | +| v2.5.0 | Aug 2022 | v11 | yes using <= 4.5.0 | +| v2.4.0 | Jul 2022 | v9 | yes using <= v3.3.0 | +| v2.3.0 | May 2022 | v9 | yes using <= v3.3.0 | +| v2.2.0 | Apr 2022 | v8 | no | +| v2.1.0 | Jan 2022 | v8 | no | +| v2.0.0 | Oct 2021 | v5 | no | diff --git a/book/src/redundancy.md b/book/src/redundancy.md index 77cec325371..93529295aed 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -75,7 +75,34 @@ lighthouse bn \ Prior to v3.2.0 fallback beacon nodes also required the `--subscribe-all-subnets` and `--import-all-attestations` flags. These flags are no longer required as the validator client will now broadcast subscriptions to all connected beacon nodes by default. This broadcast behaviour -can be disabled using the `--disable-run-on-all` flag for `lighthouse vc`. +can be disabled using the `--broadcast none` flag for `lighthouse vc` (or `--disable-run-on-all` +[deprecated]). + +### Broadcast modes + +Since v4.6.0, the Lighthouse VC can be configured to broadcast messages to all configured beacon +nodes rather than just the first available. + +The flag to control this behaviour is `--broadcast`, which takes multiple comma-separated values +from this list: + +- `subscriptions`: Send subnet subscriptions & other control messages which keep the beacon nodes + primed and ready to process messages. It is recommended to leave this enabled. +- `attestations`: Send attestations & aggregates to all beacon nodes. This can improve + propagation of attestations throughout the network, at the cost of increased load on the beacon + nodes and increased bandwidth between the VC and the BNs. +- `blocks`: Send proposed blocks to all beacon nodes. This can improve propagation of blocks + throughout the network, at the cost of slightly increased load on the beacon nodes and increased + bandwidth between the VC and the BNs. If you are looking to improve performance in a multi-BN + setup this is the first option we would recommend enabling. +- `sync-committee`: Send sync committee signatures & aggregates to all beacon nodes. 
This can
+  improve propagation of sync committee messages with similar tradeoffs to broadcasting
+  attestations, although occurring less often due to the infrequency of sync committee duties.
+- `none`: Disable all broadcasting. This option only has an effect when provided alone, otherwise
+  it is ignored. Not recommended except for expert tweakers.
+
+The default is `--broadcast subscriptions`. To also broadcast blocks for example, use
+`--broadcast subscriptions,blocks`.
 
 ## Redundant execution nodes
 
From b882519d2f4c708fc13d17f0a97281569a8f2f5e Mon Sep 17 00:00:00 2001
From: Michael Sproul 
Date: Fri, 8 Dec 2023 12:09:36 +1100
Subject: [PATCH 11/19] Implement POST validators/validator_balances APIs
 (#4872)

* Add POST for fetching validators from state

* Implement POST for balances

* Tests
---
 beacon_node/http_api/src/lib.rs        | 160 +++++++++----------------
 beacon_node/http_api/src/validators.rs | 119 ++++++++++++++++++
 beacon_node/http_api/tests/tests.rs    |  29 ++++-
 common/eth2/src/lib.rs                 |  58 +++++++++
 common/eth2/src/types.rs               |  47 +++++++-
 5 files changed, 305 insertions(+), 108 deletions(-)
 create mode 100644 beacon_node/http_api/src/validators.rs

diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index 08c67a00bf7..0b25015e264 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -26,6 +26,7 @@ pub mod test_utils;
 mod ui;
 mod validator;
 mod validator_inclusion;
+mod validators;
 mod version;
 
 use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3};
@@ -41,7 +42,8 @@ use bytes::Bytes;
 use directory::DEFAULT_ROOT_DIR;
 use eth2::types::{
     self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode,
-    PublishBlockRequest, ValidatorId, ValidatorStatus,
+    PublishBlockRequest, ValidatorBalancesRequestBody, ValidatorId, ValidatorStatus,
+    ValidatorsRequestBody,
 };
 use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
 use lighthouse_version::version_with_platform;
@@ -663,47 +665,32 @@ pub fn serve(
             query_res: Result| {
                 task_spawner.blocking_json_task(Priority::P1, move || {
                     let query = query_res?;
-                    let (data, execution_optimistic, finalized) = state_id
-                        .map_state_and_execution_optimistic_and_finalized(
-                            &chain,
-                            |state, execution_optimistic, finalized| {
-                                Ok((
-                                    state
-                                        .validators()
-                                        .iter()
-                                        .zip(state.balances().iter())
-                                        .enumerate()
-                                        // filter by validator id(s) if provided
-                                        .filter(|(index, (validator, _))| {
-                                            query.id.as_ref().map_or(true, |ids| {
-                                                ids.iter().any(|id| match id {
-                                                    ValidatorId::PublicKey(pubkey) => {
-                                                        &validator.pubkey == pubkey
-                                                    }
-                                                    ValidatorId::Index(param_index) => {
-                                                        *param_index == *index as u64
-                                                    }
-                                                })
-                                            })
-                                        })
-                                        .map(|(index, (_, balance))| {
-                                            Some(api_types::ValidatorBalanceData {
-                                                index: index as u64,
-                                                balance: *balance,
-                                            })
-                                        })
-                                        .collect::>(),
-                                    execution_optimistic,
-                                    finalized,
-                                ))
-                            },
-                        )?;
+                    crate::validators::get_beacon_state_validator_balances(
+                        state_id,
+                        chain,
+                        query.id.as_deref(),
+                    )
                 })
             },
         );
 
-                    Ok(api_types::ExecutionOptimisticFinalizedResponse {
-                        data,
-                        execution_optimistic: Some(execution_optimistic),
-                        finalized: Some(finalized),
-                    })
+    // POST beacon/states/{state_id}/validator_balances
+    let post_beacon_state_validator_balances = beacon_states_path
+        .clone()
+        .and(warp::path("validator_balances"))
+        .and(warp::path::end())
+        .and(warp::body::json())
+        .then(
+            |state_id: StateId,
+             task_spawner: TaskSpawner,
+             chain: Arc>,
+             query: ValidatorBalancesRequestBody|
{ + task_spawner.blocking_json_task(Priority::P1, move || { + crate::validators::get_beacon_state_validator_balances( + state_id, + chain, + Some(&query.ids), + ) }) }, ); @@ -721,69 +708,34 @@ pub fn serve( query_res: Result| { task_spawner.blocking_json_task(Priority::P1, move || { let query = query_res?; - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let epoch = state.current_epoch(); - let far_future_epoch = chain.spec.far_future_epoch; - - Ok(( - state - .validators() - .iter() - .zip(state.balances().iter()) - .enumerate() - // filter by validator id(s) if provided - .filter(|(index, (validator, _))| { - query.id.as_ref().map_or(true, |ids| { - ids.iter().any(|id| match id { - ValidatorId::PublicKey(pubkey) => { - &validator.pubkey == pubkey - } - ValidatorId::Index(param_index) => { - *param_index == *index as u64 - } - }) - }) - }) - // filter by status(es) if provided and map the result - .filter_map(|(index, (validator, balance))| { - let status = api_types::ValidatorStatus::from_validator( - validator, - epoch, - far_future_epoch, - ); - - let status_matches = - query.status.as_ref().map_or(true, |statuses| { - statuses.contains(&status) - || statuses.contains(&status.superstatus()) - }); - - if status_matches { - Some(api_types::ValidatorData { - index: index as u64, - balance: *balance, - status, - validator: validator.clone(), - }) - } else { - None - } - }) - .collect::>(), - execution_optimistic, - finalized, - )) - }, - )?; + crate::validators::get_beacon_state_validators( + state_id, + chain, + &query.id, + &query.status, + ) + }) + }, + ); - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) + // POST beacon/states/{state_id}/validators + let post_beacon_state_validators = beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::end()) + .and(warp::body::json()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorsRequestBody| { + task_spawner.blocking_json_task(Priority::P1, move || { + crate::validators::get_beacon_state_validators( + state_id, + chain, + &query.ids, + &query.statuses, + ) }) }, ); @@ -4709,6 +4661,8 @@ pub fn serve( .uor(post_beacon_pool_voluntary_exits) .uor(post_beacon_pool_sync_committees) .uor(post_beacon_pool_bls_to_execution_changes) + .uor(post_beacon_state_validators) + .uor(post_beacon_state_validator_balances) .uor(post_beacon_rewards_attestations) .uor(post_beacon_rewards_sync_committee) .uor(post_validator_duties_attester) diff --git a/beacon_node/http_api/src/validators.rs b/beacon_node/http_api/src/validators.rs new file mode 100644 index 00000000000..20af7a680df --- /dev/null +++ b/beacon_node/http_api/src/validators.rs @@ -0,0 +1,119 @@ +use crate::state_id::StateId; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::{ + self as api_types, ExecutionOptimisticFinalizedResponse, ValidatorBalanceData, ValidatorData, + ValidatorId, ValidatorStatus, +}; +use std::sync::Arc; + +pub fn get_beacon_state_validators( + state_id: StateId, + chain: Arc>, + query_ids: &Option>, + query_statuses: &Option>, +) -> Result>, warp::Rejection> { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let epoch = state.current_epoch(); + let 
far_future_epoch = chain.spec.far_future_epoch; + + Ok(( + state + .validators() + .iter() + .zip(state.balances().iter()) + .enumerate() + // filter by validator id(s) if provided + .filter(|(index, (validator, _))| { + query_ids.as_ref().map_or(true, |ids| { + ids.iter().any(|id| match id { + ValidatorId::PublicKey(pubkey) => &validator.pubkey == pubkey, + ValidatorId::Index(param_index) => { + *param_index == *index as u64 + } + }) + }) + }) + // filter by status(es) if provided and map the result + .filter_map(|(index, (validator, balance))| { + let status = api_types::ValidatorStatus::from_validator( + validator, + epoch, + far_future_epoch, + ); + + let status_matches = query_statuses.as_ref().map_or(true, |statuses| { + statuses.contains(&status) + || statuses.contains(&status.superstatus()) + }); + + if status_matches { + Some(ValidatorData { + index: index as u64, + balance: *balance, + status, + validator: validator.clone(), + }) + } else { + None + } + }) + .collect::>(), + execution_optimistic, + finalized, + )) + }, + )?; + + Ok(ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) +} + +pub fn get_beacon_state_validator_balances( + state_id: StateId, + chain: Arc>, + optional_ids: Option<&[ValidatorId]>, +) -> Result>, warp::Rejection> { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + Ok(( + state + .validators() + .iter() + .zip(state.balances().iter()) + .enumerate() + // filter by validator id(s) if provided + .filter(|(index, (validator, _))| { + optional_ids.map_or(true, |ids| { + ids.iter().any(|id| match id { + ValidatorId::PublicKey(pubkey) => &validator.pubkey == pubkey, + ValidatorId::Index(param_index) => { + *param_index == *index as u64 + } + }) + }) + }) + .map(|(index, (_, balance))| ValidatorBalanceData { + index: index as u64, + balance: *balance, + }) + .collect::>(), + execution_optimistic, + finalized, + )) + }, + )?; + + Ok(api_types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 7b769009cff..ebd681b59fd 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -850,6 +850,18 @@ impl ApiTester { .await .unwrap() .map(|res| res.data); + let result_post_index_ids = self + .client + .post_beacon_states_validator_balances(state_id.0, validator_index_ids) + .await + .unwrap() + .map(|res| res.data); + let result_post_pubkey_ids = self + .client + .post_beacon_states_validator_balances(state_id.0, validator_pubkey_ids) + .await + .unwrap() + .map(|res| res.data); let expected = state_opt.map(|(state, _execution_optimistic, _finalized)| { let mut validators = Vec::with_capacity(validator_indices.len()); @@ -868,6 +880,8 @@ impl ApiTester { assert_eq!(result_index_ids, expected, "{:?}", state_id); assert_eq!(result_pubkey_ids, expected, "{:?}", state_id); + assert_eq!(result_post_index_ids, expected, "{:?}", state_id); + assert_eq!(result_post_pubkey_ids, expected, "{:?}", state_id); } } @@ -913,7 +927,6 @@ impl ApiTester { .await .unwrap() .map(|res| res.data); - let result_pubkey_ids = self .client .get_beacon_states_validators( @@ -924,6 +937,18 @@ impl ApiTester { .await .unwrap() .map(|res| res.data); + let post_result_index_ids = self + .client + 
.post_beacon_states_validators(state_id.0, Some(validator_index_ids), None) + .await + .unwrap() + .map(|res| res.data); + let post_result_pubkey_ids = self + .client + .post_beacon_states_validators(state_id.0, Some(validator_pubkey_ids), None) + .await + .unwrap() + .map(|res| res.data); let expected = state_opt.map(|state| { let epoch = state.current_epoch(); @@ -959,6 +984,8 @@ impl ApiTester { assert_eq!(result_index_ids, expected, "{:?}", state_id); assert_eq!(result_pubkey_ids, expected, "{:?}", state_id); + assert_eq!(post_result_index_ids, expected, "{:?}", state_id); + assert_eq!(post_result_pubkey_ids, expected, "{:?}", state_id); } } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 50f18074459..e045494c9d8 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -317,6 +317,18 @@ impl BeaconNodeHttpClient { .map_err(Into::into) } + async fn post_with_opt_response( + &self, + url: U, + body: &T, + ) -> Result, Error> { + if let Some(response) = self.post_generic(url, body, None).await.optional()? { + response.json().await.map_err(Into::into) + } else { + Ok(None) + } + } + /// Perform a HTTP POST request with a custom timeout. async fn post_with_timeout( &self, @@ -524,6 +536,29 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// `POST beacon/states/{state_id}/validator_balances` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn post_beacon_states_validator_balances( + &self, + state_id: StateId, + ids: Vec, + ) -> Result>>, Error> + { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("validator_balances"); + + let request = ValidatorBalancesRequestBody { ids }; + + self.post_with_opt_response(path, &request).await + } + /// `GET beacon/states/{state_id}/validators?id,status` /// /// Returns `Ok(None)` on a 404 error. @@ -563,6 +598,29 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// `POST beacon/states/{state_id}/validators` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn post_beacon_states_validators( + &self, + state_id: StateId, + ids: Option>, + statuses: Option>, + ) -> Result>>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("validators"); + + let request = ValidatorsRequestBody { ids, statuses }; + + self.post_with_opt_response(path, &request).await + } + /// `GET beacon/states/{state_id}/committees?slot,index,epoch` /// /// Returns `Ok(None)` on a 404 error. 
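For context, a call to the new endpoint added by this patch might look as follows. This is an
illustrative sketch (an editorial aside, not part of the patch): the address assumes Lighthouse's
default HTTP API port, and the body shape is inferred from the `ValidatorsRequestBody` type in the
next hunk (string-encoded indices or pubkeys in `ids`, plus optional `statuses`):

```bash
# Hypothetical query against a local beacon node: fetch validators 0 and 1
# from the head state, filtered to the "active_ongoing" status.
curl -X POST "http://localhost:5052/eth/v1/beacon/states/head/validators" \
  -H "Content-Type: application/json" \
  -d '{"ids": ["0", "1"], "statuses": ["active_ongoing"]}'
```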
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 7007138d8e8..d8086784b1a 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -278,17 +278,18 @@ pub struct FinalityCheckpointsData { } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(try_from = "&str")] +#[serde(into = "String")] +#[serde(try_from = "std::borrow::Cow")] pub enum ValidatorId { PublicKey(PublicKeyBytes), Index(u64), } -impl TryFrom<&str> for ValidatorId { +impl TryFrom> for ValidatorId { type Error = String; - fn try_from(s: &str) -> Result { - Self::from_str(s) + fn try_from(s: std::borrow::Cow) -> Result { + Self::from_str(&s) } } @@ -317,6 +318,12 @@ impl fmt::Display for ValidatorId { } } +impl From for String { + fn from(id: ValidatorId) -> String { + id.to_string() + } +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorData { #[serde(with = "serde_utils::quoted_u64")] @@ -492,6 +499,15 @@ pub struct ValidatorsQuery { pub status: Option>, } +#[derive(Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ValidatorsRequestBody { + #[serde(default)] + pub ids: Option>, + #[serde(default)] + pub statuses: Option>, +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CommitteeData { #[serde(with = "serde_utils::quoted_u64")] @@ -656,6 +672,12 @@ pub struct ValidatorBalancesQuery { pub id: Option>, } +#[derive(Clone, Serialize, Deserialize)] +#[serde(transparent)] +pub struct ValidatorBalancesRequestBody { + pub ids: Vec, +} + #[derive(Clone, Deserialize)] #[serde(deny_unknown_fields)] pub struct BlobIndicesQuery { @@ -1879,3 +1901,20 @@ pub struct BlobsBundle { #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] pub blobs: BlobsList, } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn validator_id_serde() { + let id_str = "\"1\""; + let x: ValidatorId = serde_json::from_str(id_str).unwrap(); + assert_eq!(x, ValidatorId::Index(1)); + assert_eq!(serde_json::to_string(&x).unwrap(), id_str); + + let pubkey_str = "\"0xb824b5ede33a7b05a378a84b183b4bc7e7db894ce48b659f150c97d359edca2f503081d6678d1200f582ec7cafa9caf2\""; + let y: ValidatorId = serde_json::from_str(pubkey_str).unwrap(); + assert_eq!(serde_json::to_string(&y).unwrap(), pubkey_str); + } +} From 46184e5ce49b09cf444c1a7b1e8b10ed9d9c4e13 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 8 Dec 2023 15:42:55 -0500 Subject: [PATCH 12/19] Remove delayed lookups (#4992) * initial rip out * fix unused imports * delete tests and fix lint * fix peers scoring for blobs --- .../src/data_availability_checker.rs | 25 -- .../gossip_methods.rs | 81 ++---- .../src/network_beacon_processor/mod.rs | 73 +----- .../network_beacon_processor/sync_methods.rs | 25 +- .../src/network_beacon_processor/tests.rs | 4 - beacon_node/network/src/router.rs | 6 - .../network/src/sync/block_lookups/common.rs | 47 +--- .../network/src/sync/block_lookups/mod.rs | 91 ++----- .../src/sync/block_lookups/parent_lookup.rs | 15 +- .../sync/block_lookups/single_block_lookup.rs | 111 +++------ .../network/src/sync/block_lookups/tests.rs | 233 +----------------- beacon_node/network/src/sync/manager.rs | 36 +-- 12 files changed, 94 insertions(+), 653 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 6b327246a2e..67e98a01c1a 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ 
-413,31 +413,6 @@ impl DataAvailabilityChecker { .incomplete_processing_components(slot) } - /// Determines whether we are at least the `single_lookup_delay` duration into the given slot. - /// If we are not currently in the Deneb fork, this delay is not considered. - /// - /// The `single_lookup_delay` is the duration we wait for a blocks or blobs to arrive over - /// gossip before making single block or blob requests. This is to minimize the number of - /// single lookup requests we end up making. - pub fn should_delay_lookup(&self, slot: Slot) -> bool { - if !self.is_deneb() { - return false; - } - - let current_or_future_slot = self - .slot_clock - .now() - .map_or(false, |current_slot| current_slot <= slot); - - let delay_threshold_unmet = self - .slot_clock - .millis_from_current_slot_start() - .map_or(false, |millis_into_slot| { - millis_into_slot < self.slot_clock.single_lookup_delay() - }); - current_or_future_slot && delay_threshold_unmet - } - /// The epoch at which we require a data availability check in block processing. /// `None` if the `Deneb` fork is disabled. pub fn data_availability_boundary(&self) -> Option { diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 5d98039a819..9d9b196e9be 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -4,8 +4,6 @@ use crate::{ service::NetworkMessage, sync::SyncMessage, }; -use std::collections::HashSet; - use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::store::Error; @@ -756,11 +754,6 @@ impl NetworkBeaconProcessor { let blob_slot = verified_blob.slot(); let blob_index = verified_blob.id().index; - let delay_lookup = self - .chain - .data_availability_checker - .should_delay_lookup(blob_slot); - match self.chain.process_gossip_blob(verified_blob).await { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { // Note: Reusing block imported metric here @@ -772,29 +765,14 @@ impl NetworkBeaconProcessor { ); self.chain.recompute_head_at_current_slot().await; } - Ok(AvailabilityProcessingStatus::MissingComponents(_slot, block_root)) => { - if delay_lookup { - self.cache_peer(peer_id, &block_root); - trace!( - self.log, - "Processed blob, delaying lookup for other components"; - "slot" => %blob_slot, - "blob_index" => %blob_index, - "block_root" => %block_root, - ); - } else { - trace!( - self.log, - "Missing block components for gossip verified blob"; - "slot" => %blob_slot, - "blob_index" => %blob_index, - "block_root" => %block_root, - ); - self.send_sync_message(SyncMessage::MissingGossipBlockComponents( - vec![peer_id], - block_root, - )); - } + Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { + trace!( + self.log, + "Processed blob, waiting for other components"; + "slot" => %slot, + "blob_index" => %blob_index, + "block_root" => %block_root, + ); } Err(err) => { debug!( @@ -818,18 +796,6 @@ impl NetworkBeaconProcessor { } } - /// Cache the peer id for the given block root. 
- fn cache_peer(self: &Arc, peer_id: PeerId, block_root: &Hash256) { - let mut guard = self.delayed_lookup_peers.lock(); - if let Some(peers) = guard.get_mut(block_root) { - peers.insert(peer_id); - } else { - let mut peers = HashSet::new(); - peers.insert(peer_id); - guard.push(*block_root, peers); - } - } - /// Process the beacon block received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. @@ -1170,11 +1136,6 @@ impl NetworkBeaconProcessor { let block = verified_block.block.block_cloned(); let block_root = verified_block.block_root; - let delay_lookup = self - .chain - .data_availability_checker - .should_delay_lookup(verified_block.block.slot()); - let result = self .chain .process_block_with_early_caching(block_root, verified_block, NotifyExecutionLayer::Yes) @@ -1209,26 +1170,12 @@ impl NetworkBeaconProcessor { self.chain.recompute_head_at_current_slot().await; } Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => { - if delay_lookup { - self.cache_peer(peer_id, block_root); - trace!( - self.log, - "Processed block, delaying lookup for other components"; - "slot" => slot, - "block_root" => %block_root, - ); - } else { - trace!( - self.log, - "Missing block components for gossip verified block"; - "slot" => slot, - "block_root" => %block_root, - ); - self.send_sync_message(SyncMessage::MissingGossipBlockComponents( - vec![peer_id], - *block_root, - )); - } + trace!( + self.log, + "Processed block, waiting for other components"; + "slot" => slot, + "block_root" => %block_root, + ); } Err(BlockError::ParentUnknown(block)) => { // Inform the sync manager to find parents for this block diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 2356a197cc2..67fc2fabb1e 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -18,11 +18,8 @@ use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, }; -use lru::LruCache; -use parking_lot::Mutex; -use slog::{crit, debug, error, trace, Logger}; -use slot_clock::{ManualSlotClock, SlotClock}; -use std::collections::HashSet; +use slog::{debug, Logger}; +use slot_clock::ManualSlotClock; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; @@ -30,7 +27,6 @@ use store::MemoryStore; use task_executor::test_utils::TestRuntime; use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, error::TrySendError}; -use tokio::time::{interval_at, Instant}; use types::*; pub use sync_methods::ChainSegmentProcessId; @@ -44,7 +40,6 @@ mod sync_methods; mod tests; pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; -pub const DELAYED_PEER_CACHE_SIZE: usize = 16; /// Defines if and where we will store the SSZ files of invalid blocks. #[derive(Clone)] @@ -65,7 +60,6 @@ pub struct NetworkBeaconProcessor { pub reprocess_tx: mpsc::Sender, pub network_globals: Arc>, pub invalid_block_storage: InvalidBlockStorage, - pub delayed_lookup_peers: Mutex>>, pub executor: TaskExecutor, pub log: Logger, } @@ -630,68 +624,6 @@ impl NetworkBeaconProcessor { "error" => %e) }); } - - /// This service is responsible for collecting lookup messages and sending them back to sync - /// for processing after a short delay. 
- /// - /// We want to delay lookups triggered from gossip for the following reasons: - /// - /// - We only want to make one request for components we are unlikely to see on gossip. This means - /// we don't have to repeatedly update our RPC request's state as we receive gossip components. - /// - /// - We are likely to receive blocks/blobs over gossip more quickly than we could via an RPC request. - /// - /// - Delaying a lookup means we are less likely to simultaneously download the same blocks/blobs - /// over gossip and RPC. - /// - /// - We would prefer to request peers based on whether we've seen them attest, because this gives - /// us an idea about whether they *should* have the block/blobs we're missing. This is because a - /// node should not attest to a block unless it has all the blobs for that block. This gives us a - /// stronger basis for peer scoring. - pub fn spawn_delayed_lookup_service(self: &Arc) { - let processor_clone = self.clone(); - let executor = self.executor.clone(); - let log = self.log.clone(); - let beacon_chain = self.chain.clone(); - executor.spawn( - async move { - let slot_duration = beacon_chain.slot_clock.slot_duration(); - let delay = beacon_chain.slot_clock.single_lookup_delay(); - let interval_start = match ( - beacon_chain.slot_clock.duration_to_next_slot(), - beacon_chain.slot_clock.seconds_from_current_slot_start(), - ) { - (Some(duration_to_next_slot), Some(seconds_from_current_slot_start)) => { - let duration_until_start = if seconds_from_current_slot_start > delay { - duration_to_next_slot + delay - } else { - delay - seconds_from_current_slot_start - }; - Instant::now() + duration_until_start - } - _ => { - crit!(log, - "Failed to read slot clock, delayed lookup service timing will be inaccurate.\ - This may degrade performance" - ); - Instant::now() - } - }; - - let mut interval = interval_at(interval_start, slot_duration); - loop { - interval.tick().await; - let Some(slot) = beacon_chain.slot_clock.now_or_genesis() else { - error!(log, "Skipping delayed lookup poll, unable to read slot clock"); - continue - }; - trace!(log, "Polling delayed lookups for slot: {slot}"); - processor_clone.poll_delayed_lookups(slot) - } - }, - "delayed_lookups", - ); - } } type TestBeaconChainType = @@ -734,7 +666,6 @@ impl NetworkBeaconProcessor> { reprocess_tx: work_reprocessing_tx, network_globals, invalid_block_storage: InvalidBlockStorage::Disabled, - delayed_lookup_peers: Mutex::new(LruCache::new(DELAYED_PEER_CACHE_SIZE)), executor: runtime.task_executor.clone(), log, }; diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index acfa069d355..95c1fa33e85 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -19,7 +19,7 @@ use beacon_processor::{ AsyncFn, BlockingFn, DuplicateCache, }; use lighthouse_network::PeerAction; -use slog::{debug, error, info, trace, warn}; +use slog::{debug, error, info, warn}; use slot_clock::SlotClock; use std::sync::Arc; use std::time::Duration; @@ -28,7 +28,7 @@ use store::KzgCommitment; use tokio::sync::mpsc; use types::beacon_block_body::format_kzg_commitments; use types::blob_sidecar::FixedBlobSidecarList; -use types::{Epoch, Hash256, Slot}; +use types::{Epoch, Hash256}; /// Id associated to a batch processing request, either a sync batch or a parent lookup. 
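// Worked example of the scheduling rule used by the delayed-lookup service
// removed above, assuming mainnet's 12 s slots and a hypothetical
// `single_lookup_delay` of 4 s: 2 s into a slot, the first tick fires in
// 4 - 2 = 2 s (still this slot); 9 s into a slot, it fires in
// (12 - 9) + 4 = 7 s, i.e. 4 s into the next slot. The removed code computed:
//
//     let duration_until_start = if seconds_from_current_slot_start > delay {
//         duration_to_next_slot + delay
//     } else {
//         delay - seconds_from_current_slot_start
//     };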
#[derive(Clone, Debug, PartialEq)] @@ -373,27 +373,6 @@ impl NetworkBeaconProcessor { }); } - /// Poll the beacon chain for any delayed lookups that are now available. - pub fn poll_delayed_lookups(&self, slot: Slot) { - let block_roots = self - .chain - .data_availability_checker - .incomplete_processing_components(slot); - if block_roots.is_empty() { - trace!(self.log, "No delayed lookups found on poll"); - } else { - debug!(self.log, "Found delayed lookups on poll"; "lookup_count" => block_roots.len()); - } - for block_root in block_roots { - if let Some(peer_ids) = self.delayed_lookup_peers.lock().pop(&block_root) { - self.send_sync_message(SyncMessage::MissingGossipBlockComponents( - peer_ids.into_iter().collect(), - block_root, - )); - } - } - } - /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync /// thread if more blocks are needed to process it. pub async fn process_chain_segment( diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 503d2f12618..844fc53ab17 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -1,7 +1,6 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. #![cfg(test)] -use crate::network_beacon_processor::DELAYED_PEER_CACHE_SIZE; use crate::{ network_beacon_processor::{ ChainSegmentProcessId, DuplicateCache, InvalidBlockStorage, NetworkBeaconProcessor, @@ -24,8 +23,6 @@ use lighthouse_network::{ types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, Client, MessageId, NetworkGlobals, PeerId, Response, }; -use lru::LruCache; -use parking_lot::Mutex; use slot_clock::SlotClock; use std::iter::Iterator; use std::sync::Arc; @@ -223,7 +220,6 @@ impl TestRig { reprocess_tx: work_reprocessing_tx.clone(), network_globals: network_globals.clone(), invalid_block_storage: InvalidBlockStorage::Disabled, - delayed_lookup_peers: Mutex::new(LruCache::new(DELAYED_PEER_CACHE_SIZE)), executor: executor.clone(), log: log.clone(), }; diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 5d3dde90ce0..f56a3b7445e 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -21,8 +21,6 @@ use lighthouse_network::{ MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, }; use logging::TimeLatch; -use lru::LruCache; -use parking_lot::Mutex; use slog::{crit, debug, o, trace}; use slog::{error, warn}; use std::sync::Arc; @@ -111,14 +109,10 @@ impl Router { reprocess_tx: beacon_processor_reprocess_tx, network_globals: network_globals.clone(), invalid_block_storage, - delayed_lookup_peers: Mutex::new(LruCache::new( - crate::network_beacon_processor::DELAYED_PEER_CACHE_SIZE, - )), executor: executor.clone(), log: log.clone(), }; let network_beacon_processor = Arc::new(network_beacon_processor); - network_beacon_processor.spawn_delayed_lookup_service(); // spawn the sync thread crate::sync::manager::spawn( diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index e089ef4fef3..7a1be46e69d 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -3,8 +3,7 @@ use crate::sync::block_lookups::single_block_lookup::{ LookupRequestError, LookupVerifyError, SingleBlockLookup, SingleLookupRequestState, State, }; use crate::sync::block_lookups::{ - 
BlobRequestState, BlockLookups, BlockRequestState, PeerShouldHave, - SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, + BlobRequestState, BlockLookups, BlockRequestState, PeerId, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, }; use crate::sync::manager::{BlockProcessType, Id, SingleLookupReqId}; use crate::sync::network_context::SyncNetworkContext; @@ -13,7 +12,6 @@ use beacon_chain::data_availability_checker::{AvailabilityView, ChildComponents} use beacon_chain::{get_block_root, BeaconChainTypes}; use lighthouse_network::rpc::methods::BlobsByRootRequest; use lighthouse_network::rpc::BlocksByRootRequest; -use lighthouse_network::PeerId; use rand::prelude::IteratorRandom; use ssz_types::VariableList; use std::ops::IndexMut; @@ -89,7 +87,7 @@ pub trait RequestState { /* Request building methods */ /// Construct a new request. - fn build_request(&mut self) -> Result<(PeerShouldHave, Self::RequestType), LookupRequestError> { + fn build_request(&mut self) -> Result<(PeerId, Self::RequestType), LookupRequestError> { // Verify and construct request. self.too_many_attempts()?; let peer = self.get_peer()?; @@ -121,7 +119,7 @@ pub trait RequestState { id, req_counter: self.get_state().req_counter, }; - Self::make_request(id, peer_id.to_peer_id(), request, cx) + Self::make_request(id, peer_id, request, cx) } /// Verify the current request has not exceeded the maximum number of attempts. @@ -140,26 +138,15 @@ pub trait RequestState { /// Get the next peer to request. Draws from the set of peers we think should have both the /// block and blob first. If that fails, we draw from the set of peers that may have either. - fn get_peer(&mut self) -> Result { + fn get_peer(&mut self) -> Result { let request_state = self.get_state_mut(); - let available_peer_opt = request_state + let peer_id = request_state .available_peers .iter() .choose(&mut rand::thread_rng()) .copied() - .map(PeerShouldHave::BlockAndBlobs); - - let Some(peer_id) = available_peer_opt.or_else(|| { - request_state - .potential_peers - .iter() - .choose(&mut rand::thread_rng()) - .copied() - .map(PeerShouldHave::Neither) - }) else { - return Err(LookupRequestError::NoPeers); - }; - request_state.used_peers.insert(peer_id.to_peer_id()); + .ok_or(LookupRequestError::NoPeers)?; + request_state.used_peers.insert(peer_id); Ok(peer_id) } @@ -211,7 +198,7 @@ pub trait RequestState { &mut self, expected_block_root: Hash256, response: Option, - peer_id: PeerShouldHave, + peer_id: PeerId, ) -> Result, LookupVerifyError>; /// A getter for the parent root of the response. Returns an `Option` because we won't know @@ -241,11 +228,6 @@ pub trait RequestState { cx: &SyncNetworkContext, ) -> Result<(), LookupRequestError>; - /// Remove the peer from the lookup if it is useless. - fn remove_if_useless(&mut self, peer: &PeerId) { - self.get_state_mut().remove_peer_if_useless(peer) - } - /// Register a failure to process the block or blob. 
fn register_failure_downloading(&mut self) { self.get_state_mut().register_failure_downloading() @@ -290,7 +272,7 @@ impl RequestState for BlockRequestState &mut self, expected_block_root: Hash256, response: Option, - peer_id: PeerShouldHave, + peer_id: PeerId, ) -> Result>>, LookupVerifyError> { match response { Some(block) => { @@ -310,13 +292,8 @@ impl RequestState for BlockRequestState } } None => { - if peer_id.should_have_block() { - self.state.register_failure_downloading(); - Err(LookupVerifyError::NoBlockReturned) - } else { - self.state.state = State::AwaitingDownload; - Err(LookupVerifyError::BenignFailure) - } + self.state.register_failure_downloading(); + Err(LookupVerifyError::NoBlockReturned) } } } @@ -396,7 +373,7 @@ impl RequestState for BlobRequestState, - peer_id: PeerShouldHave, + peer_id: PeerId, ) -> Result>, LookupVerifyError> { match blob { Some(blob) => { diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index c5732069a00..62cdc4fa223 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -7,9 +7,7 @@ use crate::metrics; use crate::network_beacon_processor::ChainSegmentProcessId; use crate::sync::block_lookups::common::LookupType; use crate::sync::block_lookups::parent_lookup::{ParentLookup, RequestError}; -use crate::sync::block_lookups::single_block_lookup::{ - CachedChild, LookupRequestError, LookupVerifyError, -}; +use crate::sync::block_lookups::single_block_lookup::{CachedChild, LookupRequestError}; use crate::sync::manager::{Id, SingleLookupReqId}; use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; pub use beacon_chain::data_availability_checker::ChildComponents; @@ -30,11 +28,9 @@ pub use single_block_lookup::{BlobRequestState, BlockRequestState}; use slog::{debug, error, trace, warn, Logger}; use smallvec::SmallVec; use std::collections::{HashMap, VecDeque}; -use std::fmt::Debug; use std::sync::Arc; use std::time::Duration; use store::Hash256; -use strum::Display; use types::blob_sidecar::FixedBlobSidecarList; use types::Slot; @@ -49,43 +45,6 @@ pub type DownloadedBlock = (Hash256, RpcBlock); const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; -/// This enum is used to track what a peer *should* be able to respond with based on -/// other messages we've seen from this peer on the network. This is useful for peer scoring. -/// We expect a peer tracked by the `BlockAndBlobs` variant to be able to respond to all -/// components of a block. This peer has either sent an attestation for the requested block -/// or has forwarded a block or blob that is a descendant of the requested block. An honest node -/// should not attest unless it has all components of a block, and it should not forward -/// messages if it does not have all components of the parent block. A peer tracked by the -/// `Neither` variant has likely just sent us a block or blob over gossip, in which case we -/// can't know whether the peer has all components of the block, and could be acting honestly -/// by forwarding a message without any other block components. 
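// Net effect of removing this enum: every lookup peer is treated as if it
// should have all components of a block, so an empty or invalid response is
// always registered as a download failure and penalized rather than forgiven
// as a possible gossip-origin (`Neither`) peer. Call sites now pass plain
// ids, e.g. (a sketch of the sync-manager hunk later in this patch):
//
//     self.block_lookups
//         .search_block(block_hash, &[peer_id], &mut self.network);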
-#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, Display)] -pub enum PeerShouldHave { - BlockAndBlobs(PeerId), - Neither(PeerId), -} - -impl PeerShouldHave { - fn as_peer_id(&self) -> &PeerId { - match self { - PeerShouldHave::BlockAndBlobs(id) => id, - PeerShouldHave::Neither(id) => id, - } - } - fn to_peer_id(self) -> PeerId { - match self { - PeerShouldHave::BlockAndBlobs(id) => id, - PeerShouldHave::Neither(id) => id, - } - } - fn should_have_block(&self) -> bool { - match self { - PeerShouldHave::BlockAndBlobs(_) => true, - PeerShouldHave::Neither(_) => false, - } - } -} - pub struct BlockLookups { /// Parent chain lookups being downloaded. parent_lookups: SmallVec<[ParentLookup; 3]>, @@ -123,7 +82,7 @@ impl BlockLookups { pub fn search_block( &mut self, block_root: Hash256, - peer_source: &[PeerShouldHave], + peer_source: &[PeerId], cx: &mut SyncNetworkContext, ) { self.new_current_lookup(block_root, None, peer_source, cx) @@ -139,7 +98,7 @@ impl BlockLookups { &mut self, block_root: Hash256, child_components: ChildComponents, - peer_source: &[PeerShouldHave], + peer_source: &[PeerId], cx: &mut SyncNetworkContext, ) { self.new_current_lookup(block_root, Some(child_components), peer_source, cx) @@ -180,7 +139,7 @@ impl BlockLookups { &mut self, block_root: Hash256, child_components: Option>, - peers: &[PeerShouldHave], + peers: &[PeerId], cx: &mut SyncNetworkContext, ) { // Do not re-request a block that is already being requested @@ -248,9 +207,6 @@ impl BlockLookups { peer_id: PeerId, cx: &mut SyncNetworkContext, ) { - // Gossip blocks or blobs shouldn't be propagated if parents are unavailable. - let peer_source = PeerShouldHave::BlockAndBlobs(peer_id); - // If this block or it's parent is part of a known failed chain, ignore it. if self.failed_chains.contains(&parent_root) || self.failed_chains.contains(&block_root) { debug!(self.log, "Block is from a past failed chain. Dropping"; @@ -263,7 +219,7 @@ impl BlockLookups { if let Some(parent_lookup) = self.parent_lookups.iter_mut().find(|parent_req| { parent_req.contains_block(&block_root) || parent_req.is_for_block(block_root) }) { - parent_lookup.add_peer(peer_source); + parent_lookup.add_peer(peer_id); // we are already searching for this block, ignore it return; } @@ -279,7 +235,7 @@ impl BlockLookups { let parent_lookup = ParentLookup::new( block_root, parent_root, - peer_source, + peer_id, self.da_checker.clone(), cx, ); @@ -398,14 +354,8 @@ impl BlockLookups { "response_type" => ?response_type, "error" => ?e ); - if matches!(e, LookupVerifyError::BenignFailure) { - request_state - .get_state_mut() - .remove_peer_if_useless(&peer_id); - } else { - let msg = e.into(); - cx.report_peer(peer_id, PeerAction::LowToleranceError, msg); - }; + let msg = e.into(); + cx.report_peer(peer_id, PeerAction::LowToleranceError, msg); request_state.register_failure_downloading(); lookup.request_block_and_blobs(cx)?; @@ -456,7 +406,7 @@ impl BlockLookups { // we should penalize the blobs peer because they did not provide all blobs on the // initial request. 
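// (With the `Neither` variant gone, `penalize_blob_peer` also loses its
// `penalize_always` escape hatch in the hunk below: a blob peer that withheld
// required blobs is now always reported.)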
if lookup.both_components_downloaded() { - lookup.penalize_blob_peer(false, cx); + lookup.penalize_blob_peer(cx); lookup .blob_request_state .state @@ -619,15 +569,6 @@ impl BlockLookups { "bbroot_failed_chains", ); } - ParentVerifyError::BenignFailure => { - trace!( - self.log, - "Requested peer could not respond to block request, requesting a new peer"; - ); - let request_state = R::request_state_mut(&mut parent_lookup.current_parent_request); - request_state.remove_if_useless(&peer_id); - parent_lookup.request_parent(cx)?; - } } Ok(()) } @@ -846,7 +787,7 @@ impl BlockLookups { request_state.get_state_mut().component_processed = true; if lookup.both_components_processed() { - lookup.penalize_blob_peer(false, cx); + lookup.penalize_blob_peer(cx); // Try it again if possible. lookup @@ -864,7 +805,7 @@ impl BlockLookups { &mut self, cx: &mut SyncNetworkContext, mut lookup: SingleBlockLookup, - peer_id: PeerShouldHave, + peer_id: PeerId, e: BlockError, ) -> Result>, LookupRequestError> { let root = lookup.block_root(); @@ -884,7 +825,7 @@ impl BlockLookups { let parent_root = block.parent_root(); lookup.add_child_components(block.into()); lookup.request_block_and_blobs(cx)?; - self.search_parent(slot, root, parent_root, peer_id.to_peer_id(), cx); + self.search_parent(slot, root, parent_root, peer_id, cx); } ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { // These errors indicate that the execution layer is offline @@ -920,7 +861,7 @@ impl BlockLookups { warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); if let Ok(block_peer) = lookup.block_request_state.state.processing_peer() { cx.report_peer( - block_peer.to_peer_id(), + block_peer, PeerAction::MidToleranceError, "single_block_failure", ); @@ -1141,13 +1082,9 @@ impl BlockLookups { let Ok(block_peer_id) = parent_lookup.block_processing_peer() else { return; }; - let block_peer_id = block_peer_id.to_peer_id(); // We may not have a blob peer, if there were no blobs required for this block. 
- let blob_peer_id = parent_lookup - .blob_processing_peer() - .ok() - .map(PeerShouldHave::to_peer_id); + let blob_peer_id = parent_lookup.blob_processing_peer().ok(); // all else we consider the chain a failure and downvote the peer that sent // us the last block diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index 93bd2f57c09..5c2e90b48c9 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,5 +1,5 @@ use super::single_block_lookup::{LookupRequestError, LookupVerifyError, SingleBlockLookup}; -use super::{DownloadedBlock, PeerShouldHave}; +use super::{DownloadedBlock, PeerId}; use crate::sync::block_lookups::common::Parent; use crate::sync::block_lookups::common::RequestState; use crate::sync::{manager::SLOT_IMPORT_TOLERANCE, network_context::SyncNetworkContext}; @@ -8,7 +8,6 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::data_availability_checker::{ChildComponents, DataAvailabilityChecker}; use beacon_chain::BeaconChainTypes; use itertools::Itertools; -use lighthouse_network::PeerId; use std::collections::VecDeque; use std::sync::Arc; use store::Hash256; @@ -41,7 +40,6 @@ pub enum ParentVerifyError { ExtraBlobsReturned, InvalidIndex(u64), PreviousFailure { parent_root: Hash256 }, - BenignFailure, } #[derive(Debug, PartialEq, Eq)] @@ -61,7 +59,7 @@ impl ParentLookup { pub fn new( block_root: Hash256, parent_root: Hash256, - peer_id: PeerShouldHave, + peer_id: PeerId, da_checker: Arc>, cx: &mut SyncNetworkContext, ) -> Self { @@ -126,14 +124,14 @@ impl ParentLookup { .update_requested_parent_block(next_parent) } - pub fn block_processing_peer(&self) -> Result { + pub fn block_processing_peer(&self) -> Result { self.current_parent_request .block_request_state .state .processing_peer() } - pub fn blob_processing_peer(&self) -> Result { + pub fn blob_processing_peer(&self) -> Result { self.current_parent_request .blob_request_state .state @@ -211,12 +209,12 @@ impl ParentLookup { Ok(root_and_verified) } - pub fn add_peer(&mut self, peer: PeerShouldHave) { + pub fn add_peer(&mut self, peer: PeerId) { self.current_parent_request.add_peer(peer) } /// Adds a list of peers to the parent request. 
- pub fn add_peers(&mut self, peers: &[PeerShouldHave]) { + pub fn add_peers(&mut self, peers: &[PeerId]) { self.current_parent_request.add_peers(peers) } @@ -248,7 +246,6 @@ impl From for ParentVerifyError { E::ExtraBlobsReturned => ParentVerifyError::ExtraBlobsReturned, E::InvalidIndex(index) => ParentVerifyError::InvalidIndex(index), E::NotEnoughBlobsReturned => ParentVerifyError::NotEnoughBlobsReturned, - E::BenignFailure => ParentVerifyError::BenignFailure, } } } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index e0f7d880949..e10e8328cde 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,4 +1,4 @@ -use super::PeerShouldHave; +use super::PeerId; use crate::sync::block_lookups::common::{Lookup, RequestState}; use crate::sync::block_lookups::Id; use crate::sync::network_context::SyncNetworkContext; @@ -8,7 +8,7 @@ use beacon_chain::data_availability_checker::{ }; use beacon_chain::data_availability_checker::{AvailabilityView, ChildComponents}; use beacon_chain::BeaconChainTypes; -use lighthouse_network::{PeerAction, PeerId}; +use lighthouse_network::PeerAction; use slog::{trace, Logger}; use std::collections::HashSet; use std::fmt::Debug; @@ -22,8 +22,8 @@ use types::EthSpec; #[derive(Debug, PartialEq, Eq)] pub enum State { AwaitingDownload, - Downloading { peer_id: PeerShouldHave }, - Processing { peer_id: PeerShouldHave }, + Downloading { peer_id: PeerId }, + Processing { peer_id: PeerId }, } #[derive(Debug, PartialEq, Eq, IntoStaticStr)] @@ -35,10 +35,6 @@ pub enum LookupVerifyError { ExtraBlobsReturned, NotEnoughBlobsReturned, InvalidIndex(u64), - /// We don't have enough information to know - /// whether the peer is at fault or simply missed - /// what was requested on gossip. - BenignFailure, } #[derive(Debug, PartialEq, Eq, IntoStaticStr)] @@ -66,7 +62,7 @@ impl SingleBlockLookup { pub fn new( requested_block_root: Hash256, child_components: Option>, - peers: &[PeerShouldHave], + peers: &[PeerId], da_checker: Arc>, id: Id, ) -> Self { @@ -191,21 +187,13 @@ impl SingleBlockLookup { } /// Add all given peers to both block and blob request states. - pub fn add_peer(&mut self, peer: PeerShouldHave) { - match peer { - PeerShouldHave::BlockAndBlobs(peer_id) => { - self.block_request_state.state.add_peer(&peer_id); - self.blob_request_state.state.add_peer(&peer_id); - } - PeerShouldHave::Neither(peer_id) => { - self.block_request_state.state.add_potential_peer(&peer_id); - self.blob_request_state.state.add_potential_peer(&peer_id); - } - } + pub fn add_peer(&mut self, peer_id: PeerId) { + self.block_request_state.state.add_peer(&peer_id); + self.blob_request_state.state.add_peer(&peer_id); } /// Add all given peers to both block and blob request states. - pub fn add_peers(&mut self, peers: &[PeerShouldHave]) { + pub fn add_peers(&mut self, peers: &[PeerId]) { for peer in peers { self.add_peer(*peer); } @@ -293,38 +281,31 @@ impl SingleBlockLookup { } } - /// Penalizes a blob peer if it should have blobs but didn't return them to us. Does not penalize - /// a peer who we request blobs from based on seeing a block or blobs over gossip. This may - /// have been a benign failure. - pub fn penalize_blob_peer(&mut self, penalize_always: bool, cx: &SyncNetworkContext) { + /// Penalizes a blob peer if it should have blobs but didn't return them to us. 
+ pub fn penalize_blob_peer(&mut self, cx: &SyncNetworkContext) { if let Ok(blob_peer) = self.blob_request_state.state.processing_peer() { - if penalize_always || matches!(blob_peer, PeerShouldHave::BlockAndBlobs(_)) { - cx.report_peer( - blob_peer.to_peer_id(), - PeerAction::MidToleranceError, - "single_blob_failure", - ); - } - self.blob_request_state - .state - .remove_peer_if_useless(blob_peer.as_peer_id()); + cx.report_peer( + blob_peer, + PeerAction::MidToleranceError, + "single_blob_failure", + ); } } - /// This failure occurs on download, so register a failure downloading, penalize the peer if - /// necessary and clear the blob cache. + /// This failure occurs on download, so register a failure downloading, penalize the peer + /// and clear the blob cache. pub fn handle_consistency_failure(&mut self, cx: &SyncNetworkContext) { - self.penalize_blob_peer(false, cx); + self.penalize_blob_peer(cx); if let Some(cached_child) = self.child_components.as_mut() { cached_child.clear_blobs(); } self.blob_request_state.state.register_failure_downloading() } - /// This failure occurs after processing, so register a failure processing, penalize the peer if - /// necessary and clear the blob cache. + /// This failure occurs after processing, so register a failure processing, penalize the peer + /// and clear the blob cache. pub fn handle_availability_check_failure(&mut self, cx: &SyncNetworkContext) { - self.penalize_blob_peer(true, cx); + self.penalize_blob_peer(cx); if let Some(cached_child) = self.child_components.as_mut() { cached_child.clear_blobs(); } @@ -345,7 +326,7 @@ pub struct BlobRequestState { } impl BlobRequestState { - pub fn new(block_root: Hash256, peer_source: &[PeerShouldHave], is_deneb: bool) -> Self { + pub fn new(block_root: Hash256, peer_source: &[PeerId], is_deneb: bool) -> Self { let default_ids = MissingBlobs::new_without_block(block_root, is_deneb); Self { requested_ids: default_ids, @@ -364,7 +345,7 @@ pub struct BlockRequestState { } impl BlockRequestState { - pub fn new(block_root: Hash256, peers: &[PeerShouldHave]) -> Self { + pub fn new(block_root: Hash256, peers: &[PeerId]) -> Self { Self { requested_block_root: block_root, state: SingleLookupRequestState::new(peers), @@ -396,8 +377,6 @@ pub struct SingleLookupRequestState { pub state: State, /// Peers that should have this block or blob. pub available_peers: HashSet, - /// Peers that mar or may not have this block or blob. - pub potential_peers: HashSet, /// Peers from which we have requested this block. pub used_peers: HashSet, /// How many times have we attempted to process this block or blob. @@ -417,24 +396,15 @@ pub struct SingleLookupRequestState { } impl SingleLookupRequestState { - pub fn new(peers: &[PeerShouldHave]) -> Self { + pub fn new(peers: &[PeerId]) -> Self { let mut available_peers = HashSet::default(); - let mut potential_peers = HashSet::default(); - for peer in peers { - match peer { - PeerShouldHave::BlockAndBlobs(peer_id) => { - available_peers.insert(*peer_id); - } - PeerShouldHave::Neither(peer_id) => { - potential_peers.insert(*peer_id); - } - } + for peer in peers.iter().copied() { + available_peers.insert(peer); } Self { state: State::AwaitingDownload, available_peers, - potential_peers, used_peers: HashSet::default(), failed_processing: 0, failed_downloading: 0, @@ -462,25 +432,16 @@ impl SingleLookupRequestState { self.failed_processing + self.failed_downloading } - /// This method should be used for peers wrapped in `PeerShouldHave::BlockAndBlobs`. 
+ /// This method should be used for peers wrapped in `PeerId::BlockAndBlobs`. pub fn add_peer(&mut self, peer_id: &PeerId) { - self.potential_peers.remove(peer_id); self.available_peers.insert(*peer_id); } - /// This method should be used for peers wrapped in `PeerShouldHave::Neither`. - pub fn add_potential_peer(&mut self, peer_id: &PeerId) { - if !self.available_peers.contains(peer_id) { - self.potential_peers.insert(*peer_id); - } - } - /// If a peer disconnects, this request could be failed. If so, an error is returned pub fn check_peer_disconnected(&mut self, dc_peer_id: &PeerId) -> Result<(), ()> { self.available_peers.remove(dc_peer_id); - self.potential_peers.remove(dc_peer_id); if let State::Downloading { peer_id } = &self.state { - if peer_id.as_peer_id() == dc_peer_id { + if peer_id == dc_peer_id { // Peer disconnected before providing a block self.register_failure_downloading(); return Err(()); @@ -491,21 +452,13 @@ impl SingleLookupRequestState { /// Returns the id peer we downloaded from if we have downloaded a verified block, otherwise /// returns an error. - pub fn processing_peer(&self) -> Result { + pub fn processing_peer(&self) -> Result { if let State::Processing { peer_id } = &self.state { Ok(*peer_id) } else { Err(()) } } - - /// Remove the given peer from the set of potential peers, so long as there is at least one - /// other potential peer or we have any available peers. - pub fn remove_peer_if_useless(&mut self, peer_id: &PeerId) { - if !self.available_peers.is_empty() || self.potential_peers.len() > 1 { - self.potential_peers.remove(peer_id); - } - } } impl slog::Value for SingleBlockLookup { @@ -609,7 +562,7 @@ mod tests { #[test] fn test_happy_path() { - let peer_id = PeerShouldHave::BlockAndBlobs(PeerId::random()); + let peer_id = PeerId::random(); let block = rand_block(); let spec = E::default_spec(); let slot_clock = TestingSlotClock::new( @@ -649,7 +602,7 @@ mod tests { #[test] fn test_block_lookup_failures() { - let peer_id = PeerShouldHave::BlockAndBlobs(PeerId::random()); + let peer_id = PeerId::random(); let block = rand_block(); let spec = E::default_spec(); let slot_clock = TestingSlotClock::new( diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 9dd7395a4e0..83f0b26156b 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -233,11 +233,7 @@ fn test_single_block_lookup_happy_path() { let peer_id = PeerId::random(); let block_root = block.canonical_root(); // Trigger the request - bl.search_block( - block_root, - &[PeerShouldHave::BlockAndBlobs(peer_id)], - &mut cx, - ); + bl.search_block(block_root, &[peer_id], &mut cx); let id = rig.expect_lookup_request(response_type); // If we're in deneb, a blob request should have been triggered as well, // we don't require a response because we're generateing 0-blob blocks in this test. @@ -285,11 +281,7 @@ fn test_single_block_lookup_empty_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_block( - block_hash, - &[PeerShouldHave::BlockAndBlobs(peer_id)], - &mut cx, - ); + bl.search_block(block_hash, &[peer_id], &mut cx); let id = rig.expect_lookup_request(response_type); // If we're in deneb, a blob request should have been triggered as well, // we don't require a response because we're generateing 0-blob blocks in this test. 
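// Usage sketch of the simplified API these tests now exercise: lookup entry
// points take plain `PeerId`s instead of `PeerShouldHave` wrappers (`bl`,
// `cx`, `block_root`, `child_root` and `child_block` are assumed from the
// surrounding test setup):
//
//     let peer_id = PeerId::random();
//     bl.search_block(block_root, &[peer_id], &mut cx);
//     bl.search_child_block(
//         child_root,
//         ChildComponents::new(child_root, Some(child_block), None),
//         &[peer_id],
//         &mut cx,
//     );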
@@ -317,11 +309,7 @@ fn test_single_block_lookup_wrong_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_block( - block_hash, - &[PeerShouldHave::BlockAndBlobs(peer_id)], - &mut cx, - ); + bl.search_block(block_hash, &[peer_id], &mut cx); let id = rig.expect_lookup_request(response_type); // If we're in deneb, a blob request should have been triggered as well, // we don't require a response because we're generateing 0-blob blocks in this test. @@ -359,11 +347,7 @@ fn test_single_block_lookup_failure() { let peer_id = PeerId::random(); // Trigger the request - bl.search_block( - block_hash, - &[PeerShouldHave::BlockAndBlobs(peer_id)], - &mut cx, - ); + bl.search_block(block_hash, &[peer_id], &mut cx); let id = rig.expect_lookup_request(response_type); // If we're in deneb, a blob request should have been triggered as well, // we don't require a response because we're generateing 0-blob blocks in this test. @@ -395,11 +379,7 @@ fn test_single_block_lookup_becomes_parent_request() { let peer_id = PeerId::random(); // Trigger the request - bl.search_block( - block.canonical_root(), - &[PeerShouldHave::BlockAndBlobs(peer_id)], - &mut cx, - ); + bl.search_block(block.canonical_root(), &[peer_id], &mut cx); let id = rig.expect_lookup_request(response_type); // If we're in deneb, a blob request should have been triggered as well, // we don't require a response because we're generateing 0-blob blocks in this test. @@ -981,11 +961,7 @@ fn test_single_block_lookup_ignored_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_block( - block.canonical_root(), - &[PeerShouldHave::BlockAndBlobs(peer_id)], - &mut cx, - ); + bl.search_block(block.canonical_root(), &[peer_id], &mut cx); let id = rig.expect_lookup_request(response_type); // If we're in deneb, a blob request should have been triggered as well, // we don't require a response because we're generateing 0-blob blocks in this test. 
@@ -1205,7 +1181,6 @@ mod deneb_only { AttestationUnknownBlock, GossipUnknownParentBlock, GossipUnknownParentBlob, - GossipUnknownBlockOrBlob, } impl DenebTester { @@ -1234,11 +1209,7 @@ mod deneb_only { let (block_req_id, blob_req_id, parent_block_req_id, parent_blob_req_id) = match request_trigger { RequestTrigger::AttestationUnknownBlock => { - bl.search_block( - block_root, - &[PeerShouldHave::BlockAndBlobs(peer_id)], - &mut cx, - ); + bl.search_block(block_root, &[peer_id], &mut cx); let block_req_id = rig.expect_lookup_request(ResponseType::Block); let blob_req_id = rig.expect_lookup_request(ResponseType::Blob); (Some(block_req_id), Some(blob_req_id), None, None) @@ -1261,7 +1232,7 @@ mod deneb_only { bl.search_child_block( child_root, ChildComponents::new(child_root, Some(child_block), None), - &[PeerShouldHave::Neither(peer_id)], + &[peer_id], &mut cx, ); @@ -1299,7 +1270,7 @@ mod deneb_only { bl.search_child_block( child_root, ChildComponents::new(child_root, None, Some(blobs)), - &[PeerShouldHave::Neither(peer_id)], + &[peer_id], &mut cx, ); @@ -1316,12 +1287,6 @@ mod deneb_only { Some(parent_blob_req_id), ) } - RequestTrigger::GossipUnknownBlockOrBlob => { - bl.search_block(block_root, &[PeerShouldHave::Neither(peer_id)], &mut cx); - let block_req_id = rig.expect_lookup_request(ResponseType::Block); - let blob_req_id = rig.expect_lookup_request(ResponseType::Blob); - (Some(block_req_id), Some(blob_req_id), None, None) - } }; Some(Self { @@ -1838,186 +1803,6 @@ mod deneb_only { .block_response_triggering_process(); } - #[test] - fn single_block_and_blob_lookup_block_returned_first_gossip() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { - return; - }; - - tester - .block_response_triggering_process() - .blobs_response() - .blobs_response_was_valid() - .block_imported(); - } - - #[test] - fn single_block_and_blob_lookup_blobs_returned_first_gossip() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { - return; - }; - - tester - .blobs_response() - .blobs_response_was_valid() - .block_response_triggering_process() - .block_imported(); - } - - #[test] - fn single_block_and_blob_lookup_empty_response_gossip() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { - return; - }; - - tester - .empty_block_response() - .expect_block_request() - .expect_no_penalty() - .expect_no_blobs_request() - .empty_blobs_response() - .expect_no_penalty() - .expect_no_block_request() - .expect_no_blobs_request() - .block_response_triggering_process() - .missing_components_from_block_request(); - } - - #[test] - fn single_block_response_then_empty_blob_response_gossip() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { - return; - }; - - tester - .block_response_triggering_process() - .missing_components_from_block_request() - .empty_blobs_response() - .missing_components_from_blob_request() - .expect_blobs_request() - .expect_no_penalty() - .expect_no_block_request(); - } - - #[test] - fn single_blob_response_then_empty_block_response_gossip() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { - return; - }; - - tester - .blobs_response() - .blobs_response_was_valid() - .expect_no_penalty() - .expect_no_block_request() - .expect_no_blobs_request() - .missing_components_from_blob_request() - .empty_block_response() - .expect_block_request() - .expect_no_penalty() - .expect_no_blobs_request(); - } - - #[test] - fn 
single_invalid_block_response_then_blob_response_gossip() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { - return; - }; - - tester - .block_response_triggering_process() - .invalid_block_processed() - .expect_penalty() - .expect_block_request() - .expect_no_blobs_request() - .blobs_response() - .missing_components_from_blob_request() - .expect_no_penalty() - .expect_no_block_request() - .expect_no_block_request(); - } - - #[test] - fn single_block_response_then_invalid_blob_response_gossip() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { - return; - }; - - tester - .block_response_triggering_process() - .missing_components_from_block_request() - .blobs_response() - .invalid_blob_processed() - .expect_penalty() - .expect_blobs_request() - .expect_no_block_request(); - } - - #[test] - fn single_block_response_then_too_few_blobs_response_gossip() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { - return; - }; - - tester - .block_response_triggering_process() - .missing_components_from_block_request() - .invalidate_blobs_too_few() - .blobs_response() - .missing_components_from_blob_request() - .expect_blobs_request() - .expect_no_penalty() - .expect_no_block_request(); - } - - #[test] - fn single_block_response_then_too_many_blobs_response_gossip() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { - return; - }; - - tester - .block_response_triggering_process() - .invalidate_blobs_too_many() - .blobs_response() - .expect_penalty() - .expect_blobs_request() - .expect_no_block_request(); - } - #[test] - fn too_few_blobs_response_then_block_response_gossip() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { - return; - }; - - tester - .invalidate_blobs_too_few() - .blobs_response() - .blobs_response_was_valid() - .missing_components_from_blob_request() - .expect_no_penalty() - .expect_no_blobs_request() - .expect_no_block_request() - .block_response_triggering_process() - .missing_components_from_block_request() - .expect_blobs_request(); - } - - #[test] - fn too_many_blobs_response_then_block_response_gossip() { - let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownBlockOrBlob) else { - return; - }; - - tester - .invalidate_blobs_too_many() - .blobs_response() - .expect_penalty() - .expect_blobs_request() - .expect_no_block_request() - .block_response_triggering_process(); - } - #[test] fn parent_block_unknown_parent() { let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock) else { diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 3bd32308ae8..bcb239aaa05 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -34,7 +34,7 @@ //! search for the block and subsequently search for parents if needed. use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart}; -use super::block_lookups::{BlockLookups, PeerShouldHave}; +use super::block_lookups::BlockLookups; use super::network_context::{BlockOrBlob, SyncNetworkContext}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; @@ -139,13 +139,6 @@ pub enum SyncMessage { /// manager to attempt to find the block matching the unknown hash. 
UnknownBlockHashFromAttestation(PeerId, Hash256), - /// A peer has sent a blob that references a block that is unknown or a peer has sent a block for - /// which we haven't received blobs. - /// - /// We will either attempt to find the block matching the unknown hash immediately or queue a lookup, - /// which will then trigger the request when we receive `MissingGossipBlockComponentsDelayed`. - MissingGossipBlockComponents(Vec, Hash256), - /// A peer has disconnected. Disconnect(PeerId), @@ -658,31 +651,8 @@ impl SyncManager { SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_hash) => { // If we are not synced, ignore this block. if self.synced_and_connected(&peer_id) { - self.block_lookups.search_block( - block_hash, - &[PeerShouldHave::BlockAndBlobs(peer_id)], - &mut self.network, - ); - } - } - SyncMessage::MissingGossipBlockComponents(peer_id, block_root) => { - let peers_guard = self.network_globals().peers.read(); - let connected_peers = peer_id - .into_iter() - .filter_map(|peer_id| { - if peers_guard.is_connected(&peer_id) { - Some(PeerShouldHave::Neither(peer_id)) - } else { - None - } - }) - .collect::>(); - drop(peers_guard); - - // If we are not synced, ignore this block. - if self.synced() && !connected_peers.is_empty() { self.block_lookups - .search_block(block_root, &connected_peers, &mut self.network) + .search_block(block_hash, &[peer_id], &mut self.network); } } SyncMessage::Disconnect(peer_id) => { @@ -766,7 +736,7 @@ impl SyncManager { self.block_lookups.search_child_block( block_root, child_components, - &[PeerShouldHave::Neither(peer_id)], + &[peer_id], &mut self.network, ); } From 78ffa378b40e91cff23fdfdeb3c2dbb6a5520597 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Fri, 8 Dec 2023 15:48:03 -0600 Subject: [PATCH 13/19] Batch Verify RPC Blobs (#4934) --- .../beacon_chain/src/blob_verification.rs | 48 +++++++++++++++---- .../src/data_availability_checker.rs | 18 ++++--- .../overflow_lru_cache.rs | 4 +- 3 files changed, 48 insertions(+), 22 deletions(-) diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 05457adab32..cc087e74a07 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -17,8 +17,7 @@ use ssz_types::VariableList; use tree_hash::TreeHash; use types::blob_sidecar::BlobIdentifier; use types::{ - BeaconStateError, BlobSidecar, BlobSidecarList, CloneConfig, EthSpec, Hash256, - SignedBeaconBlockHeader, Slot, + BeaconStateError, BlobSidecar, CloneConfig, EthSpec, Hash256, SignedBeaconBlockHeader, Slot, }; /// An error occurred while validating a gossip blob. 
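// Usage sketch of the batch path introduced in the hunks below: collect the
// RPC blobs, KZG-verify them in a single batched call via
// `KzgVerifiedBlobList`, then hand the verified list to the availability
// cache. (`kzg`, `block_root` and the `FixedBlobSidecarList` `blobs` are
// assumed from the surrounding RPC-processing context.)
//
//     let verified_blobs =
//         KzgVerifiedBlobList::new(Vec::from(blobs).into_iter().flatten(), kzg)
//             .map_err(AvailabilityCheckError::Kzg)?;
//     self.availability_cache
//         .put_kzg_verified_blobs(block_root, verified_blobs)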
@@ -279,7 +278,6 @@ impl KzgVerifiedBlob { pub fn new(blob: Arc>, kzg: &Kzg) -> Result { verify_kzg_for_blob(blob, kzg) } - pub fn to_blob(self) -> Arc> { self.blob } @@ -310,21 +308,51 @@ pub fn verify_kzg_for_blob( kzg: &Kzg, ) -> Result, KzgError> { validate_blob::(kzg, &blob.blob, blob.kzg_commitment, blob.kzg_proof)?; - Ok(KzgVerifiedBlob { blob }) } +pub struct KzgVerifiedBlobList { + verified_blobs: Vec>, +} + +impl KzgVerifiedBlobList { + pub fn new>>>( + blob_list: I, + kzg: &Kzg, + ) -> Result { + let blobs = blob_list.into_iter().collect::>(); + verify_kzg_for_blob_list(blobs.iter(), kzg)?; + Ok(Self { + verified_blobs: blobs + .into_iter() + .map(|blob| KzgVerifiedBlob { blob }) + .collect(), + }) + } +} + +impl IntoIterator for KzgVerifiedBlobList { + type Item = KzgVerifiedBlob; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.verified_blobs.into_iter() + } +} + /// Complete kzg verification for a list of `BlobSidecar`s. /// Returns an error if any of the `BlobSidecar`s fails kzg verification. /// /// Note: This function should be preferred over calling `verify_kzg_for_blob` /// in a loop since this function kzg verifies a list of blobs more efficiently. -pub fn verify_kzg_for_blob_list( - blob_list: &BlobSidecarList, - kzg: &Kzg, -) -> Result<(), KzgError> { - let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list - .iter() +pub fn verify_kzg_for_blob_list<'a, T: EthSpec, I>( + blob_iter: I, + kzg: &'a Kzg, +) -> Result<(), KzgError> +where + I: Iterator>>, +{ + let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_iter .map(|blob| (&blob.blob, (blob.kzg_commitment, blob.kzg_proof))) .unzip(); validate_blobs::(kzg, commitments.as_slice(), blobs, proofs.as_slice()) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 67e98a01c1a..eff8d1d9d09 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -1,4 +1,4 @@ -use crate::blob_verification::{verify_kzg_for_blob_list, GossipVerifiedBlob, KzgVerifiedBlob}; +use crate::blob_verification::{verify_kzg_for_blob_list, GossipVerifiedBlob, KzgVerifiedBlobList}; use crate::block_verification_types::{ AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock, }; @@ -197,15 +197,13 @@ impl DataAvailabilityChecker { block_root: Hash256, blobs: FixedBlobSidecarList, ) -> Result, AvailabilityCheckError> { - let mut verified_blobs = vec![]; - if let Some(kzg) = self.kzg.as_ref() { - for blob in Vec::from(blobs).into_iter().flatten() { - verified_blobs - .push(KzgVerifiedBlob::new(blob, kzg).map_err(AvailabilityCheckError::Kzg)?); - } - } else { + let Some(kzg) = self.kzg.as_ref() else { return Err(AvailabilityCheckError::KzgNotInitialized); }; + + let verified_blobs = KzgVerifiedBlobList::new(Vec::from(blobs).into_iter().flatten(), kzg) + .map_err(AvailabilityCheckError::Kzg)?; + self.availability_cache .put_kzg_verified_blobs(block_root, verified_blobs) } @@ -261,7 +259,7 @@ impl DataAvailabilityChecker { .kzg .as_ref() .ok_or(AvailabilityCheckError::KzgNotInitialized)?; - verify_kzg_for_blob_list(&blob_list, kzg) + verify_kzg_for_blob_list(blob_list.iter(), kzg) .map_err(AvailabilityCheckError::Kzg)?; Some(blob_list) } else { @@ -302,7 +300,7 @@ impl DataAvailabilityChecker { .kzg .as_ref() .ok_or(AvailabilityCheckError::KzgNotInitialized)?; - verify_kzg_for_blob_list(&all_blobs, kzg)?; + 
verify_kzg_for_blob_list(all_blobs.iter(), kzg)?; } for block in blocks { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 7997a2e5e36..290be988c33 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -414,10 +414,10 @@ impl OverflowLRUCache { } } - pub fn put_kzg_verified_blobs( + pub fn put_kzg_verified_blobs>>( &self, block_root: Hash256, - kzg_verified_blobs: Vec>, + kzg_verified_blobs: I, ) -> Result, AvailabilityCheckError> { let mut fixed_blobs = FixedVector::default(); From 69f1b7afec568617994cc64d2fa4c159c7f91526 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 12 Dec 2023 09:45:54 +1100 Subject: [PATCH 14/19] Disable flood publishing (#4383) * Disable flood publish * Change default configuration --- beacon_node/lighthouse_network/src/config.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index f24b94c9ecb..0eb3f7bc80c 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -363,7 +363,7 @@ impl Default for Config { disable_discovery: false, disable_quic_support: false, upnp_enabled: true, - network_load: 3, + network_load: 4, private: false, subscribe_all_subnets: false, import_all_attestations: false, @@ -422,7 +422,7 @@ impl From for NetworkLoad { mesh_n_high: 10, gossip_lazy: 3, history_gossip: 3, - heartbeat_interval: Duration::from_millis(700), + heartbeat_interval: Duration::from_millis(1000), }, 4 => NetworkLoad { name: "Average", @@ -432,7 +432,7 @@ impl From for NetworkLoad { mesh_n_high: 12, gossip_lazy: 3, history_gossip: 3, - heartbeat_interval: Duration::from_millis(700), + heartbeat_interval: Duration::from_millis(1000), }, // 5 and above _ => NetworkLoad { @@ -443,7 +443,7 @@ impl From for NetworkLoad { mesh_n_high: 15, gossip_lazy: 5, history_gossip: 6, - heartbeat_interval: Duration::from_millis(500), + heartbeat_interval: Duration::from_millis(700), }, } } @@ -506,6 +506,7 @@ pub fn gossipsub_config( .gossip_lazy(load.gossip_lazy) .fanout_ttl(Duration::from_secs(60)) .history_length(12) + .flood_publish(false) .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large .history_gossip(load.history_gossip) .validate_messages() // require validation before propagation From 4cf4819497122daba4c137f6989b16a309f2db6f Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 12 Dec 2023 16:04:29 +1100 Subject: [PATCH 15/19] Add `mergify` merge queue configuration file (#4917) * Add mergify.yml. * Abstract out CI jobs to a "success" job so that a change in the CI jobs don't require modification to mergify configuration on stable branch. * Add new jobs to the `needs` list of `test-suite-success`. * Set `batch_max_wait_time` to 60 s. 
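In practice, the queue configured below speculatively tests up to
`batch_size: 8` queued PRs together, starts a partial batch after waiting at
most `batch_max_wait_time: 60 s`, fails any batch whose checks run longer than
`checks_timeout: 10800 s` (three hours), and lands successful batches with
`merge_method: squash`.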
--- .github/mergify.yml | 13 ++++++++++++ .github/workflows/local-testnet.yml | 13 +++++++++++- .github/workflows/test-suite.yml | 33 +++++++++++++++++++++++++++++ scripts/ci/check-success-job.sh | 13 ++++++++++++ 4 files changed, 71 insertions(+), 1 deletion(-) create mode 100644 .github/mergify.yml create mode 100755 scripts/ci/check-success-job.sh diff --git a/.github/mergify.yml b/.github/mergify.yml new file mode 100644 index 00000000000..ae01e3ffd2d --- /dev/null +++ b/.github/mergify.yml @@ -0,0 +1,13 @@ +queue_rules: + - name: default + batch_size: 8 + batch_max_wait_time: 60 s + checks_timeout: 10800 s + merge_method: squash + queue_conditions: + - "#approved-reviews-by >= 1" + - "check-success=license/cla" + - "check-success=target-branch-check" + merge_conditions: + - "check-success=test-suite-success" + - "check-success=local-testnet-success" diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 74a9071eab5..75a81ce0e7c 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -86,4 +86,15 @@ jobs: - name: Stop local testnet with blinded block production run: ./stop_local_testnet.sh - working-directory: scripts/local_testnet \ No newline at end of file + working-directory: scripts/local_testnet + + # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether + # a PR is safe to merge. New jobs should be added here. + local-testnet-success: + name: local-testnet-success + runs-on: ubuntu-latest + needs: ["run-local-testnet"] + steps: + - uses: actions/checkout@v3 + - name: Check that success job is dependent on all others + run: ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 0a1499340d0..70fb59424ff 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -423,3 +423,36 @@ jobs: cache-target: release - name: Run Makefile to trigger the bash script run: make cli + # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether + # a PR is safe to merge. New jobs should be added here. + test-suite-success: + name: test-suite-success + runs-on: ubuntu-latest + needs: [ + 'target-branch-check', + 'release-tests-ubuntu', + 'release-tests-windows', + 'beacon-chain-tests', + 'op-pool-tests', + 'network-tests', + 'slasher-tests', + 'debug-tests-ubuntu', + 'state-transition-vectors-ubuntu', + 'ef-tests-ubuntu', + 'dockerfile-ubuntu', + 'eth1-simulator-ubuntu', + 'merge-transition-ubuntu', + 'no-eth1-simulator-ubuntu', + 'syncing-simulator-ubuntu', + 'doppelganger-protection-test', + 'execution-engine-integration-ubuntu', + 'check-code', + 'check-msrv', + 'cargo-udeps', + 'compile-with-beta-compiler', + 'cli-check', + ] + steps: + - uses: actions/checkout@v3 + - name: Check that success job is dependent on all others + run: ./scripts/ci/check-success-job.sh ./.github/workflows/test-suite.yml test-suite-success diff --git a/scripts/ci/check-success-job.sh b/scripts/ci/check-success-job.sh new file mode 100755 index 00000000000..dfa5c03257c --- /dev/null +++ b/scripts/ci/check-success-job.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# Check that $SUCCESS_JOB depends on all other jobs in the given $YAML + +set -euf -o pipefail + +YAML=$1 +SUCCESS_JOB=$2 + +yq '... comments="" | .jobs | map(. | key) | .[]' < "$YAML" | grep -v "$SUCCESS_JOB" | sort > all_jobs.txt +yq "... 
comments=\"\" | .jobs.$SUCCESS_JOB.needs[]" < "$YAML" | grep -v "$SUCCESS_JOB" | sort > dep_jobs.txt +diff all_jobs.txt dep_jobs.txt || (echo "COMPLETENESS CHECK FAILED" && exit 1) +rm all_jobs.txt dep_jobs.txt +echo "COMPLETENESS CHECK PASSED" From 153aaa167988d84a058a9ad501acd1ed3d2936d8 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 12 Dec 2023 16:54:22 +1100 Subject: [PATCH 16/19] =?UTF-8?q?Remove=20bors.toml=20=F0=9F=AB=A1=20(#500?= =?UTF-8?q?1)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- bors.toml | 24 ------------------------ 1 file changed, 24 deletions(-) delete mode 100644 bors.toml diff --git a/bors.toml b/bors.toml deleted file mode 100644 index e821b89a813..00000000000 --- a/bors.toml +++ /dev/null @@ -1,24 +0,0 @@ -status = [ - "release-tests-ubuntu", - "release-tests-windows", - "debug-tests-ubuntu", - "state-transition-vectors-ubuntu", - "ef-tests-ubuntu", - "dockerfile-ubuntu", - "eth1-simulator-ubuntu", - "merge-transition-ubuntu", - "no-eth1-simulator-ubuntu", - "check-code", - "cargo-udeps", - "beacon-chain-tests", - "op-pool-tests", - "doppelganger-protection-test", - "execution-engine-integration-ubuntu", - "check-msrv", - "slasher-tests", - "syncing-simulator-ubuntu", - "compile-with-beta-compiler", -] -use_squash_merge = true -timeout_sec = 10800 -pr_status = ["license/cla", "target-branch-check"] From 7f64738085fad88f55e5c27e2baaa312d39e9165 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 13 Dec 2023 02:20:49 +1100 Subject: [PATCH 17/19] Update Rust version in lcli `Dockerfile`. (#5002) --- lcli/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 1ee80e14fd2..2ff4706a919 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.69.0-bullseye AS builder +FROM rust:1.73.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG PORTABLE From a3fb27c99b6696b7b9ccf69d5a43977f44440687 Mon Sep 17 00:00:00 2001 From: Gua00va <105484243+Gua00va@users.noreply.github.com> Date: Wed, 13 Dec 2023 10:36:00 +0530 Subject: [PATCH 18/19] add forK_choice_read_lock as parameter (#4978) --- beacon_node/beacon_chain/src/block_verification.rs | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index e86ca85bbfa..145614c4b32 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -838,10 +838,11 @@ impl GossipVerifiedBlock { &fork_choice_read_lock, block, )?; - drop(fork_choice_read_lock); let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - let (parent_block, block) = verify_parent_block_is_known(block_root, chain, block)?; + let (parent_block, block) = + verify_parent_block_is_known::(block_root, &fork_choice_read_lock, block)?; + drop(fork_choice_read_lock); // Track the number of skip slots between the block and its parent. 
metrics::set_gauge( @@ -1775,14 +1776,10 @@ pub fn get_block_root(block: &SignedBeaconBlock) -> Hash256 { #[allow(clippy::type_complexity)] fn verify_parent_block_is_known( block_root: Hash256, - chain: &BeaconChain, + fork_choice_read_lock: &RwLockReadGuard>, block: Arc>, ) -> Result<(ProtoBlock, Arc>), BlockError> { - if let Some(proto_block) = chain - .canonical_head - .fork_choice_read_lock() - .get_block(&block.parent_root()) - { + if let Some(proto_block) = fork_choice_read_lock.get_block(&block.parent_root()) { Ok((proto_block, block)) } else { Err(BlockError::ParentUnknown(RpcBlock::new_without_blobs( From a3a370302a2e97a736fe5d952352bd47048c9719 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 13 Dec 2023 14:24:36 -0800 Subject: [PATCH 19/19] Use the block header to compute the canonical_root (#5003) --- .../beacon_chain/src/block_verification.rs | 21 +++++++++++++++++-- beacon_node/beacon_chain/src/metrics.rs | 4 ++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 145614c4b32..23c9ab72571 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -780,7 +780,10 @@ impl GossipVerifiedBlock { // it to the slasher if an error occurs, because that's the end of this block's journey, // and it could be a repeat proposal (a likely cause for slashing!). let header = block.signed_block_header(); - Self::new_without_slasher_checks(block, chain).map_err(|e| { + // The `SignedBeaconBlock` and `SignedBeaconBlockHeader` have the same canonical root, + // but it's way quicker to calculate root of the header since the hash of the tree rooted + // at `BeaconBlockBody` is already computed in the header. + Self::new_without_slasher_checks(block, &header, chain).map_err(|e| { process_block_slash_info::<_, BlockError>( chain, BlockSlashInfo::from_early_error_block(header, e), @@ -791,6 +794,7 @@ impl GossipVerifiedBlock { /// As for new, but doesn't pass the block to the slasher. fn new_without_slasher_checks( block: Arc>, + block_header: &SignedBeaconBlockHeader, chain: &BeaconChain, ) -> Result> { // Ensure the block is the correct structure for the fork at `block.slot()`. @@ -810,7 +814,7 @@ impl GossipVerifiedBlock { }); } - let block_root = get_block_root(&block); + let block_root = get_block_header_root(block_header); // Disallow blocks that conflict with the anchor (weak subjectivity checkpoint), if any. check_block_against_anchor_slot(block.message(), chain)?; @@ -1771,6 +1775,19 @@ pub fn get_block_root(block: &SignedBeaconBlock) -> Hash256 { block_root } +/// Returns the canonical root of the given `block_header`. +/// +/// Use this function to ensure that we report the block hashing time Prometheus metric. +pub fn get_block_header_root(block_header: &SignedBeaconBlockHeader) -> Hash256 { + let block_root_timer = metrics::start_timer(&metrics::BLOCK_HEADER_PROCESSING_BLOCK_ROOT); + + let block_root = block_header.message.canonical_root(); + + metrics::stop_timer(block_root_timer); + + block_root +} + /// Verify the parent of `block` is known, returning some information about the parent block from /// fork choice. 
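// A sketch of the equivalence `get_block_header_root` (added above) relies
// on: `BeaconBlockHeader` commits to the block body via `body_root`, so
// hashing the much smaller header yields the same canonical root as hashing
// the full `SignedBeaconBlock`:
//
//     let header = block.signed_block_header();
//     debug_assert_eq!(get_block_header_root(&header), get_block_root(&block));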
#[allow(clippy::type_complexity)] diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index ca04366b01e..b9a748b6d3f 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -40,6 +40,10 @@ lazy_static! { "beacon_block_processing_block_root_seconds", "Time spent calculating the block root when processing a block." ); + pub static ref BLOCK_HEADER_PROCESSING_BLOCK_ROOT: Result = try_create_histogram( + "beacon_block_header_processing_block_root_seconds", + "Time spent calculating the block root for a beacon block header." + ); pub static ref BLOCK_PROCESSING_BLOB_ROOT: Result = try_create_histogram( "beacon_block_processing_blob_root_seconds", "Time spent calculating the blob root when processing a block."
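// Caller-side sketch of how the last two patches combine on the gossip path:
// compute the block root from the header (so the cheaper header hash is what
// the new `beacon_block_header_processing_block_root_seconds` metric
// measures), then take the fork-choice read lock once and reuse it for the
// early checks and the parent-block check before dropping it:
//
//     let header = block.signed_block_header();
//     let block_root = get_block_header_root(&header);
//     let fork_choice_read_lock = chain.canonical_head.fork_choice_read_lock();
//     // ... proposer and relevancy checks under the same guard ...
//     let (parent_block, block) =
//         verify_parent_block_is_known::<T>(block_root, &fork_choice_read_lock, block)?;
//     drop(fork_choice_read_lock);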