diff --git a/.config/zepter.yaml b/.config/zepter.yaml index 3fb82bec823..d0672390500 100644 --- a/.config/zepter.yaml +++ b/.config/zepter.yaml @@ -8,14 +8,14 @@ version: workflows: check: - [ - "lint", - # Check that `A` activates the features of `B`. - "propagate-feature", - # These are the features to check: - "--features=std,op,scroll-alloy-traits,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat", - # Do not try to add a new section to `[features]` of `A` only because `B` exposes that feature. There are edge-cases where this is still needed, but we can add them manually. - "--left-side-feature-missing=ignore", - # Ignore the case that `A` it outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on. + "lint", + # Check that `A` activates the features of `B`. + "propagate-feature", + # These are the features to check: + "--features=std,op,scroll-alloy-traits,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat,min-error-logs,min-warn-logs,min-info-logs,min-debug-logs,min-trace-logs,otlp,js-tracer", + # Do not try to add a new section to `[features]` of `A` only because `B` exposes that feature. There are edge-cases where this is still needed, but we can add them manually. + "--left-side-feature-missing=ignore", + # Ignore the case that `A` is outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on. "--left-side-outside-workspace=ignore", # Auxiliary flags: diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index ffbd600db7e..eed64b157f3 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,12 +1,12 @@ * @gakonst -crates/blockchain-tree-api/ @rakita @rkrasiuk @mattsse @Rjected -crates/blockchain-tree/ @rakita @rkrasiuk @mattsse @Rjected -crates/chain-state/ @fgimenez @mattsse @rkrasiuk +crates/blockchain-tree-api/ @rakita @mattsse @Rjected +crates/blockchain-tree/ @rakita @mattsse @Rjected +crates/chain-state/ @fgimenez @mattsse crates/chainspec/ @Rjected @joshieDo @mattsse crates/cli/ @mattsse -crates/consensus/ @rkrasiuk @mattsse @Rjected +crates/consensus/ @mattsse @Rjected crates/e2e-test-utils/ @mattsse @Rjected @klkvr @fgimenez -crates/engine/ @rkrasiuk @mattsse @Rjected @fgimenez @mediocregopher @yongkangc +crates/engine/ @mattsse @Rjected @fgimenez @mediocregopher @yongkangc crates/era/ @mattsse @RomanHodulak crates/errors/ @mattsse crates/ethereum-forks/ @mattsse @Rjected @@ -15,17 +15,17 @@ crates/etl/ @joshieDo @shekhirin crates/evm/ @rakita @mattsse @Rjected crates/exex/ @shekhirin crates/net/ @mattsse @Rjected -crates/net/downloaders/ @rkrasiuk +crates/net/downloaders/ @Rjected crates/node/ @mattsse @Rjected @klkvr crates/optimism/ @mattsse @Rjected @fgimenez crates/payload/ @mattsse @Rjected crates/primitives-traits/ @Rjected @RomanHodulak @mattsse @klkvr crates/primitives/ @Rjected @mattsse @klkvr crates/prune/ @shekhirin @joshieDo -crates/ress @rkrasiuk +crates/ress @shekhirin @Rjected crates/revm/ @mattsse @rakita crates/rpc/ @mattsse @Rjected @RomanHodulak -crates/stages/ @rkrasiuk @shekhirin @mediocregopher +crates/stages/ @shekhirin @mediocregopher crates/static-file/ @joshieDo @shekhirin crates/storage/codecs/ @joshieDo crates/storage/db-api/ @joshieDo @rakita @@ -35,10 +35,10 @@ crates/storage/errors/ @rakita crates/storage/libmdbx-rs/ @rakita @shekhirin crates/storage/nippy-jar/ @joshieDo @shekhirin 
crates/storage/provider/ @rakita @joshieDo @shekhirin -crates/storage/storage-api/ @joshieDo @rkrasiuk +crates/storage/storage-api/ @joshieDo crates/tasks/ @mattsse crates/tokio-util/ @fgimenez crates/transaction-pool/ @mattsse @yongkangc -crates/trie/ @rkrasiuk @Rjected @shekhirin @mediocregopher +crates/trie/ @Rjected @shekhirin @mediocregopher etc/ @Rjected @shekhirin .github/ @gakonst @DaniPopes diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 9f535a45f75..d603644283a 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -11,6 +11,7 @@ exclude_crates=( # The following require investigation if they can be fixed reth-basic-payload-builder reth-bench + reth-bench-compare reth-cli reth-cli-commands reth-cli-runner @@ -68,6 +69,7 @@ exclude_crates=( reth-payload-builder # reth-metrics reth-provider # tokio reth-prune # tokio + reth-prune-static-files # reth-provider reth-stages-api # reth-provider, reth-prune reth-static-file # tokio reth-transaction-pool # c-kzg diff --git a/.github/assets/hive/build_simulators.sh b/.github/assets/hive/build_simulators.sh index dab77772f8e..d65e609e700 100755 --- a/.github/assets/hive/build_simulators.sh +++ b/.github/assets/hive/build_simulators.sh @@ -11,7 +11,8 @@ go build . # Run each hive command in the background for each simulator and wait echo "Building images" -./hive -client reth --sim "ethereum/eest" --sim.buildarg fixtures=https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0/fixtures_develop.tar.gz --sim.buildarg branch=v5.1.0 -sim.timelimit 1s || true & +# TODO: test code has been moved from https://github.com/ethereum/execution-spec-tests to https://github.com/ethereum/execution-specs; we need to pin the eels branch with `--sim.buildarg branch=` once the fusaka release is tagged on the new repo +./hive -client reth --sim "ethereum/eels" --sim.buildarg fixtures=https://github.com/ethereum/execution-spec-tests/releases/download/v5.3.0/fixtures_develop.tar.gz -sim.timelimit 1s || true & ./hive -client reth --sim "ethereum/engine" -sim.timelimit 1s || true & ./hive -client reth --sim "devp2p" -sim.timelimit 1s || true & ./hive -client reth --sim "ethereum/rpc-compat" -sim.timelimit 1s || true & @@ -27,8 +28,8 @@ docker save hive/hiveproxy:latest -o ../hive_assets/hiveproxy.tar & saving_pids+ docker save hive/simulators/devp2p:latest -o ../hive_assets/devp2p.tar & saving_pids+=( $! ) docker save hive/simulators/ethereum/engine:latest -o ../hive_assets/engine.tar & saving_pids+=( $! ) docker save hive/simulators/ethereum/rpc-compat:latest -o ../hive_assets/rpc_compat.tar & saving_pids+=( $! ) -docker save hive/simulators/ethereum/eest/consume-engine:latest -o ../hive_assets/eest_engine.tar & saving_pids+=( $! ) -docker save hive/simulators/ethereum/eest/consume-rlp:latest -o ../hive_assets/eest_rlp.tar & saving_pids+=( $! ) +docker save hive/simulators/ethereum/eels/consume-engine:latest -o ../hive_assets/eels_engine.tar & saving_pids+=( $! ) +docker save hive/simulators/ethereum/eels/consume-rlp:latest -o ../hive_assets/eels_rlp.tar & saving_pids+=( $! ) docker save hive/simulators/smoke/genesis:latest -o ../hive_assets/smoke_genesis.tar & saving_pids+=( $! ) docker save hive/simulators/smoke/network:latest -o ../hive_assets/smoke_network.tar & saving_pids+=( $! ) docker save hive/simulators/ethereum/sync:latest -o ../hive_assets/ethereum_sync.tar & saving_pids+=( $! 
) diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index 6a580d9a110..db18aa9ceda 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -30,7 +30,7 @@ engine-withdrawals: - Corrupted Block Hash Payload (INVALID) (Paris) (reth) - Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org (Paris) (reth) -engine-api: [] +engine-api: [ ] # no fix due to https://github.com/paradigmxyz/reth/issues/8732 engine-cancun: @@ -39,22 +39,35 @@ engine-cancun: # in hive or its dependencies - Blob Transaction Ordering, Multiple Clients (Cancun) (reth) -sync: [] +sync: [ ] -# https://github.com/ethereum/hive/issues/1277 -engine-auth: - - "JWT Authentication: No time drift, correct secret (Paris) (reth)" - - "JWT Authentication: Negative time drift, within limit, correct secret (Paris) (reth)" - - "JWT Authentication: Positive time drift, within limit, correct secret (Paris) (reth)" +engine-auth: [ ] -# 7702 test - no fix: it’s too expensive to check whether the storage is empty on each creation -# 6110 related tests - may start passing when fixtures improve -# 7002 related tests - post-fork test, should fix for spec compliance but not -# realistic on mainnet -# 7251 related tests - modified contract, not necessarily practical on mainnet, -# 7594: https://github.com/paradigmxyz/reth/issues/18471 -# worth re-visiting when more of these related tests are passing -eest/consume-engine: +# EIP-7610 related tests (Revert creation in case of non-empty storage): +# +# tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage +# The test artificially creates an empty account with storage, then tests EIP-7610's behavior. +# On mainnet, ~25 such accounts exist as contract addresses (derived from keccak(prefix, caller, +# nonce/salt), not from public keys). No private key exists for contract addresses. To trigger +# this with EIP-7702, you'd need to recover a private key from one of the already deployed contract addresses - mathematically impossible. +# +# tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_* +# Requires hash collision on create2 address to target already deployed accounts with storage. +# ~20-30 such accounts exist from before the state-clear EIP. Creating new accounts targeting +# these requires hash collision - mathematically impossible to trigger on mainnet. +# ref: https://github.com/ethereum/go-ethereum/pull/28666#issuecomment-1891997143 +# +# System contract tests (already fixed and deployed): +# +# tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout and test_invalid_log_length +# System contract is already fixed and deployed; tests cover scenarios where contract is +# malformed which can't happen retroactively. No point in adding checks. +# +# tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment +# tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment +# Post-fork system contract deployment tests. Should fix for spec compliance but not realistic +# on mainnet as these contracts are already deployed at the correct addresses. 
+eels/consume-engine: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test_engine-zero_nonce]-reth - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-nonzero_balance]-reth - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-zero_balance]-reth @@ -84,7 +97,64 @@ eest/consume-engine: - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Osaka-blockchain_test_engine-log_argument_withdrawal_credentials_size-value_zero]-reth - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_log_length[fork_Osaka-blockchain_test_engine-slice_bytes_False]-reth - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_log_length[fork_Osaka-blockchain_test_engine-slice_bytes_True]-reth -eest/consume-rlp: + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - 
tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_engine_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - 
tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_engine_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + - 
tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + +# Blob limit tests: +# +# tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py::test_max_blobs_per_tx_fork_transition[fork_PragueToOsakaAtTime15k-blob_count_7-blockchain_test] +# this test inserts a chain via chain.rlp where the last block is invalid, but expects import to stop there, this doesn't work properly with our pipeline import approach hence the import fails when the invalid block is detected. +#. In other words, if this test fails, this means we're correctly rejecting the block. +#. The same test exists in the consume-engine simulator where it is passing as expected +eels/consume-rlp: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test-zero_nonce]-reth - tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py::test_system_contract_errors[fork_Prague-blockchain_test_engine-system_contract_reaches_gas_limit-system_contract_0x0000bbddc7ce488642fb579f8b00f3a590007251]-reth - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-nonzero_balance]-reth @@ -121,3 +191,53 @@ eest/consume-rlp: - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-zero_balance]-reth - tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-nonzero_balance]-reth - tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-zero_balance]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_1-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_0-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_2-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_1-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_1-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - 
tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_1-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_2-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_1-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_2-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_0-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_2-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_1-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Cancun-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Osaka-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - 
tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Paris-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Prague-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_from_state_test-opcode_CREATE-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_0-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_opcode[fork_Shanghai-blockchain_test_from_state_test-opcode_CREATE2-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_1-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_2-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_1-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_2-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_1-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_2-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Osaka-tx_type_0-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_0-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - 
tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_2-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Paris-tx_type_0-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_0-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Cancun-tx_type_0-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_1-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_from_state_test-non-empty-balance-revert-initcode]-reth + - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_from_state_test-non-empty-balance-correct-initcode]-reth diff --git a/.github/assets/hive/load_images.sh b/.github/assets/hive/load_images.sh index 37a2f82de54..e7dd7c99f4a 100755 --- a/.github/assets/hive/load_images.sh +++ b/.github/assets/hive/load_images.sh @@ -11,8 +11,8 @@ IMAGES=( "/tmp/smoke_genesis.tar" "/tmp/smoke_network.tar" "/tmp/ethereum_sync.tar" - "/tmp/eest_engine.tar" - "/tmp/eest_rlp.tar" + "/tmp/eels_engine.tar" + "/tmp/eels_rlp.tar" "/tmp/reth_image.tar" ) diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 3039541c8f1..de3de43aa9d 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -13,13 +13,15 @@ on: jobs: build: runs-on: ubuntu-latest - timeout-minutes: 60 + timeout-minutes: 90 steps: - name: Checkout uses: actions/checkout@v5 - name: Install bun uses: oven-sh/setup-bun@v2 + with: + bun-version: v1.2.23 - name: Install Playwright browsers # Required for rehype-mermaid to render Mermaid diagrams during build diff --git a/.github/workflows/docker-tag-latest.yml b/.github/workflows/docker-tag-latest.yml new file mode 100644 index 00000000000..1f76254d49d --- /dev/null +++ b/.github/workflows/docker-tag-latest.yml @@ -0,0 +1,73 @@ +# Tag a specific Docker release version as latest + +name: docker-tag-latest + +on: + workflow_dispatch: + inputs: + version: + description: 'Release version to tag as latest (e.g., v1.8.4)' + required: true + type: string + tag_reth: + description: 'Tag reth image as latest' + required: false + type: boolean + default: true + tag_op_reth: + description: 'Tag op-reth image as latest' + required: false + type: boolean + default: false + +env: + DOCKER_USERNAME: ${{ github.actor }} + +jobs: + tag-reth-latest: + name: Tag reth as latest + runs-on: ubuntu-24.04 + if: ${{ inputs.tag_reth }} + permissions: + packages: write + contents: read + steps: + - name: Log in to Docker + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin + + - name: Pull reth release image + run: | + docker pull ghcr.io/${{ github.repository_owner 
}}/reth:${{ inputs.version }} + + - name: Tag reth as latest + run: | + docker tag ghcr.io/${{ github.repository_owner }}/reth:${{ inputs.version }} ghcr.io/${{ github.repository_owner }}/reth:latest + + - name: Push reth latest tag + run: | + docker push ghcr.io/${{ github.repository_owner }}/reth:latest + + tag-op-reth-latest: + name: Tag op-reth as latest + runs-on: ubuntu-24.04 + if: ${{ inputs.tag_op_reth }} + permissions: + packages: write + contents: read + steps: + - name: Log in to Docker + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin + + - name: Pull op-reth release image + run: | + docker pull ghcr.io/${{ github.repository_owner }}/op-reth:${{ inputs.version }} + + - name: Tag op-reth as latest + run: | + docker tag ghcr.io/${{ github.repository_owner }}/op-reth:${{ inputs.version }} ghcr.io/${{ github.repository_owner }}/op-reth:latest + + - name: Push op-reth latest tag + run: | + docker push ghcr.io/${{ github.repository_owner }}/op-reth:latest diff --git a/.github/workflows/grafana.yml b/.github/workflows/grafana.yml new file mode 100644 index 00000000000..ffa09193952 --- /dev/null +++ b/.github/workflows/grafana.yml @@ -0,0 +1,21 @@ +name: grafana + +on: + pull_request: + merge_group: + push: + branches: [main] + +jobs: + check-dashboard: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - name: Check for ${DS_PROMETHEUS} in overview.json + run: | + if grep -Fn '${DS_PROMETHEUS}' etc/grafana/dashboards/overview.json; then + echo "Error: overview.json contains '\${DS_PROMETHEUS}' placeholder" + echo "Please replace it with '\${datasource}'" + exit 1 + fi + echo "✓ overview.json does not contain '\${DS_PROMETHEUS}' placeholder" diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 175a9e0d541..6a7e2bd88c9 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -33,16 +33,41 @@ jobs: repository: ethereum/hive path: hivetests + - name: Get hive commit hash + id: hive-commit + run: echo "hash=$(cd hivetests && git rev-parse HEAD)" >> $GITHUB_OUTPUT + - uses: actions/setup-go@v6 with: go-version: "^1.13.1" - run: go version + - name: Restore hive assets cache + id: cache-hive + uses: actions/cache@v4 + with: + path: ./hive_assets + key: hive-assets-${{ steps.hive-commit.outputs.hash }}-${{ hashFiles('.github/assets/hive/build_simulators.sh') }} + - name: Build hive assets + if: steps.cache-hive.outputs.cache-hit != 'true' run: .github/assets/hive/build_simulators.sh + - name: Load cached Docker images + if: steps.cache-hive.outputs.cache-hit == 'true' + run: | + cd hive_assets + for tar_file in *.tar; do + if [ -f "$tar_file" ]; then + echo "Loading $tar_file..." 
+ docker load -i "$tar_file" + fi + done + # Make hive binary executable + chmod +x hive + - name: Upload hive assets - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: hive_assets path: ./hive_assets @@ -111,40 +136,44 @@ jobs: - debug_ # consume-engine - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/osaka.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/prague.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/cancun.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/shanghai.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/berlin.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/istanbul.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/homestead.* - - sim: ethereum/eest/consume-engine + - sim: ethereum/eels/consume-engine limit: .*tests/frontier.* + - sim: ethereum/eels/consume-engine + limit: .*tests/paris.* # consume-rlp - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/osaka.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/prague.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/cancun.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/shanghai.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/berlin.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/istanbul.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/homestead.* - - sim: ethereum/eest/consume-rlp + - sim: ethereum/eels/consume-rlp limit: .*tests/frontier.* + - sim: ethereum/eels/consume-rlp + limit: .*tests/paris.* needs: - prepare-reth - prepare-hive @@ -158,13 +187,13 @@ jobs: fetch-depth: 0 - name: Download hive assets - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 with: name: hive_assets path: /tmp - name: Download reth image - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 with: name: artifacts path: /tmp diff --git a/.github/workflows/kurtosis-op.yml b/.github/workflows/kurtosis-op.yml index 0e08d1641de..7477e759209 100644 --- a/.github/workflows/kurtosis-op.yml +++ b/.github/workflows/kurtosis-op.yml @@ -42,7 +42,7 @@ jobs: fetch-depth: 0 - name: Download reth image - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 with: name: artifacts path: /tmp diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml index f78fc81235a..b45e997ef73 100644 --- a/.github/workflows/kurtosis.yml +++ b/.github/workflows/kurtosis.yml @@ -40,7 +40,7 @@ jobs: fetch-depth: 0 - name: Download reth image - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 with: name: artifacts path: /tmp diff --git a/.github/workflows/prepare-reth.yml b/.github/workflows/prepare-reth.yml index 6334297d7af..2e1f77ba4ba 100644 --- a/.github/workflows/prepare-reth.yml +++ b/.github/workflows/prepare-reth.yml @@ -50,7 +50,7 @@ jobs: - name: Upload reth image id: upload - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: artifacts path: ./artifacts diff --git a/.github/workflows/release-reproducible.yml 
b/.github/workflows/release-reproducible.yml index 9726cb77b89..e0e7f78aa58 100644 --- a/.github/workflows/release-reproducible.yml +++ b/.github/workflows/release-reproducible.yml @@ -40,20 +40,12 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Extract Rust version from Cargo.toml - id: rust_version - run: | - RUST_VERSION=$(cargo metadata --format-version 1 | jq -r '.packages[] | select(.name == "reth") | .rust_version' || echo "1") - echo "RUST_VERSION=$RUST_VERSION" >> $GITHUB_OUTPUT - - name: Build and push reproducible image uses: docker/build-push-action@v6 with: context: . file: ./Dockerfile.reproducible push: true - build-args: | - RUST_VERSION=${{ steps.rust_version.outputs.RUST_VERSION }} tags: | ${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:${{ needs.extract-version.outputs.VERSION }} ${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:latest diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 029b145f07b..9f330935972 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -22,7 +22,6 @@ env: CARGO_TERM_COLOR: always DOCKER_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/reth DOCKER_OP_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/op-reth - DEB_SUPPORTED_TARGETS: x86_64-unknown-linux-gnu aarch64-unknown-linux-gnu riscv64gc-unknown-linux-gnu jobs: dry-run: @@ -74,10 +73,6 @@ jobs: os: ubuntu-24.04 profile: maxperf allow_fail: false - - target: x86_64-unknown-linux-gnu - os: ubuntu-24.04 - profile: reproducible - allow_fail: false - target: aarch64-unknown-linux-gnu os: ubuntu-24.04 profile: maxperf @@ -124,34 +119,12 @@ jobs: echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV - name: Build Reth - if: ${{ !(matrix.build.binary == 'op-reth' && matrix.configs.profile == 'reproducible') }} - run: | - if [[ "${{ matrix.build.binary }}" == "reth" && "${{ matrix.configs.profile }}" == "reproducible" ]]; then - make build-reth-reproducible - else - make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} - fi - - - name: Build Reth deb package - if: ${{ matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} - run: make build-deb-${{ matrix.configs.target }} PROFILE=${{ matrix.configs.profile }} VERSION=${{ needs.extract-version.outputs.VERSION }} - + run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} - name: Move binary run: | mkdir artifacts [[ "${{ matrix.configs.target }}" == *windows* ]] && ext=".exe" - - # Handle reproducible builds which always target x86_64-unknown-linux-gnu - if [[ "${{ matrix.build.binary }}" == "reth" && "${{ matrix.configs.profile }}" == "reproducible" ]]; then - mv "target/x86_64-unknown-linux-gnu/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - else - mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - fi - - # Move deb packages if they exist - if [[ "${{ matrix.build.binary }}" == "reth" && "${{ env.DEB_SUPPORTED_TARGETS }}" == *"${{ matrix.configs.target }}"* ]]; then - mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb" ./artifacts - fi + mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ 
matrix.build.binary }}${ext}" ./artifacts - name: Configure GPG and create artifacts env: @@ -161,42 +134,25 @@ jobs: export GPG_TTY=$(tty) echo -n "$GPG_SIGNING_KEY" | base64 --decode | gpg --batch --import cd artifacts - tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz ${{ matrix.build.binary }}*[!.deb] + tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz ${{ matrix.build.binary }}* echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz - if [[ -f "${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb" ]]; then - echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb - fi - mv *tar.gz* *.deb* .. + mv *tar.gz* .. shell: bash - name: Upload artifact if: ${{ github.event.inputs.dry_run != 'true' }} - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz - name: Upload signature if: ${{ github.event.inputs.dry_run != 'true' }} - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc - - name: Upload deb package - if: ${{ github.event.inputs.dry_run != 'true' && matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} - uses: actions/upload-artifact@v4 - with: - name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb - path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb - - - name: Upload deb package signature - if: ${{ github.event.inputs.dry_run != 'true' && matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} - uses: actions/upload-artifact@v4 - with: - name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb.asc - path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb.asc - draft-release: name: draft release runs-on: ubuntu-latest @@ -214,7 +170,7 @@ jobs: with: fetch-depth: 0 - name: Download artifacts - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 - name: Generate full changelog id: changelog run: | diff --git a/.github/workflows/reproducible-build.yml b/.github/workflows/reproducible-build.yml index 0f5dd2e72d8..b4a93cedaba 100644 --- a/.github/workflows/reproducible-build.yml +++ b/.github/workflows/reproducible-build.yml @@ -15,18 +15,24 @@ jobs: - uses: dtolnay/rust-toolchain@stable with: target: x86_64-unknown-linux-gnu + - name: Install 
cross main + run: | + cargo install cross --git https://github.com/cross-rs/cross - name: Install cargo-cache run: | cargo install cargo-cache + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true - name: Build Reth run: | - make build-reth-reproducible - mv target/x86_64-unknown-linux-gnu/reproducible/reth reth-build-1 + make build-reproducible + mv target/x86_64-unknown-linux-gnu/release/reth reth-build-1 - name: Clean cache run: make clean && cargo cache -a - name: Build Reth again run: | - make build-reth-reproducible - mv target/x86_64-unknown-linux-gnu/reproducible/reth reth-build-2 + make build-reproducible + mv target/x86_64-unknown-linux-gnu/release/reth reth-build-2 - name: Compare binaries run: cmp reth-build-1 reth-build-2 diff --git a/CLAUDE.md b/CLAUDE.md index 99282fbf864..c7a709c6713 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -236,6 +236,85 @@ Common refactoring pattern: - Add trait bounds for flexibility - Enable reuse across different chain types (Ethereum, Optimism) +#### When to Comment + +Write comments that remain valuable after the PR is merged. Future readers won't have PR context - they only see the current code. + +##### ✅ DO: Add Value + +**Explain WHY and non-obvious behavior:** +```rust +// Process must handle allocations atomically to prevent race conditions +// between dealloc on drop and concurrent limit checks +unsafe impl GlobalAlloc for LimitedAllocator { ... } + +// Binary search requires sorted input. Panics on unsorted slices. +fn find_index(items: &[Item], target: &Item) -> Option<usize> + +// Timeout set to 5s to match EVM block processing limits +const TRACER_TIMEOUT: Duration = Duration::from_secs(5); +``` + +**Document constraints and assumptions:** +```rust +/// Returns heap size estimate. +/// +/// Note: May undercount shared references (Rc/Arc). For precise +/// accounting, combine with an allocator-based approach. +fn deep_size_of(&self) -> usize +``` + +**Explain complex logic:** +```rust +// We reset limits at task start because tokio reuses threads in +// spawn_blocking pool. Without reset, second task inherits first +// task's allocation count and immediately hits limit. +THREAD_ALLOCATED.with(|allocated| allocated.set(0)); +``` + +##### ❌ DON'T: Describe Changes +```rust +// ❌ BAD - Describes the change, not the code +// Changed from Vec to HashMap for O(1) lookups + +// ✅ GOOD - Explains the decision +// HashMap provides O(1) symbol lookups during trace replay +``` +```rust +// ❌ BAD - PR-specific context +// Fix for issue #234 where memory wasn't freed + +// ✅ GOOD - Documents the actual behavior +// Explicitly drop allocations before limit check to ensure +// accurate accounting +``` +```rust +// ❌ BAD - States the obvious +// Increment counter +counter += 1; + +// ✅ GOOD - Explains non-obvious purpose +// Track allocations across all threads for global limit enforcement +GLOBAL_COUNTER.fetch_add(1, Ordering::SeqCst); +``` + +✅ **Comment when:** +- Non-obvious behavior or edge cases +- Performance trade-offs +- Safety requirements (unsafe blocks must always be documented) +- Limitations or gotchas +- Why simpler alternatives don't work + +❌ **Don't comment when:** +- Code is self-explanatory +- Just restating the code in English +- Describing what changed in this PR + +##### The Test: "Will this make sense in 6 months?" + +Before adding a comment, ask: Would someone reading just the current code (no PR, no history) find this helpful? 
+ + ### Example Contribution Workflow Let's say you want to fix a bug where external IP resolution fails on startup: diff --git a/Cargo.lock b/Cargo.lock index 6516e410911..0d04ac07097 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -74,6 +74,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "aligned-vec" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc890384c8602f339876ded803c97ad529f3842aba97f6392b3dba0dd171769b" +dependencies = [ + "equator", +] + [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -112,9 +121,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59094911f05dbff1cf5b29046a00ef26452eccc8d47136d50a47c0cf22f00c85" +checksum = "90d103d3e440ad6f703dd71a5b58a6abd24834563bde8a5fabe706e00242f810" dependencies = [ "alloy-eips", "alloy-primitives", @@ -134,14 +143,14 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-consensus-any" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903cb8f728107ca27c816546f15be38c688df3c381d7bd1a4a9f215effc1ddb4" +checksum = "48ead76c8c84ab3a50c31c56bc2c748c2d64357ad2131c32f9b10ab790a25e1a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -154,9 +163,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03df5cb3b428ac96b386ad64c11d5c6e87a5505682cf1fbd6f8f773e9eda04f6" +checksum = "d5903097e4c131ad2dd80d87065f23c715ccb9cdb905fa169dffab8e1e798bae" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -171,7 +180,7 @@ dependencies = [ "futures", "futures-util", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -203,7 +212,7 @@ dependencies = [ "crc", "rand 0.8.5", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -232,14 +241,14 @@ dependencies = [ "rand 0.8.5", "serde", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-eips" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac7f1c9a1ccc7f3e03c36976455751a6166a4f0d2d2c530c3f87dfe7d0cdc836" +checksum = "7bdbec74583d0067798d77afa43d58f00d93035335d7ceaa5d3f93857d461bb9" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -256,15 +265,15 @@ dependencies = [ "ethereum_ssz_derive", "serde", "serde_with", - "sha2 0.10.9", - "thiserror 2.0.16", + "sha2", + "thiserror 2.0.17", ] [[package]] name = "alloy-evm" -version = "0.21.2" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06a5f67ee74999aa4fe576a83be1996bdf74a30fce3d248bf2007d6fc7dae8aa" +checksum = "6223235f0b785a83dd10dc1599b7f3763c65e4f98b4e9e4e10e576bbbdf7dfa2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -280,14 +289,14 @@ dependencies = [ "op-alloy-rpc-types-engine", "op-revm", "revm", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-genesis" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1421f6c9d15e5b86afbfe5865ca84dea3b9f77173a0963c1a2ee4e626320ada9" +checksum = "c25d5acb35706e683df1ea333c862bdb6b7c5548836607cd5bb56e501cca0b4f" dependencies = [ "alloy-eips", "alloy-primitives", @@ -299,9 +308,9 @@ 
dependencies = [ [[package]] name = "alloy-hardforks" -version = "0.3.5" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "889eb3949b58368a09d4f16931c660275ef5fb08e5fbd4a96573b19c7085c41f" +checksum = "1e29d7eacf42f89c21d7f089916d0bdb4f36139a31698790e8837d2dbbd4b2c3" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -325,24 +334,24 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65f763621707fa09cece30b73ecc607eb43fd7a72451fe3b46f645b905086926" +checksum = "31b67c5a702121e618217f7a86f314918acb2622276d0273490e2d4534490bc0" dependencies = [ "alloy-primitives", "alloy-sol-types", "http", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "alloy-network" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f59a869fa4b4c3a7f08b1c8cb79aec61c29febe6e24a24fe0fcfded8a9b5703" +checksum = "612296e6b723470bb1101420a73c63dfd535aa9bf738ce09951aedbd4ab7292e" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -361,14 +370,14 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-network-primitives" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46e9374c667c95c41177602ebe6f6a2edd455193844f011d973d374b65501b38" +checksum = "a0e7918396eecd69d9c907046ec8a93fb09b89e2f325d5e7ea9c4e3929aa0dd2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -379,9 +388,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.21.2" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17aaeb600740c181bf29c9f138f9b228d115ea74fa6d0f0343e1952f1a766968" +checksum = "3ad8f3a679eb44ee21481edabd628d191c9a42d182ed29923b4d43a27a0f2cc8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -392,13 +401,14 @@ dependencies = [ "op-alloy-consensus", "op-revm", "revm", + "thiserror 2.0.17", ] [[package]] name = "alloy-op-hardforks" -version = "0.3.5" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "599c1d7dfbccb66603cb93fde00980d12848d32fe5e814f50562104a92df6487" +checksum = "95ac97adaba4c26e17192d81f49186ac20c1e844e35a00e169c8d3d58bc84e6b" dependencies = [ "alloy-chains", "alloy-hardforks", @@ -422,7 +432,7 @@ dependencies = [ "foldhash 0.2.0", "getrandom 0.3.3", "hashbrown 0.16.0", - "indexmap 2.11.1", + "indexmap 2.12.0", "itoa", "k256", "keccak-asm", @@ -439,9 +449,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77818b7348bd5486491a5297579dbfe5f706a81f8e1f5976393025f1e22a7c7d" +checksum = "55c1313a527a2e464d067c031f3c2ec073754ef615cc0eabca702fd0fe35729c" dependencies = [ "alloy-chains", "alloy-consensus", @@ -475,7 +485,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", "url", @@ -484,9 +494,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "249b45103a66c9ad60ad8176b076106d03a2399a37f0ee7b0e03692e6b354cb9" +checksum = 
"810766eeed6b10ffa11815682b3f37afc5019809e3b470b23555297d5770ce63" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -528,9 +538,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2430d5623e428dd012c6c2156ae40b7fe638d6fca255e3244e0fba51fa698e93" +checksum = "45f802228273056528dfd6cc8845cc91a7c7e0c6fc1a66d19e8673743dacdc7e" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -554,9 +564,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e131624d08a25cfc40557041e7dc42e1182fa1153e7592d120f769a1edce56" +checksum = "33ff3df608dcabd6bdd197827ff2b8faaa6cefe0c462f7dc5e74108666a01f56" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -567,9 +577,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59407723b1850ebaa49e46d10c2ba9c10c10b3aedf2f7e97015ee23c3f4e639" +checksum = "00e11a40c917c704888aa5aa6ffa563395123b732868d2e072ec7dd46c3d4672" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -579,9 +589,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65e3266095e6d8e8028aab5f439c6b8736c5147314f7e606c61597e014cb8a0" +checksum = "ac2bc988d7455e02dfb53460e1caa61f932b3f8452e12424e68ba8dcf60bba90" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -591,9 +601,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07429a1099cd17227abcddb91b5e38c960aaeb02a6967467f5bb561fbe716ac6" +checksum = "cdbf6d1766ca41e90ac21c4bc5cbc5e9e965978a25873c3f90b3992d905db4cb" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -602,28 +612,29 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59e0e876b20eb9debf316d3e875536f389070635250f22b5a678cf4632a3e0cf" +checksum = "ab94e446a003dcef86843eea60d05a6cec360eb8e1829e4cf388ef94d799b5cf" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", + "derive_more", "ethereum_ssz", "ethereum_ssz_derive", "serde", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", "tree_hash", "tree_hash_derive", ] [[package]] name = "alloy-rpc-types-debug" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeff305b7d10cc1c888456d023e7bb8a5ea82e9e42b951e37619b88cc1a1486d" +checksum = "977698b458738369ba5ca645d2cdb4d51ba07a81db37306ff85322853161ea3a" dependencies = [ "alloy-primitives", "derive_more", @@ -633,9 +644,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222ecadcea6aac65e75e32b6735635ee98517aa63b111849ee01ae988a71d685" +checksum = "07da696cc7fbfead4b1dda8afe408685cae80975cbb024f843ba74d9639cd0d3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -654,9 +665,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = 
"1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db46b0901ee16bbb68d986003c66dcb74a12f9d9b3c44f8e85d51974f2458f0f" +checksum = "a15e4831b71eea9d20126a411c1c09facf1d01d5cac84fd51d532d3c429cfc26" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -671,14 +682,14 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-rpc-types-mev" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a60d4baadd3f278faa4e2305cca095dfd4ab286e071b768ff09181d8ae215" +checksum = "4c5d8f6f2c3b68af83a32d5c7fa1353d9b2e30441a3f0b8c3c5657c603b7238c" dependencies = [ "alloy-consensus", "alloy-eips", @@ -691,23 +702,23 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f10620724bd45f80c79668a8cdbacb6974f860686998abce28f6196ae79444" +checksum = "fb0c800e2ce80829fca1491b3f9063c29092850dc6cf19249d5f678f0ce71bb0" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864f41befa90102d4e02327679699a7e9510930e2924c529e31476086609fa89" +checksum = "2f82e3068673a3cf93fbbc2f60a59059395cd54bbe39af895827faa5e641cc8f" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -717,9 +728,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5413814be7a22fbc81e0f04a2401fcc3eb25e56fd53b04683e8acecc6e1fe01b" +checksum = "751d1887f7d202514a82c5b3caf28ee8bd4a2ad9549e4f498b6f0bff99b52add" dependencies = [ "alloy-primitives", "arbitrary", @@ -729,9 +740,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53410a18a61916e2c073a6519499514e027b01e77eeaf96acd1df7cf96ef6bb2" +checksum = "9cf0b42ffbf558badfecf1dde0c3c5ed91f29bb7e97876d0bed008c3d5d67171" dependencies = [ "alloy-primitives", "async-trait", @@ -739,14 +750,14 @@ dependencies = [ "either", "elliptic-curve", "k256", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "alloy-signer-local" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6006c4cbfa5d08cadec1fcabea6cb56dc585a30a9fce40bcf81e307d6a71c8e" +checksum = "3e7d555ee5f27be29af4ae312be014b57c6cff9acb23fe2cf008500be6ca7e33" dependencies = [ "alloy-consensus", "alloy-network", @@ -757,7 +768,7 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror 2.0.16", + "thiserror 2.0.17", "zeroize", ] @@ -784,7 +795,7 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.11.1", + "indexmap 2.12.0", "proc-macro-error2", "proc-macro2", "quote", @@ -833,12 +844,11 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d94ee404368a3d9910dfe61b203e888c6b0e151a50e147f95da8baff9f9c7763" +checksum = "71b3deee699d6f271eab587624a9fa84d02d0755db7a95a043d52a6488d16ebe" dependencies = [ 
"alloy-json-rpc", - "alloy-primitives", "auto_impl", "base64 0.22.1", "derive_more", @@ -847,7 +857,7 @@ dependencies = [ "parking_lot", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tower", "tracing", @@ -857,9 +867,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2f8a6338d594f6c6481292215ee8f2fd7b986c80aba23f3f44e761a8658de78" +checksum = "1720bd2ba8fe7e65138aca43bb0f680e4e0bcbd3ca39bf9d3035c9d7d2757f24" dependencies = [ "alloy-json-rpc", "alloy-rpc-types-engine", @@ -878,9 +888,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17a37a8ca18006fa0a58c7489645619ff58cfa073f2b29c4e052c9bd114b123a" +checksum = "ea89c214c7ddd2bcad100da929d6b642bbfed85788caf3b1be473abacd3111f9" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -898,9 +908,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "679b0122b7bca9d4dc5eb2c0549677a3c53153f6e232f23f4b3ba5575f74ebde" +checksum = "571aadf0afce0d515a28b2c6352662a39cb9f48b4eeff9a5c34557d6ea126730" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -936,11 +946,10 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.37" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64c09ec565a90ed8390d82aa08cd3b22e492321b96cb4a3d4f58414683c9e2f" +checksum = "cd7ce8ed34106acd6e21942022b6a15be6454c2c3ead4d76811d3bdcd63cf771" dependencies = [ - "alloy-primitives", "darling 0.21.3", "proc-macro2", "quote", @@ -1389,7 +1398,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e6fa871e4334a622afd6bb2f611635e8083a6f5e2936c0f90f37c7ef9856298" dependencies = [ "async-channel", - "futures-lite", + "futures-lite 1.13.0", "http-types", "log", "memchr", @@ -1677,15 +1686,6 @@ dependencies = [ "wyz", ] -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - [[package]] name = "block-buffer" version = "0.10.4" @@ -1704,6 +1704,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block2" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdeb9d870516001442e364c5220d3574d2da8dc765554b4a617230d33fa58ef5" +dependencies = [ + "objc2", +] + [[package]] name = "blst" version = "0.3.15" @@ -1718,25 +1727,26 @@ dependencies = [ [[package]] name = "boa_ast" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c340fe0f0b267787095cbe35240c6786ff19da63ec7b69367ba338eace8169b" +checksum = "bc119a5ad34c3f459062a96907f53358989b173d104258891bb74f95d93747e8" dependencies = [ "bitflags 2.9.4", "boa_interner", "boa_macros", "boa_string", - "indexmap 2.11.1", + "indexmap 2.12.0", "num-bigint", "rustc-hash 2.1.1", ] [[package]] name = "boa_engine" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f620c3f06f51e65c0504ddf04978be1b814ac6586f0b45f6019801ab5efd37f9" +checksum = 
"e637ec52ea66d76b0ca86180c259d6c7bb6e6a6e14b2f36b85099306d8b00cc3" dependencies = [ + "aligned-vec", "arrayvec", "bitflags 2.9.4", "boa_ast", @@ -1744,61 +1754,66 @@ dependencies = [ "boa_interner", "boa_macros", "boa_parser", - "boa_profiler", "boa_string", "bytemuck", "cfg-if", + "cow-utils", "dashmap 6.1.0", + "dynify", "fast-float2", - "hashbrown 0.15.5", - "icu_normalizer 1.5.0", - "indexmap 2.11.1", + "float16", + "futures-channel", + "futures-concurrency", + "futures-lite 2.6.1", + "hashbrown 0.16.0", + "icu_normalizer", + "indexmap 2.12.0", "intrusive-collections", - "itertools 0.13.0", + "itertools 0.14.0", "num-bigint", "num-integer", "num-traits", "num_enum", - "once_cell", - "pollster", + "paste", "portable-atomic", - "rand 0.8.5", + "rand 0.9.2", "regress", "rustc-hash 2.1.1", "ryu-js", "serde", "serde_json", - "sptr", + "small_btree", "static_assertions", + "tag_ptr", "tap", "thin-vec", - "thiserror 2.0.16", + "thiserror 2.0.17", "time", + "xsum", ] [[package]] name = "boa_gc" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2425c0b7720d42d73eaa6a883fbb77a5c920da8694964a3d79a67597ac55cce2" +checksum = "f1179f690cbfcbe5364cceee5f1cb577265bb6f07b0be6f210aabe270adcf9da" dependencies = [ "boa_macros", - "boa_profiler", "boa_string", - "hashbrown 0.15.5", + "hashbrown 0.16.0", "thin-vec", ] [[package]] name = "boa_interner" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42407a3b724cfaecde8f7d4af566df4b56af32a2f11f0956f5570bb974e7f749" +checksum = "9626505d33dc63d349662437297df1d3afd9d5fc4a2b3ad34e5e1ce879a78848" dependencies = [ "boa_gc", "boa_macros", - "hashbrown 0.15.5", - "indexmap 2.11.1", + "hashbrown 0.16.0", + "indexmap 2.12.0", "once_cell", "phf", "rustc-hash 2.1.1", @@ -1807,10 +1822,12 @@ dependencies = [ [[package]] name = "boa_macros" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c" +checksum = "7f36418a46544b152632c141b0a0b7a453cd69ca150caeef83aee9e2f4b48b7d" dependencies = [ + "cfg-if", + "cow-utils", "proc-macro2", "quote", "syn 2.0.106", @@ -1819,39 +1836,33 @@ dependencies = [ [[package]] name = "boa_parser" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cc142dac798cdc6e2dbccfddeb50f36d2523bb977a976e19bdb3ae19b740804" +checksum = "02f99bf5b684f0de946378fcfe5f38c3a0fbd51cbf83a0f39ff773a0e218541f" dependencies = [ "bitflags 2.9.4", "boa_ast", "boa_interner", "boa_macros", - "boa_profiler", "fast-float2", - "icu_properties 1.5.1", + "icu_properties", "num-bigint", "num-traits", "regress", "rustc-hash 2.1.1", ] -[[package]] -name = "boa_profiler" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4064908e7cdf9b6317179e9b04dcb27f1510c1c144aeab4d0394014f37a0f922" - [[package]] name = "boa_string" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7debc13fbf7997bf38bf8e9b20f1ad5e2a7d27a900e1f6039fe244ce30f589b5" +checksum = "45ce9d7aa5563a2e14eab111e2ae1a06a69a812f6c0c3d843196c9d03fbef440" dependencies = [ "fast-float2", + "itoa", "paste", "rustc-hash 2.1.1", - "sptr", + "ryu-js", "static_assertions", ] @@ -1891,7 +1902,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ - "sha2 0.10.9", + "sha2", "tinyvec", ] @@ -1926,18 +1937,18 @@ checksum = "175812e0be2bccb6abe50bb8d566126198344f707e304f45c648fd8f2cc0365e" [[package]] name = "bytemuck" -version = "1.23.2" +version = "1.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3995eaeebcdf32f91f980d360f78732ddc061097ab4e39991ae7a6ace9194677" +checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.10.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f154e572231cb6ba2bd1176980827e3d5dc04cc183a75dea38109fbdd672d29" +checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" dependencies = [ "proc-macro2", "quote", @@ -2017,7 +2028,7 @@ dependencies = [ "semver 1.0.26", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -2257,7 +2268,7 @@ dependencies = [ "hmac", "k256", "serde", - "sha2 0.10.9", + "sha2", "thiserror 1.0.69", ] @@ -2273,7 +2284,7 @@ dependencies = [ "once_cell", "pbkdf2", "rand 0.8.5", - "sha2 0.10.9", + "sha2", "thiserror 1.0.69", ] @@ -2291,7 +2302,7 @@ dependencies = [ "generic-array", "ripemd", "serde", - "sha2 0.10.9", + "sha2", "sha3", "thiserror 1.0.69", ] @@ -2445,6 +2456,16 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "cordyceps" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688d7fbb8092b8de775ef2536f36c8c31f2bc4006ece2e8d8ad2d17d00ce0a2a" +dependencies = [ + "loom", + "tracing", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -2480,6 +2501,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "cow-utils" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "417bef24afe1460300965a25ff4a24b8b45ad011948302ec221e8a0a81eb2c79" + [[package]] name = "cpufeatures" version = "0.2.17" @@ -2661,6 +2688,17 @@ dependencies = [ "cipher", ] +[[package]] +name = "ctrlc" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73736a89c4aff73035ba2ed2e565061954da00d4970fc9ac25dcc85a2a20d790" +dependencies = [ + "dispatch2", + "nix 0.30.1", + "windows-sys 0.61.0", +] + [[package]] name = "curve25519-dalek" version = "4.1.3" @@ -2688,6 +2726,19 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "custom-hardforks" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "reth-chainspec", + "reth-network-peers", + "serde", +] + [[package]] name = "darling" version = "0.20.11" @@ -2935,6 +2986,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "diatomic-waker" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c" + [[package]] name = "diff" version = "0.1.13" @@ -2956,7 +3013,7 @@ version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", + "block-buffer", "const-oid", "crypto-common", "subtle", @@ -3006,9 +3063,9 @@ dependencies = [ [[package]] name = "discv5" -version = "0.9.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c4b4e7798d2ff74e29cee344dc490af947ae657d6ab5273dde35d58ce06a4d71" +checksum = "f170f4f6ed0e1df52bf43b403899f0081917ecf1500bfe312505cc3b515a8899" dependencies = [ "aes", "aes-gcm", @@ -3037,6 +3094,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "dispatch2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec" +dependencies = [ + "bitflags 2.9.4", + "block2", + "libc", + "objc2", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -3075,6 +3144,26 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" +[[package]] +name = "dynify" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81acb15628a3e22358bf73de5e7e62360b8a777dbcb5fc9ac7dfa9ae73723747" +dependencies = [ + "dynify-macros", +] + +[[package]] +name = "dynify-macros" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec431cd708430d5029356535259c5d645d60edd3d39c54e5eea9782d46caa7d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "ecdsa" version = "0.16.9" @@ -3110,7 +3199,7 @@ dependencies = [ "ed25519", "rand_core 0.6.4", "serde", - "sha2 0.10.9", + "sha2", "subtle", "zeroize", ] @@ -3129,7 +3218,7 @@ dependencies = [ [[package]] name = "ef-test-runner" -version = "1.8.2" +version = "1.9.0" dependencies = [ "clap", "ef-tests", @@ -3137,7 +3226,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3164,7 +3253,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "walkdir", ] @@ -3274,6 +3363,26 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "equator" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4711b213838dfee0117e3be6ac926007d7f433d7bbe33595975d4190cb07e6fc" +dependencies = [ + "equator-macro", +] + +[[package]] +name = "equator-macro" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "equivalent" version = "1.0.2" @@ -3307,7 +3416,7 @@ checksum = "c853bd72c9e5787f8aafc3df2907c2ed03cff3150c3acd94e2e53a98ab70a8ab" dependencies = [ "cpufeatures", "ring", - "sha2 0.10.9", + "sha2", ] [[package]] @@ -3371,7 +3480,7 @@ dependencies = [ "reth-ethereum", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -3414,7 +3523,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -3460,7 +3569,7 @@ dependencies = [ "reth-payload-builder", "reth-tracing", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] @@ -3514,7 +3623,6 @@ dependencies = [ "op-alloy-rpc-types", "op-alloy-rpc-types-engine", "op-revm", - "reth-chain-state", "reth-codecs", "reth-db-api", "reth-engine-primitives", @@ -3531,7 +3639,7 @@ dependencies = [ "revm", "revm-primitives", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -3617,7 +3725,7 @@ dependencies = [ [[package]] name = "example-full-contract-state" -version = "1.8.2" +version = "1.9.0" dependencies = [ "eyre", 
"reth-ethereum", @@ -3756,7 +3864,7 @@ dependencies = [ [[package]] name = "exex-subscription" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "clap", @@ -3873,6 +3981,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + [[package]] name = "flate2" version = "1.1.2" @@ -3883,6 +3997,16 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "float16" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bffafbd079d520191c7c2779ae9cf757601266cf4167d3f659ff09617ff8483" +dependencies = [ + "cfg-if", + "rustc_version 0.2.3", +] + [[package]] name = "fnv" version = "1.0.7" @@ -3955,6 +4079,19 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-buffered" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8e0e1f38ec07ba4abbde21eed377082f17ccb988be9d988a5adbf4bafc118fd" +dependencies = [ + "cordyceps", + "diatomic-waker", + "futures-core", + "pin-project-lite", + "spin", +] + [[package]] name = "futures-channel" version = "0.3.31" @@ -3965,6 +4102,21 @@ dependencies = [ "futures-sink", ] +[[package]] +name = "futures-concurrency" +version = "7.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eb68017df91f2e477ed4bea586c59eaecaa47ed885a770d0444e21e62572cd2" +dependencies = [ + "fixedbitset", + "futures-buffered", + "futures-core", + "futures-lite 2.6.1", + "pin-project", + "slab", + "smallvec", +] + [[package]] name = "futures-core" version = "0.3.31" @@ -4003,6 +4155,19 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" +dependencies = [ + "fastrand 2.3.0", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.31" @@ -4080,7 +4245,6 @@ version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ - "serde", "typenum", "version_check", "zeroize", @@ -4238,7 +4402,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.11.1", + "indexmap 2.12.0", "slab", "tokio", "tokio-util", @@ -4299,6 +4463,8 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" dependencies = [ + "allocator-api2", + "equivalent", "foldhash 0.2.0", "serde", ] @@ -4371,7 +4537,7 @@ dependencies = [ "rand 0.9.2", "ring", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tinyvec", "tokio", "tracing", @@ -4395,7 +4561,7 @@ dependencies = [ "resolv-conf", "serde", "smallvec", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -4467,7 +4633,7 @@ dependencies = [ "anyhow", "async-channel", "base64 0.13.1", - "futures-lite", + "futures-lite 1.13.0", "infer", "pin-project-lite", "rand 0.7.3", @@ -4554,6 +4720,19 @@ dependencies = [ "webpki-roots 1.0.2", ] +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" 
+dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "hyper-tls" version = "0.6.0" @@ -4620,18 +4799,6 @@ dependencies = [ "cc", ] -[[package]] -name = "icu_collections" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" -dependencies = [ - "displaydoc", - "yoke 0.7.5", - "zerofrom", - "zerovec 0.10.4", -] - [[package]] name = "icu_collections" version = "2.0.0" @@ -4640,9 +4807,9 @@ checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", "potential_utf", - "yoke 0.8.0", + "yoke", "zerofrom", - "zerovec 0.11.4", + "zerovec", ] [[package]] @@ -4652,61 +4819,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", - "litemap 0.8.0", - "tinystr 0.8.1", - "writeable 0.6.1", - "zerovec 0.11.4", -] - -[[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap 0.7.5", - "tinystr 0.7.6", - "writeable 0.5.5", - "zerovec 0.10.4", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider 1.5.0", - "tinystr 0.7.6", - "zerovec 0.10.4", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" - -[[package]] -name = "icu_normalizer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" -dependencies = [ - "displaydoc", - "icu_collections 1.5.0", - "icu_normalizer_data 1.5.1", - "icu_properties 1.5.1", - "icu_provider 1.5.0", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec 0.10.4", + "litemap", + "tinystr", + "writeable", + "zerovec", ] [[package]] @@ -4716,41 +4832,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", - "icu_collections 2.0.0", - "icu_normalizer_data 2.0.0", - "icu_properties 2.0.1", - "icu_provider 2.0.0", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", "smallvec", - "zerovec 0.11.4", + "utf16_iter", + "write16", + "zerovec", ] -[[package]] -name = "icu_normalizer_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" - [[package]] name = "icu_normalizer_data" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" -[[package]] -name = "icu_properties" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" -dependencies = [ - "displaydoc", - 
"icu_collections 1.5.0", - "icu_locid_transform", - "icu_properties_data 1.5.1", - "icu_provider 1.5.0", - "tinystr 0.7.6", - "zerovec 0.10.4", -] - [[package]] name = "icu_properties" version = "2.0.1" @@ -4758,44 +4855,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", - "icu_collections 2.0.0", + "icu_collections", "icu_locale_core", - "icu_properties_data 2.0.1", - "icu_provider 2.0.0", + "icu_properties_data", + "icu_provider", "potential_utf", "zerotrie", - "zerovec 0.11.4", + "zerovec", ] -[[package]] -name = "icu_properties_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" - [[package]] name = "icu_properties_data" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" -[[package]] -name = "icu_provider" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr 0.7.6", - "writeable 0.5.5", - "yoke 0.7.5", - "zerofrom", - "zerovec 0.10.4", -] - [[package]] name = "icu_provider" version = "2.0.0" @@ -4805,23 +4879,12 @@ dependencies = [ "displaydoc", "icu_locale_core", "stable_deref_trait", - "tinystr 0.8.1", - "writeable 0.6.1", - "yoke 0.8.0", + "tinystr", + "writeable", + "yoke", "zerofrom", "zerotrie", - "zerovec 0.11.4", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", + "zerovec", ] [[package]] @@ -4847,15 +4910,15 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ - "icu_normalizer 2.0.0", - "icu_properties 2.0.1", + "icu_normalizer", + "icu_properties", ] [[package]] name = "if-addrs" -version = "0.13.4" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69b2eeee38fef3aa9b4cc5f1beea8a2444fc00e7377cafae396de3f5c2065e24" +checksum = "bf39cc0423ee66021dc5eccface85580e4a001e0c5288bae8bea7ecb69225e90" dependencies = [ "libc", "windows-sys 0.59.0", @@ -4919,14 +4982,15 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206a8042aec68fa4a62e8d3f7aa4ceb508177d9324faf261e1959e495b7a1921" +checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.5", + "hashbrown 0.16.0", "serde", + "serde_core", ] [[package]] @@ -5183,7 +5247,7 @@ dependencies = [ "rustls-pki-types", "rustls-platform-verifier", "soketto", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-rustls", "tokio-util", @@ -5211,7 +5275,7 @@ dependencies = [ "rustc-hash 2.1.1", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tower", @@ -5236,7 +5300,7 @@ dependencies = [ "rustls-platform-verifier", "serde", "serde_json", - 
"thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tower", "url", @@ -5274,7 +5338,7 @@ dependencies = [ "serde", "serde_json", "soketto", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -5291,7 +5355,7 @@ dependencies = [ "http", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -5346,7 +5410,7 @@ dependencies = [ "elliptic-curve", "once_cell", "serdect", - "sha2 0.10.9", + "sha2", "signature", ] @@ -5442,8 +5506,8 @@ dependencies = [ "k256", "multihash", "quick-protobuf", - "sha2 0.10.9", - "thiserror 2.0.16", + "sha2", + "thiserror 2.0.17", "tracing", "zeroize", ] @@ -5470,52 +5534,6 @@ dependencies = [ "redox_syscall", ] -[[package]] -name = "libsecp256k1" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79019718125edc905a079a70cfa5f3820bc76139fc91d6f9abc27ea2a887139" -dependencies = [ - "arrayref", - "base64 0.22.1", - "digest 0.9.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.8.5", - "serde", - "sha2 0.9.9", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle", -] - -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" -dependencies = [ - "libsecp256k1-core", -] - [[package]] name = "libz-sys" version = "1.1.22" @@ -5556,12 +5574,6 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" -[[package]] -name = "litemap" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" - [[package]] name = "litemap" version = "0.8.0" @@ -5735,7 +5747,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd7399781913e5393588a8d8c6a2867bf85fb38eaf2502fdce465aad2dc6f034" dependencies = [ "base64 0.22.1", - "indexmap 2.11.1", + "indexmap 2.12.0", "metrics", "metrics-util", "quanta", @@ -5767,7 +5779,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.5", - "indexmap 2.11.1", + "indexmap 2.12.0", "metrics", "ordered-float", "quanta", @@ -5791,7 +5803,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -5958,6 +5970,30 @@ dependencies = [ "tempfile", ] +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.9.4", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "nix" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" +dependencies = [ + "bitflags 2.9.4", + "cfg-if", + "cfg_aliases", + 
"libc", +] + [[package]] name = "nom" version = "7.1.3" @@ -6147,6 +6183,21 @@ dependencies = [ "smallvec", ] +[[package]] +name = "objc2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c2599ce0ec54857b29ce62166b0ed9b4f6f1a70ccc9a71165b6154caca8c05" +dependencies = [ + "objc2-encode", +] + +[[package]] +name = "objc2-encode" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33" + [[package]] name = "object" version = "0.36.7" @@ -6180,9 +6231,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "op-alloy-consensus" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a501241474c3118833d6195312ae7eb7cc90bbb0d5f524cbb0b06619e49ff67" +checksum = "e42e9de945efe3c2fbd207e69720c9c1af2b8caa6872aee0e216450c25a3ca70" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6195,7 +6246,7 @@ dependencies = [ "derive_more", "serde", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -6206,9 +6257,9 @@ checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc" [[package]] name = "op-alloy-network" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f80108e3b36901200a4c5df1db1ee9ef6ce685b59ea79d7be1713c845e3765da" +checksum = "9c9da49a2812a0189dd05e81e4418c3ae13fd607a92654107f02ebad8e91ed9e" dependencies = [ "alloy-consensus", "alloy-network", @@ -6222,9 +6273,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8eb878fc5ea95adb5abe55fb97475b3eb0dcc77dfcd6f61bd626a68ae0bdba1" +checksum = "b62ceb771ab9323647093ea2e58dc7f25289a1b95cbef2faa2620f6ca2dee4d9" dependencies = [ "alloy-primitives", "jsonrpsee", @@ -6232,9 +6283,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "753d6f6b03beca1ba9cbd344c05fee075a2ce715ee9d61981c10b9c764a824a2" +checksum = "9cd1eb7bddd2232856ba9d259320a094f9edf2b9061acfe5966e7960208393e6" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6247,14 +6298,14 @@ dependencies = [ "op-alloy-consensus", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "op-alloy-rpc-types-engine" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14e50c94013a1d036a529df259151991dbbd6cf8dc215e3b68b784f95eec60e6" +checksum = "5429622150d18d8e6847a701135082622413e2451b64d03f979415d764566bef" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6269,12 +6320,12 @@ dependencies = [ "op-alloy-consensus", "serde", "snap", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "op-reth" -version = "1.8.2" +version = "1.9.0" dependencies = [ "clap", "reth-cli-util", @@ -6292,8 +6343,8 @@ dependencies = [ [[package]] name = "op-revm" -version = "10.1.0" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "12.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "auto_impl", "revm", @@ -6352,39 +6403,37 @@ dependencies = [ [[package]] 
name = "opentelemetry" -version = "0.29.1" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e87237e2775f74896f9ad219d26a2081751187eb7c9f5c58dde20a23b95d16c" +checksum = "b84bcd6ae87133e903af7ef497404dda70c60d0ea14895fc8a5e6722754fc2a0" dependencies = [ "futures-core", "futures-sink", "js-sys", "pin-project-lite", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "opentelemetry-http" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46d7ab32b827b5b495bd90fa95a6cb65ccc293555dcc3199ae2937d2d237c8ed" +checksum = "d7a6d09a73194e6b66df7c8f1b680f156d916a1a942abf2de06823dd02b7855d" dependencies = [ "async-trait", "bytes", "http", "opentelemetry", "reqwest", - "tracing", ] [[package]] name = "opentelemetry-otlp" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d899720fe06916ccba71c01d04ecd77312734e2de3467fd30d9d580c8ce85656" +checksum = "7a2366db2dca4d2ad033cad11e6ee42844fd727007af5ad04a1730f4cb8163bf" dependencies = [ - "futures-core", "http", "opentelemetry", "opentelemetry-http", @@ -6392,44 +6441,44 @@ dependencies = [ "opentelemetry_sdk", "prost", "reqwest", - "thiserror 2.0.16", + "thiserror 2.0.17", + "tokio", + "tonic", "tracing", ] [[package]] name = "opentelemetry-proto" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c40da242381435e18570d5b9d50aca2a4f4f4d8e146231adb4e7768023309b3" +checksum = "a7175df06de5eaee9909d4805a3d07e28bb752c34cab57fa9cff549da596b30f" dependencies = [ "opentelemetry", "opentelemetry_sdk", "prost", "tonic", + "tonic-prost", ] [[package]] name = "opentelemetry-semantic-conventions" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b29a9f89f1a954936d5aa92f19b2feec3c8f3971d3e96206640db7f9706ae3" +checksum = "e62e29dfe041afb8ed2a6c9737ab57db4907285d999ef8ad3a59092a36bdc846" [[package]] name = "opentelemetry_sdk" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afdefb21d1d47394abc1ba6c57363ab141be19e27cc70d0e422b7f303e4d290b" +checksum = "e14ae4f5991976fd48df6d843de219ca6d31b01daaab2dad5af2badeded372bd" dependencies = [ "futures-channel", "futures-executor", "futures-util", - "glob", "opentelemetry", "percent-encoding", "rand 0.9.2", - "serde_json", - "thiserror 2.0.16", - "tracing", + "thiserror 2.0.17", ] [[package]] @@ -6456,7 +6505,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "primeorder", - "sha2 0.10.9", + "sha2", ] [[package]] @@ -6567,7 +6616,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21e0a3a33733faeaf8651dfee72dd0f388f0c8e5ad496a3478fa5a922f49cfa8" dependencies = [ "memchr", - "thiserror 2.0.16", + "thiserror 2.0.17", "ucd-trie", ] @@ -6583,9 +6632,9 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" dependencies = [ "phf_macros", "phf_shared", @@ -6594,19 +6643,19 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" +checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737" dependencies = [ + "fastrand 2.3.0", "phf_shared", - "rand 0.8.5", ] [[package]] name = "phf_macros" -version = "0.11.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" +checksum = "812f032b54b1e759ccd5f8b6677695d5268c588701effba24601f6932f8269ef" dependencies = [ "phf_generator", "phf_shared", @@ -6617,9 +6666,9 @@ dependencies = [ [[package]] name = "phf_shared" -version = "0.11.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266" dependencies = [ "siphasher", ] @@ -6709,12 +6758,6 @@ dependencies = [ "plotters-backend", ] -[[package]] -name = "pollster" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f3a9f18d041e6d0e102a0a46750538147e5e8992d3b4873aaafee2520b00ce3" - [[package]] name = "polyval" version = "0.6.2" @@ -6739,7 +6782,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" dependencies = [ - "zerovec 0.11.4", + "zerovec", ] [[package]] @@ -6916,9 +6959,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" dependencies = [ "bytes", "prost-derive", @@ -6926,9 +6969,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", "itertools 0.14.0", @@ -6992,7 +7035,7 @@ dependencies = [ "rustc-hash 2.1.1", "rustls", "socket2 0.6.0", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", "web-time", @@ -7013,7 +7056,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.16", + "thiserror 2.0.17", "tinyvec", "tracing", "web-time", @@ -7035,9 +7078,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.40" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" dependencies = [ "proc-macro2", ] @@ -7259,7 +7302,7 @@ checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" dependencies = [ "getrandom 0.2.16", "libredox", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] @@ -7378,7 +7421,7 @@ checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3" [[package]] name = "reth" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7425,7 +7468,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7448,7 +7491,7 @@ dependencies = [ 
[[package]] name = "reth-bench" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -7479,15 +7522,41 @@ dependencies = [ "reth-tracing", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tower", "tracing", ] +[[package]] +name = "reth-bench-compare" +version = "1.9.0" +dependencies = [ + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-eth", + "chrono", + "clap", + "csv", + "ctrlc", + "eyre", + "nix 0.29.0", + "reth-chainspec", + "reth-cli-runner", + "reth-cli-util", + "reth-node-core", + "reth-tracing", + "serde", + "serde_json", + "shellexpand", + "shlex", + "tokio", + "tracing", +] + [[package]] name = "reth-chain-state" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7519,7 +7588,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7539,7 +7608,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-genesis", "clap", @@ -7552,7 +7621,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7624,6 +7693,7 @@ dependencies = [ "serde", "serde_json", "tar", + "tempfile", "tokio", "tokio-stream", "toml", @@ -7633,7 +7703,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.8.2" +version = "1.9.0" dependencies = [ "reth-tasks", "tokio", @@ -7642,7 +7712,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7655,14 +7725,14 @@ dependencies = [ "secp256k1 0.30.0", "serde", "snmalloc-rs", - "thiserror 2.0.16", + "thiserror 2.0.17", "tikv-jemallocator", "tracy-client", ] [[package]] name = "reth-codecs" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7686,7 +7756,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.8.2" +version = "1.9.0" dependencies = [ "proc-macro2", "quote", @@ -7696,7 +7766,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "eyre", @@ -7713,19 +7783,19 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", "auto_impl", "reth-execution-types", "reth-primitives-traits", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-consensus-common" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7739,7 +7809,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7764,7 +7834,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7793,12 +7863,12 @@ dependencies = [ "strum 0.27.2", "sysinfo", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-db-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7829,7 +7899,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7853,13 +7923,13 @@ dependencies = [ "reth-trie-db", "serde", 
"serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "reth-db-models" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7875,14 +7945,13 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", "assert_matches", "discv5", "enr", - "generic-array", "itertools 0.14.0", "parking_lot", "rand 0.8.5", @@ -7894,7 +7963,7 @@ dependencies = [ "schnellru", "secp256k1 0.30.0", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -7902,7 +7971,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7920,14 +7989,14 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1 0.30.0", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-dns-discovery" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7947,7 +8016,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -7955,7 +8024,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7984,7 +8053,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -7993,7 +8062,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8050,7 +8119,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.8.2" +version = "1.9.0" dependencies = [ "aes", "alloy-primitives", @@ -8062,25 +8131,23 @@ dependencies = [ "ctr", "digest 0.10.7", "futures", - "generic-array", "hmac", "pin-project", "rand 0.8.5", "reth-network-peers", "secp256k1 0.30.0", - "sha2 0.10.9", + "sha2", "sha3", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", "tracing", - "typenum", ] [[package]] name = "reth-engine-local" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8094,7 +8161,7 @@ dependencies = [ "reth-optimism-chainspec", "reth-payload-builder", "reth-payload-primitives", - "reth-provider", + "reth-storage-api", "reth-transaction-pool", "scroll-alloy-rpc-types-engine", "tokio", @@ -8104,7 +8171,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8122,13 +8189,13 @@ dependencies = [ "reth-primitives-traits", "reth-trie-common", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] [[package]] name = "reth-engine-service" -version = "1.8.2" +version = "1.9.0" dependencies = [ "futures", "pin-project", @@ -8157,7 +8224,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8212,7 +8279,6 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "reth-trie", - "reth-trie-db", "reth-trie-parallel", "reth-trie-sparse", "reth-trie-sparse-parallel", @@ -8222,14 +8288,14 @@ dependencies = [ "schnellru", "serde_json", "smallvec", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-engine-util" -version 
= "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8256,7 +8322,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8272,13 +8338,13 @@ dependencies = [ "snap", "tempfile", "test-case", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] [[package]] name = "reth-era-downloader" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "bytes", @@ -8287,7 +8353,7 @@ dependencies = [ "futures-util", "reqwest", "reth-fs-util", - "sha2 0.10.9", + "sha2", "tempfile", "test-case", "tokio", @@ -8295,7 +8361,7 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8321,17 +8387,17 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.8.2" +version = "1.9.0" dependencies = [ "reth-consensus", "reth-execution-errors", "reth-storage-errors", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-eth-wire" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8360,7 +8426,7 @@ dependencies = [ "serde", "snap", "test-fuzz", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -8369,7 +8435,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8389,12 +8455,12 @@ dependencies = [ "reth-ethereum-primitives", "reth-primitives-traits", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-ethereum" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -8434,7 +8500,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.8.2" +version = "1.9.0" dependencies = [ "clap", "eyre", @@ -8450,13 +8516,15 @@ dependencies = [ "reth-node-metrics", "reth-rpc-server-types", "reth-tracing", + "reth-tracing-otlp", "tempfile", "tracing", + "url", ] [[package]] name = "reth-ethereum-consensus" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8472,7 +8540,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8484,13 +8552,13 @@ dependencies = [ "reth-primitives-traits", "serde", "serde_json", - "sha2 0.10.9", - "thiserror 2.0.16", + "sha2", + "thiserror 2.0.17", ] [[package]] name = "reth-ethereum-forks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8503,7 +8571,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8531,7 +8599,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8558,7 +8626,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "rayon", @@ -8568,7 +8636,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8593,7 +8661,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8617,19 
+8685,19 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-evm", "alloy-primitives", "alloy-rlp", "nybbles", "reth-storage-errors", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-execution-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8649,7 +8717,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8685,7 +8753,7 @@ dependencies = [ "rmp-serde", "secp256k1 0.30.0", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-util", "tracing", @@ -8693,7 +8761,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "eyre", @@ -8718,13 +8786,13 @@ dependencies = [ "reth-tasks", "reth-transaction-pool", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] [[package]] name = "reth-exex-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8741,18 +8809,19 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.8.2" +version = "1.9.0" dependencies = [ "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-invalid-block-hooks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", @@ -8760,23 +8829,29 @@ dependencies = [ "futures", "jsonrpsee", "pretty_assertions", + "reth-chainspec", "reth-engine-primitives", + "reth-ethereum-primitives", "reth-evm", + "reth-evm-ethereum", "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-api", + "reth-testing-utils", "reth-tracing", "reth-trie", + "revm", "revm-bytecode", "revm-database", "serde", "serde_json", + "tempfile", ] [[package]] name = "reth-ipc" -version = "1.8.2" +version = "1.9.0" dependencies = [ "bytes", "futures", @@ -8788,7 +8863,7 @@ dependencies = [ "reth-tracing", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -8798,7 +8873,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.8.2" +version = "1.9.0" dependencies = [ "bitflags 2.9.4", "byteorder", @@ -8810,13 +8885,13 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "reth-mdbx-sys" -version = "1.8.2" +version = "1.9.0" dependencies = [ "bindgen 0.71.1", "cc", @@ -8824,7 +8899,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.8.2" +version = "1.9.0" dependencies = [ "futures", "metrics", @@ -8835,28 +8910,28 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.8.2" +version = "1.9.0" dependencies = [ "futures-util", "if-addrs", "reqwest", "reth-tracing", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-network" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8907,7 +8982,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "smallvec", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -8917,7 +8992,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.8.2" +version = "1.9.0" dependencies = 
[ "alloy-consensus", "alloy-primitives", @@ -8935,14 +9010,14 @@ dependencies = [ "reth-primitives-traits", "reth-tokio-util", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", ] [[package]] name = "reth-network-p2p" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8964,7 +9039,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8974,14 +9049,14 @@ dependencies = [ "secp256k1 0.30.0", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "url", ] [[package]] name = "reth-network-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -8994,7 +9069,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.8.2" +version = "1.9.0" dependencies = [ "anyhow", "bincode 1.3.3", @@ -9005,14 +9080,14 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", "zstd", ] [[package]] name = "reth-node-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -9035,7 +9110,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9106,7 +9181,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9143,12 +9218,12 @@ dependencies = [ "reth-storage-api", "reth-storage-errors", "reth-tracing", + "reth-tracing-otlp", "reth-transaction-pool", "secp256k1 0.30.0", "serde", "shellexpand", "strum 0.27.2", - "thiserror 2.0.16", "tokio", "toml", "tracing", @@ -9159,7 +9234,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-contract", @@ -9212,7 +9287,7 @@ dependencies = [ [[package]] name = "reth-node-ethstats" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9225,7 +9300,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-tungstenite", @@ -9235,7 +9310,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9258,7 +9333,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.8.2" +version = "1.9.0" dependencies = [ "eyre", "http", @@ -9280,7 +9355,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9291,7 +9366,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.8.2" +version = "1.9.0" dependencies = [ "reth-chainspec", "reth-cli-util", @@ -9331,13 +9406,14 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-hardforks", + "alloy-op-hardforks", "alloy-primitives", "derive_more", "miniz_oxide", @@ -9353,12 +9429,12 @@ dependencies = [ "serde", "serde_json", "tar-no-std", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-optimism-cli" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9398,16 +9474,18 @@ dependencies = [ 
"reth-static-file", "reth-static-file-types", "reth-tracing", + "reth-tracing-otlp", "serde", "tempfile", "tokio", "tokio-util", "tracing", + "url", ] [[package]] name = "reth-optimism-consensus" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9432,13 +9510,13 @@ dependencies = [ "reth-trie", "reth-trie-common", "revm", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "reth-optimism-evm" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9462,12 +9540,12 @@ dependencies = [ "reth-rpc-eth-api", "reth-storage-errors", "revm", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-optimism-flashblocks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9480,20 +9558,20 @@ dependencies = [ "futures-util", "metrics", "reth-chain-state", + "reth-engine-primitives", "reth-errors", "reth-evm", "reth-execution-types", "reth-metrics", - "reth-node-api", "reth-optimism-evm", "reth-optimism-payload-builder", "reth-optimism-primitives", + "reth-payload-primitives", "reth-primitives-traits", "reth-revm", "reth-rpc-eth-types", "reth-storage-api", "reth-tasks", - "reth-trie", "ringbuffer", "serde", "serde_json", @@ -9506,7 +9584,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9516,7 +9594,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9574,19 +9652,20 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", + "alloy-evm", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", "alloy-rpc-types-engine", "derive_more", + "either", "op-alloy-consensus", "op-alloy-rpc-types-engine", "reth-basic-payload-builder", - "reth-chain-state", "reth-chainspec", "reth-evm", "reth-execution-types", @@ -9605,14 +9684,14 @@ dependencies = [ "reth-transaction-pool", "revm", "serde", - "sha2 0.10.9", - "thiserror 2.0.16", + "sha2", + "thiserror 2.0.17", "tracing", ] [[package]] name = "reth-optimism-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9639,11 +9718,12 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-json-rpc", + "alloy-op-hardforks", "alloy-primitives", "alloy-rpc-client", "alloy-rpc-types-debug", @@ -9682,7 +9762,6 @@ dependencies = [ "reth-primitives-traits", "reth-rpc", "reth-rpc-api", - "reth-rpc-convert", "reth-rpc-engine-api", "reth-rpc-eth-api", "reth-rpc-eth-types", @@ -9692,7 +9771,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tower", @@ -9701,7 +9780,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "reth-codecs", @@ -9713,7 +9792,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9743,14 +9822,14 @@ dependencies = [ "reth-storage-api", "reth-transaction-pool", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = 
"reth-payload-builder" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9770,7 +9849,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "pin-project", "reth-payload-primitives", @@ -9781,7 +9860,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9793,16 +9872,18 @@ dependencies = [ "reth-chain-state", "reth-chainspec", "reth-errors", + "reth-execution-types", "reth-primitives-traits", + "reth-trie-common", "scroll-alloy-rpc-types-engine", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", ] [[package]] name = "reth-payload-util" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9811,7 +9892,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -9820,7 +9901,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9842,7 +9923,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9875,12 +9956,12 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-provider" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9903,7 +9984,6 @@ dependencies = [ "reth-errors", "reth-ethereum-engine-primitives", "reth-ethereum-primitives", - "reth-evm", "reth-execution-types", "reth-fs-util", "reth-metrics", @@ -9916,6 +9996,7 @@ dependencies = [ "reth-storage-api", "reth-storage-errors", "reth-testing-utils", + "reth-tracing", "reth-trie", "reth-trie-db", "revm-database", @@ -9929,16 +10010,14 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.8.2" +version = "1.9.0" dependencies = [ - "alloy-consensus", "alloy-eips", "alloy-primitives", "assert_matches", "itertools 0.14.0", "metrics", "rayon", - "reth-chainspec", "reth-config", "reth-db", "reth-db-api", @@ -9954,14 +10033,18 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "rustc-hash 2.1.1", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] +[[package]] +name = "reth-prune-db" +version = "1.9.0" + [[package]] name = "reth-prune-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -9973,13 +10056,14 @@ dependencies = [ "reth-codecs", "serde", "serde_json", - "thiserror 2.0.16", + "strum 0.27.2", + "thiserror 2.0.17", "toml", ] [[package]] name = "reth-ress-protocol" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10005,7 +10089,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10031,7 +10115,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10045,7 +10129,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10118,8 +10202,8 @@ dependencies = [ "revm-primitives", "serde", "serde_json", - "sha2 0.10.9", - "thiserror 
2.0.16", + "sha2", + "thiserror 2.0.17", "tokio", "tokio-stream", "tower", @@ -10129,7 +10213,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-genesis", @@ -10156,7 +10240,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10175,7 +10259,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-network", @@ -10220,7 +10304,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-util", "tower", @@ -10230,7 +10314,7 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-json-rpc", @@ -10257,12 +10341,12 @@ dependencies = [ "scroll-alloy-evm", "scroll-alloy-rpc-types", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-rpc-e2e-tests" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-genesis", "alloy-rpc-types-engine", @@ -10282,7 +10366,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10311,14 +10395,14 @@ dependencies = [ "reth-testing-utils", "reth-transaction-pool", "serde", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-rpc-eth-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10362,7 +10446,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10401,7 +10485,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -10409,7 +10493,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10426,7 +10510,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10441,7 +10525,7 @@ dependencies = [ [[package]] name = "reth-scroll-chainspec" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -10465,7 +10549,7 @@ dependencies = [ [[package]] name = "reth-scroll-cli" -version = "1.8.2" +version = "1.9.0" dependencies = [ "clap", "eyre", @@ -10490,7 +10574,7 @@ dependencies = [ [[package]] name = "reth-scroll-consensus" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10504,13 +10588,13 @@ dependencies = [ "reth-scroll-primitives", "scroll-alloy-consensus", "scroll-alloy-hardforks", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "reth-scroll-engine-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10532,12 +10616,12 @@ dependencies = [ "scroll-alloy-hardforks", "scroll-alloy-rpc-types-engine", "serde", - "sha2 0.10.9", + "sha2", ] [[package]] name = "reth-scroll-evm" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10561,13 +10645,13 @@ dependencies = [ "scroll-alloy-consensus", "scroll-alloy-evm", "scroll-alloy-hardforks", - "thiserror 2.0.16", + 
"thiserror 2.0.17", "tracing", ] [[package]] name = "reth-scroll-forks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -10580,7 +10664,7 @@ dependencies = [ [[package]] name = "reth-scroll-node" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10633,7 +10717,7 @@ dependencies = [ [[package]] name = "reth-scroll-payload" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10657,13 +10741,13 @@ dependencies = [ "reth-transaction-pool", "revm", "scroll-alloy-hardforks", - "thiserror 2.0.16", + "thiserror 2.0.17", "tracing", ] [[package]] name = "reth-scroll-primitives" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10684,7 +10768,7 @@ dependencies = [ [[package]] name = "reth-scroll-rpc" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10717,14 +10801,14 @@ dependencies = [ "scroll-alloy-hardforks", "scroll-alloy-network", "scroll-alloy-rpc-types", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-scroll-txpool" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10750,7 +10834,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10800,14 +10884,14 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-stages-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10828,7 +10912,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -10836,7 +10920,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -10852,7 +10936,7 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10860,6 +10944,7 @@ dependencies = [ "alloy-rpc-types-debug", "alloy-trie", "itertools 0.14.0", + "k256", "reth-chainspec", "reth-consensus", "reth-errors", @@ -10870,14 +10955,15 @@ dependencies = [ "reth-revm", "reth-trie-common", "reth-trie-sparse", + "secp256k1 0.30.0", "serde", "serde_with", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-static-file" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "assert_matches", @@ -10900,19 +10986,20 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "clap", "derive_more", "reth-nippy-jar", "serde", + "serde_json", "strum 0.27.2", ] [[package]] name = "reth-storage-api" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10930,11 +11017,12 @@ dependencies = [ "reth-storage-errors", "reth-trie-common", "revm-database", + "serde_json", ] [[package]] name = "reth-storage-errors" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10944,12 +11032,12 @@ dependencies = [ "reth-prune-types", "reth-static-file-types", "revm-database-interface", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "reth-storage-rpc-provider" 
-version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10978,7 +11066,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "auto_impl", "dyn-clone", @@ -10987,7 +11075,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", "tracing-futures", @@ -10995,7 +11083,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11010,7 +11098,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.8.2" +version = "1.9.0" dependencies = [ "tokio", "tokio-stream", @@ -11019,22 +11107,26 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.8.2" +version = "1.9.0" dependencies = [ "clap", "eyre", + "reth-tracing-otlp", "rolling-file", "tracing", "tracing-appender", "tracing-journald", "tracing-logfmt", "tracing-subscriber 0.3.20", + "url", ] [[package]] name = "reth-tracing-otlp" -version = "1.8.2" +version = "1.9.0" dependencies = [ + "clap", + "eyre", "opentelemetry", "opentelemetry-otlp", "opentelemetry-semantic-conventions", @@ -11042,11 +11134,12 @@ dependencies = [ "tracing", "tracing-opentelemetry", "tracing-subscriber 0.3.20", + "url", ] [[package]] name = "reth-transaction-pool" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11086,7 +11179,7 @@ dependencies = [ "serde_json", "smallvec", "tempfile", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tokio-stream", "tracing", @@ -11094,7 +11187,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11127,7 +11220,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -11137,6 +11230,7 @@ dependencies = [ "alloy-serde", "alloy-trie", "arbitrary", + "arrayvec", "bincode 1.3.3", "bytes", "codspeed-criterion-compat", @@ -11159,7 +11253,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -11184,11 +11278,12 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", "codspeed-criterion-compat", + "crossbeam-channel", "dashmap 6.1.0", "derive_more", "itertools 0.14.0", @@ -11197,7 +11292,6 @@ dependencies = [ "proptest-arbitrary-interop", "rand 0.9.2", "rayon", - "reth-db-api", "reth-execution-errors", "reth-metrics", "reth-primitives-traits", @@ -11207,14 +11301,14 @@ dependencies = [ "reth-trie-common", "reth-trie-db", "reth-trie-sparse", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tracing", ] [[package]] name = "reth-trie-sparse" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11247,7 +11341,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse-parallel" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11276,15 +11370,15 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.8.2" +version = "1.9.0" dependencies = [ "zstd", ] [[package]] name = "revm" -version = "29.0.1" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "31.0.1" +source = 
"git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "revm-bytecode", "revm-context", @@ -11301,8 +11395,8 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "6.2.2" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "7.1.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "bitvec", "phf", @@ -11312,8 +11406,8 @@ dependencies = [ [[package]] name = "revm-context" -version = "9.1.0" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "11.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "bitvec", "cfg-if", @@ -11328,8 +11422,8 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "10.2.0" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "12.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -11343,8 +11437,8 @@ dependencies = [ [[package]] name = "revm-database" -version = "7.0.5" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "9.0.4" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "alloy-eips", "revm-bytecode", @@ -11356,8 +11450,8 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "7.0.5" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "8.0.5" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "auto_impl", "either", @@ -11368,8 +11462,8 @@ dependencies = [ [[package]] name = "revm-handler" -version = "10.0.1" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "12.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "auto_impl", "derive-where", @@ -11386,8 +11480,8 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "10.0.1" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "12.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "auto_impl", "either", @@ -11403,9 +11497,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.29.2" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fdb678b03faa678a7007a7c761a78efa9ca9adcd9434ef3d1ad894aec6e43d1" +checksum = "21caa99f22184a6818946362778cccd3ff02f743c1e085bee87700671570ecb7" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -11418,24 +11512,25 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.16", + "thiserror 2.0.17", ] [[package]] name = "revm-interpreter" -version = "25.0.3" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "29.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "revm-bytecode", 
"revm-context-interface", "revm-primitives", + "revm-state", "serde", ] [[package]] name = "revm-precompile" -version = "27.0.0" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "29.0.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11448,19 +11543,18 @@ dependencies = [ "c-kzg", "cfg-if", "k256", - "libsecp256k1", "p256", "revm-primitives", "ripemd", "rug", "secp256k1 0.31.1", - "sha2 0.10.9", + "sha2", ] [[package]] name = "revm-primitives" -version = "20.2.1" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "21.0.2" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "alloy-primitives", "num_enum", @@ -11471,7 +11565,7 @@ dependencies = [ [[package]] name = "revm-scroll" version = "0.1.0" -source = "git+https://github.com/scroll-tech/scroll-revm#919aa258ae00c5533380c9d866e8acc2cb95f8bc" +source = "git+https://github.com/scroll-tech/scroll-revm?branch=feat%2Fv97#b2dce3260bd04428b242483a40d945342d1df93c" dependencies = [ "auto_impl", "enumn", @@ -11484,8 +11578,8 @@ dependencies = [ [[package]] name = "revm-state" -version = "7.0.5" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "8.1.1" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv97#9ca448f5050ba4fc0d01aa855f535193875081c9" dependencies = [ "bitflags 2.9.4", "revm-bytecode", @@ -11702,6 +11796,15 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver 0.9.0", +] + [[package]] name = "rustc_version" version = "0.3.3" @@ -11918,7 +12021,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "scroll-alloy-consensus" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11941,7 +12044,7 @@ dependencies = [ [[package]] name = "scroll-alloy-evm" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11963,7 +12066,7 @@ dependencies = [ [[package]] name = "scroll-alloy-hardforks" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-hardforks", "auto_impl", @@ -11972,7 +12075,7 @@ dependencies = [ [[package]] name = "scroll-alloy-network" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-network", @@ -11986,7 +12089,7 @@ dependencies = [ [[package]] name = "scroll-alloy-provider" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-provider", @@ -12020,14 +12123,14 @@ dependencies = [ "reth-transaction-pool", "scroll-alloy-network", "scroll-alloy-rpc-types-engine", - "thiserror 2.0.16", + "thiserror 2.0.17", "tokio", "tower", ] [[package]] name = "scroll-alloy-rpc-types" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12045,7 +12148,7 @@ dependencies = [ [[package]] name = "scroll-alloy-rpc-types-engine" -version = "1.8.2" +version = "1.9.0" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ 
-12056,7 +12159,7 @@ dependencies = [ [[package]] name = "scroll-reth" -version = "1.8.2" +version = "1.9.0" dependencies = [ "clap", "reth-cli-util", @@ -12157,13 +12260,22 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser 0.7.0", +] + [[package]] name = "semver" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ - "semver-parser", + "semver-parser 0.10.3", ] [[package]] @@ -12175,6 +12287,12 @@ dependencies = [ "serde", ] +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + [[package]] name = "semver-parser" version = "0.10.3" @@ -12237,14 +12355,15 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.144" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56177480b00303e689183f110b4e727bb4211d692c62d4fcd16d02be93077d40" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ - "indexmap 2.11.1", + "indexmap 2.12.0", "itoa", "memchr", "ryu", + "serde", "serde_core", ] @@ -12290,7 +12409,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.11.1", + "indexmap 2.12.0", "schemars 0.9.0", "schemars 1.0.4", "serde", @@ -12333,19 +12452,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "sha2" version = "0.10.9" @@ -12470,7 +12576,7 @@ checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror 2.0.16", + "thiserror 2.0.17", "time", ] @@ -12507,6 +12613,15 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" +[[package]] +name = "small_btree" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ba60d2df92ba73864714808ca68c059734853e6ab722b40e1cf543ebb3a057a" +dependencies = [ + "arrayvec", +] + [[package]] name = "smallvec" version = "1.15.1" @@ -12578,6 +12693,12 @@ dependencies = [ "sha1", ] +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" + [[package]] name = "spki" version = "0.7.3" @@ -12588,12 +12709,6 @@ dependencies = [ "der", ] -[[package]] -name = "sptr" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" - [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -12749,6 +12864,12 @@ dependencies = [ "libc", ] +[[package]] +name = "tag_ptr" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c0e973b34477b7823833469eb0f5a3a60370fef7a453e02d751b59180d0a5a05" + [[package]] name = "tagptr" version = "0.2.0" @@ -12898,11 +13019,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.16" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - "thiserror-impl 2.0.16", + "thiserror-impl 2.0.17", ] [[package]] @@ -12918,9 +13039,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.16" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", @@ -12978,11 +13099,12 @@ dependencies = [ [[package]] name = "time" -version = "0.3.43" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83bde6f1ec10e72d583d91623c939f623002284ef622b87de38cfd546cbf2031" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", + "itoa", "js-sys", "libc", "num-conv", @@ -13018,16 +13140,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "tinystr" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" -dependencies = [ - "displaydoc", - "zerovec 0.10.4", -] - [[package]] name = "tinystr" version = "0.8.1" @@ -13035,7 +13147,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", - "zerovec 0.11.4", + "zerovec", ] [[package]] @@ -13185,7 +13297,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.11.1", + "indexmap 2.12.0", "serde", "serde_spanned", "toml_datetime", @@ -13201,9 +13313,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "tonic" -version = "0.12.3" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" dependencies = [ "async-trait", "base64 0.22.1", @@ -13211,15 +13323,31 @@ dependencies = [ "http", "http-body", "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", "percent-encoding", "pin-project", - "prost", + "sync_wrapper", + "tokio", "tokio-stream", + "tower", "tower-layer", "tower-service", "tracing", ] +[[package]] +name = "tonic-prost" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +dependencies = [ + "bytes", + "prost", + "tonic", +] + [[package]] name = "tower" version = "0.5.2" @@ -13229,7 +13357,7 @@ dependencies = [ "futures-core", "futures-util", "hdrhistogram", - "indexmap 2.11.1", + "indexmap 2.12.0", "pin-project-lite", "slab", "sync_wrapper", @@ -13374,15 +13502,16 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.30.0" +version = "0.32.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd8e764bd6f5813fd8bebc3117875190c5b0415be8f7f8059bffb6ecd979c444" +checksum = "1e6e5658463dd88089aba75c7791e1d3120633b1bfde22478b28f625a9bb1b8e" dependencies = [ "js-sys", - "once_cell", "opentelemetry", "opentelemetry_sdk", + "rustversion", "smallvec", + "thiserror 2.0.17", "tracing", "tracing-core", "tracing-log", @@ -13514,7 +13643,7 @@ dependencies = [ "rustls", "rustls-pki-types", "sha1", - "thiserror 2.0.16", + "thiserror 2.0.17", "utf-8", ] @@ -14587,12 +14716,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" -[[package]] -name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - [[package]] name = "writeable" version = "0.6.1" @@ -14612,7 +14735,7 @@ dependencies = [ "pharos", "rustc_version 0.4.1", "send_wrapper 0.6.0", - "thiserror 2.0.16", + "thiserror 2.0.17", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -14638,22 +14761,16 @@ dependencies = [ ] [[package]] -name = "yansi" -version = "1.0.1" +name = "xsum" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" +checksum = "0637d3a5566a82fa5214bae89087bc8c9fb94cd8e8a3c07feb691bb8d9c632db" [[package]] -name = "yoke" -version = "0.7.5" +name = "yansi" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" -dependencies = [ - "serde", - "stable_deref_trait", - "yoke-derive 0.7.5", - "zerofrom", -] +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" @@ -14663,22 +14780,10 @@ checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ "serde", "stable_deref_trait", - "yoke-derive 0.8.0", + "yoke-derive", "zerofrom", ] -[[package]] -name = "yoke-derive" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", - "synstructure", -] - [[package]] name = "yoke-derive" version = "0.8.0" @@ -14759,19 +14864,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" dependencies = [ "displaydoc", - "yoke 0.8.0", - "zerofrom", -] - -[[package]] -name = "zerovec" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" -dependencies = [ - "yoke 0.7.5", + "yoke", "zerofrom", - "zerovec-derive 0.10.3", ] [[package]] @@ -14780,20 +14874,9 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" dependencies = [ - "yoke 0.8.0", + "yoke", "zerofrom", - "zerovec-derive 0.11.1", -] - -[[package]] -name = "zerovec-derive" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", + "zerovec-derive", ] 
[[package]] diff --git a/Cargo.toml b/Cargo.toml index e0a1c7da626..12826f50d15 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.8.2" +version = "1.9.0" edition = "2024" rust-version = "1.88" license = "MIT OR Apache-2.0" @@ -10,6 +10,7 @@ exclude = [".github/"] [workspace] members = [ "bin/reth-bench/", + "bin/reth-bench-compare/", "bin/reth/", "crates/storage/rpc-provider/", "crates/chain-state/", @@ -93,6 +94,7 @@ members = [ "crates/payload/util/", "crates/primitives-traits/", "crates/primitives/", + "crates/prune/db", "crates/prune/prune", "crates/prune/types", "crates/ress/protocol", @@ -165,6 +167,7 @@ members = [ "examples/custom-node/", "examples/custom-engine-types/", "examples/custom-evm/", + "examples/custom-hardforks/", "examples/custom-inspector/", "examples/custom-node-components/", "examples/custom-payload-builder/", @@ -344,12 +347,6 @@ inherits = "release" lto = "fat" codegen-units = 1 -[profile.reproducible] -inherits = "release" -panic = "abort" -codegen-units = 1 -incremental = false - [workspace.dependencies] # reth op-reth = { path = "crates/optimism/bin" } @@ -357,6 +354,7 @@ reth = { path = "bin/reth" } reth-storage-rpc-provider = { path = "crates/storage/rpc-provider" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-bench = { path = "bin/reth-bench" } +reth-bench-compare = { path = "bin/reth-bench-compare" } reth-chain-state = { path = "crates/chain-state" } reth-chainspec = { path = "crates/chainspec", default-features = false } reth-cli = { path = "crates/cli/cli" } @@ -390,7 +388,7 @@ reth-era-utils = { path = "crates/era-utils" } reth-errors = { path = "crates/errors" } reth-eth-wire = { path = "crates/net/eth-wire" } reth-eth-wire-types = { path = "crates/net/eth-wire-types" } -reth-ethereum-cli = { path = "crates/ethereum/cli" } +reth-ethereum-cli = { path = "crates/ethereum/cli", default-features = false } reth-ethereum-consensus = { path = "crates/ethereum/consensus", default-features = false } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives", default-features = false } reth-ethereum-forks = { path = "crates/ethereum/hardforks", default-features = false } @@ -431,7 +429,7 @@ reth-optimism-node = { path = "crates/optimism/node" } reth-node-types = { path = "crates/node/types" } reth-op = { path = "crates/optimism/reth", default-features = false } reth-optimism-chainspec = { path = "crates/optimism/chainspec", default-features = false } -reth-optimism-cli = { path = "crates/optimism/cli" } +reth-optimism-cli = { path = "crates/optimism/cli", default-features = false } reth-optimism-consensus = { path = "crates/optimism/consensus", default-features = false } reth-optimism-forks = { path = "crates/optimism/hardforks", default-features = false } reth-optimism-payload-builder = { path = "crates/optimism/payload" } @@ -465,7 +463,7 @@ reth-rpc-convert = { path = "crates/rpc/rpc-convert" } reth-stages = { path = "crates/stages/stages" } reth-stages-api = { path = "crates/stages/api" } reth-stages-types = { path = "crates/stages/types", default-features = false } -reth-stateless = { path = "crates/stateless" } +reth-stateless = { path = "crates/stateless", default-features = false } reth-static-file = { path = "crates/static-file/static-file" } reth-static-file-types = { path = "crates/static-file/types", default-features = false } reth-storage-api = { path = "crates/storage/storage-api", default-features = false } @@ -473,7 +471,8 @@ reth-storage-errors = { path = 
"crates/storage/errors", default-features = false reth-tasks = { path = "crates/tasks" } reth-testing-utils = { path = "testing/testing-utils" } reth-tokio-util = { path = "crates/tokio-util" } -reth-tracing = { path = "crates/tracing" } +reth-tracing = { path = "crates/tracing", default-features = false } +reth-tracing-otlp = { path = "crates/tracing-otlp" } reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie/trie", default-features = false } reth-trie-common = { path = "crates/trie/common", default-features = false } @@ -486,60 +485,60 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { git = "https://github.com/scroll-tech/revm", default-features = false, features = ["enable_eip7702", "enable_eip7623"] } -revm-bytecode = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-database = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-state = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-primitives = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-interpreter = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-inspector = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-context = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-context-interface = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-database-interface = { git = "https://github.com/scroll-tech/revm", default-features = false } -op-revm = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", default-features = false } -revm-inspectors = "0.29.0" +revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false, features = ["enable_eip7702", "enable_eip7623"] } +revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97", default-features = false } +revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", branch = "feat/v97", default-features = false } +revm-inspectors = "0.32.0" # eth alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.4.1" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.21.2", default-features = false } 
-alloy-primitives = { version = "1.3.1", default-features = false, features = ["map-foldhash"] } +alloy-evm = { version = "0.23.0", default-features = false } +alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } -alloy-sol-macro = "1.3.1" -alloy-sol-types = { version = "1.3.1", default-features = false } +alloy-sol-macro = "1.4.1" +alloy-sol-types = { version = "1.4.1", default-features = false } alloy-trie = { version = "0.9.1", default-features = false } -alloy-hardforks = "0.3.5" - -alloy-consensus = { version = "1.0.37", default-features = false } -alloy-contract = { version = "1.0.37", default-features = false } -alloy-eips = { version = "1.0.37", default-features = false } -alloy-genesis = { version = "1.0.37", default-features = false } -alloy-json-rpc = { version = "1.0.37", default-features = false } -alloy-network = { version = "1.0.37", default-features = false } -alloy-network-primitives = { version = "1.0.37", default-features = false } -alloy-provider = { version = "1.0.37", features = ["reqwest"], default-features = false } -alloy-pubsub = { version = "1.0.37", default-features = false } -alloy-rpc-client = { version = "1.0.37", default-features = false } -alloy-rpc-types = { version = "1.0.37", features = ["eth"], default-features = false } -alloy-rpc-types-admin = { version = "1.0.37", default-features = false } -alloy-rpc-types-anvil = { version = "1.0.37", default-features = false } -alloy-rpc-types-beacon = { version = "1.0.37", default-features = false } -alloy-rpc-types-debug = { version = "1.0.37", default-features = false } -alloy-rpc-types-engine = { version = "1.0.37", default-features = false } -alloy-rpc-types-eth = { version = "1.0.37", default-features = false } -alloy-rpc-types-mev = { version = "1.0.37", default-features = false } -alloy-rpc-types-trace = { version = "1.0.37", default-features = false } -alloy-rpc-types-txpool = { version = "1.0.37", default-features = false } -alloy-serde = { version = "1.0.37", default-features = false } -alloy-signer = { version = "1.0.37", default-features = false } -alloy-signer-local = { version = "1.0.37", default-features = false } -alloy-transport = { version = "1.0.37" } -alloy-transport-http = { version = "1.0.37", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.0.37", default-features = false } -alloy-transport-ws = { version = "1.0.37", default-features = false } +alloy-hardforks = "0.4.4" + +alloy-consensus = { version = "1.0.41", default-features = false } +alloy-contract = { version = "1.0.41", default-features = false } +alloy-eips = { version = "1.0.41", default-features = false } +alloy-genesis = { version = "1.0.41", default-features = false } +alloy-json-rpc = { version = "1.0.41", default-features = false } +alloy-network = { version = "1.0.41", default-features = false } +alloy-network-primitives = { version = "1.0.41", default-features = false } +alloy-provider = { version = "1.0.41", features = ["reqwest"], default-features = false } +alloy-pubsub = { version = "1.0.41", default-features = false } +alloy-rpc-client = { version = "1.0.41", default-features = false } +alloy-rpc-types = { version = "1.0.41", features = ["eth"], default-features = false } +alloy-rpc-types-admin = { version = "1.0.41", default-features = false } +alloy-rpc-types-anvil = { version = "1.0.41", default-features = false } +alloy-rpc-types-beacon = 
{ version = "1.0.41", default-features = false } +alloy-rpc-types-debug = { version = "1.0.41", default-features = false } +alloy-rpc-types-engine = { version = "1.0.41", default-features = false } +alloy-rpc-types-eth = { version = "1.0.41", default-features = false } +alloy-rpc-types-mev = { version = "1.0.41", default-features = false } +alloy-rpc-types-trace = { version = "1.0.41", default-features = false } +alloy-rpc-types-txpool = { version = "1.0.41", default-features = false } +alloy-serde = { version = "1.0.41", default-features = false } +alloy-signer = { version = "1.0.41", default-features = false } +alloy-signer-local = { version = "1.0.41", default-features = false } +alloy-transport = { version = "1.0.41" } +alloy-transport-http = { version = "1.0.41", features = ["reqwest-rustls-tls"], default-features = false } +alloy-transport-ipc = { version = "1.0.41", default-features = false } +alloy-transport-ws = { version = "1.0.41", default-features = false } # scroll scroll-alloy-consensus = { path = "crates/scroll/alloy/consensus", default-features = false } @@ -563,17 +562,18 @@ reth-scroll-trie = { path = "crates/scroll/trie" } reth-scroll-txpool = { path = "crates/scroll/txpool" } # op -alloy-op-evm = { version = "0.21.2", default-features = false } -alloy-op-hardforks = "0.3.5" -op-alloy-rpc-types = { version = "0.20.0", default-features = false } -op-alloy-rpc-types-engine = { version = "0.20.0", default-features = false } -op-alloy-network = { version = "0.20.0", default-features = false } -op-alloy-consensus = { version = "0.20.0", default-features = false } -op-alloy-rpc-jsonrpsee = { version = "0.20.0", default-features = false } +alloy-op-evm = { version = "0.23.0", default-features = false } +alloy-op-hardforks = "0.4.4" +op-alloy-rpc-types = { version = "0.22.0", default-features = false } +op-alloy-rpc-types-engine = { version = "0.22.0", default-features = false } +op-alloy-network = { version = "0.22.0", default-features = false } +op-alloy-consensus = { version = "0.22.0", default-features = false } +op-alloy-rpc-jsonrpsee = { version = "0.22.0", default-features = false } op-alloy-flz = { version = "0.13.1", default-features = false } # misc either = { version = "1.15.0", default-features = false } +arrayvec = { version = "0.7.6", default-features = false } aquamarine = "0.6" auto_impl = { version = "1", default-features = false } backon = { version = "1.2", default-features = false, features = ["std-blocking-sleep", "tokio-sleep"] } @@ -590,7 +590,6 @@ dirs-next = "2.0.0" dyn-clone = "1.0.17" eyre = "0.6" fdlimit = "0.3.0" -generic-array = "0.14" humantime = "2.1" humantime-serde = "1.1" itertools = { version = "0.14", default-features = false } @@ -611,6 +610,7 @@ serde_json = { version = "1.0", default-features = false, features = ["alloc"] } serde_with = { version = "3", default-features = false, features = ["macros"] } sha2 = { version = "0.10", default-features = false } shellexpand = "3.0.0" +shlex = "1.3" smallvec = "1" strum = { version = "0.27", default-features = false } strum_macros = "0.27" @@ -661,8 +661,8 @@ tower = "0.5" tower-http = "0.6" # p2p -discv5 = "0.9" -if-addrs = "0.13" +discv5 = "0.10" +if-addrs = "0.14" # rpc jsonrpsee = "0.26.0" @@ -686,11 +686,18 @@ secp256k1 = { version = "0.30", default-features = false, features = ["global-co rand_08 = { package = "rand", version = "0.8" } # for eip-4844 -c-kzg = "2.1.4" +c-kzg = "2.1.5" # config toml = "0.8" +# otlp obs +opentelemetry_sdk = "0.31" +opentelemetry = "0.31" +opentelemetry-otlp = 
"0.31" +opentelemetry-semantic-conventions = "0.31" +tracing-opentelemetry = "0.32" + # misc-testing arbitrary = "1.3" assert_matches = "1.5.0" @@ -725,6 +732,7 @@ concat-kdf = "0.1.0" crossbeam-channel = "0.5.13" crossterm = "0.28.0" csv = "1.3.0" +ctrlc = "3.4" ctr = "0.9.2" data-encoding = "2" delegate = "0.13" @@ -763,8 +771,8 @@ walkdir = "2.3.3" vergen-git2 = "1.0.5" [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm" } -op-revm = { git = "https://github.com/scroll-tech/revm" } +revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97" } +op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97" } # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } @@ -806,3 +814,6 @@ op-revm = { git = "https://github.com/scroll-tech/revm" } # jsonrpsee-server = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" } # jsonrpsee-http-client = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" } # jsonrpsee-types = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" } + +# alloy-evm = { git = "https://github.com/alloy-rs/evm", rev = "a69f0b45a6b0286e16072cb8399e02ce6ceca353" } +# alloy-op-evm = { git = "https://github.com/alloy-rs/evm", rev = "a69f0b45a6b0286e16072cb8399e02ce6ceca353" } diff --git a/Dockerfile b/Dockerfile index fc97c160bbc..b61c177525b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,7 @@ LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0" # Install system dependencies -RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config +RUN apt-get update && apt-get install -y libclang-dev pkg-config # Builds a cargo-chef plan FROM chef AS planner diff --git a/Dockerfile.reproducible b/Dockerfile.reproducible index 602b9b857c0..a0d4a17b5bb 100644 --- a/Dockerfile.reproducible +++ b/Dockerfile.reproducible @@ -1,17 +1,17 @@ -ARG RUST_VERSION=1 +# Use the Rust 1.88 image based on Debian Bookworm +FROM rust:1.88-bookworm AS builder -FROM rust:$RUST_VERSION-bookworm AS builder - -RUN apt-get update && apt-get install -y \ - git \ - libclang-dev=1:14.0-55.7~deb12u1 +# Install specific version of libclang-dev +RUN apt-get update && apt-get install -y libclang-dev=1:14.0-55.7~deb12u1 # Copy the project to the container COPY ./ /app WORKDIR /app -RUN make build-reth-reproducible -RUN mv /app/target/x86_64-unknown-linux-gnu/reproducible/reth /reth +# Build the project with the reproducible settings +RUN make build-reproducible + +RUN mv /app/target/x86_64-unknown-linux-gnu/release/reth /reth # Create a minimal final image with just the binary FROM gcr.io/distroless/cc-debian12:nonroot-6755e21ccd99ddead6edc8106ba03888cbeed41a diff --git a/DockerfileOp b/DockerfileOp index d195ca21601..ff65dc276b1 100644 --- a/DockerfileOp +++ b/DockerfileOp @@ -31,7 +31,7 @@ RUN cargo build --profile $BUILD_PROFILE --features "$FEATURES" --bin op-reth -- RUN ls -la /app/target/$BUILD_PROFILE/op-reth RUN cp /app/target/$BUILD_PROFILE/op-reth /app/op-reth -FROM ubuntu:22.04 AS runtime +FROM ubuntu AS runtime RUN apt-get update && \ apt-get install -y ca-certificates libssl-dev pkg-config strace && \ diff --git a/Makefile b/Makefile index 
b039610ee6a..6c0a5a433af 100644 --- a/Makefile +++ b/Makefile @@ -70,25 +70,34 @@ install-scroll: ## Build and install the scroll-reth binary under `~/.cargo/bin` build: ## Build the reth binary into `target` directory. cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" -.PHONY: build-reth -build-reth: ## Build the reth binary (alias for build target). - $(MAKE) build - # Environment variables for reproducible builds +# Initialize RUSTFLAGS +RUST_BUILD_FLAGS = +# Enable static linking to ensure reproducibility across builds +RUST_BUILD_FLAGS += --C target-feature=+crt-static +# Set the linker to use static libgcc to ensure reproducibility across builds +RUST_BUILD_FLAGS += -C link-arg=-static-libgcc +# Remove build ID from the binary to ensure reproducibility across builds +RUST_BUILD_FLAGS += -C link-arg=-Wl,--build-id=none +# Remove metadata hash from symbol names to ensure reproducible builds +RUST_BUILD_FLAGS += -C metadata='' # Set timestamp from last git commit for reproducible builds SOURCE_DATE ?= $(shell git log -1 --pretty=%ct) - -# `reproducible` only supports reth on x86_64-unknown-linux-gnu -build-%-reproducible: - @if [ "$*" != "reth" ]; then \ - echo "Error: Reproducible builds are only supported for reth, not $*"; \ - exit 1; \ - fi +# Disable incremental compilation to avoid non-deterministic artifacts +CARGO_INCREMENTAL_VAL = 0 +# Set C locale for consistent string handling and sorting +LOCALE_VAL = C +# Set UTC timezone for consistent time handling across builds +TZ_VAL = UTC + +.PHONY: build-reproducible +build-reproducible: ## Build the reth binary into `target` directory with reproducible builds. Only works for x86_64-unknown-linux-gnu currently SOURCE_DATE_EPOCH=$(SOURCE_DATE) \ - RUSTFLAGS="-C symbol-mangling-version=v0 -C strip=none -C link-arg=-Wl,--build-id=none -C metadata='' --remap-path-prefix $$(pwd)=." \ - LC_ALL=C \ - TZ=UTC \ - cargo build --bin reth --features "$(FEATURES)" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu + RUSTFLAGS="${RUST_BUILD_FLAGS} --remap-path-prefix $$(pwd)=." \ + CARGO_INCREMENTAL=${CARGO_INCREMENTAL_VAL} \ + LC_ALL=${LOCALE_VAL} \ + TZ=${TZ_VAL} \ + cargo build --bin reth --features "$(FEATURES)" --profile "release" --locked --target x86_64-unknown-linux-gnu .PHONY: build-debug build-debug: ## Build the reth binary into `target/debug` directory. @@ -152,22 +161,6 @@ op-build-x86_64-apple-darwin: op-build-aarch64-apple-darwin: $(MAKE) op-build-native-aarch64-apple-darwin -build-deb-%: - @case "$*" in \ - x86_64-unknown-linux-gnu|aarch64-unknown-linux-gnu|riscv64gc-unknown-linux-gnu) \ - echo "Building debian package for $*"; \ - ;; \ - *) \ - echo "Error: Debian packages are only supported for x86_64-unknown-linux-gnu, aarch64-unknown-linux-gnu, and riscv64gc-unknown-linux-gnu, not $*"; \ - exit 1; \ - ;; \ - esac - cargo install cargo-deb@3.6.0 --locked - cargo deb --profile $(PROFILE) --no-build --no-dbgsym --no-strip \ - --target $* \ - $(if $(VERSION),--deb-version "1~$(VERSION)") \ - $(if $(VERSION),--output "target/$*/$(PROFILE)/reth-$(VERSION)-$*-$(PROFILE).deb") - # Create a `.tar.gz` containing a binary for a specific target. 
define tarball_release_binary cp $(CARGO_TARGET_DIR)/$(1)/$(PROFILE)/$(2) $(BIN_DIR)/$(2) diff --git a/bin/reth-bench-compare/Cargo.toml b/bin/reth-bench-compare/Cargo.toml new file mode 100644 index 00000000000..11d9b4f8bdb --- /dev/null +++ b/bin/reth-bench-compare/Cargo.toml @@ -0,0 +1,96 @@ +[package] +name = "reth-bench-compare" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Automated reth benchmark comparison between git references" + +[lints] +workspace = true + +[[bin]] +name = "reth-bench-compare" +path = "src/main.rs" + +[dependencies] +# reth +reth-cli-runner.workspace = true +reth-cli-util.workspace = true +reth-node-core.workspace = true +reth-tracing.workspace = true +reth-chainspec.workspace = true + +# alloy +alloy-provider = { workspace = true, features = ["reqwest-rustls-tls"], default-features = false } +alloy-rpc-types-eth.workspace = true +alloy-primitives.workspace = true + +# CLI and argument parsing +clap = { workspace = true, features = ["derive", "env"] } +eyre.workspace = true + +# Async runtime +tokio = { workspace = true, features = ["full"] } +tracing.workspace = true + +# Serialization +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true + +# Time handling +chrono = { workspace = true, features = ["serde"] } + +# Path manipulation +shellexpand.workspace = true + +# CSV handling +csv.workspace = true + +# Process management +ctrlc.workspace = true +shlex.workspace = true + +[target.'cfg(unix)'.dependencies] +nix = { version = "0.29", features = ["signal", "process"] } + +[features] +default = ["jemalloc"] + +asm-keccak = [ + "reth-node-core/asm-keccak", + "alloy-primitives/asm-keccak", +] + +jemalloc = [ + "reth-cli-util/jemalloc", + "reth-node-core/jemalloc", +] +jemalloc-prof = ["reth-cli-util/jemalloc-prof"] +tracy-allocator = ["reth-cli-util/tracy-allocator"] + +min-error-logs = [ + "tracing/release_max_level_error", + "reth-node-core/min-error-logs", +] +min-warn-logs = [ + "tracing/release_max_level_warn", + "reth-node-core/min-warn-logs", +] +min-info-logs = [ + "tracing/release_max_level_info", + "reth-node-core/min-info-logs", +] +min-debug-logs = [ + "tracing/release_max_level_debug", + "reth-node-core/min-debug-logs", +] +min-trace-logs = [ + "tracing/release_max_level_trace", + "reth-node-core/min-trace-logs", +] + +# no-op feature flag for switching between the `optimism` and default functionality in CI matrices +ethereum = [] diff --git a/bin/reth-bench-compare/src/benchmark.rs b/bin/reth-bench-compare/src/benchmark.rs new file mode 100644 index 00000000000..e1b971f5792 --- /dev/null +++ b/bin/reth-bench-compare/src/benchmark.rs @@ -0,0 +1,296 @@ +//! Benchmark execution using reth-bench. 
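This new module drives the external `reth-bench` binary (its `new-payload-fcu` subcommand) as a child process rather than linking it as a library, so the code below is essentially process management: building the command line, streaming child stdout/stderr, and surfacing exit codes. A minimal sketch of how the runner is driven, assuming the `Args` type and the warmup/benchmark call order introduced in `cli.rs` later in this diff (the wrapper function itself is hypothetical):

    // Hypothetical driver mirroring the sequence used in cli.rs.
    use crate::{benchmark::BenchmarkRunner, cli::Args};

    async fn drive_one_run(args: &Args, tip: u64, out_dir: &std::path::Path) -> eyre::Result<()> {
        let runner = BenchmarkRunner::new(args);
        // Best-effort page-cache drop (needs passwordless sudo) so the warmup starts cold.
        BenchmarkRunner::clear_fs_caches().await?;
        // Warm caches from the node's current tip over the configured number of warmup blocks.
        runner.run_warmup(tip).await?;
        // Measured run: reth-bench writes combined_latency.csv and total_gas.csv into out_dir.
        runner.run_benchmark(tip, tip + args.blocks, out_dir).await?;
        Ok(())
    }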
+ +use crate::cli::Args; +use eyre::{eyre, Result, WrapErr}; +use std::{ + path::Path, + sync::{Arc, Mutex}, +}; +use tokio::{ + fs::File as AsyncFile, + io::{AsyncBufReadExt, AsyncWriteExt, BufReader}, + process::Command, +}; +use tracing::{debug, error, info, warn}; + +/// Manages benchmark execution using reth-bench +pub(crate) struct BenchmarkRunner { + rpc_url: String, + jwt_secret: String, + wait_time: Option, + warmup_blocks: u64, +} + +impl BenchmarkRunner { + /// Create a new `BenchmarkRunner` from CLI arguments + pub(crate) fn new(args: &Args) -> Self { + Self { + rpc_url: args.get_rpc_url(), + jwt_secret: args.jwt_secret_path().to_string_lossy().to_string(), + wait_time: args.wait_time.clone(), + warmup_blocks: args.get_warmup_blocks(), + } + } + + /// Clear filesystem caches (page cache, dentries, and inodes) + pub(crate) async fn clear_fs_caches() -> Result<()> { + info!("Clearing filesystem caches..."); + + // First sync to ensure all pending writes are flushed + let sync_output = + Command::new("sync").output().await.wrap_err("Failed to execute sync command")?; + + if !sync_output.status.success() { + return Err(eyre!("sync command failed")); + } + + // Drop caches - requires sudo/root permissions + // 3 = drop pagecache, dentries, and inodes + let drop_caches_cmd = Command::new("sudo") + .args(["-n", "sh", "-c", "echo 3 > /proc/sys/vm/drop_caches"]) + .output() + .await; + + match drop_caches_cmd { + Ok(output) if output.status.success() => { + info!("Successfully cleared filesystem caches"); + Ok(()) + } + Ok(output) => { + let stderr = String::from_utf8_lossy(&output.stderr); + if stderr.contains("sudo: a password is required") { + warn!("Unable to clear filesystem caches: sudo password required"); + warn!( + "For optimal benchmarking, configure passwordless sudo for cache clearing:" + ); + warn!(" echo '$USER ALL=(ALL) NOPASSWD: /bin/sh -c echo\\\\ [0-9]\\\\ \\\\>\\\\ /proc/sys/vm/drop_caches' | sudo tee /etc/sudoers.d/drop_caches"); + Ok(()) + } else { + Err(eyre!("Failed to clear filesystem caches: {}", stderr)) + } + } + Err(e) => { + warn!("Unable to clear filesystem caches: {}", e); + Ok(()) + } + } + } + + /// Run a warmup benchmark for cache warming + pub(crate) async fn run_warmup(&self, from_block: u64) -> Result<()> { + let to_block = from_block + self.warmup_blocks; + info!( + "Running warmup benchmark from block {} to {} ({} blocks)", + from_block, to_block, self.warmup_blocks + ); + + // Build the reth-bench command for warmup (no output flag) + let mut cmd = Command::new("reth-bench"); + cmd.args([ + "new-payload-fcu", + "--rpc-url", + &self.rpc_url, + "--jwt-secret", + &self.jwt_secret, + "--from", + &from_block.to_string(), + "--to", + &to_block.to_string(), + ]); + + // Add wait-time argument if provided + if let Some(ref wait_time) = self.wait_time { + cmd.args(["--wait-time", wait_time]); + } + + cmd.stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .kill_on_drop(true); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + cmd.process_group(0); + } + + debug!("Executing warmup reth-bench command: {:?}", cmd); + + // Execute the warmup benchmark + let mut child = cmd.spawn().wrap_err("Failed to start warmup reth-bench process")?; + + // Stream output at debug level + if let Some(stdout) = child.stdout.take() { + tokio::spawn(async move { + let reader = BufReader::new(stdout); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[WARMUP] {}", line); + } + 
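                // Warmup output is only surfaced at debug level; unlike the measured run in
                // `run_benchmark` below, it is not persisted to a log file.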
}); + } + + if let Some(stderr) = child.stderr.take() { + tokio::spawn(async move { + let reader = BufReader::new(stderr); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[WARMUP] {}", line); + } + }); + } + + let status = child.wait().await.wrap_err("Failed to wait for warmup reth-bench")?; + + if !status.success() { + return Err(eyre!("Warmup reth-bench failed with exit code: {:?}", status.code())); + } + + info!("Warmup completed successfully"); + Ok(()) + } + + /// Run a benchmark for the specified block range + pub(crate) async fn run_benchmark( + &self, + from_block: u64, + to_block: u64, + output_dir: &Path, + ) -> Result<()> { + info!( + "Running benchmark from block {} to {} (output: {:?})", + from_block, to_block, output_dir + ); + + // Ensure output directory exists + std::fs::create_dir_all(output_dir) + .wrap_err_with(|| format!("Failed to create output directory: {output_dir:?}"))?; + + // Create log file path for reth-bench output + let log_file_path = output_dir.join("reth_bench.log"); + info!("reth-bench logs will be saved to: {:?}", log_file_path); + + // Build the reth-bench command + let mut cmd = Command::new("reth-bench"); + cmd.args([ + "new-payload-fcu", + "--rpc-url", + &self.rpc_url, + "--jwt-secret", + &self.jwt_secret, + "--from", + &from_block.to_string(), + "--to", + &to_block.to_string(), + "--output", + &output_dir.to_string_lossy(), + ]); + + // Add wait-time argument if provided + if let Some(ref wait_time) = self.wait_time { + cmd.args(["--wait-time", wait_time]); + } + + cmd.stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .kill_on_drop(true); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + cmd.process_group(0); + } + + // Debug log the command + debug!("Executing reth-bench command: {:?}", cmd); + + // Execute the benchmark + let mut child = cmd.spawn().wrap_err("Failed to start reth-bench process")?; + + // Capture stdout and stderr for error reporting + let stdout_lines = Arc::new(Mutex::new(Vec::new())); + let stderr_lines = Arc::new(Mutex::new(Vec::new())); + + // Stream stdout with prefix at debug level, capture for error reporting, and write to log + // file + if let Some(stdout) = child.stdout.take() { + let stdout_lines_clone = stdout_lines.clone(); + let log_file = AsyncFile::create(&log_file_path) + .await + .wrap_err(format!("Failed to create log file: {:?}", log_file_path))?; + tokio::spawn(async move { + let reader = BufReader::new(stdout); + let mut lines = reader.lines(); + let mut log_file = log_file; + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH-BENCH] {}", line); + if let Ok(mut captured) = stdout_lines_clone.lock() { + captured.push(line.clone()); + } + // Write to log file (reth-bench output already has timestamps if needed) + let log_line = format!("{}\n", line); + if let Err(e) = log_file.write_all(log_line.as_bytes()).await { + debug!("Failed to write to log file: {}", e); + } + } + }); + } + + // Stream stderr with prefix at debug level, capture for error reporting, and write to log + // file + if let Some(stderr) = child.stderr.take() { + let stderr_lines_clone = stderr_lines.clone(); + let log_file = AsyncFile::options() + .create(true) + .append(true) + .open(&log_file_path) + .await + .wrap_err(format!("Failed to open log file for stderr: {:?}", log_file_path))?; + tokio::spawn(async move { + let reader = BufReader::new(stderr); + let mut lines = reader.lines(); + let mut log_file = log_file; 
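                // This task appends to the same reth_bench.log that the stdout task created
                // above, so stdout and stderr end up interleaved in a single log file.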
+ while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH-BENCH] {}", line); + if let Ok(mut captured) = stderr_lines_clone.lock() { + captured.push(line.clone()); + } + // Write to log file (reth-bench output already has timestamps if needed) + let log_line = format!("{}\n", line); + if let Err(e) = log_file.write_all(log_line.as_bytes()).await { + debug!("Failed to write to log file: {}", e); + } + } + }); + } + + let status = child.wait().await.wrap_err("Failed to wait for reth-bench")?; + + if !status.success() { + // Print all captured output when command fails + error!("reth-bench failed with exit code: {:?}", status.code()); + + if let Ok(stdout) = stdout_lines.lock() && + !stdout.is_empty() + { + error!("reth-bench stdout:"); + for line in stdout.iter() { + error!(" {}", line); + } + } + + if let Ok(stderr) = stderr_lines.lock() && + !stderr.is_empty() + { + error!("reth-bench stderr:"); + for line in stderr.iter() { + error!(" {}", line); + } + } + + return Err(eyre!("reth-bench failed with exit code: {:?}", status.code())); + } + + info!("Benchmark completed"); + Ok(()) + } +} diff --git a/bin/reth-bench-compare/src/cli.rs b/bin/reth-bench-compare/src/cli.rs new file mode 100644 index 00000000000..ecb7125c46d --- /dev/null +++ b/bin/reth-bench-compare/src/cli.rs @@ -0,0 +1,931 @@ +//! CLI argument parsing and main command orchestration. + +use alloy_provider::{Provider, ProviderBuilder}; +use clap::Parser; +use eyre::{eyre, Result, WrapErr}; +use reth_chainspec::Chain; +use reth_cli_runner::CliContext; +use reth_node_core::args::{DatadirArgs, LogArgs}; +use reth_tracing::FileWorkerGuard; +use std::{net::TcpListener, path::PathBuf, str::FromStr}; +use tokio::process::Command; +use tracing::{debug, info, warn}; + +use crate::{ + benchmark::BenchmarkRunner, comparison::ComparisonGenerator, compilation::CompilationManager, + git::GitManager, node::NodeManager, +}; + +/// Target for disabling the --debug.startup-sync-state-idle flag +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum DisableStartupSyncStateIdle { + /// Disable for baseline and warmup runs + Baseline, + /// Disable for feature runs only + Feature, + /// Disable for all runs + All, +} + +impl FromStr for DisableStartupSyncStateIdle { + type Err = String; + + fn from_str(s: &str) -> std::result::Result { + match s.to_lowercase().as_str() { + "baseline" => Ok(Self::Baseline), + "feature" => Ok(Self::Feature), + "all" => Ok(Self::All), + _ => Err(format!("Invalid value '{}'. 
Expected 'baseline', 'feature', or 'all'", s)), + } + } +} + +impl std::fmt::Display for DisableStartupSyncStateIdle { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Baseline => write!(f, "baseline"), + Self::Feature => write!(f, "feature"), + Self::All => write!(f, "all"), + } + } +} + +/// Automated reth benchmark comparison between git references +#[derive(Debug, Parser)] +#[command( + name = "reth-bench-compare", + about = "Compare reth performance between two git references (branches or tags)", + version +)] +pub(crate) struct Args { + /// Git reference (branch or tag) to use as baseline for comparison + #[arg(long, value_name = "REF")] + pub baseline_ref: String, + + /// Git reference (branch or tag) to compare against the baseline + #[arg(long, value_name = "REF")] + pub feature_ref: String, + + #[command(flatten)] + pub datadir: DatadirArgs, + + /// Number of blocks to benchmark + #[arg(long, value_name = "N", default_value = "100")] + pub blocks: u64, + + /// RPC endpoint for fetching block data + #[arg(long, value_name = "URL")] + pub rpc_url: Option<String>, + + /// JWT secret file path + /// + /// If not provided, defaults to `<datadir>/<chain>/jwt.hex`. + /// If the file doesn't exist, it will be created automatically. + #[arg(long, value_name = "PATH")] + pub jwt_secret: Option<PathBuf>, + + /// Output directory for benchmark results + #[arg(long, value_name = "PATH", default_value = "./reth-bench-compare")] + pub output_dir: String, + + /// Skip git branch validation (useful for testing) + #[arg(long)] + pub skip_git_validation: bool, + + /// Port for reth metrics endpoint + #[arg(long, value_name = "PORT", default_value = "5005")] + pub metrics_port: u16, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain name or numeric chain ID. + #[arg(long, value_name = "CHAIN", default_value = "mainnet", required = false)] + pub chain: Chain, + + /// Run reth binary with sudo (for elevated privileges) + #[arg(long)] + pub sudo: bool, + + /// Generate comparison charts using Python script + #[arg(long)] + pub draw: bool, + + /// Enable CPU profiling with samply during benchmark runs + #[arg(long)] + pub profile: bool, + + /// Wait time between engine API calls (passed to reth-bench) + #[arg(long, value_name = "DURATION")] + pub wait_time: Option<String>, + + /// Number of blocks to run for cache warmup after clearing caches. + /// If not specified, defaults to the same as --blocks + #[arg(long, value_name = "N")] + pub warmup_blocks: Option<u64>, + + /// Disable filesystem cache clearing before warmup phase. + /// By default, filesystem caches are cleared before warmup to ensure consistent benchmarks. + #[arg(long)] + pub no_clear_cache: bool, + + #[command(flatten)] + pub logs: LogArgs, + + /// Additional arguments to pass to baseline reth node command + /// + /// Example: `--baseline-args "--debug.tip 0xabc..."` + #[arg(long, value_name = "ARGS")] + pub baseline_args: Option<String>, + + /// Additional arguments to pass to feature reth node command + /// + /// Example: `--feature-args "--debug.tip 0xdef..."` + #[arg(long, value_name = "ARGS")] + pub feature_args: Option<String>, + + /// Additional arguments to pass to reth node command (applied to both baseline and feature) + /// + /// All arguments after `--` will be passed directly to the reth node command.
+ /// Example: `reth-bench-compare --baseline-ref main --feature-ref pr/123 -- --debug.tip + /// 0xabc...` + #[arg(trailing_var_arg = true, allow_hyphen_values = true)] + pub reth_args: Vec, + + /// Comma-separated list of features to enable during reth compilation + /// + /// Example: `jemalloc,asm-keccak` + #[arg(long, value_name = "FEATURES", default_value = "jemalloc,asm-keccak")] + pub features: String, + + /// Disable automatic --debug.startup-sync-state-idle flag for specific runs. + /// Can be "baseline", "feature", or "all". + /// By default, the flag is passed to warmup, baseline, and feature runs. + /// When "baseline" is specified, the flag is NOT passed to warmup OR baseline. + /// When "feature" is specified, the flag is NOT passed to feature. + /// When "all" is specified, the flag is NOT passed to any run. + #[arg(long, value_name = "TARGET")] + pub disable_startup_sync_state_idle: Option, +} + +impl Args { + /// Initializes tracing with the configured options. + pub(crate) fn init_tracing(&self) -> Result> { + let guard = self.logs.init_tracing()?; + Ok(guard) + } + + /// Build additional arguments for a specific ref type, conditionally including + /// --debug.startup-sync-state-idle based on the configuration + pub(crate) fn build_additional_args( + &self, + ref_type: &str, + base_args_str: Option<&String>, + ) -> Vec { + // Parse the base arguments string if provided + let mut args = base_args_str.map(|s| parse_args_string(s)).unwrap_or_default(); + + // Determine if we should add the --debug.startup-sync-state-idle flag + let should_add_flag = match self.disable_startup_sync_state_idle { + None => true, // By default, add the flag + Some(DisableStartupSyncStateIdle::All) => false, + Some(DisableStartupSyncStateIdle::Baseline) => { + ref_type != "baseline" && ref_type != "warmup" + } + Some(DisableStartupSyncStateIdle::Feature) => ref_type != "feature", + }; + + if should_add_flag { + args.push("--debug.startup-sync-state-idle".to_string()); + debug!("Adding --debug.startup-sync-state-idle flag for ref_type: {}", ref_type); + } else { + debug!("Skipping --debug.startup-sync-state-idle flag for ref_type: {}", ref_type); + } + + args + } + + /// Get the default RPC URL for a given chain + const fn get_default_rpc_url(chain: &Chain) -> &'static str { + match chain.id() { + 8453 => "https://base-mainnet.rpc.ithaca.xyz", // base + 84532 => "https://base-sepolia.rpc.ithaca.xyz", // base-sepolia + 27082 => "https://rpc.hoodi.ethpandaops.io", // hoodi + _ => "https://reth-ethereum.ithaca.xyz/rpc", // mainnet and fallback + } + } + + /// Get the RPC URL, using chain-specific default if not provided + pub(crate) fn get_rpc_url(&self) -> String { + self.rpc_url.clone().unwrap_or_else(|| Self::get_default_rpc_url(&self.chain).to_string()) + } + + /// Get the JWT secret path - either provided or derived from datadir + pub(crate) fn jwt_secret_path(&self) -> PathBuf { + match &self.jwt_secret { + Some(path) => { + let jwt_secret_str = path.to_string_lossy(); + let expanded = shellexpand::tilde(&jwt_secret_str); + PathBuf::from(expanded.as_ref()) + } + None => { + // Use the same logic as reth: //jwt.hex + let chain_path = self.datadir.clone().resolve_datadir(self.chain); + chain_path.jwt() + } + } + } + + /// Get the resolved datadir path using the chain + pub(crate) fn datadir_path(&self) -> PathBuf { + let chain_path = self.datadir.clone().resolve_datadir(self.chain); + chain_path.data_dir().to_path_buf() + } + + /// Get the expanded output directory path + pub(crate) fn 
output_dir_path(&self) -> PathBuf { + let expanded = shellexpand::tilde(&self.output_dir); + PathBuf::from(expanded.as_ref()) + } + + /// Get the effective warmup blocks value - either specified or defaults to blocks + pub(crate) fn get_warmup_blocks(&self) -> u64 { + self.warmup_blocks.unwrap_or(self.blocks) + } +} + +/// Validate that the RPC endpoint chain ID matches the specified chain +async fn validate_rpc_chain_id(rpc_url: &str, expected_chain: &Chain) -> Result<()> { + // Create Alloy provider + let url = rpc_url.parse().map_err(|e| eyre!("Invalid RPC URL '{}': {}", rpc_url, e))?; + let provider = ProviderBuilder::new().connect_http(url); + + // Query chain ID using Alloy + let rpc_chain_id = provider + .get_chain_id() + .await + .map_err(|e| eyre!("Failed to get chain ID from RPC endpoint {}: {:?}", rpc_url, e))?; + + let expected_chain_id = expected_chain.id(); + + if rpc_chain_id != expected_chain_id { + return Err(eyre!( + "RPC endpoint chain ID mismatch!\n\ + Expected: {} (chain: {})\n\ + Found: {} at RPC endpoint: {}\n\n\ + Please use an RPC endpoint for the correct network or change the --chain argument.", + expected_chain_id, + expected_chain, + rpc_chain_id, + rpc_url + )); + } + + info!("Validated RPC endpoint chain ID"); + Ok(()) +} + +/// Main comparison workflow execution +pub(crate) async fn run_comparison(args: Args, _ctx: CliContext) -> Result<()> { + // Create a new process group for this process and all its children + #[cfg(unix)] + { + use nix::unistd::{getpid, setpgid}; + if let Err(e) = setpgid(getpid(), getpid()) { + warn!("Failed to create process group: {e}"); + } + } + + info!( + "Starting benchmark comparison between '{}' and '{}'", + args.baseline_ref, args.feature_ref + ); + + if args.sudo { + info!("Running in sudo mode - reth commands will use elevated privileges"); + } + + // Initialize Git manager + let git_manager = GitManager::new()?; + // Fetch all branches, tags, and commits + git_manager.fetch_all()?; + + // Initialize compilation manager + let output_dir = args.output_dir_path(); + let compilation_manager = CompilationManager::new( + git_manager.repo_root().to_string(), + output_dir.clone(), + git_manager.clone(), + args.features.clone(), + )?; + // Initialize node manager + let mut node_manager = NodeManager::new(&args); + + let benchmark_runner = BenchmarkRunner::new(&args); + let mut comparison_generator = ComparisonGenerator::new(&args); + + // Set the comparison directory in node manager to align with results directory + node_manager.set_comparison_dir(comparison_generator.get_output_dir()); + + // Store original git state for restoration + let original_ref = git_manager.get_current_ref()?; + info!("Current git reference: {}", original_ref); + + // Validate git state + if !args.skip_git_validation { + git_manager.validate_clean_state()?; + git_manager.validate_refs(&[&args.baseline_ref, &args.feature_ref])?; + } + + // Validate RPC endpoint chain ID matches the specified chain + let rpc_url = args.get_rpc_url(); + validate_rpc_chain_id(&rpc_url, &args.chain).await?; + + // Setup signal handling for cleanup + let git_manager_cleanup = git_manager.clone(); + let original_ref_cleanup = original_ref.clone(); + ctrlc::set_handler(move || { + eprintln!("Received interrupt signal, cleaning up..."); + + // Send SIGTERM to entire process group to ensure all children exit + #[cfg(unix)] + { + use nix::{ + sys::signal::{kill, Signal}, + unistd::Pid, + }; + + // Send SIGTERM to our process group (negative PID = process group) + let current_pid = 
std::process::id() as i32; + let pgid = Pid::from_raw(-current_pid); + if let Err(e) = kill(pgid, Signal::SIGTERM) { + eprintln!("Failed to send SIGTERM to process group: {e}"); + } + } + + // Give a moment for any ongoing git operations to complete + std::thread::sleep(std::time::Duration::from_millis(200)); + + if let Err(e) = git_manager_cleanup.switch_ref(&original_ref_cleanup) { + eprintln!("Failed to restore original git reference: {e}"); + eprintln!("You may need to manually run: git checkout {original_ref_cleanup}"); + } + std::process::exit(1); + })?; + + let result = run_benchmark_workflow( + &git_manager, + &compilation_manager, + &mut node_manager, + &benchmark_runner, + &mut comparison_generator, + &args, + ) + .await; + + // Always restore original git reference + info!("Restoring original git reference: {}", original_ref); + git_manager.switch_ref(&original_ref)?; + + // Handle any errors from the workflow + result?; + + Ok(()) +} + +/// Parse a string of arguments into a vector of strings +fn parse_args_string(args_str: &str) -> Vec { + shlex::split(args_str).unwrap_or_else(|| { + // Fallback to simple whitespace splitting if shlex fails + args_str.split_whitespace().map(|s| s.to_string()).collect() + }) +} + +/// Run compilation phase for both baseline and feature binaries +async fn run_compilation_phase( + git_manager: &GitManager, + compilation_manager: &CompilationManager, + args: &Args, + is_optimism: bool, +) -> Result<(String, String)> { + info!("=== Running compilation phase ==="); + + // Ensure required tools are available (only need to check once) + compilation_manager.ensure_reth_bench_available()?; + if args.profile { + compilation_manager.ensure_samply_available()?; + } + + let refs = [&args.baseline_ref, &args.feature_ref]; + let ref_types = ["baseline", "feature"]; + + // First, resolve all refs to commits using a HashMap to avoid race conditions where a ref is + // pushed to mid-run. 
+ let mut ref_commits = std::collections::HashMap::new(); + for &git_ref in &refs { + if !ref_commits.contains_key(git_ref) { + git_manager.switch_ref(git_ref)?; + let commit = git_manager.get_current_commit()?; + ref_commits.insert(git_ref.clone(), commit); + info!("Reference {} resolves to commit: {}", git_ref, &ref_commits[git_ref][..8]); + } + } + + // Now compile each ref using the resolved commits + for (i, &git_ref) in refs.iter().enumerate() { + let ref_type = ref_types[i]; + let commit = &ref_commits[git_ref]; + + info!( + "Compiling {} binary for reference: {} (commit: {})", + ref_type, + git_ref, + &commit[..8] + ); + + // Switch to target reference + git_manager.switch_ref(git_ref)?; + + // Compile reth (with caching) + compilation_manager.compile_reth(commit, is_optimism)?; + + info!("Completed compilation for {} reference", ref_type); + } + + let baseline_commit = ref_commits[&args.baseline_ref].clone(); + let feature_commit = ref_commits[&args.feature_ref].clone(); + + info!("Compilation phase completed"); + Ok((baseline_commit, feature_commit)) +} + +/// Run warmup phase to warm up caches before benchmarking +async fn run_warmup_phase( + git_manager: &GitManager, + compilation_manager: &CompilationManager, + node_manager: &mut NodeManager, + benchmark_runner: &BenchmarkRunner, + args: &Args, + is_optimism: bool, + baseline_commit: &str, +) -> Result<()> { + info!("=== Running warmup phase ==="); + + // Use baseline for warmup + let warmup_ref = &args.baseline_ref; + + // Switch to baseline reference + git_manager.switch_ref(warmup_ref)?; + + // Get the cached binary path for baseline (should already be compiled) + let binary_path = + compilation_manager.get_cached_binary_path_for_commit(baseline_commit, is_optimism); + + // Verify the cached binary exists + if !binary_path.exists() { + return Err(eyre!( + "Cached baseline binary not found at {:?}. 
Compilation phase should have created it.", + binary_path + )); + } + + info!("Using cached baseline binary for warmup (commit: {})", &baseline_commit[..8]); + + // Build additional args with conditional --debug.startup-sync-state-idle flag + let additional_args = args.build_additional_args("warmup", args.baseline_args.as_ref()); + + // Start reth node for warmup + let mut node_process = + node_manager.start_node(&binary_path, warmup_ref, "warmup", &additional_args).await?; + + // Wait for node to be ready and get its current tip + let current_tip = node_manager.wait_for_node_ready_and_get_tip().await?; + info!("Warmup node is ready at tip: {}", current_tip); + + // Store the tip we'll unwind back to + let original_tip = current_tip; + + // Clear filesystem caches before warmup run only (unless disabled) + if args.no_clear_cache { + info!("Skipping filesystem cache clearing (--no-clear-cache flag set)"); + } else { + BenchmarkRunner::clear_fs_caches().await?; + } + + // Run warmup to warm up caches + benchmark_runner.run_warmup(current_tip).await?; + + // Stop node before unwinding (node must be stopped to release database lock) + node_manager.stop_node(&mut node_process).await?; + + // Unwind back to starting block after warmup + node_manager.unwind_to_block(original_tip).await?; + + info!("Warmup phase completed"); + Ok(()) +} + +/// Execute the complete benchmark workflow for both branches +async fn run_benchmark_workflow( + git_manager: &GitManager, + compilation_manager: &CompilationManager, + node_manager: &mut NodeManager, + benchmark_runner: &BenchmarkRunner, + comparison_generator: &mut ComparisonGenerator, + args: &Args, +) -> Result<()> { + // Detect if this is an Optimism chain once at the beginning + let rpc_url = args.get_rpc_url(); + let is_optimism = compilation_manager.detect_optimism_chain(&rpc_url).await?; + + // Run compilation phase for both binaries + let (baseline_commit, feature_commit) = + run_compilation_phase(git_manager, compilation_manager, args, is_optimism).await?; + + // Run warmup phase before benchmarking (skip if warmup_blocks is 0) + if args.get_warmup_blocks() > 0 { + run_warmup_phase( + git_manager, + compilation_manager, + node_manager, + benchmark_runner, + args, + is_optimism, + &baseline_commit, + ) + .await?; + } else { + info!("Skipping warmup phase (warmup_blocks is 0)"); + } + + let refs = [&args.baseline_ref, &args.feature_ref]; + let ref_types = ["baseline", "feature"]; + let commits = [&baseline_commit, &feature_commit]; + + for (i, &git_ref) in refs.iter().enumerate() { + let ref_type = ref_types[i]; + let commit = commits[i]; + info!("=== Processing {} reference: {} ===", ref_type, git_ref); + + // Switch to target reference + git_manager.switch_ref(git_ref)?; + + // Get the cached binary path for this git reference (should already be compiled) + let binary_path = + compilation_manager.get_cached_binary_path_for_commit(commit, is_optimism); + + // Verify the cached binary exists + if !binary_path.exists() { + return Err(eyre!( + "Cached {} binary not found at {:?}. 
Compilation phase should have created it.", + ref_type, + binary_path + )); + } + + info!("Using cached {} binary (commit: {})", ref_type, &commit[..8]); + + // Get reference-specific base arguments string + let base_args_str = match ref_type { + "baseline" => args.baseline_args.as_ref(), + "feature" => args.feature_args.as_ref(), + _ => None, + }; + + // Build additional args with conditional --debug.startup-sync-state-idle flag + let additional_args = args.build_additional_args(ref_type, base_args_str); + + // Start reth node + let mut node_process = + node_manager.start_node(&binary_path, git_ref, ref_type, &additional_args).await?; + + // Wait for node to be ready and get its current tip (wherever it is) + let current_tip = node_manager.wait_for_node_ready_and_get_tip().await?; + info!("Node is ready at tip: {}", current_tip); + + // Store the tip we'll unwind back to + let original_tip = current_tip; + + // Calculate benchmark range + // Note: reth-bench has an off-by-one error where it consumes the first block + // of the range, so we add 1 to compensate and get exactly args.blocks blocks + let from_block = original_tip; + let to_block = original_tip + args.blocks; + + // Run benchmark + let output_dir = comparison_generator.get_ref_output_dir(ref_type); + + // Capture start timestamp for the benchmark run + let benchmark_start = chrono::Utc::now(); + + // Run benchmark (comparison logic is handled separately by ComparisonGenerator) + benchmark_runner.run_benchmark(from_block, to_block, &output_dir).await?; + + // Capture end timestamp for the benchmark run + let benchmark_end = chrono::Utc::now(); + + // Stop node + node_manager.stop_node(&mut node_process).await?; + + // Unwind back to original tip + node_manager.unwind_to_block(original_tip).await?; + + // Store results for comparison + comparison_generator.add_ref_results(ref_type, &output_dir)?; + + // Set the benchmark run timestamps + comparison_generator.set_ref_timestamps(ref_type, benchmark_start, benchmark_end)?; + + info!("Completed {} reference benchmark", ref_type); + } + + // Generate comparison report + comparison_generator.generate_comparison_report().await?; + + // Generate charts if requested + if args.draw { + generate_comparison_charts(comparison_generator).await?; + } + + // Start samply servers if profiling was enabled + if args.profile { + start_samply_servers(args).await?; + } + + Ok(()) +} + +/// Generate comparison charts using the Python script +async fn generate_comparison_charts(comparison_generator: &ComparisonGenerator) -> Result<()> { + info!("Generating comparison charts with Python script..."); + + let baseline_output_dir = comparison_generator.get_ref_output_dir("baseline"); + let feature_output_dir = comparison_generator.get_ref_output_dir("feature"); + + let baseline_csv = baseline_output_dir.join("combined_latency.csv"); + let feature_csv = feature_output_dir.join("combined_latency.csv"); + + // Check if CSV files exist + if !baseline_csv.exists() { + return Err(eyre!("Baseline CSV not found: {:?}", baseline_csv)); + } + if !feature_csv.exists() { + return Err(eyre!("Feature CSV not found: {:?}", feature_csv)); + } + + let output_dir = comparison_generator.get_output_dir(); + let chart_output = output_dir.join("latency_comparison.png"); + + let script_path = "bin/reth-bench/scripts/compare_newpayload_latency.py"; + + info!("Running Python comparison script with uv..."); + let mut cmd = Command::new("uv"); + cmd.args([ + "run", + script_path, + &baseline_csv.to_string_lossy(), + 
&feature_csv.to_string_lossy(), + "-o", + &chart_output.to_string_lossy(), + ]); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + cmd.process_group(0); + } + + let output = cmd.output().await.map_err(|e| { + eyre!("Failed to execute Python script with uv: {}. Make sure uv is installed.", e) + })?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + let stdout = String::from_utf8_lossy(&output.stdout); + return Err(eyre!( + "Python script failed with exit code {:?}:\nstdout: {}\nstderr: {}", + output.status.code(), + stdout, + stderr + )); + } + + let stdout = String::from_utf8_lossy(&output.stdout); + if !stdout.trim().is_empty() { + info!("Python script output:\n{}", stdout); + } + + info!("Comparison chart generated: {:?}", chart_output); + Ok(()) +} + +/// Start samply servers for viewing profiles +async fn start_samply_servers(args: &Args) -> Result<()> { + info!("Starting samply servers for profile viewing..."); + + let output_dir = args.output_dir_path(); + let profiles_dir = output_dir.join("profiles"); + + // Build profile paths + let baseline_profile = profiles_dir.join("baseline.json.gz"); + let feature_profile = profiles_dir.join("feature.json.gz"); + + // Check if profiles exist + if !baseline_profile.exists() { + warn!("Baseline profile not found: {:?}", baseline_profile); + return Ok(()); + } + if !feature_profile.exists() { + warn!("Feature profile not found: {:?}", feature_profile); + return Ok(()); + } + + // Find two consecutive available ports starting from 3000 + let (baseline_port, feature_port) = find_consecutive_ports(3000)?; + info!("Found available ports: {} and {}", baseline_port, feature_port); + + // Get samply path + let samply_path = get_samply_path().await?; + + // Start baseline server + info!("Starting samply server for baseline '{}' on port {}", args.baseline_ref, baseline_port); + let mut baseline_cmd = Command::new(&samply_path); + baseline_cmd + .args(["load", "--port", &baseline_port.to_string(), &baseline_profile.to_string_lossy()]) + .kill_on_drop(true); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + baseline_cmd.process_group(0); + } + + // Conditionally pipe output based on log level + if tracing::enabled!(tracing::Level::DEBUG) { + baseline_cmd.stdout(std::process::Stdio::piped()).stderr(std::process::Stdio::piped()); + } else { + baseline_cmd.stdout(std::process::Stdio::null()).stderr(std::process::Stdio::null()); + } + + // Debug log the command + debug!("Executing samply load command: {:?}", baseline_cmd); + + let mut baseline_child = + baseline_cmd.spawn().wrap_err("Failed to start samply server for baseline")?; + + // Stream baseline samply output if debug logging is enabled + if tracing::enabled!(tracing::Level::DEBUG) { + if let Some(stdout) = baseline_child.stdout.take() { + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stdout); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[SAMPLY-BASELINE] {}", line); + } + }); + } + + if let Some(stderr) = baseline_child.stderr.take() { + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stderr); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[SAMPLY-BASELINE] {}", line); + } + }); + } + } + + // Start feature server + info!("Starting samply server for feature '{}' on port {}", args.feature_ref, 
feature_port); + let mut feature_cmd = Command::new(&samply_path); + feature_cmd + .args(["load", "--port", &feature_port.to_string(), &feature_profile.to_string_lossy()]) + .kill_on_drop(true); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + feature_cmd.process_group(0); + } + + // Conditionally pipe output based on log level + if tracing::enabled!(tracing::Level::DEBUG) { + feature_cmd.stdout(std::process::Stdio::piped()).stderr(std::process::Stdio::piped()); + } else { + feature_cmd.stdout(std::process::Stdio::null()).stderr(std::process::Stdio::null()); + } + + // Debug log the command + debug!("Executing samply load command: {:?}", feature_cmd); + + let mut feature_child = + feature_cmd.spawn().wrap_err("Failed to start samply server for feature")?; + + // Stream feature samply output if debug logging is enabled + if tracing::enabled!(tracing::Level::DEBUG) { + if let Some(stdout) = feature_child.stdout.take() { + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stdout); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[SAMPLY-FEATURE] {}", line); + } + }); + } + + if let Some(stderr) = feature_child.stderr.take() { + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stderr); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[SAMPLY-FEATURE] {}", line); + } + }); + } + } + + // Give servers time to start + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + + // Print access information + println!("\n=== SAMPLY PROFILE SERVERS STARTED ==="); + println!("Baseline '{}': http://127.0.0.1:{}", args.baseline_ref, baseline_port); + println!("Feature '{}': http://127.0.0.1:{}", args.feature_ref, feature_port); + println!("\nOpen the URLs in your browser to view the profiles."); + println!("Press Ctrl+C to stop the servers and exit."); + println!("=========================================\n"); + + // Wait for Ctrl+C or process termination + let ctrl_c = tokio::signal::ctrl_c(); + let baseline_wait = baseline_child.wait(); + let feature_wait = feature_child.wait(); + + tokio::select! 
{ + _ = ctrl_c => { + info!("Received Ctrl+C, shutting down samply servers..."); + } + result = baseline_wait => { + match result { + Ok(status) => info!("Baseline samply server exited with status: {}", status), + Err(e) => warn!("Baseline samply server error: {}", e), + } + } + result = feature_wait => { + match result { + Ok(status) => info!("Feature samply server exited with status: {}", status), + Err(e) => warn!("Feature samply server error: {}", e), + } + } + } + + // Ensure both processes are terminated + let _ = baseline_child.kill().await; + let _ = feature_child.kill().await; + + info!("Samply servers stopped."); + Ok(()) +} + +/// Find two consecutive available ports starting from the given port +fn find_consecutive_ports(start_port: u16) -> Result<(u16, u16)> { + for port in start_port..=65533 { + // Check if both port and port+1 are available + if is_port_available(port) && is_port_available(port + 1) { + return Ok((port, port + 1)); + } + } + Err(eyre!("Could not find two consecutive available ports starting from {}", start_port)) +} + +/// Check if a port is available by attempting to bind to it +fn is_port_available(port: u16) -> bool { + TcpListener::bind(("127.0.0.1", port)).is_ok() +} + +/// Get the absolute path to samply using 'which' command +async fn get_samply_path() -> Result { + let output = Command::new("which") + .arg("samply") + .output() + .await + .wrap_err("Failed to execute 'which samply' command")?; + + if !output.status.success() { + return Err(eyre!("samply not found in PATH")); + } + + let samply_path = String::from_utf8(output.stdout) + .wrap_err("samply path is not valid UTF-8")? + .trim() + .to_string(); + + if samply_path.is_empty() { + return Err(eyre!("which samply returned empty path")); + } + + Ok(samply_path) +} diff --git a/bin/reth-bench-compare/src/comparison.rs b/bin/reth-bench-compare/src/comparison.rs new file mode 100644 index 00000000000..316609569bf --- /dev/null +++ b/bin/reth-bench-compare/src/comparison.rs @@ -0,0 +1,484 @@ +//! Results comparison and report generation. 
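Every `*_change_percent` field in the report below uses the same convention, `(feature - baseline) / baseline * 100`, guarded against a zero baseline; the closures in `calculate_comparison_summary` and `calculate_per_block_comparisons` further down implement it. A small self-contained illustration of that convention (standalone sketch, numbers are made up):

    // Mirrors the percent-change closure used by the comparison report.
    fn percent_change(baseline: f64, feature: f64) -> f64 {
        if baseline.abs() > f64::EPSILON { (feature - baseline) / baseline * 100.0 } else { 0.0 }
    }

    fn main() {
        // Average newPayload latency dropping from 5.0 ms to 4.0 ms reads as -20%.
        assert_eq!(percent_change(5.0, 4.0), -20.0);
        // A regression from 4.0 ms to 5.0 ms reads as +25%.
        assert_eq!(percent_change(4.0, 5.0), 25.0);
    }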
+ +use crate::cli::Args; +use chrono::{DateTime, Utc}; +use csv::Reader; +use eyre::{eyre, Result, WrapErr}; +use serde::{Deserialize, Serialize}; +use std::{ + collections::HashMap, + fs, + path::{Path, PathBuf}, +}; +use tracing::{info, warn}; + +/// Manages comparison between baseline and feature reference results +pub(crate) struct ComparisonGenerator { + output_dir: PathBuf, + timestamp: String, + baseline_ref_name: String, + feature_ref_name: String, + baseline_results: Option<BenchmarkResults>, + feature_results: Option<BenchmarkResults>, +} + +/// Represents the results from a single benchmark run +#[derive(Debug, Clone)] +pub(crate) struct BenchmarkResults { + pub ref_name: String, + pub combined_latency_data: Vec<CombinedLatencyRow>, + pub summary: BenchmarkSummary, + pub start_timestamp: Option<DateTime<Utc>>, + pub end_timestamp: Option<DateTime<Utc>>, +} + +/// Combined latency CSV row structure +#[derive(Debug, Clone, Deserialize, Serialize)] +pub(crate) struct CombinedLatencyRow { + pub block_number: u64, + pub gas_used: u64, + pub new_payload_latency: u128, +} + +/// Total gas CSV row structure +#[derive(Debug, Clone, Deserialize, Serialize)] +pub(crate) struct TotalGasRow { + pub block_number: u64, + pub gas_used: u64, + pub time: u128, +} + +/// Summary statistics for a benchmark run +#[derive(Debug, Clone, Serialize)] +pub(crate) struct BenchmarkSummary { + pub total_blocks: u64, + pub total_gas_used: u64, + pub total_duration_ms: u128, + pub avg_new_payload_latency_ms: f64, + pub gas_per_second: f64, + pub blocks_per_second: f64, +} + +/// Comparison report between two benchmark runs +#[derive(Debug, Serialize)] +pub(crate) struct ComparisonReport { + pub timestamp: String, + pub baseline: RefInfo, + pub feature: RefInfo, + pub comparison_summary: ComparisonSummary, + pub per_block_comparisons: Vec<BlockComparison>, +} + +/// Information about a reference in the comparison +#[derive(Debug, Serialize)] +pub(crate) struct RefInfo { + pub ref_name: String, + pub summary: BenchmarkSummary, + pub start_timestamp: Option<DateTime<Utc>>, + pub end_timestamp: Option<DateTime<Utc>>, +} + +/// Summary of the comparison between references +#[derive(Debug, Serialize)] +pub(crate) struct ComparisonSummary { + pub new_payload_latency_change_percent: f64, + pub gas_per_second_change_percent: f64, + pub blocks_per_second_change_percent: f64, +} + +/// Per-block comparison data +#[derive(Debug, Serialize)] +pub(crate) struct BlockComparison { + pub block_number: u64, + pub baseline_new_payload_latency: u128, + pub feature_new_payload_latency: u128, + pub new_payload_latency_change_percent: f64, +} + +impl ComparisonGenerator { + /// Create a new comparison generator + pub(crate) fn new(args: &Args) -> Self { + let now: DateTime<Utc> = Utc::now(); + let timestamp = now.format("%Y%m%d_%H%M%S").to_string(); + + Self { + output_dir: args.output_dir_path(), + timestamp, + baseline_ref_name: args.baseline_ref.clone(), + feature_ref_name: args.feature_ref.clone(), + baseline_results: None, + feature_results: None, + } + } + + /// Get the output directory for a specific reference + pub(crate) fn get_ref_output_dir(&self, ref_type: &str) -> PathBuf { + self.output_dir.join("results").join(&self.timestamp).join(ref_type) + } + + /// Get the main output directory for this comparison run + pub(crate) fn get_output_dir(&self) -> PathBuf { + self.output_dir.join("results").join(&self.timestamp) + } + + /// Add benchmark results for a reference + pub(crate) fn add_ref_results(&mut self, ref_type: &str, output_path: &Path) -> Result<()> { + let ref_name = match ref_type { + "baseline" => &self.baseline_ref_name, + "feature" =>
&self.feature_ref_name, + _ => return Err(eyre!("Unknown reference type: {}", ref_type)), + }; + + let results = self.load_benchmark_results(ref_name, output_path)?; + + match ref_type { + "baseline" => self.baseline_results = Some(results), + "feature" => self.feature_results = Some(results), + _ => return Err(eyre!("Unknown reference type: {}", ref_type)), + } + + info!("Loaded benchmark results for {} reference", ref_type); + + Ok(()) + } + + /// Set the benchmark run timestamps for a reference + pub(crate) fn set_ref_timestamps( + &mut self, + ref_type: &str, + start: DateTime, + end: DateTime, + ) -> Result<()> { + match ref_type { + "baseline" => { + if let Some(ref mut results) = self.baseline_results { + results.start_timestamp = Some(start); + results.end_timestamp = Some(end); + } else { + return Err(eyre!("Baseline results not loaded yet")); + } + } + "feature" => { + if let Some(ref mut results) = self.feature_results { + results.start_timestamp = Some(start); + results.end_timestamp = Some(end); + } else { + return Err(eyre!("Feature results not loaded yet")); + } + } + _ => return Err(eyre!("Unknown reference type: {}", ref_type)), + } + + Ok(()) + } + + /// Generate the final comparison report + pub(crate) async fn generate_comparison_report(&self) -> Result<()> { + info!("Generating comparison report..."); + + let baseline = + self.baseline_results.as_ref().ok_or_else(|| eyre!("Baseline results not loaded"))?; + + let feature = + self.feature_results.as_ref().ok_or_else(|| eyre!("Feature results not loaded"))?; + + // Generate comparison + let comparison_summary = + self.calculate_comparison_summary(&baseline.summary, &feature.summary)?; + let per_block_comparisons = self.calculate_per_block_comparisons(baseline, feature)?; + + let report = ComparisonReport { + timestamp: self.timestamp.clone(), + baseline: RefInfo { + ref_name: baseline.ref_name.clone(), + summary: baseline.summary.clone(), + start_timestamp: baseline.start_timestamp, + end_timestamp: baseline.end_timestamp, + }, + feature: RefInfo { + ref_name: feature.ref_name.clone(), + summary: feature.summary.clone(), + start_timestamp: feature.start_timestamp, + end_timestamp: feature.end_timestamp, + }, + comparison_summary, + per_block_comparisons, + }; + + // Write reports + self.write_comparison_reports(&report).await?; + + // Print summary to console + self.print_comparison_summary(&report); + + Ok(()) + } + + /// Load benchmark results from CSV files + fn load_benchmark_results( + &self, + ref_name: &str, + output_path: &Path, + ) -> Result { + let combined_latency_path = output_path.join("combined_latency.csv"); + let total_gas_path = output_path.join("total_gas.csv"); + + let combined_latency_data = self.load_combined_latency_csv(&combined_latency_path)?; + let total_gas_data = self.load_total_gas_csv(&total_gas_path)?; + + let summary = self.calculate_summary(&combined_latency_data, &total_gas_data)?; + + Ok(BenchmarkResults { + ref_name: ref_name.to_string(), + combined_latency_data, + summary, + start_timestamp: None, + end_timestamp: None, + }) + } + + /// Load combined latency CSV data + fn load_combined_latency_csv(&self, path: &Path) -> Result> { + let mut reader = Reader::from_path(path) + .wrap_err_with(|| format!("Failed to open combined latency CSV: {path:?}"))?; + + let mut rows = Vec::new(); + for result in reader.deserialize() { + let row: CombinedLatencyRow = result + .wrap_err_with(|| format!("Failed to parse combined latency row in {path:?}"))?; + rows.push(row); + } + + if rows.is_empty() { 
+ return Err(eyre!("No data found in combined latency CSV: {:?}", path)); + } + + Ok(rows) + } + + /// Load total gas CSV data + fn load_total_gas_csv(&self, path: &Path) -> Result> { + let mut reader = Reader::from_path(path) + .wrap_err_with(|| format!("Failed to open total gas CSV: {path:?}"))?; + + let mut rows = Vec::new(); + for result in reader.deserialize() { + let row: TotalGasRow = + result.wrap_err_with(|| format!("Failed to parse total gas row in {path:?}"))?; + rows.push(row); + } + + if rows.is_empty() { + return Err(eyre!("No data found in total gas CSV: {:?}", path)); + } + + Ok(rows) + } + + /// Calculate summary statistics for a benchmark run + fn calculate_summary( + &self, + combined_data: &[CombinedLatencyRow], + total_gas_data: &[TotalGasRow], + ) -> Result { + if combined_data.is_empty() || total_gas_data.is_empty() { + return Err(eyre!("Cannot calculate summary for empty data")); + } + + let total_blocks = combined_data.len() as u64; + let total_gas_used: u64 = combined_data.iter().map(|r| r.gas_used).sum(); + + let total_duration_ms = total_gas_data.last().unwrap().time / 1000; // Convert microseconds to milliseconds + + let avg_new_payload_latency_ms: f64 = + combined_data.iter().map(|r| r.new_payload_latency as f64 / 1000.0).sum::() / + total_blocks as f64; + + let total_duration_seconds = total_duration_ms as f64 / 1000.0; + let gas_per_second = if total_duration_seconds > f64::EPSILON { + total_gas_used as f64 / total_duration_seconds + } else { + 0.0 + }; + + let blocks_per_second = if total_duration_seconds > f64::EPSILON { + total_blocks as f64 / total_duration_seconds + } else { + 0.0 + }; + + Ok(BenchmarkSummary { + total_blocks, + total_gas_used, + total_duration_ms, + avg_new_payload_latency_ms, + gas_per_second, + blocks_per_second, + }) + } + + /// Calculate comparison summary between baseline and feature + fn calculate_comparison_summary( + &self, + baseline: &BenchmarkSummary, + feature: &BenchmarkSummary, + ) -> Result { + let calc_percent_change = |baseline: f64, feature: f64| -> f64 { + if baseline.abs() > f64::EPSILON { + ((feature - baseline) / baseline) * 100.0 + } else { + 0.0 + } + }; + + Ok(ComparisonSummary { + new_payload_latency_change_percent: calc_percent_change( + baseline.avg_new_payload_latency_ms, + feature.avg_new_payload_latency_ms, + ), + gas_per_second_change_percent: calc_percent_change( + baseline.gas_per_second, + feature.gas_per_second, + ), + blocks_per_second_change_percent: calc_percent_change( + baseline.blocks_per_second, + feature.blocks_per_second, + ), + }) + } + + /// Calculate per-block comparisons + fn calculate_per_block_comparisons( + &self, + baseline: &BenchmarkResults, + feature: &BenchmarkResults, + ) -> Result> { + let mut baseline_map: HashMap = HashMap::new(); + for row in &baseline.combined_latency_data { + baseline_map.insert(row.block_number, row); + } + + let mut comparisons = Vec::new(); + for feature_row in &feature.combined_latency_data { + if let Some(baseline_row) = baseline_map.get(&feature_row.block_number) { + let calc_percent_change = |baseline: u128, feature: u128| -> f64 { + if baseline > 0 { + ((feature as f64 - baseline as f64) / baseline as f64) * 100.0 + } else { + 0.0 + } + }; + + let comparison = BlockComparison { + block_number: feature_row.block_number, + baseline_new_payload_latency: baseline_row.new_payload_latency, + feature_new_payload_latency: feature_row.new_payload_latency, + new_payload_latency_change_percent: calc_percent_change( + baseline_row.new_payload_latency, + 
feature_row.new_payload_latency, + ), + }; + comparisons.push(comparison); + } else { + warn!("Block {} not found in baseline data", feature_row.block_number); + } + } + + Ok(comparisons) + } + + /// Write comparison reports to files + async fn write_comparison_reports(&self, report: &ComparisonReport) -> Result<()> { + let report_dir = self.output_dir.join("results").join(&self.timestamp); + fs::create_dir_all(&report_dir) + .wrap_err_with(|| format!("Failed to create report directory: {report_dir:?}"))?; + + // Write JSON report + let json_path = report_dir.join("comparison_report.json"); + let json_content = serde_json::to_string_pretty(report) + .wrap_err("Failed to serialize comparison report to JSON")?; + fs::write(&json_path, json_content) + .wrap_err_with(|| format!("Failed to write JSON report: {json_path:?}"))?; + + // Write CSV report for per-block comparisons + let csv_path = report_dir.join("per_block_comparison.csv"); + let mut writer = csv::Writer::from_path(&csv_path) + .wrap_err_with(|| format!("Failed to create CSV writer: {csv_path:?}"))?; + + for comparison in &report.per_block_comparisons { + writer.serialize(comparison).wrap_err("Failed to write comparison row to CSV")?; + } + writer.flush().wrap_err("Failed to flush CSV writer")?; + + info!("Comparison reports written to: {:?}", report_dir); + Ok(()) + } + + /// Print comparison summary to console + fn print_comparison_summary(&self, report: &ComparisonReport) { + // Parse and format timestamp nicely + let formatted_timestamp = if let Ok(dt) = chrono::DateTime::parse_from_str( + &format!("{} +0000", report.timestamp.replace('_', " ")), + "%Y%m%d %H%M%S %z", + ) { + dt.format("%Y-%m-%d %H:%M:%S UTC").to_string() + } else { + // Fallback to original if parsing fails + report.timestamp.clone() + }; + + println!("\n=== BENCHMARK COMPARISON SUMMARY ==="); + println!("Timestamp: {formatted_timestamp}"); + println!("Baseline: {}", report.baseline.ref_name); + println!("Feature: {}", report.feature.ref_name); + println!(); + + let summary = &report.comparison_summary; + + println!("Performance Changes:"); + println!(" NewPayload Latency: {:+.2}%", summary.new_payload_latency_change_percent); + println!(" Gas/Second: {:+.2}%", summary.gas_per_second_change_percent); + println!(" Blocks/Second: {:+.2}%", summary.blocks_per_second_change_percent); + println!(); + + println!("Baseline Summary:"); + let baseline = &report.baseline.summary; + println!( + " Blocks: {}, Gas: {}, Duration: {:.2}s", + baseline.total_blocks, + baseline.total_gas_used, + baseline.total_duration_ms as f64 / 1000.0 + ); + println!(" Avg NewPayload: {:.2}ms", baseline.avg_new_payload_latency_ms); + if let (Some(start), Some(end)) = + (&report.baseline.start_timestamp, &report.baseline.end_timestamp) + { + println!( + " Started: {}, Ended: {}", + start.format("%Y-%m-%d %H:%M:%S UTC"), + end.format("%Y-%m-%d %H:%M:%S UTC") + ); + } + println!(); + + println!("Feature Summary:"); + let feature = &report.feature.summary; + println!( + " Blocks: {}, Gas: {}, Duration: {:.2}s", + feature.total_blocks, + feature.total_gas_used, + feature.total_duration_ms as f64 / 1000.0 + ); + println!(" Avg NewPayload: {:.2}ms", feature.avg_new_payload_latency_ms); + if let (Some(start), Some(end)) = + (&report.feature.start_timestamp, &report.feature.end_timestamp) + { + println!( + " Started: {}, Ended: {}", + start.format("%Y-%m-%d %H:%M:%S UTC"), + end.format("%Y-%m-%d %H:%M:%S UTC") + ); + } + println!(); + } +} diff --git a/bin/reth-bench-compare/src/compilation.rs 
b/bin/reth-bench-compare/src/compilation.rs new file mode 100644 index 00000000000..0bd9f70ce64 --- /dev/null +++ b/bin/reth-bench-compare/src/compilation.rs @@ -0,0 +1,354 @@ +//! Compilation operations for reth and reth-bench. + +use crate::git::GitManager; +use alloy_primitives::address; +use alloy_provider::{Provider, ProviderBuilder}; +use eyre::{eyre, Result, WrapErr}; +use std::{fs, path::PathBuf, process::Command}; +use tracing::{debug, error, info, warn}; + +/// Manages compilation operations for reth components +#[derive(Debug)] +pub(crate) struct CompilationManager { + repo_root: String, + output_dir: PathBuf, + git_manager: GitManager, + features: String, +} + +impl CompilationManager { + /// Create a new `CompilationManager` + pub(crate) const fn new( + repo_root: String, + output_dir: PathBuf, + git_manager: GitManager, + features: String, + ) -> Result { + Ok(Self { repo_root, output_dir, git_manager, features }) + } + + /// Detect if the RPC endpoint is an Optimism chain + pub(crate) async fn detect_optimism_chain(&self, rpc_url: &str) -> Result { + info!("Detecting chain type from RPC endpoint..."); + + // Create Alloy provider + let url = rpc_url.parse().map_err(|e| eyre!("Invalid RPC URL '{}': {}", rpc_url, e))?; + let provider = ProviderBuilder::new().connect_http(url); + + // Check for Optimism predeploy at address 0x420000000000000000000000000000000000000F + let is_optimism = !provider + .get_code_at(address!("0x420000000000000000000000000000000000000F")) + .await? + .is_empty(); + + if is_optimism { + info!("Detected Optimism chain"); + } else { + info!("Detected Ethereum chain"); + } + + Ok(is_optimism) + } + + /// Get the path to the cached binary using explicit commit hash + pub(crate) fn get_cached_binary_path_for_commit( + &self, + commit: &str, + is_optimism: bool, + ) -> PathBuf { + let identifier = &commit[..8]; // Use first 8 chars of commit + + let binary_name = if is_optimism { + format!("op-reth_{}", identifier) + } else { + format!("reth_{}", identifier) + }; + + self.output_dir.join("bin").join(binary_name) + } + + /// Compile reth using cargo build and cache the binary + pub(crate) fn compile_reth(&self, commit: &str, is_optimism: bool) -> Result<()> { + // Validate that current git commit matches the expected commit + let current_commit = self.git_manager.get_current_commit()?; + if current_commit != commit { + return Err(eyre!( + "Git commit mismatch! 
Expected: {}, but currently at: {}", + &commit[..8], + ¤t_commit[..8] + )); + } + + let cached_path = self.get_cached_binary_path_for_commit(commit, is_optimism); + + // Check if cached binary already exists (since path contains commit hash, it's valid) + if cached_path.exists() { + info!("Using cached binary (commit: {})", &commit[..8]); + return Ok(()); + } + + info!("No cached binary found, compiling (commit: {})...", &commit[..8]); + + let binary_name = if is_optimism { "op-reth" } else { "reth" }; + + info!( + "Compiling {} with profiling configuration (commit: {})...", + binary_name, + &commit[..8] + ); + + let mut cmd = Command::new("cargo"); + cmd.arg("build").arg("--profile").arg("profiling"); + + // Add features + cmd.arg("--features").arg(&self.features); + info!("Using features: {}", self.features); + + // Add bin-specific arguments for optimism + if is_optimism { + cmd.arg("--bin") + .arg("op-reth") + .arg("--manifest-path") + .arg("crates/optimism/bin/Cargo.toml"); + } + + cmd.current_dir(&self.repo_root); + + // Set RUSTFLAGS for native CPU optimization + cmd.env("RUSTFLAGS", "-C target-cpu=native"); + + // Debug log the command + debug!("Executing cargo command: {:?}", cmd); + + let output = cmd.output().wrap_err("Failed to execute cargo build command")?; + + // Print stdout and stderr with prefixes at debug level + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + for line in stdout.lines() { + if !line.trim().is_empty() { + debug!("[CARGO] {}", line); + } + } + + for line in stderr.lines() { + if !line.trim().is_empty() { + debug!("[CARGO] {}", line); + } + } + + if !output.status.success() { + // Print all output when compilation fails + error!("Cargo build failed with exit code: {:?}", output.status.code()); + + if !stdout.trim().is_empty() { + error!("Cargo stdout:"); + for line in stdout.lines() { + error!(" {}", line); + } + } + + if !stderr.trim().is_empty() { + error!("Cargo stderr:"); + for line in stderr.lines() { + error!(" {}", line); + } + } + + return Err(eyre!("Compilation failed with exit code: {:?}", output.status.code())); + } + + info!("{} compilation completed", binary_name); + + // Copy the compiled binary to cache + let source_path = + PathBuf::from(&self.repo_root).join(format!("target/profiling/{}", binary_name)); + if !source_path.exists() { + return Err(eyre!("Compiled binary not found at {:?}", source_path)); + } + + // Create bin directory if it doesn't exist + let bin_dir = self.output_dir.join("bin"); + fs::create_dir_all(&bin_dir).wrap_err("Failed to create bin directory")?; + + // Copy binary to cache + fs::copy(&source_path, &cached_path).wrap_err("Failed to copy binary to cache")?; + + // Make the cached binary executable + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + let mut perms = fs::metadata(&cached_path)?.permissions(); + perms.set_mode(0o755); + fs::set_permissions(&cached_path, perms)?; + } + + info!("Cached compiled binary at: {:?}", cached_path); + Ok(()) + } + + /// Check if reth-bench is available in PATH + pub(crate) fn is_reth_bench_available(&self) -> bool { + match Command::new("which").arg("reth-bench").output() { + Ok(output) => { + if output.status.success() { + let path = String::from_utf8_lossy(&output.stdout); + info!("Found reth-bench: {}", path.trim()); + true + } else { + false + } + } + Err(_) => false, + } + } + + /// Check if samply is available in PATH + pub(crate) fn is_samply_available(&self) -> bool { + match 
Command::new("which").arg("samply").output() { + Ok(output) => { + if output.status.success() { + let path = String::from_utf8_lossy(&output.stdout); + info!("Found samply: {}", path.trim()); + true + } else { + false + } + } + Err(_) => false, + } + } + + /// Install samply using cargo + pub(crate) fn install_samply(&self) -> Result<()> { + info!("Installing samply via cargo..."); + + let mut cmd = Command::new("cargo"); + cmd.args(["install", "--locked", "samply"]); + + // Debug log the command + debug!("Executing cargo command: {:?}", cmd); + + let output = cmd.output().wrap_err("Failed to execute cargo install samply command")?; + + // Print stdout and stderr with prefixes at debug level + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + for line in stdout.lines() { + if !line.trim().is_empty() { + debug!("[CARGO-SAMPLY] {}", line); + } + } + + for line in stderr.lines() { + if !line.trim().is_empty() { + debug!("[CARGO-SAMPLY] {}", line); + } + } + + if !output.status.success() { + // Print all output when installation fails + error!("Cargo install samply failed with exit code: {:?}", output.status.code()); + + if !stdout.trim().is_empty() { + error!("Cargo stdout:"); + for line in stdout.lines() { + error!(" {}", line); + } + } + + if !stderr.trim().is_empty() { + error!("Cargo stderr:"); + for line in stderr.lines() { + error!(" {}", line); + } + } + + return Err(eyre!( + "samply installation failed with exit code: {:?}", + output.status.code() + )); + } + + info!("Samply installation completed"); + Ok(()) + } + + /// Ensure samply is available, installing if necessary + pub(crate) fn ensure_samply_available(&self) -> Result<()> { + if self.is_samply_available() { + Ok(()) + } else { + warn!("samply not found in PATH, installing..."); + self.install_samply() + } + } + + /// Ensure reth-bench is available, compiling if necessary + pub(crate) fn ensure_reth_bench_available(&self) -> Result<()> { + if self.is_reth_bench_available() { + Ok(()) + } else { + warn!("reth-bench not found in PATH, compiling and installing..."); + self.compile_reth_bench() + } + } + + /// Compile and install reth-bench using `make install-reth-bench` + pub(crate) fn compile_reth_bench(&self) -> Result<()> { + info!("Compiling and installing reth-bench..."); + + let mut cmd = Command::new("make"); + cmd.arg("install-reth-bench").current_dir(&self.repo_root); + + // Debug log the command + debug!("Executing make command: {:?}", cmd); + + let output = cmd.output().wrap_err("Failed to execute make install-reth-bench command")?; + + // Print stdout and stderr with prefixes at debug level + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + for line in stdout.lines() { + if !line.trim().is_empty() { + debug!("[MAKE-BENCH] {}", line); + } + } + + for line in stderr.lines() { + if !line.trim().is_empty() { + debug!("[MAKE-BENCH] {}", line); + } + } + + if !output.status.success() { + // Print all output when compilation fails + error!("Make install-reth-bench failed with exit code: {:?}", output.status.code()); + + if !stdout.trim().is_empty() { + error!("Make stdout:"); + for line in stdout.lines() { + error!(" {}", line); + } + } + + if !stderr.trim().is_empty() { + error!("Make stderr:"); + for line in stderr.lines() { + error!(" {}", line); + } + } + + return Err(eyre!( + "reth-bench compilation failed with exit code: {:?}", + output.status.code() + )); + } + + info!("Reth-bench 
compilation completed"); + Ok(()) + } +} diff --git a/bin/reth-bench-compare/src/git.rs b/bin/reth-bench-compare/src/git.rs new file mode 100644 index 00000000000..001466969d4 --- /dev/null +++ b/bin/reth-bench-compare/src/git.rs @@ -0,0 +1,328 @@ +//! Git operations for branch management. + +use eyre::{eyre, Result, WrapErr}; +use std::process::Command; +use tracing::{info, warn}; + +/// Manages git operations for branch switching +#[derive(Debug, Clone)] +pub(crate) struct GitManager { + repo_root: String, +} + +impl GitManager { + /// Create a new `GitManager`, detecting the repository root + pub(crate) fn new() -> Result { + let output = Command::new("git") + .args(["rev-parse", "--show-toplevel"]) + .output() + .wrap_err("Failed to execute git command - is git installed?")?; + + if !output.status.success() { + return Err(eyre!("Not in a git repository or git command failed")); + } + + let repo_root = String::from_utf8(output.stdout) + .wrap_err("Git output is not valid UTF-8")? + .trim() + .to_string(); + + let manager = Self { repo_root }; + info!( + "Detected git repository at: {}, current reference: {}", + manager.repo_root(), + manager.get_current_ref()? + ); + + Ok(manager) + } + + /// Get the current git branch name + pub(crate) fn get_current_branch(&self) -> Result { + let output = Command::new("git") + .args(["branch", "--show-current"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to get current branch")?; + + if !output.status.success() { + return Err(eyre!("Failed to determine current branch")); + } + + let branch = String::from_utf8(output.stdout) + .wrap_err("Branch name is not valid UTF-8")? + .trim() + .to_string(); + + if branch.is_empty() { + return Err(eyre!("Not on a named branch (detached HEAD?)")); + } + + Ok(branch) + } + + /// Get the current git reference (branch name, tag, or commit hash) + pub(crate) fn get_current_ref(&self) -> Result { + // First try to get branch name + if let Ok(branch) = self.get_current_branch() { + return Ok(branch); + } + + // If not on a branch, check if we're on a tag + let tag_output = Command::new("git") + .args(["describe", "--exact-match", "--tags", "HEAD"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to check for tag")?; + + if tag_output.status.success() { + let tag = String::from_utf8(tag_output.stdout) + .wrap_err("Tag name is not valid UTF-8")? + .trim() + .to_string(); + return Ok(tag); + } + + // If not on a branch or tag, return the commit hash + let commit_output = Command::new("git") + .args(["rev-parse", "HEAD"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to get current commit")?; + + if !commit_output.status.success() { + return Err(eyre!("Failed to get current commit hash")); + } + + let commit_hash = String::from_utf8(commit_output.stdout) + .wrap_err("Commit hash is not valid UTF-8")? 
+ .trim() + .to_string(); + + Ok(commit_hash) + } + + /// Check if the git working directory has uncommitted changes to tracked files + pub(crate) fn validate_clean_state(&self) -> Result<()> { + let output = Command::new("git") + .args(["status", "--porcelain"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to check git status")?; + + if !output.status.success() { + return Err(eyre!("Git status command failed")); + } + + let status_output = + String::from_utf8(output.stdout).wrap_err("Git status output is not valid UTF-8")?; + + // Check for uncommitted changes to tracked files + // Status codes: M = modified, A = added, D = deleted, R = renamed, C = copied, U = updated + // ?? = untracked files (we want to ignore these) + let has_uncommitted_changes = status_output.lines().any(|line| { + if line.len() >= 2 { + let status = &line[0..2]; + // Ignore untracked files (??) and ignored files (!!) + !matches!(status, "??" | "!!") + } else { + false + } + }); + + if has_uncommitted_changes { + warn!("Git working directory has uncommitted changes to tracked files:"); + for line in status_output.lines() { + if line.len() >= 2 && !matches!(&line[0..2], "??" | "!!") { + warn!(" {}", line); + } + } + return Err(eyre!( + "Git working directory has uncommitted changes to tracked files. Please commit or stash changes before running benchmark comparison." + )); + } + + // Check if there are untracked files and log them as info + let untracked_files: Vec<&str> = + status_output.lines().filter(|line| line.starts_with("??")).collect(); + + if !untracked_files.is_empty() { + info!( + "Git working directory has {} untracked files (this is OK)", + untracked_files.len() + ); + } + + info!("Git working directory is clean (no uncommitted changes to tracked files)"); + Ok(()) + } + + /// Fetch all refs from remote to ensure we have latest branches and tags + pub(crate) fn fetch_all(&self) -> Result<()> { + let output = Command::new("git") + .args(["fetch", "--all", "--tags", "--quiet", "--force"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to fetch latest refs")?; + + if output.status.success() { + info!("Fetched latest refs"); + } else { + let stderr = String::from_utf8_lossy(&output.stderr); + // Only warn if there's actual error content, not just fetch progress + if !stderr.trim().is_empty() && !stderr.contains("-> origin/") { + warn!("Git fetch encountered issues (continuing anyway): {}", stderr); + } + } + + Ok(()) + } + + /// Validate that the specified git references exist (branches, tags, or commits) + pub(crate) fn validate_refs(&self, refs: &[&str]) -> Result<()> { + for &git_ref in refs { + // Try to resolve the ref similar to `git checkout` by peeling to a commit. + // First try the ref as-is with ^{commit}, then fall back to origin/{ref}^{commit}. 
+ let as_is = format!("{git_ref}^{{commit}}"); + let ref_check = Command::new("git") + .args(["rev-parse", "--verify", &as_is]) + .current_dir(&self.repo_root) + .output(); + + let found = if let Ok(output) = ref_check && + output.status.success() + { + info!("Validated reference exists: {}", git_ref); + true + } else { + // Try remote-only branches via origin/{ref} + let origin_ref = format!("origin/{git_ref}^{{commit}}"); + let origin_check = Command::new("git") + .args(["rev-parse", "--verify", &origin_ref]) + .current_dir(&self.repo_root) + .output(); + + if let Ok(output) = origin_check && + output.status.success() + { + info!("Validated remote reference exists: origin/{}", git_ref); + true + } else { + false + } + }; + + if !found { + return Err(eyre!( + "Git reference '{}' does not exist as branch, tag, or commit (tried '{}' and 'origin/{}^{{commit}}')", + git_ref, + format!("{git_ref}^{{commit}}"), + git_ref, + )); + } + } + + Ok(()) + } + + /// Switch to the specified git reference (branch, tag, or commit) + pub(crate) fn switch_ref(&self, git_ref: &str) -> Result<()> { + // First checkout the reference + let output = Command::new("git") + .args(["checkout", git_ref]) + .current_dir(&self.repo_root) + .output() + .wrap_err_with(|| format!("Failed to switch to reference '{git_ref}'"))?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(eyre!("Failed to switch to reference '{}': {}", git_ref, stderr)); + } + + // Check if this is a branch that tracks a remote and pull latest changes + let is_branch = Command::new("git") + .args(["show-ref", "--verify", "--quiet", &format!("refs/heads/{git_ref}")]) + .current_dir(&self.repo_root) + .status() + .map(|s| s.success()) + .unwrap_or(false); + + if is_branch { + // Check if the branch tracks a remote + let tracking_output = Command::new("git") + .args([ + "rev-parse", + "--abbrev-ref", + "--symbolic-full-name", + &format!("{git_ref}@{{upstream}}"), + ]) + .current_dir(&self.repo_root) + .output(); + + if let Ok(output) = tracking_output && + output.status.success() + { + let upstream = String::from_utf8_lossy(&output.stdout).trim().to_string(); + if !upstream.is_empty() && upstream != format!("{git_ref}@{{upstream}}") { + // Branch tracks a remote, pull latest changes + info!("Pulling latest changes for branch: {}", git_ref); + + let pull_output = Command::new("git") + .args(["pull", "--ff-only"]) + .current_dir(&self.repo_root) + .output() + .wrap_err_with(|| { + format!("Failed to pull latest changes for branch '{git_ref}'") + })?; + + if pull_output.status.success() { + info!("Successfully pulled latest changes for branch: {}", git_ref); + } else { + let stderr = String::from_utf8_lossy(&pull_output.stderr); + warn!("Failed to pull latest changes for branch '{}': {}", git_ref, stderr); + // Continue anyway, we'll use whatever version we have + } + } + } + } + + // Verify the checkout succeeded by checking the current commit + let current_commit_output = Command::new("git") + .args(["rev-parse", "HEAD"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to get current commit")?; + + if !current_commit_output.status.success() { + return Err(eyre!("Failed to verify git checkout")); + } + + info!("Switched to reference: {}", git_ref); + Ok(()) + } + + /// Get the current commit hash + pub(crate) fn get_current_commit(&self) -> Result { + let output = Command::new("git") + .args(["rev-parse", "HEAD"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to get 
current commit")?; + + if !output.status.success() { + return Err(eyre!("Failed to get current commit hash")); + } + + let commit_hash = String::from_utf8(output.stdout) + .wrap_err("Commit hash is not valid UTF-8")? + .trim() + .to_string(); + + Ok(commit_hash) + } + + /// Get the repository root path + pub(crate) fn repo_root(&self) -> &str { + &self.repo_root + } +} diff --git a/bin/reth-bench-compare/src/main.rs b/bin/reth-bench-compare/src/main.rs new file mode 100644 index 00000000000..e866afb2509 --- /dev/null +++ b/bin/reth-bench-compare/src/main.rs @@ -0,0 +1,45 @@ +//! # reth-bench-compare +//! +//! Automated tool for comparing reth performance between two git branches. +//! This tool automates the complete workflow of compiling, running, and benchmarking +//! reth on different branches to provide meaningful performance comparisons. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +#[global_allocator] +static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); + +mod benchmark; +mod cli; +mod comparison; +mod compilation; +mod git; +mod node; + +use clap::Parser; +use cli::{run_comparison, Args}; +use eyre::Result; +use reth_cli_runner::CliRunner; + +fn main() -> Result<()> { + // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. + if std::env::var_os("RUST_BACKTRACE").is_none() { + unsafe { + std::env::set_var("RUST_BACKTRACE", "1"); + } + } + + let args = Args::parse(); + + // Initialize tracing + let _guard = args.init_tracing()?; + + // Run until either exit or sigint or sigterm + let runner = CliRunner::try_default_runtime()?; + runner.run_command_until_exit(|ctx| run_comparison(args, ctx)) +} diff --git a/bin/reth-bench-compare/src/node.rs b/bin/reth-bench-compare/src/node.rs new file mode 100644 index 00000000000..01eb9961f9f --- /dev/null +++ b/bin/reth-bench-compare/src/node.rs @@ -0,0 +1,511 @@ +//! Node management for starting, stopping, and controlling reth instances. 
+ +use crate::cli::Args; +use alloy_provider::{Provider, ProviderBuilder}; +use alloy_rpc_types_eth::SyncStatus; +use eyre::{eyre, OptionExt, Result, WrapErr}; +#[cfg(unix)] +use nix::sys::signal::{killpg, Signal}; +#[cfg(unix)] +use nix::unistd::Pid; +use reth_chainspec::Chain; +use std::{fs, path::PathBuf, time::Duration}; +use tokio::{ + fs::File as AsyncFile, + io::{AsyncBufReadExt, AsyncWriteExt, BufReader as AsyncBufReader}, + process::Command, + time::{sleep, timeout}, +}; +use tracing::{debug, info, warn}; + +/// Manages reth node lifecycle and operations +pub(crate) struct NodeManager { + datadir: Option, + metrics_port: u16, + chain: Chain, + use_sudo: bool, + binary_path: Option, + enable_profiling: bool, + output_dir: PathBuf, + additional_reth_args: Vec, + comparison_dir: Option, +} + +impl NodeManager { + /// Create a new `NodeManager` with configuration from CLI args + pub(crate) fn new(args: &Args) -> Self { + Self { + datadir: Some(args.datadir_path().to_string_lossy().to_string()), + metrics_port: args.metrics_port, + chain: args.chain, + use_sudo: args.sudo, + binary_path: None, + enable_profiling: args.profile, + output_dir: args.output_dir_path(), + additional_reth_args: args.reth_args.clone(), + comparison_dir: None, + } + } + + /// Set the comparison directory path for logging + pub(crate) fn set_comparison_dir(&mut self, dir: PathBuf) { + self.comparison_dir = Some(dir); + } + + /// Get the log file path for a given reference type + fn get_log_file_path(&self, ref_type: &str) -> Result { + let comparison_dir = self + .comparison_dir + .as_ref() + .ok_or_eyre("Comparison directory not set. Call set_comparison_dir first.")?; + + // The comparison directory already contains the full path to results/ + let log_dir = comparison_dir.join(ref_type); + + // Create the directory if it doesn't exist + fs::create_dir_all(&log_dir) + .wrap_err(format!("Failed to create log directory: {:?}", log_dir))?; + + let log_file = log_dir.join("reth_node.log"); + Ok(log_file) + } + + /// Get the perf event max sample rate from the system, capped at 10000 + fn get_perf_sample_rate(&self) -> Option { + let perf_rate_file = "/proc/sys/kernel/perf_event_max_sample_rate"; + if let Ok(content) = fs::read_to_string(perf_rate_file) { + let rate_str = content.trim(); + if !rate_str.is_empty() { + if let Ok(system_rate) = rate_str.parse::() { + let capped_rate = std::cmp::min(system_rate, 10000); + info!( + "Detected perf_event_max_sample_rate: {}, using: {}", + system_rate, capped_rate + ); + return Some(capped_rate.to_string()); + } + warn!("Failed to parse perf_event_max_sample_rate: {}", rate_str); + } + } + None + } + + /// Get the absolute path to samply using 'which' command + async fn get_samply_path(&self) -> Result { + let output = Command::new("which") + .arg("samply") + .output() + .await + .wrap_err("Failed to execute 'which samply' command")?; + + if !output.status.success() { + return Err(eyre!("samply not found in PATH")); + } + + let samply_path = String::from_utf8(output.stdout) + .wrap_err("samply path is not valid UTF-8")? 
+ .trim() + .to_string(); + + if samply_path.is_empty() { + return Err(eyre!("which samply returned empty path")); + } + + Ok(samply_path) + } + + /// Build reth arguments as a vector of strings + fn build_reth_args( + &self, + binary_path_str: &str, + additional_args: &[String], + ) -> (Vec, String) { + let mut reth_args = vec![binary_path_str.to_string(), "node".to_string()]; + + // Add chain argument (skip for mainnet as it's the default) + let chain_str = self.chain.to_string(); + if chain_str != "mainnet" { + reth_args.extend_from_slice(&["--chain".to_string(), chain_str.clone()]); + } + + // Add datadir if specified + if let Some(ref datadir) = self.datadir { + reth_args.extend_from_slice(&["--datadir".to_string(), datadir.clone()]); + } + + // Add reth-specific arguments + let metrics_arg = format!("0.0.0.0:{}", self.metrics_port); + reth_args.extend_from_slice(&[ + "--engine.accept-execution-requests-hash".to_string(), + "--metrics".to_string(), + metrics_arg, + "--http".to_string(), + "--http.api".to_string(), + "eth".to_string(), + "--disable-discovery".to_string(), + "--trusted-only".to_string(), + ]); + + // Add any additional arguments passed via command line (common to both baseline and + // feature) + reth_args.extend_from_slice(&self.additional_reth_args); + + // Add reference-specific additional arguments + reth_args.extend_from_slice(additional_args); + + (reth_args, chain_str) + } + + /// Create a command for profiling mode + async fn create_profiling_command( + &self, + ref_type: &str, + reth_args: &[String], + ) -> Result { + // Create profiles directory if it doesn't exist + let profile_dir = self.output_dir.join("profiles"); + fs::create_dir_all(&profile_dir).wrap_err("Failed to create profiles directory")?; + + let profile_path = profile_dir.join(format!("{}.json.gz", ref_type)); + info!("Starting reth node with samply profiling..."); + info!("Profile output: {:?}", profile_path); + + // Get absolute path to samply + let samply_path = self.get_samply_path().await?; + + let mut cmd = if self.use_sudo { + let mut sudo_cmd = Command::new("sudo"); + sudo_cmd.arg(&samply_path); + sudo_cmd + } else { + Command::new(&samply_path) + }; + + // Add samply arguments + cmd.args(["record", "--save-only", "-o", &profile_path.to_string_lossy()]); + + // Add rate argument if available + if let Some(rate) = self.get_perf_sample_rate() { + cmd.args(["--rate", &rate]); + } + + // Add separator and complete reth command + cmd.arg("--"); + cmd.args(reth_args); + + Ok(cmd) + } + + /// Create a command for direct reth execution + fn create_direct_command(&self, reth_args: &[String]) -> Command { + let binary_path = &reth_args[0]; + + if self.use_sudo { + info!("Starting reth node with sudo..."); + let mut cmd = Command::new("sudo"); + cmd.args(reth_args); + cmd + } else { + info!("Starting reth node..."); + let mut cmd = Command::new(binary_path); + cmd.args(&reth_args[1..]); // Skip the binary path since it's the command + cmd + } + } + + /// Start a reth node using the specified binary path and return the process handle + pub(crate) async fn start_node( + &mut self, + binary_path: &std::path::Path, + _git_ref: &str, + ref_type: &str, + additional_args: &[String], + ) -> Result { + // Store the binary path for later use (e.g., in unwind_to_block) + self.binary_path = Some(binary_path.to_path_buf()); + + let binary_path_str = binary_path.to_string_lossy(); + let (reth_args, _) = self.build_reth_args(&binary_path_str, additional_args); + + // Log additional arguments if any + if 
!self.additional_reth_args.is_empty() { + info!("Using common additional reth arguments: {:?}", self.additional_reth_args); + } + if !additional_args.is_empty() { + info!("Using reference-specific additional reth arguments: {:?}", additional_args); + } + + let mut cmd = if self.enable_profiling { + self.create_profiling_command(ref_type, &reth_args).await? + } else { + self.create_direct_command(&reth_args) + }; + + // Set process group for better signal handling + #[cfg(unix)] + { + cmd.process_group(0); + } + + debug!("Executing reth command: {cmd:?}"); + + let mut child = cmd + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .kill_on_drop(true) // Kill on drop so that on Ctrl-C for parent process we stop all child processes + .spawn() + .wrap_err("Failed to start reth node")?; + + info!( + "Reth node started with PID: {:?} (binary: {})", + child.id().ok_or_eyre("Reth node is not running")?, + binary_path_str + ); + + // Prepare log file path + let log_file_path = self.get_log_file_path(ref_type)?; + info!("Reth node logs will be saved to: {:?}", log_file_path); + + // Stream stdout and stderr with prefixes at debug level and to log file + if let Some(stdout) = child.stdout.take() { + let log_file = AsyncFile::create(&log_file_path) + .await + .wrap_err(format!("Failed to create log file: {:?}", log_file_path))?; + tokio::spawn(async move { + let reader = AsyncBufReader::new(stdout); + let mut lines = reader.lines(); + let mut log_file = log_file; + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH] {}", line); + // Write to log file (reth already includes timestamps) + let log_line = format!("{}\n", line); + if let Err(e) = log_file.write_all(log_line.as_bytes()).await { + debug!("Failed to write to log file: {}", e); + } + } + }); + } + + if let Some(stderr) = child.stderr.take() { + let log_file = AsyncFile::options() + .create(true) + .append(true) + .open(&log_file_path) + .await + .wrap_err(format!("Failed to open log file for stderr: {:?}", log_file_path))?; + tokio::spawn(async move { + let reader = AsyncBufReader::new(stderr); + let mut lines = reader.lines(); + let mut log_file = log_file; + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH] {}", line); + // Write to log file (reth already includes timestamps) + let log_line = format!("{}\n", line); + if let Err(e) = log_file.write_all(log_line.as_bytes()).await { + debug!("Failed to write to log file: {}", e); + } + } + }); + } + + // Give the node a moment to start up + sleep(Duration::from_secs(5)).await; + + Ok(child) + } + + /// Wait for the node to be ready and return its current tip + pub(crate) async fn wait_for_node_ready_and_get_tip(&self) -> Result { + info!("Waiting for node to be ready and synced..."); + + let max_wait = Duration::from_secs(120); // 2 minutes to allow for sync + let check_interval = Duration::from_secs(2); + let rpc_url = "http://localhost:8545"; + + // Create Alloy provider + let url = rpc_url.parse().map_err(|e| eyre!("Invalid RPC URL '{}': {}", rpc_url, e))?; + let provider = ProviderBuilder::new().connect_http(url); + + timeout(max_wait, async { + loop { + // First check if RPC is up and node is not syncing + match provider.syncing().await { + Ok(sync_result) => { + match sync_result { + SyncStatus::Info(sync_info) => { + debug!("Node is still syncing {sync_info:?}, waiting..."); + } + _ => { + // Node is not syncing, now get the tip + match provider.get_block_number().await { + Ok(tip) => { + info!("Node is ready and not 
syncing at block: {}", tip); + return Ok(tip); + } + Err(e) => { + debug!("Failed to get block number: {}", e); + } + } + } + } + } + Err(e) => { + debug!("Node RPC not ready yet or failed to check sync status: {}", e); + } + } + + sleep(check_interval).await; + } + }) + .await + .wrap_err("Timed out waiting for node to be ready and synced")? + } + + /// Stop the reth node gracefully + pub(crate) async fn stop_node(&self, child: &mut tokio::process::Child) -> Result<()> { + let pid = child.id().expect("Child process ID should be available"); + + // Check if the process has already exited + match child.try_wait() { + Ok(Some(status)) => { + info!("Reth node (PID: {}) has already exited with status: {:?}", pid, status); + return Ok(()); + } + Ok(None) => { + // Process is still running, proceed to stop it + info!("Stopping process gracefully with SIGINT (PID: {})...", pid); + } + Err(e) => { + return Err(eyre!("Failed to check process status: {}", e)); + } + } + + #[cfg(unix)] + { + // Send SIGINT to process group to mimic Ctrl-C behavior + let nix_pgid = Pid::from_raw(pid as i32); + + match killpg(nix_pgid, Signal::SIGINT) { + Ok(()) => {} + Err(nix::errno::Errno::ESRCH) => { + info!("Process group {} has already exited", pid); + } + Err(e) => { + return Err(eyre!("Failed to send SIGINT to process group {}: {}", pid, e)); + } + } + } + + #[cfg(not(unix))] + { + // On non-Unix systems, fall back to using external kill command + let output = Command::new("taskkill") + .args(["/PID", &pid.to_string(), "/F"]) + .output() + .await + .wrap_err("Failed to execute taskkill command")?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + // Check if the error is because the process doesn't exist + if stderr.contains("not found") || stderr.contains("not exist") { + info!("Process {} has already exited", pid); + } else { + return Err(eyre!("Failed to kill process {}: {}", pid, stderr)); + } + } + } + + // Wait for the process to exit + match child.wait().await { + Ok(status) => { + info!("Reth node (PID: {}) exited with status: {:?}", pid, status); + } + Err(e) => { + // If we get an error here, it might be because the process already exited + debug!("Error waiting for process exit (may have already exited): {}", e); + } + } + + Ok(()) + } + + /// Unwind the node to a specific block + pub(crate) async fn unwind_to_block(&self, block_number: u64) -> Result<()> { + if self.use_sudo { + info!("Unwinding node to block: {} (with sudo)", block_number); + } else { + info!("Unwinding node to block: {}", block_number); + } + + // Use the binary path from the last start_node call, or fallback to default + let binary_path = self + .binary_path + .as_ref() + .map(|p| p.to_string_lossy().to_string()) + .unwrap_or_else(|| "./target/profiling/reth".to_string()); + + let mut cmd = if self.use_sudo { + let mut sudo_cmd = Command::new("sudo"); + sudo_cmd.args([&binary_path, "stage", "unwind"]); + sudo_cmd + } else { + let mut reth_cmd = Command::new(&binary_path); + reth_cmd.args(["stage", "unwind"]); + reth_cmd + }; + + // Add chain argument (skip for mainnet as it's the default) + let chain_str = self.chain.to_string(); + if chain_str != "mainnet" { + cmd.args(["--chain", &chain_str]); + } + + // Add datadir if specified + if let Some(ref datadir) = self.datadir { + cmd.args(["--datadir", datadir]); + } + + cmd.args(["to-block", &block_number.to_string()]); + + // Debug log the command + debug!("Executing reth unwind command: {:?}", cmd); + + let mut child = cmd + 
.stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .spawn() + .wrap_err("Failed to start unwind command")?; + + // Stream stdout and stderr with prefixes in real-time + if let Some(stdout) = child.stdout.take() { + tokio::spawn(async move { + let reader = AsyncBufReader::new(stdout); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH-UNWIND] {}", line); + } + }); + } + + if let Some(stderr) = child.stderr.take() { + tokio::spawn(async move { + let reader = AsyncBufReader::new(stderr); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH-UNWIND] {}", line); + } + }); + } + + // Wait for the command to complete + let status = child.wait().await.wrap_err("Failed to wait for unwind command")?; + + if !status.success() { + return Err(eyre!("Unwind command failed with exit code: {:?}", status.code())); + } + + info!("Unwound to block: {}", block_number); + Ok(()) + } +} diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index 891fa4f9780..a07d0f5200e 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -81,11 +81,26 @@ jemalloc = [ jemalloc-prof = ["reth-cli-util/jemalloc-prof"] tracy-allocator = ["reth-cli-util/tracy-allocator"] -min-error-logs = ["tracing/release_max_level_error"] -min-warn-logs = ["tracing/release_max_level_warn"] -min-info-logs = ["tracing/release_max_level_info"] -min-debug-logs = ["tracing/release_max_level_debug"] -min-trace-logs = ["tracing/release_max_level_trace"] +min-error-logs = [ + "tracing/release_max_level_error", + "reth-node-core/min-error-logs", +] +min-warn-logs = [ + "tracing/release_max_level_warn", + "reth-node-core/min-warn-logs", +] +min-info-logs = [ + "tracing/release_max_level_info", + "reth-node-core/min-info-logs", +] +min-debug-logs = [ + "tracing/release_max_level_debug", + "reth-node-core/min-debug-logs", +] +min-trace-logs = [ + "tracing/release_max_level_trace", + "reth-node-core/min-trace-logs", +] # no-op feature flag for switching between the `optimism` and default functionality in CI matrices ethereum = [] diff --git a/bin/reth-bench/src/bench/context.rs b/bin/reth-bench/src/bench/context.rs index 75c8592ad3c..1d53ce8e1a3 100644 --- a/bin/reth-bench/src/bench/context.rs +++ b/bin/reth-bench/src/bench/context.rs @@ -7,6 +7,7 @@ use alloy_primitives::address; use alloy_provider::{network::AnyNetwork, Provider, RootProvider}; use alloy_rpc_client::ClientBuilder; use alloy_rpc_types_engine::JwtSecret; +use alloy_transport::layers::RetryBackoffLayer; use reqwest::Url; use reth_node_core::args::BenchmarkArgs; use tracing::info; @@ -49,7 +50,9 @@ impl BenchContext { } // set up alloy client for blocks - let client = ClientBuilder::default().http(rpc_url.parse()?); + let client = ClientBuilder::default() + .layer(RetryBackoffLayer::new(10, 800, u64::MAX)) + .http(rpc_url.parse()?); let block_provider = RootProvider::::new(client); // Check if this is an OP chain by checking code at a predeploy address. diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index 0303c7d014d..1d1bf59b365 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -30,8 +30,18 @@ pub struct Command { rpc_url: String, /// How long to wait after a forkchoice update before sending the next payload. 
- #[arg(long, value_name = "WAIT_TIME", value_parser = parse_duration, verbatim_doc_comment)] - wait_time: Option, + #[arg(long, value_name = "WAIT_TIME", value_parser = parse_duration, default_value = "250ms", verbatim_doc_comment)] + wait_time: Duration, + + /// The size of the block buffer (channel capacity) for prefetching blocks from the RPC + /// endpoint. + #[arg( + long = "rpc-block-buffer-size", + value_name = "RPC_BLOCK_BUFFER_SIZE", + default_value = "20", + verbatim_doc_comment + )] + rpc_block_buffer_size: usize, #[command(flatten)] benchmark: BenchmarkArgs, @@ -48,7 +58,12 @@ impl Command { is_optimism, } = BenchContext::new(&self.benchmark, self.rpc_url).await?; - let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); + let buffer_size = self.rpc_block_buffer_size; + + // Use a oneshot channel to propagate errors from the spawned task + let (error_sender, mut error_receiver) = tokio::sync::oneshot::channel(); + let (sender, mut receiver) = tokio::sync::mpsc::channel(buffer_size); + tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { let block_res = block_provider @@ -60,6 +75,7 @@ impl Command { Ok(block) => block, Err(e) => { tracing::error!("Failed to fetch block {next_block}: {e}"); + let _ = error_sender.send(e); break; } }; @@ -69,6 +85,7 @@ impl Command { Ok(result) => result, Err(e) => { tracing::error!("Failed to convert block to new payload: {e}"); + let _ = error_sender.send(e); break; } }; @@ -153,16 +170,19 @@ impl Command { // convert gas used to gigagas, then compute gigagas per second info!(%combined_result); - // wait if we need to - if let Some(wait_time) = self.wait_time { - tokio::time::sleep(wait_time).await; - } + // wait before sending the next payload + tokio::time::sleep(self.wait_time).await; // record the current result let gas_row = TotalGasRow { block_number, gas_used, time: current_duration }; results.push((gas_row, combined_result)); } + // Check if the spawned task encountered an error + if let Ok(error) = error_receiver.try_recv() { + return Err(error); + } + let (gas_output_results, combined_results): (_, Vec) = results.into_iter().unzip(); diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 34fe3780553..3dfa619ec7b 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -13,7 +13,7 @@ use crate::{ use alloy_provider::Provider; use clap::Parser; use csv::Writer; -use eyre::Context; +use eyre::{Context, OptionExt}; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; use std::time::{Duration, Instant}; @@ -26,6 +26,16 @@ pub struct Command { #[arg(long, value_name = "RPC_URL", verbatim_doc_comment)] rpc_url: String, + /// The size of the block buffer (channel capacity) for prefetching blocks from the RPC + /// endpoint. 
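[Editor's note, not part of the patch: both benchmark commands now use the same pattern — a bounded mpsc channel sized by `--rpc-block-buffer-size` for prefetched payloads, plus a oneshot channel that carries the first producer-side error back to the consumer. The following is a minimal, self-contained sketch of that pattern (plain strings instead of real payloads, and a deliberately failing fetch); it only assumes the `tokio` and `eyre` crates, not any reth types.]

    use eyre::{eyre, Result};

    #[tokio::main]
    async fn main() -> Result<()> {
        // Bounded buffer: the producer prefetches at most `buffer_size` items ahead.
        let buffer_size = 20;
        let (sender, mut receiver) = tokio::sync::mpsc::channel(buffer_size);
        // Oneshot channel that carries the first producer-side error to the consumer.
        let (error_sender, mut error_receiver) = tokio::sync::oneshot::channel();

        tokio::task::spawn(async move {
            for block in 0..100u64 {
                // Stand-in for fetching a block over RPC; fails at block 3 for illustration.
                let fetched: Result<String> = if block == 3 {
                    Err(eyre!("fetch failed"))
                } else {
                    Ok(format!("block {block}"))
                };
                let payload = match fetched {
                    Ok(p) => p,
                    Err(e) => {
                        // Report the error and stop producing; the consumer drains the rest.
                        let _ = error_sender.send(e);
                        break;
                    }
                };
                if sender.send(payload).await.is_err() {
                    // Consumer went away; nothing more to do.
                    break;
                }
            }
        });

        // Consume until the producer closes the channel.
        while let Some(payload) = receiver.recv().await {
            println!("processing {payload}");
        }

        // Surface the producer's error, if any, after the buffered results are handled.
        if let Ok(error) = error_receiver.try_recv() {
            return Err(error);
        }
        Ok(())
    }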
+ #[arg( + long = "rpc-block-buffer-size", + value_name = "RPC_BLOCK_BUFFER_SIZE", + default_value = "20", + verbatim_doc_comment + )] + rpc_block_buffer_size: usize, + #[command(flatten)] benchmark: BenchmarkArgs, } @@ -41,7 +51,12 @@ impl Command { is_optimism, } = BenchContext::new(&self.benchmark, self.rpc_url).await?; - let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); + let buffer_size = self.rpc_block_buffer_size; + + // Use a oneshot channel to propagate errors from the spawned task + let (error_sender, mut error_receiver) = tokio::sync::oneshot::channel(); + let (sender, mut receiver) = tokio::sync::mpsc::channel(buffer_size); + tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { let block_res = block_provider @@ -49,13 +64,30 @@ impl Command { .full() .await .wrap_err_with(|| format!("Failed to fetch block by number {next_block}")); - let block = block_res.unwrap().unwrap(); + let block = match block_res.and_then(|opt| opt.ok_or_eyre("Block not found")) { + Ok(block) => block, + Err(e) => { + tracing::error!("Failed to fetch block {next_block}: {e}"); + let _ = error_sender.send(e); + break; + } + }; let header = block.header.clone(); - let (version, params) = block_to_new_payload(block, is_optimism).unwrap(); + let (version, params) = match block_to_new_payload(block, is_optimism) { + Ok(result) => result, + Err(e) => { + tracing::error!("Failed to convert block to new payload: {e}"); + let _ = error_sender.send(e); + break; + } + }; next_block += 1; - sender.send((header, version, params)).await.unwrap(); + if let Err(e) = sender.send((header, version, params)).await { + tracing::error!("Failed to send block data: {e}"); + break; + } } }); @@ -96,6 +128,11 @@ impl Command { results.push((row, new_payload_result)); } + // Check if the spawned task encountered an error + if let Ok(error) = error_receiver.try_recv() { + return Err(error); + } + let (gas_output_results, new_payload_results): (_, Vec) = results.into_iter().unzip(); diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index d4e134bf48c..eb0cf0bd2b2 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -9,20 +9,6 @@ repository.workspace = true description = "Reth node implementation" default-run = "reth" -[package.metadata.deb] -maintainer = "reth team" -depends = "$auto" -section = "network" -priority = "optional" -maintainer-scripts = "../../pkg/reth/debian/" -assets = [ - "$auto", - ["../../README.md", "usr/share/doc/reth/", "644"], - ["../../LICENSE-APACHE", "usr/share/doc/reth/", "644"], - ["../../LICENSE-MIT", "usr/share/doc/reth/", "644"], -] -systemd-units = { enable = false, start = false, unit-name = "reth", unit-scripts = "../../pkg/reth/debian" } - [lints] workspace = true @@ -54,7 +40,7 @@ reth-node-api.workspace = true reth-node-core.workspace = true reth-ethereum-payload-builder.workspace = true reth-ethereum-primitives.workspace = true -reth-node-ethereum = { workspace = true, features = ["js-tracer"] } +reth-node-ethereum.workspace = true reth-node-builder.workspace = true reth-node-metrics.workspace = true reth-consensus.workspace = true @@ -81,7 +67,18 @@ backon.workspace = true tempfile.workspace = true [features] -default = ["jemalloc", "reth-revm/portable"] +default = ["jemalloc", "otlp", "reth-revm/portable", "js-tracer"] + +otlp = [ + "reth-ethereum-cli/otlp", + "reth-node-core/otlp", +] +js-tracer = [ + "reth-node-builder/js-tracer", + "reth-node-ethereum/js-tracer", + "reth-rpc/js-tracer", + "reth-rpc-eth-types/js-tracer", +] dev = 
["reth-ethereum-cli/dev"] @@ -123,22 +120,27 @@ snmalloc-native = [ min-error-logs = [ "tracing/release_max_level_error", "reth-ethereum-cli/min-error-logs", + "reth-node-core/min-error-logs", ] min-warn-logs = [ "tracing/release_max_level_warn", "reth-ethereum-cli/min-warn-logs", + "reth-node-core/min-warn-logs", ] min-info-logs = [ "tracing/release_max_level_info", "reth-ethereum-cli/min-info-logs", + "reth-node-core/min-info-logs", ] min-debug-logs = [ "tracing/release_max_level_debug", "reth-ethereum-cli/min-debug-logs", + "reth-node-core/min-debug-logs", ] min-trace-logs = [ "tracing/release_max_level_trace", "reth-ethereum-cli/min-trace-logs", + "reth-node-core/min-trace-logs", ] [[bin]] diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index cba12995015..d21c83ae7c4 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -54,6 +54,7 @@ reth-testing-utils.workspace = true alloy-signer.workspace = true alloy-signer-local.workspace = true rand.workspace = true +revm-state.workspace = true criterion.workspace = true [features] diff --git a/crates/chain-state/benches/canonical_hashes_range.rs b/crates/chain-state/benches/canonical_hashes_range.rs index 58fdd73bf99..c19ce25ec4f 100644 --- a/crates/chain-state/benches/canonical_hashes_range.rs +++ b/crates/chain-state/benches/canonical_hashes_range.rs @@ -2,7 +2,7 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion}; use reth_chain_state::{ - test_utils::TestBlockBuilder, ExecutedBlockWithTrieUpdates, MemoryOverlayStateProviderRef, + test_utils::TestBlockBuilder, ExecutedBlock, MemoryOverlayStateProviderRef, }; use reth_ethereum_primitives::EthPrimitives; use reth_storage_api::{noop::NoopProvider, BlockHashReader}; @@ -84,10 +84,7 @@ fn bench_canonical_hashes_range(c: &mut Criterion) { fn setup_provider_with_blocks( num_blocks: usize, -) -> ( - MemoryOverlayStateProviderRef<'static, EthPrimitives>, - Vec>, -) { +) -> (MemoryOverlayStateProviderRef<'static, EthPrimitives>, Vec>) { let mut builder = TestBlockBuilder::::default(); let blocks: Vec<_> = builder.get_executed_blocks(1000..1000 + num_blocks as u64).collect(); diff --git a/crates/chain-state/src/chain_info.rs b/crates/chain-state/src/chain_info.rs index a8a08430566..dd6afc8db1a 100644 --- a/crates/chain-state/src/chain_info.rs +++ b/crates/chain-state/src/chain_info.rs @@ -77,22 +77,22 @@ where self.inner.finalized_block.borrow().clone() } - /// Returns the canonical head of the chain. + /// Returns the `BlockNumHash` of the canonical head. pub fn get_canonical_num_hash(&self) -> BlockNumHash { self.inner.canonical_head.read().num_hash() } - /// Returns the canonical head of the chain. + /// Returns the block number of the canonical head. pub fn get_canonical_block_number(&self) -> BlockNumber { self.inner.canonical_head_number.load(Ordering::Relaxed) } - /// Returns the safe header of the chain. + /// Returns the `BlockNumHash` of the safe header. pub fn get_safe_num_hash(&self) -> Option { self.inner.safe_block.borrow().as_ref().map(SealedHeader::num_hash) } - /// Returns the finalized header of the chain. + /// Returns the `BlockNumHash` of the finalized header. 
pub fn get_finalized_num_hash(&self) -> Option { self.inner.finalized_block.borrow().as_ref().map(SealedHeader::num_hash) } diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 335f7de6195..a6c85538107 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -242,7 +242,7 @@ impl CanonicalInMemoryState { /// Updates the pending block with the given block. /// /// Note: This assumes that the parent block of the pending block is canonical. - pub fn set_pending_block(&self, pending: ExecutedBlockWithTrieUpdates) { + pub fn set_pending_block(&self, pending: ExecutedBlock) { // fetch the state of the pending block's parent block let parent = self.state_by_hash(pending.recovered_block().parent_hash()); let pending = BlockState::with_parent(pending, parent); @@ -258,7 +258,7 @@ impl CanonicalInMemoryState { /// them to their parent blocks. fn update_blocks(&self, new_blocks: I, reorged: R) where - I: IntoIterator>, + I: IntoIterator>, R: IntoIterator>, { { @@ -568,22 +568,19 @@ impl CanonicalInMemoryState { #[derive(Debug, PartialEq, Eq, Clone)] pub struct BlockState { /// The executed block that determines the state after this block has been executed. - block: ExecutedBlockWithTrieUpdates, + block: ExecutedBlock, /// The block's parent block if it exists. parent: Option>, } impl BlockState { /// [`BlockState`] constructor. - pub const fn new(block: ExecutedBlockWithTrieUpdates) -> Self { + pub const fn new(block: ExecutedBlock) -> Self { Self { block, parent: None } } /// [`BlockState`] constructor with parent. - pub const fn with_parent( - block: ExecutedBlockWithTrieUpdates, - parent: Option>, - ) -> Self { + pub const fn with_parent(block: ExecutedBlock, parent: Option>) -> Self { Self { block, parent } } @@ -597,12 +594,12 @@ impl BlockState { } /// Returns the executed block that determines the state. - pub fn block(&self) -> ExecutedBlockWithTrieUpdates { + pub fn block(&self) -> ExecutedBlock { self.block.clone() } /// Returns a reference to the executed block that determines the state. - pub const fn block_ref(&self) -> &ExecutedBlockWithTrieUpdates { + pub const fn block_ref(&self) -> &ExecutedBlock { &self.block } @@ -730,6 +727,8 @@ pub struct ExecutedBlock { pub execution_output: Arc>, /// Block's hashed state. pub hashed_state: Arc, + /// Trie updates that result from calculating the state root for the block. + pub trie_updates: Arc, } impl Default for ExecutedBlock { @@ -738,6 +737,7 @@ impl Default for ExecutedBlock { recovered_block: Default::default(), execution_output: Default::default(), hashed_state: Default::default(), + trie_updates: Default::default(), } } } @@ -767,113 +767,16 @@ impl ExecutedBlock { &self.hashed_state } - /// Returns a [`BlockNumber`] of the block. + /// Returns a reference to the trie updates resulting from the execution outcome #[inline] - pub fn block_number(&self) -> BlockNumber { - self.recovered_block.header().number() - } -} - -/// Trie updates that result from calculating the state root for the block. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum ExecutedTrieUpdates { - /// Trie updates present. State root was calculated, and the trie updates can be applied to the - /// database. - Present(Arc), - /// Trie updates missing. State root was calculated, but the trie updates cannot be applied to - /// the current database state. To apply the updates, the state root must be recalculated, and - /// new trie updates must be generated. 
- /// - /// This can happen when processing fork chain blocks that are building on top of the - /// historical database state. Since we don't store the historical trie state, we cannot - /// generate the trie updates for it. - Missing, -} - -impl ExecutedTrieUpdates { - /// Creates a [`ExecutedTrieUpdates`] with present but empty trie updates. - pub fn empty() -> Self { - Self::Present(Arc::default()) - } - - /// Sets the trie updates to the provided value as present. - pub fn set_present(&mut self, updates: Arc) { - *self = Self::Present(updates); - } - - /// Takes the present trie updates, leaving the state as missing. - pub fn take_present(&mut self) -> Option> { - match self { - Self::Present(updates) => { - let updates = core::mem::take(updates); - *self = Self::Missing; - Some(updates) - } - Self::Missing => None, - } - } - - /// Returns a reference to the trie updates if present. - #[allow(clippy::missing_const_for_fn)] // false positive - pub fn as_ref(&self) -> Option<&TrieUpdates> { - match self { - Self::Present(updates) => Some(updates), - Self::Missing => None, - } - } - - /// Returns `true` if the trie updates are present. - pub const fn is_present(&self) -> bool { - matches!(self, Self::Present(_)) + pub fn trie_updates(&self) -> &TrieUpdates { + &self.trie_updates } - /// Returns `true` if the trie updates are missing. - pub const fn is_missing(&self) -> bool { - matches!(self, Self::Missing) - } -} - -/// An [`ExecutedBlock`] with its [`TrieUpdates`]. -/// -/// We store it as separate type because [`TrieUpdates`] are only available for blocks stored in -/// memory and can't be obtained for canonical persisted blocks. -#[derive( - Clone, Debug, PartialEq, Eq, derive_more::Deref, derive_more::DerefMut, derive_more::Into, -)] -pub struct ExecutedBlockWithTrieUpdates { - /// Inner [`ExecutedBlock`]. - #[deref] - #[deref_mut] - #[into] - pub block: ExecutedBlock, - /// Trie updates that result from calculating the state root for the block. - /// - /// If [`ExecutedTrieUpdates::Missing`], the trie updates should be computed when persisting - /// the block **on top of the canonical parent**. - pub trie: ExecutedTrieUpdates, -} - -impl ExecutedBlockWithTrieUpdates { - /// [`ExecutedBlock`] constructor. - pub const fn new( - recovered_block: Arc>, - execution_output: Arc>, - hashed_state: Arc, - trie: ExecutedTrieUpdates, - ) -> Self { - Self { block: ExecutedBlock { recovered_block, execution_output, hashed_state }, trie } - } - - /// Returns a reference to the trie updates for the block, if present. + /// Returns a [`BlockNumber`] of the block. #[inline] - pub fn trie_updates(&self) -> Option<&TrieUpdates> { - self.trie.as_ref() - } - - /// Converts the value into [`SealedBlock`]. - pub fn into_sealed_block(self) -> SealedBlock { - let block = Arc::unwrap_or_clone(self.block.recovered_block); - block.into_sealed_block() + pub fn block_number(&self) -> BlockNumber { + self.recovered_block.header().number() } } @@ -883,18 +786,14 @@ pub enum NewCanonicalChain { /// A simple append to the current canonical head Commit { /// all blocks that lead back to the canonical head - new: Vec>, + new: Vec>, }, /// A reorged chain consists of two chains that trace back to a shared ancestor block at which /// point they diverge. Reorg { /// All blocks of the _new_ chain - new: Vec>, + new: Vec>, /// All blocks of the _old_ chain - /// - /// These are not [`ExecutedBlockWithTrieUpdates`] because we don't always have the trie - /// updates for the old canonical chain. 
For example, in case of node being restarted right - /// before the reorg [`TrieUpdates`] can't be fetched from database. old: Vec>, }, } @@ -1257,7 +1156,7 @@ mod tests { block1.recovered_block().hash() ); - let chain = NewCanonicalChain::Reorg { new: vec![block2.clone()], old: vec![block1.block] }; + let chain = NewCanonicalChain::Reorg { new: vec![block2.clone()], old: vec![block1] }; state.update_chain(chain); assert_eq!( state.head_state().unwrap().block_ref().recovered_block().hash(), @@ -1380,8 +1279,7 @@ mod tests { #[test] fn test_canonical_in_memory_state_canonical_chain_empty() { let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); - let chain: Vec<_> = state.canonical_chain().collect(); - assert!(chain.is_empty()); + assert!(state.canonical_chain().next().is_none()); } #[test] @@ -1540,7 +1438,7 @@ mod tests { // Test reorg notification let chain_reorg = NewCanonicalChain::Reorg { new: vec![block1a.clone(), block2a.clone()], - old: vec![block1.block.clone(), block2.block.clone()], + old: vec![block1.clone(), block2.clone()], }; assert_eq!( diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index a035d833a46..254edb248b4 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -1,4 +1,4 @@ -use super::ExecutedBlockWithTrieUpdates; +use super::ExecutedBlock; use alloy_consensus::BlockHeader; use alloy_primitives::{keccak256, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; use reth_errors::ProviderResult; @@ -24,7 +24,7 @@ pub struct MemoryOverlayStateProviderRef< /// Historical state provider for state lookups that are not found in memory blocks. pub(crate) historical: Box, /// The collection of executed parent blocks. Expected order is newest to oldest. - pub(crate) in_memory: Vec>, + pub(crate) in_memory: Vec>, /// Lazy-loaded in-memory trie data. pub(crate) trie_input: OnceLock, } @@ -41,10 +41,7 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> { /// - `in_memory` - the collection of executed ancestor blocks in reverse. /// - `historical` - a historical state provider for the latest ancestor block stored in the /// database. - pub fn new( - historical: Box, - in_memory: Vec>, - ) -> Self { + pub fn new(historical: Box, in_memory: Vec>) -> Self { Self { historical, in_memory, trie_input: OnceLock::new() } } @@ -60,10 +57,17 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> { self.in_memory .iter() .rev() - .map(|block| (block.hashed_state.as_ref(), block.trie.as_ref())), + .map(|block| (block.hashed_state.as_ref(), block.trie_updates.as_ref())), ) }) } + + fn merged_hashed_storage(&self, address: Address, storage: HashedStorage) -> HashedStorage { + let state = &self.trie_input().state; + let mut hashed = state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed.extend(&storage); + hashed + } } impl BlockHashReader for MemoryOverlayStateProviderRef<'_, N> { @@ -148,11 +152,8 @@ impl StateRootProvider for MemoryOverlayStateProviderRef<'_, impl StorageRootProvider for MemoryOverlayStateProviderRef<'_, N> { // TODO: Currently this does not reuse available in-memory trie nodes. 
fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult { - let state = &self.trie_input().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_root(address, hashed_storage) + let merged = self.merged_hashed_storage(address, storage); + self.historical.storage_root(address, merged) } // TODO: Currently this does not reuse available in-memory trie nodes. @@ -162,11 +163,8 @@ impl StorageRootProvider for MemoryOverlayStateProviderRef<'_ slot: B256, storage: HashedStorage, ) -> ProviderResult { - let state = &self.trie_input().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_proof(address, slot, hashed_storage) + let merged = self.merged_hashed_storage(address, storage); + self.historical.storage_proof(address, slot, merged) } // TODO: Currently this does not reuse available in-memory trie nodes. @@ -176,11 +174,8 @@ impl StorageRootProvider for MemoryOverlayStateProviderRef<'_ slots: &[B256], storage: HashedStorage, ) -> ProviderResult { - let state = &self.trie_input().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_multiproof(address, slots, hashed_storage) + let merged = self.merged_hashed_storage(address, storage); + self.historical.storage_multiproof(address, slots, merged) } } diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index ace30b9cb35..5d318aca56c 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -1,6 +1,6 @@ use crate::{ - in_memory::ExecutedBlockWithTrieUpdates, CanonStateNotification, CanonStateNotifications, - CanonStateSubscriptions, ExecutedTrieUpdates, + in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, + CanonStateSubscriptions, }; use alloy_consensus::{Header, SignableTransaction, TxEip1559, TxReceipt, EMPTY_ROOT_HASH}; use alloy_eips::{ @@ -23,7 +23,7 @@ use reth_primitives_traits::{ SignedTransaction, }; use reth_storage_api::NodePrimitivesProvider; -use reth_trie::{root::state_root_unhashed, HashedPostState}; +use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm_database::BundleState; use revm_state::AccountInfo; use std::{ @@ -198,45 +198,45 @@ impl TestBlockBuilder { fork } - /// Gets an [`ExecutedBlockWithTrieUpdates`] with [`BlockNumber`], receipts and parent hash. + /// Gets an [`ExecutedBlock`] with [`BlockNumber`], receipts and parent hash. 
fn get_executed_block( &mut self, block_number: BlockNumber, receipts: Vec>, parent_hash: B256, - ) -> ExecutedBlockWithTrieUpdates { + ) -> ExecutedBlock { let block_with_senders = self.generate_random_block(block_number, parent_hash); let (block, senders) = block_with_senders.split_sealed(); - ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed(block, senders)), - Arc::new(ExecutionOutcome::new( + ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed(block, senders)), + execution_output: Arc::new(ExecutionOutcome::new( BundleState::default(), receipts, block_number, vec![Requests::default()], )), - Arc::new(HashedPostState::default()), - ExecutedTrieUpdates::empty(), - ) + hashed_state: Arc::new(HashedPostState::default()), + trie_updates: Arc::new(TrieUpdates::default()), + } } - /// Generates an [`ExecutedBlockWithTrieUpdates`] that includes the given receipts. + /// Generates an [`ExecutedBlock`] that includes the given receipts. pub fn get_executed_block_with_receipts( &mut self, receipts: Vec>, parent_hash: B256, - ) -> ExecutedBlockWithTrieUpdates { + ) -> ExecutedBlock { let number = rand::rng().random::(); self.get_executed_block(number, receipts, parent_hash) } - /// Generates an [`ExecutedBlockWithTrieUpdates`] with the given [`BlockNumber`]. + /// Generates an [`ExecutedBlock`] with the given [`BlockNumber`]. pub fn get_executed_block_with_number( &mut self, block_number: BlockNumber, parent_hash: B256, - ) -> ExecutedBlockWithTrieUpdates { + ) -> ExecutedBlock { self.get_executed_block(block_number, vec![vec![]], parent_hash) } @@ -244,7 +244,7 @@ impl TestBlockBuilder { pub fn get_executed_blocks( &mut self, range: Range, - ) -> impl Iterator + '_ { + ) -> impl Iterator + '_ { let mut parent_hash = B256::default(); range.map(move |number| { let current_parent_hash = parent_hash; diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index 7d53c9d2d5a..70fe3a23daa 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,7 +1,6 @@ use crate::{ChainSpec, DepositContract}; use alloc::{boxed::Box, vec::Vec}; use alloy_chains::Chain; -use alloy_consensus::Header; use alloy_eips::{calc_next_block_base_fee, eip1559::BaseFeeParams, eip7840::BlobParams}; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; @@ -75,8 +74,8 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { } } -impl EthChainSpec for ChainSpec { - type Header = Header; +impl EthChainSpec for ChainSpec { + type Header = H; fn chain(&self) -> Chain { self.chain diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 09e51196067..8dff97f75c9 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -3,9 +3,14 @@ use alloy_evm::eth::spec::EthExecutorSpec; use crate::{ constants::{MAINNET_DEPOSIT_CONTRACT, MAINNET_PRUNE_DELETE_LIMIT}, - holesky, hoodi, mainnet, sepolia, EthChainSpec, + ethereum::SEPOLIA_PARIS_TTD, + holesky, hoodi, mainnet, + mainnet::{MAINNET_PARIS_BLOCK, MAINNET_PARIS_TTD}, + sepolia, + sepolia::SEPOLIA_PARIS_BLOCK, + EthChainSpec, }; -use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use alloc::{boxed::Box, format, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; use alloy_consensus::{ constants::{ @@ -100,7 +105,7 @@ pub static MAINNET: LazyLock> = LazyLock::new(|| { genesis, // paris_block_and_final_difficulty: Some(( - 15537394, + MAINNET_PARIS_BLOCK, U256::from(58_750_003_716_598_352_816_469u128), )), hardforks, @@ -130,7 +135,10 @@ pub static SEPOLIA: 
LazyLock> = LazyLock::new(|| { ), genesis, // - paris_block_and_final_difficulty: Some((1450409, U256::from(17_000_018_015_853_232u128))), + paris_block_and_final_difficulty: Some(( + SEPOLIA_PARIS_BLOCK, + U256::from(17_000_018_015_853_232u128), + )), hardforks, // https://sepolia.etherscan.io/tx/0x025ecbf81a2f1220da6285d1701dc89fb5a956b62562ee922e1a9efd73eb4b14 deposit_contract: Some(DepositContract::new( @@ -266,7 +274,7 @@ impl From for BaseFeeParamsKind { #[derive(Clone, Debug, PartialEq, Eq, From)] pub struct ForkBaseFeeParams(Vec<(Box, BaseFeeParams)>); -impl core::ops::Deref for ChainSpec { +impl core::ops::Deref for ChainSpec { type Target = ChainHardforks; fn deref(&self) -> &Self::Target { @@ -435,7 +443,26 @@ impl ChainSpec { /// Returns the hardfork display helper. pub fn display_hardforks(&self) -> DisplayHardforks { - DisplayHardforks::new(self.hardforks.forks_iter()) + // Create an iterator with hardfork, condition, and optional blob metadata + let hardforks_with_meta = self.hardforks.forks_iter().map(|(fork, condition)| { + // Generate blob metadata for timestamp-based hardforks that have blob params + let metadata = match condition { + ForkCondition::Timestamp(timestamp) => { + // Try to get blob params for this timestamp + // This automatically handles all hardforks with blob support + EthChainSpec::blob_params_at_timestamp(self, timestamp).map(|params| { + format!( + "blob: (target: {}, max: {}, fraction: {})", + params.target_blob_count, params.max_blob_count, params.update_fraction + ) + }) + } + _ => None, + }; + (fork, condition, metadata) + }); + + DisplayHardforks::with_meta(hardforks_with_meta) } /// Get the fork id for the given hardfork. @@ -683,26 +710,50 @@ impl From for ChainSpec { // We expect no new networks to be configured with the merge, so we ignore the TTD field // and merge netsplit block from external genesis files. All existing networks that have // merged should have a static ChainSpec already (namely mainnet and sepolia). 
- let paris_block_and_final_difficulty = - if let Some(ttd) = genesis.config.terminal_total_difficulty { - hardforks.push(( - EthereumHardfork::Paris.boxed(), - ForkCondition::TTD { - // NOTE: this will not work properly if the merge is not activated at - // genesis, and there is no merge netsplit block - activation_block_number: genesis - .config - .merge_netsplit_block - .unwrap_or_default(), - total_difficulty: ttd, - fork_block: genesis.config.merge_netsplit_block, - }, - )); + let paris_block_and_final_difficulty = if let Some(ttd) = + genesis.config.terminal_total_difficulty + { + hardforks.push(( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { + // NOTE: this will not work properly if the merge is not activated at + // genesis, and there is no merge netsplit block + activation_block_number: genesis + .config + .merge_netsplit_block + .or_else(|| { + // Due to this limitation we can't determine the merge block; this is + // the case for perfnet testing, for example. At the time of this fix, + // only two networks have transitioned (MAINNET and SEPOLIA), and this + // parsing from genesis is used for shadowforking, so we can reasonably + // assume that if the TTD and the chain id match one of those networks, + // we use that network's activation block. + match genesis.config.chain_id { + 1 => { + if ttd == MAINNET_PARIS_TTD { + return Some(MAINNET_PARIS_BLOCK) + } + } + 11155111 => { + if ttd == SEPOLIA_PARIS_TTD { + return Some(SEPOLIA_PARIS_BLOCK) + } + } + _ => {} + }; + None + }) + .unwrap_or_default(), + total_difficulty: ttd, + fork_block: genesis.config.merge_netsplit_block, + }, + )); - genesis.config.merge_netsplit_block.map(|block| (block, ttd)) - } else { - None - }; + genesis.config.merge_netsplit_block.map(|block| (block, ttd)) + } else { + None + }; // Time-based hardforks let time_hardfork_opts = [ @@ -856,7 +907,7 @@ impl ChainSpecBuilder { /// Remove the given fork from the spec. pub fn without_fork(mut self, fork: H) -> Self { - self.hardforks.remove(fork); + self.hardforks.remove(&fork); self } @@ -1035,7 +1086,7 @@ impl From<&Arc> for ChainSpecBuilder { } } -impl EthExecutorSpec for ChainSpec { +impl EthExecutorSpec for ChainSpec { fn deposit_contract_address(&self) -> Option
{ self.deposit_contract.map(|deposit_contract| deposit_contract.address) } @@ -1130,11 +1181,11 @@ Merge hard forks: - Paris @58750000000000000000000 (network is known to be merged) Post-merge hard forks (timestamp based): - Shanghai @1681338455 -- Cancun @1710338135 -- Prague @1746612311 -- Osaka @1764798551 -- Bpo1 @1765978199 -- Bpo2 @1767747671" +- Cancun @1710338135 blob: (target: 3, max: 6, fraction: 3338477) +- Prague @1746612311 blob: (target: 6, max: 9, fraction: 5007716) +- Osaka @1764798551 blob: (target: 6, max: 9, fraction: 5007716) +- Bpo1 @1765290071 blob: (target: 10, max: 15, fraction: 8346193) +- Bpo2 @1767747671 blob: (target: 14, max: 21, fraction: 11684671)" ); } @@ -1524,7 +1575,7 @@ Post-merge hard forks (timestamp based): ), // First Prague block ( - Head { number: 20000002, timestamp: 1746612311, ..Default::default() }, + Head { number: 20000004, timestamp: 1746612311, ..Default::default() }, ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: mainnet::MAINNET_OSAKA_TIMESTAMP, @@ -1533,7 +1584,7 @@ Post-merge hard forks (timestamp based): // Osaka block ( Head { - number: 20000002, + number: 20000004, timestamp: mainnet::MAINNET_OSAKA_TIMESTAMP, ..Default::default() }, @@ -2522,7 +2573,7 @@ Post-merge hard forks (timestamp based): #[test] fn latest_eth_mainnet_fork_id() { // BPO2 - assert_eq!(ForkId { hash: ForkHash(hex!("0xfd414558")), next: 0 }, MAINNET.latest_fork_id()) + assert_eq!(ForkId { hash: ForkHash(hex!("0x07c9462e")), next: 0 }, MAINNET.latest_fork_id()) } #[test] @@ -2677,4 +2728,71 @@ Post-merge hard forks (timestamp based): }; assert_eq!(hardfork_params, expected); } + + #[test] + fn parse_perf_net_genesis() { + let s = r#"{ + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficulty": 58750000000000000000000, + "terminalTotalDifficultyPassed": true, + "shanghaiTime": 1681338455, + "cancunTime": 1710338135, + "pragueTime": 1746612311, + "ethash": {}, + "depositContractAddress": "0x00000000219ab540356cBB839Cbe05303d7705Fa", + "blobSchedule": { + "cancun": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + }, + "prague": { + "target": 6, + "max": 9, + "baseFeeUpdateFraction": 5007716 + } + } + }, + "nonce": "0x42", + "timestamp": "0x0", + "extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa", + "gasLimit": "0x1388", + "difficulty": "0x400000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "baseFeePerGas": null +}"#; + + let genesis = serde_json::from_str::(s).unwrap(); + let chainspec = ChainSpec::from_genesis(genesis); + let activation = chainspec.hardforks.fork(EthereumHardfork::Paris); + assert_eq!( + activation, + ForkCondition::TTD { + activation_block_number: MAINNET_PARIS_BLOCK, + total_difficulty: MAINNET_PARIS_TTD, + fork_block: None, + } + ) + } } diff --git 
a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 242cc6d5d9d..da1a5318f25 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -99,6 +99,8 @@ proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] reth-ethereum-cli.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +tempfile.workspace = true [features] default = [] diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 1ceba8f57da..4d18d811841 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -126,14 +126,13 @@ impl EnvironmentArgs { where C: ChainSpecParser, { - let has_receipt_pruning = config.prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); - let prune_modes = - config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(); + let has_receipt_pruning = config.prune.has_receipts_pruning(); + let prune_modes = config.prune.segments.clone(); let factory = ProviderFactory::>>::new( db, self.chain.clone(), static_file_provider, - ) + )? .with_prune_modes(prune_modes.clone()); // Check for consistency between database and static files. diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 6214df0ec98..2f0fc05311a 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -1,4 +1,3 @@ -use alloy_consensus::Header; use alloy_primitives::{hex, BlockHash}; use clap::Parser; use reth_db::{ @@ -66,9 +65,10 @@ impl Command { } Subcommand::StaticFile { segment, key, raw } => { let (key, mask): (u64, _) = match segment { - StaticFileSegment::Headers => { - (table_key::(&key)?, >::MASK) - } + StaticFileSegment::Headers => ( + table_key::(&key)?, + >>::MASK, + ), StaticFileSegment::Transactions => { (table_key::(&key)?, >>::MASK) } @@ -77,17 +77,15 @@ impl Command { } }; - let content = tool.provider_factory.static_file_provider().find_static_file( - segment, - |provider| { - let mut cursor = provider.cursor()?; - cursor.get(key.into(), mask).map(|result| { - result.map(|vec| { - vec.iter().map(|slice| slice.to_vec()).collect::>() - }) - }) - }, - )?; + let content = tool + .provider_factory + .static_file_provider() + .get_segment_provider(segment, key)? + .cursor()? 
+ .get(key.into(), mask) + .map(|result| { + result.map(|vec| vec.iter().map(|slice| slice.to_vec()).collect::>()) + })?; match content { Some(content) => { diff --git a/crates/cli/commands/src/db/list.rs b/crates/cli/commands/src/db/list.rs index 9288a56a86c..2540e77c111 100644 --- a/crates/cli/commands/src/db/list.rs +++ b/crates/cli/commands/src/db/list.rs @@ -97,7 +97,7 @@ impl TableViewer<()> for ListTableViewer<'_, N> { fn view(&self) -> Result<(), Self::Error> { self.tool.provider_factory.db_ref().view(|tx| { let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?; - let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", stringify!($table)))?; + let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", self.args.table.name()))?; let total_entries = stats.entries(); let final_entry_idx = total_entries.saturating_sub(1); if self.args.skip > final_entry_idx { diff --git a/crates/cli/commands/src/db/repair_trie.rs b/crates/cli/commands/src/db/repair_trie.rs index e7ee8d7977c..f7dea67b76f 100644 --- a/crates/cli/commands/src/db/repair_trie.rs +++ b/crates/cli/commands/src/db/repair_trie.rs @@ -52,7 +52,7 @@ fn verify_only(provider_factory: ProviderFactory) -> eyre // Create the verifier let hashed_cursor_factory = DatabaseHashedCursorFactory::new(&tx); let trie_cursor_factory = DatabaseTrieCursorFactory::new(&tx); - let verifier = Verifier::new(trie_cursor_factory, hashed_cursor_factory)?; + let verifier = Verifier::new(&trie_cursor_factory, hashed_cursor_factory)?; let mut inconsistent_nodes = 0; let start_time = Instant::now(); @@ -136,7 +136,7 @@ fn verify_and_repair( let trie_cursor_factory = DatabaseTrieCursorFactory::new(tx); // Create the verifier - let verifier = Verifier::new(trie_cursor_factory, hashed_cursor_factory)?; + let verifier = Verifier::new(&trie_cursor_factory, hashed_cursor_factory)?; let mut inconsistent_nodes = 0; let start_time = Instant::now(); @@ -179,8 +179,17 @@ fn verify_and_repair( Output::StorageWrong { account, path, expected: node, .. } | Output::StorageMissing(account, path, node) => { // Wrong/missing storage node value, upsert it + // (We can't just use `upsert` method with a dup cursor, it's not properly + // supported) let nibbles = StoredNibblesSubKey(path); - let entry = StorageTrieEntry { nibbles, node }; + let entry = StorageTrieEntry { nibbles: nibbles.clone(), node }; + if storage_trie_cursor + .seek_by_key_subkey(account, nibbles.clone())? + .filter(|v| v.nibbles == nibbles) + .is_some() + { + storage_trie_cursor.delete_current()?; + } storage_trie_cursor.upsert(account, &entry)?; } Output::Progress(path) => { diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index c8398d795ce..2aef43c582d 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -191,10 +191,11 @@ impl Command { mut segment_config_size, ) = (0, 0, 0, 0, 0, 0); - for (block_range, tx_range) in &ranges { - let fixed_block_range = static_file_provider.find_fixed_range(block_range.start()); + for (block_range, header) in &ranges { + let fixed_block_range = + static_file_provider.find_fixed_range(segment, block_range.start()); let jar_provider = static_file_provider - .get_segment_provider(segment, || Some(fixed_block_range), None)? + .get_segment_provider_for_range(segment, || Some(fixed_block_range), None)? 
.ok_or_else(|| { eyre::eyre!("Failed to get segment provider for segment: {}", segment) })?; @@ -220,7 +221,7 @@ impl Command { row.add_cell(Cell::new(segment)) .add_cell(Cell::new(format!("{block_range}"))) .add_cell(Cell::new( - tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")), + header.tx_range().map_or("N/A".to_string(), |range| format!("{range}")), )) .add_cell(Cell::new(format!("{columns} x {rows}"))); if self.detailed_sizes { @@ -270,10 +271,12 @@ impl Command { let tx_range = { let start = ranges .iter() - .find_map(|(_, tx_range)| tx_range.map(|r| r.start())) + .find_map(|(_, header)| header.tx_range().map(|range| range.start())) .unwrap_or_default(); - let end = - ranges.iter().rev().find_map(|(_, tx_range)| tx_range.map(|r| r.end())); + let end = ranges + .iter() + .rev() + .find_map(|(_, header)| header.tx_range().map(|range| range.end())); end.map(|end| SegmentRangeInclusive::new(start, end)) }; diff --git a/crates/cli/commands/src/download.rs b/crates/cli/commands/src/download.rs index 8f09dc9b893..20bc7081f05 100644 --- a/crates/cli/commands/src/download.rs +++ b/crates/cli/commands/src/download.rs @@ -7,9 +7,10 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_fs_util as fs; use std::{ + borrow::Cow, io::{self, Read, Write}, path::Path, - sync::Arc, + sync::{Arc, OnceLock}, time::{Duration, Instant}, }; use tar::Archive; @@ -22,24 +23,109 @@ const MERKLE_BASE_URL: &str = "https://downloads.merkle.io"; const EXTENSION_TAR_LZ4: &str = ".tar.lz4"; const EXTENSION_TAR_ZSTD: &str = ".tar.zst"; +/// Global static download defaults +static DOWNLOAD_DEFAULTS: OnceLock = OnceLock::new(); + +/// Download configuration defaults +/// +/// Global defaults can be set via [`DownloadDefaults::try_init`]. +#[derive(Debug, Clone)] +pub struct DownloadDefaults { + /// List of available snapshot sources + pub available_snapshots: Vec>, + /// Default base URL for snapshots + pub default_base_url: Cow<'static, str>, + /// Optional custom long help text that overrides the generated help + pub long_help: Option, +} + +impl DownloadDefaults { + /// Initialize the global download defaults with this configuration + pub fn try_init(self) -> Result<(), Self> { + DOWNLOAD_DEFAULTS.set(self) + } + + /// Get a reference to the global download defaults + pub fn get_global() -> &'static DownloadDefaults { + DOWNLOAD_DEFAULTS.get_or_init(DownloadDefaults::default_download_defaults) + } + + /// Default download configuration with defaults from merkle.io and publicnode + pub fn default_download_defaults() -> Self { + Self { + available_snapshots: vec![ + Cow::Borrowed("https://www.merkle.io/snapshots (default, mainnet archive)"), + Cow::Borrowed("https://publicnode.com/snapshots (full nodes & testnets)"), + ], + default_base_url: Cow::Borrowed(MERKLE_BASE_URL), + long_help: None, + } + } + + /// Generates the long help text for the download URL argument using these defaults. + /// + /// If a custom long_help is set, it will be returned. Otherwise, help text is generated + /// from the available_snapshots list. 
+ pub fn long_help(&self) -> String { + if let Some(ref custom_help) = self.long_help { + return custom_help.clone(); + } + + let mut help = String::from( + "Specify a snapshot URL or let the command propose a default one.\n\nAvailable snapshot sources:\n", + ); + + for source in &self.available_snapshots { + help.push_str("- "); + help.push_str(source); + help.push('\n'); + } + + help.push_str( + "\nIf no URL is provided, the latest mainnet archive snapshot\nwill be proposed for download from ", + ); + help.push_str(self.default_base_url.as_ref()); + help + } + + /// Add a snapshot source to the list + pub fn with_snapshot(mut self, source: impl Into>) -> Self { + self.available_snapshots.push(source.into()); + self + } + + /// Replace all snapshot sources + pub fn with_snapshots(mut self, sources: Vec>) -> Self { + self.available_snapshots = sources; + self + } + + /// Set the default base URL, e.g. `https://downloads.merkle.io`. + pub fn with_base_url(mut self, url: impl Into>) -> Self { + self.default_base_url = url.into(); + self + } + + /// Builder: Set custom long help text, overriding the generated help + pub fn with_long_help(mut self, help: impl Into) -> Self { + self.long_help = Some(help.into()); + self + } +} + +impl Default for DownloadDefaults { + fn default() -> Self { + Self::default_download_defaults() + } +} + #[derive(Debug, Parser)] pub struct DownloadCommand { #[command(flatten)] env: EnvironmentArgs, - #[arg( - long, - short, - help = "Custom URL to download the snapshot from", - long_help = "Specify a snapshot URL or let the command propose a default one.\n\ - \n\ - Available snapshot sources:\n\ - - https://www.merkle.io/snapshots (default, mainnet archive)\n\ - - https://publicnode.com/snapshots (full nodes & testnets)\n\ - \n\ - If no URL is provided, the latest mainnet archive snapshot\n\ - will be proposed for download from merkle.io" - )] + /// Custom URL to download the snapshot from + #[arg(long, short, long_help = DownloadDefaults::get_global().long_help())] url: Option, } @@ -207,9 +293,10 @@ async fn stream_and_extract(url: &str, target_dir: &Path) -> Result<()> { Ok(()) } -// Builds default URL for latest mainnet archive snapshot +// Builds default URL for latest mainnet archive snapshot using configured defaults async fn get_latest_snapshot_url() -> Result { - let latest_url = format!("{MERKLE_BASE_URL}/latest.txt"); + let base_url = &DownloadDefaults::get_global().default_base_url; + let latest_url = format!("{base_url}/latest.txt"); let filename = Client::new() .get(latest_url) .send() @@ -220,5 +307,64 @@ async fn get_latest_snapshot_url() -> Result { .trim() .to_string(); - Ok(format!("{MERKLE_BASE_URL}/{filename}")) + Ok(format!("{base_url}/{filename}")) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_download_defaults_builder() { + let defaults = DownloadDefaults::default() + .with_snapshot("https://example.com/snapshots (example)") + .with_base_url("https://example.com"); + + assert_eq!(defaults.default_base_url, "https://example.com"); + assert_eq!(defaults.available_snapshots.len(), 3); // 2 defaults + 1 added + } + + #[test] + fn test_download_defaults_replace_snapshots() { + let defaults = DownloadDefaults::default().with_snapshots(vec![ + Cow::Borrowed("https://custom1.com"), + Cow::Borrowed("https://custom2.com"), + ]); + + assert_eq!(defaults.available_snapshots.len(), 2); + assert_eq!(defaults.available_snapshots[0], "https://custom1.com"); + } + + #[test] + fn test_long_help_generation() { + let defaults = 
DownloadDefaults::default(); + let help = defaults.long_help(); + + assert!(help.contains("Available snapshot sources:")); + assert!(help.contains("merkle.io")); + assert!(help.contains("publicnode.com")); + } + + #[test] + fn test_long_help_override() { + let custom_help = "This is custom help text for downloading snapshots."; + let defaults = DownloadDefaults::default().with_long_help(custom_help); + + let help = defaults.long_help(); + assert_eq!(help, custom_help); + assert!(!help.contains("Available snapshot sources:")); + } + + #[test] + fn test_builder_chaining() { + let defaults = DownloadDefaults::default() + .with_base_url("https://custom.example.com") + .with_snapshot("https://snapshot1.com") + .with_snapshot("https://snapshot2.com") + .with_long_help("Custom help for snapshots"); + + assert_eq!(defaults.default_base_url, "https://custom.example.com"); + assert_eq!(defaults.available_snapshots.len(), 4); // 2 defaults + 2 added + assert_eq!(defaults.long_help, Some("Custom help for snapshots".to_string())); + } } diff --git a/crates/cli/commands/src/export_era.rs b/crates/cli/commands/src/export_era.rs index dbedf1852e5..5f4f0306bb0 100644 --- a/crates/cli/commands/src/export_era.rs +++ b/crates/cli/commands/src/export_era.rs @@ -4,7 +4,7 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::{Args, Parser}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; -use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; +use reth_era::era1::types::execution::MAX_BLOCKS_PER_ERA1; use reth_era_utils as era1; use reth_provider::DatabaseProviderFactory; use std::{path::PathBuf, sync::Arc}; diff --git a/crates/cli/commands/src/import_core.rs b/crates/cli/commands/src/import_core.rs index 2370ebaa039..98f888bb9e3 100644 --- a/crates/cli/commands/src/import_core.rs +++ b/crates/cli/commands/src/import_core.rs @@ -102,6 +102,9 @@ where .sealed_header(provider_factory.last_block_number()?)? .expect("should have genesis"); + let static_file_producer = + StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); + while let Some(file_client) = reader.next_chunk::>(consensus.clone(), Some(sealed_header)).await? { @@ -121,7 +124,7 @@ where provider_factory.clone(), &consensus, Arc::new(file_client), - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()), + static_file_producer.clone(), import_config.no_state, executor.clone(), )?; diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index 68618361e7f..4b5c51585b3 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -2,7 +2,7 @@ use crate::common::{AccessRights, CliHeader, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_consensus::BlockHeader as AlloyBlockHeader; -use alloy_primitives::{B256, U256}; +use alloy_primitives::{Sealable, B256}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; @@ -13,7 +13,7 @@ use reth_provider::{ BlockNumReader, DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; -use std::{io::BufReader, path::PathBuf, str::FromStr, sync::Arc}; +use std::{io::BufReader, path::PathBuf, sync::Arc}; use tracing::info; pub mod without_evm; @@ -58,13 +58,9 @@ pub struct InitStateCommand { #[arg(long, value_name = "HEADER_FILE", verbatim_doc_comment)] pub header: Option, - /// Total difficulty of the header. 
- #[arg(long, value_name = "TOTAL_DIFFICULTY", verbatim_doc_comment)] - pub total_difficulty: Option, - /// Hash of the header. #[arg(long, value_name = "HEADER_HASH", verbatim_doc_comment)] - pub header_hash: Option, + pub header_hash: Option, } impl> InitStateCommand { @@ -88,16 +84,9 @@ impl> InitStateC let header = self.header.ok_or_else(|| eyre::eyre!("Header file must be provided"))?; let header = without_evm::read_header_from_file::< ::BlockHeader, - >(header)?; - - let header_hash = - self.header_hash.ok_or_else(|| eyre::eyre!("Header hash must be provided"))?; - let header_hash = B256::from_str(&header_hash)?; + >(&header)?; - let total_difficulty = self - .total_difficulty - .ok_or_else(|| eyre::eyre!("Total difficulty must be provided"))?; - let total_difficulty = U256::from_str(&total_difficulty)?; + let header_hash = self.header_hash.unwrap_or_else(|| header.hash_slow()); let last_block_number = provider_rw.last_block_number()?; @@ -105,7 +94,6 @@ impl> InitStateC without_evm::setup_without_evm( &provider_rw, SealedHeader::new(header, header_hash), - total_difficulty, |number| { let mut header = <::BlockHeader>::default(); @@ -146,3 +134,32 @@ impl InitStateCommand { Some(&self.env.chain) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::b256; + use reth_ethereum_cli::chainspec::EthereumChainSpecParser; + + #[test] + fn parse_init_state_command_with_without_evm() { + let cmd: InitStateCommand = InitStateCommand::parse_from([ + "reth", + "--chain", + "sepolia", + "--without-evm", + "--header", + "header.rlp", + "--header-hash", + "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + "state.jsonl", + ]); + assert_eq!(cmd.state.to_str().unwrap(), "state.jsonl"); + assert!(cmd.without_evm); + assert_eq!(cmd.header.unwrap().to_str().unwrap(), "header.rlp"); + assert_eq!( + cmd.header_hash.unwrap(), + b256!("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") + ); + } +} diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index 09711d45880..8da0bde068c 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -1,5 +1,5 @@ use alloy_consensus::BlockHeader; -use alloy_primitives::{BlockNumber, B256, U256}; +use alloy_primitives::{BlockNumber, B256}; use alloy_rlp::Decodable; use reth_codecs::Compact; use reth_node_builder::NodePrimitives; @@ -10,16 +10,22 @@ use reth_provider::{ }; use reth_stages::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; -use std::{fs::File, io::Read, path::PathBuf}; +use std::path::Path; use tracing::info; + /// Reads the header RLP from a file and returns the Header. -pub(crate) fn read_header_from_file(path: PathBuf) -> Result +/// +/// This supports both raw rlp bytes and rlp hex string. +pub(crate) fn read_header_from_file(path: &Path) -> Result where H: Decodable, { - let mut file = File::open(path)?; - let mut buf = Vec::new(); - file.read_to_end(&mut buf)?; + let buf = if let Ok(content) = reth_fs_util::read_to_string(path) { + alloy_primitives::hex::decode(content.trim())? + } else { + // If UTF-8 decoding fails, read as raw bytes + reth_fs_util::read(path)? 
+ }; let header = H::decode(&mut &buf[..])?; Ok(header) @@ -30,7 +36,6 @@ where pub fn setup_without_evm( provider_rw: &Provider, header: SealedHeader<::BlockHeader>, - total_difficulty: U256, header_factory: F, ) -> ProviderResult<()> where @@ -50,7 +55,7 @@ where info!(target: "reth::cli", "Appending first valid block."); - append_first_block(provider_rw, &header, total_difficulty)?; + append_first_block(provider_rw, &header)?; for stage in StageId::ALL { provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number()))?; @@ -68,7 +73,6 @@ where fn append_first_block( provider_rw: &Provider, header: &SealedHeaderFor, - total_difficulty: U256, ) -> ProviderResult<()> where Provider: BlockWriter::Block> @@ -85,16 +89,8 @@ where let sf_provider = provider_rw.static_file_provider(); - sf_provider.latest_writer(StaticFileSegment::Headers)?.append_header( - header, - total_difficulty, - &header.hash(), - )?; - sf_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(header.number())?; - sf_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(header.number())?; - Ok(()) } @@ -137,7 +133,7 @@ where for block_num in 1..=target_height { // TODO: should we fill with real parent_hash? let header = header_factory(block_num); - writer.append_header(&header, U256::ZERO, &B256::ZERO)?; + writer.append_header(&header, &B256::ZERO)?; } Ok(()) }); @@ -167,3 +163,85 @@ where Ok(()) } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::Header; + use alloy_primitives::{address, b256}; + use reth_db_common::init::init_genesis; + use reth_provider::{test_utils::create_test_provider_factory, DatabaseProviderFactory}; + use std::io::Write; + use tempfile::NamedTempFile; + + #[test] + fn test_read_header_from_file_hex_string() { + let header_rlp = "0xf90212a00d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dda01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479471562b71999873db5b286df957af199ec94617f7a0f496f3d199c51a1aaee67dac95f24d92ac13c60d25181e1eecd6eca5ddf32ac0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808206a4840365908a808468e975f09ad983011003846765746888676f312e32352e308664617277696ea06f485a167165ec12e0ab3e6ab59a7b88560b90306ac98a26eb294abf95a8c59b88000000000000000007"; + + let mut temp_file = NamedTempFile::new().unwrap(); + temp_file.write_all(header_rlp.as_bytes()).unwrap(); + temp_file.flush().unwrap(); + + let header: Header = read_header_from_file(temp_file.path()).unwrap(); + + assert_eq!(header.number, 1700); + assert_eq!( + header.parent_hash, + b256!("0d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dd") + ); + assert_eq!(header.beneficiary, address!("71562b71999873db5b286df957af199ec94617f7")); + } + + #[test] + fn test_read_header_from_file_raw_bytes() { + let header_rlp = 
"0xf90212a00d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dda01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479471562b71999873db5b286df957af199ec94617f7a0f496f3d199c51a1aaee67dac95f24d92ac13c60d25181e1eecd6eca5ddf32ac0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808206a4840365908a808468e975f09ad983011003846765746888676f312e32352e308664617277696ea06f485a167165ec12e0ab3e6ab59a7b88560b90306ac98a26eb294abf95a8c59b88000000000000000007"; + let header_bytes = + alloy_primitives::hex::decode(header_rlp.trim_start_matches("0x")).unwrap(); + + let mut temp_file = NamedTempFile::new().unwrap(); + temp_file.write_all(&header_bytes).unwrap(); + temp_file.flush().unwrap(); + + let header: Header = read_header_from_file(temp_file.path()).unwrap(); + + assert_eq!(header.number, 1700); + assert_eq!( + header.parent_hash, + b256!("0d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dd") + ); + assert_eq!(header.beneficiary, address!("71562b71999873db5b286df957af199ec94617f7")); + } + + #[test] + fn test_setup_without_evm_succeeds() { + let header_rlp = "0xf90212a00d84d79f59fc384a1f6402609a5b7253b4bfe7a4ae12608ed107273e5422b6dda01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479471562b71999873db5b286df957af199ec94617f7a0f496f3d199c51a1aaee67dac95f24d92ac13c60d25181e1eecd6eca5ddf32ac0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808206a4840365908a808468e975f09ad983011003846765746888676f312e32352e308664617277696ea06f485a167165ec12e0ab3e6ab59a7b88560b90306ac98a26eb294abf95a8c59b88000000000000000007"; + let header_bytes = + alloy_primitives::hex::decode(header_rlp.trim_start_matches("0x")).unwrap(); + + let mut temp_file = NamedTempFile::new().unwrap(); + temp_file.write_all(&header_bytes).unwrap(); + temp_file.flush().unwrap(); + + let header: Header = read_header_from_file(temp_file.path()).unwrap(); + let header_hash = b256!("4f05e4392969fc82e41f6d6a8cea379323b0b2d3ddf7def1a33eec03883e3a33"); + + let provider_factory = create_test_provider_factory(); + + init_genesis(&provider_factory).unwrap(); + + let provider_rw = provider_factory.database_provider_rw().unwrap(); + + setup_without_evm(&provider_rw, SealedHeader::new(header, header_hash), |number| Header { + number, + ..Default::default() + }) + .unwrap(); + + let static_files = 
provider_factory.static_file_provider(); + let writer = static_files.latest_writer(StaticFileSegment::Headers).unwrap(); + let actual_next_height = writer.next_block_number(); + let expected_next_height = 1701; + + assert_eq!(actual_next_height, expected_next_height); + } +} diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index c3e997e8343..d0d4facdaf4 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -5,18 +5,17 @@ use clap::{value_parser, Args, Parser}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; -use reth_cli_util::parse_socket_address; use reth_db::init_db; use reth_node_builder::NodeBuilder; use reth_node_core::{ args::{ - DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, NetworkArgs, - PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, + DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, MetricArgs, + NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, node_config::NodeConfig, version, }; -use std::{ffi::OsString, fmt, net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{ffi::OsString, fmt, path::PathBuf, sync::Arc}; /// Start the node #[derive(Debug, Parser)] @@ -39,11 +38,9 @@ pub struct NodeCommand, - /// Enable Prometheus metrics. - /// - /// The metrics will be served at the given interface and port. - #[arg(long, value_name = "SOCKET", value_parser = parse_socket_address, help_heading = "Metrics")] - pub metrics: Option, + /// Prometheus metrics configuration. + #[command(flatten)] + pub metrics: MetricArgs, /// Add a new instance of a node. /// @@ -225,7 +222,7 @@ mod tests { use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ethereum_cli::chainspec::{EthereumChainSpecParser, SUPPORTED_CHAINS}; use std::{ - net::{IpAddr, Ipv4Addr}, + net::{IpAddr, Ipv4Addr, SocketAddr}, path::Path, }; @@ -310,35 +307,38 @@ mod tests { #[test] fn parse_metrics_port() { - let cmd: NodeCommand = NodeCommand::try_parse_args_from([ - "reth", - "--metrics", - "9001", - "--builder.gaslimit", - "10000000", - ]) - .unwrap(); - assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); + let cmd: NodeCommand = + NodeCommand::try_parse_args_from([ "reth", + "--metrics", + "9001", + "--builder.gaslimit", + "10000000",]).unwrap(); + assert_eq!( + cmd.metrics.prometheus, + Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)) + ); - let cmd: NodeCommand = NodeCommand::try_parse_args_from([ - "reth", - "--metrics", - ":9001", - "--builder.gaslimit", - "10000000", - ]) - .unwrap(); - assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); + let cmd: NodeCommand = + NodeCommand::try_parse_args_from([ "reth", + "--metrics", + ":9001", + "--builder.gaslimit", + "10000000",]).unwrap(); + assert_eq!( + cmd.metrics.prometheus, + Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)) + ); - let cmd: NodeCommand = NodeCommand::try_parse_args_from([ - "reth", - "--metrics", - "localhost:9001", - "--builder.gaslimit", - "10000000", - ]) - .unwrap(); - assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); + let cmd: NodeCommand = + NodeCommand::try_parse_args_from([ "reth", + "--metrics", + "localhost:9001", + "--builder.gaslimit", + "10000000",]).unwrap(); + assert_eq!( + cmd.metrics.prometheus, + Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)) + ); } #[test] diff --git 
a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index 861fd836e76..c72ceca78e6 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -38,9 +38,9 @@ impl let header = (move || get_single_header(fetch_client.clone(), id)) .retry(backoff) - .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) + .notify(|err, _| tracing::warn!(target: "reth::cli", error = %err, "Error requesting header. Retrying...")) .await?; - println!("Successfully downloaded header: {header:?}"); + tracing::info!(target: "reth::cli", ?header, "Successfully downloaded header"); } Subcommands::Body { args, id } => { @@ -51,13 +51,13 @@ impl let hash = match id { BlockHashOrNumber::Hash(hash) => hash, BlockHashOrNumber::Number(number) => { - println!("Block number provided. Downloading header first..."); + tracing::info!(target: "reth::cli", "Block number provided. Downloading header first..."); let client = fetch_client.clone(); let header = (move || { get_single_header(client.clone(), BlockHashOrNumber::Number(number)) }) .retry(backoff) - .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) + .notify(|err, _| tracing::warn!(target: "reth::cli", error = %err, "Error requesting header. Retrying...")) .await?; header.hash() } @@ -67,7 +67,7 @@ impl client.get_block_bodies(vec![hash]) }) .retry(backoff) - .notify(|err, _| println!("Error requesting block: {err}. Retrying...")) + .notify(|err, _| tracing::warn!(target: "reth::cli", error = %err, "Error requesting block. Retrying...")) .await? .split(); if result.len() != 1 { @@ -77,7 +77,7 @@ impl ) } let body = result.into_iter().next().unwrap(); - println!("Successfully downloaded body: {body:?}") + tracing::info!(target: "reth::cli", ?body, "Successfully downloaded body") } Subcommands::Rlpx(command) => { command.execute().await?; @@ -192,6 +192,7 @@ impl DownloadArgs { let net = NetworkConfigBuilder::::new(p2p_secret_key) .peer_config(config.peers_config_with_basic_nodes_from_file(None)) .external_ip_resolver(self.network.nat) + .network_id(self.network.network_id) .boot_nodes(boot_nodes.clone()) .apply(|builder| { self.network.discovery.apply_to_builder(builder, rlpx_socket, boot_nodes) diff --git a/crates/cli/commands/src/prune.rs b/crates/cli/commands/src/prune.rs index de60fbfdb3b..cae0fa00901 100644 --- a/crates/cli/commands/src/prune.rs +++ b/crates/cli/commands/src/prune.rs @@ -1,5 +1,5 @@ //! Command that runs pruning without any limits. -use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; @@ -18,22 +18,23 @@ pub struct PruneCommand { impl> PruneCommand { /// Execute the `prune` command pub async fn execute>(self) -> eyre::Result<()> { - let Environment { config, provider_factory, .. 
} = self.env.init::(AccessRights::RW)?; - let prune_config = config.prune.unwrap_or_default(); + let env = self.env.init::(AccessRights::RW)?; + let provider_factory = env.provider_factory; + let config = env.config.prune; // Copy data from database to static files info!(target: "reth::cli", "Copying data from database to static files..."); let static_file_producer = - StaticFileProducer::new(provider_factory.clone(), prune_config.segments.clone()); + StaticFileProducer::new(provider_factory.clone(), config.segments.clone()); let lowest_static_file_height = static_file_producer.lock().copy_to_static_files()?.min_block_num(); info!(target: "reth::cli", ?lowest_static_file_height, "Copied data from database to static files"); // Delete data which has been copied to static files. if let Some(prune_tip) = lowest_static_file_height { - info!(target: "reth::cli", ?prune_tip, ?prune_config, "Pruning data from database..."); + info!(target: "reth::cli", ?prune_tip, ?config, "Pruning data from database..."); // Run the pruner according to the configuration, and don't enforce any limits on it - let mut pruner = PrunerBuilder::new(prune_config) + let mut pruner = PrunerBuilder::new(config) .delete_limit(usize::MAX) .build_with_provider_factory(provider_factory); diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 66227e10271..2c6e911d7bd 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -15,7 +15,7 @@ use reth_db_common::{ }; use reth_node_api::{HeaderTy, ReceiptTy, TxTy}; use reth_node_core::args::StageEnum; -use reth_provider::{DBProvider, DatabaseProviderFactory, StaticFileProviderFactory}; +use reth_provider::{DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, TrieWriter}; use reth_prune::PruneSegment; use reth_stages::StageId; use reth_static_file_types::StaticFileSegment; @@ -70,7 +70,6 @@ impl Command { StageEnum::Headers => { tx.clear::()?; tx.clear::>>()?; - tx.clear::()?; tx.clear::()?; reset_stage_checkpoint(tx, StageId::Headers)?; @@ -79,7 +78,6 @@ impl Command { StageEnum::Bodies => { tx.clear::()?; tx.clear::>>()?; - reset_prune_checkpoint(tx, PruneSegment::Transactions)?; tx.clear::()?; tx.clear::>>()?; @@ -138,6 +136,10 @@ impl Command { None, )?; } + StageEnum::MerkleChangeSets => { + provider_rw.clear_trie_changesets()?; + reset_stage_checkpoint(tx, StageId::MerkleChangeSets)?; + } StageEnum::AccountHistory | StageEnum::StorageHistory => { tx.clear::()?; tx.clear::()?; diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 921af75c78b..887f97ddddf 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -42,7 +42,7 @@ where Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, - ), + )?, to, from, evm_config, @@ -69,13 +69,6 @@ fn import_tables_with_range( to, ) })??; - output_db.update(|tx| { - tx.import_table_with_range::( - &db_tool.provider_factory.db_ref().tx()?, - Some(from), - to, - ) - })??; output_db.update(|tx| { tx.import_table_with_range::( &db_tool.provider_factory.db_ref().tx()?, diff --git a/crates/cli/commands/src/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs index 8b9ba5e937e..0e976d4235f 100644 --- a/crates/cli/commands/src/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -39,7 +39,7 @@ pub(crate) async fn 
dump_hashing_account_stage( let unwind_inner_tx = provider.into_tx(); - // TODO optimize we can actually just get the entries we need - output_db - .update(|tx| tx.import_dupsort::(&unwind_inner_tx))??; + output_db.update(|tx| { + tx.import_table_with_range::( + &unwind_inner_tx, + Some(BlockNumberAddress((from, Address::ZERO))), + BlockNumberAddress((to, Address::repeat_byte(0xff))), + ) + })??; output_db.update(|tx| tx.import_table::(&unwind_inner_tx))??; output_db.update(|tx| tx.import_dupsort::(&unwind_inner_tx))??; diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 83363184a82..c13f3a440fa 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -116,7 +116,6 @@ impl let components = components(provider_factory.chain_spec()); if let Some(listen_addr) = self.metrics { - info!(target: "reth::cli", "Starting metrics endpoint at {}", listen_addr); let config = MetricServerConfig::new( listen_addr, VersionInfo { @@ -151,7 +150,7 @@ impl let batch_size = self.batch_size.unwrap_or(self.to.saturating_sub(self.from) + 1); let etl_config = config.stages.etl.clone(); - let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); + let prune_modes = config.prune.segments.clone(); let (mut exec_stage, mut unwind_stage): (Box>, Option>>) = match self.stage { diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index 9ef2085a065..ffd8e330062 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -15,10 +15,7 @@ use reth_db::DatabaseEnv; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_evm::ConfigureEvm; use reth_exex::ExExManagerHandle; -use reth_provider::{ - providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, ChainStateBlockReader, - ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, -}; +use reth_provider::{providers::ProviderNodeTypes, BlockNumReader, ProviderFactory}; use reth_stages::{ sets::{DefaultStages, OfflineStages}, stages::ExecutionStage, @@ -60,54 +57,21 @@ impl> Command let components = components(provider_factory.chain_spec()); - let highest_static_file_block = provider_factory - .static_file_provider() - .get_highest_static_files() - .max_block_num() - .filter(|highest_static_file_block| *highest_static_file_block > target); - - // Execute a pipeline unwind if the start of the range overlaps the existing static - // files. If that's the case, then copy all available data from MDBX to static files, and - // only then, proceed with the unwind. - // - // We also execute a pipeline unwind if `offline` is specified, because we need to only - // unwind the data associated with offline stages. 
- if highest_static_file_block.is_some() || self.offline { - if self.offline { - info!(target: "reth::cli", "Performing an unwind for offline-only data!"); - } - - if let Some(highest_static_file_block) = highest_static_file_block { - info!(target: "reth::cli", ?target, ?highest_static_file_block, "Executing a pipeline unwind."); - } else { - info!(target: "reth::cli", ?target, "Executing a pipeline unwind."); - } - info!(target: "reth::cli", prune_config=?config.prune, "Using prune settings"); - - // This will build an offline-only pipeline if the `offline` flag is enabled - let mut pipeline = - self.build_pipeline(config, provider_factory, components.evm_config().clone())?; - - // Move all applicable data from database to static files. - pipeline.move_to_static_files()?; + if self.offline { + info!(target: "reth::cli", "Performing an unwind for offline-only data!"); + } - pipeline.unwind(target, None)?; - } else { - info!(target: "reth::cli", ?target, "Executing a database unwind."); - let provider = provider_factory.provider_rw()?; + let highest_static_file_block = provider_factory.provider()?.last_block_number()?; + info!(target: "reth::cli", ?target, ?highest_static_file_block, prune_config=?config.prune, "Executing a pipeline unwind."); - provider - .remove_block_and_execution_above(target) - .map_err(|err| eyre::eyre!("Transaction error on unwind: {err}"))?; + // This will build an offline-only pipeline if the `offline` flag is enabled + let mut pipeline = + self.build_pipeline(config, provider_factory, components.evm_config().clone())?; - // update finalized block if needed - let last_saved_finalized_block_number = provider.last_finalized_block_number()?; - if last_saved_finalized_block_number.is_none_or(|f| f > target) { - provider.save_finalized_block_number(target)?; - } + // Move all applicable data from database to static files. 
+ pipeline.move_to_static_files()?; - provider.commit()?; - } + pipeline.unwind(target, None)?; info!(target: "reth::cli", ?target, "Unwound blocks"); @@ -121,7 +85,7 @@ impl> Command evm_config: impl ConfigureEvm + 'static, ) -> Result, eyre::Error> { let stage_conf = &config.stages; - let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); + let prune_modes = config.prune.segments.clone(); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index ca88c131ff6..f4636f5f83b 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -283,7 +283,7 @@ pub fn type_name() -> String { // With alloy type transition the types are renamed, we map them here to the original name so that test vector files remain consistent let name = std::any::type_name::(); match name { - "alloy_consensus::transaction::typed::EthereumTypedTransaction" => "Transaction".to_string(), + "alloy_consensus::transaction::envelope::EthereumTypedTransaction" => "Transaction".to_string(), "alloy_consensus::transaction::envelope::EthereumTxEnvelope" => "TransactionSigned".to_string(), name => { name.split("::").last().unwrap_or(std::any::type_name::()).to_string() diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index 1bbd2604f97..10b94695399 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -54,7 +54,7 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { match table.as_str() { $( stringify!($table_type) => { - println!("Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type$(::<$($generic),+>)?::NAME); + tracing::info!(target: "reth::cli", "Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type$(::<$($generic),+>)?::NAME); generate_vector!($table_type$(<$($generic),+>)?, $per_table, $table_or_dup); }, @@ -69,7 +69,6 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { generate!([ (CanonicalHeaders, PER_TABLE, TABLE), - (HeaderTerminalDifficulties, PER_TABLE, TABLE), (HeaderNumbers, PER_TABLE, TABLE), (Headers
, PER_TABLE, TABLE), (BlockBodyIndices, PER_TABLE, TABLE), diff --git a/crates/cli/runner/src/lib.rs b/crates/cli/runner/src/lib.rs index 4f8e13ce8cb..79dc6b21142 100644 --- a/crates/cli/runner/src/lib.rs +++ b/crates/cli/runner/src/lib.rs @@ -36,11 +36,15 @@ impl CliRunner { pub const fn from_runtime(tokio_runtime: tokio::runtime::Runtime) -> Self { Self { tokio_runtime } } -} -// === impl CliRunner === + /// Executes an async block on the runtime and blocks until completion. + pub fn block_on(&self, fut: F) -> T + where + F: Future, + { + self.tokio_runtime.block_on(fut) + } -impl CliRunner { /// Executes the given _async_ command on the tokio runtime until the command future resolves or /// until the process receives a `SIGINT` or `SIGTERM` signal. /// diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index c1c5ef96075..dd2e7046b0c 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -23,8 +23,8 @@ pub struct Config { // TODO(onbjerg): Can we make this easier to maintain when we add/remove stages? pub stages: StageConfig, /// Configuration for pruning. - #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))] - pub prune: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub prune: PruneConfig, /// Configuration for the discovery service. pub peers: PeersConfig, /// Configuration for peer sessions. @@ -33,8 +33,8 @@ pub struct Config { impl Config { /// Sets the pruning configuration. - pub fn update_prune_config(&mut self, prune_config: PruneConfig) { - self.prune = Some(prune_config); + pub const fn set_prune_config(&mut self, prune_config: PruneConfig) { + self.prune = prune_config; } } @@ -440,20 +440,25 @@ pub struct PruneConfig { impl Default for PruneConfig { fn default() -> Self { - Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::none() } + Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::default() } } } impl PruneConfig { + /// Returns whether this configuration is the default one. + pub fn is_default(&self) -> bool { + self == &Self::default() + } + /// Returns whether there is any kind of receipt pruning configuration. - pub fn has_receipts_pruning(&self) -> bool { - self.segments.receipts.is_some() || !self.segments.receipts_log_filter.is_empty() + pub const fn has_receipts_pruning(&self) -> bool { + self.segments.receipts.is_some() } /// Merges another `PruneConfig` into this one, taking values from the other config if and only /// if the corresponding value in this config is not set. 
- pub fn merge(&mut self, other: Option) { - let Some(other) = other else { return }; + pub fn merge(&mut self, other: Self) { + #[expect(deprecated)] let Self { block_interval, segments: @@ -464,7 +469,8 @@ impl PruneConfig { account_history, storage_history, bodies_history, - receipts_log_filter, + merkle_changesets, + receipts_log_filter: (), }, } = other; @@ -480,10 +486,8 @@ impl PruneConfig { self.segments.account_history = self.segments.account_history.or(account_history); self.segments.storage_history = self.segments.storage_history.or(storage_history); self.segments.bodies_history = self.segments.bodies_history.or(bodies_history); - - if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() { - self.segments.receipts_log_filter = receipts_log_filter; - } + // Merkle changesets is not optional, so we just replace it if provided + self.segments.merkle_changesets = merkle_changesets; } } @@ -510,10 +514,9 @@ where mod tests { use super::{Config, EXTENSION}; use crate::PruneConfig; - use alloy_primitives::Address; use reth_network_peers::TrustedPeer; - use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig}; - use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration}; + use reth_prune_types::{PruneMode, PruneModes}; + use std::{path::Path, str::FromStr, time::Duration}; fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) { let temp_dir = tempfile::tempdir().unwrap(); @@ -1001,10 +1004,9 @@ receipts = 'full' account_history: None, storage_history: Some(PruneMode::Before(5000)), bodies_history: None, - receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([( - Address::random(), - PruneMode::Full, - )])), + merkle_changesets: PruneMode::Before(0), + #[expect(deprecated)] + receipts_log_filter: (), }, }; @@ -1017,15 +1019,13 @@ receipts = 'full' account_history: Some(PruneMode::Distance(2000)), storage_history: Some(PruneMode::Distance(3000)), bodies_history: None, - receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([ - (Address::random(), PruneMode::Distance(1000)), - (Address::random(), PruneMode::Before(2000)), - ])), + merkle_changesets: PruneMode::Distance(10000), + #[expect(deprecated)] + receipts_log_filter: (), }, }; - let original_filter = config1.segments.receipts_log_filter.clone(); - config1.merge(Some(config2)); + config1.merge(config2); // Check that the configuration has been merged. 
Any configuration present in config1 // should not be overwritten by config2 @@ -1035,7 +1035,7 @@ receipts = 'full' assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000))); assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000))); assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000))); - assert_eq!(config1.segments.receipts_log_filter, original_filter); + assert_eq!(config1.segments.merkle_changesets, PruneMode::Distance(10000)); } #[test] diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 22e349af0ba..f6498529eed 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -16,7 +16,7 @@ use alloy_consensus::Header; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256}; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{ - constants::{MAXIMUM_GAS_LIMIT_BLOCK, MINIMUM_GAS_LIMIT}, + constants::{GAS_LIMIT_BOUND_DIVISOR, MAXIMUM_GAS_LIMIT_BLOCK, MINIMUM_GAS_LIMIT}, transaction::error::InvalidTransactionError, Block, BlockHeader, GotExpected, GotExpectedBoxed, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, @@ -62,8 +62,9 @@ pub trait Consensus: HeaderValidator { /// Validate a block disregarding world state, i.e. things that can be checked before sender /// recovery and execution. /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and - /// 11.1 "Ommer Validation". + /// See the Yellow Paper sections 4.4.2 "Holistic Validity", 4.4.4 "Block Header Validity". + /// Note: Ommer Validation (previously section 11.1) has been deprecated since the Paris hard + /// fork transition to proof of stake. /// /// **This should not be called for the genesis block**. /// @@ -364,7 +365,7 @@ pub enum ConsensusError { }, /// Error when the child gas limit exceeds the maximum allowed increase. - #[error("child gas_limit {child_gas_limit} max increase is {parent_gas_limit}/1024")] + #[error("child gas_limit {child_gas_limit} exceeds the max allowed increase ({parent_gas_limit}/{GAS_LIMIT_BOUND_DIVISOR})")] GasLimitInvalidIncrease { /// The parent gas limit. parent_gas_limit: u64, @@ -393,7 +394,7 @@ pub enum ConsensusError { }, /// Error when the child gas limit exceeds the maximum allowed decrease. - #[error("child gas_limit {child_gas_limit} max decrease is {parent_gas_limit}/1024")] + #[error("child gas_limit {child_gas_limit} is below the max allowed decrease ({parent_gas_limit}/{GAS_LIMIT_BOUND_DIVISOR})")] GasLimitInvalidDecrease { /// The parent gas limit. parent_gas_limit: u64, diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index dd0f9ae1e30..0b917ebc616 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,3 +1,23 @@ +//! A consensus implementation that does nothing. +//! +//! This module provides `NoopConsensus`, a consensus implementation that performs no validation +//! and always returns `Ok(())` for all validation methods. Useful for testing and scenarios +//! where consensus validation is not required. +//! +//! # Examples +//! +//! ```rust +//! use reth_consensus::noop::NoopConsensus; +//! use std::sync::Arc; +//! +//! let consensus = NoopConsensus::default(); +//! let consensus_arc = NoopConsensus::arc(); +//! ``` +//! +//! # Warning +//! +//! **Not for production use** - provides no security guarantees or consensus validation. 
+ use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; use alloc::sync::Arc; use alloy_primitives::B256; @@ -7,6 +27,9 @@ use reth_primitives_traits::{ }; /// A Consensus implementation that does nothing. +/// +/// Always returns `Ok(())` for all validation methods. Suitable for testing and scenarios +/// where consensus validation is not required. #[derive(Debug, Copy, Clone, Default)] #[non_exhaustive] pub struct NoopConsensus; @@ -19,10 +42,12 @@ impl NoopConsensus { } impl HeaderValidator for NoopConsensus { + /// Validates a header (no-op implementation). fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { Ok(()) } + /// Validates a header against its parent (no-op implementation). fn validate_header_against_parent( &self, _header: &SealedHeader, @@ -39,6 +64,7 @@ impl HeaderValidator for NoopConsensus { impl Consensus for NoopConsensus { type Error = ConsensusError; + /// Validates body against header (no-op implementation). fn validate_body_against_header( &self, _body: &B::Body, @@ -47,12 +73,14 @@ impl Consensus for NoopConsensus { Ok(()) } + /// Validates block before execution (no-op implementation). fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), Self::Error> { Ok(()) } } impl FullConsensus for NoopConsensus { + /// Validates block after execution (no-op implementation). fn validate_block_post_execution( &self, _block: &RecoveredBlock, diff --git a/crates/consensus/debug-client/src/providers/rpc.rs b/crates/consensus/debug-client/src/providers/rpc.rs index 0c9dfbce7de..f670639dc66 100644 --- a/crates/consensus/debug-client/src/providers/rpc.rs +++ b/crates/consensus/debug-client/src/providers/rpc.rs @@ -61,34 +61,42 @@ where type Block = PrimitiveBlock; async fn subscribe_blocks(&self, tx: Sender) { - let Ok(mut stream) = self.full_block_stream().await.inspect_err(|err| { - warn!( - target: "consensus::debug-client", - %err, - url=%self.url, - "Failed to subscribe to blocks", - ); - }) else { - return - }; + loop { + let Ok(mut stream) = self.full_block_stream().await.inspect_err(|err| { + warn!( + target: "consensus::debug-client", + %err, + url=%self.url, + "Failed to subscribe to blocks", + ); + }) else { + return + }; - while let Some(res) = stream.next().await { - match res { - Ok(block) => { - if tx.send((self.convert)(block)).await.is_err() { - // Channel closed. - break; + while let Some(res) = stream.next().await { + match res { + Ok(block) => { + if tx.send((self.convert)(block)).await.is_err() { + // Channel closed. + break; + } + } + Err(err) => { + warn!( + target: "consensus::debug-client", + %err, + url=%self.url, + "Failed to fetch a block", + ); } - } - Err(err) => { - warn!( - target: "consensus::debug-client", - %err, - url=%self.url, - "Failed to fetch a block", - ); } } + // if the stream terminated we want to re-establish it again + debug!( + target: "consensus::debug-client", + url=%self.url, + "Re-establishing block subscription", + ); } } diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 89d07b19023..afc08ca061e 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -1,7 +1,7 @@ //! Utilities for end-to-end tests.
use node::NodeTestContext; -use reth_chainspec::{ChainSpec, EthChainSpec}; +use reth_chainspec::ChainSpec; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_engine_local::LocalPayloadAttributesBuilder; use reth_network_api::test_utils::PeersHandleProvider; @@ -13,12 +13,9 @@ use reth_node_builder::{ NodeConfig, NodeHandle, NodePrimitives, NodeTypes, NodeTypesWithDBAdapter, PayloadAttributesBuilder, PayloadTypes, }; -use reth_node_core::args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}; use reth_provider::providers::{BlockchainProvider, NodeTypesForProvider}; -use reth_rpc_server_types::RpcModuleSelection; use reth_tasks::TaskManager; use std::sync::Arc; -use tracing::{span, Level}; use wallet::Wallet; /// Wrapper type to create test nodes @@ -46,6 +43,10 @@ mod rpc; /// Utilities for creating and writing RLP test data pub mod test_rlp_utils; +/// Builder for configuring test node setups +mod setup_builder; +pub use setup_builder::E2ETestSetupBuilder; + /// Creates the initial setup with `num_nodes` started and interconnected. pub async fn setup( num_nodes: usize, @@ -54,61 +55,15 @@ pub async fn setup( attributes_generator: impl Fn(u64) -> <::Payload as PayloadTypes>::PayloadBuilderAttributes + Send + Sync + Copy + 'static, ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where - N: Default + Node> + NodeTypesForProvider, - N::ComponentsBuilder: NodeComponentsBuilder< - TmpNodeAdapter, - Components: NodeComponents, Network: PeersHandleProvider>, - >, - N::AddOns: RethRpcAddOns> + EngineValidatorAddOn>, + N: NodeBuilderHelper, LocalPayloadAttributesBuilder: PayloadAttributesBuilder<<::Payload as PayloadTypes>::PayloadAttributes>, TmpNodeAddOnsHandle: RpcHandleProvider, TmpNodeEthApi>, { - let tasks = TaskManager::current(); - let exec = tasks.executor(); - - let network_config = NetworkArgs { - discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, - ..NetworkArgs::default() - }; - - // Create nodes and peer them - let mut nodes: Vec> = Vec::with_capacity(num_nodes); - - for idx in 0..num_nodes { - let node_config = NodeConfig::new(chain_spec.clone()) - .with_network(network_config.clone()) - .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()) - .set_dev(is_dev); - - let span = span!(Level::INFO, "node", idx); - let _enter = span.enter(); - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) - .testing_node(exec.clone()) - .node(Default::default()) - .launch() - .await?; - - let mut node = NodeTestContext::new(node, attributes_generator).await?; - - // Connect each node in a chain. - if let Some(previous_node) = nodes.last_mut() { - previous_node.connect(&mut node).await; - } - - // Connect last node with the first if there are more than two - if idx + 1 == num_nodes && - num_nodes > 2 && - let Some(first_node) = nodes.first_mut() - { - node.connect(first_node).await; - } - - nodes.push(node); - } - - Ok((nodes, tasks, Wallet::default().with_chain_id(chain_spec.chain().into()))) + E2ETestSetupBuilder::new(num_nodes, chain_spec, attributes_generator) + .with_node_config_modifier(move |config| config.set_dev(is_dev)) + .build() + .await } /// Creates the initial setup with `num_nodes` started and interconnected. 
@@ -159,71 +114,12 @@ where PayloadAttributesBuilder<::PayloadAttributes>, TmpNodeAddOnsHandle: RpcHandleProvider, TmpNodeEthApi>, { - let tasks = TaskManager::current(); - let exec = tasks.executor(); - - let network_config = NetworkArgs { - discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, - ..NetworkArgs::default() - }; - - // Create nodes and peer them - let mut nodes: Vec> = Vec::with_capacity(num_nodes); - - for idx in 0..num_nodes { - let node_config = NodeConfig::new(chain_spec.clone()) - .with_network(network_config.clone()) - .with_unused_ports() - .with_rpc( - RpcServerArgs::default() - .with_unused_ports() - .with_http() - .with_http_api(RpcModuleSelection::All), - ) - .set_dev(is_dev); - - let span = span!(Level::INFO, "node", idx); - let _enter = span.enter(); - let node = N::default(); - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) - .testing_node(exec.clone()) - .with_types_and_provider::>() - .with_components(node.components_builder()) - .with_add_ons(node.add_ons()) - .launch_with_fn(|builder| { - let launcher = EngineNodeLauncher::new( - builder.task_executor().clone(), - builder.config().datadir(), - tree_config.clone(), - ); - builder.launch_with(launcher) - }) - .await?; - - let mut node = NodeTestContext::new(node, attributes_generator).await?; - - let genesis = node.block_hash(0); - node.update_forkchoice(genesis, genesis).await?; - - // Connect each node in a chain if requested. - if connect_nodes { - if let Some(previous_node) = nodes.last_mut() { - previous_node.connect(&mut node).await; - } - - // Connect last node with the first if there are more than two - if idx + 1 == num_nodes && - num_nodes > 2 && - let Some(first_node) = nodes.first_mut() - { - node.connect(first_node).await; - } - } - - nodes.push(node); - } - - Ok((nodes, tasks, Wallet::default().with_chain_id(chain_spec.chain().into()))) + E2ETestSetupBuilder::new(num_nodes, chain_spec, attributes_generator) + .with_tree_config_modifier(move |_| tree_config.clone()) + .with_node_config_modifier(move |config| config.set_dev(is_dev)) + .with_connect_nodes(connect_nodes) + .build() + .await } // Type aliases @@ -263,12 +159,6 @@ where >, > + Node< TmpNodeAdapter>>, - Primitives: NodePrimitives< - BlockHeader = alloy_consensus::Header, - BlockBody = alloy_consensus::BlockBody< - ::SignedTx, - >, - >, ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter>>, Components: NodeComponents< @@ -298,12 +188,6 @@ where >, > + Node< TmpNodeAdapter>>, - Primitives: NodePrimitives< - BlockHeader = alloy_consensus::Header, - BlockBody = alloy_consensus::BlockBody< - ::SignedTx, - >, - >, ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter>>, Components: NodeComponents< diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index b3f9b027fba..4e185ce9693 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -57,8 +57,9 @@ impl PayloadTestContext { /// Wait until the best built payload is ready pub async fn wait_for_built_payload(&self, payload_id: PayloadId) { loop { - let payload = self.payload_builder.best_payload(payload_id).await.unwrap().unwrap(); - if payload.block().body().transactions().is_empty() { + let payload = + self.payload_builder.best_payload(payload_id).await.transpose().ok().flatten(); + if payload.is_none_or(|p| p.block().body().transactions().is_empty()) { tokio::time::sleep(std::time::Duration::from_millis(20)).await; continue } diff --git 
a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 96dda811735..ff030c390b9 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -1,4 +1,5 @@ -use alloy_consensus::TxEnvelope; +use alloy_consensus::{EthereumTxEnvelope, TxEip4844Variant}; +use alloy_eips::eip7594::BlobTransactionSidecarVariant; use alloy_network::eip2718::Decodable2718; use alloy_primitives::{Bytes, B256}; use reth_chainspec::EthereumHardforks; @@ -30,9 +31,12 @@ where } /// Retrieves a transaction envelope by its hash - pub async fn envelope_by_hash(&self, hash: B256) -> eyre::Result { + pub async fn envelope_by_hash( + &self, + hash: B256, + ) -> eyre::Result>> { let tx = self.inner.debug_api().raw_transaction(hash).await?.unwrap(); let tx = tx.to_vec(); - Ok(TxEnvelope::decode_2718(&mut tx.as_ref()).unwrap()) + Ok(EthereumTxEnvelope::decode_2718(&mut tx.as_ref()).unwrap()) } } diff --git a/crates/e2e-test-utils/src/setup_builder.rs b/crates/e2e-test-utils/src/setup_builder.rs new file mode 100644 index 00000000000..8de2280fe41 --- /dev/null +++ b/crates/e2e-test-utils/src/setup_builder.rs @@ -0,0 +1,210 @@ +//! Builder for configuring and creating test node setups. +//! +//! This module provides a flexible builder API for setting up test nodes with custom +//! configurations through closures that modify `NodeConfig` and `TreeConfig`. + +use crate::{node::NodeTestContext, wallet::Wallet, NodeBuilderHelper, NodeHelperType, TmpDB}; +use reth_chainspec::EthChainSpec; +use reth_engine_local::LocalPayloadAttributesBuilder; +use reth_node_builder::{ + EngineNodeLauncher, NodeBuilder, NodeConfig, NodeHandle, NodeTypes, NodeTypesWithDBAdapter, + PayloadAttributesBuilder, PayloadTypes, +}; +use reth_node_core::args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}; +use reth_provider::providers::BlockchainProvider; +use reth_rpc_server_types::RpcModuleSelection; +use reth_tasks::TaskManager; +use std::sync::Arc; +use tracing::{span, Level}; + +/// Type alias for tree config modifier closure +type TreeConfigModifier = + Box reth_node_api::TreeConfig + Send + Sync>; + +/// Type alias for node config modifier closure +type NodeConfigModifier = Box) -> NodeConfig + Send + Sync>; + +/// Builder for configuring and creating test node setups. +/// +/// This builder allows customizing test node configurations through closures that +/// modify `NodeConfig` and `TreeConfig`. It avoids code duplication by centralizing +/// the node creation logic. +pub struct E2ETestSetupBuilder +where + N: NodeBuilderHelper, + F: Fn(u64) -> <::Payload as PayloadTypes>::PayloadBuilderAttributes + + Send + + Sync + + Copy + + 'static, + LocalPayloadAttributesBuilder: + PayloadAttributesBuilder<::PayloadAttributes>, +{ + num_nodes: usize, + chain_spec: Arc, + attributes_generator: F, + connect_nodes: bool, + tree_config_modifier: Option, + node_config_modifier: Option>, +} + +impl E2ETestSetupBuilder +where + N: NodeBuilderHelper, + F: Fn(u64) -> <::Payload as PayloadTypes>::PayloadBuilderAttributes + + Send + + Sync + + Copy + + 'static, + LocalPayloadAttributesBuilder: + PayloadAttributesBuilder<::PayloadAttributes>, +{ + /// Creates a new builder with the required parameters. + pub fn new(num_nodes: usize, chain_spec: Arc, attributes_generator: F) -> Self { + Self { + num_nodes, + chain_spec, + attributes_generator, + connect_nodes: true, + tree_config_modifier: None, + node_config_modifier: None, + } + } + + /// Sets whether nodes should be interconnected (default: true). 
+ pub const fn with_connect_nodes(mut self, connect_nodes: bool) -> Self { + self.connect_nodes = connect_nodes; + self + } + + /// Sets a modifier function for the tree configuration. + /// + /// The closure receives the base tree config and returns a modified version. + pub fn with_tree_config_modifier(mut self, modifier: G) -> Self + where + G: Fn(reth_node_api::TreeConfig) -> reth_node_api::TreeConfig + Send + Sync + 'static, + { + self.tree_config_modifier = Some(Box::new(modifier)); + self + } + + /// Sets a modifier function for the node configuration. + /// + /// The closure receives the base node config and returns a modified version. + pub fn with_node_config_modifier(mut self, modifier: G) -> Self + where + G: Fn(NodeConfig) -> NodeConfig + Send + Sync + 'static, + { + self.node_config_modifier = Some(Box::new(modifier)); + self + } + + /// Builds and launches the test nodes. + pub async fn build( + self, + ) -> eyre::Result<( + Vec>>>, + TaskManager, + Wallet, + )> { + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let network_config = NetworkArgs { + discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, + ..NetworkArgs::default() + }; + + // Apply tree config modifier if present + let tree_config = if let Some(modifier) = self.tree_config_modifier { + modifier(reth_node_api::TreeConfig::default()) + } else { + reth_node_api::TreeConfig::default() + }; + + let mut nodes: Vec> = Vec::with_capacity(self.num_nodes); + + for idx in 0..self.num_nodes { + // Create base node config + let base_config = NodeConfig::new(self.chain_spec.clone()) + .with_network(network_config.clone()) + .with_unused_ports() + .with_rpc( + RpcServerArgs::default() + .with_unused_ports() + .with_http() + .with_http_api(RpcModuleSelection::All), + ); + + // Apply node config modifier if present + let node_config = if let Some(modifier) = &self.node_config_modifier { + modifier(base_config) + } else { + base_config + }; + + let span = span!(Level::INFO, "node", idx); + let _enter = span.enter(); + let node = N::default(); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) + .testing_node(exec.clone()) + .with_types_and_provider::>() + .with_components(node.components_builder()) + .with_add_ons(node.add_ons()) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + builder.task_executor().clone(), + builder.config().datadir(), + tree_config.clone(), + ); + builder.launch_with(launcher) + }) + .await?; + + let mut node = NodeTestContext::new(node, self.attributes_generator).await?; + + let genesis = node.block_hash(0); + node.update_forkchoice(genesis, genesis).await?; + + // Connect nodes if requested + if self.connect_nodes { + if let Some(previous_node) = nodes.last_mut() { + previous_node.connect(&mut node).await; + } + + // Connect last node with the first if there are more than two + if idx + 1 == self.num_nodes && + self.num_nodes > 2 && + let Some(first_node) = nodes.first_mut() + { + node.connect(first_node).await; + } + } + + nodes.push(node); + } + + Ok((nodes, tasks, Wallet::default().with_chain_id(self.chain_spec.chain().into()))) + } +} + +impl std::fmt::Debug for E2ETestSetupBuilder +where + N: NodeBuilderHelper, + F: Fn(u64) -> <::Payload as PayloadTypes>::PayloadBuilderAttributes + + Send + + Sync + + Copy + + 'static, + LocalPayloadAttributesBuilder: + PayloadAttributesBuilder<::PayloadAttributes>, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + 
f.debug_struct("E2ETestSetupBuilder") + .field("num_nodes", &self.num_nodes) + .field("connect_nodes", &self.connect_nodes) + .field("tree_config_modifier", &self.tree_config_modifier.as_ref().map(|_| "")) + .field("node_config_modifier", &self.node_config_modifier.as_ref().map(|_| "")) + .finish_non_exhaustive() + } +} diff --git a/crates/e2e-test-utils/src/setup_import.rs b/crates/e2e-test-utils/src/setup_import.rs index 81e5a386aac..c93d27d258c 100644 --- a/crates/e2e-test-utils/src/setup_import.rs +++ b/crates/e2e-test-utils/src/setup_import.rs @@ -125,7 +125,7 @@ pub async fn setup_engine_with_chain_import( db.clone(), chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone())?, - ); + )?; // Initialize genesis if needed reth_db_common::init::init_genesis(&provider_factory)?; @@ -324,7 +324,8 @@ mod tests { chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone()) .unwrap(), - ); + ) + .expect("failed to create provider factory"); // Initialize genesis reth_db_common::init::init_genesis(&provider_factory).unwrap(); @@ -384,7 +385,8 @@ mod tests { chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_only(static_files_path, false) .unwrap(), - ); + ) + .expect("failed to create provider factory"); let provider = provider_factory.database_provider_ro().unwrap(); @@ -475,7 +477,8 @@ mod tests { db.clone(), chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_write(static_files_path).unwrap(), - ); + ) + .expect("failed to create provider factory"); // Initialize genesis reth_db_common::init::init_genesis(&provider_factory).unwrap(); diff --git a/crates/e2e-test-utils/src/test_rlp_utils.rs b/crates/e2e-test-utils/src/test_rlp_utils.rs index b33b598fd0b..bcfb9faa9d8 100644 --- a/crates/e2e-test-utils/src/test_rlp_utils.rs +++ b/crates/e2e-test-utils/src/test_rlp_utils.rs @@ -157,7 +157,6 @@ pub fn write_blocks_to_rlp(blocks: &[SealedBlock], path: &Path) -> std::io::Resu ); // Debug: check what's in the encoded data - debug!(target: "e2e::import", "Block {} encoded to {} bytes", i, buf.len()); if buf.len() < 20 { debug!(target: "e2e::import", " Raw bytes: {:?}", &buf); } else { diff --git a/crates/e2e-test-utils/src/testsuite/actions/engine_api.rs b/crates/e2e-test-utils/src/testsuite/actions/engine_api.rs index 6548fc951c6..d4053228d9c 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/engine_api.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/engine_api.rs @@ -8,6 +8,7 @@ use alloy_rpc_types_engine::{ use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest}; use eyre::Result; use futures_util::future::BoxFuture; +use reth_ethereum_primitives::TransactionSigned; use reth_node_api::{EngineTypes, PayloadTypes}; use reth_rpc_api::clients::{EngineApiClient, EthApiClient}; use std::marker::PhantomData; @@ -85,7 +86,14 @@ where const MAX_RETRIES: u32 = 5; while retries < MAX_RETRIES { - match EthApiClient::::block_by_number( + match EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::block_by_number( source_rpc, alloy_eips::BlockNumberOrTag::Number(self.block_number), true, // include transactions diff --git a/crates/e2e-test-utils/src/testsuite/actions/fork.rs b/crates/e2e-test-utils/src/testsuite/actions/fork.rs index 1511d90fa59..154b695adde 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/fork.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/fork.rs @@ -8,6 +8,7 
@@ use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes}; use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest}; use eyre::Result; use futures_util::future::BoxFuture; +use reth_ethereum_primitives::TransactionSigned; use reth_node_api::{EngineTypes, PayloadTypes}; use reth_rpc_api::clients::EthApiClient; use std::marker::PhantomData; @@ -136,6 +137,7 @@ where Block, Receipt, Header, + TransactionSigned, >::block_by_number( rpc_client, alloy_eips::BlockNumberOrTag::Number(self.fork_base_block), @@ -248,11 +250,14 @@ where // walk backwards through the chain until we reach the fork base while current_number > self.fork_base_number { - let block = EthApiClient::::block_by_hash( - rpc_client, - current_hash, - false, - ) + let block = EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::block_by_hash(rpc_client, current_hash, false) .await? .ok_or_else(|| { eyre::eyre!("Block with hash {} not found during fork validation", current_hash) diff --git a/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs b/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs index a00ab5e8675..da1cf98e617 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs @@ -4,6 +4,7 @@ use crate::testsuite::{Action, Environment}; use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest}; use eyre::Result; use futures_util::future::BoxFuture; +use reth_ethereum_primitives::TransactionSigned; use reth_node_api::EngineTypes; use reth_rpc_api::clients::EthApiClient; use std::time::Duration; @@ -74,18 +75,28 @@ where let node_b_client = &env.node_clients[self.node_b]; // Get latest block from each node - let block_a = EthApiClient::::block_by_number( - &node_a_client.rpc, - alloy_eips::BlockNumberOrTag::Latest, - false, + let block_a = EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::block_by_number( + &node_a_client.rpc, alloy_eips::BlockNumberOrTag::Latest, false ) .await? .ok_or_else(|| eyre::eyre!("Failed to get latest block from node {}", self.node_a))?; - let block_b = EthApiClient::::block_by_number( - &node_b_client.rpc, - alloy_eips::BlockNumberOrTag::Latest, - false, + let block_b = EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::block_by_number( + &node_b_client.rpc, alloy_eips::BlockNumberOrTag::Latest, false ) .await? 
.ok_or_else(|| eyre::eyre!("Failed to get latest block from node {}", self.node_b))?; @@ -278,6 +289,7 @@ where Block, Receipt, Header, + TransactionSigned, >::block_by_number( &node_a_client.rpc, alloy_eips::BlockNumberOrTag::Latest, @@ -294,6 +306,7 @@ where Block, Receipt, Header, + TransactionSigned, >::block_by_number( &node_b_client.rpc, alloy_eips::BlockNumberOrTag::Latest, diff --git a/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs index 74a5e2ba1d5..fe9e9133aec 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs @@ -11,6 +11,7 @@ use alloy_rpc_types_engine::{ use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest}; use eyre::Result; use futures_util::future::BoxFuture; +use reth_ethereum_primitives::TransactionSigned; use reth_node_api::{EngineTypes, PayloadTypes}; use reth_rpc_api::clients::{EngineApiClient, EthApiClient}; use std::{collections::HashSet, marker::PhantomData, time::Duration}; @@ -79,6 +80,7 @@ where Block, Receipt, Header, + TransactionSigned, >::block_by_number( rpc_client, alloy_eips::BlockNumberOrTag::Latest, false ) @@ -96,31 +98,66 @@ where finalized_block_hash: parent_hash, }; - let fcu_result = EngineApiClient::::fork_choice_updated_v2( + // Try v2 first for backwards compatibility, fall back to v3 on error. + match EngineApiClient::::fork_choice_updated_v2( &engine_client, fork_choice_state, Some(self.payload_attributes.clone()), ) - .await?; - - debug!("FCU result: {:?}", fcu_result); - - // check if we got a valid payload ID - match fcu_result.payload_status.status { - PayloadStatusEnum::Valid => { - if let Some(payload_id) = fcu_result.payload_id { - debug!("Got payload ID: {payload_id}"); + .await + { + Ok(fcu_result) => { + debug!(?fcu_result, "FCU v2 result"); + match fcu_result.payload_status.status { + PayloadStatusEnum::Valid => { + if let Some(payload_id) = fcu_result.payload_id { + debug!(id=%payload_id, "Got payload"); + let _engine_payload = EngineApiClient::::get_payload_v2( + &engine_client, + payload_id, + ) + .await?; + Ok(()) + } else { + Err(eyre::eyre!("No payload ID returned from forkchoiceUpdated")) + } + } + _ => Err(eyre::eyre!( + "Payload status not valid: {:?}", + fcu_result.payload_status + ))?, + } + } + Err(_) => { + // If v2 fails due to unsupported fork/missing fields, try v3 + let fcu_result = EngineApiClient::::fork_choice_updated_v3( + &engine_client, + fork_choice_state, + Some(self.payload_attributes.clone()), + ) + .await?; - // get the payload that was built - let _engine_payload = - EngineApiClient::::get_payload_v2(&engine_client, payload_id) + debug!(?fcu_result, "FCU v3 result"); + match fcu_result.payload_status.status { + PayloadStatusEnum::Valid => { + if let Some(payload_id) = fcu_result.payload_id { + debug!(id=%payload_id, "Got payload"); + let _engine_payload = EngineApiClient::::get_payload_v3( + &engine_client, + payload_id, + ) .await?; - Ok(()) - } else { - Err(eyre::eyre!("No payload ID returned from forkchoiceUpdated")) + Ok(()) + } else { + Err(eyre::eyre!("No payload ID returned from forkchoiceUpdated")) + } + } + _ => Err(eyre::eyre!( + "Payload status not valid: {:?}", + fcu_result.payload_status + )), } } - _ => Err(eyre::eyre!("Payload status not valid: {:?}", fcu_result.payload_status)), } }) } @@ -348,6 +385,7 @@ where Block, Receipt, Header, + TransactionSigned, >::block_by_number( rpc_client, 
alloy_eips::BlockNumberOrTag::Latest, false ) @@ -421,6 +459,7 @@ where Block, Receipt, Header, + TransactionSigned, >::block_by_number( rpc_client, alloy_eips::BlockNumberOrTag::Latest, false ) @@ -531,6 +570,7 @@ where Block, Receipt, Header, + TransactionSigned, >::header_by_number( rpc_client, alloy_eips::BlockNumberOrTag::Latest ) diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index 3ee437ce376..683bfb16a25 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -1,5 +1,7 @@ -use alloy_consensus::{EnvKzgSettings, SidecarBuilder, SimpleCoder, TxEip4844Variant, TxEnvelope}; -use alloy_eips::eip7702::SignedAuthorization; +use alloy_consensus::{ + EnvKzgSettings, EthereumTxEnvelope, SidecarBuilder, SimpleCoder, TxEip4844Variant, TxEnvelope, +}; +use alloy_eips::{eip7594::BlobTransactionSidecarVariant, eip7702::SignedAuthorization}; use alloy_network::{ eip2718::Encodable2718, Ethereum, EthereumWallet, TransactionBuilder, TransactionBuilder4844, }; @@ -157,11 +159,13 @@ impl TransactionTestContext { /// Validates the sidecar of a given tx envelope and returns the versioned hashes #[track_caller] - pub fn validate_sidecar(tx: TxEnvelope) -> Vec { + pub fn validate_sidecar( + tx: EthereumTxEnvelope>, + ) -> Vec { let proof_setting = EnvKzgSettings::Default; match tx { - TxEnvelope::Eip4844(signed) => match signed.tx() { + EthereumTxEnvelope::Eip4844(signed) => match signed.tx() { TxEip4844Variant::TxEip4844WithSidecar(tx) => { tx.validate_blob(proof_setting.get()).unwrap(); tx.sidecar.versioned_hashes().collect() diff --git a/crates/e2e-test-utils/tests/e2e-testsuite/main.rs b/crates/e2e-test-utils/tests/e2e-testsuite/main.rs index 5cd1bfe8c6c..4a2ac77ec65 100644 --- a/crates/e2e-test-utils/tests/e2e-testsuite/main.rs +++ b/crates/e2e-test-utils/tests/e2e-testsuite/main.rs @@ -15,9 +15,11 @@ use reth_e2e_test_utils::{ setup::{NetworkSetup, Setup}, Environment, TestBuilder, }, + E2ETestSetupBuilder, }; use reth_node_api::TreeConfig; use reth_node_ethereum::{EthEngineTypes, EthereumNode}; +use reth_payload_builder::EthPayloadBuilderAttributes; use std::sync::Arc; use tempfile::TempDir; use tracing::debug; @@ -82,6 +84,7 @@ async fn test_apply_with_import() -> Result<()> { alloy_rpc_types_eth::Block, alloy_rpc_types_eth::Receipt, alloy_rpc_types_eth::Header, + reth_ethereum_primitives::TransactionSigned, >::block_by_number( &client.rpc, alloy_eips::BlockNumberOrTag::Number(10), @@ -349,3 +352,38 @@ async fn test_testsuite_multinode_block_production() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn test_setup_builder_with_custom_tree_config() -> Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) + .cancun_activated() + .build(), + ); + + let (nodes, _tasks, _wallet) = + E2ETestSetupBuilder::::new(1, chain_spec, |_| { + EthPayloadBuilderAttributes::default() + }) + .with_tree_config_modifier(|config| { + config.with_persistence_threshold(0).with_memory_block_buffer_target(5) + }) + .build() + .await?; + + assert_eq!(nodes.len(), 1); + + let genesis_hash = nodes[0].block_hash(0); + assert_ne!(genesis_hash, B256::ZERO); + + Ok(()) +} diff --git a/crates/engine/invalid-block-hooks/Cargo.toml b/crates/engine/invalid-block-hooks/Cargo.toml index 8d4a469ee16..5b3563c7ac3 100644 
--- a/crates/engine/invalid-block-hooks/Cargo.toml +++ b/crates/engine/invalid-block-hooks/Cargo.toml @@ -12,6 +12,7 @@ workspace = true [dependencies] # reth +revm.workspace = true revm-bytecode.workspace = true revm-database.workspace = true reth-engine-primitives.workspace = true @@ -38,3 +39,13 @@ jsonrpsee.workspace = true pretty_assertions.workspace = true serde.workspace = true serde_json.workspace = true + +[dev-dependencies] +alloy-eips.workspace = true +reth-chainspec.workspace = true +reth-ethereum-primitives.workspace = true +reth-evm-ethereum.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +reth-revm = { workspace = true, features = ["test-utils"] } +reth-testing-utils.workspace = true +tempfile.workspace = true diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index f979958a198..d00f3b8287b 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -1,31 +1,50 @@ use alloy_consensus::BlockHeader; -use alloy_primitives::{keccak256, Address, B256, U256}; +use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use pretty_assertions::Comparison; use reth_engine_primitives::InvalidBlockHook; use reth_evm::{execute::Executor, ConfigureEvm}; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedHeader}; -use reth_provider::{BlockExecutionOutput, StateProviderFactory}; -use reth_revm::{database::StateProviderDatabase, db::BundleState, state::AccountInfo}; +use reth_provider::{BlockExecutionOutput, StateProvider, StateProviderFactory}; +use reth_revm::{ + database::StateProviderDatabase, + db::{BundleState, State}, +}; use reth_rpc_api::DebugApiClient; use reth_tracing::tracing::warn; use reth_trie::{updates::TrieUpdates, HashedStorage}; +use revm::state::AccountInfo; use revm_bytecode::Bytecode; -use revm_database::states::{ - reverts::{AccountInfoRevert, RevertToSlot}, - AccountStatus, StorageSlot, +use revm_database::{ + states::{reverts::AccountInfoRevert, StorageSlot}, + AccountStatus, RevertToSlot, }; use serde::Serialize; use std::{collections::BTreeMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; +type CollectionResult = + (BTreeMap, BTreeMap, reth_trie::HashedPostState, BundleState); + +/// Serializable version of `BundleState` for deterministic comparison #[derive(Debug, PartialEq, Eq)] -struct AccountRevertSorted { - pub account: AccountInfoRevert, - pub storage: BTreeMap, - pub previous_status: AccountStatus, - pub wipe_storage: bool, +struct BundleStateSorted { + /// Account state + pub state: BTreeMap, + /// All created contracts in this block. + pub contracts: BTreeMap, + /// Changes to revert + /// + /// **Note**: Inside vector is *not* sorted by address. + /// + /// But it is unique by address. + pub reverts: Vec>, + /// The size of the plain state in the bundle state + pub state_size: usize, + /// The size of reverts in the bundle state + pub reverts_size: usize, } +/// Serializable version of `BundleAccount` #[derive(Debug, PartialEq, Eq)] struct BundleAccountSorted { pub info: Option, @@ -40,74 +59,120 @@ struct BundleAccountSorted { pub status: AccountStatus, } +/// Serializable version of `AccountRevert` #[derive(Debug, PartialEq, Eq)] -struct BundleStateSorted { - /// Account state - pub state: BTreeMap, - /// All created contracts in this block. 
- pub contracts: BTreeMap, - /// Changes to revert - /// - /// **Note**: Inside vector is *not* sorted by address. - /// - /// But it is unique by address. - pub reverts: Vec>, - /// The size of the plain state in the bundle state - pub state_size: usize, - /// The size of reverts in the bundle state - pub reverts_size: usize, +struct AccountRevertSorted { + pub account: AccountInfoRevert, + pub storage: BTreeMap, + pub previous_status: AccountStatus, + pub wipe_storage: bool, } -impl BundleStateSorted { - fn from_bundle_state(bundle_state: &BundleState) -> Self { - let state = bundle_state +/// Converts bundle state to sorted format for deterministic comparison +fn sort_bundle_state_for_comparison(bundle_state: &BundleState) -> BundleStateSorted { + BundleStateSorted { + state: bundle_state .state - .clone() - .into_iter() - .map(|(address, account)| { + .iter() + .map(|(addr, acc)| { ( - address, + *addr, BundleAccountSorted { - info: account.info, - original_info: account.original_info, - status: account.status, - storage: BTreeMap::from_iter(account.storage), + info: acc.info.clone(), + original_info: acc.original_info.clone(), + storage: BTreeMap::from_iter(acc.storage.clone()), + status: acc.status, }, ) }) - .collect(); - - let contracts = BTreeMap::from_iter(bundle_state.contracts.clone()); - - let reverts = bundle_state + .collect(), + contracts: BTreeMap::from_iter(bundle_state.contracts.clone()), + reverts: bundle_state .reverts .iter() .map(|block| { block .iter() - .map(|(address, account_revert)| { + .map(|(addr, rev)| { ( - *address, + *addr, AccountRevertSorted { - account: account_revert.account.clone(), - previous_status: account_revert.previous_status, - wipe_storage: account_revert.wipe_storage, - storage: BTreeMap::from_iter(account_revert.storage.clone()), + account: rev.account.clone(), + storage: BTreeMap::from_iter(rev.storage.clone()), + previous_status: rev.previous_status, + wipe_storage: rev.wipe_storage, }, ) }) .collect() }) - .collect(); + .collect(), + state_size: bundle_state.state_size, + reverts_size: bundle_state.reverts_size, + } +} + +/// Extracts execution data including codes, preimages, and hashed state from database +fn collect_execution_data( + mut db: State>>, +) -> eyre::Result { + let bundle_state = db.take_bundle(); + let mut codes = BTreeMap::new(); + let mut preimages = BTreeMap::new(); + let mut hashed_state = db.database.hashed_post_state(&bundle_state); + + // Collect codes + db.cache.contracts.values().chain(bundle_state.contracts.values()).for_each(|code| { + let code_bytes = code.original_bytes(); + codes.insert(keccak256(&code_bytes), code_bytes); + }); - let state_size = bundle_state.state_size; - let reverts_size = bundle_state.reverts_size; + // Collect preimages + for (address, account) in db.cache.accounts { + let hashed_address = keccak256(address); + hashed_state + .accounts + .insert(hashed_address, account.account.as_ref().map(|a| a.info.clone().into())); - Self { state, contracts, reverts, state_size, reverts_size } + if let Some(account_data) = account.account { + preimages.insert(hashed_address, alloy_rlp::encode(address).into()); + let storage = hashed_state + .storages + .entry(hashed_address) + .or_insert_with(|| HashedStorage::new(account.status.was_destroyed())); + + for (slot, value) in account_data.storage { + let slot_bytes = B256::from(slot); + let hashed_slot = keccak256(slot_bytes); + storage.storage.insert(hashed_slot, value); + preimages.insert(hashed_slot, alloy_rlp::encode(slot_bytes).into()); + } + } } + + 
Ok((codes, preimages, hashed_state, bundle_state)) } -/// Generates a witness for the given block and saves it to a file. +/// Generates execution witness from collected codes, preimages, and hashed state +fn generate( + codes: BTreeMap, + preimages: BTreeMap, + hashed_state: reth_trie::HashedPostState, + state_provider: Box, +) -> eyre::Result { + let state = state_provider.witness(Default::default(), hashed_state)?; + Ok(ExecutionWitness { + state, + codes: codes.into_values().collect(), + keys: preimages.into_values().collect(), + ..Default::default() + }) +} + +/// Hook for generating execution witnesses when invalid blocks are detected. +/// +/// This hook captures the execution state and generates witness data that can be used +/// for debugging and analysis of invalid block execution. #[derive(Debug)] pub struct InvalidBlockWitnessHook { /// The provider to read the historical state and do the EVM execution. @@ -139,103 +204,51 @@ where E: ConfigureEvm + 'static, N: NodePrimitives, { - fn on_invalid_block( + /// Re-executes the block and collects execution data + fn re_execute_block( &self, parent_header: &SealedHeader, block: &RecoveredBlock, - output: &BlockExecutionOutput, - trie_updates: Option<(&TrieUpdates, B256)>, - ) -> eyre::Result<()> { - // TODO(alexey): unify with `DebugApi::debug_execution_witness` - + ) -> eyre::Result<(ExecutionWitness, BundleState)> { let mut executor = self.evm_config.batch_executor(StateProviderDatabase::new( self.provider.state_by_block_hash(parent_header.hash())?, )); executor.execute_one(block)?; + let db = executor.into_state(); + let (codes, preimages, hashed_state, bundle_state) = collect_execution_data(db)?; - // Take the bundle state - let mut db = executor.into_state(); - let bundle_state = db.take_bundle(); - - // Initialize a map of preimages. - let mut state_preimages = Vec::default(); - - // Get codes - let codes = db - .cache - .contracts - .values() - .map(|code| code.original_bytes()) - .chain( - // cache state does not have all the contracts, especially when - // a contract is created within the block - // the contract only exists in bundle state, therefore we need - // to include them as well - bundle_state.contracts.values().map(|code| code.original_bytes()), - ) - .collect(); - - // Grab all account proofs for the data accessed during block execution. - // - // Note: We grab *all* accounts in the cache here, as the `BundleState` prunes - // referenced accounts + storage slots. 
- let mut hashed_state = db.database.hashed_post_state(&bundle_state); - for (address, account) in db.cache.accounts { - let hashed_address = keccak256(address); - hashed_state - .accounts - .insert(hashed_address, account.account.as_ref().map(|a| a.info.clone().into())); + let state_provider = self.provider.state_by_block_hash(parent_header.hash())?; + let witness = generate(codes, preimages, hashed_state, state_provider)?; - let storage = hashed_state - .storages - .entry(hashed_address) - .or_insert_with(|| HashedStorage::new(account.status.was_destroyed())); - - if let Some(account) = account.account { - state_preimages.push(alloy_rlp::encode(address).into()); - - for (slot, value) in account.storage { - let slot = B256::from(slot); - let hashed_slot = keccak256(slot); - storage.storage.insert(hashed_slot, value); + Ok((witness, bundle_state)) + } - state_preimages.push(alloy_rlp::encode(slot).into()); - } - } - } + /// Handles witness generation, saving, and comparison with healthy node + fn handle_witness_operations( + &self, + witness: &ExecutionWitness, + block_prefix: &str, + block_number: u64, + ) -> eyre::Result<()> { + let filename = format!("{}.witness.re_executed.json", block_prefix); + let re_executed_witness_path = self.save_file(filename, witness)?; - // Generate an execution witness for the aggregated state of accessed accounts. - // Destruct the cache database to retrieve the state provider. - let state_provider = db.database.into_inner(); - let state = state_provider.witness(Default::default(), hashed_state.clone())?; - - // Write the witness to the output directory. - let response = - ExecutionWitness { state, codes, keys: state_preimages, ..Default::default() }; - let re_executed_witness_path = self.save_file( - format!("{}_{}.witness.re_executed.json", block.number(), block.hash()), - &response, - )?; if let Some(healthy_node_client) = &self.healthy_node_client { - // Compare the witness against the healthy node. let healthy_node_witness = futures::executor::block_on(async move { DebugApiClient::<()>::debug_execution_witness( healthy_node_client, - block.number().into(), + block_number.into(), ) .await })?; - let healthy_path = self.save_file( - format!("{}_{}.witness.healthy.json", block.number(), block.hash()), - &healthy_node_witness, - )?; + let filename = format!("{}.witness.healthy.json", block_prefix); + let healthy_path = self.save_file(filename, &healthy_node_witness)?; - // If the witnesses are different, write the diff to the output directory. - if response != healthy_node_witness { - let filename = format!("{}_{}.witness.diff", block.number(), block.hash()); - let diff_path = self.save_diff(filename, &response, &healthy_node_witness)?; + if witness != &healthy_node_witness { + let filename = format!("{}.witness.diff", block_prefix); + let diff_path = self.save_diff(filename, witness, &healthy_node_witness)?; warn!( target: "engine::invalid_block_hooks::witness", diff_path = %diff_path.display(), @@ -245,29 +258,26 @@ where ); } } + Ok(()) + } - // The bundle state after re-execution should match the original one. - // - // Reverts now supports order-independent equality, so we can compare directly without - // sorting the reverts vectors. 
- // - // See: https://github.com/bluealloy/revm/pull/1827 - if bundle_state != output.state { - let original_path = self.save_file( - format!("{}_{}.bundle_state.original.json", block.number(), block.hash()), - &output.state, - )?; - let re_executed_path = self.save_file( - format!("{}_{}.bundle_state.re_executed.json", block.number(), block.hash()), - &bundle_state, - )?; - - let filename = format!("{}_{}.bundle_state.diff", block.number(), block.hash()); - // Convert bundle state to sorted struct which has BTreeMap instead of HashMap to - // have deterministic ordering - let bundle_state_sorted = BundleStateSorted::from_bundle_state(&bundle_state); - let output_state_sorted = BundleStateSorted::from_bundle_state(&output.state); + /// Validates that the bundle state after re-execution matches the original + fn validate_bundle_state( + &self, + re_executed_state: &BundleState, + original_state: &BundleState, + block_prefix: &str, + ) -> eyre::Result<()> { + if re_executed_state != original_state { + let original_filename = format!("{}.bundle_state.original.json", block_prefix); + let original_path = self.save_file(original_filename, original_state)?; + let re_executed_filename = format!("{}.bundle_state.re_executed.json", block_prefix); + let re_executed_path = self.save_file(re_executed_filename, re_executed_state)?; + // Convert bundle state to sorted format for deterministic comparison + let bundle_state_sorted = sort_bundle_state_for_comparison(re_executed_state); + let output_state_sorted = sort_bundle_state_for_comparison(original_state); + let filename = format!("{}.bundle_state.diff", block_prefix); let diff_path = self.save_diff(filename, &bundle_state_sorted, &output_state_sorted)?; warn!( @@ -278,37 +288,44 @@ where "Bundle state mismatch after re-execution" ); } + Ok(()) + } - // Calculate the state root and trie updates after re-execution. They should match - // the original ones. + /// Validates state root and trie updates after re-execution + fn validate_state_root_and_trie( + &self, + parent_header: &SealedHeader, + block: &RecoveredBlock, + bundle_state: &BundleState, + trie_updates: Option<(&TrieUpdates, B256)>, + block_prefix: &str, + ) -> eyre::Result<()> { + let state_provider = self.provider.state_by_block_hash(parent_header.hash())?; + let hashed_state = state_provider.hashed_post_state(bundle_state); let (re_executed_root, trie_output) = state_provider.state_root_with_updates(hashed_state)?; + if let Some((original_updates, original_root)) = trie_updates { if re_executed_root != original_root { - let filename = format!("{}_{}.state_root.diff", block.number(), block.hash()); + let filename = format!("{}.state_root.diff", block_prefix); let diff_path = self.save_diff(filename, &re_executed_root, &original_root)?; warn!(target: "engine::invalid_block_hooks::witness", ?original_root, ?re_executed_root, diff_path = %diff_path.display(), "State root mismatch after re-execution"); } - // If the re-executed state root does not match the _header_ state root, also log that. 
if re_executed_root != block.state_root() { - let filename = - format!("{}_{}.header_state_root.diff", block.number(), block.hash()); + let filename = format!("{}.header_state_root.diff", block_prefix); let diff_path = self.save_diff(filename, &re_executed_root, &block.state_root())?; warn!(target: "engine::invalid_block_hooks::witness", header_state_root=?block.state_root(), ?re_executed_root, diff_path = %diff_path.display(), "Re-executed state root does not match block state root"); } if &trie_output != original_updates { - // Trie updates are too big to diff, so we just save the original and re-executed - let trie_output_sorted = &trie_output.into_sorted_ref(); - let original_updates_sorted = &original_updates.into_sorted_ref(); let original_path = self.save_file( - format!("{}_{}.trie_updates.original.json", block.number(), block.hash()), - original_updates_sorted, + format!("{}.trie_updates.original.json", block_prefix), + &original_updates.into_sorted_ref(), )?; let re_executed_path = self.save_file( - format!("{}_{}.trie_updates.re_executed.json", block.number(), block.hash()), - trie_output_sorted, + format!("{}.trie_updates.re_executed.json", block_prefix), + &trie_output.into_sorted_ref(), )?; warn!( target: "engine::invalid_block_hooks::witness", @@ -318,11 +335,44 @@ where ); } } + Ok(()) + } + + fn on_invalid_block( + &self, + parent_header: &SealedHeader, + block: &RecoveredBlock, + output: &BlockExecutionOutput, + trie_updates: Option<(&TrieUpdates, B256)>, + ) -> eyre::Result<()> { + // TODO(alexey): unify with `DebugApi::debug_execution_witness` + let (witness, bundle_state) = self.re_execute_block(parent_header, block)?; + + let block_prefix = format!("{}_{}", block.number(), block.hash()); + self.handle_witness_operations(&witness, &block_prefix, block.number())?; + + self.validate_bundle_state(&bundle_state, &output.state, &block_prefix)?; + + self.validate_state_root_and_trie( + parent_header, + block, + &bundle_state, + trie_updates, + &block_prefix, + )?; Ok(()) } - /// Saves the diff of two values into a file with the given name in the output directory. 
+ /// Serializes and saves a value to a JSON file in the output directory + fn save_file(&self, filename: String, value: &T) -> eyre::Result { + let path = self.output_directory.join(filename); + File::create(&path)?.write_all(serde_json::to_string(value)?.as_bytes())?; + + Ok(path) + } + + /// Compares two values and saves their diff to a file in the output directory fn save_diff( &self, filename: String, @@ -335,13 +385,6 @@ where Ok(path) } - - fn save_file(&self, filename: String, value: &T) -> eyre::Result { - let path = self.output_directory.join(filename); - File::create(&path)?.write_all(serde_json::to_string(value)?.as_bytes())?; - - Ok(path) - } } impl InvalidBlockHook for InvalidBlockWitnessHook @@ -361,3 +404,655 @@ where } } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eips::eip7685::Requests; + use alloy_primitives::{map::HashMap, Address, Bytes, B256, U256}; + use reth_chainspec::ChainSpec; + use reth_ethereum_primitives::EthPrimitives; + use reth_evm_ethereum::EthEvmConfig; + use reth_provider::test_utils::MockEthProvider; + use reth_revm::db::{BundleAccount, BundleState}; + use revm_database::states::reverts::AccountRevert; + use tempfile::TempDir; + + use reth_revm::test_utils::StateProviderTest; + use reth_testing_utils::generators::{self, random_block, random_eoa_accounts, BlockParams}; + use revm_bytecode::Bytecode; + + /// Creates a test `BundleState` with realistic accounts, contracts, and reverts + fn create_bundle_state() -> BundleState { + let mut rng = generators::rng(); + let mut bundle_state = BundleState::default(); + + // Generate realistic EOA accounts using generators + let accounts = random_eoa_accounts(&mut rng, 3); + + for (i, (addr, account)) in accounts.into_iter().enumerate() { + // Create storage entries for each account + let mut storage = HashMap::default(); + let storage_key = U256::from(i + 1); + storage.insert( + storage_key, + StorageSlot { + present_value: U256::from((i + 1) * 10), + previous_or_original_value: U256::from((i + 1) * 15), + }, + ); + + let bundle_account = BundleAccount { + info: Some(AccountInfo { + balance: account.balance, + nonce: account.nonce, + code_hash: account.bytecode_hash.unwrap_or_default(), + code: None, + }), + original_info: (i == 0).then(|| AccountInfo { + balance: account.balance.checked_div(U256::from(2)).unwrap_or(U256::ZERO), + nonce: 0, + code_hash: account.bytecode_hash.unwrap_or_default(), + code: None, + }), + storage, + status: AccountStatus::default(), + }; + + bundle_state.state.insert(addr, bundle_account); + } + + // Generate realistic contract bytecode using generators + let contract_hashes: Vec = (0..3).map(|_| B256::random()).collect(); + for (i, hash) in contract_hashes.iter().enumerate() { + let bytecode = match i { + 0 => Bytes::from(vec![0x60, 0x80, 0x60, 0x40, 0x52]), // Simple contract + 1 => Bytes::from(vec![0x61, 0x81, 0x60, 0x00, 0x39]), // Another contract + _ => Bytes::from(vec![0x60, 0x00, 0x60, 0x00, 0xfd]), // REVERT contract + }; + bundle_state.contracts.insert(*hash, Bytecode::new_raw(bytecode)); + } + + // Add reverts for multiple blocks using different accounts + let addresses: Vec
= bundle_state.state.keys().copied().collect(); + for (i, addr) in addresses.iter().take(2).enumerate() { + let revert = AccountRevert { + wipe_storage: i == 0, // First account has storage wiped + ..AccountRevert::default() + }; + bundle_state.reverts.push(vec![(*addr, revert)]); + } + + // Set realistic sizes + bundle_state.state_size = bundle_state.state.len(); + bundle_state.reverts_size = bundle_state.reverts.len(); + + bundle_state + } + #[test] + fn test_sort_bundle_state_for_comparison() { + // Use the fixture function to create test data + let bundle_state = create_bundle_state(); + + // Call the function under test + let sorted = sort_bundle_state_for_comparison(&bundle_state); + + // Verify state_size and reverts_size values match the fixture + assert_eq!(sorted.state_size, 3); + assert_eq!(sorted.reverts_size, 2); + + // Verify state contains our mock accounts + assert_eq!(sorted.state.len(), 3); // We added 3 accounts + + // Verify contracts contains our mock contracts + assert_eq!(sorted.contracts.len(), 3); // We added 3 contracts + + // Verify reverts is an array with multiple blocks of reverts + let reverts = &sorted.reverts; + assert_eq!(reverts.len(), 2); // Fixture has two blocks of reverts + + // Verify that the state accounts have the expected structure + for account_data in sorted.state.values() { + // BundleAccountSorted has info, original_info, storage, and status fields + // Just verify the structure exists by accessing the fields + let _info = &account_data.info; + let _original_info = &account_data.original_info; + let _storage = &account_data.storage; + let _status = &account_data.status; + } + } + + #[test] + fn test_data_collector_collect() { + // Create test data using the fixture function + let bundle_state = create_bundle_state(); + + // Create a State with StateProviderTest + let state_provider = StateProviderTest::default(); + let mut state = State::builder() + .with_database(StateProviderDatabase::new( + Box::new(state_provider) as Box + )) + .with_bundle_update() + .build(); + + // Insert contracts from the fixture into the state cache + for (code_hash, bytecode) in &bundle_state.contracts { + state.cache.contracts.insert(*code_hash, bytecode.clone()); + } + + // Manually set the bundle state in the state object + state.bundle_state = bundle_state; + + // Call the collect function + let result = collect_execution_data(state); + // Verify the function returns successfully + assert!(result.is_ok()); + + let (codes, _preimages, _hashed_state, returned_bundle_state) = result.unwrap(); + + // Verify that the returned data contains expected values + // Since we used the fixture data, we should have some codes and state + assert!(!codes.is_empty(), "Expected some bytecode entries"); + assert!(!returned_bundle_state.state.is_empty(), "Expected some state entries"); + + // Verify the bundle state structure matches our fixture + assert_eq!(returned_bundle_state.state.len(), 3, "Expected 3 accounts from fixture"); + assert_eq!(returned_bundle_state.contracts.len(), 3, "Expected 3 contracts from fixture"); + } + + #[test] + fn test_re_execute_block() { + // Create hook instance + let (hook, _output_directory, _temp_dir) = create_test_hook(); + + // Setup to call re_execute_block + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + + // Create a random block that inherits from the parent header + let recovered_block = random_block( + &mut rng, + 2, // block number + BlockParams { + parent: 
Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let result = hook.re_execute_block(&parent_header, &recovered_block); + + // Verify the function behavior with mock data + assert!(result.is_ok(), "re_execute_block should return Ok"); + } + + /// Creates test `InvalidBlockWitnessHook` with temporary directory + fn create_test_hook() -> ( + InvalidBlockWitnessHook, EthEvmConfig>, + PathBuf, + TempDir, + ) { + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let output_directory = temp_dir.path().to_path_buf(); + + let provider = MockEthProvider::::default(); + let evm_config = EthEvmConfig::mainnet(); + + let hook = + InvalidBlockWitnessHook::new(provider, evm_config, output_directory.clone(), None); + + (hook, output_directory, temp_dir) + } + + #[test] + fn test_handle_witness_operations_with_healthy_client_mock() { + // Create hook instance with mock healthy client + let (hook, output_directory, _temp_dir) = create_test_hook(); + + // Create sample ExecutionWitness with correct types + let witness = ExecutionWitness { + state: vec![Bytes::from("state_data")], + codes: vec![Bytes::from("code_data")], + keys: vec![Bytes::from("key_data")], + ..Default::default() + }; + + // Call handle_witness_operations + let result = hook.handle_witness_operations(&witness, "test_block_healthy", 67890); + + // Should succeed + assert!(result.is_ok()); + + // Check that witness file was created + let witness_file = output_directory.join("test_block_healthy.witness.re_executed.json"); + assert!(witness_file.exists()); + } + + #[test] + fn test_handle_witness_operations_file_creation() { + // Test file creation and content validation + let (hook, output_directory, _temp_dir) = create_test_hook(); + + let witness = ExecutionWitness { + state: vec![Bytes::from("test_state")], + codes: vec![Bytes::from("test_code")], + keys: vec![Bytes::from("test_key")], + ..Default::default() + }; + + let block_prefix = "file_test_block"; + let block_number = 11111; + + // Call handle_witness_operations + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + + // Verify file was created with correct name + let expected_file = + output_directory.join(format!("{}.witness.re_executed.json", block_prefix)); + assert!(expected_file.exists()); + + // Read and verify file content is valid JSON and contains witness structure + let file_content = std::fs::read_to_string(&expected_file).expect("Failed to read file"); + let parsed_witness: serde_json::Value = + serde_json::from_str(&file_content).expect("File should contain valid JSON"); + + // Verify the JSON structure contains expected fields + assert!(parsed_witness.get("state").is_some(), "JSON should contain 'state' field"); + assert!(parsed_witness.get("codes").is_some(), "JSON should contain 'codes' field"); + assert!(parsed_witness.get("keys").is_some(), "JSON should contain 'keys' field"); + } + + #[test] + fn test_proof_generator_generate() { + // Use existing MockEthProvider + let mock_provider = MockEthProvider::default(); + let state_provider: Box = Box::new(mock_provider); + + // Mock Data + let mut codes = BTreeMap::new(); + codes.insert(B256::from([1u8; 32]), Bytes::from("contract_code_1")); + codes.insert(B256::from([2u8; 32]), Bytes::from("contract_code_2")); + + let mut preimages = BTreeMap::new(); + preimages.insert(B256::from([3u8; 32]), Bytes::from("preimage_1")); + preimages.insert(B256::from([4u8; 32]), 
Bytes::from("preimage_2")); + + let hashed_state = reth_trie::HashedPostState::default(); + + // Call generate function + let result = generate(codes.clone(), preimages.clone(), hashed_state, state_provider); + + // Verify result + assert!(result.is_ok(), "generate function should succeed"); + let execution_witness = result.unwrap(); + + assert!(execution_witness.state.is_empty(), "State should be empty from MockEthProvider"); + + let expected_codes: Vec = codes.into_values().collect(); + assert_eq!( + execution_witness.codes.len(), + expected_codes.len(), + "Codes length should match" + ); + for code in &expected_codes { + assert!( + execution_witness.codes.contains(code), + "Codes should contain expected bytecode" + ); + } + + let expected_keys: Vec = preimages.into_values().collect(); + assert_eq!(execution_witness.keys.len(), expected_keys.len(), "Keys length should match"); + for key in &expected_keys { + assert!(execution_witness.keys.contains(key), "Keys should contain expected preimage"); + } + } + + #[test] + fn test_validate_bundle_state_matching() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + let block_prefix = "test_block_123"; + + // Test with identical states - should not produce any warnings or files + let result = hook.validate_bundle_state(&bundle_state, &bundle_state, block_prefix); + assert!(result.is_ok()); + } + + #[test] + fn test_validate_bundle_state_mismatch() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let original_state = create_bundle_state(); + let mut modified_state = create_bundle_state(); + + // Modify the state to create a mismatch + let addr = Address::from([1u8; 20]); + if let Some(account) = modified_state.state.get_mut(&addr) && + let Some(ref mut info) = account.info + { + info.balance = U256::from(999); + } + + let block_prefix = "test_block_mismatch"; + + // Test with different states - should save files and log warning + let result = hook.validate_bundle_state(&modified_state, &original_state, block_prefix); + assert!(result.is_ok()); + + // Verify that files were created + let original_file = output_dir.join(format!("{}.bundle_state.original.json", block_prefix)); + let re_executed_file = + output_dir.join(format!("{}.bundle_state.re_executed.json", block_prefix)); + let diff_file = output_dir.join(format!("{}.bundle_state.diff", block_prefix)); + + assert!(original_file.exists(), "Original bundle state file should be created"); + assert!(re_executed_file.exists(), "Re-executed bundle state file should be created"); + assert!(diff_file.exists(), "Diff file should be created"); + } + + /// Creates test `TrieUpdates` with account nodes and removed nodes + fn create_test_trie_updates() -> TrieUpdates { + use alloy_primitives::map::HashMap; + use reth_trie::{updates::TrieUpdates, BranchNodeCompact, Nibbles}; + use std::collections::HashSet; + + let mut account_nodes = HashMap::default(); + let nibbles = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3]); + let branch_node = BranchNodeCompact::new( + 0b1010, // state_mask + 0b1010, // tree_mask - must be subset of state_mask + 0b1000, // hash_mask + vec![B256::from([1u8; 32])], // hashes + None, // root_hash + ); + account_nodes.insert(nibbles, branch_node); + + let mut removed_nodes = HashSet::default(); + removed_nodes.insert(Nibbles::from_nibbles_unchecked([0x4, 0x5, 0x6])); + + TrieUpdates { account_nodes, removed_nodes, storage_tries: HashMap::default() } + } + + #[test] + fn 
test_validate_state_root_and_trie_with_trie_updates() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + + // Generate test data + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + let recovered_block = random_block( + &mut rng, + 2, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let trie_updates = create_test_trie_updates(); + let original_root = B256::from([2u8; 32]); // Different from what will be computed + let block_prefix = "test_state_root_with_trie"; + + // Test with trie updates - this will likely produce warnings due to mock data + let result = hook.validate_state_root_and_trie( + &parent_header, + &recovered_block, + &bundle_state, + Some((&trie_updates, original_root)), + block_prefix, + ); + assert!(result.is_ok()); + } + + #[test] + fn test_on_invalid_block_calls_all_validation_methods() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + + // Generate test data + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + let recovered_block = random_block( + &mut rng, + 2, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + // Create mock BlockExecutionOutput + let output = BlockExecutionOutput { + state: bundle_state, + result: reth_provider::BlockExecutionResult { + receipts: vec![], + requests: Requests::default(), + gas_used: 0, + blob_gas_used: 0, + }, + }; + + // Create test trie updates + let trie_updates = create_test_trie_updates(); + let state_root = B256::random(); + + // Test that on_invalid_block attempts to call all its internal methods + // by checking that it doesn't panic and tries to create files + let files_before = output_dir.read_dir().unwrap().count(); + + let _result = hook.on_invalid_block( + &parent_header, + &recovered_block, + &output, + Some((&trie_updates, state_root)), + ); + + // Verify that the function attempted to process the block: + // Either it succeeded, or it created some output files during processing + let files_after = output_dir.read_dir().unwrap().count(); + + // The function should attempt to execute its workflow + assert!( + files_after >= files_before, + "on_invalid_block should attempt to create output files during processing" + ); + } + + #[test] + fn test_handle_witness_operations_with_empty_witness() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let witness = ExecutionWitness::default(); + let block_prefix = "empty_witness_test"; + let block_number = 12345; + + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + } + + #[test] + fn test_handle_witness_operations_with_zero_block_number() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let witness = ExecutionWitness { + state: vec![Bytes::from("test_state")], + codes: vec![Bytes::from("test_code")], + keys: vec![Bytes::from("test_key")], + ..Default::default() + }; + let block_prefix = "zero_block_test"; + let block_number = 0; + + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + } + + #[test] + fn test_handle_witness_operations_with_large_witness_data() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let large_data = vec![0u8; 
10000]; // 10KB of data + let witness = ExecutionWitness { + state: vec![Bytes::from(large_data.clone())], + codes: vec![Bytes::from(large_data.clone())], + keys: vec![Bytes::from(large_data)], + ..Default::default() + }; + let block_prefix = "large_witness_test"; + let block_number = 999999; + + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + } + + #[test] + fn test_validate_bundle_state_with_empty_states() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let empty_state = BundleState::default(); + let block_prefix = "empty_states_test"; + + let result = hook.validate_bundle_state(&empty_state, &empty_state, block_prefix); + assert!(result.is_ok()); + } + + #[test] + fn test_validate_bundle_state_with_different_contract_counts() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let state1 = create_bundle_state(); + let mut state2 = create_bundle_state(); + + // Add extra contract to state2 + let extra_contract_hash = B256::random(); + state2.contracts.insert( + extra_contract_hash, + Bytecode::new_raw(Bytes::from(vec![0x60, 0x00, 0x60, 0x00, 0xfd])), // REVERT opcode + ); + + let block_prefix = "different_contracts_test"; + let result = hook.validate_bundle_state(&state1, &state2, block_prefix); + assert!(result.is_ok()); + + // Verify diff files were created + let diff_file = output_dir.join(format!("{}.bundle_state.diff", block_prefix)); + assert!(diff_file.exists()); + } + + #[test] + fn test_save_diff_with_identical_values() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let value1 = "identical_value"; + let value2 = "identical_value"; + let filename = "identical_diff_test".to_string(); + + let result = hook.save_diff(filename.clone(), &value1, &value2); + assert!(result.is_ok()); + + let diff_file = output_dir.join(filename); + assert!(diff_file.exists()); + } + + #[test] + fn test_validate_state_root_and_trie_without_trie_updates() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + let recovered_block = random_block( + &mut rng, + 2, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let block_prefix = "no_trie_updates_test"; + + // Test without trie updates (None case) + let result = hook.validate_state_root_and_trie( + &parent_header, + &recovered_block, + &bundle_state, + None, + block_prefix, + ); + assert!(result.is_ok()); + } + + #[test] + fn test_complete_invalid_block_workflow() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let mut rng = generators::rng(); + + // Create a realistic block scenario + let parent_header = generators::random_header(&mut rng, 100, None); + let invalid_block = random_block( + &mut rng, + 101, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(3), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let bundle_state = create_bundle_state(); + let trie_updates = create_test_trie_updates(); + + // Test validation methods + let validation_result = + hook.validate_bundle_state(&bundle_state, &bundle_state, "integration_test"); + assert!(validation_result.is_ok(), "Bundle state validation should succeed"); + + let state_root_result = hook.validate_state_root_and_trie( + &parent_header, + &invalid_block, + &bundle_state, + Some((&trie_updates, 
B256::random())), + "integration_test", + ); + assert!(state_root_result.is_ok(), "State root validation should succeed"); + } + + #[test] + fn test_integration_workflow_components() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let mut rng = generators::rng(); + + // Create test data + let parent_header = generators::random_header(&mut rng, 50, None); + let _invalid_block = random_block( + &mut rng, + 51, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(2), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let bundle_state = create_bundle_state(); + let _trie_updates = create_test_trie_updates(); + + // Test individual components that would be part of the complete flow + let validation_result = + hook.validate_bundle_state(&bundle_state, &bundle_state, "integration_component_test"); + assert!(validation_result.is_ok(), "Component validation should succeed"); + } +} diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index 2de5ec3c882..9f11c19125b 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -11,11 +11,11 @@ exclude.workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-engine-primitives.workspace = true +reth-engine-primitives = { workspace = true, features = ["std"] } reth-ethereum-engine-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true -reth-provider.workspace = true +reth-storage-api.workspace = true reth-transaction-pool.workspace = true # alloy diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 818848000f6..d6298502fb5 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -10,7 +10,7 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{ BuiltPayload, EngineApiMessageVersion, PayloadAttributesBuilder, PayloadKind, PayloadTypes, }; -use reth_provider::BlockReader; +use reth_storage_api::BlockReader; use reth_transaction_pool::TransactionPool; use std::{ collections::VecDeque, diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index e5f58523d03..0b9b7d9f821 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -6,8 +6,28 @@ pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; /// How close to the canonical head we persist blocks. pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0; -/// Default maximum concurrency for proof tasks -pub const DEFAULT_MAX_PROOF_TASK_CONCURRENCY: u64 = 256; +/// Minimum number of workers we allow configuring explicitly. +pub const MIN_WORKER_COUNT: usize = 32; + +/// Returns the default number of storage worker threads based on available parallelism. +fn default_storage_worker_count() -> usize { + #[cfg(feature = "std")] + { + std::thread::available_parallelism().map_or(8, |n| n.get() * 2).min(MIN_WORKER_COUNT) + } + #[cfg(not(feature = "std"))] + { + 8 + } +} + +/// Returns the default number of account worker threads. +/// +/// Account workers coordinate storage proof collection and account trie traversal. +/// They are set to the same count as storage workers for simplicity. +fn default_account_worker_count() -> usize { + default_storage_worker_count() +} /// The size of proof targets chunk to spawn in one multiproof calculation. 
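// Illustrative sketch (not part of the diff): how the worker-count defaults above resolve.
// `default_storage_worker_count` is `available_parallelism * 2`, capped at MIN_WORKER_COUNT
// (32), with a fallback of 8 when parallelism cannot be queried (or without `std`); the
// account worker count simply mirrors it. The helper and traced values below are
// hypothetical and only restate that formula.
fn default_worker_count_for(parallelism: Option<usize>) -> usize {
    const MIN_WORKER_COUNT: usize = 32; // mirrors the constant introduced above
    parallelism.map_or(8, |n| n * 2).min(MIN_WORKER_COUNT)
}
// default_worker_count_for(Some(4))  == 8
// default_worker_count_for(Some(8))  == 16
// default_worker_count_for(Some(32)) == 32   (capped at MIN_WORKER_COUNT)
// default_worker_count_for(None)     == 8    (parallelism unavailable / no_std fallback)
//
// Explicit configuration goes the other way: the `with_storage_worker_count` /
// `with_account_worker_count` setters added further down in this hunk clamp the supplied
// value up with `.max(MIN_WORKER_COUNT)`, e.g. `with_storage_worker_count(8)` yields 32.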
pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 10; @@ -69,8 +89,8 @@ pub struct TreeConfig { /// Whether to always compare trie updates from the state root task to the trie updates from /// the regular state root calculation. always_compare_trie_updates: bool, - /// Whether to disable cross-block caching and parallel prewarming. - disable_caching_and_prewarming: bool, + /// Whether to disable parallel prewarming. + disable_prewarming: bool, /// Whether to disable the parallel sparse trie state root algorithm. disable_parallel_sparse_trie: bool, /// Whether to enable state provider metrics. @@ -79,8 +99,6 @@ pub struct TreeConfig { cross_block_cache_size: u64, /// Whether the host has enough parallelism to run state root task. has_enough_parallelism: bool, - /// Maximum number of concurrent proof tasks - max_proof_task_concurrency: u64, /// Whether multiproof task should chunk proof targets. multiproof_chunking_enabled: bool, /// Multiproof task chunk size for proof targets. @@ -109,6 +127,10 @@ pub struct TreeConfig { prewarm_max_concurrency: usize, /// Whether to unwind canonical header to ancestor during forkchoice updates. allow_unwind_canonical_header: bool, + /// Number of storage proof worker threads. + storage_worker_count: usize, + /// Number of account proof worker threads. + account_worker_count: usize, } impl Default for TreeConfig { @@ -121,12 +143,11 @@ impl Default for TreeConfig { max_execute_block_batch_size: DEFAULT_MAX_EXECUTE_BLOCK_BATCH_SIZE, legacy_state_root: false, always_compare_trie_updates: false, - disable_caching_and_prewarming: false, + disable_prewarming: false, disable_parallel_sparse_trie: false, state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE, has_enough_parallelism: has_enough_parallelism(), - max_proof_task_concurrency: DEFAULT_MAX_PROOF_TASK_CONCURRENCY, multiproof_chunking_enabled: true, multiproof_chunk_size: DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE, reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES, @@ -135,6 +156,8 @@ impl Default for TreeConfig { always_process_payload_attributes_on_canonical_head: false, prewarm_max_concurrency: DEFAULT_PREWARM_MAX_CONCURRENCY, allow_unwind_canonical_header: false, + storage_worker_count: default_storage_worker_count(), + account_worker_count: default_account_worker_count(), } } } @@ -150,12 +173,11 @@ impl TreeConfig { max_execute_block_batch_size: usize, legacy_state_root: bool, always_compare_trie_updates: bool, - disable_caching_and_prewarming: bool, + disable_prewarming: bool, disable_parallel_sparse_trie: bool, state_provider_metrics: bool, cross_block_cache_size: u64, has_enough_parallelism: bool, - max_proof_task_concurrency: u64, multiproof_chunking_enabled: bool, multiproof_chunk_size: usize, reserved_cpu_cores: usize, @@ -164,6 +186,8 @@ impl TreeConfig { always_process_payload_attributes_on_canonical_head: bool, prewarm_max_concurrency: usize, allow_unwind_canonical_header: bool, + storage_worker_count: usize, + account_worker_count: usize, ) -> Self { Self { persistence_threshold, @@ -173,12 +197,11 @@ impl TreeConfig { max_execute_block_batch_size, legacy_state_root, always_compare_trie_updates, - disable_caching_and_prewarming, + disable_prewarming, disable_parallel_sparse_trie, state_provider_metrics, cross_block_cache_size, has_enough_parallelism, - max_proof_task_concurrency, multiproof_chunking_enabled, multiproof_chunk_size, reserved_cpu_cores, @@ -187,6 +210,8 @@ impl TreeConfig { always_process_payload_attributes_on_canonical_head, prewarm_max_concurrency, 
allow_unwind_canonical_header, + storage_worker_count, + account_worker_count, } } @@ -215,11 +240,6 @@ impl TreeConfig { self.max_execute_block_batch_size } - /// Return the maximum proof task concurrency. - pub const fn max_proof_task_concurrency(&self) -> u64 { - self.max_proof_task_concurrency - } - /// Return whether the multiproof task chunking is enabled. pub const fn multiproof_chunking_enabled(&self) -> bool { self.multiproof_chunking_enabled @@ -251,9 +271,9 @@ impl TreeConfig { self.disable_parallel_sparse_trie } - /// Returns whether or not cross-block caching and parallel prewarming should be used. - pub const fn disable_caching_and_prewarming(&self) -> bool { - self.disable_caching_and_prewarming + /// Returns whether or not parallel prewarming should be used. + pub const fn disable_prewarming(&self) -> bool { + self.disable_prewarming } /// Returns whether to always compare trie updates from the state root task to the trie updates @@ -343,12 +363,9 @@ impl TreeConfig { self } - /// Setter for whether to disable cross-block caching and parallel prewarming. - pub const fn without_caching_and_prewarming( - mut self, - disable_caching_and_prewarming: bool, - ) -> Self { - self.disable_caching_and_prewarming = disable_caching_and_prewarming; + /// Setter for whether to disable parallel prewarming. + pub const fn without_prewarming(mut self, disable_prewarming: bool) -> Self { + self.disable_prewarming = disable_prewarming; self } @@ -389,15 +406,6 @@ impl TreeConfig { self } - /// Setter for maximum number of concurrent proof tasks. - pub const fn with_max_proof_task_concurrency( - mut self, - max_proof_task_concurrency: u64, - ) -> Self { - self.max_proof_task_concurrency = max_proof_task_concurrency; - self - } - /// Setter for whether multiproof task should chunk proof targets. pub const fn with_multiproof_chunking_enabled( mut self, @@ -452,4 +460,26 @@ impl TreeConfig { pub const fn prewarm_max_concurrency(&self) -> usize { self.prewarm_max_concurrency } + + /// Return the number of storage proof worker threads. + pub const fn storage_worker_count(&self) -> usize { + self.storage_worker_count + } + + /// Setter for the number of storage proof worker threads. + pub fn with_storage_worker_count(mut self, storage_worker_count: usize) -> Self { + self.storage_worker_count = storage_worker_count.max(MIN_WORKER_COUNT); + self + } + + /// Return the number of account proof worker threads. + pub const fn account_worker_count(&self) -> usize { + self.account_worker_count + } + + /// Setter for the number of account proof worker threads. + pub fn with_account_worker_count(mut self, account_worker_count: usize) -> Self { + self.account_worker_count = account_worker_count.max(MIN_WORKER_COUNT); + self + } } diff --git a/crates/engine/primitives/src/event.rs b/crates/engine/primitives/src/event.rs index 1c74282cba5..8cced031524 100644 --- a/crates/engine/primitives/src/event.rs +++ b/crates/engine/primitives/src/event.rs @@ -10,7 +10,7 @@ use core::{ fmt::{Display, Formatter, Result}, time::Duration, }; -use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_chain_state::ExecutedBlock; use reth_ethereum_primitives::EthPrimitives; use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader}; @@ -24,11 +24,11 @@ pub enum ConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status ForkchoiceUpdated(ForkchoiceState, ForkchoiceStatus), /// A block was added to the fork chain. 
- ForkBlockAdded(ExecutedBlockWithTrieUpdates, Duration), + ForkBlockAdded(ExecutedBlock, Duration), /// A new block was received from the consensus engine BlockReceived(BlockNumHash), /// A block was added to the canonical chain, and the elapsed time validating the block - CanonicalBlockAdded(ExecutedBlockWithTrieUpdates, Duration), + CanonicalBlockAdded(ExecutedBlock, Duration), /// A canonical chain was committed, and the elapsed time committing the data CanonicalChainCommitted(Box>, Duration), /// The consensus engine processed an invalid block. diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 8fd87a22bd1..ba99898a842 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -30,7 +30,6 @@ reth-prune.workspace = true reth-revm.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true -reth-trie-db.workspace = true reth-trie-parallel.workspace = true reth-trie-sparse = { workspace = true, features = ["std", "metrics"] } reth-trie-sparse-parallel = { workspace = true, features = ["std"] } @@ -65,6 +64,7 @@ rayon.workspace = true tracing.workspace = true derive_more.workspace = true parking_lot.workspace = true +crossbeam-channel.workspace = true # optional deps for test-utils reth-prune-types = { workspace = true, optional = true } @@ -133,7 +133,6 @@ test-utils = [ "reth-trie/test-utils", "reth-trie-sparse/test-utils", "reth-prune-types?/test-utils", - "reth-trie-db/test-utils", "reth-trie-parallel/test-utils", "reth-ethereum-primitives/test-utils", "reth-node-ethereum/test-utils", diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs index 0e7b153106f..0004448c3f9 100644 --- a/crates/engine/tree/benches/state_root_task.rs +++ b/crates/engine/tree/benches/state_root_task.rs @@ -20,11 +20,10 @@ use reth_evm::OnStateHook; use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::{Account as RethAccount, Recovered, StorageEntry}; use reth_provider::{ - providers::{BlockchainProvider, ConsistentDbView}, + providers::{BlockchainProvider, OverlayStateProviderFactory}, test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, AccountReader, ChainSpecProvider, HashingWriter, ProviderFactory, }; -use reth_trie::TrieInput; use revm_primitives::{HashMap, U256}; use revm_state::{Account as RevmAccount, AccountInfo, AccountStatus, EvmState, EvmStorageSlot}; use std::{hint::black_box, sync::Arc}; @@ -236,8 +235,7 @@ fn bench_state_root(c: &mut Criterion) { Result, core::convert::Infallible>, >(), StateProviderBuilder::new(provider.clone(), genesis_hash, None), - ConsistentDbView::new_with_latest_tip(provider).unwrap(), - TrieInput::default(), + OverlayStateProviderFactory::new(provider), &TreeConfig::default(), ); diff --git a/crates/engine/tree/src/chain.rs b/crates/engine/tree/src/chain.rs index e2893bb976a..3e6207c9d40 100644 --- a/crates/engine/tree/src/chain.rs +++ b/crates/engine/tree/src/chain.rs @@ -71,7 +71,7 @@ where /// Internal function used to advance the chain. /// /// Polls the `ChainOrchestrator` for the next event. 
- #[tracing::instrument(level = "debug", name = "ChainOrchestrator::poll", skip(self, cx))] + #[tracing::instrument(level = "debug", target = "engine::tree::chain_orchestrator", skip_all)] fn poll_next_event(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index bee52a46438..f08195b205e 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -7,7 +7,7 @@ use crate::{ }; use alloy_primitives::B256; use futures::{Stream, StreamExt}; -use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_chain_state::ExecutedBlock; use reth_engine_primitives::{BeaconEngineMessage, ConsensusEngineEvent}; use reth_ethereum_primitives::EthPrimitives; use reth_payload_primitives::PayloadTypes; @@ -246,7 +246,7 @@ pub enum EngineApiRequest { /// A request received from the consensus engine. Beacon(BeaconEngineMessage), /// Request to insert an already executed block, e.g. via payload building. - InsertExecutedBlock(ExecutedBlockWithTrieUpdates), + InsertExecutedBlock(ExecutedBlock), } impl Display for EngineApiRequest { diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index de5b10c331c..12482b1a162 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -1,7 +1,7 @@ use crate::metrics::PersistenceMetrics; use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; -use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; use reth_ethereum_primitives::EthPrimitives; use reth_primitives_traits::NodePrimitives; @@ -140,9 +140,12 @@ where fn on_save_blocks( &self, - blocks: Vec>, + blocks: Vec>, ) -> Result, PersistenceError> { - debug!(target: "engine::persistence", first=?blocks.first().map(|b| b.recovered_block.num_hash()), last=?blocks.last().map(|b| b.recovered_block.num_hash()), "Saving range of blocks"); + let first_block_hash = blocks.first().map(|b| b.recovered_block.num_hash()); + let last_block_hash = blocks.last().map(|b| b.recovered_block.num_hash()); + debug!(target: "engine::persistence", first=?first_block_hash, last=?last_block_hash, "Saving range of blocks"); + let start_time = Instant::now(); let last_block_hash_num = blocks.last().map(|block| BlockNumHash { hash: block.recovered_block().hash(), @@ -155,6 +158,9 @@ where provider_rw.save_blocks(blocks)?; provider_rw.commit()?; } + + debug!(target: "engine::persistence", first=?first_block_hash, last=?last_block_hash, "Saved range of blocks"); + self.metrics.save_blocks_duration_seconds.record(start_time.elapsed()); Ok(last_block_hash_num) } @@ -180,7 +186,7 @@ pub enum PersistenceAction { /// /// First, header, transaction, and receipt-related data should be written to static files. /// Then the execution history-related data will be written to the database. - SaveBlocks(Vec>, oneshot::Sender>), + SaveBlocks(Vec>, oneshot::Sender>), /// Removes block data above the given block number from the database. /// @@ -257,7 +263,7 @@ impl PersistenceHandle { /// If there are no blocks to persist, then `None` is sent in the sender. 
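// Illustrative sketch (not part of the diff): how a caller is expected to consume the
// oneshot response of `save_blocks` below. The generic parameters are elided in this
// rendering of the diff; per the doc comment above, the channel yields `None` when there
// was nothing to persist, otherwise the number/hash of the last persisted block. `handle`
// and `blocks` are assumed to be a `PersistenceHandle` and the blocks to persist, and the
// channel is assumed to be the tokio oneshot used by this module.
//
//     let (tx, rx) = tokio::sync::oneshot::channel();
//     handle.save_blocks(blocks, tx)?;
//     match rx.await? {
//         Some(last) => debug!(?last, "persisted up to block"),
//         None => debug!("no blocks to persist"),
//     }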
pub fn save_blocks( &self, - blocks: Vec>, + blocks: Vec>, tx: oneshot::Sender>, ) -> Result<(), SendError>> { self.send_action(PersistenceAction::SaveBlocks(blocks, tx)) diff --git a/crates/engine/tree/src/test_utils.rs b/crates/engine/tree/src/test_utils.rs index 2ec00f9b918..e011a54b73c 100644 --- a/crates/engine/tree/src/test_utils.rs +++ b/crates/engine/tree/src/test_utils.rs @@ -3,9 +3,8 @@ use reth_chainspec::ChainSpec; use reth_ethereum_primitives::BlockBody; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives_traits::SealedHeader; -use reth_provider::{ - test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, - ExecutionOutcome, +use reth_provider::test_utils::{ + create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, }; use reth_prune_types::PruneModes; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; @@ -18,13 +17,12 @@ use tokio::sync::watch; #[derive(Default, Debug)] pub struct TestPipelineBuilder { pipeline_exec_outputs: VecDeque>, - executor_results: Vec, } impl TestPipelineBuilder { /// Create a new [`TestPipelineBuilder`]. pub const fn new() -> Self { - Self { pipeline_exec_outputs: VecDeque::new(), executor_results: Vec::new() } + Self { pipeline_exec_outputs: VecDeque::new() } } /// Set the pipeline execution outputs to use for the test consensus engine. @@ -37,8 +35,14 @@ impl TestPipelineBuilder { } /// Set the executor results to use for the test consensus engine. - pub fn with_executor_results(mut self, executor_results: Vec) -> Self { - self.executor_results = executor_results; + #[deprecated( + note = "no-op: executor results are not used and will be removed in a future release" + )] + pub fn with_executor_results( + self, + executor_results: Vec, + ) -> Self { + let _ = executor_results; self } diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index 9f4eb8398df..fd9999b9eba 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -16,7 +16,7 @@ use reth_trie::{ }; use revm_primitives::map::DefaultHashBuilder; use std::{sync::Arc, time::Duration}; -use tracing::trace; +use tracing::{debug_span, instrument, trace}; pub(crate) type Cache = mini_moka::sync::Cache; @@ -302,7 +302,7 @@ pub(crate) struct ExecutionCache { /// Per-account storage cache: outer cache keyed by Address, inner cache tracks that account’s /// storage slots. - storage_cache: Cache, + storage_cache: Cache>, /// Cache for basic account information (nonce, balance, code hash). account_cache: Cache>, @@ -340,15 +340,15 @@ impl ExecutionCache { where I: IntoIterator)>, { - let account_cache = self.storage_cache.get(&address).unwrap_or_else(|| { - let account_cache = AccountStorageCache::default(); - self.storage_cache.insert(address, account_cache.clone()); - account_cache - }); + let account_cache = self.storage_cache.get(&address).unwrap_or_default(); for (key, value) in storage_entries { account_cache.insert_storage(key, value); } + + // Insert to the cache so that moka picks up on the changed size, even though the actual + // value (the Arc) is the same + self.storage_cache.insert(address, account_cache); } /// Invalidate storage for specific account @@ -379,12 +379,25 @@ impl ExecutionCache { /// ## Error Handling /// /// Returns an error if the state updates are inconsistent and should be discarded. 
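// Illustrative sketch (not part of the diff): tracing the per-account storage-cache
// weigher defined further below in this file (39_000 base weight plus 218 per cached
// slot, compared against the builder's `max_capacity`). The slot counts are arbitrary
// examples.
//
//     weight(account with   0 slots) = 39_000 + 218 *   0 = 39_000
//     weight(account with  10 slots) = 39_000 + 218 *  10 = 41_180
//     weight(account with 100 slots) = 39_000 + 218 * 100 = 60_800
//
// Because this weight is only recomputed when an entry is inserted, `insert_storage`
// above writes the same `Arc<AccountStorageCache>` back into `storage_cache` after adding
// slots, so the cache sees the larger weight instead of the stale one.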
+ #[instrument(level = "debug", target = "engine::caching", skip_all)] pub(crate) fn insert_state(&self, state_updates: &BundleState) -> Result<(), ()> { + let _enter = + debug_span!(target: "engine::tree", "contracts", len = state_updates.contracts.len()) + .entered(); // Insert bytecodes for (code_hash, bytecode) in &state_updates.contracts { self.code_cache.insert(*code_hash, Some(Bytecode(bytecode.clone()))); } - + drop(_enter); + + let _enter = debug_span!( + target: "engine::tree", + "accounts", + accounts = state_updates.state.len(), + storages = + state_updates.state.values().map(|account| account.storage.len()).sum::() + ) + .entered(); for (addr, account) in &state_updates.state { // If the account was not modified, as in not changed and not destroyed, then we have // nothing to do w.r.t. this particular account and can move on @@ -452,7 +465,7 @@ impl ExecutionCacheBuilder { const TIME_TO_IDLE: Duration = Duration::from_secs(3600); // 1 hour let storage_cache = CacheBuilder::new(self.storage_cache_entries) - .weigher(|_key: &Address, value: &AccountStorageCache| -> u32 { + .weigher(|_key: &Address, value: &Arc| -> u32 { // values based on results from measure_storage_cache_overhead test let base_weight = 39_000; let slots_weight = value.len() * 218; @@ -464,9 +477,9 @@ impl ExecutionCacheBuilder { .build_with_hasher(DefaultHashBuilder::default()); let account_cache = CacheBuilder::new(self.account_cache_entries) - .weigher(|_key: &Address, _value: &Option| -> u32 { + .weigher(|_key: &Address, value: &Option| -> u32 { // Account has a fixed size (none, balance,code_hash) - size_of::>() as u32 + 20 + size_of_val(value) as u32 }) .max_capacity(account_cache_size) .time_to_live(EXPIRY_TIME) @@ -475,13 +488,19 @@ impl ExecutionCacheBuilder { let code_cache = CacheBuilder::new(self.code_cache_entries) .weigher(|_key: &B256, value: &Option| -> u32 { - match value { + let code_size = match value { Some(bytecode) => { - // base weight + actual bytecode size - (40 + bytecode.len()) as u32 + // base weight + actual (padded) bytecode size + size of the jump table + (size_of_val(value) + + bytecode.bytecode().len() + + bytecode + .legacy_jump_table() + .map(|table| table.as_slice().len()) + .unwrap_or_default()) as u32 } - None => 8, // size of None variant - } + None => size_of_val(value) as u32, + }; + 32 + code_size }) .max_capacity(code_cache_size) .time_to_live(EXPIRY_TIME) diff --git a/crates/engine/tree/src/tree/error.rs b/crates/engine/tree/src/tree/error.rs index f7b1111df06..8589bc59d3d 100644 --- a/crates/engine/tree/src/tree/error.rs +++ b/crates/engine/tree/src/tree/error.rs @@ -1,7 +1,6 @@ //! Internal errors for the tree module. use alloy_consensus::BlockHeader; -use alloy_primitives::B256; use reth_consensus::ConsensusError; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError}; use reth_evm::execute::InternalBlockExecutionError; @@ -19,20 +18,6 @@ pub enum AdvancePersistenceError { /// A provider error #[error(transparent)] Provider(#[from] ProviderError), - /// Missing ancestor. - /// - /// This error occurs when we need to compute the state root for a block with missing trie - /// updates, but the ancestor block is not available. State root computation requires the state - /// from the parent block as a starting point. - /// - /// A block may be missing the trie updates when it's a fork chain block building on top of the - /// historical database state. 
Since we don't store the historical trie state, we cannot - /// generate the trie updates for it until the moment when database is unwound to the canonical - /// chain. - /// - /// Also see [`reth_chain_state::ExecutedTrieUpdates::Missing`]. - #[error("Missing ancestor with hash {0}")] - MissingAncestor(B256), } #[derive(thiserror::Error)] diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 4d3310543d1..3adb16b0f6b 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -1,11 +1,13 @@ -use crate::tree::MeteredStateHook; +use crate::tree::{error::InsertBlockFatalError, MeteredStateHook, TreeOutcome}; use alloy_consensus::transaction::TxHashRef; use alloy_evm::{ block::{BlockExecutor, ExecutableTx}, Evm, }; +use alloy_rpc_types_engine::{PayloadStatus, PayloadStatusEnum}; use core::borrow::BorrowMut; -use reth_errors::BlockExecutionError; +use reth_engine_primitives::{ForkchoiceStatus, OnForkChoiceUpdated}; +use reth_errors::{BlockExecutionError, ProviderError}; use reth_evm::{metrics::ExecutorMetrics, OnStateHook}; use reth_execution_types::BlockExecutionOutput; use reth_metrics::{ @@ -15,7 +17,7 @@ use reth_metrics::{ use reth_primitives_traits::SignedTransaction; use reth_trie::updates::TrieUpdates; use revm::database::{states::bundle_state::BundleRetention, State}; -use std::time::Instant; +use std::time::{Duration, Instant}; use tracing::{debug_span, trace}; /// Metrics for the `EngineApi`. @@ -79,7 +81,7 @@ impl EngineApiMetrics { for tx in transactions { let tx = tx?; let span = - debug_span!(target: "engine::tree", "execute_tx", tx_hash=?tx.tx().tx_hash()); + debug_span!(target: "engine::tree", "execute tx", tx_hash=?tx.tx().tx_hash()); let _enter = span.enter(); trace!(target: "engine::tree", "Executing transaction"); executor.execute_transaction(tx)?; @@ -122,26 +124,30 @@ pub(crate) struct TreeMetrics { pub reorgs: Counter, /// The latest reorg depth pub latest_reorg_depth: Gauge, + /// The current safe block height (this is required by optimism) + pub safe_block_height: Gauge, + /// The current finalized block height (this is required by optimism) + pub finalized_block_height: Gauge, } /// Metrics for the `EngineApi`. #[derive(Metrics)] #[metrics(scope = "consensus.engine.beacon")] pub(crate) struct EngineMetrics { + /// Engine API forkchoiceUpdated response type metrics + #[metric(skip)] + pub(crate) forkchoice_updated: ForkchoiceUpdatedMetrics, + /// Engine API newPayload response type metrics + #[metric(skip)] + pub(crate) new_payload: NewPayloadStatusMetrics, /// How many executed blocks are currently stored. pub(crate) executed_blocks: Gauge, /// How many already executed blocks were directly inserted into the tree. pub(crate) inserted_already_executed_blocks: Counter, /// The number of times the pipeline was run. pub(crate) pipeline_runs: Counter, - /// The total count of forkchoice updated messages received. - pub(crate) forkchoice_updated_messages: Counter, - /// The total count of forkchoice updated messages with payload received. - pub(crate) forkchoice_with_attributes_updated_messages: Counter, /// Newly arriving block hash is not present in executed blocks cache storage pub(crate) executed_new_block_cache_miss: Counter, - /// The total count of new payload messages received. 
- pub(crate) new_payload_messages: Counter, /// Histogram of persistence operation durations (in seconds) pub(crate) persistence_duration: Histogram, /// Tracks the how often we failed to deliver a newPayload response. @@ -156,6 +162,115 @@ pub(crate) struct EngineMetrics { pub(crate) block_insert_total_duration: Histogram, } +/// Metrics for engine forkchoiceUpdated responses. +#[derive(Metrics)] +#[metrics(scope = "consensus.engine.beacon")] +pub(crate) struct ForkchoiceUpdatedMetrics { + /// The total count of forkchoice updated messages received. + pub(crate) forkchoice_updated_messages: Counter, + /// The total count of forkchoice updated messages with payload received. + pub(crate) forkchoice_with_attributes_updated_messages: Counter, + /// The total count of forkchoice updated messages that we responded to with + /// [`Valid`](ForkchoiceStatus::Valid). + pub(crate) forkchoice_updated_valid: Counter, + /// The total count of forkchoice updated messages that we responded to with + /// [`Invalid`](ForkchoiceStatus::Invalid). + pub(crate) forkchoice_updated_invalid: Counter, + /// The total count of forkchoice updated messages that we responded to with + /// [`Syncing`](ForkchoiceStatus::Syncing). + pub(crate) forkchoice_updated_syncing: Counter, + /// The total count of forkchoice updated messages that were unsuccessful, i.e. we responded + /// with an error type that is not a [`PayloadStatusEnum`]. + pub(crate) forkchoice_updated_error: Counter, + /// Latency for the forkchoice updated calls. + pub(crate) forkchoice_updated_latency: Histogram, + /// Latency for the last forkchoice updated call. + pub(crate) forkchoice_updated_last: Gauge, +} + +impl ForkchoiceUpdatedMetrics { + /// Increment the forkchoiceUpdated counter based on the given result + pub(crate) fn update_response_metrics( + &self, + has_attrs: bool, + result: &Result, ProviderError>, + elapsed: Duration, + ) { + match result { + Ok(outcome) => match outcome.outcome.forkchoice_status() { + ForkchoiceStatus::Valid => self.forkchoice_updated_valid.increment(1), + ForkchoiceStatus::Invalid => self.forkchoice_updated_invalid.increment(1), + ForkchoiceStatus::Syncing => self.forkchoice_updated_syncing.increment(1), + }, + Err(_) => self.forkchoice_updated_error.increment(1), + } + self.forkchoice_updated_messages.increment(1); + if has_attrs { + self.forkchoice_with_attributes_updated_messages.increment(1); + } + self.forkchoice_updated_latency.record(elapsed); + self.forkchoice_updated_last.set(elapsed); + } +} + +/// Metrics for engine newPayload responses. +#[derive(Metrics)] +#[metrics(scope = "consensus.engine.beacon")] +pub(crate) struct NewPayloadStatusMetrics { + /// The total count of new payload messages received. + pub(crate) new_payload_messages: Counter, + /// The total count of new payload messages that we responded to with + /// [Valid](PayloadStatusEnum::Valid). + pub(crate) new_payload_valid: Counter, + /// The total count of new payload messages that we responded to with + /// [Invalid](PayloadStatusEnum::Invalid). + pub(crate) new_payload_invalid: Counter, + /// The total count of new payload messages that we responded to with + /// [Syncing](PayloadStatusEnum::Syncing). + pub(crate) new_payload_syncing: Counter, + /// The total count of new payload messages that we responded to with + /// [Accepted](PayloadStatusEnum::Accepted). + pub(crate) new_payload_accepted: Counter, + /// The total count of new payload messages that were unsuccessful, i.e. 
we responded with an + /// error type that is not a [`PayloadStatusEnum`]. + pub(crate) new_payload_error: Counter, + /// The total gas of valid new payload messages received. + pub(crate) new_payload_total_gas: Histogram, + /// The gas per second of valid new payload messages received. + pub(crate) new_payload_gas_per_second: Histogram, + /// Latency for the new payload calls. + pub(crate) new_payload_latency: Histogram, + /// Latency for the last new payload call. + pub(crate) new_payload_last: Gauge, +} + +impl NewPayloadStatusMetrics { + /// Increment the newPayload counter based on the given result + pub(crate) fn update_response_metrics( + &self, + result: &Result, InsertBlockFatalError>, + gas_used: u64, + elapsed: Duration, + ) { + match result { + Ok(outcome) => match outcome.outcome.status { + PayloadStatusEnum::Valid => { + self.new_payload_valid.increment(1); + self.new_payload_total_gas.record(gas_used as f64); + self.new_payload_gas_per_second.record(gas_used as f64 / elapsed.as_secs_f64()); + } + PayloadStatusEnum::Syncing => self.new_payload_syncing.increment(1), + PayloadStatusEnum::Accepted => self.new_payload_accepted.increment(1), + PayloadStatusEnum::Invalid { .. } => self.new_payload_invalid.increment(1), + }, + Err(_) => self.new_payload_error.increment(1), + } + self.new_payload_messages.increment(1); + self.new_payload_latency.record(elapsed); + self.new_payload_last.set(elapsed); + } +} + /// Metrics for non-execution related block validation. #[derive(Metrics)] #[metrics(scope = "sync.block_validation")] @@ -310,6 +425,7 @@ mod tests { receipts: vec![], requests: Requests::default(), gas_used: 1000, + blob_gas_used: 0, }, )) } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 24bdc069f09..7b73d844729 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -13,10 +13,8 @@ use alloy_rpc_types_engine::{ ForkchoiceState, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; use error::{InsertBlockError, InsertBlockFatalError}; -use persistence_state::CurrentPersistenceAction; use reth_chain_state::{ - CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, - MemoryOverlayStateProvider, NewCanonicalChain, + CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, }; use reth_consensus::{Consensus, FullConsensus}; use reth_engine_primitives::{ @@ -31,14 +29,11 @@ use reth_payload_primitives::{ }; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; use reth_provider::{ - providers::ConsistentDbView, BlockNumReader, BlockReader, DBProvider, DatabaseProviderFactory, - HashedPostStateProvider, ProviderError, StateProviderBox, StateProviderFactory, StateReader, - StateRootProvider, TransactionVariant, + BlockReader, DatabaseProviderFactory, HashedPostStateProvider, ProviderError, StateProviderBox, + StateProviderFactory, StateReader, TransactionVariant, TrieReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; -use reth_trie::{HashedPostState, TrieInput}; -use reth_trie_db::DatabaseHashedPostState; use revm::state::EvmState; use state::TreeState; use std::{ @@ -78,7 +73,6 @@ pub use payload_processor::*; pub use payload_validator::{BasicEngineValidator, EngineValidator}; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::TreeConfig; -use reth_trie::KeccakKeyHasher; pub mod state; @@ -101,7 +95,7 @@ pub struct StateProviderBuilder 
{ /// The historical block hash to fetch state from. historical: B256, /// The blocks that form the chain from historical to target and are in memory. - overlay: Option>>, + overlay: Option>>, } impl StateProviderBuilder { @@ -110,7 +104,7 @@ impl StateProviderBuilder { pub const fn new( provider_factory: P, historical: B256, - overlay: Option>>, + overlay: Option>>, ) -> Self { Self { provider_factory, historical, overlay } } @@ -318,6 +312,7 @@ where + StateProviderFactory + StateReader + HashedPostStateProvider + + TrieReader + Clone + 'static,

::Provider: @@ -500,13 +495,17 @@ where /// /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and /// returns an error if an internal error occurred. - #[instrument(level = "trace", skip_all, fields(block_hash = %payload.block_hash(), block_num = %payload.block_number(),), target = "engine::tree")] + #[instrument( + level = "debug", + target = "engine::tree", + skip_all, + fields(block_hash = %payload.block_hash(), block_num = %payload.block_number()), + )] fn on_new_payload( &mut self, payload: T::ExecutionData, ) -> Result, InsertBlockFatalError> { trace!(target: "engine::tree", "invoked new payload"); - self.metrics.engine.new_payload_messages.increment(1); // start timing for the new payload process let start = Instant::now(); @@ -581,6 +580,7 @@ where /// - `Valid`: Payload successfully validated and inserted /// - `Syncing`: Parent missing, payload buffered for later /// - Error status: Payload is invalid + #[instrument(level = "debug", target = "engine::tree", skip_all)] fn try_insert_payload( &mut self, payload: T::ExecutionData, @@ -823,7 +823,7 @@ where for block_num in (new_head_number + 1)..=current_head_number { if let Some(block_state) = self.canonical_in_memory_state.state_by_number(block_num) { - let executed_block = block_state.block_ref().block.clone(); + let executed_block = block_state.block_ref().clone(); old_blocks.push(executed_block); debug!( target: "engine::tree", @@ -855,14 +855,9 @@ where // Try to load the canonical ancestor's block match self.canonical_block_by_hash(new_head_hash)? { Some(executed_block) => { - let block_with_trie = ExecutedBlockWithTrieUpdates { - block: executed_block, - trie: ExecutedTrieUpdates::Missing, - }; - // Perform the reorg to properly handle the unwind self.canonical_in_memory_state.update_chain(NewCanonicalChain::Reorg { - new: vec![block_with_trie], + new: vec![executed_block], old: old_blocks, }); @@ -915,13 +910,8 @@ where // Try to load the block from storage if let Some(executed_block) = self.canonical_block_by_hash(block_hash)? { - let block_with_trie = ExecutedBlockWithTrieUpdates { - block: executed_block, - trie: ExecutedTrieUpdates::Missing, - }; - self.canonical_in_memory_state - .update_chain(NewCanonicalChain::Commit { new: vec![block_with_trie] }); + .update_chain(NewCanonicalChain::Commit { new: vec![executed_block] }); debug!( target: "engine::tree", @@ -976,29 +966,6 @@ where Ok(true) } - /// Returns the persisting kind for the input block. - fn persisting_kind_for(&self, block: BlockWithParent) -> PersistingKind { - // Check that we're currently persisting. - let Some(action) = self.persistence_state.current_action() else { - return PersistingKind::NotPersisting - }; - // Check that the persistince action is saving blocks, not removing them. - let CurrentPersistenceAction::SavingBlocks { highest } = action else { - return PersistingKind::PersistingNotDescendant - }; - - // The block being validated can only be a descendant if its number is higher than - // the highest block persisting. Otherwise, it's likely a fork of a lower block. - if block.block.number > highest.number && - self.state.tree_state.is_descendant(*highest, block) - { - return PersistingKind::PersistingDescendant - } - - // In all other cases, the block is not a descendant. - PersistingKind::PersistingNotDescendant - } - /// Invoked when we receive a new forkchoice update message. 
Calls into the blockchain tree /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid /// chain. @@ -1007,7 +974,7 @@ where /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). /// /// Returns an error if an internal error occurred like a database error. - #[instrument(level = "trace", skip_all, fields(head = % state.head_block_hash, safe = % state.safe_block_hash,finalized = % state.finalized_block_hash), target = "engine::tree")] + #[instrument(level = "debug", target = "engine::tree", skip_all, fields(head = % state.head_block_hash, safe = % state.safe_block_hash,finalized = % state.finalized_block_hash))] fn on_forkchoice_updated( &mut self, state: ForkchoiceState, @@ -1015,23 +982,75 @@ where version: EngineApiMessageVersion, ) -> ProviderResult> { trace!(target: "engine::tree", ?attrs, "invoked forkchoice update"); - self.metrics.engine.forkchoice_updated_messages.increment(1); - if attrs.is_some() { - self.metrics.engine.forkchoice_with_attributes_updated_messages.increment(1); + + // Record metrics + self.record_forkchoice_metrics(); + + // Pre-validation of forkchoice state + if let Some(early_result) = self.validate_forkchoice_state(state)? { + return Ok(TreeOutcome::new(early_result)); } + + // Return early if we are on the correct fork + if let Some(result) = self.handle_canonical_head(state, &attrs, version)? { + return Ok(result); + } + + // Attempt to apply a chain update when the head differs from our canonical chain. + // This handles reorgs and chain extensions by making the specified head canonical. + if let Some(result) = self.apply_chain_update(state, &attrs, version)? { + return Ok(result); + } + + // Fallback that ensures to catch up to the network's state. + self.handle_missing_block(state) + } + + /// Records metrics for forkchoice updated calls + fn record_forkchoice_metrics(&self) { self.canonical_in_memory_state.on_forkchoice_update_received(); + } - if let Some(on_updated) = self.pre_validate_forkchoice_update(state)? { - return Ok(TreeOutcome::new(on_updated)) + /// Pre-validates the forkchoice state and returns early if validation fails. + /// + /// Returns `Some(OnForkChoiceUpdated)` if validation fails and an early response should be + /// returned. Returns `None` if validation passes and processing should continue. + fn validate_forkchoice_state( + &mut self, + state: ForkchoiceState, + ) -> ProviderResult> { + if state.head_block_hash.is_zero() { + return Ok(Some(OnForkChoiceUpdated::invalid_state())); } - let valid_outcome = |head| { - TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( - PayloadStatusEnum::Valid, - Some(head), - ))) - }; + // Check if the new head hash is connected to any ancestor that we previously marked as + // invalid + let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); + if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? { + return Ok(Some(OnForkChoiceUpdated::with_invalid(status))); + } + + if !self.backfill_sync_state.is_idle() { + // We can only process new forkchoice updates if the pipeline is idle, since it requires + // exclusive access to the database + trace!(target: "engine::tree", "Pipeline is syncing, skipping forkchoice update"); + return Ok(Some(OnForkChoiceUpdated::syncing())); + } + + Ok(None) + } + /// Handles the case where the forkchoice head is already canonical. 
+ /// + /// Returns `Some(TreeOutcome)` if the head is already canonical and + /// processing is complete. Returns `None` if the head is not canonical and processing + /// should continue. + fn handle_canonical_head( + &self, + state: ForkchoiceState, + attrs: &Option, // Changed to reference + version: EngineApiMessageVersion, + ) -> ProviderResult>> { // Process the forkchoice update by trying to make the head block canonical // // We can only process this forkchoice update if: @@ -1046,34 +1065,58 @@ where // - emitting a canonicalization event for the new chain (including reorg) // - if we have payload attributes, delegate them to the payload service - // 1. ensure we have a new head block - if self.state.tree_state.canonical_block_hash() == state.head_block_hash { - trace!(target: "engine::tree", "fcu head hash is already canonical"); + if self.state.tree_state.canonical_block_hash() != state.head_block_hash { + return Ok(None); + } - // update the safe and finalized blocks and ensure their values are valid - if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) { - // safe or finalized hashes are invalid - return Ok(TreeOutcome::new(outcome)) - } + trace!(target: "engine::tree", "fcu head hash is already canonical"); - // we still need to process payload attributes if the head is already canonical - if let Some(attr) = attrs { - let tip = self - .sealed_header_by_hash(self.state.tree_state.canonical_block_hash())? - .ok_or_else(|| { - // If we can't find the canonical block, then something is wrong and we need - // to return an error - ProviderError::HeaderNotFound(state.head_block_hash.into()) - })?; - let updated = self.process_payload_attributes(attr, &tip, state, version); - return Ok(TreeOutcome::new(updated)) - } + // Update the safe and finalized blocks and ensure their values are valid + if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) { + // safe or finalized hashes are invalid + return Ok(Some(TreeOutcome::new(outcome))); + } - // the head block is already canonical - return Ok(valid_outcome(state.head_block_hash)) + // Process payload attributes if the head is already canonical + if let Some(attr) = attrs { + let tip = self + .sealed_header_by_hash(self.state.tree_state.canonical_block_hash())? + .ok_or_else(|| { + // If we can't find the canonical block, then something is wrong and we need + // to return an error + ProviderError::HeaderNotFound(state.head_block_hash.into()) + })?; + // Clone only when we actually need to process the attributes + let updated = self.process_payload_attributes(attr.clone(), &tip, state, version); + return Ok(Some(TreeOutcome::new(updated))); } - // 2. check if the head is already part of the canonical chain + // The head block is already canonical + let outcome = TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( + PayloadStatusEnum::Valid, + Some(state.head_block_hash), + ))); + Ok(Some(outcome)) + } + + /// Applies chain update for the new head block and processes payload attributes. + /// + /// This method handles the case where the forkchoice head differs from our current canonical + /// head. It attempts to make the specified head block canonical by: + /// - Checking if the head is already part of the canonical chain + /// - Applying chain reorganizations (reorgs) if necessary + /// - Processing payload attributes if provided + /// - Returning the appropriate forkchoice update response + /// + /// Returns `Some(TreeOutcome)` if a chain update was successfully applied. 
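The refactor above breaks `on_forkchoice_updated` into helpers that return `Ok(Some(outcome))` to short-circuit and `Ok(None)` to fall through to the next step. A minimal sketch of that early-return shape, using made-up stand-in types rather than the real engine types:

struct Outcome(&'static str);

fn validate(head_is_zero: bool) -> Result<Option<Outcome>, String> {
    // Step 1: pre-validation can short-circuit with an invalid-state response.
    Ok(head_is_zero.then(|| Outcome("invalid forkchoice state")))
}

fn handle_canonical_head(already_canonical: bool) -> Result<Option<Outcome>, String> {
    // Step 2: if the head is already canonical, respond VALID without further work.
    Ok(already_canonical.then(|| Outcome("valid: head already canonical")))
}

fn on_forkchoice_updated(head_is_zero: bool, already_canonical: bool) -> Result<Outcome, String> {
    if let Some(outcome) = validate(head_is_zero)? {
        return Ok(outcome);
    }
    if let Some(outcome) = handle_canonical_head(already_canonical)? {
        return Ok(outcome);
    }
    // Steps 3 and 4: try to apply a chain update, then fall back to syncing.
    Ok(Outcome("syncing: head missing, download requested"))
}

fn main() {
    assert_eq!(on_forkchoice_updated(false, true).unwrap().0, "valid: head already canonical");
}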
+ /// Returns `None` if no chain update was needed or possible. + fn apply_chain_update( + &mut self, + state: ForkchoiceState, + attrs: &Option, + version: EngineApiMessageVersion, + ) -> ProviderResult>> { + // Check if the head is already part of the canonical chain if let Ok(Some(canonical_header)) = self.find_canonical_header(state.head_block_hash) { debug!(target: "engine::tree", head = canonical_header.number(), "fcu head block is already canonical"); @@ -1084,9 +1127,14 @@ where { if let Some(attr) = attrs { debug!(target: "engine::tree", head = canonical_header.number(), "handling payload attributes for canonical head"); - let updated = - self.process_payload_attributes(attr, &canonical_header, state, version); - return Ok(TreeOutcome::new(updated)) + // Clone only when we actually need to process the attributes + let updated = self.process_payload_attributes( + attr.clone(), + &canonical_header, + state, + version, + ); + return Ok(Some(TreeOutcome::new(updated))); } // At this point, no alternative block has been triggered, so we need effectively @@ -1095,52 +1143,75 @@ where // canonical ancestor. This ensures that state providers and the // transaction pool operate with the correct chain state after // forkchoice update processing. + if self.config.unwind_canonical_header() { self.update_latest_block_to_canonical_ancestor(&canonical_header)?; } } - // 2. Client software MAY skip an update of the forkchoice state and MUST NOT begin a - // payload build process if `forkchoiceState.headBlockHash` references a `VALID` - // ancestor of the head of canonical chain, i.e. the ancestor passed payload - // validation process and deemed `VALID`. In the case of such an event, client - // software MUST return `{payloadStatus: {status: VALID, latestValidHash: - // forkchoiceState.headBlockHash, validationError: null}, payloadId: null}` + // According to the Engine API specification, client software MAY skip an update of the + // forkchoice state and MUST NOT begin a payload build process if + // `forkchoiceState.headBlockHash` references a `VALID` ancestor of the head + // of canonical chain, i.e. the ancestor passed payload validation process + // and deemed `VALID`. In the case of such an event, client software MUST + // return `{payloadStatus: {status: VALID, latestValidHash: + // forkchoiceState.headBlockHash, validationError: null}, payloadId: null}` + + // The head block is already canonical and we're not processing payload attributes, + // so we're not triggering a payload job and can return right away - // the head block is already canonical, so we're not triggering a payload job and can - // return right away - return Ok(valid_outcome(state.head_block_hash)) + let outcome = TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( + PayloadStatusEnum::Valid, + Some(state.head_block_hash), + ))); + return Ok(Some(outcome)); } - // 3. ensure we can apply a new chain update for the head block + // Ensure we can apply a new chain update for the head block if let Some(chain_update) = self.on_new_head(state.head_block_hash)? 
{ let tip = chain_update.tip().clone_sealed_header(); self.on_canonical_chain_update(chain_update); - // update the safe and finalized blocks and ensure their values are valid + // Update the safe and finalized blocks and ensure their values are valid if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) { // safe or finalized hashes are invalid - return Ok(TreeOutcome::new(outcome)) + return Ok(Some(TreeOutcome::new(outcome))); } if let Some(attr) = attrs { - let updated = self.process_payload_attributes(attr, &tip, state, version); - return Ok(TreeOutcome::new(updated)) + // Clone only when we actually need to process the attributes + let updated = self.process_payload_attributes(attr.clone(), &tip, state, version); + return Ok(Some(TreeOutcome::new(updated))); } - return Ok(valid_outcome(state.head_block_hash)) + let outcome = TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( + PayloadStatusEnum::Valid, + Some(state.head_block_hash), + ))); + return Ok(Some(outcome)); } - // 4. we don't have the block to perform the update - // we assume the FCU is valid and at least the head is missing, + Ok(None) + } + + /// Handles the case where the head block is missing and needs to be downloaded. + /// + /// This is the fallback case when all other forkchoice update scenarios have been exhausted. + /// Returns a `TreeOutcome` with syncing status and download event. + fn handle_missing_block( + &self, + state: ForkchoiceState, + ) -> ProviderResult> { + // We don't have the block to perform the forkchoice update + // We assume the FCU is valid and at least the head is missing, // so we need to start syncing to it // // find the appropriate target to sync to, if we don't have the safe block hash then we // start syncing to the safe block via backfill first let target = if self.state.forkchoice_state_tracker.is_empty() && - // check that safe block is valid and missing - !state.safe_block_hash.is_zero() && - self.find_canonical_header(state.safe_block_hash).ok().flatten().is_none() + // check that safe block is valid and missing + !state.safe_block_hash.is_zero() && + self.find_canonical_header(state.safe_block_hash).ok().flatten().is_none() { debug!(target: "engine::tree", "missing safe block on initial FCU, downloading safe block"); state.safe_block_hash @@ -1197,7 +1268,7 @@ where /// Helper method to save blocks and set the persistence state. This ensures we keep track of /// the current persistence action while we're saving blocks. 
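`persist_blocks` below hands the batch to the persistence service along with an acknowledgement channel, so the tree can track that a save is in flight until the service reports back. A rough sketch of that request/ack shape using plain std channels; the real service, message types, and oneshot channel differ:

use std::sync::mpsc;
use std::thread;

// Hypothetical stand-ins for the persistence request and its acknowledgement.
struct SaveBlocks {
    numbers: Vec<u64>,
    ack: mpsc::Sender<u64>,
}

fn spawn_persistence_service() -> mpsc::Sender<SaveBlocks> {
    let (tx, rx) = mpsc::channel::<SaveBlocks>();
    thread::spawn(move || {
        for req in rx {
            // ... blocks would be written to disk here ...
            let highest = req.numbers.iter().copied().max().unwrap_or_default();
            // Ack with the highest block number that was persisted.
            let _ = req.ack.send(highest);
        }
    });
    tx
}

fn main() {
    let service = spawn_persistence_service();
    let (ack_tx, ack_rx) = mpsc::channel();
    // "Persistence in progress" starts here and ends when the ack arrives.
    service.send(SaveBlocks { numbers: vec![101, 102, 103], ack: ack_tx }).unwrap();
    let last_persisted = ack_rx.recv().unwrap();
    assert_eq!(last_persisted, 103);
}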
- fn persist_blocks(&mut self, blocks_to_persist: Vec>) { + fn persist_blocks(&mut self, blocks_to_persist: Vec>) { if blocks_to_persist.is_empty() { debug!(target: "engine::tree", "Returned empty set of blocks to persist"); return @@ -1210,7 +1281,7 @@ where .map(|b| b.recovered_block().num_hash()) .expect("Checked non-empty persisting blocks"); - debug!(target: "engine::tree", blocks = ?blocks_to_persist.iter().map(|block| block.recovered_block().num_hash()).collect::>(), "Persisting blocks"); + debug!(target: "engine::tree", count=blocks_to_persist.len(), blocks = ?blocks_to_persist.iter().map(|block| block.recovered_block().num_hash()).collect::>(), "Persisting blocks"); let (tx, rx) = oneshot::channel(); let _ = self.persistence.save_blocks(blocks_to_persist, tx); @@ -1317,6 +1388,9 @@ where tx, version, } => { + let has_attrs = payload_attrs.is_some(); + + let start = Instant::now(); let mut output = self.on_forkchoice_updated(state, payload_attrs, version); @@ -1336,6 +1410,12 @@ where self.on_maybe_tree_event(res.event.take())?; } + let elapsed = start.elapsed(); + self.metrics + .engine + .forkchoice_updated + .update_response_metrics(has_attrs, &output, elapsed); + if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(Into::into)) { @@ -1347,7 +1427,14 @@ where } } BeaconEngineMessage::NewPayload { payload, tx } => { + let start = Instant::now(); + let gas_used = payload.gas_used(); let mut output = self.on_new_payload(payload); + let elapsed = start.elapsed(); + self.metrics + .engine + .new_payload + .update_response_metrics(&output, gas_used, elapsed); let maybe_event = output.as_mut().ok().and_then(|out| out.event.take()); @@ -1498,6 +1585,32 @@ where return Ok(()) }; + // Check if there are more blocks to sync between current head and FCU target + if let Some(lowest_buffered) = + self.state.buffer.lowest_ancestor(&sync_target_state.head_block_hash) + { + let current_head_num = self.state.tree_state.current_canonical_head.number; + let target_head_num = lowest_buffered.number(); + + if let Some(distance) = self.distance_from_local_tip(current_head_num, target_head_num) + { + // There are blocks between current head and FCU target, download them + debug!( + target: "engine::tree", + %current_head_num, + %target_head_num, + %distance, + "Backfill complete, downloading remaining blocks to reach FCU target" + ); + + self.emit_event(EngineApiEvent::Download(DownloadRequest::BlockRange( + lowest_buffered.parent_hash(), + distance, + ))); + return Ok(()); + } + } + // try to close the gap by executing buffered blocks that are child blocks of the new head self.try_connect_buffered_blocks(self.state.tree_state.current_canonical_head) } @@ -1588,17 +1701,9 @@ where /// Returns a batch of consecutive canonical blocks to persist in the range /// `(last_persisted_number .. canonical_head - threshold]`. The expected /// order is oldest -> newest. - /// - /// If any blocks are missing trie updates, all blocks are persisted, not taking `threshold` - /// into account. - /// - /// For those blocks that didn't have the trie updates calculated, runs the state root - /// calculation, and saves the trie updates. - /// - /// Returns an error if the state root calculation fails. 
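With the trie-update special case removed, `get_canonical_blocks_to_persist` always persists the window `(last_persisted_number .. canonical_head - memory_block_buffer_target]`, oldest first. A small worked sketch of that window with made-up numbers:

fn blocks_to_persist(last_persisted: u64, canonical_head: u64, buffer_target: u64) -> Vec<u64> {
    // Keep the most recent `buffer_target` blocks in memory; persist everything older.
    let target = canonical_head.saturating_sub(buffer_target);
    // The range is (last_persisted, target], returned oldest -> newest.
    (last_persisted + 1..=target).collect()
}

fn main() {
    // head = 1_000, keep 2 blocks in memory, last persisted = 990
    // => blocks 991..=998 are persisted, oldest first.
    assert_eq!(blocks_to_persist(990, 1_000, 2), (991..=998).collect::<Vec<_>>());
}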
fn get_canonical_blocks_to_persist( - &mut self, - ) -> Result>, AdvancePersistenceError> { + &self, + ) -> Result>, AdvancePersistenceError> { // We will calculate the state root using the database, so we need to be sure there are no // changes debug_assert!(!self.persistence_state.in_progress()); @@ -1607,27 +1712,16 @@ where let mut current_hash = self.state.tree_state.canonical_block_hash(); let last_persisted_number = self.persistence_state.last_persisted_block.number; let canonical_head_number = self.state.tree_state.canonical_block_number(); - let all_blocks_have_trie_updates = self - .state - .tree_state - .blocks_by_hash - .values() - .all(|block| block.trie_updates().is_some()); - - let target_number = if all_blocks_have_trie_updates { - // Persist only up to block buffer target if all blocks have trie updates - canonical_head_number.saturating_sub(self.config.memory_block_buffer_target()) - } else { - // Persist all blocks if any block is missing trie updates - canonical_head_number - }; + + // Persist only up to block buffer target + let target_number = + canonical_head_number.saturating_sub(self.config.memory_block_buffer_target()); debug!( target: "engine::tree", ?current_hash, ?last_persisted_number, ?canonical_head_number, - ?all_blocks_have_trie_updates, ?target_number, "Returning canonical blocks to persist" ); @@ -1646,48 +1740,6 @@ where // Reverse the order so that the oldest block comes first blocks_to_persist.reverse(); - // Calculate missing trie updates - for block in &mut blocks_to_persist { - if block.trie.is_present() { - continue - } - - debug!( - target: "engine::tree", - block = ?block.recovered_block().num_hash(), - "Calculating trie updates before persisting" - ); - - let provider = self - .state_provider_builder(block.recovered_block().parent_hash())? - .ok_or(AdvancePersistenceError::MissingAncestor( - block.recovered_block().parent_hash(), - ))? - .build()?; - - let mut trie_input = self.compute_trie_input( - self.persisting_kind_for(block.recovered_block.block_with_parent()), - self.provider.database_provider_ro()?, - block.recovered_block().parent_hash(), - None, - )?; - // Extend with block we are generating trie updates for. - trie_input.append_ref(block.hashed_state()); - let (_root, updates) = provider.state_root_from_nodes_with_updates(trie_input)?; - debug_assert_eq!(_root, block.recovered_block().state_root()); - - // Update trie updates in both tree state and blocks to persist that we return - let trie_updates = Arc::new(updates); - let tree_state_block = self - .state - .tree_state - .blocks_by_hash - .get_mut(&block.recovered_block().hash()) - .expect("blocks to persist are constructed from tree state blocks"); - tree_state_block.trie.set_present(trie_updates.clone()); - block.trie.set_present(trie_updates); - } - Ok(blocks_to_persist) } @@ -1726,7 +1778,7 @@ where trace!(target: "engine::tree", ?hash, "Fetching executed block by hash"); // check memory first if let Some(block) = self.state.tree_state.executed_block_by_hash(hash) { - return Ok(Some(block.block.clone())) + return Ok(Some(block.clone())) } let (block, senders) = self @@ -1739,11 +1791,13 @@ where .get_state(block.header().number())? 
.ok_or_else(|| ProviderError::StateForNumberNotFound(block.header().number()))?; let hashed_state = self.provider.hashed_post_state(execution_output.state()); + let trie_updates = self.provider.get_block_trie_updates(block.number())?; Ok(Some(ExecutedBlock { recovered_block: Arc::new(RecoveredBlock::new_sealed(block, senders)), execution_output: Arc::new(execution_output), hashed_state: Arc::new(hashed_state), + trie_updates: Arc::new(trie_updates.into()), })) } @@ -1929,8 +1983,18 @@ where fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult> { // check if the head was previously marked as invalid let Some(header) = self.state.invalid_headers.get(&head) else { return Ok(None) }; - // populate the latest valid hash field - Ok(Some(self.prepare_invalid_response(header.parent)?)) + + // Try to prepare invalid response, but handle errors gracefully + match self.prepare_invalid_response(header.parent) { + Ok(status) => Ok(Some(status)), + Err(err) => { + debug!(target: "engine::tree", %err, "Failed to prepare invalid response for ancestor check"); + // Return a basic invalid status without latest valid hash + Ok(Some(PayloadStatus::from_status(PayloadStatusEnum::Invalid { + validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(), + }))) + } + } } /// Validate if block is correct and satisfies all the consensus rules that concern the header @@ -1950,7 +2014,7 @@ where } /// Attempts to connect any buffered blocks that are connected to the given parent hash. - #[instrument(level = "trace", skip(self), target = "engine::tree")] + #[instrument(level = "debug", target = "engine::tree", skip(self))] fn try_connect_buffered_blocks( &mut self, parent: BlockNumHash, @@ -2171,25 +2235,7 @@ where self.update_reorg_metrics(old.len()); self.reinsert_reorged_blocks(new.clone()); - // Try reinserting the reorged canonical chain. This is only possible if we have - // `persisted_trie_updates` for those blocks. - let old = old - .iter() - .filter_map(|block| { - let trie = self - .state - .tree_state - .persisted_trie_updates - .get(&block.recovered_block.hash())? - .1 - .clone(); - Some(ExecutedBlockWithTrieUpdates { - block: block.clone(), - trie: ExecutedTrieUpdates::Present(trie), - }) - }) - .collect::>(); - self.reinsert_reorged_blocks(old); + self.reinsert_reorged_blocks(old.clone()); } // update the tracked in-memory state with the new chain @@ -2216,7 +2262,7 @@ where } /// This reinserts any blocks in the new chain that do not already exist in the tree - fn reinsert_reorged_blocks(&mut self, new_chain: Vec>) { + fn reinsert_reorged_blocks(&mut self, new_chain: Vec>) { for block in new_chain { if self .state @@ -2277,7 +2323,7 @@ where /// Returns an event with the appropriate action to take, such as: /// - download more missing blocks /// - try to canonicalize the target if the `block` is the tracked target (head) block. - #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number(),), target = "engine::tree")] + #[instrument(level = "debug", target = "engine::tree", skip_all, fields(block_hash = %block.hash(), block_num = %block.number()))] fn on_downloaded_block( &mut self, block: RecoveredBlock, @@ -2383,15 +2429,12 @@ where /// Returns `InsertPayloadOk::Inserted(BlockStatus::Valid)` on successful execution, /// `InsertPayloadOk::AlreadySeen` if the block already exists, or /// `InsertPayloadOk::Inserted(BlockStatus::Disconnected)` if parent state is missing. 
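Several handlers in this diff switch their tracing spans to `level = "debug"` with an explicit `target`, `skip_all`, and a few recorded `fields(..)`. A minimal illustration of that attribute shape on a hypothetical function (assumes the `tracing` and `tracing-subscriber` crates):

use tracing::{debug, instrument, Level};

#[instrument(level = "debug", target = "engine::tree", skip_all, fields(block_hash = %hash, block_num = %number))]
fn on_new_block(hash: &str, number: u64) {
    // Events emitted here inherit the span's target and recorded fields.
    debug!(target: "engine::tree", "processing block");
}

fn main() {
    tracing_subscriber::fmt().with_max_level(Level::DEBUG).init();
    on_new_block("0xabc", 42);
}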
+ #[instrument(level = "debug", target = "engine::tree", skip_all, fields(block_id))] fn insert_block_or_payload( &mut self, block_id: BlockWithParent, input: Input, - execute: impl FnOnce( - &mut V, - Input, - TreeCtx<'_, N>, - ) -> Result, Err>, + execute: impl FnOnce(&mut V, Input, TreeCtx<'_, N>) -> Result, Err>, convert_to_block: impl FnOnce(&mut Self, Input) -> Result, Err>, ) -> Result where @@ -2452,8 +2495,7 @@ where Ok(is_fork) => is_fork, }; - let ctx = - TreeCtx::new(&mut self.state, &self.persistence_state, &self.canonical_in_memory_state); + let ctx = TreeCtx::new(&mut self.state, &self.canonical_in_memory_state); let start = Instant::now(); @@ -2486,109 +2528,6 @@ where Ok(InsertPayloadOk::Inserted(BlockStatus::Valid)) } - /// Computes the trie input at the provided parent hash. - /// - /// The goal of this function is to take in-memory blocks and generate a [`TrieInput`] that - /// serves as an overlay to the database blocks. - /// - /// It works as follows: - /// 1. Collect in-memory blocks that are descendants of the provided parent hash using - /// [`TreeState::blocks_by_hash`]. - /// 2. If the persistence is in progress, and the block that we're computing the trie input for - /// is a descendant of the currently persisting blocks, we need to be sure that in-memory - /// blocks are not overlapping with the database blocks that may have been already persisted. - /// To do that, we're filtering out in-memory blocks that are lower than the highest database - /// block. - /// 3. Once in-memory blocks are collected and optionally filtered, we compute the - /// [`HashedPostState`] from them. - fn compute_trie_input( - &self, - persisting_kind: PersistingKind, - provider: TP, - parent_hash: B256, - allocated_trie_input: Option, - ) -> ProviderResult { - // get allocated trie input or use a default trie input - let mut input = allocated_trie_input.unwrap_or_default(); - - let best_block_number = provider.best_block_number()?; - - let (mut historical, mut blocks) = self - .state - .tree_state - .blocks_by_hash(parent_hash) - .map_or_else(|| (parent_hash.into(), vec![]), |(hash, blocks)| (hash.into(), blocks)); - - // If the current block is a descendant of the currently persisting blocks, then we need to - // filter in-memory blocks, so that none of them are already persisted in the database. - if persisting_kind.is_descendant() { - // Iterate over the blocks from oldest to newest. - while let Some(block) = blocks.last() { - let recovered_block = block.recovered_block(); - if recovered_block.number() <= best_block_number { - // Remove those blocks that lower than or equal to the highest database - // block. - blocks.pop(); - } else { - // If the block is higher than the best block number, stop filtering, as it's - // the first block that's not in the database. - break - } - } - - historical = if let Some(block) = blocks.last() { - // If there are any in-memory blocks left after filtering, set the anchor to the - // parent of the oldest block. - (block.recovered_block().number() - 1).into() - } else { - // Otherwise, set the anchor to the original provided parent hash. - parent_hash.into() - }; - } - - if blocks.is_empty() { - debug!(target: "engine::tree", %parent_hash, "Parent found on disk"); - } else { - debug!(target: "engine::tree", %parent_hash, %historical, blocks = blocks.len(), "Parent found in memory"); - } - - // Convert the historical block to the block number. - let block_number = provider - .convert_hash_or_number(historical)? 
- .ok_or_else(|| ProviderError::BlockHashNotFound(historical.as_hash().unwrap()))?; - - // Retrieve revert state for historical block. - let revert_state = if block_number == best_block_number { - // We do not check against the `last_block_number` here because - // `HashedPostState::from_reverts` only uses the database tables, and not static files. - debug!(target: "engine::tree", block_number, best_block_number, "Empty revert state"); - HashedPostState::default() - } else { - let revert_state = HashedPostState::from_reverts::( - provider.tx_ref(), - block_number + 1.., - ) - .map_err(ProviderError::from)?; - debug!( - target: "engine::tree", - block_number, - best_block_number, - accounts = revert_state.accounts.len(), - storages = revert_state.storages.len(), - "Non-empty revert state" - ); - revert_state - }; - input.append(revert_state); - - // Extend with contents of parent in-memory blocks. - input.extend_with_blocks( - blocks.iter().rev().map(|block| (block.hashed_state(), block.trie_updates())), - ); - - Ok(input) - } - /// Handles an error that occurred while inserting a block. /// /// If this is a validation error this will mark the block as invalid. @@ -2687,7 +2626,9 @@ where // we're also persisting the finalized block on disk so we can reload it on // restart this is required by optimism which queries the finalized block: let _ = self.persistence.save_finalized_block_number(finalized.number()); - self.canonical_in_memory_state.set_finalized(finalized); + self.canonical_in_memory_state.set_finalized(finalized.clone()); + // Update finalized block height metric + self.metrics.tree.finalized_block_height.set(finalized.number() as f64); } } Err(err) => { @@ -2715,7 +2656,9 @@ where // we're also persisting the safe block on disk so we can reload it on // restart this is required by optimism which queries the safe block: let _ = self.persistence.save_safe_block_number(safe.number()); - self.canonical_in_memory_state.set_safe(safe); + self.canonical_in_memory_state.set_safe(safe.clone()); + // Update safe block height metric + self.metrics.tree.safe_block_height.set(safe.number() as f64); } } Err(err) => { @@ -2753,35 +2696,6 @@ where self.update_safe_block(state.safe_block_hash) } - /// Pre-validate forkchoice update and check whether it can be processed. - /// - /// This method returns the update outcome if validation fails or - /// the node is syncing and the update cannot be processed at the moment. - fn pre_validate_forkchoice_update( - &mut self, - state: ForkchoiceState, - ) -> ProviderResult> { - if state.head_block_hash.is_zero() { - return Ok(Some(OnForkChoiceUpdated::invalid_state())) - } - - // check if the new head hash is connected to any ancestor that we previously marked as - // invalid - let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); - if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? { - return Ok(Some(OnForkChoiceUpdated::with_invalid(status))) - } - - if !self.backfill_sync_state.is_idle() { - // We can only process new forkchoice updates if the pipeline is idle, since it requires - // exclusive access to the database - trace!(target: "engine::tree", "Pipeline is syncing, skipping forkchoice update"); - return Ok(Some(OnForkChoiceUpdated::syncing())) - } - - Ok(None) - } - /// Validates the payload attributes with respect to the header and fork choice state. 
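The new finalized and safe height gauges follow the `derive(Metrics)` pattern visible elsewhere in this diff. A hedged sketch of that shape; the struct, field names, and scope below are made up and only mirror the `.set(number as f64)` calls added above:

use reth_metrics::{metrics::Gauge, Metrics};

/// Hypothetical metrics struct mirroring the new finalized/safe height gauges.
#[derive(Metrics)]
#[metrics(scope = "engine.tree")]
struct TreeHeightMetrics {
    /// Height of the most recent finalized block.
    finalized_block_height: Gauge,
    /// Height of the most recent safe block.
    safe_block_height: Gauge,
}

fn on_finalized(metrics: &TreeHeightMetrics, finalized_number: u64) {
    // Gauges are f64-valued, hence the cast seen in the diff.
    metrics.finalized_block_height.set(finalized_number as f64);
}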
/// /// Note: At this point, the fork choice update is considered to be VALID, however, we can still @@ -2924,30 +2838,3 @@ pub enum InsertPayloadOk { /// The payload was valid and inserted into the tree. Inserted(BlockStatus), } - -/// Whether or not the blocks are currently persisting and the input block is a descendant. -#[derive(Debug, Clone, Copy)] -pub enum PersistingKind { - /// The blocks are not currently persisting. - NotPersisting, - /// The blocks are currently persisting but the input block is not a descendant. - PersistingNotDescendant, - /// The blocks are currently persisting and the input block is a descendant. - PersistingDescendant, -} - -impl PersistingKind { - /// Returns true if the parallel state root can be run. - /// - /// We only run the parallel state root if we are not currently persisting any blocks or - /// persisting blocks that are all ancestors of the one we are calculating the state root for. - pub const fn can_run_parallel_state_root(&self) -> bool { - matches!(self, Self::NotPersisting | Self::PersistingDescendant) - } - - /// Returns true if the blocks are currently being persisted and the input block is a - /// descendant. - pub const fn is_descendant(&self) -> bool { - matches!(self, Self::PersistingDescendant) - } -} diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs index 176cffcd8fa..b587a721398 100644 --- a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs @@ -172,4 +172,17 @@ impl SparseTrieInterface for ConfiguredSparseTrie { Self::Parallel(trie) => trie.updates_ref(), } } + fn shrink_nodes_to(&mut self, size: usize) { + match self { + Self::Serial(trie) => trie.shrink_nodes_to(size), + Self::Parallel(trie) => trie.shrink_nodes_to(size), + } + } + + fn shrink_values_to(&mut self, size: usize) { + match self { + Self::Serial(trie) => trie.shrink_values_to(size), + Self::Parallel(trie) => trie.shrink_values_to(size), + } + } } diff --git a/crates/engine/tree/src/tree/payload_processor/executor.rs b/crates/engine/tree/src/tree/payload_processor/executor.rs index 3013c5e1c72..28165d5e8f2 100644 --- a/crates/engine/tree/src/tree/payload_processor/executor.rs +++ b/crates/engine/tree/src/tree/payload_processor/executor.rs @@ -1,10 +1,6 @@ //! Executor for mixed I/O and CPU workloads. -use rayon::ThreadPool as RayonPool; -use std::{ - sync::{Arc, OnceLock}, - time::Duration, -}; +use std::{sync::OnceLock, time::Duration}; use tokio::{ runtime::{Builder, Handle, Runtime}, task::JoinHandle, @@ -12,9 +8,8 @@ use tokio::{ /// An executor for mixed I/O and CPU workloads. /// -/// This type has access to its own rayon pool and uses tokio to spawn blocking tasks. -/// -/// It will reuse an existing tokio runtime if available or create its own. +/// This type uses tokio to spawn blocking tasks and will reuse an existing tokio +/// runtime if available or create its own. #[derive(Debug, Clone)] pub struct WorkloadExecutor { inner: WorkloadExecutorInner, @@ -22,21 +17,11 @@ pub struct WorkloadExecutor { impl Default for WorkloadExecutor { fn default() -> Self { - Self { inner: WorkloadExecutorInner::new(rayon::ThreadPoolBuilder::new().build().unwrap()) } + Self { inner: WorkloadExecutorInner::new() } } } impl WorkloadExecutor { - /// Creates a new executor with the given number of threads for cpu bound work (rayon). 
- #[expect(unused)] - pub(super) fn with_num_cpu_threads(cpu_threads: usize) -> Self { - Self { - inner: WorkloadExecutorInner::new( - rayon::ThreadPoolBuilder::new().num_threads(cpu_threads).build().unwrap(), - ), - } - } - /// Returns the handle to the tokio runtime pub(super) const fn handle(&self) -> &Handle { &self.inner.handle @@ -51,22 +36,15 @@ impl WorkloadExecutor { { self.inner.handle.spawn_blocking(func) } - - /// Returns access to the rayon pool - #[expect(unused)] - pub(super) const fn rayon_pool(&self) -> &Arc { - &self.inner.rayon_pool - } } #[derive(Debug, Clone)] struct WorkloadExecutorInner { handle: Handle, - rayon_pool: Arc, } impl WorkloadExecutorInner { - fn new(rayon_pool: rayon::ThreadPool) -> Self { + fn new() -> Self { fn get_runtime_handle() -> Handle { Handle::try_current().unwrap_or_else(|_| { // Create a new runtime if no runtime is available @@ -90,6 +68,6 @@ impl WorkloadExecutorInner { }) } - Self { handle: get_runtime_handle(), rayon_pool: Arc::new(rayon_pool) } + Self { handle: get_runtime_handle() } } } diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 8d9bd1ba2e0..d1f7531e9dd 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -15,6 +15,7 @@ use crate::tree::{ }; use alloy_evm::{block::StateChangeSource, ToTxEnv}; use alloy_primitives::B256; +use crossbeam_channel::Sender as CrossbeamSender; use executor::WorkloadExecutor; use multiproof::{SparseTrieUpdate, *}; use parking_lot::RwLock; @@ -25,14 +26,11 @@ use reth_evm::{ ConfigureEvm, EvmEnvFor, OnStateHook, SpecFor, TxEnvFor, }; use reth_primitives_traits::NodePrimitives; -use reth_provider::{ - providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, StateProviderFactory, - StateReader, -}; +use reth_provider::{BlockReader, DatabaseProviderROFactory, StateProviderFactory, StateReader}; use reth_revm::{db::BundleState, state::EvmState}; -use reth_trie::TrieInput; +use reth_trie::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; use reth_trie_parallel::{ - proof_task::{ProofTaskCtx, ProofTaskManager}, + proof_task::{ProofTaskCtx, ProofWorkerHandle}, root::ParallelStateRootError, }; use reth_trie_sparse::{ @@ -40,12 +38,15 @@ use reth_trie_sparse::{ ClearedSparseStateTrie, SparseStateTrie, SparseTrie, }; use reth_trie_sparse_parallel::{ParallelSparseTrie, ParallelismThresholds}; -use std::sync::{ - atomic::AtomicBool, - mpsc::{self, channel, Sender}, - Arc, +use std::{ + sync::{ + atomic::AtomicBool, + mpsc::{self, channel}, + Arc, + }, + time::Instant, }; -use tracing::{debug, instrument}; +use tracing::{debug, debug_span, instrument, warn}; mod configured_sparse_trie; pub mod executor; @@ -63,6 +64,29 @@ use configured_sparse_trie::ConfiguredSparseTrie; pub const PARALLEL_SPARSE_TRIE_PARALLELISM_THRESHOLDS: ParallelismThresholds = ParallelismThresholds { min_revealed_nodes: 100, min_updated_nodes: 100 }; +/// Default node capacity for shrinking the sparse trie. This is used to limit the number of trie +/// nodes in allocated sparse tries. +/// +/// Node maps have a key of `Nibbles` and value of `SparseNode`. +/// The `size_of::` is 40, and `size_of::` is 80. +/// +/// If we have 1 million entries of 120 bytes each, this conservative estimate comes out at around +/// 120MB. +pub const SPARSE_TRIE_MAX_NODES_SHRINK_CAPACITY: usize = 1_000_000; + +/// Default value capacity for shrinking the sparse trie. 
This is used to limit the number of values +/// in allocated sparse tries. +/// +/// There are storage and account values, the largest of the two being account values, which are +/// essentially `TrieAccount`s. +/// +/// Account value maps have a key of `Nibbles` and value of `TrieAccount`. +/// The `size_of::` is 40, and `size_of::` is 104. +/// +/// If we have 1 million entries of 144 bytes each, this conservative estimate comes out at around +/// 144MB. +pub const SPARSE_TRIE_MAX_VALUES_SHRINK_CAPACITY: usize = 1_000_000; + /// Entrypoint for executing the payload. #[derive(Debug)] pub struct PayloadProcessor @@ -94,8 +118,6 @@ where >, /// Whether to disable the parallel sparse trie. disable_parallel_sparse_trie: bool, - /// A cleared trie input, kept around to be reused so allocations can be minimized. - trie_input: Option, /// Maximum concurrency for prewarm task. prewarm_max_concurrency: usize, } @@ -117,12 +139,11 @@ where execution_cache: Default::default(), trie_metrics: Default::default(), cross_block_cache_size: config.cross_block_cache_size(), - disable_transaction_prewarming: config.disable_caching_and_prewarming(), + disable_transaction_prewarming: config.disable_prewarming(), evm_config, precompile_cache_disabled: config.precompile_cache_disabled(), precompile_cache_map, sparse_state_trie: Arc::default(), - trie_input: None, disable_parallel_sparse_trie: config.disable_parallel_sparse_trie(), prewarm_max_concurrency: config.prewarm_max_concurrency(), } @@ -166,52 +187,50 @@ where /// /// This returns a handle to await the final state root and to interact with the tasks (e.g. /// canceling) - pub fn spawn>( + #[allow(clippy::type_complexity)] + #[instrument( + level = "debug", + target = "engine::tree::payload_processor", + name = "payload processor", + skip_all + )] + pub fn spawn>( &mut self, env: ExecutionEnv, transactions: I, provider_builder: StateProviderBuilder, - consistent_view: ConsistentDbView

, - trie_input: TrieInput, + multiproof_provider_factory: F, config: &TreeConfig, ) -> PayloadHandle, I::Tx>, I::Error> where - P: DatabaseProviderFactory - + BlockReader - + StateProviderFactory - + StateReader + P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, + F: DatabaseProviderROFactory + Clone + + Send + 'static, { + let span = tracing::Span::current(); let (to_sparse_trie, sparse_trie_rx) = channel(); - // spawn multiproof task, save the trie input - let (trie_input, state_root_config) = - MultiProofConfig::new_from_input(consistent_view, trie_input); - self.trie_input = Some(trie_input); + + // We rely on the cursor factory to provide whatever DB overlay is necessary to see a + // consistent view of the database, including the trie tables. Because of this there is no + // need for an overarching prefix set to invalidate any section of the trie tables, and so + // we use an empty prefix set. // Create and spawn the storage proof task - let task_ctx = ProofTaskCtx::new( - state_root_config.nodes_sorted.clone(), - state_root_config.state_sorted.clone(), - state_root_config.prefix_sets.clone(), - ); - let max_proof_task_concurrency = config.max_proof_task_concurrency() as usize; - let proof_task = ProofTaskManager::new( + let task_ctx = ProofTaskCtx::new(multiproof_provider_factory); + let storage_worker_count = config.storage_worker_count(); + let account_worker_count = config.account_worker_count(); + let proof_handle = ProofWorkerHandle::new( self.executor.handle().clone(), - state_root_config.consistent_view.clone(), task_ctx, - max_proof_task_concurrency, + storage_worker_count, + account_worker_count, ); - // We set it to half of the proof task concurrency, because often for each multiproof we - // spawn one Tokio task for the account proof, and one Tokio task for the storage proof. - let max_multi_proof_task_concurrency = max_proof_task_concurrency / 2; let multi_proof_task = MultiProofTask::new( - state_root_config, - self.executor.clone(), - proof_task.handle(), + proof_handle.clone(), to_sparse_trie, - max_multi_proof_task_concurrency, config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()), ); @@ -231,6 +250,7 @@ where // spawn multi-proof task self.executor.spawn_blocking(move || { + let _enter = span.entered(); multi_proof_task.run(); }); @@ -238,19 +258,7 @@ where let (state_root_tx, state_root_rx) = channel(); // Spawn the sparse trie task using any stored trie and parallel trie configuration. - self.spawn_sparse_trie_task(sparse_trie_rx, proof_task.handle(), state_root_tx); - - // spawn the proof task - self.executor.spawn_blocking(move || { - if let Err(err) = proof_task.run() { - // At least log if there is an error at any point - tracing::error!( - target: "engine::root", - ?err, - "Storage proof task returned an error" - ); - } - }); + self.spawn_sparse_trie_task(sparse_trie_rx, proof_handle, state_root_tx); PayloadHandle { to_multi_proof, @@ -263,6 +271,7 @@ where /// Spawns a task that exclusively handles cache prewarming for transaction execution. /// /// Returns a [`PayloadHandle`] to communicate with the task. 
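The spawn paths in this file now capture the current tracing span and re-enter it inside the spawned blocking closure, so background work stays attached to the payload-processing span. The same idea sketched with a plain thread instead of the `WorkloadExecutor`:

use tracing::{debug, debug_span};

fn main() {
    tracing_subscriber::fmt().with_max_level(tracing::Level::DEBUG).init();

    let span = debug_span!(target: "engine::tree::payload_processor", "payload processor");
    let _guard = span.enter();

    // Capture the current span and move it into the background task.
    let task_span = tracing::Span::current();
    let handle = std::thread::spawn(move || {
        let _enter = task_span.entered();
        // Events here are recorded inside the "payload processor" span.
        debug!(target: "engine::tree::payload_processor", "running multiproof task");
    });
    handle.join().unwrap();
}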
+ #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] pub(super) fn spawn_cache_exclusive>( &self, env: ExecutionEnv, @@ -302,7 +311,7 @@ where let (execute_tx, execute_rx) = mpsc::channel(); self.executor.spawn_blocking(move || { for tx in transactions { - let tx = tx.map(|tx| WithTxEnv { tx_env: tx.to_tx_env(), tx }); + let tx = tx.map(|tx| WithTxEnv { tx_env: tx.to_tx_env(), tx: Arc::new(tx) }); // only send Ok(_) variants to prewarming task if let Ok(tx) = &tx { let _ = prewarm_tx.send(tx.clone()); @@ -321,7 +330,7 @@ where mut transactions: mpsc::Receiver + Clone + Send + 'static>, transaction_count_hint: usize, provider_builder: StateProviderBuilder, - to_multi_proof: Option>, + to_multi_proof: Option>, ) -> CacheTaskHandle where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, @@ -359,7 +368,9 @@ where // spawn pre-warm task { let to_prewarm_task = to_prewarm_task.clone(); + let span = debug_span!(target: "engine::tree::payload_processor", "prewarm task"); self.executor.spawn_blocking(move || { + let _enter = span.entered(); prewarm_task.run(transactions, to_prewarm_task); }); } @@ -367,16 +378,11 @@ where CacheTaskHandle { cache, to_prewarm_task: Some(to_prewarm_task), cache_metrics } } - /// Takes the trie input from the inner payload processor, if it exists. - pub const fn take_trie_input(&mut self) -> Option { - self.trie_input.take() - } - /// Returns the cache for the given parent hash. /// /// If the given hash is different then what is recently cached, then this will create a new /// instance. - #[instrument(target = "engine::caching", skip(self))] + #[instrument(level = "debug", target = "engine::caching", skip(self))] fn cache_for(&self, parent_hash: B256) -> SavedCache { if let Some(cache) = self.execution_cache.get_cache_for(parent_hash) { debug!("reusing execution cache"); @@ -389,10 +395,11 @@ where } /// Spawns the [`SparseTrieTask`] for this payload processor. + #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] fn spawn_sparse_trie_task( &self, sparse_trie_rx: mpsc::Receiver, - proof_task_handle: BPF, + proof_worker_handle: BPF, state_root_tx: mpsc::Sender>, ) where BPF: TrieNodeProviderFactory + Clone + Send + Sync + 'static, @@ -422,19 +429,32 @@ where let task = SparseTrieTask::<_, ConfiguredSparseTrie, ConfiguredSparseTrie>::new_with_cleared_trie( sparse_trie_rx, - proof_task_handle, + proof_worker_handle, self.trie_metrics.clone(), sparse_state_trie, ); + let span = tracing::Span::current(); self.executor.spawn_blocking(move || { + let _enter = span.entered(); + let (result, trie) = task.run(); // Send state root computation result let _ = state_root_tx.send(result); - // Clear the SparseStateTrie and replace it back into the mutex _after_ sending results - // to the next step, so that time spent clearing doesn't block the step after this one. - cleared_sparse_trie.lock().replace(ClearedSparseStateTrie::from_state_trie(trie)); + // Clear the SparseStateTrie, shrink, and replace it back into the mutex _after_ sending + // results to the next step, so that time spent clearing doesn't block the step after + // this one. + let _enter = debug_span!(target: "engine::tree::payload_processor", "clear").entered(); + let mut cleared_trie = ClearedSparseStateTrie::from_state_trie(trie); + + // Shrink the sparse trie so that we don't have ever increasing memory. 
+ cleared_trie.shrink_to( + SPARSE_TRIE_MAX_NODES_SHRINK_CAPACITY, + SPARSE_TRIE_MAX_VALUES_SHRINK_CAPACITY, + ); + + cleared_sparse_trie.lock().replace(cleared_trie); }); } } @@ -443,7 +463,7 @@ where #[derive(Debug)] pub struct PayloadHandle { /// Channel for evm state updates - to_multi_proof: Option>, + to_multi_proof: Option>, // must include the receiver of the state root wired to the sparse trie prewarm_handle: CacheTaskHandle, /// Receiver for the state root @@ -458,6 +478,7 @@ impl PayloadHandle { /// # Panics /// /// If payload processing was started without background tasks. + #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] pub fn state_root(&mut self) -> Result { self.state_root .take() @@ -520,7 +541,7 @@ pub(crate) struct CacheTaskHandle { /// Metrics for the caches cache_metrics: CachedStateMetrics, /// Channel to the spawned prewarm task if any - to_prewarm_task: Option>, + to_prewarm_task: Option>, } impl CacheTaskHandle { @@ -589,8 +610,16 @@ impl ExecutionCache { /// A cache is considered available when: /// - It exists and matches the requested parent hash /// - No other tasks are currently using it (checked via Arc reference count) + #[instrument(level = "debug", target = "engine::tree::payload_processor", skip(self))] pub(crate) fn get_cache_for(&self, parent_hash: B256) -> Option { + let start = Instant::now(); let cache = self.inner.read(); + + let elapsed = start.elapsed(); + if elapsed.as_millis() > 5 { + warn!(blocked_for=?elapsed, "Blocked waiting for execution cache mutex"); + } + cache .as_ref() .filter(|c| c.executed_block_hash() == parent_hash && c.is_available()) @@ -669,12 +698,12 @@ mod tests { use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::{Account, Recovered, StorageEntry}; use reth_provider::{ - providers::{BlockchainProvider, ConsistentDbView}, + providers::{BlockchainProvider, OverlayStateProviderFactory}, test_utils::create_test_provider_factory_with_chain_spec, ChainSpecProvider, HashingWriter, }; use reth_testing_utils::generators; - use reth_trie::{test_utils::state_root, HashedPostState, TrieInput}; + use reth_trie::{test_utils::state_root, HashedPostState}; use revm_primitives::{Address, HashMap, B256, KECCAK_EMPTY, U256}; use revm_state::{AccountInfo, AccountStatus, EvmState, EvmStorageSlot}; use std::sync::Arc; @@ -856,13 +885,14 @@ mod tests { &TreeConfig::default(), PrecompileCacheMap::default(), ); - let provider = BlockchainProvider::new(factory).unwrap(); + + let provider_factory = BlockchainProvider::new(factory).unwrap(); + let mut handle = payload_processor.spawn( Default::default(), core::iter::empty::, core::convert::Infallible>>(), - StateProviderBuilder::new(provider.clone(), genesis_hash, None), - ConsistentDbView::new_with_latest_tip(provider).unwrap(), - TrieInput::from_state(hashed_state), + StateProviderBuilder::new(provider_factory.clone(), genesis_hash, None), + OverlayStateProviderFactory::new(provider_factory), &TreeConfig::default(), ); diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 6c7f5de40a3..7da199dd636 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -1,35 +1,31 @@ //! Multiproof task related functionality. 
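Clearing the sparse trie keeps its allocations for reuse, so the new `shrink_to` call bounds how much capacity survives between payloads (roughly 120 MB of nodes and 144 MB of values at the 1M-entry caps above). The same idea, sketched with a plain `HashMap` standing in for the node map:

use std::collections::HashMap;

// Mirrors the intent of SPARSE_TRIE_MAX_NODES_SHRINK_CAPACITY: cap how many
// entries' worth of allocation is retained after a clear.
const MAX_RETAINED_CAPACITY: usize = 1_000_000;

fn clear_and_shrink(nodes: &mut HashMap<u64, u64>) {
    // Clearing keeps the map's capacity so the next payload can reuse allocations...
    nodes.clear();
    // ...but capping it stops one unusually large block from pinning memory forever.
    nodes.shrink_to(MAX_RETAINED_CAPACITY);
}

fn main() {
    let mut nodes: HashMap<u64, u64> = (0..2_000_000u64).map(|i| (i, i)).collect();
    clear_and_shrink(&mut nodes);
    assert!(nodes.is_empty());
}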
-use crate::tree::payload_processor::executor::WorkloadExecutor; use alloy_evm::block::StateChangeSource; use alloy_primitives::{ keccak256, map::{B256Set, HashSet}, B256, }; +use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; use dashmap::DashMap; use derive_more::derive::Deref; -use metrics::Histogram; -use reth_errors::ProviderError; +use metrics::{Gauge, Histogram}; use reth_metrics::Metrics; -use reth_provider::{providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, FactoryTx}; use reth_revm::state::EvmState; use reth_trie::{ added_removed_keys::MultiAddedRemovedKeys, prefix_set::TriePrefixSetsMut, updates::TrieUpdatesSorted, DecodedMultiProof, HashedPostState, HashedPostStateSorted, HashedStorage, MultiProofTargets, TrieInput, }; -use reth_trie_parallel::{proof::ParallelProof, proof_task::ProofTaskManagerHandle}; -use std::{ - collections::{BTreeMap, VecDeque}, - ops::DerefMut, - sync::{ - mpsc::{channel, Receiver, Sender}, - Arc, +use reth_trie_parallel::{ + proof::ParallelProof, + proof_task::{ + AccountMultiproofInput, ProofResultContext, ProofResultMessage, ProofWorkerHandle, + StorageProofInput, }, - time::{Duration, Instant}, }; -use tracing::{debug, error, trace}; +use std::{collections::BTreeMap, ops::DerefMut, sync::Arc, time::Instant}; +use tracing::{debug, error, instrument, trace}; /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the /// state. @@ -61,10 +57,8 @@ impl SparseTrieUpdate { } /// Common configuration for multi proof tasks -#[derive(Debug, Clone)] -pub(super) struct MultiProofConfig { - /// View over the state in the database. - pub consistent_view: ConsistentDbView, +#[derive(Debug, Clone, Default)] +pub(crate) struct MultiProofConfig { /// The sorted collection of cached in-memory intermediate trie nodes that /// can be reused for computation. pub nodes_sorted: Arc, @@ -76,17 +70,13 @@ pub(super) struct MultiProofConfig { pub prefix_sets: Arc, } -impl MultiProofConfig { - /// Creates a new state root config from the consistent view and the trie input. +impl MultiProofConfig { + /// Creates a new state root config from the trie input. /// /// This returns a cleared [`TrieInput`] so that we can reuse any allocated space in the /// [`TrieInput`]. - pub(super) fn new_from_input( - consistent_view: ConsistentDbView, - mut input: TrieInput, - ) -> (TrieInput, Self) { + pub(crate) fn from_input(mut input: TrieInput) -> (TrieInput, Self) { let config = Self { - consistent_view, nodes_sorted: Arc::new(input.nodes.drain_into_sorted()), state_sorted: Arc::new(input.state.drain_into_sorted()), prefix_sets: Arc::new(input.prefix_sets.clone()), @@ -112,10 +102,6 @@ pub(super) enum MultiProofMessage { /// The state update that was used to calculate the proof state: HashedPostState, }, - /// Proof calculation completed for a specific state update - ProofCalculated(Box), - /// Error during proof calculation - ProofCalculationError(ProviderError), /// Signals state update stream end. /// /// This is triggered by block execution, indicating that no additional state updates are @@ -123,17 +109,6 @@ pub(super) enum MultiProofMessage { FinishedStateUpdates, } -/// Message about completion of proof calculation for a specific state update -#[derive(Debug)] -pub(super) struct ProofCalculated { - /// The index of this proof in the sequence of state updates - sequence_number: u64, - /// Sparse trie update - update: SparseTrieUpdate, - /// The time taken to calculate the proof. 
- elapsed: Duration, -} - /// Handle to track proof calculation ordering. #[derive(Debug, Default)] struct ProofSequencer { @@ -196,10 +171,10 @@ impl ProofSequencer { /// This should trigger once the block has been executed (after) the last state update has been /// sent. This triggers the exit condition of the multi proof task. #[derive(Deref, Debug)] -pub(super) struct StateHookSender(Sender); +pub(super) struct StateHookSender(CrossbeamSender); impl StateHookSender { - pub(crate) const fn new(inner: Sender) -> Self { + pub(crate) const fn new(inner: CrossbeamSender) -> Self { Self(inner) } } @@ -217,7 +192,7 @@ pub(crate) fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostStat for (address, account) in update { if account.is_touched() { let hashed_address = keccak256(address); - trace!(target: "engine::root", ?address, ?hashed_address, "Adding account to state update"); + trace!(target: "engine::tree::payload_processor::multiproof", ?address, ?hashed_address, "Adding account to state update"); let destroyed = account.is_selfdestructed(); let info = if destroyed { None } else { Some(account.info.into()) }; @@ -245,14 +220,14 @@ pub(crate) fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostStat /// A pending multiproof task, either [`StorageMultiproofInput`] or [`MultiproofInput`]. #[derive(Debug)] -enum PendingMultiproofTask { +enum PendingMultiproofTask { /// A storage multiproof task input. - Storage(StorageMultiproofInput), + Storage(StorageMultiproofInput), /// A regular multiproof task input. - Regular(MultiproofInput), + Regular(MultiproofInput), } -impl PendingMultiproofTask { +impl PendingMultiproofTask { /// Returns the proof sequence number of the task. const fn proof_sequence_number(&self) -> u64 { match self { @@ -278,32 +253,30 @@ impl PendingMultiproofTask { } } -impl From> for PendingMultiproofTask { - fn from(input: StorageMultiproofInput) -> Self { +impl From for PendingMultiproofTask { + fn from(input: StorageMultiproofInput) -> Self { Self::Storage(input) } } -impl From> for PendingMultiproofTask { - fn from(input: MultiproofInput) -> Self { +impl From for PendingMultiproofTask { + fn from(input: MultiproofInput) -> Self { Self::Regular(input) } } -/// Input parameters for spawning a dedicated storage multiproof calculation. +/// Input parameters for dispatching a dedicated storage multiproof calculation. #[derive(Debug)] -struct StorageMultiproofInput { - config: MultiProofConfig, - source: Option, +struct StorageMultiproofInput { hashed_state_update: HashedPostState, hashed_address: B256, proof_targets: B256Set, proof_sequence_number: u64, - state_root_message_sender: Sender, + state_root_message_sender: CrossbeamSender, multi_added_removed_keys: Arc, } -impl StorageMultiproofInput { +impl StorageMultiproofInput { /// Destroys the input and sends a [`MultiProofMessage::EmptyProof`] message to the sender. fn send_empty_proof(self) { let _ = self.state_root_message_sender.send(MultiProofMessage::EmptyProof { @@ -313,19 +286,18 @@ impl StorageMultiproofInput { } } -/// Input parameters for spawning a multiproof calculation. +/// Input parameters for dispatching a multiproof calculation. 
#[derive(Debug)] -struct MultiproofInput { - config: MultiProofConfig, +struct MultiproofInput { source: Option, hashed_state_update: HashedPostState, proof_targets: MultiProofTargets, proof_sequence_number: u64, - state_root_message_sender: Sender, + state_root_message_sender: CrossbeamSender, multi_added_removed_keys: Option>, } -impl MultiproofInput { +impl MultiproofInput { /// Destroys the input and sends a [`MultiProofMessage::EmptyProof`] message to the sender. fn send_empty_proof(self) { let _ = self.state_root_message_sender.send(MultiProofMessage::EmptyProof { @@ -335,22 +307,20 @@ impl MultiproofInput { } } -/// Manages concurrent multiproof calculations. -/// Takes care of not having more calculations in flight than a given maximum -/// concurrency, further calculation requests are queued and spawn later, after -/// availability has been signaled. +/// Coordinates multiproof dispatch between `MultiProofTask` and the parallel trie workers. +/// +/// # Flow +/// 1. `MultiProofTask` asks the manager to dispatch either storage or account proof work. +/// 2. The manager builds the request, clones `proof_result_tx`, and hands everything to +/// [`ProofWorkerHandle`]. +/// 3. A worker finishes the proof and sends a [`ProofResultMessage`] through the channel included +/// in the job. +/// 4. `MultiProofTask` consumes the message from the same channel and sequences it with +/// `ProofSequencer`. #[derive(Debug)] -pub struct MultiproofManager { - /// Maximum number of concurrent calculations. - max_concurrent: usize, - /// Currently running calculations. - inflight: usize, - /// Queued calculations. - pending: VecDeque>, - /// Executor for tasks - executor: WorkloadExecutor, - /// Sender to the storage proof task. - storage_proof_task_handle: ProofTaskManagerHandle>, +pub struct MultiproofManager { + /// Handle to the proof worker pools (storage and account). + proof_worker_handle: ProofWorkerHandle, /// Cached storage proof roots for missed leaves; this maps /// hashed (missed) addresses to their storage proof roots. /// @@ -363,39 +333,34 @@ pub struct MultiproofManager { /// a big account change into different chunks, which may repeatedly /// revisit missed leaves. missed_leaves_storage_roots: Arc>, + /// Channel sender cloned into each dispatched job so workers can send back the + /// `ProofResultMessage`. + proof_result_tx: CrossbeamSender, /// Metrics metrics: MultiProofTaskMetrics, } -impl MultiproofManager -where - Factory: DatabaseProviderFactory + Clone + 'static, -{ +impl MultiproofManager { /// Creates a new [`MultiproofManager`]. fn new( - executor: WorkloadExecutor, metrics: MultiProofTaskMetrics, - storage_proof_task_handle: ProofTaskManagerHandle>, - max_concurrent: usize, + proof_worker_handle: ProofWorkerHandle, + proof_result_tx: CrossbeamSender, ) -> Self { + // Initialize the max worker gauges with the worker pool sizes + metrics.max_storage_workers.set(proof_worker_handle.total_storage_workers() as f64); + metrics.max_account_workers.set(proof_worker_handle.total_account_workers() as f64); + Self { - pending: VecDeque::with_capacity(max_concurrent), - max_concurrent, - executor, - inflight: 0, metrics, - storage_proof_task_handle, + proof_worker_handle, missed_leaves_storage_roots: Default::default(), + proof_result_tx, } } - const fn is_full(&self) -> bool { - self.inflight >= self.max_concurrent - } - - /// Spawns a new multiproof calculation or enqueues it for later if - /// `max_concurrent` are already inflight. 
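The flow described above replaces the inflight/pending bookkeeping with a result channel that is cloned into every dispatched job; workers send their results back on it and the task sequences them. A rough crossbeam sketch of that dispatch-and-collect shape; the types and worker loop here are stand-ins, not the real proof worker API:

use crossbeam_channel::{unbounded, Sender};
use std::thread;

struct ProofResult {
    sequence_number: u64,
}

// Each dispatch hands the worker a clone of the shared result sender.
fn dispatch(worker_tx: &Sender<(u64, Sender<ProofResult>)>, seq: u64, result_tx: Sender<ProofResult>) {
    worker_tx.send((seq, result_tx)).expect("worker pool alive");
}

fn main() {
    let (worker_tx, worker_rx) = unbounded::<(u64, Sender<ProofResult>)>();
    let (result_tx, result_rx) = unbounded::<ProofResult>();

    // A single stand-in worker: compute the proof, then send the result back on
    // the channel that came with the job.
    thread::spawn(move || {
        for (sequence_number, tx) in worker_rx {
            let _ = tx.send(ProofResult { sequence_number });
        }
    });

    for seq in 0..3 {
        dispatch(&worker_tx, seq, result_tx.clone());
    }
    drop(worker_tx);

    // The coordinator consumes results and would hand them to the ProofSequencer.
    let mut received: Vec<u64> = result_rx.iter().take(3).map(|r| r.sequence_number).collect();
    received.sort_unstable();
    assert_eq!(received, vec![0, 1, 2]);
}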
- fn spawn_or_queue(&mut self, input: PendingMultiproofTask) { + /// Dispatches a new multiproof calculation to worker pools. + fn dispatch(&self, input: PendingMultiproofTask) { // If there are no proof targets, we can just send an empty multiproof back immediately if input.proof_targets_is_empty() { debug!( @@ -406,198 +371,180 @@ where return } - if self.is_full() { - self.pending.push_back(input); - self.metrics.pending_multiproofs_histogram.record(self.pending.len() as f64); - return; - } - - self.spawn_multiproof_task(input); - } - - /// Signals that a multiproof calculation has finished and there's room to - /// spawn a new calculation if needed. - fn on_calculation_complete(&mut self) { - self.inflight = self.inflight.saturating_sub(1); - self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); - - if let Some(input) = self.pending.pop_front() { - self.metrics.pending_multiproofs_histogram.record(self.pending.len() as f64); - self.spawn_multiproof_task(input); - } - } - - /// Spawns a multiproof task, dispatching to `spawn_storage_proof` if the input is a storage - /// multiproof, and dispatching to `spawn_multiproof` otherwise. - fn spawn_multiproof_task(&mut self, input: PendingMultiproofTask) { match input { PendingMultiproofTask::Storage(storage_input) => { - self.spawn_storage_proof(storage_input); + self.dispatch_storage_proof(storage_input); } PendingMultiproofTask::Regular(multiproof_input) => { - self.spawn_multiproof(multiproof_input); + self.dispatch_multiproof(multiproof_input); } } } - /// Spawns a single storage proof calculation task. - fn spawn_storage_proof(&mut self, storage_multiproof_input: StorageMultiproofInput) { + /// Dispatches a single storage proof calculation to worker pool. + fn dispatch_storage_proof(&self, storage_multiproof_input: StorageMultiproofInput) { let StorageMultiproofInput { - config, - source, hashed_state_update, hashed_address, proof_targets, proof_sequence_number, - state_root_message_sender, multi_added_removed_keys, + state_root_message_sender: _, } = storage_multiproof_input; - let storage_proof_task_handle = self.storage_proof_task_handle.clone(); - let missed_leaves_storage_roots = self.missed_leaves_storage_roots.clone(); + let storage_targets = proof_targets.len(); + + trace!( + target: "engine::tree::payload_processor::multiproof", + proof_sequence_number, + ?proof_targets, + storage_targets, + "Dispatching storage proof to workers" + ); - self.executor.spawn_blocking(move || { - let storage_targets = proof_targets.len(); + let start = Instant::now(); - trace!( - target: "engine::root", - proof_sequence_number, - ?proof_targets, - storage_targets, - "Starting dedicated storage proof calculation", - ); - let start = Instant::now(); - let proof_result = ParallelProof::new( - config.consistent_view, - config.nodes_sorted, - config.state_sorted, - config.prefix_sets, - missed_leaves_storage_roots, - storage_proof_task_handle.clone(), - ) - .with_branch_node_masks(true) - .with_multi_added_removed_keys(Some(multi_added_removed_keys)) - .storage_proof(hashed_address, proof_targets); - let elapsed = start.elapsed(); - trace!( - target: "engine::root", + // Create prefix set from targets + let prefix_set = reth_trie::prefix_set::PrefixSetMut::from( + proof_targets.iter().map(reth_trie::Nibbles::unpack), + ); + let prefix_set = prefix_set.freeze(); + + // Build computation input (data only) + let input = StorageProofInput::new( + hashed_address, + prefix_set, + proof_targets, + true, // with_branch_node_masks + 
Some(multi_added_removed_keys), + ); + + // Dispatch to storage worker + if let Err(e) = self.proof_worker_handle.dispatch_storage_proof( + input, + ProofResultContext::new( + self.proof_result_tx.clone(), proof_sequence_number, - ?elapsed, - ?source, - storage_targets, - "Storage multiproofs calculated", - ); + hashed_state_update, + start, + ), + ) { + error!(target: "engine::tree::payload_processor::multiproof", ?e, "Failed to dispatch storage proof"); + return; + } - match proof_result { - Ok(proof) => { - let _ = state_root_message_sender.send(MultiProofMessage::ProofCalculated( - Box::new(ProofCalculated { - sequence_number: proof_sequence_number, - update: SparseTrieUpdate { - state: hashed_state_update, - multiproof: DecodedMultiProof::from_storage_proof( - hashed_address, - proof, - ), - }, - elapsed, - }), - )); - } - Err(error) => { - let _ = state_root_message_sender - .send(MultiProofMessage::ProofCalculationError(error.into())); - } - } - }); + self.metrics + .active_storage_workers_histogram + .record(self.proof_worker_handle.active_storage_workers() as f64); + self.metrics + .active_account_workers_histogram + .record(self.proof_worker_handle.active_account_workers() as f64); + self.metrics + .pending_storage_multiproofs_histogram + .record(self.proof_worker_handle.pending_storage_tasks() as f64); + self.metrics + .pending_account_multiproofs_histogram + .record(self.proof_worker_handle.pending_account_tasks() as f64); + } - self.inflight += 1; - self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + /// Signals that a multiproof calculation has finished. + fn on_calculation_complete(&self) { + self.metrics + .active_storage_workers_histogram + .record(self.proof_worker_handle.active_storage_workers() as f64); + self.metrics + .active_account_workers_histogram + .record(self.proof_worker_handle.active_account_workers() as f64); + self.metrics + .pending_storage_multiproofs_histogram + .record(self.proof_worker_handle.pending_storage_tasks() as f64); + self.metrics + .pending_account_multiproofs_histogram + .record(self.proof_worker_handle.pending_account_tasks() as f64); } - /// Spawns a single multiproof calculation task. - fn spawn_multiproof(&mut self, multiproof_input: MultiproofInput) { + /// Dispatches a single multiproof calculation to worker pool. 
+ fn dispatch_multiproof(&self, multiproof_input: MultiproofInput) { let MultiproofInput { - config, source, hashed_state_update, proof_targets, proof_sequence_number, - state_root_message_sender, + state_root_message_sender: _, multi_added_removed_keys, } = multiproof_input; - let storage_proof_task_handle = self.storage_proof_task_handle.clone(); + let missed_leaves_storage_roots = self.missed_leaves_storage_roots.clone(); + let account_targets = proof_targets.len(); + let storage_targets = proof_targets.values().map(|slots| slots.len()).sum::(); - self.executor.spawn_blocking(move || { - let account_targets = proof_targets.len(); - let storage_targets = proof_targets.values().map(|slots| slots.len()).sum::(); + trace!( + target: "engine::tree::payload_processor::multiproof", + proof_sequence_number, + ?proof_targets, + account_targets, + storage_targets, + ?source, + "Dispatching multiproof to workers" + ); - trace!( - target: "engine::root", - proof_sequence_number, - ?proof_targets, - account_targets, - storage_targets, - ?source, - "Starting multiproof calculation", - ); + let start = Instant::now(); + + // Extend prefix sets with targets + let frozen_prefix_sets = + ParallelProof::extend_prefix_sets_with_targets(&Default::default(), &proof_targets); - let start = Instant::now(); - let proof_result = ParallelProof::new( - config.consistent_view, - config.nodes_sorted, - config.state_sorted, - config.prefix_sets, - missed_leaves_storage_roots, - storage_proof_task_handle.clone(), - ) - .with_branch_node_masks(true) - .with_multi_added_removed_keys(multi_added_removed_keys) - .decoded_multiproof(proof_targets); - let elapsed = start.elapsed(); - trace!( - target: "engine::root", + // Dispatch account multiproof to worker pool with result sender + let input = AccountMultiproofInput { + targets: proof_targets, + prefix_sets: frozen_prefix_sets, + collect_branch_node_masks: true, + multi_added_removed_keys, + missed_leaves_storage_roots, + // Workers will send ProofResultMessage directly to proof_result_rx + proof_result_sender: ProofResultContext::new( + self.proof_result_tx.clone(), proof_sequence_number, - ?elapsed, - ?source, - account_targets, - storage_targets, - "Multiproof calculated", - ); + hashed_state_update, + start, + ), + }; - match proof_result { - Ok(proof) => { - let _ = state_root_message_sender.send(MultiProofMessage::ProofCalculated( - Box::new(ProofCalculated { - sequence_number: proof_sequence_number, - update: SparseTrieUpdate { - state: hashed_state_update, - multiproof: proof, - }, - elapsed, - }), - )); - } - Err(error) => { - let _ = state_root_message_sender - .send(MultiProofMessage::ProofCalculationError(error.into())); - } - } - }); + if let Err(e) = self.proof_worker_handle.dispatch_account_multiproof(input) { + error!(target: "engine::tree::payload_processor::multiproof", ?e, "Failed to dispatch account multiproof"); + return; + } - self.inflight += 1; - self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + self.metrics + .active_storage_workers_histogram + .record(self.proof_worker_handle.active_storage_workers() as f64); + self.metrics + .active_account_workers_histogram + .record(self.proof_worker_handle.active_account_workers() as f64); + self.metrics + .pending_storage_multiproofs_histogram + .record(self.proof_worker_handle.pending_storage_tasks() as f64); + self.metrics + .pending_account_multiproofs_histogram + .record(self.proof_worker_handle.pending_account_tasks() as f64); } } #[derive(Metrics, Clone)] #[metrics(scope = 
"tree.root")] pub(crate) struct MultiProofTaskMetrics { - /// Histogram of inflight multiproofs. - pub inflight_multiproofs_histogram: Histogram, - /// Histogram of pending multiproofs. - pub pending_multiproofs_histogram: Histogram, + /// Histogram of active storage workers processing proofs. + pub active_storage_workers_histogram: Histogram, + /// Histogram of active account workers processing proofs. + pub active_account_workers_histogram: Histogram, + /// Gauge for the maximum number of storage workers in the pool. + pub max_storage_workers: Gauge, + /// Gauge for the maximum number of account workers in the pool. + pub max_account_workers: Gauge, + /// Histogram of pending storage multiproofs in the queue. + pub pending_storage_multiproofs_histogram: Histogram, + /// Histogram of pending account multiproofs in the queue. + pub pending_account_multiproofs_histogram: Histogram, /// Histogram of the number of prefetch proof target accounts. pub prefetch_proof_targets_accounts_histogram: Histogram, @@ -638,26 +585,115 @@ pub(crate) struct MultiProofTaskMetrics { /// Standalone task that receives a transaction state stream and updates relevant /// data structures to calculate state root. /// -/// It is responsible of initializing a blinded sparse trie and subscribe to -/// transaction state stream. As it receives transaction execution results, it -/// fetches the proofs for relevant accounts from the database and reveal them -/// to the tree. -/// Then it updates relevant leaves according to the result of the transaction. -/// This feeds updates to the sparse trie task. +/// ## Architecture: Dual-Channel Multiproof System +/// +/// This task orchestrates parallel proof computation using a dual-channel architecture that +/// separates control messages from proof computation results: +/// +/// ```text +/// ┌─────────────────────────────────────────────────────────────────┐ +/// │ MultiProofTask │ +/// │ Event Loop (crossbeam::select!) │ +/// └──┬──────────────────────────────────────────────────────────▲───┘ +/// │ │ +/// │ (1) Send proof request │ +/// │ via tx (control channel) │ +/// │ │ +/// ▼ │ +/// ┌──────────────────────────────────────────────────────────────┐ │ +/// │ MultiproofManager │ │ +/// │ - Deduplicates against fetched_proof_targets │ │ +/// │ - Routes to appropriate worker pool │ │ +/// └──┬───────────────────────────────────────────────────────────┘ │ +/// │ │ +/// │ (2) Dispatch to workers │ +/// │ OR send EmptyProof (fast path) │ +/// ▼ │ +/// ┌──────────────────────────────────────────────────────────────┐ │ +/// │ ProofWorkerHandle │ │ +/// │ ┌─────────────────────┐ ┌────────────────────────┐ │ │ +/// │ │ Storage Worker Pool │ │ Account Worker Pool │ │ │ +/// │ │ (spawn_blocking) │ │ (spawn_blocking) │ │ │ +/// │ └─────────────────────┘ └────────────────────────┘ │ │ +/// └──┬───────────────────────────────────────────────────────────┘ │ +/// │ │ +/// │ (3) Compute proofs in parallel │ +/// │ Send results back │ +/// │ │ +/// ▼ │ +/// ┌──────────────────────────────────────────────────────────────┐ │ +/// │ proof_result_tx (crossbeam unbounded channel) │ │ +/// │ → ProofResultMessage { multiproof, sequence_number, ... } │ │ +/// └──────────────────────────────────────────────────────────────┘ │ +/// │ +/// (4) Receive via crossbeam::select! 
on two channels: ───────────┘ +/// - rx: Control messages (PrefetchProofs, StateUpdate, +/// EmptyProof, FinishedStateUpdates) +/// - proof_result_rx: Computed proof results from workers +/// ``` +/// +/// ## Component Responsibilities +/// +/// - **[`MultiProofTask`]**: Event loop coordinator +/// - Receives state updates from transaction execution +/// - Deduplicates proof targets against already-fetched proofs +/// - Sequences proofs to maintain transaction ordering +/// - Feeds sequenced updates to sparse trie task +/// +/// - **[`MultiproofManager`]**: Calculation orchestrator +/// - Decides between fast path ([`EmptyProof`]) and worker dispatch +/// - Routes storage-only vs full multiproofs to appropriate workers +/// - Records metrics for monitoring +/// +/// - **[`ProofWorkerHandle`]**: Worker pool manager +/// - Maintains separate pools for storage and account proofs +/// - Dispatches work to blocking threads (CPU-intensive) +/// - Sends results directly via `proof_result_tx` (bypasses control channel) +/// +/// [`EmptyProof`]: MultiProofMessage::EmptyProof +/// [`ProofWorkerHandle`]: reth_trie_parallel::proof_task::ProofWorkerHandle +/// +/// ## Dual-Channel Design Rationale +/// +/// The system uses two separate crossbeam channels: +/// +/// 1. **Control Channel (`tx`/`rx`)**: For orchestration messages +/// - `PrefetchProofs`: Pre-fetch proofs before execution +/// - `StateUpdate`: New transaction execution results +/// - `EmptyProof`: Fast path when all targets already fetched +/// - `FinishedStateUpdates`: Signal to drain pending work +/// +/// 2. **Proof Result Channel (`proof_result_tx`/`proof_result_rx`)**: For worker results +/// - `ProofResultMessage`: Computed multiproofs from worker pools +/// - Direct path from workers to event loop (no intermediate hops) +/// - Keeps control messages separate from high-throughput proof data +/// +/// This separation enables: +/// - **Non-blocking control**: Control messages never wait behind large proof data +/// - **Backpressure management**: Each channel can apply different policies +/// - **Clear ownership**: Workers only need proof result sender, not control channel +/// +/// ## Initialization and Lifecycle +/// +/// The task initializes a blinded sparse trie and subscribes to transaction state streams. +/// As it receives transaction execution results, it fetches proofs for relevant accounts +/// from the database and reveals them to the tree, then updates relevant leaves according +/// to transaction results. This feeds updates to the sparse trie task. +/// +/// See the `run()` method documentation for detailed lifecycle flow. #[derive(Debug)] -pub(super) struct MultiProofTask { +pub(super) struct MultiProofTask { /// The size of proof targets chunk to spawn in one calculation. - /// - /// If [`None`], then chunking is disabled. + /// If None, chunking is disabled and all targets are processed in a single proof. chunk_size: Option, - /// Task configuration. - config: MultiProofConfig, - /// Receiver for state root related messages. - rx: Receiver, + /// Receiver for state root related messages (prefetch, state updates, finish signal). + rx: CrossbeamReceiver, /// Sender for state root related messages. - tx: Sender, + tx: CrossbeamSender, + /// Receiver for proof results directly from workers. + proof_result_rx: CrossbeamReceiver, /// Sender for state updates emitted by this type. - to_sparse_trie: Sender, + to_sparse_trie: std::sync::mpsc::Sender, /// Proof targets that have been already fetched. 
fetched_proof_targets: MultiProofTargets, /// Tracks keys which have been added and removed throughout the entire block. @@ -665,54 +701,54 @@ pub(super) struct MultiProofTask { /// Proof sequencing handler. proof_sequencer: ProofSequencer, /// Manages calculation of multiproofs. - multiproof_manager: MultiproofManager, + multiproof_manager: MultiproofManager, /// multi proof task metrics metrics: MultiProofTaskMetrics, } -impl MultiProofTask -where - Factory: DatabaseProviderFactory + Clone + 'static, -{ +impl MultiProofTask { /// Creates a new multi proof task with the unified message channel pub(super) fn new( - config: MultiProofConfig, - executor: WorkloadExecutor, - proof_task_handle: ProofTaskManagerHandle>, - to_sparse_trie: Sender, - max_concurrency: usize, + proof_worker_handle: ProofWorkerHandle, + to_sparse_trie: std::sync::mpsc::Sender, chunk_size: Option, ) -> Self { - let (tx, rx) = channel(); + let (tx, rx) = unbounded(); + let (proof_result_tx, proof_result_rx) = unbounded(); let metrics = MultiProofTaskMetrics::default(); Self { chunk_size, - config, rx, tx, + proof_result_rx, to_sparse_trie, fetched_proof_targets: Default::default(), multi_added_removed_keys: MultiAddedRemovedKeys::new(), proof_sequencer: ProofSequencer::default(), multiproof_manager: MultiproofManager::new( - executor, metrics.clone(), - proof_task_handle, - max_concurrency, + proof_worker_handle, + proof_result_tx, ), metrics, } } - /// Returns a [`Sender`] that can be used to send arbitrary [`MultiProofMessage`]s to this task. - pub(super) fn state_root_message_sender(&self) -> Sender { + /// Returns a sender that can be used to send arbitrary [`MultiProofMessage`]s to this task. + pub(super) fn state_root_message_sender(&self) -> CrossbeamSender { self.tx.clone() } /// Handles request for proof prefetch. /// /// Returns a number of proofs that were spawned. + #[instrument( + level = "debug", + target = "engine::tree::payload_processor::multiproof", + skip_all, + fields(accounts = targets.len(), chunks = 0) + )] fn on_prefetch_proof(&mut self, targets: MultiProofTargets) -> u64 { let proof_targets = self.get_prefetch_proof_targets(targets); self.fetched_proof_targets.extend_ref(&proof_targets); @@ -722,7 +758,7 @@ where // we still want to optimistically fetch extension children for the leaf addition case. self.multi_added_removed_keys.touch_accounts(proof_targets.keys().copied()); - // Clone+Arc MultiAddedRemovedKeys for sharing with the spawned multiproof tasks + // Clone+Arc MultiAddedRemovedKeys for sharing with the dispatched multiproof tasks let multi_added_removed_keys = Arc::new(self.multi_added_removed_keys.clone()); self.metrics.prefetch_proof_targets_accounts_histogram.record(proof_targets.len() as f64); @@ -732,12 +768,16 @@ where // Process proof targets in chunks. let mut chunks = 0; - let should_chunk = !self.multiproof_manager.is_full(); - let mut spawn = |proof_targets| { - self.multiproof_manager.spawn_or_queue( + // Only chunk if multiple account or storage workers are available to take advantage of + // parallelism. 
+ let should_chunk = self.multiproof_manager.proof_worker_handle.available_account_workers() > + 1 || + self.multiproof_manager.proof_worker_handle.available_storage_workers() > 1; + + let mut dispatch = |proof_targets| { + self.multiproof_manager.dispatch( MultiproofInput { - config: self.config.clone(), source: None, hashed_state_update: Default::default(), proof_targets, @@ -750,12 +790,18 @@ where chunks += 1; }; - if should_chunk && let Some(chunk_size) = self.chunk_size { + if should_chunk && + let Some(chunk_size) = self.chunk_size && + proof_targets.chunking_length() > chunk_size + { + let mut chunks = 0usize; for proof_targets_chunk in proof_targets.chunks(chunk_size) { - spawn(proof_targets_chunk); + dispatch(proof_targets_chunk); + chunks += 1; } + tracing::Span::current().record("chunks", chunks); } else { - spawn(proof_targets); + dispatch(proof_targets); } self.metrics.prefetch_proof_chunks_histogram.record(chunks as f64); @@ -774,8 +820,8 @@ where let all_proofs_processed = proofs_processed >= state_update_proofs_requested + prefetch_proofs_requested; let no_pending = !self.proof_sequencer.has_pending(); - debug!( - target: "engine::root", + trace!( + target: "engine::tree::payload_processor::multiproof", proofs_processed, state_update_proofs_requested, prefetch_proofs_requested, @@ -830,7 +876,7 @@ where } if duplicates > 0 { - trace!(target: "engine::root", duplicates, "Removed duplicate prefetch proof targets"); + trace!(target: "engine::tree::payload_processor::multiproof", duplicates, "Removed duplicate prefetch proof targets"); } targets @@ -839,6 +885,12 @@ where /// Handles state updates. /// /// Returns a number of proofs that were spawned. + #[instrument( + level = "debug", + target = "engine::tree::payload_processor::multiproof", + skip(self, update), + fields(accounts = update.len(), chunks = 0) + )] fn on_state_update(&mut self, source: StateChangeSource, update: EvmState) -> u64 { let hashed_state_update = evm_state_to_hashed_post_state(update); @@ -852,7 +904,7 @@ where let mut state_updates = 0; // If there are any accounts or storage slots that we already fetched the proofs for, - // send them immediately, as they don't require spawning any additional multiproofs. + // send them immediately, as they don't require dispatching any additional multiproofs. if !fetched_state_update.is_empty() { let _ = self.tx.send(MultiProofMessage::EmptyProof { sequence_number: self.proof_sequencer.next_sequence(), @@ -861,16 +913,21 @@ where state_updates += 1; } - // Clone+Arc MultiAddedRemovedKeys for sharing with the spawned multiproof tasks + // Clone+Arc MultiAddedRemovedKeys for sharing with the dispatched multiproof tasks let multi_added_removed_keys = Arc::new(self.multi_added_removed_keys.clone()); // Process state updates in chunks. let mut chunks = 0; - let should_chunk = !self.multiproof_manager.is_full(); let mut spawned_proof_targets = MultiProofTargets::default(); - let mut spawn = |hashed_state_update| { + // Only chunk if multiple account or storage workers are available to take advantage of + // parallelism. 
+ let should_chunk = self.multiproof_manager.proof_worker_handle.available_account_workers() > + 1 || + self.multiproof_manager.proof_worker_handle.available_storage_workers() > 1; + + let mut dispatch = |hashed_state_update| { let proof_targets = get_proof_targets( &hashed_state_update, &self.fetched_proof_targets, @@ -878,9 +935,8 @@ where ); spawned_proof_targets.extend_ref(&proof_targets); - self.multiproof_manager.spawn_or_queue( + self.multiproof_manager.dispatch( MultiproofInput { - config: self.config.clone(), source: Some(source), hashed_state_update, proof_targets, @@ -894,12 +950,18 @@ where chunks += 1; }; - if should_chunk && let Some(chunk_size) = self.chunk_size { + if should_chunk && + let Some(chunk_size) = self.chunk_size && + not_fetched_state_update.chunking_length() > chunk_size + { + let mut chunks = 0usize; for chunk in not_fetched_state_update.chunks(chunk_size) { - spawn(chunk); + dispatch(chunk); + chunks += 1; } + tracing::Span::current().record("chunks", chunks); } else { - spawn(not_fetched_state_update); + dispatch(not_fetched_state_update); } self.metrics @@ -947,15 +1009,14 @@ where /// so that the proofs for accounts and storage slots that were already fetched are not /// requested again. /// 2. Using the proof targets, a new multiproof is calculated using - /// [`MultiproofManager::spawn_or_queue`]. + /// [`MultiproofManager::dispatch`]. /// * If the list of proof targets is empty, the [`MultiProofMessage::EmptyProof`] message is /// sent back to this task along with the original state update. - /// * Otherwise, the multiproof is calculated and the [`MultiProofMessage::ProofCalculated`] - /// message is sent back to this task along with the resulting multiproof, proof targets - /// and original state update. - /// 3. Either [`MultiProofMessage::EmptyProof`] or [`MultiProofMessage::ProofCalculated`] is - /// received. - /// * The multiproof is added to the (proof sequencer)[`ProofSequencer`]. + /// * Otherwise, the multiproof is dispatched to worker pools and results are sent directly + /// to this task via the `proof_result_rx` channel as [`ProofResultMessage`]. + /// 3. Either [`MultiProofMessage::EmptyProof`] (via control channel) or [`ProofResultMessage`] + /// (via proof result channel) is received. + /// * The multiproof is added to the [`ProofSequencer`]. /// * If the proof sequencer has a contiguous sequence of multiproofs in the same order as /// state updates arrived (i.e. transaction order), such sequence is returned. /// 4. Once there's a sequence of contiguous multiproofs along with the proof targets and state @@ -964,10 +1025,15 @@ where /// 5. Steps above are repeated until this task receives a /// [`MultiProofMessage::FinishedStateUpdates`]. /// * Once this message is received, on every [`MultiProofMessage::EmptyProof`] and - /// [`MultiProofMessage::ProofCalculated`] message, we check if there are any proofs are - /// currently being calculated, or if there are any pending proofs in the proof sequencer - /// left to be revealed by checking the pending tasks. + /// [`ProofResultMessage`], we check if all proofs have been processed and if there are any + /// pending proofs in the proof sequencer left to be revealed. /// 6. This task exits after all pending proofs are processed. 
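Steps 3 and 4 above hinge on sequencing: proofs can finish out of order, but downstream updates must be applied in the order the state updates arrived. A reduced sketch of that idea, assuming nothing beyond the standard library (this is not the actual `ProofSequencer` implementation):

```rust
use std::collections::BTreeMap;

/// Illustrative sequencer: buffers out-of-order results and releases the
/// longest contiguous run starting at the next expected sequence number.
struct Sequencer<T> {
    next_to_deliver: u64,
    buffered: BTreeMap<u64, T>,
}

impl<T> Sequencer<T> {
    fn new() -> Self {
        Self { next_to_deliver: 0, buffered: BTreeMap::new() }
    }

    /// Adds a result with its sequence number and returns any results that
    /// are now deliverable in order.
    fn add(&mut self, seq: u64, value: T) -> Vec<T> {
        self.buffered.insert(seq, value);
        let mut ready = Vec::new();
        while let Some(v) = self.buffered.remove(&self.next_to_deliver) {
            ready.push(v);
            self.next_to_deliver += 1;
        }
        ready
    }

    /// True while buffered results are still waiting on an earlier sequence number.
    fn has_pending(&self) -> bool {
        !self.buffered.is_empty()
    }
}

fn main() {
    let mut seq = Sequencer::new();
    assert!(seq.add(1, "proof-1").is_empty()); // out of order, buffered
    assert_eq!(seq.add(0, "proof-0"), vec!["proof-0", "proof-1"]);
    assert!(!seq.has_pending());
}
```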
+ #[instrument( + level = "debug", + name = "MultiProofTask::run", + target = "engine::tree::payload_processor::multiproof", + skip_all + )] pub(crate) fn run(mut self) { // TODO convert those into fields let mut prefetch_proofs_requested = 0; @@ -985,154 +1051,171 @@ where let mut updates_finished_time = None; loop { - trace!(target: "engine::root", "entering main channel receiving loop"); - match self.rx.recv() { - Ok(message) => match message { - MultiProofMessage::PrefetchProofs(targets) => { - trace!(target: "engine::root", "processing MultiProofMessage::PrefetchProofs"); - if first_update_time.is_none() { - // record the wait time - self.metrics - .first_update_wait_time_histogram - .record(start.elapsed().as_secs_f64()); - first_update_time = Some(Instant::now()); - debug!(target: "engine::root", "Started state root calculation"); - } - - let account_targets = targets.len(); - let storage_targets = - targets.values().map(|slots| slots.len()).sum::(); - prefetch_proofs_requested += self.on_prefetch_proof(targets); - debug!( - target: "engine::root", - account_targets, - storage_targets, - prefetch_proofs_requested, - "Prefetching proofs" - ); - } - MultiProofMessage::StateUpdate(source, update) => { - trace!(target: "engine::root", "processing MultiProofMessage::StateUpdate"); - if first_update_time.is_none() { - // record the wait time - self.metrics - .first_update_wait_time_histogram - .record(start.elapsed().as_secs_f64()); - first_update_time = Some(Instant::now()); - debug!(target: "engine::root", "Started state root calculation"); - } - - let len = update.len(); - state_update_proofs_requested += self.on_state_update(source, update); - debug!( - target: "engine::root", - ?source, - len, - ?state_update_proofs_requested, - "Received new state update" - ); - } - MultiProofMessage::FinishedStateUpdates => { - trace!(target: "engine::root", "processing MultiProofMessage::FinishedStateUpdates"); - updates_finished = true; - updates_finished_time = Some(Instant::now()); - if self.is_done( - proofs_processed, - state_update_proofs_requested, - prefetch_proofs_requested, - updates_finished, - ) { - debug!( - target: "engine::root", - "State updates finished and all proofs processed, ending calculation" - ); - break - } - } - MultiProofMessage::EmptyProof { sequence_number, state } => { - trace!(target: "engine::root", "processing MultiProofMessage::EmptyProof"); + trace!(target: "engine::tree::payload_processor::multiproof", "entering main channel receiving loop"); - proofs_processed += 1; + crossbeam_channel::select_biased! 
{ + recv(self.proof_result_rx) -> proof_msg => { + match proof_msg { + Ok(proof_result) => { + proofs_processed += 1; - if let Some(combined_update) = self.on_proof( - sequence_number, - SparseTrieUpdate { state, multiproof: Default::default() }, - ) { - let _ = self.to_sparse_trie.send(combined_update); + self.metrics + .proof_calculation_duration_histogram + .record(proof_result.elapsed); + + self.multiproof_manager.on_calculation_complete(); + + // Convert ProofResultMessage to SparseTrieUpdate + match proof_result.result { + Ok(proof_result_data) => { + debug!( + target: "engine::tree::payload_processor::multiproof", + sequence = proof_result.sequence_number, + total_proofs = proofs_processed, + "Processing calculated proof from worker" + ); + + let update = SparseTrieUpdate { + state: proof_result.state, + multiproof: proof_result_data.into_multiproof(), + }; + + if let Some(combined_update) = + self.on_proof(proof_result.sequence_number, update) + { + let _ = self.to_sparse_trie.send(combined_update); + } + } + Err(error) => { + error!(target: "engine::tree::payload_processor::multiproof", ?error, "proof calculation error from worker"); + return + } + } + + if self.is_done( + proofs_processed, + state_update_proofs_requested, + prefetch_proofs_requested, + updates_finished, + ) { + debug!( + target: "engine::tree::payload_processor::multiproof", + "State updates finished and all proofs processed, ending calculation" + ); + break + } } - - if self.is_done( - proofs_processed, - state_update_proofs_requested, - prefetch_proofs_requested, - updates_finished, - ) { - debug!( - target: "engine::root", - "State updates finished and all proofs processed, ending calculation" - ); - break + Err(_) => { + error!(target: "engine::tree::payload_processor::multiproof", "Proof result channel closed unexpectedly"); + return } } - MultiProofMessage::ProofCalculated(proof_calculated) => { - trace!(target: "engine::root", "processing - MultiProofMessage::ProofCalculated"); - - // we increment proofs_processed for both state updates and prefetches, - // because both are used for the root termination condition. 
- proofs_processed += 1; - - self.metrics - .proof_calculation_duration_histogram - .record(proof_calculated.elapsed); - - debug!( - target: "engine::root", - sequence = proof_calculated.sequence_number, - total_proofs = proofs_processed, - "Processing calculated proof" - ); - - self.multiproof_manager.on_calculation_complete(); - - if let Some(combined_update) = - self.on_proof(proof_calculated.sequence_number, proof_calculated.update) - { - let _ = self.to_sparse_trie.send(combined_update); - } - - if self.is_done( - proofs_processed, - state_update_proofs_requested, - prefetch_proofs_requested, - updates_finished, - ) { - debug!( - target: "engine::root", - "State updates finished and all proofs processed, ending calculation"); - break + }, + recv(self.rx) -> message => { + match message { + Ok(msg) => match msg { + MultiProofMessage::PrefetchProofs(targets) => { + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::PrefetchProofs"); + + if first_update_time.is_none() { + // record the wait time + self.metrics + .first_update_wait_time_histogram + .record(start.elapsed().as_secs_f64()); + first_update_time = Some(Instant::now()); + debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); + } + + let account_targets = targets.len(); + let storage_targets = + targets.values().map(|slots| slots.len()).sum::(); + prefetch_proofs_requested += self.on_prefetch_proof(targets); + debug!( + target: "engine::tree::payload_processor::multiproof", + account_targets, + storage_targets, + prefetch_proofs_requested, + "Prefetching proofs" + ); + } + MultiProofMessage::StateUpdate(source, update) => { + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::StateUpdate"); + + if first_update_time.is_none() { + // record the wait time + self.metrics + .first_update_wait_time_histogram + .record(start.elapsed().as_secs_f64()); + first_update_time = Some(Instant::now()); + debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); + } + + let len = update.len(); + state_update_proofs_requested += self.on_state_update(source, update); + debug!( + target: "engine::tree::payload_processor::multiproof", + ?source, + len, + ?state_update_proofs_requested, + "Received new state update" + ); + } + MultiProofMessage::FinishedStateUpdates => { + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::FinishedStateUpdates"); + + updates_finished = true; + updates_finished_time = Some(Instant::now()); + + if self.is_done( + proofs_processed, + state_update_proofs_requested, + prefetch_proofs_requested, + updates_finished, + ) { + debug!( + target: "engine::tree::payload_processor::multiproof", + "State updates finished and all proofs processed, ending calculation" + ); + break + } + } + MultiProofMessage::EmptyProof { sequence_number, state } => { + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::EmptyProof"); + + proofs_processed += 1; + + if let Some(combined_update) = self.on_proof( + sequence_number, + SparseTrieUpdate { state, multiproof: Default::default() }, + ) { + let _ = self.to_sparse_trie.send(combined_update); + } + + if self.is_done( + proofs_processed, + state_update_proofs_requested, + prefetch_proofs_requested, + updates_finished, + ) { + debug!( + target: "engine::tree::payload_processor::multiproof", + "State updates finished and all proofs processed, ending 
calculation" + ); + break + } + } + }, + Err(_) => { + error!(target: "engine::tree::payload_processor::multiproof", "State root related message channel closed unexpectedly"); + return } } - MultiProofMessage::ProofCalculationError(err) => { - error!( - target: "engine::root", - ?err, - "proof calculation error" - ); - return - } - }, - Err(_) => { - // this means our internal message channel is closed, which shouldn't happen - // in normal operation since we hold both ends - error!(target: "engine::root", "Internal message channel closed unexpectedly"); - return } } } debug!( - target: "engine::root", + target: "engine::tree::payload_processor::multiproof", total_updates = state_update_proofs_requested, total_proofs = proofs_processed, total_time = ?first_update_time.map(|t|t.elapsed()), @@ -1202,44 +1285,43 @@ fn get_proof_targets( mod tests { use super::*; use alloy_primitives::map::B256Set; - use reth_provider::{providers::ConsistentDbView, test_utils::create_test_provider_factory}; - use reth_trie::{MultiProof, TrieInput}; - use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofTaskManager}; + use reth_provider::{ + providers::OverlayStateProviderFactory, test_utils::create_test_provider_factory, + BlockReader, DatabaseProviderFactory, PruneCheckpointReader, StageCheckpointReader, + TrieReader, + }; + use reth_trie::MultiProof; + use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofWorkerHandle}; use revm_primitives::{B256, U256}; - use std::sync::Arc; - - fn create_state_root_config(factory: F, input: TrieInput) -> MultiProofConfig - where - F: DatabaseProviderFactory + Clone + 'static, - { - let consistent_view = ConsistentDbView::new(factory, None); - let nodes_sorted = Arc::new(input.nodes.clone().into_sorted()); - let state_sorted = Arc::new(input.state.clone().into_sorted()); - let prefix_sets = Arc::new(input.prefix_sets); - - MultiProofConfig { consistent_view, nodes_sorted, state_sorted, prefix_sets } + use std::sync::OnceLock; + use tokio::runtime::{Handle, Runtime}; + + /// Get a handle to the test runtime, creating it if necessary + fn get_test_runtime_handle() -> Handle { + static TEST_RT: OnceLock = OnceLock::new(); + TEST_RT + .get_or_init(|| { + tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap() + }) + .handle() + .clone() } - fn create_test_state_root_task(factory: F) -> MultiProofTask + fn create_test_state_root_task(factory: F) -> MultiProofTask where - F: DatabaseProviderFactory + Clone + 'static, + F: DatabaseProviderFactory< + Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, + > + Clone + + Send + + 'static, { - let executor = WorkloadExecutor::default(); - let config = create_state_root_config(factory, TrieInput::default()); - let task_ctx = ProofTaskCtx::new( - config.nodes_sorted.clone(), - config.state_sorted.clone(), - config.prefix_sets.clone(), - ); - let proof_task = ProofTaskManager::new( - executor.handle().clone(), - config.consistent_view.clone(), - task_ctx, - 1, - ); - let channel = channel(); + let rt_handle = get_test_runtime_handle(); + let overlay_factory = OverlayStateProviderFactory::new(factory); + let task_ctx = ProofTaskCtx::new(overlay_factory); + let proof_handle = ProofWorkerHandle::new(rt_handle, task_ctx, 1, 1); + let (to_sparse_trie, _receiver) = std::sync::mpsc::channel(); - MultiProofTask::new(config, executor, proof_task.handle(), channel.0, 1, None) + MultiProofTask::new(proof_handle, to_sparse_trie, Some(1)) } #[test] diff --git 
a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 44293614d3d..ddbfc0715a1 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -24,6 +24,7 @@ use alloy_consensus::transaction::TxHashRef; use alloy_eips::Typed2718; use alloy_evm::Database; use alloy_primitives::{keccak256, map::B256Set, B256}; +use crossbeam_channel::Sender as CrossbeamSender; use metrics::{Counter, Gauge, Histogram}; use reth_evm::{execute::ExecutableTxFor, ConfigureEvm, Evm, EvmFor, SpecFor}; use reth_metrics::Metrics; @@ -39,7 +40,7 @@ use std::{ }, time::Instant, }; -use tracing::{debug, trace, warn}; +use tracing::{debug, debug_span, instrument, trace, warn}; /// A wrapper for transactions that includes their index in the block. #[derive(Clone)] @@ -83,7 +84,7 @@ where /// The number of transactions to be processed transaction_count_hint: usize, /// Sender to emit evm state outcome messages, if any. - to_multi_proof: Option>, + to_multi_proof: Option>, /// Receiver for events produced by tx execution actions_rx: Receiver, } @@ -99,14 +100,14 @@ where executor: WorkloadExecutor, execution_cache: PayloadExecutionCache, ctx: PrewarmContext, - to_multi_proof: Option>, + to_multi_proof: Option>, transaction_count_hint: usize, max_concurrency: usize, ) -> (Self, Sender) { let (actions_tx, actions_rx) = channel(); trace!( - target: "engine::tree::prewarm", + target: "engine::tree::payload_processor::prewarm", max_concurrency, transaction_count_hint, "Initialized prewarm task" @@ -139,13 +140,12 @@ where let ctx = self.ctx.clone(); let max_concurrency = self.max_concurrency; let transaction_count_hint = self.transaction_count_hint; + let span = tracing::Span::current(); self.executor.spawn_blocking(move || { - let (done_tx, done_rx) = mpsc::channel(); - let mut executing = 0usize; + let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", parent: span, "spawn_all").entered(); - // Initialize worker handles container - let mut handles = Vec::with_capacity(max_concurrency); + let (done_tx, done_rx) = mpsc::channel(); // When transaction_count_hint is 0, it means the count is unknown. 
In this case, spawn // max workers to handle potentially many transactions in parallel rather @@ -156,67 +156,52 @@ where transaction_count_hint.min(max_concurrency) }; + // Initialize worker handles container + let mut handles = Vec::with_capacity(workers_needed); + // Only spawn initial workers as needed - for _ in 0..workers_needed { - handles.push(ctx.spawn_worker(&executor, actions_tx.clone(), done_tx.clone())); + for i in 0..workers_needed { + handles.push(ctx.spawn_worker(i, &executor, actions_tx.clone(), done_tx.clone())); } + // Distribute transactions to workers let mut tx_index = 0usize; + while let Ok(tx) = pending.recv() { + // Stop distributing if termination was requested + if ctx.terminate_execution.load(Ordering::Relaxed) { + trace!( + target: "engine::tree::payload_processor::prewarm", + "Termination requested, stopping transaction distribution" + ); + break; + } + + let indexed_tx = IndexedTransaction { index: tx_index, tx }; + let is_system_tx = indexed_tx.tx.tx().ty() > MAX_STANDARD_TX_TYPE; - // Handle first transaction - special case for system transactions - if let Ok(first_tx) = pending.recv() { - // Move the transaction into the indexed wrapper to avoid an extra clone - let indexed_tx = IndexedTransaction { index: tx_index, tx: first_tx }; - // Compute metadata from the moved value - let tx_ref = indexed_tx.tx.tx(); - let is_system_tx = tx_ref.ty() > MAX_STANDARD_TX_TYPE; - let first_tx_hash = tx_ref.tx_hash(); - - // Check if this is a system transaction (type > 4) - // System transactions in the first position typically set critical metadata - // that affects all subsequent transactions (e.g., L1 block info, fees on L2s). - if is_system_tx { - // Broadcast system transaction to all workers to ensure they have the - // critical state. This is particularly important for L2s like Optimism - // where the first deposit transaction contains essential block metadata. + // System transactions (type > 4) in the first position set critical metadata + // that affects all subsequent transactions (e.g., L1 block info on L2s). + // Broadcast the first system transaction to all workers to ensure they have + // the critical state. This is particularly important for L2s like Optimism + // where the first deposit transaction (type 126) contains essential block metadata. + if tx_index == 0 && is_system_tx { for handle in &handles { - if let Err(err) = handle.send(indexed_tx.clone()) { - warn!( - target: "engine::tree::prewarm", - tx_hash = %first_tx_hash, - error = %err, - "Failed to send deposit transaction to worker" - ); - } + // Ignore send errors: workers listen to terminate_execution and may + // exit early when signaled. Sending to a disconnected worker is + // possible and harmless and should happen at most once due to + // the terminate_execution check above. + let _ = handle.send(indexed_tx.clone()); } } else { - // Not a deposit, send to first worker via round-robin - if let Err(err) = handles[0].send(indexed_tx) { - warn!( - target: "engine::tree::prewarm", - task_idx = 0, - error = %err, - "Failed to send transaction to worker" - ); - } + // Round-robin distribution for all other transactions + let worker_idx = tx_index % workers_needed; + // Ignore send errors: workers listen to terminate_execution and may + // exit early when signaled. Sending to a disconnected worker is + // possible and harmless and should happen at most once due to + // the terminate_execution check above. 
+ let _ = handles[worker_idx].send(indexed_tx); } - executing += 1; - tx_index += 1; - } - // Process remaining transactions with round-robin distribution - while let Ok(executable) = pending.recv() { - let indexed_tx = IndexedTransaction { index: tx_index, tx: executable }; - let task_idx = executing % workers_needed; - if let Err(err) = handles[task_idx].send(indexed_tx) { - warn!( - target: "engine::tree::prewarm", - task_idx, - error = %err, - "Failed to send transaction to worker" - ); - } - executing += 1; tx_index += 1; } @@ -226,12 +211,23 @@ where while done_rx.recv().is_ok() {} let _ = actions_tx - .send(PrewarmTaskEvent::FinishedTxExecution { executed_transactions: executing }); + .send(PrewarmTaskEvent::FinishedTxExecution { executed_transactions: tx_index }); }); } + /// Returns true if prewarming was terminated and no more transactions should be prewarmed. + fn is_execution_terminated(&self) -> bool { + self.ctx.terminate_execution.load(Ordering::Relaxed) + } + /// If configured and the tx returned proof targets, emit the targets the transaction produced fn send_multi_proof_targets(&self, targets: Option) { + if self.is_execution_terminated() { + // if execution is already terminated then we dont need to send more proof fetch + // messages + return + } + if let Some((proof_targets, to_multi_proof)) = targets.zip(self.to_multi_proof.as_ref()) { let _ = to_multi_proof.send(MultiProofMessage::PrefetchProofs(proof_targets)); } @@ -248,6 +244,7 @@ where /// the new, warmed cache to be inserted. /// /// This method is called from `run()` only after all execution tasks are complete. + #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn save_cache(self, state: BundleState) { let start = Instant::now(); @@ -255,9 +252,9 @@ where self; let hash = env.hash; + debug!(target: "engine::caching", parent_hash=?hash, "Updating execution cache"); // Perform all cache operations atomically under the lock execution_cache.update_with_guard(|cached| { - // consumes the `SavedCache` held by the prewarming task, which releases its usage guard let (caches, cache_metrics) = saved_cache.split(); let new_cache = SavedCache::new(hash, caches, cache_metrics); @@ -271,19 +268,27 @@ where } new_cache.update_metrics(); - debug!(target: "engine::caching", parent_hash=?new_cache.executed_block_hash(), "Updated execution cache"); // Replace the shared cache with the new one; the previous cache (if any) is dropped. *cached = Some(new_cache); }); - metrics.cache_saving_duration.set(start.elapsed().as_secs_f64()); + let elapsed = start.elapsed(); + debug!(target: "engine::caching", parent_hash=?hash, elapsed=?elapsed, "Updated execution cache"); + + metrics.cache_saving_duration.set(elapsed.as_secs_f64()); } /// Executes the task. /// /// This will execute the transactions until all transactions have been processed or the task /// was cancelled. 
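For illustration, the distribution policy described above (broadcast the first system transaction to every worker, round-robin everything else, and ignore send errors from workers that already exited) can be sketched on its own. `Tx` below is a placeholder type parameter and plain `std::sync::mpsc` channels stand in for the worker handles; this shows the scheduling policy only, not the prewarm task itself.

```rust
use std::sync::mpsc;
use std::thread;

/// Illustrative distribution loop: the first transaction is broadcast to every
/// worker when it is a "system" transaction, everything else goes round-robin.
fn distribute<Tx: Clone + Send + 'static>(
    txs: Vec<(Tx, bool /* is_system_tx */)>,
    worker_count: usize,
) -> Vec<mpsc::Receiver<(usize, Tx)>> {
    let (senders, receivers): (Vec<_>, Vec<_>) =
        (0..worker_count).map(|_| mpsc::channel()).unzip();

    thread::spawn(move || {
        for (index, (tx, is_system)) in txs.into_iter().enumerate() {
            if index == 0 && is_system {
                // Broadcast: every worker needs the state this transaction sets up.
                for s in &senders {
                    let _ = s.send((index, tx.clone()));
                }
            } else {
                // Round-robin; ignore errors from workers that already exited.
                let _ = senders[index % worker_count].send((index, tx));
            }
        }
    });

    receivers
}

fn main() {
    let rxs = distribute(vec![("sys", true), ("a", false), ("b", false)], 2);
    assert_eq!(rxs[0].recv().unwrap(), (0, "sys")); // broadcast
    assert_eq!(rxs[1].recv().unwrap(), (0, "sys")); // broadcast
    assert_eq!(rxs[1].recv().unwrap(), (1, "a"));   // round-robin
    assert_eq!(rxs[0].recv().unwrap(), (2, "b"));   // round-robin
}
```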
+ #[instrument( + level = "debug", + target = "engine::tree::payload_processor::prewarm", + name = "prewarm", + skip_all + )] pub(super) fn run( self, pending: mpsc::Receiver + Clone + Send + 'static>, @@ -298,6 +303,7 @@ where match event { PrewarmTaskEvent::TerminateTransactionExecution => { // stop tx processing + debug!(target: "engine::tree::prewarm", "Terminating prewarm execution"); self.ctx.terminate_execution.store(true, Ordering::Relaxed); } PrewarmTaskEvent::Outcome { proof_targets } => { @@ -305,7 +311,7 @@ where self.send_multi_proof_targets(proof_targets); } PrewarmTaskEvent::Terminate { block_output } => { - trace!(target: "engine::tree::prewarm", "Received termination signal"); + trace!(target: "engine::tree::payload_processor::prewarm", "Received termination signal"); final_block_output = Some(block_output); if finished_execution { @@ -314,7 +320,7 @@ where } } PrewarmTaskEvent::FinishedTxExecution { executed_transactions } => { - trace!(target: "engine::tree::prewarm", "Finished prewarm execution signal"); + trace!(target: "engine::tree::payload_processor::prewarm", "Finished prewarm execution signal"); self.ctx.metrics.transactions.set(executed_transactions as f64); self.ctx.metrics.transactions_histogram.record(executed_transactions as f64); @@ -328,7 +334,7 @@ where } } - trace!(target: "engine::tree::prewarm", "Completed prewarm execution"); + debug!(target: "engine::tree::payload_processor::prewarm", "Completed prewarm execution"); // save caches and finish if let Some(Some(state)) = final_block_output { @@ -364,6 +370,7 @@ where { /// Splits this context into an evm, an evm config, metrics, and the atomic bool for terminating /// execution. + #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn evm_for_ctx(self) -> Option<(EvmFor, PrewarmMetrics, Arc)> { let Self { env, @@ -380,7 +387,7 @@ where Ok(provider) => provider, Err(err) => { trace!( - target: "engine::tree", + target: "engine::tree::payload_processor::prewarm", %err, "Failed to build state provider in prewarm thread" ); @@ -429,6 +436,7 @@ where /// /// Note: There are no ordering guarantees; this does not reflect the state produced by /// sequential execution. + #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] fn transact_batch( self, txs: mpsc::Receiver>, @@ -439,7 +447,18 @@ where { let Some((mut evm, metrics, terminate_execution)) = self.evm_for_ctx() else { return }; - while let Ok(IndexedTransaction { index, tx }) = txs.recv() { + while let Ok(IndexedTransaction { index, tx }) = { + let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", "recv tx") + .entered(); + txs.recv() + } { + let _enter = + debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm tx", index, tx_hash=%tx.tx().tx_hash()) + .entered(); + + // create the tx env + let start = Instant::now(); + // If the task was cancelled, stop execution, send an empty result to notify the task, // and exit. 
if terminate_execution.load(Ordering::Relaxed) { @@ -447,13 +466,11 @@ where break } - // create the tx env - let start = Instant::now(); let res = match evm.transact(&tx) { Ok(res) => res, Err(err) => { trace!( - target: "engine::tree::prewarm", + target: "engine::tree::payload_processor::prewarm", %err, tx_hash=%tx.tx().tx_hash(), sender=%tx.signer(), @@ -467,12 +484,25 @@ where }; metrics.execution_duration.record(start.elapsed()); + drop(_enter); + + // If the task was cancelled, stop execution, send an empty result to notify the task, + // and exit. + if terminate_execution.load(Ordering::Relaxed) { + let _ = sender.send(PrewarmTaskEvent::Outcome { proof_targets: None }); + break + } + // Only send outcome for transactions after the first txn // as the main execution will be just as fast if index > 0 { + let _enter = + debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm outcome", index, tx_hash=%tx.tx().tx_hash()) + .entered(); let (targets, storage_targets) = multiproof_targets_from_state(res.state); metrics.prefetch_storage_targets.record(storage_targets as f64); let _ = sender.send(PrewarmTaskEvent::Outcome { proof_targets: Some(targets) }); + drop(_enter); } metrics.total_runtime.record(start.elapsed()); @@ -485,17 +515,21 @@ where /// Spawns a worker task for transaction execution and returns its sender channel. fn spawn_worker( &self, + idx: usize, executor: &WorkloadExecutor, actions_tx: Sender, done_tx: Sender<()>, ) -> mpsc::Sender> where - Tx: ExecutableTxFor + Clone + Send + 'static, + Tx: ExecutableTxFor + Send + 'static, { let (tx, rx) = mpsc::channel(); let ctx = self.clone(); + let span = + debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm worker", idx); executor.spawn_blocking(move || { + let _enter = span.entered(); ctx.transact_batch(rx, actions_tx, done_tx); }); diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index c16f7b6e4f4..6302abde5fb 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -15,7 +15,7 @@ use std::{ sync::mpsc, time::{Duration, Instant}, }; -use tracing::{debug, trace, trace_span}; +use tracing::{debug, debug_span, instrument, trace}; /// A task responsible for populating the sparse trie. pub(super) struct SparseTrieTask @@ -61,6 +61,11 @@ where /// /// - State root computation outcome. /// - `SparseStateTrie` that needs to be cleared and reused to avoid reallocations. + #[instrument( + level = "debug", + target = "engine::tree::payload_processor::sparse_trie", + skip_all + )] pub(super) fn run( mut self, ) -> (Result, SparseStateTrie) { @@ -80,10 +85,14 @@ where while let Ok(mut update) = self.updates.recv() { num_iterations += 1; let mut num_updates = 1; + let _enter = + debug_span!(target: "engine::tree::payload_processor::sparse_trie", "drain updates") + .entered(); while let Ok(next) = self.updates.try_recv() { update.extend(next); num_updates += 1; } + drop(_enter); debug!( target: "engine::root", @@ -130,6 +139,7 @@ pub struct StateRootComputeOutcome { } /// Updates the sparse trie with the given proofs and state, and returns the elapsed time. 
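For context on the function below: it fans per-account storage-trie work out with rayon's `par_bridge()` and funnels results back through an `mpsc` channel that is drained on the calling thread. The following is a reduced sketch of that fan-out/collect pattern, with a dummy computation standing in for the storage-root work; it assumes only the `rayon` crate that the surrounding code already uses.

```rust
use rayon::iter::{ParallelBridge, ParallelIterator};
use std::sync::mpsc;

/// Illustrative sketch: parallelize a sequential iterator with `par_bridge()`,
/// send each result through an mpsc channel, then drain the receiver on the
/// calling thread once the parallel loop has finished.
fn parallel_roots(storages: Vec<(u64, Vec<u64>)>) -> Vec<(u64, u64)> {
    let (tx, rx) = mpsc::channel();

    storages
        .into_iter()
        .par_bridge()
        .map(|(address, slots)| {
            // Stand-in for the per-storage-trie work (root computation).
            let pseudo_root = slots.iter().sum::<u64>();
            (address, pseudo_root)
        })
        .for_each_with(tx, |tx, result| {
            let _ = tx.send(result);
        });

    // All senders are dropped once the parallel loop returns, so this drains
    // every result and then stops.
    rx.into_iter().collect()
}

fn main() {
    let mut roots = parallel_roots(vec![(1, vec![1, 2, 3]), (2, vec![10])]);
    roots.sort();
    assert_eq!(roots, vec![(1, 6), (2, 10)]);
}
```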
+#[instrument(level = "debug", target = "engine::tree::payload_processor::sparse_trie", skip_all)] pub(crate) fn update_sparse_trie( trie: &mut SparseStateTrie, SparseTrieUpdate { mut state, multiproof }: SparseTrieUpdate, @@ -155,6 +165,7 @@ where ); // Update storage slots with new values and calculate storage roots. + let span = tracing::Span::current(); let (tx, rx) = mpsc::channel(); state .storages @@ -162,14 +173,16 @@ where .map(|(address, storage)| (address, storage, trie.take_storage_trie(&address))) .par_bridge() .map(|(address, storage, storage_trie)| { - let span = trace_span!(target: "engine::root::sparse", "Storage trie", ?address); - let _enter = span.enter(); - trace!(target: "engine::root::sparse", "Updating storage"); + let _enter = + debug_span!(target: "engine::tree::payload_processor::sparse_trie", parent: span.clone(), "storage trie", ?address) + .entered(); + + trace!(target: "engine::tree::payload_processor::sparse_trie", "Updating storage"); let storage_provider = blinded_provider_factory.storage_node_provider(address); let mut storage_trie = storage_trie.ok_or(SparseTrieErrorKind::Blind)?; if storage.wiped { - trace!(target: "engine::root::sparse", "Wiping storage"); + trace!(target: "engine::tree::payload_processor::sparse_trie", "Wiping storage"); storage_trie.wipe()?; } @@ -187,7 +200,7 @@ where continue; } - trace!(target: "engine::root::sparse", ?slot_nibbles, "Updating storage slot"); + trace!(target: "engine::tree::payload_processor::sparse_trie", ?slot_nibbles, "Updating storage slot"); storage_trie.update_leaf( slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec(), @@ -219,6 +232,9 @@ where let mut removed_accounts = Vec::new(); // Update account storage roots + let _enter = + tracing::debug_span!(target: "engine::tree::payload_processor::sparse_trie", "account trie") + .entered(); for result in rx { let (address, storage_trie) = result?; trie.insert_storage_trie(address, storage_trie); diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index e2c41b0ceba..d74b9e9ce2e 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -5,20 +5,17 @@ use crate::tree::{ error::{InsertBlockError, InsertBlockErrorKind, InsertPayloadError}, executor::WorkloadExecutor, instrumented_state::InstrumentedStateProvider, - payload_processor::PayloadProcessor, - persistence_state::CurrentPersistenceAction, + payload_processor::{multiproof::MultiProofConfig, PayloadProcessor}, precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap}, sparse_trie::StateRootComputeOutcome, - ConsistentDbView, EngineApiMetrics, EngineApiTreeState, ExecutionEnv, PayloadHandle, - PersistenceState, PersistingKind, StateProviderBuilder, StateProviderDatabase, TreeConfig, + EngineApiMetrics, EngineApiTreeState, ExecutionEnv, PayloadHandle, StateProviderBuilder, + StateProviderDatabase, TreeConfig, }; use alloy_consensus::transaction::Either; use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_evm::Evm; use alloy_primitives::B256; -use reth_chain_state::{ - CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, -}; +use reth_chain_state::{CanonicalInMemoryState, ExecutedBlock}; use reth_consensus::{ConsensusError, FullConsensus, HeaderValidator}; use reth_engine_primitives::{ ConfigureEngineEvm, ExecutableTxIterator, ExecutionPayload, InvalidBlockHook, PayloadValidator, @@ -35,16 +32,16 @@ use 
reth_primitives_traits::{ AlloyBlockHeader, BlockTy, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, }; use reth_provider::{ - BlockExecutionOutput, BlockHashReader, BlockNumReader, BlockReader, DBProvider, - DatabaseProviderFactory, ExecutionOutcome, HashedPostStateProvider, HeaderProvider, - ProviderError, StateProvider, StateProviderFactory, StateReader, StateRootProvider, + providers::OverlayStateProviderFactory, BlockExecutionOutput, BlockReader, + DatabaseProviderFactory, ExecutionOutcome, HashedPostStateProvider, ProviderError, + PruneCheckpointReader, StageCheckpointReader, StateProvider, StateProviderFactory, StateReader, + StateRootProvider, TrieReader, }; use reth_revm::db::State; -use reth_trie::{updates::TrieUpdates, HashedPostState, KeccakKeyHasher, TrieInput}; -use reth_trie_db::DatabaseHashedPostState; +use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; use std::{collections::HashMap, sync::Arc, time::Instant}; -use tracing::{debug, debug_span, error, info, trace, warn}; +use tracing::{debug, debug_span, error, info, instrument, trace, warn}; /// Context providing access to tree state during validation. /// @@ -53,8 +50,6 @@ use tracing::{debug, debug_span, error, info, trace, warn}; pub struct TreeCtx<'a, N: NodePrimitives> { /// The engine API tree state state: &'a mut EngineApiTreeState, - /// Information about the current persistence state - persistence: &'a PersistenceState, /// Reference to the canonical in-memory state canonical_in_memory_state: &'a CanonicalInMemoryState, } @@ -63,7 +58,6 @@ impl<'a, N: NodePrimitives> std::fmt::Debug for TreeCtx<'a, N> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("TreeCtx") .field("state", &"EngineApiTreeState") - .field("persistence_info", &self.persistence) .field("canonical_in_memory_state", &self.canonical_in_memory_state) .finish() } @@ -73,10 +67,9 @@ impl<'a, N: NodePrimitives> TreeCtx<'a, N> { /// Creates a new tree context pub const fn new( state: &'a mut EngineApiTreeState, - persistence: &'a PersistenceState, canonical_in_memory_state: &'a CanonicalInMemoryState, ) -> Self { - Self { state, persistence, canonical_in_memory_state } + Self { state, canonical_in_memory_state } } /// Returns a reference to the engine tree state @@ -89,43 +82,10 @@ impl<'a, N: NodePrimitives> TreeCtx<'a, N> { self.state } - /// Returns a reference to the persistence info - pub const fn persistence(&self) -> &PersistenceState { - self.persistence - } - /// Returns a reference to the canonical in-memory state pub const fn canonical_in_memory_state(&self) -> &'a CanonicalInMemoryState { self.canonical_in_memory_state } - - /// Determines the persisting kind for the given block based on persistence info. - /// - /// Based on the given header it returns whether any conflicting persistence operation is - /// currently in progress. - /// - /// This is adapted from the `persisting_kind_for` method in `EngineApiTreeHandler`. - pub fn persisting_kind_for(&self, block: BlockWithParent) -> PersistingKind { - // Check that we're currently persisting. - let Some(action) = self.persistence().current_action() else { - return PersistingKind::NotPersisting - }; - // Check that the persistince action is saving blocks, not removing them. 
- let CurrentPersistenceAction::SavingBlocks { highest } = action else { - return PersistingKind::PersistingNotDescendant - }; - - // The block being validated can only be a descendant if its number is higher than - // the highest block persisting. Otherwise, it's likely a fork of a lower block. - if block.block.number > highest.number && - self.state().tree_state.is_descendant(*highest, block) - { - return PersistingKind::PersistingDescendant - } - - // In all other cases, the block is not a descendant. - PersistingKind::PersistingNotDescendant - } } /// A helper type that provides reusable payload validation logic for network-specific validators. @@ -161,13 +121,16 @@ where metrics: EngineApiMetrics, /// Validator for the payload. validator: V, + /// A cleared trie input, kept around to be reused so allocations can be minimized. + trie_input: Option, } impl BasicEngineValidator where N: NodePrimitives, - P: DatabaseProviderFactory - + BlockReader

+ P: DatabaseProviderFactory< + Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, + > + BlockReader
+ StateProviderFactory + StateReader + HashedPostStateProvider @@ -203,6 +166,7 @@ where invalid_block_hook, metrics: EngineApiMetrics::default(), validator, + trie_input: Default::default(), } } @@ -282,12 +246,12 @@ where input: BlockOrPayload, execution_err: InsertBlockErrorKind, parent_block: &SealedHeader, - ) -> Result, InsertPayloadError> + ) -> Result, InsertPayloadError> where V: PayloadValidator, { debug!( - target: "engine::tree", + target: "engine::tree::payload_validator", ?execution_err, block = ?input.num_hash(), "Block execution failed, checking for header validation errors" @@ -322,6 +286,15 @@ where /// - Block execution /// - State root computation /// - Fork detection + #[instrument( + level = "debug", + target = "engine::tree::payload_validator", + skip_all, + fields( + parent = ?input.parent_hash(), + type_name = ?input.type_name(), + ) + )] pub fn validate_block_with_state>>( &mut self, input: BlockOrPayload, @@ -364,7 +337,9 @@ where let parent_hash = input.parent_hash(); let block_num_hash = input.num_hash(); - trace!(target: "engine::tree", block=?block_num_hash, parent=?parent_hash, "Fetching block state provider"); + trace!(target: "engine::tree::payload_validator", "Fetching block state provider"); + let _enter = + debug_span!(target: "engine::tree::payload_validator", "state provider").entered(); let Some(provider_builder) = ensure_ok!(self.state_provider_builder(parent_hash, ctx.state())) else { @@ -375,8 +350,8 @@ where ) .into()) }; - let state_provider = ensure_ok!(provider_builder.build()); + drop(_enter); // fetch parent block let Some(parent_block) = ensure_ok!(self.sealed_header_by_hash(parent_hash, ctx.state())) @@ -388,22 +363,18 @@ where .into()) }; - let evm_env = self.evm_env_for(&input).map_err(NewPayloadError::other)?; + let evm_env = debug_span!(target: "engine::tree::payload_validator", "evm env") + .in_scope(|| self.evm_env_for(&input)) + .map_err(NewPayloadError::other)?; let env = ExecutionEnv { evm_env, hash: input.hash(), parent_hash: input.parent_hash() }; // Plan the strategy used for state root computation. 
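Reviewer note: the hunk below drops the persistence-aware `StateRootPlan` and picks a strategy purely from configuration (the full body of the new `plan_state_root_computation` appears further down in this file's diff). An illustrative, self-contained sketch of that decision order, using hypothetical flag names rather than the real config getters:

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum StateRootStrategy { StateRootTask, Parallel, Synchronous }

// Fallback (testing) wins first, then the state-root-task flag, otherwise parallel.
fn plan(state_root_fallback: bool, use_state_root_task: bool) -> StateRootStrategy {
    if state_root_fallback {
        StateRootStrategy::Synchronous
    } else if use_state_root_task {
        StateRootStrategy::StateRootTask
    } else {
        StateRootStrategy::Parallel
    }
}

fn main() {
    assert_eq!(plan(true, true), StateRootStrategy::Synchronous);
    assert_eq!(plan(false, true), StateRootStrategy::StateRootTask);
    assert_eq!(plan(false, false), StateRootStrategy::Parallel);
}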
- let state_root_plan = self.plan_state_root_computation(&input, &ctx); - let persisting_kind = state_root_plan.persisting_kind; - let has_ancestors_with_missing_trie_updates = - state_root_plan.has_ancestors_with_missing_trie_updates; - let strategy = state_root_plan.strategy; + let strategy = self.plan_state_root_computation(); debug!( - target: "engine::tree", - block=?block_num_hash, + target: "engine::tree::payload_validator", ?strategy, - ?has_ancestors_with_missing_trie_updates, "Deciding which state root algorithm to run" ); @@ -411,14 +382,12 @@ where let txs = self.tx_iterator_for(&input)?; // Spawn the appropriate processor based on strategy - let (mut handle, strategy) = ensure_ok!(self.spawn_payload_processor( + let mut handle = ensure_ok!(self.spawn_payload_processor( env.clone(), txs, provider_builder, - persisting_kind, parent_hash, ctx.state(), - block_num_hash, strategy, )); @@ -453,7 +422,7 @@ where block ); - debug!(target: "engine::tree", block=?block_num_hash, "Calculating block state root"); + debug!(target: "engine::tree::payload_validator", "Calculating block state root"); let root_time = Instant::now(); @@ -461,17 +430,17 @@ where match strategy { StateRootStrategy::StateRootTask => { - debug!(target: "engine::tree", block=?block_num_hash, "Using sparse trie state root algorithm"); + debug!(target: "engine::tree::payload_validator", "Using sparse trie state root algorithm"); match handle.state_root() { Ok(StateRootComputeOutcome { state_root, trie_updates }) => { let elapsed = root_time.elapsed(); - info!(target: "engine::tree", ?state_root, ?elapsed, "State root task finished"); + info!(target: "engine::tree::payload_validator", ?state_root, ?elapsed, "State root task finished"); // we double check the state root here for good measure if self.consensus.validate_state_root(block.header(), state_root).is_ok() { maybe_state_root = Some((state_root, trie_updates, elapsed)) } else { warn!( - target: "engine::tree", + target: "engine::tree::payload_validator", ?state_root, block_state_root = ?block.header().state_root(), "State root task returned incorrect state root" @@ -479,29 +448,29 @@ where } } Err(error) => { - debug!(target: "engine::tree", %error, "State root task failed"); + debug!(target: "engine::tree::payload_validator", %error, "State root task failed"); } } } StateRootStrategy::Parallel => { - debug!(target: "engine::tree", block=?block_num_hash, "Using parallel state root algorithm"); + debug!(target: "engine::tree::payload_validator", "Using parallel state root algorithm"); match self.compute_state_root_parallel( - persisting_kind, block.parent_hash(), &hashed_state, ctx.state(), ) { Ok(result) => { + let elapsed = root_time.elapsed(); info!( - target: "engine::tree", - block = ?block_num_hash, + target: "engine::tree::payload_validator", regular_state_root = ?result.0, + ?elapsed, "Regular root task finished" ); - maybe_state_root = Some((result.0, result.1, root_time.elapsed())); + maybe_state_root = Some((result.0, result.1, elapsed)); } Err(error) => { - debug!(target: "engine::tree", %error, "Parallel state root computation failed"); + debug!(target: "engine::tree::payload_validator", %error, "Parallel state root computation failed"); } } } @@ -518,9 +487,9 @@ where } else { // fallback is to compute the state root regularly in sync if self.config.state_root_fallback() { - debug!(target: "engine::tree", block=?block_num_hash, "Using state root fallback for testing"); + debug!(target: "engine::tree::payload_validator", "Using state root fallback for 
testing"); } else { - warn!(target: "engine::tree", block=?block_num_hash, ?persisting_kind, "Failed to compute state root in parallel"); + warn!(target: "engine::tree::payload_validator", "Failed to compute state root in parallel"); self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); } @@ -532,7 +501,7 @@ where }; self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); - debug!(target: "engine::tree", ?root_elapsed, block=?block_num_hash, "Calculated state root"); + debug!(target: "engine::tree::payload_validator", ?root_elapsed, "Calculated state root"); // ensure state root matches if self.consensus.validate_state_root(block.header(), state_root).is_err() { @@ -558,38 +527,11 @@ where // terminate prewarming task with good state output handle.terminate_caching(Some(&output.state)); - // If the block doesn't connect to the database tip, we don't save its trie updates, because - // they may be incorrect as they were calculated on top of the forked block. - // - // We also only save trie updates if all ancestors have trie updates, because otherwise the - // trie updates may be incorrect. - // - // Instead, they will be recomputed on persistence. - let connects_to_last_persisted = - ensure_ok_post_block!(self.block_connects_to_last_persisted(ctx, &block), block); - let should_discard_trie_updates = - !connects_to_last_persisted || has_ancestors_with_missing_trie_updates; - debug!( - target: "engine::tree", - block = ?block_num_hash, - connects_to_last_persisted, - has_ancestors_with_missing_trie_updates, - should_discard_trie_updates, - "Checking if should discard trie updates" - ); - let trie_updates = if should_discard_trie_updates { - ExecutedTrieUpdates::Missing - } else { - ExecutedTrieUpdates::Present(Arc::new(trie_output)) - }; - - Ok(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(block), - execution_output: Arc::new(ExecutionOutcome::from((output, block_num_hash.number))), - hashed_state: Arc::new(hashed_state), - }, - trie: trie_updates, + Ok(ExecutedBlock { + recovered_block: Arc::new(block), + execution_output: Arc::new(ExecutionOutcome::from((output, block_num_hash.number))), + hashed_state: Arc::new(hashed_state), + trie_updates: Arc::new(trie_output), }) } @@ -613,12 +555,12 @@ where /// and block body itself. 
fn validate_block_inner(&self, block: &RecoveredBlock) -> Result<(), ConsensusError> { if let Err(e) = self.consensus.validate_header(block.sealed_header()) { - error!(target: "engine::tree", ?block, "Failed to validate header {}: {e}", block.hash()); + error!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {}: {e}", block.hash()); return Err(e) } if let Err(e) = self.consensus.validate_block_pre_execution(block.sealed_block()) { - error!(target: "engine::tree", ?block, "Failed to validate block {}: {e}", block.hash()); + error!(target: "engine::tree::payload_validator", ?block, "Failed to validate block {}: {e}", block.hash()); return Err(e) } @@ -626,6 +568,7 @@ where } /// Executes a block with the given state provider + #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn execute_block( &mut self, state_provider: S, @@ -640,11 +583,7 @@ where T: PayloadTypes>, Evm: ConfigureEngineEvm, { - let num_hash = NumHash::new(env.evm_env.block_env.number.to(), env.hash); - - let span = debug_span!(target: "engine::tree", "execute_block", num = ?num_hash.number, hash = ?num_hash.hash); - let _enter = span.enter(); - debug!(target: "engine::tree", "Executing block"); + debug!(target: "engine::tree::payload_validator", "Executing block"); let mut db = State::builder() .with_database(StateProviderDatabase::new(&state_provider)) @@ -683,7 +622,7 @@ where )?; let execution_finish = Instant::now(); let execution_time = execution_finish.duration_since(execution_start); - debug!(target: "engine::tree", elapsed = ?execution_time, number=?num_hash.number, "Executed block"); + debug!(target: "engine::tree::payload_validator", elapsed = ?execution_time, "Executed block"); Ok(output) } @@ -695,71 +634,35 @@ where /// Returns `Err(_)` if error was encountered during computation. /// `Err(ProviderError::ConsistentView(_))` can be safely ignored and fallback computation /// should be used instead. + #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn compute_state_root_parallel( &self, - persisting_kind: PersistingKind, parent_hash: B256, hashed_state: &HashedPostState, state: &EngineApiTreeState, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { - let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; + let (mut input, block_hash) = self.compute_trie_input(parent_hash, state, None)?; - let mut input = self.compute_trie_input( - persisting_kind, - consistent_view.provider_ro()?, - parent_hash, - state, - None, - )?; // Extend with block we are validating root for. input.append_ref(hashed_state); - ParallelStateRoot::new(consistent_view, input).incremental_root_with_updates() - } + // Convert the TrieInput into a MultProofConfig, since everything uses the sorted + // forms of the state/trie fields. + let (_, multiproof_config) = MultiProofConfig::from_input(input); - /// Checks if the given block connects to the last persisted block, i.e. if the last persisted - /// block is the ancestor of the given block. - /// - /// This checks the database for the actual last persisted block, not [`PersistenceState`]. - fn block_connects_to_last_persisted( - &self, - ctx: TreeCtx<'_, N>, - block: &RecoveredBlock, - ) -> ProviderResult { - let provider = self.provider.database_provider_ro()?; - let last_persisted_block = provider.best_block_number()?; - let last_persisted_hash = provider - .block_hash(last_persisted_block)? 
- .ok_or(ProviderError::HeaderNotFound(last_persisted_block.into()))?; - let last_persisted = NumHash::new(last_persisted_block, last_persisted_hash); - - let parent_num_hash = |hash: B256| -> ProviderResult { - let parent_num_hash = - if let Some(header) = ctx.state().tree_state.sealed_header_by_hash(&hash) { - Some(header.parent_num_hash()) - } else { - provider.sealed_header_by_hash(hash)?.map(|header| header.parent_num_hash()) - }; - - parent_num_hash.ok_or(ProviderError::BlockHashNotFound(hash)) - }; - - let mut parent_block = block.parent_num_hash(); - while parent_block.number > last_persisted.number { - parent_block = parent_num_hash(parent_block.hash)?; - } - - let connects = parent_block == last_persisted; + let factory = OverlayStateProviderFactory::new(self.provider.clone()) + .with_block_hash(Some(block_hash)) + .with_trie_overlay(Some(multiproof_config.nodes_sorted)) + .with_hashed_state_overlay(Some(multiproof_config.state_sorted)); - debug!( - target: "engine::tree", - num_hash = ?block.num_hash(), - ?last_persisted, - ?parent_block, - "Checking if block connects to last persisted block" - ); + // The `hashed_state` argument is already taken into account as part of the overlay, but we + // need to use the prefix sets which were generated from it to indicate to the + // ParallelStateRoot which parts of the trie need to be recomputed. + let prefix_sets = Arc::into_inner(multiproof_config.prefix_sets) + .expect("MultiProofConfig was never cloned") + .freeze(); - Ok(connects) + ParallelStateRoot::new(factory, prefix_sets).incremental_root_with_updates() } /// Validates the block after execution. @@ -780,7 +683,7 @@ where { let start = Instant::now(); - trace!(target: "engine::tree", block=?block.num_hash(), "Validating block consensus"); + trace!(target: "engine::tree::payload_validator", block=?block.num_hash(), "Validating block consensus"); // validate block consensus rules if let Err(e) = self.validate_block_inner(block) { return Err(e.into()) @@ -790,7 +693,7 @@ where if let Err(e) = self.consensus.validate_header_against_parent(block.sealed_header(), parent_block) { - warn!(target: "engine::tree", ?block, "Failed to validate header {} against parent: {e}", block.hash()); + warn!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {} against parent: {e}", block.hash()); return Err(e.into()) } @@ -830,77 +733,65 @@ where /// The method handles strategy fallbacks if the preferred approach fails, ensuring /// block execution always completes with a valid state root. 
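Reviewer note: the new `compute_state_root_parallel` in the hunk above anchors an `OverlayStateProviderFactory` at the highest persisted ancestor and layers the in-memory state and trie updates on top of it, instead of going through a `ConsistentDbView` of the latest tip. A minimal, std-only illustration of that overlay read pattern; the types and key shapes here are stand-ins, not the reth provider API:

use std::collections::HashMap;

// Hypothetical flattened view: reads prefer the in-memory overlay built from
// non-persisted ancestor blocks, then fall back to the database anchored at the block hash.
struct OverlayView {
    overlay: HashMap<u64, u128>,  // e.g. hashed account key -> balance, changed in memory
    database: HashMap<u64, u128>, // persisted state as of the anchor block
}

impl OverlayView {
    fn get(&self, key: u64) -> Option<u128> {
        self.overlay.get(&key).or_else(|| self.database.get(&key)).copied()
    }
}

fn main() {
    let database = HashMap::from([(1, 100), (2, 5)]);
    let overlay = HashMap::from([(1, 250)]); // account 1 was touched by in-memory blocks
    let view = OverlayView { overlay, database };
    assert_eq!(view.get(1), Some(250)); // overlay takes precedence
    assert_eq!(view.get(2), Some(5));   // falls through to the persisted state
    assert_eq!(view.get(3), None);
}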
#[allow(clippy::too_many_arguments)] + #[instrument( + level = "debug", + target = "engine::tree::payload_validator", + skip_all, + fields(strategy) + )] fn spawn_payload_processor>( &mut self, env: ExecutionEnv, txs: T, provider_builder: StateProviderBuilder, - persisting_kind: PersistingKind, parent_hash: B256, state: &EngineApiTreeState, - block_num_hash: NumHash, strategy: StateRootStrategy, ) -> Result< - ( - PayloadHandle< - impl ExecutableTxFor + use, - impl core::error::Error + Send + Sync + 'static + use, - >, - StateRootStrategy, - ), + PayloadHandle< + impl ExecutableTxFor + use, + impl core::error::Error + Send + Sync + 'static + use, + >, InsertBlockErrorKind, > { match strategy { StateRootStrategy::StateRootTask => { - // use background tasks for state root calc - let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; - // get allocated trie input if it exists - let allocated_trie_input = self.payload_processor.take_trie_input(); + let allocated_trie_input = self.trie_input.take(); // Compute trie input let trie_input_start = Instant::now(); - let trie_input = self.compute_trie_input( - persisting_kind, - consistent_view.provider_ro()?, - parent_hash, - state, - allocated_trie_input, - )?; + let (trie_input, block_hash) = + self.compute_trie_input(parent_hash, state, allocated_trie_input)?; self.metrics .block_validation .trie_input_duration .record(trie_input_start.elapsed().as_secs_f64()); + // Convert the TrieInput into a MultProofConfig, since everything uses the sorted + // forms of the state/trie fields. + let (trie_input, multiproof_config) = MultiProofConfig::from_input(trie_input); + self.trie_input.replace(trie_input); + + // Create OverlayStateProviderFactory with the multiproof config, for use with + // multiproofs. + let multiproof_provider_factory = + OverlayStateProviderFactory::new(self.provider.clone()) + .with_block_hash(Some(block_hash)) + .with_trie_overlay(Some(multiproof_config.nodes_sorted)) + .with_hashed_state_overlay(Some(multiproof_config.state_sorted)); + // Use state root task only if prefix sets are empty, otherwise proof generation is // too expensive because it requires walking all paths in every proof. 
let spawn_start = Instant::now(); - let (handle, strategy) = if trie_input.prefix_sets.is_empty() { - ( - self.payload_processor.spawn( - env, - txs, - provider_builder, - consistent_view, - trie_input, - &self.config, - ), - StateRootStrategy::StateRootTask, - ) - // if prefix sets are not empty, we spawn a task that exclusively handles cache - // prewarming for transaction execution - } else { - debug!( - target: "engine::tree", - block=?block_num_hash, - "Disabling state root task due to non-empty prefix sets" - ); - ( - self.payload_processor.spawn_cache_exclusive(env, txs, provider_builder), - StateRootStrategy::Parallel, - ) - }; + let handle = self.payload_processor.spawn( + env, + txs, + provider_builder, + multiproof_provider_factory, + &self.config, + ); // record prewarming initialization duration self.metrics @@ -908,9 +799,9 @@ where .spawn_payload_processor .record(spawn_start.elapsed().as_secs_f64()); - Ok((handle, strategy)) + Ok(handle) } - strategy @ (StateRootStrategy::Parallel | StateRootStrategy::Synchronous) => { + StateRootStrategy::Parallel | StateRootStrategy::Synchronous => { let start = Instant::now(); let handle = self.payload_processor.spawn_cache_exclusive(env, txs, provider_builder); @@ -921,32 +812,11 @@ where .spawn_payload_processor .record(start.elapsed().as_secs_f64()); - Ok((handle, strategy)) + Ok(handle) } } } - /// Check if the given block has any ancestors with missing trie updates. - fn has_ancestors_with_missing_trie_updates( - &self, - target_header: BlockWithParent, - state: &EngineApiTreeState, - ) -> bool { - // Walk back through the chain starting from the parent of the target block - let mut current_hash = target_header.parent; - while let Some(block) = state.tree_state.blocks_by_hash.get(¤t_hash) { - // Check if this block is missing trie updates - if block.trie.is_missing() { - return true; - } - - // Move to the parent block - current_hash = block.recovered_block().parent_hash(); - } - - false - } - /// Creates a `StateProviderBuilder` for the given parent hash. /// /// This method checks if the parent is in the tree state (in-memory) or persisted to disk, @@ -957,7 +827,7 @@ where state: &EngineApiTreeState, ) -> ProviderResult>> { if let Some((historical, blocks)) = state.tree_state.blocks_by_hash(hash) { - debug!(target: "engine::tree", %hash, %historical, "found canonical state for block in memory, creating provider builder"); + debug!(target: "engine::tree::payload_validator", %hash, %historical, "found canonical state for block in memory, creating provider builder"); // the block leads back to the canonical chain return Ok(Some(StateProviderBuilder::new( self.provider.clone(), @@ -968,66 +838,34 @@ where // Check if the block is persisted if let Some(header) = self.provider.header(hash)? 
{ - debug!(target: "engine::tree", %hash, number = %header.number(), "found canonical state for block in database, creating provider builder"); + debug!(target: "engine::tree::payload_validator", %hash, number = %header.number(), "found canonical state for block in database, creating provider builder"); // For persisted blocks, we create a builder that will fetch state directly from the // database return Ok(Some(StateProviderBuilder::new(self.provider.clone(), hash, None))) } - debug!(target: "engine::tree", %hash, "no canonical state found for block"); + debug!(target: "engine::tree::payload_validator", %hash, "no canonical state found for block"); Ok(None) } - /// Determines the state root computation strategy based on persistence state and configuration. - fn plan_state_root_computation>>( - &self, - input: &BlockOrPayload, - ctx: &TreeCtx<'_, N>, - ) -> StateRootPlan { - // We only run the parallel state root if we are not currently persisting any blocks or - // persisting blocks that are all ancestors of the one we are executing. - // - // If we're committing ancestor blocks, then: any trie updates being committed are a subset - // of the in-memory trie updates collected before fetching reverts. So any diff in - // reverts (pre vs post commit) is already covered by the in-memory trie updates we - // collect in `compute_state_root_parallel`. - // - // See https://github.com/paradigmxyz/reth/issues/12688 for more details - let persisting_kind = ctx.persisting_kind_for(input.block_with_parent()); - let can_run_parallel = - persisting_kind.can_run_parallel_state_root() && !self.config.state_root_fallback(); - - // Check for ancestors with missing trie updates - let has_ancestors_with_missing_trie_updates = - self.has_ancestors_with_missing_trie_updates(input.block_with_parent(), ctx.state()); - - // Decide on the strategy. - // Use state root task only if: - // 1. No persistence is in progress - // 2. Config allows it - // 3. No ancestors with missing trie updates. If any exist, it will mean that every state - // root task proof calculation will include a lot of unrelated paths in the prefix sets. - // It's cheaper to run a parallel state root that does one walk over trie tables while - // accounting for the prefix sets. - let strategy = if can_run_parallel { - if self.config.use_state_root_task() && !has_ancestors_with_missing_trie_updates { - StateRootStrategy::StateRootTask - } else { - StateRootStrategy::Parallel - } - } else { + /// Determines the state root computation strategy based on configuration. + #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] + fn plan_state_root_computation(&self) -> StateRootStrategy { + let strategy = if self.config.state_root_fallback() { StateRootStrategy::Synchronous + } else if self.config.use_state_root_task() { + StateRootStrategy::StateRootTask + } else { + StateRootStrategy::Parallel }; debug!( - target: "engine::tree", - block=?input.num_hash(), + target: "engine::tree::payload_validator", ?strategy, - has_ancestors_with_missing_trie_updates, "Planned state root computation strategy" ); - StateRootPlan { strategy, has_ancestors_with_missing_trie_updates, persisting_kind } + strategy } /// Called when an invalid block is encountered during validation. @@ -1046,113 +884,54 @@ where self.invalid_block_hook.on_invalid_block(parent_header, block, output, trie_updates); } - /// Computes the trie input at the provided parent hash. 
+ /// Computes the trie input at the provided parent hash, as well as the block hash of the + /// highest persisted ancestor. /// /// The goal of this function is to take in-memory blocks and generate a [`TrieInput`] that /// serves as an overlay to the database blocks. /// /// It works as follows: /// 1. Collect in-memory blocks that are descendants of the provided parent hash using - /// [`crate::tree::TreeState::blocks_by_hash`]. - /// 2. If the persistence is in progress, and the block that we're computing the trie input for - /// is a descendant of the currently persisting blocks, we need to be sure that in-memory - /// blocks are not overlapping with the database blocks that may have been already persisted. - /// To do that, we're filtering out in-memory blocks that are lower than the highest database - /// block. - /// 3. Once in-memory blocks are collected and optionally filtered, we compute the - /// [`HashedPostState`] from them. - fn compute_trie_input( + /// [`crate::tree::TreeState::blocks_by_hash`]. This returns the highest persisted ancestor + /// hash (`block_hash`) and the list of in-memory descendant blocks. + /// 2. Extend the `TrieInput` with the contents of these in-memory blocks (from oldest to + /// newest) to build the overlay state and trie updates that sit on top of the database view + /// anchored at `block_hash`. + #[instrument( + level = "debug", + target = "engine::tree::payload_validator", + skip_all, + fields(parent_hash) + )] + fn compute_trie_input( &self, - persisting_kind: PersistingKind, - provider: TP, parent_hash: B256, state: &EngineApiTreeState, allocated_trie_input: Option, - ) -> ProviderResult { + ) -> ProviderResult<(TrieInput, B256)> { // get allocated trie input or use a default trie input let mut input = allocated_trie_input.unwrap_or_default(); - let best_block_number = provider.best_block_number()?; - - let (mut historical, mut blocks) = state - .tree_state - .blocks_by_hash(parent_hash) - .map_or_else(|| (parent_hash.into(), vec![]), |(hash, blocks)| (hash.into(), blocks)); - - // If the current block is a descendant of the currently persisting blocks, then we need to - // filter in-memory blocks, so that none of them are already persisted in the database. - if persisting_kind.is_descendant() { - // Iterate over the blocks from oldest to newest. - while let Some(block) = blocks.last() { - let recovered_block = block.recovered_block(); - if recovered_block.number() <= best_block_number { - // Remove those blocks that lower than or equal to the highest database - // block. - blocks.pop(); - } else { - // If the block is higher than the best block number, stop filtering, as it's - // the first block that's not in the database. - break - } - } - - historical = if let Some(block) = blocks.last() { - // If there are any in-memory blocks left after filtering, set the anchor to the - // parent of the oldest block. - (block.recovered_block().number() - 1).into() - } else { - // Otherwise, set the anchor to the original provided parent hash.
- parent_hash.into() - }; - } + let (block_hash, blocks) = + state.tree_state.blocks_by_hash(parent_hash).unwrap_or_else(|| (parent_hash, vec![])); if blocks.is_empty() { - debug!(target: "engine::tree", %parent_hash, "Parent found on disk"); + debug!(target: "engine::tree::payload_validator", "Parent found on disk"); } else { - debug!(target: "engine::tree", %parent_hash, %historical, blocks = blocks.len(), "Parent found in memory"); + debug!(target: "engine::tree::payload_validator", historical = ?block_hash, blocks = blocks.len(), "Parent found in memory"); } - // Convert the historical block to the block number. - let block_number = provider - .convert_hash_or_number(historical)? - .ok_or_else(|| ProviderError::BlockHashNotFound(historical.as_hash().unwrap()))?; - - // Retrieve revert state for historical block. - let revert_state = if block_number == best_block_number { - // We do not check against the `last_block_number` here because - // `HashedPostState::from_reverts` only uses the database tables, and not static files. - debug!(target: "engine::tree", block_number, best_block_number, "Empty revert state"); - HashedPostState::default() - } else { - let revert_state = HashedPostState::from_reverts::( - provider.tx_ref(), - block_number + 1.., - ) - .map_err(ProviderError::from)?; - debug!( - target: "engine::tree", - block_number, - best_block_number, - accounts = revert_state.accounts.len(), - storages = revert_state.storages.len(), - "Non-empty revert state" - ); - revert_state - }; - input.append(revert_state); - // Extend with contents of parent in-memory blocks. input.extend_with_blocks( blocks.iter().rev().map(|block| (block.hashed_state(), block.trie_updates())), ); - Ok(input) + Ok((input, block_hash)) } } /// Output of block or payload validation. -pub type ValidationOutcome>> = - Result, E>; +pub type ValidationOutcome>> = Result, E>; /// Strategy describing how to compute the state root. #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -1165,16 +944,6 @@ enum StateRootStrategy { Synchronous, } -/// State root computation plan that captures strategy and required data. -struct StateRootPlan { - /// Strategy that should be attempted for computing the state root. - strategy: StateRootStrategy, - /// Whether ancestors have missing trie updates. - has_ancestors_with_missing_trie_updates: bool, - /// The persisting kind for this block. - persisting_kind: PersistingKind, -} - /// Type that validates the payloads processed by the engine. /// /// This provides the necessary functions for validating/executing payloads/blocks. @@ -1228,8 +997,9 @@ pub trait EngineValidator< impl EngineValidator for BasicEngineValidator where - P: DatabaseProviderFactory - + BlockReader
+ P: DatabaseProviderFactory< + Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, + > + BlockReader
+ StateProviderFactory + StateReader + HashedPostStateProvider @@ -1314,4 +1084,12 @@ impl BlockOrPayload { Self::Block(block) => block.block_with_parent(), } } + + /// Returns a string showing whether or not this is a block or payload. + pub const fn type_name(&self) -> &'static str { + match self { + Self::Payload(_) => "payload", + Self::Block(_) => "block", + } + } } diff --git a/crates/engine/tree/src/tree/persistence_state.rs b/crates/engine/tree/src/tree/persistence_state.rs index bbb981a531a..82a8078447d 100644 --- a/crates/engine/tree/src/tree/persistence_state.rs +++ b/crates/engine/tree/src/tree/persistence_state.rs @@ -67,6 +67,7 @@ impl PersistenceState { /// Returns the current persistence action. If there is no persistence task in progress, then /// this returns `None`. + #[cfg(test)] pub(crate) fn current_action(&self) -> Option<&CurrentPersistenceAction> { self.rx.as_ref().map(|rx| &rx.2) } diff --git a/crates/engine/tree/src/tree/precompile_cache.rs b/crates/engine/tree/src/tree/precompile_cache.rs index e58c4911887..1183dfbe983 100644 --- a/crates/engine/tree/src/tree/precompile_cache.rs +++ b/crates/engine/tree/src/tree/precompile_cache.rs @@ -1,4 +1,4 @@ -//! Contains a precompile cache that is backed by a moka cache. +//! Contains a precompile cache backed by `schnellru::LruMap` (LRU by length). use alloy_primitives::Bytes; use parking_lot::Mutex; diff --git a/crates/engine/tree/src/tree/state.rs b/crates/engine/tree/src/tree/state.rs index cab7d35fb22..0a13207e660 100644 --- a/crates/engine/tree/src/tree/state.rs +++ b/crates/engine/tree/src/tree/state.rs @@ -1,29 +1,19 @@ //! Functionality related to tree state. use crate::engine::EngineApiKind; -use alloy_eips::{eip1898::BlockWithParent, merge::EPOCH_SLOTS, BlockNumHash}; +use alloy_eips::BlockNumHash; use alloy_primitives::{ map::{HashMap, HashSet}, BlockNumber, B256, }; -use reth_chain_state::{EthPrimitives, ExecutedBlockWithTrieUpdates}; +use reth_chain_state::{EthPrimitives, ExecutedBlock}; use reth_primitives_traits::{AlloyBlockHeader, NodePrimitives, SealedHeader}; -use reth_trie::updates::TrieUpdates; use std::{ collections::{btree_map, hash_map, BTreeMap, VecDeque}, ops::Bound, - sync::Arc, }; use tracing::debug; -/// Default number of blocks to retain persisted trie updates -const DEFAULT_PERSISTED_TRIE_UPDATES_RETENTION: u64 = EPOCH_SLOTS * 2; - -/// Number of blocks to retain persisted trie updates for OP Stack chains -/// OP Stack chains only need `EPOCH_SLOTS` as reorgs are relevant only when -/// op-node reorgs to the same chain twice -const OPSTACK_PERSISTED_TRIE_UPDATES_RETENTION: u64 = EPOCH_SLOTS; - /// Keeps track of the state of the tree. /// /// ## Invariants @@ -35,19 +25,15 @@ pub struct TreeState { /// __All__ unique executed blocks by block hash that are connected to the canonical chain. /// /// This includes blocks of all forks. - pub(crate) blocks_by_hash: HashMap>, + pub(crate) blocks_by_hash: HashMap>, /// Executed blocks grouped by their respective block number. /// /// This maps unique block number to all known blocks for that height. /// /// Note: there can be multiple blocks at the same height due to forks. - pub(crate) blocks_by_number: BTreeMap>>, + pub(crate) blocks_by_number: BTreeMap>>, /// Map of any parent block hash to its children. pub(crate) parent_to_child: HashMap>, - /// Map of hash to trie updates for canonical blocks that are persisted but not finalized. - /// - /// Contains the block number for easy removal. 
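Reviewer note: the precompile_cache.rs module doc changed above now names `schnellru::LruMap` as the backing store ("LRU by length" refers to the crate's `ByLength` limiter, which caps the number of entries). For reviewers unfamiliar with the crate, a minimal usage sketch with placeholder key/value types, not the actual cache types used by the module:

use schnellru::{ByLength, LruMap};

fn main() {
    // Bounded to 2 entries; once full, the least-recently-used entry is evicted.
    let mut cache: LruMap<Vec<u8>, Vec<u8>> = LruMap::new(ByLength::new(2));
    cache.insert(b"input-a".to_vec(), b"output-a".to_vec());
    cache.insert(b"input-b".to_vec(), b"output-b".to_vec());
    // Touching "input-a" makes it most recently used.
    assert!(cache.get(&b"input-a".to_vec()).is_some());
    // Inserting a third entry evicts the least recently used one ("input-b").
    cache.insert(b"input-c".to_vec(), b"output-c".to_vec());
    assert!(cache.get(&b"input-b".to_vec()).is_none());
    assert!(cache.get(&b"input-a".to_vec()).is_some());
}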
- pub(crate) persisted_trie_updates: HashMap)>, /// Currently tracked canonical head of the chain. pub(crate) current_canonical_head: BlockNumHash, /// The engine API variant of this handler @@ -62,7 +48,6 @@ impl TreeState { blocks_by_number: BTreeMap::new(), current_canonical_head, parent_to_child: HashMap::default(), - persisted_trie_updates: HashMap::default(), engine_kind, } } @@ -77,11 +62,8 @@ impl TreeState { self.blocks_by_hash.len() } - /// Returns the [`ExecutedBlockWithTrieUpdates`] by hash. - pub(crate) fn executed_block_by_hash( - &self, - hash: B256, - ) -> Option<&ExecutedBlockWithTrieUpdates> { + /// Returns the [`ExecutedBlock`] by hash. + pub(crate) fn executed_block_by_hash(&self, hash: B256) -> Option<&ExecutedBlock> { self.blocks_by_hash.get(&hash) } @@ -94,13 +76,11 @@ impl TreeState { } /// Returns all available blocks for the given hash that lead back to the canonical chain, from - /// newest to oldest. And the parent hash of the oldest block that is missing from the buffer. + /// newest to oldest, and the parent hash of the oldest returned block. This parent hash is the + /// highest persisted block connected to this chain. /// /// Returns `None` if the block for the given hash is not found. - pub(crate) fn blocks_by_hash( - &self, - hash: B256, - ) -> Option<(B256, Vec>)> { + pub(crate) fn blocks_by_hash(&self, hash: B256) -> Option<(B256, Vec>)> { let block = self.blocks_by_hash.get(&hash).cloned()?; let mut parent_hash = block.recovered_block().parent_hash(); let mut blocks = vec![block]; @@ -113,7 +93,7 @@ impl TreeState { } /// Insert executed block into the state. - pub(crate) fn insert_executed(&mut self, executed: ExecutedBlockWithTrieUpdates) { + pub(crate) fn insert_executed(&mut self, executed: ExecutedBlock) { let hash = executed.recovered_block().hash(); let parent_hash = executed.recovered_block().parent_hash(); let block_number = executed.recovered_block().number(); @@ -127,10 +107,6 @@ impl TreeState { self.blocks_by_number.entry(block_number).or_default().push(executed); self.parent_to_child.entry(parent_hash).or_default().insert(hash); - - for children in self.parent_to_child.values_mut() { - children.retain(|child| self.blocks_by_hash.contains_key(child)); - } } /// Remove single executed block by its hash. @@ -138,10 +114,7 @@ impl TreeState { /// ## Returns /// /// The removed block and the block hashes of its children. - fn remove_by_hash( - &mut self, - hash: B256, - ) -> Option<(ExecutedBlockWithTrieUpdates, HashSet)> { + fn remove_by_hash(&mut self, hash: B256) -> Option<(ExecutedBlock, HashSet)> { let executed = self.blocks_by_hash.remove(&hash)?; // Remove this block from collection of children of its parent block. 
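Reviewer note: `blocks_by_hash` above walks parent hashes through the in-memory block map until it reaches a hash that is not tracked in memory; that hash is the highest persisted ancestor which the overlay-based validator code anchors on. A toy version of the walk with simplified stand-in types (u64 in place of B256, no executed-block payloads):

use std::collections::HashMap;

#[derive(Clone)]
struct MemBlock { hash: u64, parent: u64 }

/// Returns the in-memory chain from `hash` back to (but excluding) the first persisted
/// ancestor, ordered newest to oldest, together with that ancestor's hash.
fn blocks_by_hash(in_memory: &HashMap<u64, MemBlock>, hash: u64) -> Option<(u64, Vec<MemBlock>)> {
    let block = in_memory.get(&hash)?.clone();
    let mut parent = block.parent;
    let mut blocks = vec![block];
    while let Some(ancestor) = in_memory.get(&parent) {
        parent = ancestor.parent;
        blocks.push(ancestor.clone());
    }
    Some((parent, blocks))
}

fn main() {
    // Block 10 is persisted (absent from the map); 11 and 12 are in-memory descendants.
    let in_memory = HashMap::from([
        (11, MemBlock { hash: 11, parent: 10 }),
        (12, MemBlock { hash: 12, parent: 11 }),
    ]);
    let (anchor, chain) = blocks_by_hash(&in_memory, 12).unwrap();
    assert_eq!(anchor, 10);
    assert_eq!(chain.iter().map(|b| b.hash).collect::<Vec<_>>(), vec![12, 11]);
}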
@@ -215,41 +188,12 @@ impl TreeState { if executed.recovered_block().number() <= upper_bound { let num_hash = executed.recovered_block().num_hash(); debug!(target: "engine::tree", ?num_hash, "Attempting to remove block walking back from the head"); - if let Some((mut removed, _)) = - self.remove_by_hash(executed.recovered_block().hash()) - { - debug!(target: "engine::tree", ?num_hash, "Removed block walking back from the head"); - // finally, move the trie updates - let Some(trie_updates) = removed.trie.take_present() else { - debug!(target: "engine::tree", ?num_hash, "No trie updates found for persisted block"); - continue; - }; - self.persisted_trie_updates.insert( - removed.recovered_block().hash(), - (removed.recovered_block().number(), trie_updates), - ); - } + self.remove_by_hash(executed.recovered_block().hash()); } } debug!(target: "engine::tree", ?upper_bound, ?last_persisted_hash, "Removed canonical blocks from the tree"); } - /// Prunes old persisted trie updates based on the current block number - /// and chain type (OP Stack or regular) - pub(crate) fn prune_persisted_trie_updates(&mut self) { - let retention_blocks = if self.engine_kind.is_opstack() { - OPSTACK_PERSISTED_TRIE_UPDATES_RETENTION - } else { - DEFAULT_PERSISTED_TRIE_UPDATES_RETENTION - }; - - let earliest_block_to_retain = - self.current_canonical_head.number.saturating_sub(retention_blocks); - - self.persisted_trie_updates - .retain(|_, (block_number, _)| *block_number > earliest_block_to_retain); - } - /// Removes all blocks that are below the finalized block, as well as removing non-canonical /// sidechains that fork from below the finalized block. pub(crate) fn prune_finalized_sidechains(&mut self, finalized_num_hash: BlockNumHash) { @@ -274,8 +218,6 @@ impl TreeState { } } - self.prune_persisted_trie_updates(); - // The only block that should remain at the `finalized` number now, is the finalized // block, if it exists. // @@ -348,10 +290,37 @@ impl TreeState { } } + /// Updates the canonical head to the given block. + pub(crate) const fn set_canonical_head(&mut self, new_head: BlockNumHash) { + self.current_canonical_head = new_head; + } + + /// Returns the tracked canonical head. + pub(crate) const fn canonical_head(&self) -> &BlockNumHash { + &self.current_canonical_head + } + + /// Returns the block hash of the canonical head. + pub(crate) const fn canonical_block_hash(&self) -> B256 { + self.canonical_head().hash + } + + /// Returns the block number of the canonical head. + pub(crate) const fn canonical_block_number(&self) -> BlockNumber { + self.canonical_head().number + } +} + +#[cfg(test)] +impl TreeState { /// Determines if the second block is a descendant of the first block. /// /// If the two blocks are the same, this returns `false`. - pub(crate) fn is_descendant(&self, first: BlockNumHash, second: BlockWithParent) -> bool { + pub(crate) fn is_descendant( + &self, + first: BlockNumHash, + second: alloy_eips::eip1898::BlockWithParent, + ) -> bool { // If the second block's parent is the first block's hash, then it is a direct child // and we can return early. if second.parent == first.hash { @@ -384,26 +353,6 @@ impl TreeState { // Now the block numbers should be equal, so we compare hashes. current_block.recovered_block().parent_hash() == first.hash } - - /// Updates the canonical head to the given block. - pub(crate) const fn set_canonical_head(&mut self, new_head: BlockNumHash) { - self.current_canonical_head = new_head; - } - - /// Returns the tracked canonical head. 
- pub(crate) const fn canonical_head(&self) -> &BlockNumHash { - &self.current_canonical_head - } - - /// Returns the block hash of the canonical head. - pub(crate) const fn canonical_block_hash(&self) -> B256 { - self.canonical_head().hash - } - - /// Returns the block number of the canonical head. - pub(crate) const fn canonical_block_number(&self) -> BlockNumber { - self.canonical_head().number - } } #[cfg(test)] diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index b2774b8b17e..b22b1c1f698 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -3,10 +3,11 @@ use crate::{ persistence::PersistenceAction, tree::{ payload_validator::{BasicEngineValidator, TreeCtx, ValidationOutcome}, + persistence_state::CurrentPersistenceAction, TreeConfig, }, }; -use alloy_consensus::Header; + use alloy_eips::eip1898::BlockWithParent; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -26,7 +27,7 @@ use reth_ethereum_primitives::{Block, EthPrimitives}; use reth_evm_ethereum::MockEvmConfig; use reth_primitives_traits::Block as _; use reth_provider::{test_utils::MockEthProvider, ExecutionOutcome}; -use reth_trie::HashedPostState; +use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::{ collections::BTreeMap, str::FromStr, @@ -56,6 +57,7 @@ impl reth_engine_primitives::PayloadValidator for MockEngineVali reth_payload_primitives::NewPayloadError::Other(format!("{e:?}").into()) })?; let sealed = block.seal_slow(); + sealed.try_recover().map_err(|e| reth_payload_primitives::NewPayloadError::Other(e.into())) } } @@ -147,7 +149,7 @@ struct TestHarness { >, to_tree_tx: Sender, Block>>, from_tree_rx: UnboundedReceiver, - blocks: Vec, + blocks: Vec, action_rx: Receiver, block_builder: TestBlockBuilder, provider: MockEthProvider, @@ -227,7 +229,7 @@ impl TestHarness { } } - fn with_blocks(mut self, blocks: Vec) -> Self { + fn with_blocks(mut self, blocks: Vec) -> Self { let mut blocks_by_hash = HashMap::default(); let mut blocks_by_number = BTreeMap::new(); let mut state_by_hash = HashMap::default(); @@ -252,7 +254,6 @@ impl TestHarness { blocks_by_number, current_canonical_head: blocks.last().unwrap().recovered_block().num_hash(), parent_to_child, - persisted_trie_updates: HashMap::default(), engine_kind: EngineApiKind::Ethereum, }; @@ -335,15 +336,12 @@ impl TestHarness { fn persist_blocks(&self, blocks: Vec>) { let mut block_data: Vec<(B256, Block)> = Vec::with_capacity(blocks.len()); - let mut headers_data: Vec<(B256, Header)> = Vec::with_capacity(blocks.len()); for block in &blocks { block_data.push((block.hash(), block.clone_block())); - headers_data.push((block.hash(), block.header().clone())); } self.provider.extend_blocks(block_data); - self.provider.extend_headers(headers_data); } } @@ -402,9 +400,8 @@ impl ValidatorTestHarness { Self { harness, validator, metrics: TestMetrics::default() } } - /// Configure `PersistenceState` for specific `PersistingKind` scenarios + /// Configure `PersistenceState` for specific persistence scenarios fn start_persistence_operation(&mut self, action: CurrentPersistenceAction) { - use crate::tree::persistence_state::CurrentPersistenceAction; use tokio::sync::oneshot; // Create a dummy receiver for testing - it will never receive a value @@ -432,7 +429,6 @@ impl ValidatorTestHarness { ) -> ValidationOutcome { let ctx = TreeCtx::new( &mut self.harness.tree.state, - &self.harness.tree.persistence_state, &self.harness.tree.canonical_in_memory_state, ); let result = 
self.validator.validate_block(block, ctx); @@ -827,25 +823,21 @@ fn test_tree_state_on_new_head_deep_fork() { let chain_b = test_block_builder.create_fork(&last_block, 10); for block in &chain_a { - test_harness.tree.state.tree_state.insert_executed(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(block.clone()), - execution_output: Arc::new(ExecutionOutcome::default()), - hashed_state: Arc::new(HashedPostState::default()), - }, - trie: ExecutedTrieUpdates::empty(), + test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { + recovered_block: Arc::new(block.clone()), + execution_output: Arc::new(ExecutionOutcome::default()), + hashed_state: Arc::new(HashedPostState::default()), + trie_updates: Arc::new(TrieUpdates::default()), }); } test_harness.tree.state.tree_state.set_canonical_head(chain_a.last().unwrap().num_hash()); for block in &chain_b { - test_harness.tree.state.tree_state.insert_executed(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(block.clone()), - execution_output: Arc::new(ExecutionOutcome::default()), - hashed_state: Arc::new(HashedPostState::default()), - }, - trie: ExecutedTrieUpdates::empty(), + test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { + recovered_block: Arc::new(block.clone()), + execution_output: Arc::new(ExecutionOutcome::default()), + hashed_state: Arc::new(HashedPostState::default()), + trie_updates: Arc::new(TrieUpdates::default()), }); } @@ -1395,13 +1387,8 @@ fn test_validate_block_synchronous_strategy_during_persistence() { let genesis_hash = MAINNET.genesis_hash(); let valid_block = block_factory.create_valid_block(genesis_hash); - // Call validate_block_with_state directly - // This should execute the Synchronous strategy logic during active persistence - let result = test_harness.validate_block_direct(valid_block); - - // Verify validation was attempted (may fail due to test environment limitations) - // The key test is that the Synchronous strategy path is executed during persistence - assert!(result.is_ok() || result.is_err(), "Validation should complete") + // Test that Synchronous strategy executes during active persistence without panicking + let _result = test_harness.validate_block_direct(valid_block); } /// Test multiple validation scenarios including valid, consensus-invalid, and execution-invalid @@ -1415,15 +1402,9 @@ fn test_validate_block_multiple_scenarios() { let mut block_factory = TestBlockFactory::new(MAINNET.as_ref().clone()); let genesis_hash = MAINNET.genesis_hash(); - // Scenario 1: Valid block validation (may fail due to test environment limitations) + // Scenario 1: Valid block validation (test execution, not result) let valid_block = block_factory.create_valid_block(genesis_hash); - let result1 = test_harness.validate_block_direct(valid_block); - // Note: Valid blocks might fail in test environment due to missing provider data, - // but the important thing is that the validation logic executes without panicking - assert!( - result1.is_ok() || result1.is_err(), - "Valid block validation should complete (may fail due to test environment)" - ); + let _result1 = test_harness.validate_block_direct(valid_block); // Scenario 2: Block with consensus issues should be rejected let consensus_invalid = block_factory.create_invalid_consensus_block(genesis_hash); @@ -1705,3 +1686,277 @@ mod payload_execution_tests { } } } + +/// Test suite for the refactored `on_forkchoice_updated` helper methods +#[cfg(test)] +mod forkchoice_updated_tests { 
+ use super::*; + + /// Test that validates the forkchoice state pre-validation logic + #[tokio::test] + async fn test_validate_forkchoice_state() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Test 1: Zero head block hash should return early with invalid state + let zero_state = ForkchoiceState { + head_block_hash: B256::ZERO, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.validate_forkchoice_state(zero_state).unwrap(); + assert!(result.is_some(), "Zero head block hash should return early"); + let outcome = result.unwrap(); + // For invalid state, we expect an error response + assert!(matches!(outcome, OnForkChoiceUpdated { .. })); + + // Test 2: Valid state with backfill active should return syncing + test_harness.tree.backfill_sync_state = BackfillSyncState::Active; + let valid_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.validate_forkchoice_state(valid_state).unwrap(); + assert!(result.is_some(), "Backfill active should return early"); + let outcome = result.unwrap(); + // We need to await the outcome to check the payload status + let fcu_result = outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing()); + + // Test 3: Valid state with idle backfill should continue processing + test_harness.tree.backfill_sync_state = BackfillSyncState::Idle; + let valid_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.validate_forkchoice_state(valid_state).unwrap(); + assert!(result.is_none(), "Valid state should continue processing"); + } + + /// Test that verifies canonical head handling + #[tokio::test] + async fn test_handle_canonical_head() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Create test blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..3).collect(); + test_harness = test_harness.with_blocks(blocks); + + let canonical_head = test_harness.tree.state.tree_state.canonical_block_hash(); + + // Test 1: Head is already canonical, no payload attributes + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .handle_canonical_head(state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_some(), "Should return outcome for canonical head"); + let outcome = result.unwrap(); + let fcu_result = outcome.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_valid()); + + // Test 2: Head is not canonical - should return None to continue processing + let non_canonical_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .handle_canonical_head(non_canonical_state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_none(), "Non-canonical head should return None"); + } + + /// Test that verifies chain update application + #[tokio::test] + async fn test_apply_chain_update() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Create a chain of blocks + let blocks: Vec<_> = 
test_harness.block_builder.get_executed_blocks(0..5).collect(); + test_harness = test_harness.with_blocks(blocks.clone()); + + let new_head = blocks[2].recovered_block().hash(); + + // Test 1: Apply chain update to a new head + let state = ForkchoiceState { + head_block_hash: new_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .apply_chain_update(state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_some(), "Should apply chain update for new head"); + let outcome = result.unwrap(); + let fcu_result = outcome.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_valid()); + + // Test 2: Try to apply chain update to missing block + let missing_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .apply_chain_update(missing_state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_none(), "Missing block should return None"); + } + + /// Test that verifies missing block handling + #[tokio::test] + async fn test_handle_missing_block() { + let chain_spec = MAINNET.clone(); + let test_harness = TestHarness::new(chain_spec); + + let state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.handle_missing_block(state).unwrap(); + + // Should return syncing status with download event + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing()); + assert!(result.event.is_some()); + + if let Some(TreeEvent::Download(download_request)) = result.event { + match download_request { + DownloadRequest::BlockSet(block_set) => { + assert_eq!(block_set.len(), 1); + } + _ => panic!("Expected single block download request"), + } + } + } + + /// Test the complete `on_forkchoice_updated` flow with all helper methods + #[tokio::test] + async fn test_on_forkchoice_updated_integration() { + reth_tracing::init_test_tracing(); + + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Create test blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..3).collect(); + test_harness = test_harness.with_blocks(blocks.clone()); + + let canonical_head = test_harness.tree.state.tree_state.canonical_block_hash(); + + // Test Case 1: FCU to existing canonical head + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: canonical_head, + finalized_block_hash: canonical_head, + }; + + let result = test_harness + .tree + .on_forkchoice_updated(state, None, EngineApiMessageVersion::default()) + .unwrap(); + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_valid()); + + // Test Case 2: FCU to missing block + let missing_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .on_forkchoice_updated(missing_state, None, EngineApiMessageVersion::default()) + .unwrap(); + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing()); + assert!(result.event.is_some(), "Should trigger download event for missing block"); + + // Test Case 3: FCU during backfill sync + test_harness.tree.backfill_sync_state = BackfillSyncState::Active; + let state = ForkchoiceState { 
+ head_block_hash: canonical_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .on_forkchoice_updated(state, None, EngineApiMessageVersion::default()) + .unwrap(); + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing(), "Should return syncing during backfill"); + } + + /// Test edge case: FCU with invalid ancestor + #[tokio::test] + async fn test_fcu_with_invalid_ancestor() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Mark a block as invalid + let invalid_block_hash = B256::random(); + test_harness.tree.state.invalid_headers.insert(BlockWithParent { + block: NumHash::new(1, invalid_block_hash), + parent: B256::ZERO, + }); + + // Test FCU that points to a descendant of the invalid block + // This is a bit tricky to test directly, but we can verify the check_invalid_ancestor + // method + let result = test_harness.tree.check_invalid_ancestor(invalid_block_hash).unwrap(); + assert!(result.is_some(), "Should detect invalid ancestor"); + } + + /// Test `OpStack` specific behavior with canonical head + #[tokio::test] + async fn test_opstack_canonical_head_behavior() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Set engine kind to OpStack + test_harness.tree.engine_kind = EngineApiKind::OpStack; + + // Create test blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..3).collect(); + test_harness = test_harness.with_blocks(blocks); + + let canonical_head = test_harness.tree.state.tree_state.canonical_block_hash(); + + // For OpStack, even if head is already canonical, we should still process payload + // attributes + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .handle_canonical_head(state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_some(), "OpStack should handle canonical head"); + } +} diff --git a/crates/era-downloader/src/client.rs b/crates/era-downloader/src/client.rs index 298248ff3e9..36ed93e1e2f 100644 --- a/crates/era-downloader/src/client.rs +++ b/crates/era-downloader/src/client.rs @@ -128,8 +128,6 @@ impl EraClient { let Some(number) = self.file_name_to_number(name) && (number < index || number >= last) { - eprintln!("Deleting file {}", entry.path().display()); - eprintln!("{number} < {index} || {number} >= {last}"); reth_fs_util::remove_file(entry.path())?; } } diff --git a/crates/era-downloader/tests/it/checksums.rs b/crates/era-downloader/tests/it/checksums.rs index 630cbece5d4..20717bfda0b 100644 --- a/crates/era-downloader/tests/it/checksums.rs +++ b/crates/era-downloader/tests/it/checksums.rs @@ -60,7 +60,7 @@ impl HttpClient for FailingClient { ) -> eyre::Result> + Send + Sync + Unpin> { let url = url.into_url().unwrap(); - Ok(futures::stream::iter(vec![Ok(match url.to_string().as_str() { + Ok(futures::stream::iter(vec![Ok(match url.as_str() { "https://mainnet.era1.nimbus.team/" => Bytes::from_static(crate::NIMBUS), "https://era1.ethportal.net/" => Bytes::from_static(crate::ETH_PORTAL), "https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(crate::ITHACA), diff --git a/crates/era-downloader/tests/it/main.rs b/crates/era-downloader/tests/it/main.rs index 526d3885bff..189d95506d0 100644 --- a/crates/era-downloader/tests/it/main.rs +++ 
b/crates/era-downloader/tests/it/main.rs @@ -32,7 +32,7 @@ impl HttpClient for StubClient { ) -> eyre::Result> + Send + Sync + Unpin> { let url = url.into_url().unwrap(); - Ok(futures::stream::iter(vec![Ok(match url.to_string().as_str() { + Ok(futures::stream::iter(vec![Ok(match url.as_str() { "https://mainnet.era1.nimbus.team/" => Bytes::from_static(NIMBUS), "https://era1.ethportal.net/" => Bytes::from_static(ETH_PORTAL), "https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(ITHACA), diff --git a/crates/era-utils/src/export.rs b/crates/era-utils/src/export.rs index 670a534ba01..0502f0e2eac 100644 --- a/crates/era-utils/src/export.rs +++ b/crates/era-utils/src/export.rs @@ -1,17 +1,22 @@ //! Logic to export from database era1 block history //! and injecting them into era1 files with `Era1Writer`. +use crate::calculate_td_by_number; use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, B256, U256}; use eyre::{eyre, Result}; use reth_era::{ - e2s_types::IndexEntry, - era1_file::Era1Writer, - era1_types::{BlockIndex, Era1Id}, - era_file_ops::{EraFileId, StreamWriter}, - execution_types::{ - Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, - TotalDifficulty, MAX_BLOCKS_PER_ERA1, + common::file_ops::{EraFileId, StreamWriter}, + e2s::types::IndexEntry, + era1::{ + file::Era1Writer, + types::{ + execution::{ + Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, + TotalDifficulty, MAX_BLOCKS_PER_ERA1, + }, + group::{BlockIndex, Era1Id}, + }, }, }; use reth_fs_util as fs; @@ -114,9 +119,7 @@ where let mut total_difficulty = if config.first_block_number > 0 { let prev_block_number = config.first_block_number - 1; - provider - .header_td_by_number(prev_block_number)? - .ok_or_else(|| eyre!("Total difficulty not found for block {prev_block_number}"))? + calculate_td_by_number(provider, prev_block_number)? 
} else { U256::ZERO }; @@ -216,12 +219,12 @@ where writer.write_accumulator(&accumulator)?; writer.write_block_index(&block_index)?; writer.flush()?; - created_files.push(file_path.clone()); info!( target: "era::history::export", "Wrote ERA1 file: {file_path:?} with {blocks_written} blocks" ); + created_files.push(file_path); } } @@ -307,7 +310,7 @@ where #[cfg(test)] mod tests { use crate::ExportConfig; - use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; + use reth_era::era1::types::execution::MAX_BLOCKS_PER_ERA1; use tempfile::tempdir; #[test] diff --git a/crates/era-utils/src/history.rs b/crates/era-utils/src/history.rs index 12bafed6113..a1d3e8c8590 100644 --- a/crates/era-utils/src/history.rs +++ b/crates/era-utils/src/history.rs @@ -1,3 +1,4 @@ +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber, U256}; use futures_util::{Stream, StreamExt}; use reth_db_api::{ @@ -8,26 +9,27 @@ use reth_db_api::{ RawKey, RawTable, RawValue, }; use reth_era::{ - e2s_types::E2sError, - era1_file::{BlockTupleIterator, Era1Reader}, - era_file_ops::StreamReader, - execution_types::BlockTuple, - DecodeCompressed, + common::{decode::DecodeCompressed, file_ops::StreamReader}, + e2s::error::E2sError, + era1::{ + file::{BlockTupleIterator, Era1Reader}, + types::execution::BlockTuple, + }, }; use reth_era_downloader::EraMeta; use reth_etl::Collector; use reth_fs_util as fs; use reth_primitives_traits::{Block, FullBlockBody, FullBlockHeader, NodePrimitives}; use reth_provider::{ - providers::StaticFileProviderRWRefMut, BlockWriter, ProviderError, StaticFileProviderFactory, + providers::StaticFileProviderRWRefMut, BlockReader, BlockWriter, StaticFileProviderFactory, StaticFileSegment, StaticFileWriter, }; use reth_stages_types::{ CheckpointBlockRange, EntitiesCheckpoint, HeadersCheckpoint, StageCheckpoint, StageId, }; use reth_storage_api::{ - errors::ProviderResult, DBProvider, DatabaseProviderFactory, HeaderProvider, - NodePrimitivesProvider, StageCheckpointWriter, + errors::ProviderResult, DBProvider, DatabaseProviderFactory, NodePrimitivesProvider, + StageCheckpointWriter, }; use std::{ collections::Bound, @@ -82,11 +84,6 @@ where .get_highest_static_file_block(StaticFileSegment::Headers) .unwrap_or_default(); - // Find the latest total difficulty - let mut td = static_file_provider - .header_td_by_number(height)? - .ok_or(ProviderError::TotalDifficultyNotFound(height))?; - while let Some(meta) = rx.recv()? { let from = height; let provider = provider_factory.database_provider_rw()?; @@ -96,7 +93,6 @@ where &mut static_file_provider.latest_writer(StaticFileSegment::Headers)?, &provider, hash_collector, - &mut td, height.., )?; @@ -146,7 +142,7 @@ where /// Extracts block headers and bodies from `meta` and appends them using `writer` and `provider`. /// -/// Adds on to `total_difficulty` and collects hash to height using `hash_collector`. +/// Collects hash to height using `hash_collector`. /// /// Skips all blocks below the [`start_bound`] of `block_numbers` and stops when reaching past the /// [`end_bound`] or the end of the file. @@ -160,7 +156,6 @@ pub fn process( writer: &mut StaticFileProviderRWRefMut<'_,
::Primitives>, provider: &P, hash_collector: &mut Collector, - total_difficulty: &mut U256, block_numbers: impl RangeBounds, ) -> eyre::Result where @@ -182,7 +177,7 @@ where as Box) -> eyre::Result<(BH, BB)>>); let iter = ProcessIter { iter, era: meta }; - process_iter(iter, writer, provider, hash_collector, total_difficulty, block_numbers) + process_iter(iter, writer, provider, hash_collector, block_numbers) } type ProcessInnerIter = @@ -271,7 +266,6 @@ pub fn process_iter( writer: &mut StaticFileProviderRWRefMut<'_,
::Primitives>, provider: &P, hash_collector: &mut Collector, - total_difficulty: &mut U256, block_numbers: impl RangeBounds, ) -> eyre::Result where @@ -286,12 +280,12 @@ where { let mut last_header_number = match block_numbers.start_bound() { Bound::Included(&number) => number, - Bound::Excluded(&number) => number.saturating_sub(1), + Bound::Excluded(&number) => number.saturating_add(1), Bound::Unbounded => 0, }; let target = match block_numbers.end_bound() { Bound::Included(&number) => Some(number), - Bound::Excluded(&number) => Some(number.saturating_add(1)), + Bound::Excluded(&number) => Some(number.saturating_sub(1)), Bound::Unbounded => None, }; @@ -311,11 +305,8 @@ where let hash = header.hash_slow(); last_header_number = number; - // Increase total difficulty - *total_difficulty += header.difficulty(); - // Append to Headers segment - writer.append_header(&header, *total_difficulty, &hash)?; + writer.append_header(&header, &hash)?; // Write bodies to database. provider.append_block_bodies(vec![(header.number(), Some(body))])?; @@ -382,3 +373,28 @@ where Ok(()) } + +/// Calculates the total difficulty for a given block number by summing the difficulty +/// of all blocks from genesis to the given block. +/// +/// Very expensive - iterates through all blocks in batches of 1000. +/// +/// Returns an error if any block is missing. +pub fn calculate_td_by_number
<P>
(provider: &P, num: BlockNumber) -> eyre::Result +where + P: BlockReader, +{ + let mut total_difficulty = U256::ZERO; + let mut start = 0; + + while start <= num { + let end = (start + 1000 - 1).min(num); + + total_difficulty += + provider.headers_range(start..=end)?.iter().map(|h| h.difficulty()).sum::(); + + start = end + 1; + } + + Ok(total_difficulty) +} diff --git a/crates/era-utils/src/lib.rs b/crates/era-utils/src/lib.rs index 966709d2f21..13a5ceefe92 100644 --- a/crates/era-utils/src/lib.rs +++ b/crates/era-utils/src/lib.rs @@ -14,5 +14,6 @@ pub use export::{export, ExportConfig}; /// Imports history from ERA files. pub use history::{ - build_index, decode, import, open, process, process_iter, save_stage_checkpoints, ProcessIter, + build_index, calculate_td_by_number, decode, import, open, process, process_iter, + save_stage_checkpoints, ProcessIter, }; diff --git a/crates/era-utils/tests/it/history.rs b/crates/era-utils/tests/it/history.rs index 8e720f1001b..2075722398f 100644 --- a/crates/era-utils/tests/it/history.rs +++ b/crates/era-utils/tests/it/history.rs @@ -1,7 +1,7 @@ use crate::{ClientWithFakeIndex, ITHACA_ERA_INDEX_URL}; use reqwest::{Client, Url}; use reth_db_common::init::init_genesis; -use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; +use reth_era::era1::types::execution::MAX_BLOCKS_PER_ERA1; use reth_era_downloader::{EraClient, EraStream, EraStreamConfig}; use reth_era_utils::{export, import, ExportConfig}; use reth_etl::Collector; diff --git a/crates/era-utils/tests/it/main.rs b/crates/era-utils/tests/it/main.rs index 94805c5b356..2e2ec0b0556 100644 --- a/crates/era-utils/tests/it/main.rs +++ b/crates/era-utils/tests/it/main.rs @@ -32,7 +32,7 @@ impl HttpClient for ClientWithFakeIndex { ) -> eyre::Result> + Send + Sync + Unpin> { let url = url.into_url()?; - match url.to_string().as_str() { + match url.as_str() { ITHACA_ERA_INDEX_URL => { // Create a static stream without boxing let stream = diff --git a/crates/era/src/common/decode.rs b/crates/era/src/common/decode.rs new file mode 100644 index 00000000000..cef3368d74c --- /dev/null +++ b/crates/era/src/common/decode.rs @@ -0,0 +1,17 @@ +//! Compressed data decoding utilities. + +use crate::e2s::error::E2sError; +use alloy_rlp::Decodable; +use ssz::Decode; + +/// Extension trait for generic decoding from compressed data +pub trait DecodeCompressed { + /// Decompress and decode the data into the given type + fn decode(&self) -> Result; +} + +/// Extension trait for generic decoding from compressed ssz data +pub trait DecodeCompressedSsz { + /// Decompress and decode the SSZ data into the given type + fn decode(&self) -> Result; +} diff --git a/crates/era/src/era_file_ops.rs b/crates/era/src/common/file_ops.rs similarity index 97% rename from crates/era/src/era_file_ops.rs rename to crates/era/src/common/file_ops.rs index 469d6b78351..752f5b66fb3 100644 --- a/crates/era/src/era_file_ops.rs +++ b/crates/era/src/common/file_ops.rs @@ -1,6 +1,6 @@ -//! Represents reading and writing operations' era file +//! Era file format traits and I/O operations. -use crate::{e2s_types::Version, E2sError}; +use crate::e2s::{error::E2sError, types::Version}; use std::{ fs::File, io::{Read, Seek, Write}, diff --git a/crates/era/src/common/mod.rs b/crates/era/src/common/mod.rs new file mode 100644 index 00000000000..3ad45dfdd8a --- /dev/null +++ b/crates/era/src/common/mod.rs @@ -0,0 +1,4 @@ +//! Common utilities and shared functionality. 
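The `process_iter` hunk above also corrects the exclusive range-bound handling: an excluded start bound now begins one block later (`saturating_add(1)`) and an excluded end bound now stops one block earlier (`saturating_sub(1)`), matching std's `RangeBounds` semantics; the previous code had the two adjustments swapped. A minimal std-only sketch of the corrected mapping (illustrative block numbers, not reth code):

use std::ops::Bound;

fn main() {
    // Start bound: Excluded(10) means the first block to process is 11.
    let start = match Bound::Excluded(&10u64) {
        Bound::Included(&n) => n,
        Bound::Excluded(&n) => n.saturating_add(1),
        Bound::Unbounded => 0,
    };
    assert_eq!(start, 11);

    // End bound: Excluded(10) means the last block to process (the inclusive target) is 9.
    let target = match Bound::Excluded(&10u64) {
        Bound::Included(&n) => Some(n),
        Bound::Excluded(&n) => Some(n.saturating_sub(1)),
        Bound::Unbounded => None,
    };
    assert_eq!(target, Some(9));
}

The new `calculate_td_by_number` helper above serves callers that previously read a stored total difficulty: with the in-loop accumulation and the `append_header` TD argument removed, total difficulty is recomputed on demand as a chunked prefix sum of header difficulties from genesis.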
+ +pub mod decode; +pub mod file_ops; diff --git a/crates/era/src/e2s/error.rs b/crates/era/src/e2s/error.rs new file mode 100644 index 00000000000..ccfbe7296c8 --- /dev/null +++ b/crates/era/src/e2s/error.rs @@ -0,0 +1,32 @@ +//! Error handling for e2s files operations + +use std::io; +use thiserror::Error; + +/// Error types for e2s file operations +#[derive(Error, Debug)] +pub enum E2sError { + /// IO error during file operations + #[error("IO error: {0}")] + Io(#[from] io::Error), + + /// Error during SSZ encoding/decoding + #[error("SSZ error: {0}")] + Ssz(String), + + /// Reserved field in header not zero + #[error("Reserved field in header not zero")] + ReservedNotZero, + + /// Error during snappy compression + #[error("Snappy compression error: {0}")] + SnappyCompression(String), + + /// Error during snappy decompression + #[error("Snappy decompression error: {0}")] + SnappyDecompression(String), + + /// Error during RLP encoding/decoding + #[error("RLP error: {0}")] + Rlp(String), +} diff --git a/crates/era/src/e2s_file.rs b/crates/era/src/e2s/file.rs similarity index 99% rename from crates/era/src/e2s_file.rs rename to crates/era/src/e2s/file.rs index e1b6989a0f3..9c48add603b 100644 --- a/crates/era/src/e2s_file.rs +++ b/crates/era/src/e2s/file.rs @@ -2,7 +2,10 @@ //! //! See also -use crate::e2s_types::{E2sError, Entry, Version}; +use crate::e2s::{ + error::E2sError, + types::{Entry, Version}, +}; use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write}; /// A reader for `E2Store` files that wraps a [`BufReader`]. @@ -107,7 +110,7 @@ impl E2StoreWriter { #[cfg(test)] mod tests { use super::*; - use crate::e2s_types::{SLOT_INDEX, VERSION}; + use crate::e2s::types::{SLOT_INDEX, VERSION}; use std::io::Cursor; fn create_slot_index_data(starting_slot: u64, offsets: &[i64]) -> Vec { diff --git a/crates/era/src/e2s/mod.rs b/crates/era/src/e2s/mod.rs new file mode 100644 index 00000000000..d67190f4759 --- /dev/null +++ b/crates/era/src/e2s/mod.rs @@ -0,0 +1,5 @@ +//! Core e2store primitives and file handling. + +pub mod error; +pub mod file; +pub mod types; diff --git a/crates/era/src/e2s_types.rs b/crates/era/src/e2s/types.rs similarity index 90% rename from crates/era/src/e2s_types.rs rename to crates/era/src/e2s/types.rs index f14bfe56e86..dd0e9485da2 100644 --- a/crates/era/src/e2s_types.rs +++ b/crates/era/src/e2s/types.rs @@ -8,9 +8,9 @@ //! An [`Entry`] is a complete record in the file, consisting of both a [`Header`] and its //! 
associated data +use crate::e2s::error::E2sError; use ssz_derive::{Decode, Encode}; use std::io::{self, Read, Write}; -use thiserror::Error; /// [`Version`] record: ['e', '2'] pub const VERSION: [u8; 2] = [0x65, 0x32]; @@ -21,34 +21,6 @@ pub const EMPTY: [u8; 2] = [0x00, 0x00]; /// `SlotIndex` record: ['i', '2'] pub const SLOT_INDEX: [u8; 2] = [0x69, 0x32]; -/// Error types for e2s file operations -#[derive(Error, Debug)] -pub enum E2sError { - /// IO error during file operations - #[error("IO error: {0}")] - Io(#[from] io::Error), - - /// Error during SSZ encoding/decoding - #[error("SSZ error: {0}")] - Ssz(String), - - /// Reserved field in header not zero - #[error("Reserved field in header not zero")] - ReservedNotZero, - - /// Error during snappy compression - #[error("Snappy compression error: {0}")] - SnappyCompression(String), - - /// Error during snappy decompression - #[error("Snappy decompression error: {0}")] - SnappyDecompression(String), - - /// Error during RLP encoding/decoding - #[error("RLP error: {0}")] - Rlp(String), -} - /// Header for TLV records in e2store files #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] pub struct Header { diff --git a/crates/era/src/era/mod.rs b/crates/era/src/era/mod.rs new file mode 100644 index 00000000000..108eeb30887 --- /dev/null +++ b/crates/era/src/era/mod.rs @@ -0,0 +1,3 @@ +//! Core era primitives. + +pub mod types; diff --git a/crates/era/src/consensus_types.rs b/crates/era/src/era/types/consensus.rs similarity index 98% rename from crates/era/src/consensus_types.rs rename to crates/era/src/era/types/consensus.rs index cdcc77ce57a..3c4a924e7a6 100644 --- a/crates/era/src/consensus_types.rs +++ b/crates/era/src/era/types/consensus.rs @@ -1,8 +1,8 @@ //! Consensus types for Era post-merge history files use crate::{ - e2s_types::{E2sError, Entry}, - DecodeCompressedSsz, + common::decode::DecodeCompressedSsz, + e2s::{error::E2sError, types::Entry}, }; use snap::{read::FrameDecoder, write::FrameEncoder}; use ssz::Decode; diff --git a/crates/era/src/era_types.rs b/crates/era/src/era/types/group.rs similarity index 97% rename from crates/era/src/era_types.rs rename to crates/era/src/era/types/group.rs index a50b6f19281..bb250872ed5 100644 --- a/crates/era/src/era_types.rs +++ b/crates/era/src/era/types/group.rs @@ -1,10 +1,10 @@ -//! Era types for `.era` files +//! Era types for `.era` file content //! //! See also use crate::{ - consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock}, - e2s_types::{Entry, IndexEntry, SLOT_INDEX}, + e2s::types::{Entry, IndexEntry, SLOT_INDEX}, + era::types::consensus::{CompressedBeaconState, CompressedSignedBeaconBlock}, }; /// Era file content group @@ -126,7 +126,7 @@ impl IndexEntry for SlotIndex { mod tests { use super::*; use crate::{ - e2s_types::{Entry, IndexEntry}, + e2s::types::{Entry, IndexEntry}, test_utils::{create_beacon_block, create_beacon_state}, }; diff --git a/crates/era/src/era/types/mod.rs b/crates/era/src/era/types/mod.rs new file mode 100644 index 00000000000..cf91adca546 --- /dev/null +++ b/crates/era/src/era/types/mod.rs @@ -0,0 +1,6 @@ +//! Era types primitives. +//! +//! See also + +pub mod consensus; +pub mod group; diff --git a/crates/era/src/era1_file.rs b/crates/era/src/era1/file.rs similarity index 94% rename from crates/era/src/era1_file.rs rename to crates/era/src/era1/file.rs index dc34ddef42b..3f230e8ea66 100644 --- a/crates/era/src/era1_file.rs +++ b/crates/era/src/era1/file.rs @@ -6,13 +6,19 @@ //! See also . 
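For orientation, the renames in this stretch of the diff (such as `era1_file.rs` to `era1/file.rs` just above) reorganize the previously flat `reth-era` modules into nested submodules. Roughly, the new top-level layout declared in the updated `lib.rs` (shown further below) and the new `mod.rs` files is:

// New reth-era module tree introduced by this reorganization; the comments list
// what moved under each top-level module according to the renames in this diff.
pub mod common; // decode (DecodeCompressed, DecodeCompressedSsz), file_ops (era file I/O traits)
pub mod e2s;    // error (E2sError), file (E2StoreReader/Writer), types (Version, Entry, slot index)
pub mod era;    // types::consensus and types::group for `.era` content
pub mod era1;   // file (Era1Reader/Writer), types::execution and types::group for `.era1` content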
use crate::{ - e2s_file::{E2StoreReader, E2StoreWriter}, - e2s_types::{E2sError, Entry, IndexEntry, Version}, - era1_types::{BlockIndex, Era1Group, Era1Id, BLOCK_INDEX}, - era_file_ops::{EraFileFormat, FileReader, StreamReader, StreamWriter}, - execution_types::{ - self, Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, - TotalDifficulty, MAX_BLOCKS_PER_ERA1, + common::file_ops::{EraFileFormat, FileReader, StreamReader, StreamWriter}, + e2s::{ + error::E2sError, + file::{E2StoreReader, E2StoreWriter}, + types::{Entry, IndexEntry, Version}, + }, + era1::types::{ + execution::{ + Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, + TotalDifficulty, ACCUMULATOR, COMPRESSED_BODY, COMPRESSED_HEADER, COMPRESSED_RECEIPTS, + MAX_BLOCKS_PER_ERA1, TOTAL_DIFFICULTY, + }, + group::{BlockIndex, Era1Group, Era1Id, BLOCK_INDEX}, }, }; use alloy_primitives::BlockNumber; @@ -127,19 +133,19 @@ impl BlockTupleIterator { }; match entry.entry_type { - execution_types::COMPRESSED_HEADER => { + COMPRESSED_HEADER => { self.headers.push_back(CompressedHeader::from_entry(&entry)?); } - execution_types::COMPRESSED_BODY => { + COMPRESSED_BODY => { self.bodies.push_back(CompressedBody::from_entry(&entry)?); } - execution_types::COMPRESSED_RECEIPTS => { + COMPRESSED_RECEIPTS => { self.receipts.push_back(CompressedReceipts::from_entry(&entry)?); } - execution_types::TOTAL_DIFFICULTY => { + TOTAL_DIFFICULTY => { self.difficulties.push_back(TotalDifficulty::from_entry(&entry)?); } - execution_types::ACCUMULATOR => { + ACCUMULATOR => { if self.accumulator.is_some() { return Err(E2sError::Ssz("Multiple accumulator entries found".to_string())); } @@ -327,10 +333,7 @@ impl StreamWriter for Era1Writer { impl Era1Writer { /// Write a single block tuple - pub fn write_block( - &mut self, - block_tuple: &crate::execution_types::BlockTuple, - ) -> Result<(), E2sError> { + pub fn write_block(&mut self, block_tuple: &BlockTuple) -> Result<(), E2sError> { if !self.has_written_version { self.write_version()?; } @@ -403,13 +406,7 @@ impl Era1Writer { #[cfg(test)] mod tests { use super::*; - use crate::{ - era_file_ops::FileWriter, - execution_types::{ - Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, - TotalDifficulty, - }, - }; + use crate::common::file_ops::FileWriter; use alloy_primitives::{B256, U256}; use std::io::Cursor; use tempfile::tempdir; diff --git a/crates/era/src/era1/mod.rs b/crates/era/src/era1/mod.rs new file mode 100644 index 00000000000..de0803e7212 --- /dev/null +++ b/crates/era/src/era1/mod.rs @@ -0,0 +1,4 @@ +//! Core era1 primitives and file handling. + +pub mod file; +pub mod types; diff --git a/crates/era/src/execution_types.rs b/crates/era/src/era1/types/execution.rs similarity index 97% rename from crates/era/src/execution_types.rs rename to crates/era/src/era1/types/execution.rs index 6feb2873fbd..e6022d57140 100644 --- a/crates/era/src/execution_types.rs +++ b/crates/era/src/era1/types/execution.rs @@ -16,7 +16,7 @@ //! //! ```rust //! use alloy_consensus::Header; -//! use reth_era::{execution_types::CompressedHeader, DecodeCompressed}; +//! use reth_era::{common::decode::DecodeCompressed, era1::types::execution::CompressedHeader}; //! //! let header = Header { number: 100, ..Default::default() }; //! // Compress the header: rlp encoding and Snappy compression @@ -24,7 +24,7 @@ //! // Decompressed and decode typed compressed header //! let decoded_header: Header = compressed.decode_header()?; //! 
assert_eq!(decoded_header.number, 100); -//! # Ok::<(), reth_era::e2s_types::E2sError>(()) +//! # Ok::<(), reth_era::e2s::error::E2sError>(()) //! ``` //! //! ## [`CompressedBody`] @@ -32,7 +32,7 @@ //! ```rust //! use alloy_consensus::{BlockBody, Header}; //! use alloy_primitives::Bytes; -//! use reth_era::{execution_types::CompressedBody, DecodeCompressed}; +//! use reth_era::{common::decode::DecodeCompressed, era1::types::execution::CompressedBody}; //! use reth_ethereum_primitives::TransactionSigned; //! //! let body: BlockBody = BlockBody { @@ -46,14 +46,14 @@ //! let decoded_body: alloy_consensus::BlockBody = //! compressed_body.decode()?; //! assert_eq!(decoded_body.transactions.len(), 1); -//! # Ok::<(), reth_era::e2s_types::E2sError>(()) +//! # Ok::<(), reth_era::e2s::error::E2sError>(()) //! ``` //! //! ## [`CompressedReceipts`] //! //! ```rust //! use alloy_consensus::ReceiptWithBloom; -//! use reth_era::{execution_types::CompressedReceipts, DecodeCompressed}; +//! use reth_era::{common::decode::DecodeCompressed, era1::types::execution::CompressedReceipts}; //! use reth_ethereum_primitives::{Receipt, TxType}; //! //! let receipt = Receipt { @@ -68,12 +68,12 @@ //! // Get raw receipt by decoding and decompressing compressed and encoded receipt //! let decompressed_receipt = compressed_receipt_data.decode::()?; //! assert_eq!(decompressed_receipt.receipt.cumulative_gas_used, 21000); -//! # Ok::<(), reth_era::e2s_types::E2sError>(()) +//! # Ok::<(), reth_era::e2s::error::E2sError>(()) //! `````` use crate::{ - e2s_types::{E2sError, Entry}, - DecodeCompressed, + common::decode::DecodeCompressed, + e2s::{error::E2sError, types::Entry}, }; use alloy_consensus::{Block, BlockBody, Header}; use alloy_primitives::{B256, U256}; diff --git a/crates/era/src/era1_types.rs b/crates/era/src/era1/types/group.rs similarity index 98% rename from crates/era/src/era1_types.rs rename to crates/era/src/era1/types/group.rs index ef239f3e164..5a7e65a4048 100644 --- a/crates/era/src/era1_types.rs +++ b/crates/era/src/era1/types/group.rs @@ -1,11 +1,11 @@ -//! Era1 types +//! Era1 group for era1 file content //! //! See also use crate::{ - e2s_types::{Entry, IndexEntry}, - era_file_ops::EraFileId, - execution_types::{Accumulator, BlockTuple, MAX_BLOCKS_PER_ERA1}, + common::file_ops::EraFileId, + e2s::types::{Entry, IndexEntry}, + era1::types::execution::{Accumulator, BlockTuple, MAX_BLOCKS_PER_ERA1}, }; use alloy_primitives::BlockNumber; @@ -174,8 +174,8 @@ impl EraFileId for Era1Id { mod tests { use super::*; use crate::{ + common::decode::DecodeCompressed, test_utils::{create_sample_block, create_test_block_with_compressed_data}, - DecodeCompressed, }; use alloy_consensus::ReceiptWithBloom; use alloy_primitives::{B256, U256}; diff --git a/crates/era/src/era1/types/mod.rs b/crates/era/src/era1/types/mod.rs new file mode 100644 index 00000000000..44568ddf79b --- /dev/null +++ b/crates/era/src/era1/types/mod.rs @@ -0,0 +1,6 @@ +//! Era1 types +//! +//! See also + +pub mod execution; +pub mod group; diff --git a/crates/era/src/lib.rs b/crates/era/src/lib.rs index fd0596e9dfc..2e4b755d76f 100644 --- a/crates/era/src/lib.rs +++ b/crates/era/src/lib.rs @@ -12,29 +12,10 @@ //! - Era format: //! 
- Era1 format: -pub mod consensus_types; -pub mod e2s_file; -pub mod e2s_types; -pub mod era1_file; -pub mod era1_types; -pub mod era_file_ops; -pub mod era_types; -pub mod execution_types; +pub mod common; +pub mod e2s; +pub mod era; +pub mod era1; + #[cfg(test)] pub(crate) mod test_utils; - -use crate::e2s_types::E2sError; -use alloy_rlp::Decodable; -use ssz::Decode; - -/// Extension trait for generic decoding from compressed data -pub trait DecodeCompressed { - /// Decompress and decode the data into the given type - fn decode(&self) -> Result; -} - -/// Extension trait for generic decoding from compressed ssz data -pub trait DecodeCompressedSsz { - /// Decompress and decode the SSZ data into the given type - fn decode(&self) -> Result; -} diff --git a/crates/era/src/test_utils.rs b/crates/era/src/test_utils.rs index 96b2545be16..f5aab53f74b 100644 --- a/crates/era/src/test_utils.rs +++ b/crates/era/src/test_utils.rs @@ -1,8 +1,8 @@ //! Utilities helpers to create era data structures for testing purposes. use crate::{ - consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock}, - execution_types::{ + era::types::consensus::{CompressedBeaconState, CompressedSignedBeaconBlock}, + era1::types::execution::{ BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty, }, }; diff --git a/crates/era/tests/it/dd.rs b/crates/era/tests/it/dd.rs index 769a398d6ce..9c1e5d163ea 100644 --- a/crates/era/tests/it/dd.rs +++ b/crates/era/tests/it/dd.rs @@ -4,10 +4,12 @@ use alloy_consensus::{BlockBody, Header}; use alloy_primitives::U256; use reth_era::{ - e2s_types::IndexEntry, - era1_file::{Era1Reader, Era1Writer}, - era_file_ops::{StreamReader, StreamWriter}, - execution_types::CompressedBody, + common::file_ops::{StreamReader, StreamWriter}, + e2s::types::IndexEntry, + era1::{ + file::{Era1Reader, Era1Writer}, + types::execution::CompressedBody, + }, }; use reth_ethereum_primitives::TransactionSigned; use std::io::Cursor; diff --git a/crates/era/tests/it/genesis.rs b/crates/era/tests/it/genesis.rs index 80869f97fa0..14f563edf2f 100644 --- a/crates/era/tests/it/genesis.rs +++ b/crates/era/tests/it/genesis.rs @@ -7,7 +7,7 @@ use crate::{ Era1TestDownloader, ERA1_MAINNET_FILES_NAMES, ERA1_SEPOLIA_FILES_NAMES, MAINNET, SEPOLIA, }; use alloy_consensus::{BlockBody, Header}; -use reth_era::{e2s_types::IndexEntry, execution_types::CompressedBody}; +use reth_era::{e2s::types::IndexEntry, era1::types::execution::CompressedBody}; use reth_ethereum_primitives::TransactionSigned; #[tokio::test(flavor = "multi_thread")] diff --git a/crates/era/tests/it/main.rs b/crates/era/tests/it/main.rs index 611862aa8ea..9750e7b10b0 100644 --- a/crates/era/tests/it/main.rs +++ b/crates/era/tests/it/main.rs @@ -8,9 +8,9 @@ use reqwest::{Client, Url}; use reth_era::{ - e2s_types::E2sError, - era1_file::{Era1File, Era1Reader}, - era_file_ops::FileReader, + common::file_ops::FileReader, + e2s::error::E2sError, + era1::file::{Era1File, Era1Reader}, }; use reth_era_downloader::EraClient; use std::{ diff --git a/crates/era/tests/it/roundtrip.rs b/crates/era/tests/it/roundtrip.rs index a78af341371..56f5ac20cd4 100644 --- a/crates/era/tests/it/roundtrip.rs +++ b/crates/era/tests/it/roundtrip.rs @@ -10,12 +10,16 @@ use alloy_consensus::{BlockBody, BlockHeader, Header, ReceiptWithBloom}; use rand::{prelude::IndexedRandom, rng}; use reth_era::{ - e2s_types::IndexEntry, - era1_file::{Era1File, Era1Reader, Era1Writer}, - era1_types::{Era1Group, Era1Id}, - era_file_ops::{EraFileFormat, StreamReader, 
StreamWriter}, - execution_types::{ - BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty, + common::file_ops::{EraFileFormat, StreamReader, StreamWriter}, + e2s::types::IndexEntry, + era1::{ + file::{Era1File, Era1Reader, Era1Writer}, + types::{ + execution::{ + BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty, + }, + group::{Era1Group, Era1Id}, + }, }, }; use reth_ethereum_primitives::TransactionSigned; diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index 01a7751e77b..728a97bfe3d 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -23,11 +23,13 @@ reth-node-ethereum.workspace = true reth-node-metrics.workspace = true reth-rpc-server-types.workspace = true reth-tracing.workspace = true +reth-tracing-otlp.workspace = true reth-node-api.workspace = true # misc clap.workspace = true eyre.workspace = true +url.workspace = true tracing.workspace = true [dev-dependencies] @@ -35,7 +37,9 @@ tracing.workspace = true tempfile.workspace = true [features] -default = ["jemalloc"] +default = [] + +otlp = ["reth-tracing/otlp", "reth-node-core/otlp"] dev = ["reth-cli-commands/arbitrary"] @@ -59,8 +63,23 @@ tracy-allocator = [] snmalloc = [] snmalloc-native = [] -min-error-logs = ["tracing/release_max_level_error"] -min-warn-logs = ["tracing/release_max_level_warn"] -min-info-logs = ["tracing/release_max_level_info"] -min-debug-logs = ["tracing/release_max_level_debug"] -min-trace-logs = ["tracing/release_max_level_trace"] +min-error-logs = [ + "tracing/release_max_level_error", + "reth-node-core/min-error-logs", +] +min-warn-logs = [ + "tracing/release_max_level_warn", + "reth-node-core/min-warn-logs", +] +min-info-logs = [ + "tracing/release_max_level_info", + "reth-node-core/min-info-logs", +] +min-debug-logs = [ + "tracing/release_max_level_debug", + "reth-node-core/min-debug-logs", +] +min-trace-logs = [ + "tracing/release_max_level_trace", + "reth-node-core/min-trace-logs", +] diff --git a/crates/ethereum/cli/src/app.rs b/crates/ethereum/cli/src/app.rs index e99dae2ac77..b947d6df1db 100644 --- a/crates/ethereum/cli/src/app.rs +++ b/crates/ethereum/cli/src/app.rs @@ -14,8 +14,10 @@ use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig, EthereumNo use reth_node_metrics::recorder::install_prometheus_recorder; use reth_rpc_server_types::RpcModuleValidator; use reth_tracing::{FileWorkerGuard, Layers}; +use reth_tracing_otlp::OtlpProtocol; use std::{fmt, sync::Arc}; use tracing::info; +use url::Url; /// A wrapper around a parsed CLI that handles command execution. #[derive(Debug)] @@ -82,10 +84,7 @@ where ) -> Result<()>, ) -> Result<()> where - N: CliNodeTypes< - Primitives: NodePrimitives, - ChainSpec: Hardforks + EthChainSpec, - >, + N: CliNodeTypes, ChainSpec: Hardforks>, C: ChainSpecParser, { let runner = match self.runner.take() { @@ -99,7 +98,8 @@ where self.cli.logs.log_file_directory.join(chain_spec.chain().to_string()); } - self.init_tracing()?; + self.init_tracing(&runner)?; + // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); @@ -109,14 +109,55 @@ where /// Initializes tracing with the configured options. /// /// If file logging is enabled, this function stores guard to the struct. - pub fn init_tracing(&mut self) -> Result<()> { + /// For gRPC OTLP, it requires tokio runtime context. 
+ pub fn init_tracing(&mut self, runner: &CliRunner) -> Result<()> { if self.guard.is_none() { - let layers = self.layers.take().unwrap_or_default(); + let mut layers = self.layers.take().unwrap_or_default(); + + #[cfg(feature = "otlp")] + { + self.cli.traces.validate()?; + + if let Some(endpoint) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", endpoint); + self.init_otlp_export(&mut layers, endpoint, runner)?; + } + } + self.guard = self.cli.logs.init_tracing_with_layers(layers)?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); } Ok(()) } + + /// Initialize OTLP tracing export based on protocol type. + /// + /// For gRPC, `block_on` is required because tonic's channel initialization needs + /// a tokio runtime context, even though `with_span_layer` itself is not async. + #[cfg(feature = "otlp")] + fn init_otlp_export( + &self, + layers: &mut Layers, + endpoint: &Url, + runner: &CliRunner, + ) -> Result<()> { + let endpoint = endpoint.clone(); + let protocol = self.cli.traces.protocol; + let filter_level = self.cli.traces.otlp_filter.clone(); + + match protocol { + OtlpProtocol::Grpc => { + runner.block_on(async { + layers.with_span_layer("reth".to_string(), endpoint, filter_level, protocol) + })?; + } + OtlpProtocol::Http => { + layers.with_span_layer("reth".to_string(), endpoint, filter_level, protocol)?; + } + } + + Ok(()) + } } /// Run CLI commands with the provided runner, components and launcher. diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index 1ebc40c7f8e..dffa62c96b3 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -18,7 +18,10 @@ use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_api::NodePrimitives; use reth_node_builder::{NodeBuilder, WithLaunchContext}; -use reth_node_core::{args::LogArgs, version::version_metadata}; +use reth_node_core::{ + args::{LogArgs, TraceArgs}, + version::version_metadata, +}; use reth_node_metrics::recorder::install_prometheus_recorder; use reth_rpc_server_types::{DefaultRpcModuleValidator, RpcModuleValidator}; use reth_tracing::FileWorkerGuard; @@ -29,7 +32,7 @@ use tracing::info; /// /// This is the entrypoint to the executable. #[derive(Debug, Parser)] -#[command(author, version =version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)] +#[command(author, name = version_metadata().name_client.as_ref(), version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)] pub struct Cli< C: ChainSpecParser = EthereumChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs, @@ -43,6 +46,10 @@ pub struct Cli< #[command(flatten)] pub logs: LogArgs, + /// The tracing configuration for the CLI. + #[command(flatten)] + pub traces: TraceArgs, + /// Type marker for the RPC module validator #[arg(skip)] pub _phantom: PhantomData, @@ -212,8 +219,11 @@ impl /// /// If file logging is enabled, this function returns a guard that must be kept alive to ensure /// that all logs are flushed to disk. + /// If an OTLP endpoint is specified, it will export metrics to the configured collector. 
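The `init_otlp_export` helper above only wraps the gRPC path in `block_on` because tonic's channel initialization expects an active tokio runtime, while the HTTP exporter can be constructed from synchronous code. A rough, generic sketch of that constraint using only tokio; `setup_grpc_exporter` is a hypothetical placeholder, not the reth or tonic API:

use tokio::runtime::Runtime;

// Hypothetical stand-in for building a tonic-based OTLP exporter; the real
// construction is what requires an active runtime.
fn setup_grpc_exporter() -> std::io::Result<()> {
    Ok(())
}

fn init_tracing_from_sync_cli() -> std::io::Result<()> {
    // From a synchronous CLI entry point there is no ambient runtime,
    // so one is created and entered explicitly via block_on.
    let rt = Runtime::new()?;
    rt.block_on(async { setup_grpc_exporter() })
}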
pub fn init_tracing(&self) -> eyre::Result> { - let guard = self.logs.init_tracing()?; + let layers = reth_tracing::Layers::new(); + + let guard = self.logs.init_tracing_with_layers(layers)?; Ok(guard) } } diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index a93e3312525..00cf303e25d 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -196,6 +196,7 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Header; use alloy_primitives::B256; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_consensus_common::validation::validate_against_parent_gas_limit; @@ -215,7 +216,7 @@ mod tests { let child = header_with_gas_limit((parent.gas_limit + 5) as u64); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::
::default()), Ok(()) ); } @@ -226,7 +227,7 @@ mod tests { let child = header_with_gas_limit(MINIMUM_GAS_LIMIT - 1); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::
::default()), Err(ConsensusError::GasLimitInvalidMinimum { child_gas_limit: child.gas_limit as u64 }) ); } @@ -239,7 +240,7 @@ mod tests { ); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::
::default()), Err(ConsensusError::GasLimitInvalidIncrease { parent_gas_limit: parent.gas_limit, child_gas_limit: child.gas_limit, @@ -253,7 +254,7 @@ mod tests { let child = header_with_gas_limit(parent.gas_limit - 5); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::
::default()), Ok(()) ); } @@ -266,7 +267,7 @@ mod tests { ); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::
::default()), Err(ConsensusError::GasLimitInvalidDecrease { parent_gas_limit: parent.gas_limit, child_gas_limit: child.gas_limit, diff --git a/crates/ethereum/evm/src/build.rs b/crates/ethereum/evm/src/build.rs index 5f5e014d297..85d4cae311b 100644 --- a/crates/ethereum/evm/src/build.rs +++ b/crates/ethereum/evm/src/build.rs @@ -1,7 +1,7 @@ use alloc::{sync::Arc, vec::Vec}; use alloy_consensus::{ proofs::{self, calculate_receipt_root}, - Block, BlockBody, BlockHeader, Header, Transaction, TxReceipt, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, BlockHeader, Header, TxReceipt, EMPTY_OMMER_ROOT_HASH, }; use alloy_eips::merge::BEACON_NONCE; use alloy_evm::{block::BlockExecutorFactory, eth::EthBlockExecutionCtx}; @@ -10,6 +10,7 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::execute::{BlockAssembler, BlockAssemblerInput, BlockExecutionError}; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{logs_bloom, Receipt, SignedTransaction}; +use revm::context::Block as _; /// Block builder for Ethereum. #[derive(Debug, Clone)] @@ -47,12 +48,12 @@ where execution_ctx: ctx, parent, transactions, - output: BlockExecutionResult { receipts, requests, gas_used }, + output: BlockExecutionResult { receipts, requests, gas_used, blob_gas_used }, state_root, .. } = input; - let timestamp = evm_env.block_env.timestamp.saturating_to(); + let timestamp = evm_env.block_env.timestamp().saturating_to(); let transactions_root = proofs::calculate_transaction_root(&transactions); let receipts_root = calculate_receipt_root( @@ -73,12 +74,11 @@ where .then(|| requests.requests_hash()); let mut excess_blob_gas = None; - let mut blob_gas_used = None; + let mut block_blob_gas_used = None; // only determine cancun fields when active if self.chain_spec.is_cancun_active_at_timestamp(timestamp) { - blob_gas_used = - Some(transactions.iter().map(|tx| tx.blob_gas_used().unwrap_or_default()).sum()); + block_blob_gas_used = Some(*blob_gas_used); excess_blob_gas = if self.chain_spec.is_cancun_active_at_timestamp(parent.timestamp) { parent.maybe_next_block_excess_blob_gas( self.chain_spec.blob_params_at_timestamp(timestamp), @@ -96,23 +96,23 @@ where let header = Header { parent_hash: ctx.parent_hash, ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: evm_env.block_env.beneficiary, + beneficiary: evm_env.block_env.beneficiary(), state_root, transactions_root, receipts_root, withdrawals_root, logs_bloom, timestamp, - mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(), + mix_hash: evm_env.block_env.prevrandao().unwrap_or_default(), nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(evm_env.block_env.basefee), - number: evm_env.block_env.number.saturating_to(), - gas_limit: evm_env.block_env.gas_limit, - difficulty: evm_env.block_env.difficulty, + base_fee_per_gas: Some(evm_env.block_env.basefee()), + number: evm_env.block_env.number().saturating_to(), + gas_limit: evm_env.block_env.gas_limit(), + difficulty: evm_env.block_env.difficulty(), gas_used: *gas_used, extra_data: self.extra_data.clone(), parent_beacon_block_root: ctx.parent_beacon_block_root, - blob_gas_used, + blob_gas_used: block_blob_gas_used, excess_blob_gas, requests_hash, }; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index eaf91f0c7be..c0f8adc9c54 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -132,6 +132,7 @@ where + FromRecoveredTx + FromTxWithEncoded, Spec = SpecId, + BlockEnv = BlockEnv, Precompiles = PrecompilesMap, > + Clone + Debug @@ 
-154,7 +155,7 @@ where &self.block_assembler } - fn evm_env(&self, header: &Header) -> Result { + fn evm_env(&self, header: &Header) -> Result, Self::Error> { Ok(EvmEnv::for_eth_block( header, self.chain_spec(), @@ -217,6 +218,7 @@ where + FromRecoveredTx + FromTxWithEncoded, Spec = SpecId, + BlockEnv = BlockEnv, Precompiles = PrecompilesMap, > + Clone + Debug diff --git a/crates/ethereum/evm/src/test_utils.rs b/crates/ethereum/evm/src/test_utils.rs index 87875dbc848..fe791b9f5fd 100644 --- a/crates/ethereum/evm/src/test_utils.rs +++ b/crates/ethereum/evm/src/test_utils.rs @@ -125,6 +125,7 @@ impl<'a, DB: Database, I: Inspector>>> BlockExec reqs }), gas_used: 0, + blob_gas_used: 0, }; evm.db_mut().bundle_state = bundle; diff --git a/crates/ethereum/hardforks/src/display.rs b/crates/ethereum/hardforks/src/display.rs index 7eda386a3cc..edd1e57c893 100644 --- a/crates/ethereum/hardforks/src/display.rs +++ b/crates/ethereum/hardforks/src/display.rs @@ -25,6 +25,8 @@ struct DisplayFork { activated_at: ForkCondition, /// An optional EIP (e.g. `EIP-1559`). eip: Option, + /// Optional metadata to display alongside the fork (e.g. blob parameters) + metadata: Option, } impl core::fmt::Display for DisplayFork { @@ -38,6 +40,9 @@ impl core::fmt::Display for DisplayFork { match self.activated_at { ForkCondition::Block(at) | ForkCondition::Timestamp(at) => { write!(f, "{name_with_eip:32} @{at}")?; + if let Some(metadata) = &self.metadata { + write!(f, " {metadata}")?; + } } ForkCondition::TTD { total_difficulty, .. } => { // All networks that have merged are finalized. @@ -45,6 +50,9 @@ impl core::fmt::Display for DisplayFork { f, "{name_with_eip:32} @{total_difficulty} (network is known to be merged)", )?; + if let Some(metadata) = &self.metadata { + write!(f, " {metadata}")?; + } } ForkCondition::Never => unreachable!(), } @@ -145,14 +153,27 @@ impl DisplayHardforks { pub fn new<'a, I>(hardforks: I) -> Self where I: IntoIterator, + { + // Delegate to with_meta by mapping the iterator to include None for metadata + Self::with_meta(hardforks.into_iter().map(|(fork, condition)| (fork, condition, None))) + } + + /// Creates a new [`DisplayHardforks`] from an iterator of hardforks with optional metadata. 
+ pub fn with_meta<'a, I>(hardforks: I) -> Self + where + I: IntoIterator)>, { let mut pre_merge = Vec::new(); let mut with_merge = Vec::new(); let mut post_merge = Vec::new(); - for (fork, condition) in hardforks { - let mut display_fork = - DisplayFork { name: fork.name().to_string(), activated_at: condition, eip: None }; + for (fork, condition, metadata) in hardforks { + let mut display_fork = DisplayFork { + name: fork.name().to_string(), + activated_at: condition, + eip: None, + metadata, + }; match condition { ForkCondition::Block(_) => { diff --git a/crates/ethereum/hardforks/src/hardforks/mod.rs b/crates/ethereum/hardforks/src/hardforks/mod.rs index 1c67c380d96..dad175e8f66 100644 --- a/crates/ethereum/hardforks/src/hardforks/mod.rs +++ b/crates/ethereum/hardforks/src/hardforks/mod.rs @@ -4,11 +4,7 @@ pub use dev::DEV_HARDFORKS; use crate::{ForkCondition, ForkFilter, ForkId, Hardfork, Head}; #[cfg(feature = "std")] use rustc_hash::FxHashMap; -#[cfg(feature = "std")] -use std::collections::hash_map::Entry; -#[cfg(not(feature = "std"))] -use alloc::collections::btree_map::Entry; use alloc::{boxed::Box, vec::Vec}; /// Generic trait over a set of ordered hardforks @@ -115,26 +111,74 @@ impl ChainHardforks { self.fork(fork).active_at_block(block_number) } - /// Inserts `fork` into list, updating with a new [`ForkCondition`] if it already exists. + /// Inserts a fork with the given [`ForkCondition`], maintaining forks in ascending order + /// based on the `Ord` implementation of [`ForkCondition`]. + /// + /// If the fork already exists (regardless of its current condition type), it will be removed + /// and re-inserted at the appropriate position based on the new condition. + /// + /// # Ordering Behavior + /// + /// Forks are ordered according to [`ForkCondition`]'s `Ord` implementation: + /// - [`ForkCondition::Never`] comes first + /// - [`ForkCondition::Block`] ordered by block number + /// - [`ForkCondition::Timestamp`] ordered by timestamp value + /// - [`ForkCondition::TTD`] ordered by total difficulty + /// + /// # Example + /// + /// ```ignore + /// let mut forks = ChainHardforks::default(); + /// forks.insert(Fork::Frontier, ForkCondition::Block(0)); + /// forks.insert(Fork::Homestead, ForkCondition::Block(1_150_000)); + /// forks.insert(Fork::Cancun, ForkCondition::Timestamp(1710338135)); + /// + /// // Forks are ordered: Frontier (Block 0), Homestead (Block 1150000), Cancun (Timestamp) + /// ``` pub fn insert(&mut self, fork: H, condition: ForkCondition) { - match self.map.entry(fork.name()) { - Entry::Occupied(mut entry) => { - *entry.get_mut() = condition; - if let Some((_, inner)) = - self.forks.iter_mut().find(|(inner, _)| inner.name() == fork.name()) - { - *inner = condition; - } - } - Entry::Vacant(entry) => { - entry.insert(condition); - self.forks.push((Box::new(fork), condition)); - } + // Remove existing fork if it exists + self.remove(&fork); + + // Find the correct position based on ForkCondition's Ord implementation + let pos = self + .forks + .iter() + .position(|(_, existing_condition)| *existing_condition > condition) + .unwrap_or(self.forks.len()); + + self.map.insert(fork.name(), condition); + self.forks.insert(pos, (Box::new(fork), condition)); + } + + /// Extends the list with multiple forks, updating existing entries with new + /// [`ForkCondition`]s if they already exist. + /// + /// Each fork is inserted using [`Self::insert`], maintaining proper ordering based on + /// [`ForkCondition`]'s `Ord` implementation. 
+ /// + /// # Example + /// + /// ```ignore + /// let mut forks = ChainHardforks::default(); + /// forks.extend([ + /// (Fork::Homestead, ForkCondition::Block(1_150_000)), + /// (Fork::Frontier, ForkCondition::Block(0)), + /// (Fork::Cancun, ForkCondition::Timestamp(1710338135)), + /// ]); + /// + /// // Forks will be automatically ordered: Frontier, Homestead, Cancun + /// ``` + pub fn extend( + &mut self, + forks: impl IntoIterator, + ) { + for (fork, condition) in forks { + self.insert(fork, condition); } } /// Removes `fork` from list. - pub fn remove(&mut self, fork: H) { + pub fn remove(&mut self, fork: &H) { self.forks.retain(|(inner_fork, _)| inner_fork.name() != fork.name()); self.map.remove(fork.name()); } @@ -157,3 +201,122 @@ impl From<[(T, ForkCondition); N]> for ChainHardfor ) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_hardforks::hardfork; + + hardfork!(AHardfork { A1, A2, A3 }); + hardfork!(BHardfork { B1, B2 }); + + #[test] + fn add_hardforks() { + let mut forks = ChainHardforks::default(); + forks.insert(AHardfork::A1, ForkCondition::Block(1)); + forks.insert(BHardfork::B1, ForkCondition::Block(1)); + assert_eq!(forks.len(), 2); + forks.is_fork_active_at_block(AHardfork::A1, 1); + forks.is_fork_active_at_block(BHardfork::B1, 1); + } + + #[test] + fn insert_maintains_fork_order() { + let mut forks = ChainHardforks::default(); + + // Insert forks in random order + forks.insert(BHardfork::B1, ForkCondition::Timestamp(2000)); + forks.insert(AHardfork::A1, ForkCondition::Block(100)); + forks.insert(AHardfork::A2, ForkCondition::Block(50)); + forks.insert(BHardfork::B2, ForkCondition::Timestamp(1000)); + + assert_eq!(forks.len(), 4); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + // Verify ordering: Block conditions come before Timestamp conditions + // and within each type, they're ordered by value + assert_eq!(fork_list[0].0.name(), "A2"); + assert_eq!(fork_list[0].1, ForkCondition::Block(50)); + assert_eq!(fork_list[1].0.name(), "A1"); + assert_eq!(fork_list[1].1, ForkCondition::Block(100)); + assert_eq!(fork_list[2].0.name(), "B2"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(1000)); + assert_eq!(fork_list[3].0.name(), "B1"); + assert_eq!(fork_list[3].1, ForkCondition::Timestamp(2000)); + } + + #[test] + fn insert_replaces_and_reorders_existing_fork() { + let mut forks = ChainHardforks::default(); + + // Insert initial forks + forks.insert(AHardfork::A1, ForkCondition::Block(100)); + forks.insert(BHardfork::B1, ForkCondition::Block(200)); + forks.insert(AHardfork::A2, ForkCondition::Timestamp(1000)); + + assert_eq!(forks.len(), 3); + + // Update A1 from Block to Timestamp - should move it after B1 + forks.insert(AHardfork::A1, ForkCondition::Timestamp(500)); + assert_eq!(forks.len(), 3); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + // Verify new ordering + assert_eq!(fork_list[0].0.name(), "B1"); + assert_eq!(fork_list[0].1, ForkCondition::Block(200)); + assert_eq!(fork_list[1].0.name(), "A1"); + assert_eq!(fork_list[1].1, ForkCondition::Timestamp(500)); + assert_eq!(fork_list[2].0.name(), "A2"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(1000)); + + // Update A1 timestamp to move it after A2 + forks.insert(AHardfork::A1, ForkCondition::Timestamp(2000)); + assert_eq!(forks.len(), 3); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + assert_eq!(fork_list[0].0.name(), "B1"); + assert_eq!(fork_list[0].1, ForkCondition::Block(200)); + assert_eq!(fork_list[1].0.name(), "A2"); + 
assert_eq!(fork_list[1].1, ForkCondition::Timestamp(1000)); + assert_eq!(fork_list[2].0.name(), "A1"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(2000)); + } + + #[test] + fn extend_maintains_order() { + let mut forks = ChainHardforks::default(); + + // Use extend to insert multiple forks at once in random order + forks.extend([ + (AHardfork::A1, ForkCondition::Block(100)), + (AHardfork::A2, ForkCondition::Timestamp(1000)), + ]); + forks.extend([(BHardfork::B1, ForkCondition::Timestamp(2000))]); + + assert_eq!(forks.len(), 3); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + // Verify ordering is maintained + assert_eq!(fork_list[0].0.name(), "A1"); + assert_eq!(fork_list[0].1, ForkCondition::Block(100)); + assert_eq!(fork_list[1].0.name(), "A2"); + assert_eq!(fork_list[1].1, ForkCondition::Timestamp(1000)); + assert_eq!(fork_list[2].0.name(), "B1"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(2000)); + + // Extend again with an update to A2 + forks.extend([(AHardfork::A2, ForkCondition::Timestamp(3000))]); + assert_eq!(forks.len(), 3); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + assert_eq!(fork_list[0].0.name(), "A1"); + assert_eq!(fork_list[1].0.name(), "B1"); + assert_eq!(fork_list[2].0.name(), "A2"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(3000)); + } +} diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 3c0efdb0394..575934007f9 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -49,7 +49,7 @@ tokio.workspace = true # revm with required ethereum features # Note: this must be kept to ensure all features are properly enabled/forwarded -revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } +revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg", "memory_limit"] } # misc eyre.workspace = true @@ -82,7 +82,12 @@ asm-keccak = [ "reth-node-core/asm-keccak", "revm/asm-keccak", ] -js-tracer = ["reth-node-builder/js-tracer"] +js-tracer = [ + "reth-node-builder/js-tracer", + "reth-rpc/js-tracer", + "reth-rpc-eth-api/js-tracer", + "reth-rpc-eth-types/js-tracer", +] test-utils = [ "reth-node-builder/test-utils", "reth-chainspec/test-utils", diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 4d77b5e26ec..0f0cb54a8f3 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -15,7 +15,6 @@ use reth_ethereum_engine_primitives::{ use reth_ethereum_primitives::{EthPrimitives, TransactionSigned}; use reth_evm::{ eth::spec::EthExecutorSpec, ConfigureEvm, EvmFactory, EvmFactoryFor, NextBlockEnvAttributes, - SpecFor, TxEnvFor, }; use reth_network::{primitives::BasicNetworkPrimitives, NetworkHandle, PeersInfo}; use reth_node_api::{ @@ -58,7 +57,7 @@ use reth_transaction_pool::{ TransactionPool, TransactionValidationTaskExecutor, }; use revm::context::TxEnv; -use std::{default::Default, marker::PhantomData, sync::Arc, time::SystemTime}; +use std::{marker::PhantomData, sync::Arc, time::SystemTime}; /// Type configuration for a regular Ethereum node. 
#[derive(Debug, Default, Clone, Copy)] @@ -159,10 +158,9 @@ where NetworkT: RpcTypes>>, EthRpcConverterFor: RpcConvert< Primitives = PrimitivesTy, - TxEnv = TxEnvFor, Error = EthApiError, Network = NetworkT, - Spec = SpecFor, + Evm = N::Evm, >, EthApiError: FromEvmError, { diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs index 8fd9d08d2dc..1c088e33da6 100644 --- a/crates/ethereum/node/tests/e2e/blobs.rs +++ b/crates/ethereum/node/tests/e2e/blobs.rs @@ -1,15 +1,21 @@ use crate::utils::eth_payload_attributes; +use alloy_eips::Decodable2718; use alloy_genesis::Genesis; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, }; +use reth_ethereum_engine_primitives::BlobSidecars; +use reth_ethereum_primitives::PooledTransactionVariant; use reth_node_builder::{NodeBuilder, NodeHandle}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; use reth_tasks::TaskManager; use reth_transaction_pool::TransactionPool; -use std::sync::Arc; +use std::{ + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; #[tokio::test] async fn can_handle_blobs() -> eyre::Result<()> { @@ -82,3 +88,165 @@ async fn can_handle_blobs() -> eyre::Result<()> { Ok(()) } + +#[tokio::test] +async fn can_send_legacy_sidecar_post_activation() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default().chain(MAINNET.chain).genesis(genesis).osaka_activated().build(), + ); + let genesis_hash = chain_spec.genesis_hash(); + let node_config = NodeConfig::test() + .with_chain(chain_spec) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(EthereumNode::default()) + .launch() + .await?; + + let mut node = NodeTestContext::new(node, eth_payload_attributes).await?; + + let wallets = Wallet::new(2).wallet_gen(); + let blob_wallet = wallets.first().unwrap(); + + // build blob tx + let blob_tx = TransactionTestContext::tx_with_blobs_bytes(1, blob_wallet.clone()).await?; + + let tx = PooledTransactionVariant::decode_2718_exact(&blob_tx).unwrap(); + assert!(tx.as_eip4844().unwrap().tx().sidecar.is_eip4844()); + + // inject blob tx to the pool + let blob_tx_hash = node.rpc.inject_tx(blob_tx).await?; + // fetch it from rpc + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // assert that sidecar was converted to eip7594 + assert!(envelope.as_eip4844().unwrap().tx().sidecar().unwrap().is_eip7594()); + // validate sidecar + TransactionTestContext::validate_sidecar(envelope); + + // build a payload + let blob_payload = node.new_payload().await?; + + // submit the blob payload + let blob_block_hash = node.submit_payload(blob_payload).await?; + + node.update_forkchoice(genesis_hash, blob_block_hash).await?; + + Ok(()) +} + +#[tokio::test] +async fn blob_conversion_at_osaka() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let current_timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); + // Osaka activates in 2 slots + let osaka_timestamp = 
current_timestamp + 24; + + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(genesis) + .prague_activated() + .with_osaka_at(osaka_timestamp) + .build(), + ); + let genesis_hash = chain_spec.genesis_hash(); + let node_config = NodeConfig::test() + .with_chain(chain_spec) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .node(EthereumNode::default()) + .launch() + .await?; + + let mut node = NodeTestContext::new(node, eth_payload_attributes).await?; + + let mut wallets = Wallet::new(3).wallet_gen(); + let first = wallets.pop().unwrap(); + let second = wallets.pop().unwrap(); + + // build a dummy payload at `current_timestamp` + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallets.pop().unwrap()).await; + node.rpc.inject_tx(raw_tx).await?; + node.payload.timestamp = current_timestamp - 1; + node.advance_block().await?; + + // build blob txs + let first_blob = TransactionTestContext::tx_with_blobs_bytes(1, first.clone()).await?; + let second_blob = TransactionTestContext::tx_with_blobs_bytes(1, second.clone()).await?; + + // assert both txs have legacy sidecars + assert!(PooledTransactionVariant::decode_2718_exact(&first_blob) + .unwrap() + .as_eip4844() + .unwrap() + .tx() + .sidecar + .is_eip4844()); + assert!(PooledTransactionVariant::decode_2718_exact(&second_blob) + .unwrap() + .as_eip4844() + .unwrap() + .tx() + .sidecar + .is_eip4844()); + + // inject first blob tx to the pool + let blob_tx_hash = node.rpc.inject_tx(first_blob).await?; + // fetch it from rpc + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // assert that it still has a legacy sidecar + assert!(envelope.as_eip4844().unwrap().tx().sidecar().unwrap().is_eip4844()); + // validate sidecar + TransactionTestContext::validate_sidecar(envelope); + + // build last Prague payload + node.payload.timestamp = current_timestamp + 11; + let prague_payload = node.new_payload().await?; + assert!(matches!(prague_payload.sidecars(), BlobSidecars::Eip4844(_))); + + // inject second blob tx to the pool + let blob_tx_hash = node.rpc.inject_tx(second_blob).await?; + // fetch it from rpc + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // assert that it still has a legacy sidecar + assert!(envelope.as_eip4844().unwrap().tx().sidecar().unwrap().is_eip4844()); + // validate sidecar + TransactionTestContext::validate_sidecar(envelope); + + tokio::time::sleep(Duration::from_secs(11)).await; + + // fetch second blob tx from rpc again + let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; + // assert that it was converted to eip7594 + assert!(envelope.as_eip4844().unwrap().tx().sidecar().unwrap().is_eip7594()); + // validate sidecar + TransactionTestContext::validate_sidecar(envelope); + + // submit the Prague payload + node.update_forkchoice(genesis_hash, node.submit_payload(prague_payload).await?).await?; + + // Build first Osaka payload + node.payload.timestamp = osaka_timestamp - 1; + let osaka_payload = node.new_payload().await?; + + // Assert that it includes the second blob tx with eip7594 sidecar + assert!(osaka_payload.block().body().transactions().any(|tx| *tx.hash() == blob_tx_hash)); + assert!(matches!(osaka_payload.sidecars(), BlobSidecars::Eip7594(_))); + + 
node.update_forkchoice(genesis_hash, node.submit_payload(osaka_payload).await?).await?; + + Ok(()) +} diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index eb69452449f..11300537b5e 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -84,7 +84,6 @@ async fn assert_chain_advances(node: &FullNode) where N: FullNodeComponents, AddOns: RethRpcAddOns, - N::Types: NodeTypes, >::Handle: RpcHandleProvider>::EthApi>, { let mut notifications = node.provider.canonical_state_stream(); diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 8c969c9d44c..5b3eb9cfcbd 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -176,8 +176,8 @@ where debug!(target: "payload_builder", id=%attributes.id, parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); let mut cumulative_gas_used = 0; - let block_gas_limit: u64 = builder.evm_mut().block().gas_limit; - let base_fee = builder.evm_mut().block().basefee; + let block_gas_limit: u64 = builder.evm_mut().block().gas_limit(); + let base_fee = builder.evm_mut().block().basefee(); let mut best_txs = best_txs(BestTransactionsAttributes::new( base_fee, @@ -232,10 +232,10 @@ where if is_osaka && estimated_block_size_with_tx > MAX_RLP_BLOCK_SIZE { best_txs.mark_invalid( &pool_tx, - InvalidPoolTransactionError::OversizedData( - estimated_block_size_with_tx, - MAX_RLP_BLOCK_SIZE, - ), + InvalidPoolTransactionError::OversizedData { + size: estimated_block_size_with_tx, + limit: MAX_RLP_BLOCK_SIZE, + }, ); continue; } diff --git a/crates/ethereum/primitives/Cargo.toml b/crates/ethereum/primitives/Cargo.toml index efa8b945f95..3bf9e8f3a48 100644 --- a/crates/ethereum/primitives/Cargo.toml +++ b/crates/ethereum/primitives/Cargo.toml @@ -73,6 +73,7 @@ reth-codec = [ "dep:reth-zstd-compressors", ] arbitrary = [ + "std", "dep:arbitrary", "alloy-consensus/arbitrary", "alloy-consensus/k256", diff --git a/crates/ethereum/reth/Cargo.toml b/crates/ethereum/reth/Cargo.toml index 959b7c1b65f..0d57abf6f20 100644 --- a/crates/ethereum/reth/Cargo.toml +++ b/crates/ethereum/reth/Cargo.toml @@ -144,7 +144,13 @@ rpc = [ "dep:alloy-rpc-types-engine", ] tasks = ["dep:reth-tasks"] -js-tracer = ["rpc", "reth-rpc/js-tracer"] +js-tracer = [ + "rpc", + "reth-rpc/js-tracer", + "reth-node-builder?/js-tracer", + "reth-node-ethereum?/js-tracer", + "reth-rpc-eth-types?/js-tracer", +] network = ["dep:reth-network", "tasks", "dep:reth-network-api", "dep:reth-eth-wire"] provider = ["storage-api", "tasks", "dep:reth-provider", "dep:reth-db", "dep:reth-codecs"] storage-api = ["dep:reth-storage-api"] diff --git a/crates/evm/evm/src/aliases.rs b/crates/evm/evm/src/aliases.rs index 6bb1ab1c35a..7758f0aea17 100644 --- a/crates/evm/evm/src/aliases.rs +++ b/crates/evm/evm/src/aliases.rs @@ -11,6 +11,9 @@ pub type EvmFactoryFor = /// Helper to access [`EvmFactory::Spec`] for a given [`ConfigureEvm`]. pub type SpecFor = as EvmFactory>::Spec; +/// Helper to access [`EvmFactory::BlockEnv`] for a given [`ConfigureEvm`]. +pub type BlockEnvFor = as EvmFactory>::BlockEnv; + /// Helper to access [`EvmFactory::Evm`] for a given [`ConfigureEvm`]. pub type EvmFor = as EvmFactory>::Evm; @@ -31,7 +34,7 @@ pub type ExecutionCtxFor<'a, Evm> = <::BlockExecutorFactory as BlockExecutorFactory>::ExecutionCtx<'a>; /// Type alias for [`EvmEnv`] for a given [`ConfigureEvm`]. 
-pub type EvmEnvFor = EvmEnv>; +pub type EvmEnvFor = EvmEnv, BlockEnvFor>; /// Helper trait to bound [`Inspector`] for a [`ConfigureEvm`]. pub trait InspectorFor: Inspector> {} diff --git a/crates/evm/evm/src/engine.rs b/crates/evm/evm/src/engine.rs index 5c721d811bc..e8316426079 100644 --- a/crates/evm/evm/src/engine.rs +++ b/crates/evm/evm/src/engine.rs @@ -2,7 +2,7 @@ use crate::{execute::ExecutableTxFor, ConfigureEvm, EvmEnvFor, ExecutionCtxFor}; /// [`ConfigureEvm`] extension providing methods for executing payloads. pub trait ConfigureEngineEvm: ConfigureEvm { - /// Returns an [`EvmEnvFor`] for the given payload. + /// Returns an [`crate::EvmEnv`] for the given payload. fn evm_env_for_payload(&self, payload: &ExecutionData) -> Result, Self::Error>; /// Returns an [`ExecutionCtxFor`] for the given payload. @@ -23,14 +23,14 @@ pub trait ExecutableTxIterator: Iterator> + Send + 'static { /// The executable transaction type iterator yields. - type Tx: ExecutableTxFor + Clone + Send + 'static; + type Tx: ExecutableTxFor + Clone + Send + Sync + 'static; /// Errors that may occur while recovering or decoding transactions. type Error: core::error::Error + Send + Sync + 'static; } impl ExecutableTxIterator for T where - Tx: ExecutableTxFor + Clone + Send + 'static, + Tx: ExecutableTxFor + Clone + Send + Sync + 'static, Err: core::error::Error + Send + Sync + 'static, T: Iterator> + Send + 'static, { diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 5e072f56e45..29a7f233a15 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -1,7 +1,7 @@ //! Traits for execution. use crate::{ConfigureEvm, Database, OnStateHook, TxEnvFor}; -use alloc::{boxed::Box, vec::Vec}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::{BlockHeader, Header}; use alloy_eips::eip2718::WithEncoded; pub use alloy_evm::block::{BlockExecutor, BlockExecutorFactory}; @@ -149,6 +149,11 @@ pub trait Executor: Sized { } /// Helper type for the output of executing a block. +/// +/// Deprecated: this type is unused within reth and will be removed in the next +/// major release. Use `reth_execution_types::BlockExecutionResult` or +/// `reth_execution_types::BlockExecutionOutput`. +#[deprecated(note = "Use reth_execution_types::BlockExecutionResult or BlockExecutionOutput")] #[derive(Debug, Clone)] pub struct ExecuteOutput { /// Receipts obtained after executing a block. @@ -198,7 +203,8 @@ pub struct BlockAssemblerInput<'a, 'b, F: BlockExecutorFactory, H = Header> { /// Configuration of EVM used when executing the block. /// /// Contains context relevant to EVM such as [`revm::context::BlockEnv`]. - pub evm_env: EvmEnv<::Spec>, + pub evm_env: + EvmEnv<::Spec, ::BlockEnv>, /// [`BlockExecutorFactory::ExecutionCtx`] used to execute the block. pub execution_ctx: F::ExecutionCtx<'a>, /// Parent block header. @@ -220,7 +226,10 @@ impl<'a, 'b, F: BlockExecutorFactory, H> BlockAssemblerInput<'a, 'b, F, H> { /// Creates a new [`BlockAssemblerInput`]. 
#[expect(clippy::too_many_arguments)] pub fn new( - evm_env: EvmEnv<::Spec>, + evm_env: EvmEnv< + ::Spec, + ::BlockEnv, + >, execution_ctx: F::ExecutionCtx<'a>, parent: &'a SealedHeader, transactions: Vec, @@ -438,7 +447,7 @@ impl ExecutorTx for Recovered ExecutorTx for WithTxEnv<<::Evm as Evm>::Tx, T> where - T: ExecutorTx, + T: ExecutorTx + Clone, Executor: BlockExecutor, <::Evm as Evm>::Tx: Clone, Self: RecoveredTx, @@ -448,7 +457,7 @@ where } fn into_recovered(self) -> Recovered { - self.tx.into_recovered() + Arc::unwrap_or_clone(self.tx).into_recovered() } } @@ -460,6 +469,7 @@ where Evm: Evm< Spec = ::Spec, HaltReason = ::HaltReason, + BlockEnv = ::BlockEnv, DB = &'a mut State, >, Transaction = N::SignedTx, @@ -631,7 +641,7 @@ pub struct WithTxEnv { /// The transaction environment for EVM. pub tx_env: TxEnv, /// The recovered transaction. - pub tx: T, + pub tx: Arc, } impl> RecoveredTx for WithTxEnv { diff --git a/crates/evm/evm/src/lib.rs b/crates/evm/evm/src/lib.rs index 00cd3a0f1f2..8b62ce26640 100644 --- a/crates/evm/evm/src/lib.rs +++ b/crates/evm/evm/src/lib.rs @@ -404,7 +404,13 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { db: &'a mut State, parent: &'a SealedHeader<::BlockHeader>, attributes: Self::NextBlockEnvCtx, - ) -> Result, Self::Error> { + ) -> Result< + impl BlockBuilder< + Primitives = Self::Primitives, + Executor: BlockExecutorFor<'a, Self::BlockExecutorFactory, DB>, + >, + Self::Error, + > { let evm_env = self.next_evm_env(parent, &attributes)?; let evm = self.evm_with_env(db, evm_env); let ctx = self.context_for_next_block(parent, attributes)?; diff --git a/crates/exex/exex/src/backfill/factory.rs b/crates/exex/exex/src/backfill/factory.rs index 789d63f84e2..29734b905e2 100644 --- a/crates/exex/exex/src/backfill/factory.rs +++ b/crates/exex/exex/src/backfill/factory.rs @@ -24,7 +24,7 @@ impl BackfillJobFactory { Self { evm_config, provider, - prune_modes: PruneModes::none(), + prune_modes: PruneModes::default(), thresholds: ExecutionStageThresholds { // Default duration for a database transaction to be considered long-lived is // 60 seconds, so we limit the backfill job to the half of it to be sure we finish @@ -39,7 +39,7 @@ impl BackfillJobFactory { } /// Sets the prune modes - pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + pub const fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { self.prune_modes = prune_modes; self } diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index aa7cacdba4a..9d50737f5aa 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -256,7 +256,7 @@ mod tests { use reth_ethereum_primitives::{Block, BlockBody, Transaction}; use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::{ - crypto::secp256k1::public_key_to_address, Block as _, FullNodePrimitives, + crypto::secp256k1::public_key_to_address, Block as _, NodePrimitives, }; use reth_provider::{ providers::{BlockchainProvider, ProviderNodeTypes}, @@ -395,7 +395,7 @@ mod tests { ) -> Result<()> where N: ProviderNodeTypes< - Primitives: FullNodePrimitives< + Primitives: NodePrimitives< Block = reth_ethereum_primitives::Block, BlockBody = reth_ethereum_primitives::BlockBody, Receipt = reth_ethereum_primitives::Receipt, diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index a3d82428822..e489a98abf7 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ 
b/crates/exex/exex/src/backfill/test_utils.rs @@ -10,7 +10,7 @@ use reth_evm::{ ConfigureEvm, }; use reth_evm_ethereum::EthEvmConfig; -use reth_node_api::FullNodePrimitives; +use reth_node_api::NodePrimitives; use reth_primitives_traits::{Block as _, RecoveredBlock}; use reth_provider::{ providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, @@ -58,7 +58,7 @@ pub(crate) fn execute_block_and_commit_to_database( ) -> eyre::Result> where N: ProviderNodeTypes< - Primitives: FullNodePrimitives< + Primitives: NodePrimitives< Block = reth_ethereum_primitives::Block, BlockBody = reth_ethereum_primitives::BlockBody, Receipt = reth_ethereum_primitives::Receipt, @@ -169,7 +169,7 @@ pub(crate) fn blocks_and_execution_outputs( > where N: ProviderNodeTypes< - Primitives: FullNodePrimitives< + Primitives: NodePrimitives< Block = reth_ethereum_primitives::Block, BlockBody = reth_ethereum_primitives::BlockBody, Receipt = reth_ethereum_primitives::Receipt, @@ -193,7 +193,7 @@ pub(crate) fn blocks_and_execution_outcome( ) -> eyre::Result<(Vec>, ExecutionOutcome)> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives< + N::Primitives: NodePrimitives< Block = reth_ethereum_primitives::Block, Receipt = reth_ethereum_primitives::Receipt, >, diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 0305da323d0..4b29dec99c9 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -244,7 +244,7 @@ pub async fn test_exex_context_with_chain_spec( db, chain_spec.clone(), StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"), - ); + )?; let genesis_hash = init_genesis(&provider_factory)?; let provider = BlockchainProvider::new(provider_factory.clone())?; diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index d3195ad27fe..54a22875d94 100644 --- a/crates/fs-util/src/lib.rs +++ b/crates/fs-util/src/lib.rs @@ -39,7 +39,7 @@ pub enum FsPathError { }, /// Error variant for failed read link operation with additional path context. - #[error("failed to read from {path:?}: {source}")] + #[error("failed to read link {path:?}: {source}")] ReadLink { /// The source `io::Error`. source: io::Error, @@ -230,6 +230,12 @@ pub fn read(path: impl AsRef) -> Result> { fs::read(path).map_err(|err| FsPathError::read(err, path)) } +/// Wrapper for `std::fs::read_link` +pub fn read_link(path: impl AsRef) -> Result { + let path = path.as_ref(); + fs::read_link(path).map_err(|err| FsPathError::read_link(err, path)) +} + /// Wrapper for `std::fs::write` pub fn write(path: impl AsRef, contents: impl AsRef<[u8]>) -> Result<()> { let path = path.as_ref(); @@ -332,10 +338,7 @@ where Err(err) => { // Clean up the temporary file before returning the error let _ = fs::remove_file(&tmp_path); - return Err(FsPathError::Write { - source: Error::other(err.into()), - path: tmp_path.clone(), - }); + return Err(FsPathError::Write { source: Error::other(err.into()), path: tmp_path }); } } diff --git a/crates/net/banlist/src/lib.rs b/crates/net/banlist/src/lib.rs index fb44500efe2..31b779bc8d5 100644 --- a/crates/net/banlist/src/lib.rs +++ b/crates/net/banlist/src/lib.rs @@ -125,11 +125,14 @@ impl BanList { /// Bans the IP until the timestamp. /// /// This does not ban non-global IPs. + /// If the IP is already banned, the timeout will be updated to the new value. 
pub fn ban_ip_until(&mut self, ip: IpAddr, until: Instant) { self.ban_ip_with(ip, Some(until)); } - /// Bans the peer until the timestamp + /// Bans the peer until the timestamp. + /// + /// If the peer is already banned, the timeout will be updated to the new value. pub fn ban_peer_until(&mut self, node_id: PeerId, until: Instant) { self.ban_peer_with(node_id, Some(until)); } @@ -147,6 +150,8 @@ impl BanList { } /// Bans the peer indefinitely or until the given timeout. + /// + /// If the peer is already banned, the timeout will be updated to the new value. pub fn ban_peer_with(&mut self, node_id: PeerId, until: Option) { self.banned_peers.insert(node_id, until); } @@ -154,6 +159,7 @@ impl BanList { /// Bans the ip indefinitely or until the given timeout. /// /// This does not ban non-global IPs. + /// If the IP is already banned, the timeout will be updated to the new value. pub fn ban_ip_with(&mut self, ip: IpAddr, until: Option) { if is_global(&ip) { self.banned_ips.insert(ip, until); @@ -167,7 +173,7 @@ mod tests { #[test] fn can_ban_unban_peer() { - let peer = PeerId::random(); + let peer = PeerId::new([1; 64]); let mut banlist = BanList::default(); banlist.ban_peer(peer); assert!(banlist.is_banned_peer(&peer)); diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index 20691a6d929..fadda2b6348 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -35,7 +35,6 @@ tracing.workspace = true thiserror.workspace = true parking_lot.workspace = true rand_08 = { workspace = true, optional = true } -generic-array.workspace = true serde = { workspace = true, optional = true } itertools.workspace = true @@ -53,7 +52,6 @@ serde = [ "alloy-primitives/serde", "discv5/serde", "enr/serde", - "generic-array/serde", "parking_lot/serde", "rand_08?/serde", "secp256k1/serde", diff --git a/crates/net/discv4/src/node.rs b/crates/net/discv4/src/node.rs index 242c3883228..7e993ff8333 100644 --- a/crates/net/discv4/src/node.rs +++ b/crates/net/discv4/src/node.rs @@ -1,5 +1,4 @@ use alloy_primitives::keccak256; -use generic_array::GenericArray; use reth_network_peers::{NodeRecord, PeerId}; /// The key type for the table. @@ -15,8 +14,7 @@ impl From for NodeKey { impl From for discv5::Key { fn from(value: NodeKey) -> Self { let hash = keccak256(value.0.as_slice()); - let hash = *GenericArray::from_slice(hash.as_slice()); - Self::new_raw(value, hash) + Self::new_raw(value, hash.0.into()) } } diff --git a/crates/net/discv5/src/error.rs b/crates/net/discv5/src/error.rs index c373a17194c..64b2cd73af8 100644 --- a/crates/net/discv5/src/error.rs +++ b/crates/net/discv5/src/error.rs @@ -13,7 +13,7 @@ pub enum Error { #[error("network stack identifier is not configured")] NetworkStackIdNotConfigured, /// Missing key used to identify rlpx network. - #[error("fork missing on enr, key missing")] + #[error("fork missing on enr, key {0:?} and key 'eth' missing")] ForkMissing(&'static [u8]), /// Failed to decode [`ForkId`](reth_ethereum_forks::ForkId) rlp value. #[error("failed to decode fork id, 'eth': {0:?}")] diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index be7b781fe74..92c7c543a3a 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -83,7 +83,6 @@ impl Discv5 { //////////////////////////////////////////////////////////////////////////////////////////////// /// Adds the node to the table, if it is not already present. 
- #[expect(clippy::result_large_err)] pub fn add_node(&self, node_record: Enr) -> Result<(), Error> { let EnrCombinedKeyWrapper(enr) = node_record.into(); self.discv5.add_enr(enr).map_err(Error::AddNodeFailed) @@ -320,10 +319,7 @@ impl Discv5 { return None } - // todo: extend for all network stacks in reth-network rlpx logic - let fork_id = (self.fork_key == Some(NetworkStackId::ETH)) - .then(|| self.get_fork_id(enr).ok()) - .flatten(); + let fork_id = self.get_fork_id(enr).ok(); trace!(target: "net::discv5", ?fork_id, @@ -379,7 +375,6 @@ impl Discv5 { /// Returns the [`ForkId`] of the given [`Enr`](discv5::Enr) w.r.t. the local node's network /// stack, if field is set. - #[expect(clippy::result_large_err)] pub fn get_fork_id( &self, enr: &discv5::enr::Enr, @@ -387,7 +382,22 @@ impl Discv5 { let Some(key) = self.fork_key else { return Err(Error::NetworkStackIdNotConfigured) }; let fork_id = enr .get_decodable::(key) - .ok_or(Error::ForkMissing(key))? + .or_else(|| { + (key != NetworkStackId::ETH) + .then(|| { + // Fallback: trying to get fork id from Enr with 'eth' as network stack id + trace!(target: "net::discv5", + key = %String::from_utf8_lossy(key), + "Fork id not found for key, trying 'eth'..." + ); + enr.get_decodable::(NetworkStackId::ETH) + }) + .flatten() + }) + .ok_or({ + trace!(target: "net::discv5", "Fork id not found for 'eth' network stack id"); + Error::ForkMissing(key) + })? .map(Into::into)?; Ok(fork_id) @@ -669,6 +679,8 @@ mod test { use ::enr::{CombinedKey, EnrKey}; use rand_08::thread_rng; use reth_chainspec::MAINNET; + use reth_tracing::init_test_tracing; + use std::env; use tracing::trace; fn discv5_noop() -> Discv5 { @@ -901,4 +913,55 @@ mod test { assert_eq!(fork_id, decoded_fork_id); assert_eq!(TCP_PORT, enr.tcp4().unwrap()); // listen config is defaulting to ip mode ipv4 } + + #[test] + fn get_fork_id_with_different_network_stack_ids() { + unsafe { + env::set_var("RUST_LOG", "net::discv5=trace"); + } + init_test_tracing(); + + let fork_id = MAINNET.latest_fork_id(); + let sk = SecretKey::new(&mut thread_rng()); + + // Test 1: ENR with OPEL fork ID, Discv5 configured for OPEL + let enr_with_opel = Enr::builder() + .add_value_rlp( + NetworkStackId::OPEL, + alloy_rlp::encode(EnrForkIdEntry::from(fork_id)).into(), + ) + .build(&sk) + .unwrap(); + + let mut discv5 = discv5_noop(); + discv5.fork_key = Some(NetworkStackId::OPEL); + assert_eq!(discv5.get_fork_id(&enr_with_opel).unwrap(), fork_id); + + // Test 2: ENR with ETH fork ID, Discv5 configured for OPEL (fallback to ETH) + let enr_with_eth = Enr::builder() + .add_value_rlp( + NetworkStackId::ETH, + alloy_rlp::encode(EnrForkIdEntry::from(fork_id)).into(), + ) + .build(&sk) + .unwrap(); + + discv5.fork_key = Some(NetworkStackId::OPEL); + assert_eq!(discv5.get_fork_id(&enr_with_eth).unwrap(), fork_id); + + // Test 3: ENR with neither OPEL nor ETH fork ID (should fail) + let enr_without_network_stack_id = Enr::empty(&sk).unwrap(); + discv5.fork_key = Some(NetworkStackId::OPEL); + assert!(matches!( + discv5.get_fork_id(&enr_without_network_stack_id), + Err(Error::ForkMissing(NetworkStackId::OPEL)) + )); + + // Test 4: discv5 without network stack id configured (should fail) + let discv5 = discv5_noop(); + assert!(matches!( + discv5.get_fork_id(&enr_without_network_stack_id), + Err(Error::NetworkStackIdNotConfigured) + )); + } } diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 57094813eee..056d809d02f 100644 --- a/crates/net/downloaders/Cargo.toml +++ 
b/crates/net/downloaders/Cargo.toml @@ -51,7 +51,7 @@ thiserror.workspace = true tracing.workspace = true tempfile = { workspace = true, optional = true } -itertools.workspace = true +itertools = { workspace = true, optional = true } [dev-dependencies] async-compression = { workspace = true, features = ["gzip", "tokio"] } @@ -70,7 +70,7 @@ tempfile.workspace = true [features] default = [] -file-client = ["dep:async-compression", "dep:alloy-rlp"] +file-client = ["dep:async-compression", "dep:alloy-rlp", "dep:itertools"] test-utils = [ "tempfile", "reth-consensus/test-utils", diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 09eb22854d4..5d6bd3cf7f8 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -21,7 +21,6 @@ use std::{ cmp::Ordering, collections::BinaryHeap, fmt::Debug, - mem, ops::RangeInclusive, pin::Pin, sync::Arc, @@ -215,9 +214,7 @@ where /// Adds a new response to the internal buffer fn buffer_bodies_response(&mut self, response: Vec>) { - // take into account capacity - let size = response.iter().map(BlockResponse::size).sum::() + - response.capacity() * mem::size_of::>(); + let size = response.iter().map(BlockResponse::size).sum::(); let response = OrderedBodiesResponse { resp: response, size }; let response_len = response.len(); @@ -347,6 +344,12 @@ where // written by external services (e.g. BlockchainTree). tracing::trace!(target: "downloaders::bodies", ?range, prev_range = ?self.download_range, "Download range reset"); info!(target: "downloaders::bodies", count, ?range, "Downloading bodies"); + // Increment out-of-order requests metric if the new start is below the last returned block + if let Some(last_returned) = self.latest_queued_block_number && + *range.start() < last_returned + { + self.metrics.out_of_order_requests.increment(1); + } self.clear(); self.download_range = range; Ok(()) diff --git a/crates/net/downloaders/src/bodies/test_utils.rs b/crates/net/downloaders/src/bodies/test_utils.rs index a7172ec1a00..513226a2c91 100644 --- a/crates/net/downloaders/src/bodies/test_utils.rs +++ b/crates/net/downloaders/src/bodies/test_utils.rs @@ -3,7 +3,7 @@ #![allow(dead_code)] use alloy_consensus::BlockHeader; -use alloy_primitives::{B256, U256}; +use alloy_primitives::B256; use reth_ethereum_primitives::BlockBody; use reth_network_p2p::bodies::response::BlockResponse; use reth_primitives_traits::{Block, SealedBlock, SealedHeader}; @@ -55,9 +55,7 @@ pub(crate) fn insert_headers( .expect("failed to create writer"); for header in headers { - writer - .append_header(header.header(), U256::ZERO, &header.hash()) - .expect("failed to append header"); + writer.append_header(header.header(), &header.hash()).expect("failed to append header"); } drop(writer); provider_rw.commit().expect("failed to commit"); diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 34c2f56b75c..4d545aec178 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -481,18 +481,16 @@ impl FileReader { chunk: &mut Vec, chunk_byte_len: u64, ) -> Result { + let mut buffer = vec![0u8; 64 * 1024]; loop { if chunk.len() >= chunk_byte_len as usize { return Ok(true) } - let mut buffer = vec![0u8; 64 * 1024]; - match self.read(&mut buffer).await { Ok(0) => return Ok(!chunk.is_empty()), Ok(n) => { - buffer.truncate(n); - chunk.extend_from_slice(&buffer); + chunk.extend_from_slice(&buffer[..n]); } Err(e) 
=> return Err(e.into()), } @@ -815,7 +813,7 @@ mod tests { // construct headers downloader and use first header let mut header_downloader = ReverseHeadersDownloaderBuilder::default() - .build(Arc::clone(&Arc::new(client)), Arc::new(TestConsensus::default())); + .build(Arc::new(client), Arc::new(TestConsensus::default())); header_downloader.update_local_head(local_header.clone()); header_downloader.update_sync_target(SyncTarget::Tip(sync_target_hash)); @@ -890,7 +888,7 @@ mod tests { // construct headers downloader and use first header let mut header_downloader = ReverseHeadersDownloaderBuilder::default() - .build(Arc::clone(&Arc::new(client)), Arc::new(TestConsensus::default())); + .build(Arc::new(client), Arc::new(TestConsensus::default())); header_downloader.update_local_head(local_header.clone()); header_downloader.update_sync_target(SyncTarget::Tip(sync_target_hash)); diff --git a/crates/net/ecies/Cargo.toml b/crates/net/ecies/Cargo.toml index a55e5fa7e8f..75a4bc78978 100644 --- a/crates/net/ecies/Cargo.toml +++ b/crates/net/ecies/Cargo.toml @@ -25,9 +25,6 @@ pin-project.workspace = true tracing = { workspace = true, features = ["attributes"] } -# HeaderBytes -generic-array.workspace = true -typenum.workspace = true byteorder.workspace = true # crypto @@ -42,3 +39,6 @@ aes.workspace = true hmac.workspace = true block-padding.workspace = true cipher = { workspace = true, features = ["block-padding"] } + +[dev-dependencies] +tokio = { workspace = true, features = ["net", "rt", "macros"] } diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 350cd3f7ed4..a6355c294f6 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -2,7 +2,7 @@ use crate::{ error::ECIESErrorImpl, - mac::{HeaderBytes, MAC}, + mac::MAC, util::{hmac_sha256, sha256}, ECIESError, }; @@ -499,7 +499,7 @@ impl ECIES { } /// Read and verify an auth message from the input data. - #[tracing::instrument(skip_all)] + #[tracing::instrument(level = "trace", skip_all)] pub fn read_auth(&mut self, data: &mut [u8]) -> Result<(), ECIESError> { self.remote_init_msg = Some(Bytes::copy_from_slice(data)); let unencrypted = self.decrypt_message(data)?; @@ -571,7 +571,7 @@ impl ECIES { } /// Read and verify an ack message from the input data. 
- #[tracing::instrument(skip_all)] + #[tracing::instrument(level = "trace", skip_all)] pub fn read_ack(&mut self, data: &mut [u8]) -> Result<(), ECIESError> { self.remote_init_msg = Some(Bytes::copy_from_slice(data)); let unencrypted = self.decrypt_message(data)?; @@ -639,7 +639,6 @@ impl ECIES { header[..3].copy_from_slice(&buf[..3]); header[3..6].copy_from_slice(&[194, 128, 128]); - let mut header = HeaderBytes::from(header); self.egress_aes.as_mut().unwrap().apply_keystream(&mut header); self.egress_mac.as_mut().unwrap().update_header(&header); let tag = self.egress_mac.as_mut().unwrap().digest(); @@ -660,7 +659,7 @@ impl ECIES { } let (header_bytes, mac_bytes) = split_at_mut(data, 16)?; - let header = HeaderBytes::from_mut_slice(header_bytes); + let header: &mut [u8; 16] = header_bytes.try_into().unwrap(); let mac = B128::from_slice(&mac_bytes[..16]); self.ingress_mac.as_mut().unwrap().update_header(header); @@ -670,11 +669,11 @@ impl ECIES { } self.ingress_aes.as_mut().unwrap().apply_keystream(header); - if header.as_slice().len() < 3 { + if header.len() < 3 { return Err(ECIESErrorImpl::InvalidHeader.into()) } - let body_size = usize::try_from(header.as_slice().read_uint::(3)?)?; + let body_size = usize::try_from((&header[..]).read_uint::(3)?)?; self.body_size = Some(body_size); diff --git a/crates/net/ecies/src/codec.rs b/crates/net/ecies/src/codec.rs index b5a10284cf2..73c3469cd2f 100644 --- a/crates/net/ecies/src/codec.rs +++ b/crates/net/ecies/src/codec.rs @@ -58,7 +58,7 @@ impl Decoder for ECIESCodec { type Item = IngressECIESValue; type Error = ECIESError; - #[instrument(level = "trace", skip_all, fields(peer=?self.ecies.remote_id, state=?self.state))] + #[instrument(level = "trace", target = "net::ecies", skip_all, fields(peer=?self.ecies.remote_id, state=?self.state))] fn decode(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { loop { match self.state { @@ -110,7 +110,7 @@ impl Decoder for ECIESCodec { self.ecies.read_header(&mut buf.split_to(ECIES::header_len()))?; if body_size > MAX_INITIAL_HANDSHAKE_SIZE { - trace!(?body_size, max=?MAX_INITIAL_HANDSHAKE_SIZE, "Header exceeds max initial handshake size"); + trace!(?body_size, max=?MAX_INITIAL_HANDSHAKE_SIZE, "Body exceeds max initial handshake size"); return Err(ECIESErrorImpl::InitialHeaderBodyTooLarge { body_size, max_body_size: MAX_INITIAL_HANDSHAKE_SIZE, @@ -150,7 +150,7 @@ impl Decoder for ECIESCodec { impl Encoder for ECIESCodec { type Error = io::Error; - #[instrument(level = "trace", skip(self, buf), fields(peer=?self.ecies.remote_id, state=?self.state))] + #[instrument(level = "trace", target = "net::ecies", skip(self, buf), fields(peer=?self.ecies.remote_id, state=?self.state))] fn encode(&mut self, item: EgressECIESValue, buf: &mut BytesMut) -> Result<(), Self::Error> { match item { EgressECIESValue::Auth => { diff --git a/crates/net/ecies/src/error.rs b/crates/net/ecies/src/error.rs index 9dabfc16183..a93b731fee6 100644 --- a/crates/net/ecies/src/error.rs +++ b/crates/net/ecies/src/error.rs @@ -33,7 +33,7 @@ pub enum ECIESErrorImpl { #[error(transparent)] IO(std::io::Error), /// Error when checking the HMAC tag against the tag on the message being decrypted - #[error("tag check failure in read_header")] + #[error("tag check failure in decrypt_message")] TagCheckDecryptFailed, /// Error when checking the HMAC tag against the tag on the header #[error("tag check failure in read_header")] @@ -47,8 +47,8 @@ pub enum ECIESErrorImpl { /// Error when parsing ACK data #[error("invalid ack data")] InvalidAckData, - 
/// Error when reading the header if its length is <3 - #[error("invalid body data")] + /// Error when reading/parsing the `RLPx` header + #[error("invalid header")] InvalidHeader, /// Error when interacting with secp256k1 #[error(transparent)] diff --git a/crates/net/ecies/src/mac.rs b/crates/net/ecies/src/mac.rs index 03847d091ee..fcccae72679 100644 --- a/crates/net/ecies/src/mac.rs +++ b/crates/net/ecies/src/mac.rs @@ -14,16 +14,7 @@ use alloy_primitives::{B128, B256}; use block_padding::NoPadding; use cipher::BlockEncrypt; use digest::KeyInit; -use generic_array::GenericArray; use sha3::{Digest, Keccak256}; -use typenum::U16; - -/// Type alias for a fixed-size array of 16 bytes used as headers. -/// -/// This type is defined as [`GenericArray`] and is commonly employed in Ethereum `RLPx` -/// protocol-related structures for headers. It represents 16 bytes of data used in various -/// cryptographic operations, such as MAC (Message Authentication Code) computation. -pub type HeaderBytes = GenericArray; /// [`Ethereum MAC`](https://github.com/ethereum/devp2p/blob/master/rlpx.md#mac) state. /// @@ -49,8 +40,8 @@ impl MAC { self.hasher.update(data) } - /// Accumulate the given [`HeaderBytes`] into the MAC's internal state. - pub fn update_header(&mut self, data: &HeaderBytes) { + /// Accumulate the given header bytes into the MAC's internal state. + pub fn update_header(&mut self, data: &[u8; 16]) { let aes = Aes256Enc::new_from_slice(self.secret.as_ref()).unwrap(); let mut encrypted = self.digest().0; diff --git a/crates/net/ecies/src/stream.rs b/crates/net/ecies/src/stream.rs index 9915fc42e6a..adf4dc7634d 100644 --- a/crates/net/ecies/src/stream.rs +++ b/crates/net/ecies/src/stream.rs @@ -40,7 +40,7 @@ where Io: AsyncRead + AsyncWrite + Unpin, { /// Connect to an `ECIES` server - #[instrument(skip(transport, secret_key))] + #[instrument(level = "trace", target = "net::ecies", skip(transport, secret_key))] pub async fn connect( transport: Io, secret_key: SecretKey, @@ -67,8 +67,7 @@ where secret_key: SecretKey, remote_id: PeerId, ) -> Result { - let ecies = ECIESCodec::new_client(secret_key, remote_id) - .map_err(|_| io::Error::other("invalid handshake"))?; + let ecies = ECIESCodec::new_client(secret_key, remote_id)?; let mut transport = ecies.framed(transport); diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 9eb4f15f0bc..489fd86e7dc 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -332,9 +332,9 @@ impl ProtocolProxy { return Ok(msg); } - let mut masked = Vec::from(msg); + let mut masked: BytesMut = msg.into(); masked[0] = masked[0].checked_add(offset).ok_or(io::ErrorKind::InvalidInput)?; - Ok(masked.into()) + Ok(masked.freeze()) } /// Unmasks the message ID of a message received from the wire. @@ -385,7 +385,6 @@ impl CanDisconnect for ProtocolProxy { &mut self, _reason: DisconnectReason, ) -> Pin>::Error>> + Send + '_>> { - // TODO handle disconnects Box::pin(async move { Ok(()) }) } } diff --git a/crates/net/network-types/src/peers/mod.rs b/crates/net/network-types/src/peers/mod.rs index f3529875018..d41882d494c 100644 --- a/crates/net/network-types/src/peers/mod.rs +++ b/crates/net/network-types/src/peers/mod.rs @@ -25,7 +25,7 @@ pub struct Peer { /// The state of the connection, if any. pub state: PeerConnectionState, /// The [`ForkId`] that the peer announced via discovery. 
- pub fork_id: Option, + pub fork_id: Option>, /// Whether the entry should be removed after an existing session was terminated. pub remove_after_disconnect: bool, /// The kind of peer diff --git a/crates/net/network/src/budget.rs b/crates/net/network/src/budget.rs index 824148387b4..f1d9ca87469 100644 --- a/crates/net/network/src/budget.rs +++ b/crates/net/network/src/budget.rs @@ -35,13 +35,6 @@ pub const DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS: u32 = DEFAULT_BUD // Default is 40 pending pool imports. pub const DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS: u32 = 4 * DEFAULT_BUDGET_TRY_DRAIN_STREAM; -/// Default budget to try and stream hashes of successfully imported transactions from the pool. -/// -/// Default is naturally same as the number of transactions to attempt importing, -/// [`DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS`], so 40 pool imports. -pub const DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS: u32 = - DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS; - /// Polls the given stream. Breaks with `true` if there maybe is more work. #[macro_export] macro_rules! poll_nested_stream_with_budget { diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 99c3629b42e..bffe52584c9 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -229,8 +229,11 @@ pub struct NetworkConfigBuilder { handshake: Arc, /// List of block hashes to check for required blocks. required_block_hashes: Vec, + /// Optional network id + network_id: Option, /// The header transform type. header_transform: Option>>, + } impl NetworkConfigBuilder { @@ -272,6 +275,7 @@ impl NetworkConfigBuilder { nat: None, handshake: Arc::new(EthHandshake::default()), required_block_hashes: Vec::new(), + network_id: None, header_transform: None, } } @@ -593,6 +597,12 @@ impl NetworkConfigBuilder { self } + /// Set the optional network id. + pub const fn network_id(mut self, network_id: Option) -> Self { + self.network_id = network_id; + self + } + /// Sets the header transform type. pub fn header_transform( mut self, @@ -635,6 +645,7 @@ impl NetworkConfigBuilder { nat, handshake, required_block_hashes, + network_id, header_transform, } = self; @@ -662,7 +673,11 @@ impl NetworkConfigBuilder { hello_message.port = listener_addr.port(); // set the status - let status = UnifiedStatus::spec_builder(&chain_spec, &head); + let mut status = UnifiedStatus::spec_builder(&chain_spec, &head); + + if let Some(id) = network_id { + status.chain = id.into(); + } // set a fork filter based on the chain spec and head let fork_filter = chain_spec.fork_filter(head); diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index 6b95b1e3a63..9cc3a6249a8 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -200,7 +200,6 @@ impl Discovery { } /// Add a node to the discv4 table. 
- #[expect(clippy::result_large_err)] pub(crate) fn add_discv5_node(&self, enr: Enr) -> Result<(), NetworkError> { if let Some(discv5) = &self.discv5 { discv5.add_node(enr).map_err(NetworkError::Discv5Error)?; diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index bbdbaf22f40..0a69fc8fe0b 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -7,7 +7,9 @@ pub use client::FetchClient; use crate::{message::BlockRequest, session::BlockRangeInfo, transform::header::HeaderTransform}; use alloy_primitives::B256; use futures::StreamExt; -use reth_eth_wire::{EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives}; +use reth_eth_wire::{ + Capabilities, EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives, +}; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::{ error::{EthResponseValidator, PeerRequestResult, RequestError, RequestResult}, @@ -29,7 +31,7 @@ use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; type InflightHeadersRequest = Request>>; -type InflightBodiesRequest = Request, PeerRequestResult>>; +type InflightBodiesRequest = Request<(), PeerRequestResult>>; /// Manages data fetching operations. /// @@ -87,6 +89,7 @@ impl StateFetcher { peer_id: PeerId, best_hash: B256, best_number: u64, + capabilities: Arc, timeout: Arc, range_info: Option, ) { @@ -96,6 +99,7 @@ impl StateFetcher { state: PeerState::Idle, best_hash, best_number, + capabilities, timeout, last_response_likely_bad: false, range_info, @@ -244,7 +248,7 @@ impl StateFetcher { }) } DownloadRequest::GetBlockBodies { request, response, .. } => { - let inflight = Request { request: request.clone(), response }; + let inflight = Request { request: (), response }; self.inflight_bodies_requests.insert(peer_id, inflight); BlockRequest::GetBlockBodies(GetBlockBodies(request)) } @@ -350,6 +354,9 @@ struct Peer { best_hash: B256, /// Tracks the best number of the peer. best_number: u64, + /// Capabilities announced by the peer. + #[allow(dead_code)] + capabilities: Arc, /// Tracks the current timeout value we use for the peer. timeout: Arc, /// Tracks whether the peer has recently responded with a likely bad response. 
@@ -526,8 +533,23 @@ mod tests { // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); - fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(1)), None); - fetcher.new_active_peer(peer2, B256::random(), 2, Arc::new(AtomicU64::new(1)), None); + let capabilities = Arc::new(Capabilities::from(vec![])); + fetcher.new_active_peer( + peer1, + B256::random(), + 1, + Arc::clone(&capabilities), + Arc::new(AtomicU64::new(1)), + None, + ); + fetcher.new_active_peer( + peer2, + B256::random(), + 2, + Arc::clone(&capabilities), + Arc::new(AtomicU64::new(1)), + None, + ); let first_peer = fetcher.next_best_peer().unwrap(); assert!(first_peer == peer1 || first_peer == peer2); @@ -557,9 +579,31 @@ mod tests { let peer2_timeout = Arc::new(AtomicU64::new(300)); - fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(30)), None); - fetcher.new_active_peer(peer2, B256::random(), 2, Arc::clone(&peer2_timeout), None); - fetcher.new_active_peer(peer3, B256::random(), 3, Arc::new(AtomicU64::new(50)), None); + let capabilities = Arc::new(Capabilities::from(vec![])); + fetcher.new_active_peer( + peer1, + B256::random(), + 1, + Arc::clone(&capabilities), + Arc::new(AtomicU64::new(30)), + None, + ); + fetcher.new_active_peer( + peer2, + B256::random(), + 2, + Arc::clone(&capabilities), + Arc::clone(&peer2_timeout), + None, + ); + fetcher.new_active_peer( + peer3, + B256::random(), + 3, + Arc::clone(&capabilities), + Arc::new(AtomicU64::new(50)), + None, + ); // Must always get peer1 (lowest timeout) assert_eq!(fetcher.next_best_peer(), Some(peer1)); @@ -633,6 +677,7 @@ mod tests { peer_id, Default::default(), Default::default(), + Arc::new(Capabilities::from(vec![])), Default::default(), None, ); diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index d9ece3dd061..e89b1695d91 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -715,7 +715,7 @@ impl PeersManager { pub(crate) fn set_discovered_fork_id(&mut self, peer_id: PeerId, fork_id: ForkId) { if let Some(peer) = self.peers.get_mut(&peer_id) { trace!(target: "net::peers", ?peer_id, ?fork_id, "set discovered fork id"); - peer.fork_id = Some(fork_id); + peer.fork_id = Some(Box::new(fork_id)); } } @@ -757,7 +757,7 @@ impl PeersManager { Entry::Occupied(mut entry) => { let peer = entry.get_mut(); peer.kind = kind; - peer.fork_id = fork_id; + peer.fork_id = fork_id.map(Box::new); peer.addr = addr; if peer.state.is_incoming() { @@ -770,7 +770,7 @@ impl PeersManager { Entry::Vacant(entry) => { trace!(target: "net::peers", ?peer_id, addr=?addr.tcp(), "discovered new node"); let mut peer = Peer::with_kind(addr, kind); - peer.fork_id = fork_id; + peer.fork_id = fork_id.map(Box::new); entry.insert(peer); self.queued_actions.push_back(PeerAction::PeerAdded(peer_id)); } @@ -838,7 +838,7 @@ impl PeersManager { Entry::Occupied(mut entry) => { let peer = entry.get_mut(); peer.kind = kind; - peer.fork_id = fork_id; + peer.fork_id = fork_id.map(Box::new); peer.addr = addr; if peer.state == PeerConnectionState::Idle { @@ -853,7 +853,7 @@ impl PeersManager { trace!(target: "net::peers", ?peer_id, addr=?addr.tcp(), "connects new node"); let mut peer = Peer::with_kind(addr, kind); peer.state = PeerConnectionState::PendingOut; - peer.fork_id = fork_id; + peer.fork_id = fork_id.map(Box::new); entry.insert(peer); self.connection_info.inc_pending_out(); self.queued_actions diff --git a/crates/net/network/src/session/active.rs 
b/crates/net/network/src/session/active.rs index 32f90899851..0044c1f92e1 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -924,6 +924,16 @@ impl QueuedOutgoingMessages { } } +impl Drop for QueuedOutgoingMessages { + fn drop(&mut self) { + // Ensure gauge is decremented for any remaining items to avoid metric leak on teardown. + let remaining = self.messages.len(); + if remaining > 0 { + self.count.decrement(remaining as f64); + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/net/network/src/session/counter.rs b/crates/net/network/src/session/counter.rs index db9bd16cda9..a3318ea05c5 100644 --- a/crates/net/network/src/session/counter.rs +++ b/crates/net/network/src/session/counter.rs @@ -3,7 +3,7 @@ use reth_network_api::Direction; use reth_network_types::SessionLimits; /// Keeps track of all sessions. -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct SessionCounter { /// Limits to enforce. limits: SessionLimits, diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 9c01fc6f410..17528e2fcfa 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -905,7 +905,7 @@ pub(crate) async fn start_pending_incoming_session( } /// Starts the authentication process for a connection initiated by a remote peer. -#[instrument(skip_all, fields(%remote_addr, peer_id), target = "net")] +#[instrument(level = "trace", target = "net::network", skip_all, fields(%remote_addr, peer_id))] #[expect(clippy::too_many_arguments)] async fn start_pending_outbound_session( handshake: Arc, diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 847c3f40249..7d0f3c39e5f 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -163,6 +163,7 @@ impl NetworkState { peer, status.blockhash, block_number, + Arc::clone(&capabilities), timeout, range_info, ); diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 4859352f8c4..a112e8cac89 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -411,7 +411,6 @@ impl TransactionFetcher { if let (_, Some(evicted_hash)) = self.hashes_pending_fetch.insert_and_get_evicted(hash) { self.hashes_fetch_inflight_and_pending_fetch.remove(&evicted_hash); - self.hashes_pending_fetch.remove(&evicted_hash); } } } diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 9eb07e7b1a0..f4ef42523d5 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -28,8 +28,7 @@ use self::constants::{tx_manager::*, DEFAULT_SOFT_LIMIT_BYTE_SIZE_TRANSACTIONS_B use crate::{ budget::{ DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS, - DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, - DEFAULT_BUDGET_TRY_DRAIN_STREAM, + DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_STREAM, }, cache::LruCache, duration_metered_exec, metered_poll_nested_stream_with_budget, @@ -77,7 +76,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError}; -use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream}; +use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, trace}; /// The future for importing transactions into the pool. 
@@ -339,7 +338,7 @@ pub struct TransactionsManager< /// - no nonce gaps /// - all dynamic fee requirements are (currently) met /// - account has enough balance to cover the transaction's gas - pending_transactions: ReceiverStream, + pending_transactions: mpsc::Receiver, /// Incoming events from the [`NetworkManager`](crate::NetworkManager). transaction_events: UnboundedMeteredReceiver>, /// How the `TransactionsManager` is configured. @@ -422,7 +421,7 @@ impl peers: Default::default(), command_tx, command_rx: UnboundedReceiverStream::new(command_rx), - pending_transactions: ReceiverStream::new(pending), + pending_transactions: pending, transaction_events: UnboundedMeteredReceiver::new( from_network, NETWORK_POOL_TRANSACTIONS_SCOPE, @@ -1529,14 +1528,16 @@ where // We don't expect this buffer to be large, since only pending transactions are // emitted here. let mut new_txs = Vec::new(); - let maybe_more_pending_txns = metered_poll_nested_stream_with_budget!( - poll_durations.acc_imported_txns, - "net::tx", - "Pending transactions stream", - DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, - this.pending_transactions.poll_next_unpin(cx), - |hash| new_txs.push(hash) - ); + let maybe_more_pending_txns = match this.pending_transactions.poll_recv_many( + cx, + &mut new_txs, + SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE, + ) { + Poll::Ready(count) => { + count == SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE + } + Poll::Pending => false, + }; if !new_txs.is_empty() { this.on_new_pending_transactions(new_txs); } diff --git a/crates/net/p2p/src/snap/client.rs b/crates/net/p2p/src/snap/client.rs index 667824e448c..c8003c38f8e 100644 --- a/crates/net/p2p/src/snap/client.rs +++ b/crates/net/p2p/src/snap/client.rs @@ -1,15 +1,28 @@ use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; use futures::Future; use reth_eth_wire_types::snap::{ - AccountRangeMessage, GetAccountRangeMessage, GetByteCodesMessage, GetStorageRangesMessage, - GetTrieNodesMessage, + AccountRangeMessage, ByteCodesMessage, GetAccountRangeMessage, GetByteCodesMessage, + GetStorageRangesMessage, GetTrieNodesMessage, StorageRangesMessage, TrieNodesMessage, }; +/// Response types for snap sync requests +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum SnapResponse { + /// Response containing account range data + AccountRange(AccountRangeMessage), + /// Response containing storage ranges data + StorageRanges(StorageRangesMessage), + /// Response containing bytecode data + ByteCodes(ByteCodesMessage), + /// Response containing trie node data + TrieNodes(TrieNodesMessage), +} + /// The snap sync downloader client #[auto_impl::auto_impl(&, Arc, Box)] pub trait SnapClient: DownloadClient { - /// The output future type for account range requests - type Output: Future> + Send + Sync + Unpin; + /// The output future type for snap requests + type Output: Future> + Send + Sync + Unpin; /// Sends the account range request to the p2p network and returns the account range /// response received from a peer. 
diff --git a/crates/net/peers/src/node_record.rs b/crates/net/peers/src/node_record.rs index 0b1ef38b3dd..641f2d274dc 100644 --- a/crates/net/peers/src/node_record.rs +++ b/crates/net/peers/src/node_record.rs @@ -309,6 +309,18 @@ mod tests { } } + #[test] + fn test_node_record() { + let url = "enode://fc8a2ff614e848c0af4c99372a81b8655edb8e11b617cffd0aab1a0691bcca66ca533626a528ee567f05f70c8cb529bda2c0a864cc0aec638a367fd2bb8e49fb@127.0.0.1:35481?discport=0"; + let node: NodeRecord = url.parse().unwrap(); + assert_eq!(node, NodeRecord { + address: IpAddr::V4([127,0,0, 1].into()), + tcp_port: 35481, + udp_port: 0, + id: "0xfc8a2ff614e848c0af4c99372a81b8655edb8e11b617cffd0aab1a0691bcca66ca533626a528ee567f05f70c8cb529bda2c0a864cc0aec638a367fd2bb8e49fb".parse().unwrap(), + }) + } + #[test] fn test_url_parse() { let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index c1224d35e5a..8e8774e86c8 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -95,7 +95,11 @@ reth-evm-ethereum = { workspace = true, features = ["test-utils"] } [features] default = [] -js-tracer = ["reth-rpc/js-tracer"] +js-tracer = [ + "reth-rpc/js-tracer", + "reth-node-ethereum/js-tracer", + "reth-rpc-eth-types/js-tracer", +] test-utils = [ "reth-db/test-utils", "reth-chain-state/test-utils", diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 2f543d8b71b..746830595ea 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -22,8 +22,7 @@ use reth_network::{ NetworkPrimitives, }; use reth_node_api::{ - FullNodePrimitives, FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, - NodeTypesWithDBAdapter, + FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, NodeTypesWithDBAdapter, }; use reth_node_core::{ cli::config::{PayloadBuilderConfig, RethTransactionPoolConfig}, @@ -341,6 +340,11 @@ impl WithLaunchContext> { pub const fn config(&self) -> &NodeConfig { self.builder.config() } + + /// Returns a mutable reference to the node builder's config. + pub const fn config_mut(&mut self) -> &mut NodeConfig { + self.builder.config_mut() + } } impl WithLaunchContext> @@ -405,7 +409,6 @@ where FullNodeAdapter, >>::EthApi, >, - N::Primitives: FullNodePrimitives, EngineNodeLauncher: LaunchNode< NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns>, >, @@ -468,6 +471,11 @@ where &self.builder.config } + /// Returns a mutable reference to the node builder's config. + pub const fn config_mut(&mut self) -> &mut NodeConfig<::ChainSpec> { + &mut self.builder.config + } + /// Returns a reference to node's database. pub const fn db(&self) -> &T::DB { &self.builder.adapter.database @@ -745,6 +753,11 @@ impl BuilderContext { &self.config_container.config } + /// Returns a mutable reference to the config of the node. + pub const fn config_mut(&mut self) -> &mut NodeConfig<::ChainSpec> { + &mut self.config_container.config + } + /// Returns the loaded reh.toml config. 
pub const fn reth_config(&self) -> &reth_config::Config { &self.config_container.toml_config diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index 53c46f79b2a..3cd8529dc5f 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -4,8 +4,8 @@ use crate::{BuilderContext, FullNodeTypes}; use alloy_primitives::Address; use reth_chain_state::CanonStateSubscriptions; -use reth_chainspec::ChainSpecProvider; -use reth_node_api::TxTy; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_node_api::{NodeTypes, TxTy}; use reth_transaction_pool::{ blobstore::DiskFileBlobStore, CoinbaseTipOrdering, PoolConfig, PoolTransaction, SubPoolLimit, TransactionPool, TransactionValidationTaskExecutor, TransactionValidator, @@ -127,8 +127,9 @@ impl<'a, Node: FullNodeTypes, V> TxPoolBuilder<'a, Node, V> { } } -impl<'a, Node: FullNodeTypes, V> TxPoolBuilder<'a, Node, TransactionValidationTaskExecutor> +impl<'a, Node, V> TxPoolBuilder<'a, Node, TransactionValidationTaskExecutor> where + Node: FullNodeTypes>, V: TransactionValidator + 'static, V::Transaction: PoolTransaction> + reth_transaction_pool::EthPoolTransaction, @@ -167,14 +168,12 @@ where pub fn create_blob_store( ctx: &BuilderContext, ) -> eyre::Result { - let data_dir = ctx.config().datadir(); - Ok(reth_transaction_pool::blobstore::DiskFileBlobStore::open( - data_dir.blobstore(), - Default::default(), - )?) + let cache_size = Some(ctx.config().txpool.max_cached_entries); + create_blob_store_with_cache(ctx, cache_size) } -/// Create blob store with custom cache size configuration. +/// Create blob store with custom cache size configuration for how many blobs should be cached in +/// memory. pub fn create_blob_store_with_cache( ctx: &BuilderContext, cache_size: Option, @@ -231,7 +230,7 @@ fn spawn_pool_maintenance_task( pool_config: &PoolConfig, ) -> eyre::Result<()> where - Node: FullNodeTypes, + Node: FullNodeTypes>, Pool: reth_transaction_pool::TransactionPoolExt + Clone + 'static, Pool::Transaction: PoolTransaction>, { @@ -265,7 +264,7 @@ pub fn spawn_maintenance_tasks( pool_config: &PoolConfig, ) -> eyre::Result<()> where - Node: FullNodeTypes, + Node: FullNodeTypes>, Pool: reth_transaction_pool::TransactionPoolExt + Clone + 'static, Pool::Transaction: PoolTransaction>, { diff --git a/crates/node/builder/src/engine_api_ext.rs b/crates/node/builder/src/engine_api_ext.rs index 936a2e19051..33d1d3e63ad 100644 --- a/crates/node/builder/src/engine_api_ext.rs +++ b/crates/node/builder/src/engine_api_ext.rs @@ -5,7 +5,6 @@ use crate::rpc::EngineApiBuilder; use eyre::Result; use reth_node_api::{AddOnsContext, FullNodeComponents}; -use reth_rpc_api::IntoEngineApiRpcModule; /// Provides access to an `EngineApi` instance with a callback #[derive(Debug)] @@ -27,7 +26,7 @@ impl EngineApiBuilder for EngineApiExt where B: EngineApiBuilder, N: FullNodeComponents, - B::EngineApi: IntoEngineApiRpcModule + Send + Sync + Clone + 'static, + B::EngineApi: Clone, F: FnOnce(B::EngineApi) + Send + Sync + 'static, { type EngineApi = B::EngineApi; diff --git a/crates/node/builder/src/hooks.rs b/crates/node/builder/src/hooks.rs index dda976599ed..71f0f3b4d2c 100644 --- a/crates/node/builder/src/hooks.rs +++ b/crates/node/builder/src/hooks.rs @@ -10,7 +10,6 @@ pub struct NodeHooks> { pub on_component_initialized: Box>, /// Hook to run once the node is started. 
pub on_node_started: Box>, - _marker: std::marker::PhantomData, } impl NodeHooks @@ -23,7 +22,6 @@ where Self { on_component_initialized: Box::<()>::default(), on_node_started: Box::<()>::default(), - _marker: Default::default(), } } diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 3a35c4183f1..1f5d5dff83b 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -34,12 +34,11 @@ use crate::{ hooks::OnComponentInitializedHook, BuilderContext, ExExLauncher, NodeAdapter, PrimitivesTy, }; -use alloy_consensus::BlockHeader as _; use alloy_eips::eip2124::Head; use alloy_primitives::{BlockNumber, B256}; use eyre::Context; use rayon::ThreadPoolBuilder; -use reth_chainspec::{Chain, EthChainSpec, EthereumHardfork, EthereumHardforks}; +use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; use reth_consensus::noop::NoopConsensus; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; @@ -67,8 +66,8 @@ use reth_node_metrics::{ }; use reth_provider::{ providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider}, - BlockHashReader, BlockNumReader, BlockReaderIdExt, ProviderError, ProviderFactory, - ProviderResult, StageCheckpointReader, StaticFileProviderFactory, + BlockHashReader, BlockNumReader, ProviderError, ProviderFactory, ProviderResult, + StageCheckpointReader, StaticFileProviderBuilder, StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_builder::config::RethRpcServerConfig; @@ -159,7 +158,7 @@ impl LaunchContext { let mut toml_config = reth_config::Config::from_path(&config_path) .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?; - Self::save_pruning_config_if_full_node(&mut toml_config, config, &config_path)?; + Self::save_pruning_config(&mut toml_config, config, &config_path)?; info!(target: "reth::cli", path = ?config_path, "Configuration loaded"); @@ -169,8 +168,9 @@ impl LaunchContext { Ok(toml_config) } - /// Save prune config to the toml file if node is a full node. - fn save_pruning_config_if_full_node( + /// Save prune config to the toml file if node is a full node or has custom pruning CLI + /// arguments. + fn save_pruning_config( reth_config: &mut reth_config::Config, config: &NodeConfig, config_path: impl AsRef, @@ -178,14 +178,14 @@ impl LaunchContext { where ChainSpec: EthChainSpec + reth_chainspec::EthereumHardforks, { - if reth_config.prune.is_none() { - if let Some(prune_config) = config.prune_config() { - reth_config.update_prune_config(prune_config); + if let Some(prune_config) = config.prune_config() { + if reth_config.prune != prune_config { + reth_config.set_prune_config(prune_config); info!(target: "reth::cli", "Saving prune config to toml file"); reth_config.save(config_path.as_ref())?; } - } else if config.prune_config().is_none() { - warn!(target: "reth::cli", "Prune configs present in config file but --full not provided. Running as a Full node"); + } else if !reth_config.prune.is_default() { + warn!(target: "reth::cli", "Pruning configuration is present in the config file, but no CLI arguments are provided. 
Using config from file."); } Ok(()) } @@ -401,18 +401,19 @@ impl LaunchContextWith Option + pub fn prune_config(&self) -> PruneConfig where ChainSpec: reth_chainspec::EthereumHardforks, { + let toml_config = self.toml_config().prune.clone(); let Some(mut node_prune_config) = self.node_config().prune_config() else { // No CLI config is set, use the toml config. - return self.toml_config().prune.clone(); + return toml_config; }; // Otherwise, use the CLI configuration and merge with toml config. - node_prune_config.merge(self.toml_config().prune.clone()); - Some(node_prune_config) + node_prune_config.merge(toml_config); + node_prune_config } /// Returns the configured [`PruneModes`], returning the default if no config was available. @@ -420,7 +421,7 @@ impl LaunchContextWith LaunchContextWith, Evm: ConfigureEvm + 'static, { - let factory = ProviderFactory::new( - self.right().clone(), - self.chain_spec(), - StaticFileProvider::read_write(self.data_dir().static_files())?, - ) - .with_prune_modes(self.prune_modes()) - .with_static_files_metrics(); + let static_file_provider = + StaticFileProviderBuilder::read_write(self.data_dir().static_files())? + .with_metrics() + .build()?; + + let factory = + ProviderFactory::new(self.right().clone(), self.chain_spec(), static_file_provider)? + .with_prune_modes(self.prune_modes()); - let has_receipt_pruning = - self.toml_config().prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); + let has_receipt_pruning = self.toml_config().prune.has_receipts_pruning(); // Check for consistency between database and static files. If it fails, it unwinds to // the first block that's consistent between database and static files. @@ -582,9 +581,8 @@ where // ensure recorder runs upkeep periodically install_prometheus_recorder().spawn_upkeep(); - let listen_addr = self.node_config().metrics; + let listen_addr = self.node_config().metrics.prometheus; if let Some(addr) = listen_addr { - info!(target: "reth::cli", "Starting metrics endpoint at {}", addr); let config = MetricServerConfig::new( addr, VersionInfo { @@ -611,7 +609,7 @@ where } }) .build(), - ); + ).with_push_gateway(self.node_config().metrics.push_gateway_url.clone(), self.node_config().metrics.push_gateway_interval); MetricServer::new(config).serve().await?; } @@ -946,40 +944,6 @@ where Ok(None) } - /// Expire the pre-merge transactions if the node is configured to do so and the chain has a - /// merge block. - /// - /// If the node is configured to prune pre-merge transactions and it has synced past the merge - /// block, it will delete the pre-merge transaction static files if they still exist. - pub fn expire_pre_merge_transactions(&self) -> eyre::Result<()> - where - T: FullNodeTypes, - { - if self.node_config().pruning.bodies_pre_merge && - let Some(merge_block) = self - .chain_spec() - .ethereum_fork_activation(EthereumHardfork::Paris) - .block_number() - { - // Ensure we only expire transactions after we synced past the merge block. - let Some(latest) = self.blockchain_db().latest_header()? else { return Ok(()) }; - if latest.number() > merge_block { - let provider = self.blockchain_db().static_file_provider(); - if provider - .get_lowest_transaction_static_file_block() - .is_some_and(|lowest| lowest < merge_block) - { - info!(target: "reth::cli", merge_block, "Expiring pre-merge transactions"); - provider.delete_transactions_below(merge_block)?; - } else { - debug!(target: "reth::cli", merge_block, "No pre-merge transactions to expire"); - } - } - } - - Ok(()) - } - /// Returns the metrics sender. 
pub fn sync_metrics_tx(&self) -> UnboundedSender { self.right().db_provider_container.metrics_sender.clone() @@ -1208,17 +1172,14 @@ mod tests { storage_history_before: None, bodies_pre_merge: false, bodies_distance: None, + #[expect(deprecated)] receipts_log_filter: None, bodies_before: None, }, ..NodeConfig::test() }; - LaunchContext::save_pruning_config_if_full_node( - &mut reth_config, - &node_config, - config_path, - ) - .unwrap(); + LaunchContext::save_pruning_config(&mut reth_config, &node_config, config_path) + .unwrap(); let loaded_config = Config::from_path(config_path).unwrap(); diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index c5b639cbb89..1d11a325308 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -126,9 +126,6 @@ impl EngineNodeLauncher { })? .with_components(components_builder, on_component_initialized).await?; - // Try to expire pre-merge transaction history if configured - ctx.expire_pre_merge_transactions()?; - // spawn exexs if any let maybe_exex_manager_handle = ctx.launch_exex(installed_exex).await?; @@ -177,7 +174,7 @@ impl EngineNodeLauncher { } let pruner = pruner_builder.build_with_provider_factory(ctx.provider_factory().clone()); let pruner_events = pruner.events(); - info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); + info!(target: "reth::cli", prune_config=?ctx.prune_config(), "Pruner initialized"); let event_sender = EventSender::default(); @@ -272,12 +269,16 @@ impl EngineNodeLauncher { let provider = ctx.blockchain_db().clone(); let (exit, rx) = oneshot::channel(); let terminate_after_backfill = ctx.terminate_after_initial_backfill(); + let startup_sync_state_idle = ctx.node_config().debug.startup_sync_state_idle; info!(target: "reth::cli", "Starting consensus engine"); ctx.task_executor().spawn_critical("consensus engine", Box::pin(async move { if let Some(initial_target) = initial_target { debug!(target: "reth::cli", %initial_target, "start backfill sync"); + // network_handle's sync state is already initialized at Syncing engine_service.orchestrator_mut().start_backfill_sync(initial_target); + } else if startup_sync_state_idle { + network_handle.update_sync_state(SyncState::Idle); } let mut res = Ok(()); @@ -287,8 +288,8 @@ impl EngineNodeLauncher { tokio::select! 
{ payload = built_payloads.select_next_some() => { if let Some(executed_block) = payload.executed_block() { - debug!(target: "reth::cli", block=?executed_block.recovered_block().num_hash(), "inserting built payload"); - engine_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block).into()); + debug!(target: "reth::cli", block=?executed_block.recovered_block.num_hash(), "inserting built payload"); + engine_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block.into_executed_payload()).into()); } } event = engine_service.next() => { @@ -300,6 +301,9 @@ impl EngineNodeLauncher { debug!(target: "reth::cli", "Terminating after initial backfill"); break } + if startup_sync_state_idle { + network_handle.update_sync_state(SyncState::Idle); + } } ChainEvent::BackfillSyncStarted => { network_handle.update_sync_state(SyncState::Syncing); diff --git a/crates/node/builder/src/launch/invalid_block_hook.rs b/crates/node/builder/src/launch/invalid_block_hook.rs index 7221077847a..3c1848dceb4 100644 --- a/crates/node/builder/src/launch/invalid_block_hook.rs +++ b/crates/node/builder/src/launch/invalid_block_hook.rs @@ -1,6 +1,7 @@ //! Invalid block hook helpers for the node builder. use crate::AddOnsContext; +use alloy_consensus::TxEnvelope; use alloy_rpc_types::{Block, Header, Receipt, Transaction, TransactionRequest}; use eyre::OptionExt; use reth_chainspec::EthChainSpec; @@ -128,10 +129,16 @@ where let client = jsonrpsee::http_client::HttpClientBuilder::default().build(url)?; // Verify that the healthy node is running the same chain as the current node. - let healthy_chain_id = - EthApiClient::::chain_id(&client) - .await? - .ok_or_eyre("healthy node rpc client didn't return a chain id")?; + let healthy_chain_id = EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TxEnvelope, + >::chain_id(&client) + .await? + .ok_or_eyre("healthy node rpc client didn't return a chain id")?; if healthy_chain_id.to::() != chain_id { eyre::bail!("Invalid chain ID. Expected {}, got {}", chain_id, healthy_chain_id); diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 2864b9f9245..995ac1c095e 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -191,7 +191,9 @@ where /// Returns the [`EngineApiClient`] interface for the authenticated engine API. /// /// This will send authenticated ws requests to the node's auth server. - pub async fn engine_ws_client(&self) -> impl EngineApiClient { + pub async fn engine_ws_client( + &self, + ) -> impl EngineApiClient + use { self.auth_server_handle().ws_client().await } @@ -199,7 +201,9 @@ where /// /// This will send not authenticated IPC requests to the node's auth server. 
#[cfg(unix)] - pub async fn engine_ipc_client(&self) -> Option> { + pub async fn engine_ipc_client( + &self, + ) -> Option + use> { self.auth_server_handle().ipc_client().await } } diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 774f508c549..115d9d5820d 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -23,8 +23,8 @@ use reth_node_core::{ version::{version_metadata, CLIENT_CODE}, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadStore}; -use reth_rpc::eth::{core::EthRpcConverterFor, EthApiTypes, FullEthApiServer}; -use reth_rpc_api::{eth::helpers::AddDevSigners, IntoEngineApiRpcModule}; +use reth_rpc::eth::{core::EthRpcConverterFor, DevSigner, EthApiTypes, FullEthApiServer}; +use reth_rpc_api::{eth::helpers::EthTransactions, IntoEngineApiRpcModule}; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, config::RethRpcServerConfig, @@ -1005,7 +1005,8 @@ where // in dev mode we generate 20 random dev-signer accounts if config.dev.dev { - registry.eth_api().with_dev_accounts(); + let signers = DevSigner::from_mnemonic(config.dev.dev_mnemonic.as_str(), 20); + registry.eth_api().signers().write().extend(signers); } let mut registry = RpcRegistry { registry }; @@ -1170,6 +1171,7 @@ impl<'a, N: FullNodeComponents: Default + Send + 'static { /// The Ethapi implementation this builder will build. type EthApi: EthApiTypes + FullEthApiServer - + AddDevSigners + Unpin + 'static; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index a4099691191..ad78ffb59a2 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -36,7 +36,7 @@ pub fn build_networked_pipeline( provider_factory: ProviderFactory, task_executor: &TaskExecutor, metrics_tx: reth_stages::MetricEventsSender, - prune_config: Option, + prune_config: PruneConfig, max_block: Option, static_file_producer: StaticFileProducer>, evm_config: Evm, @@ -85,7 +85,7 @@ pub fn build_pipeline( consensus: Arc>, max_block: Option, metrics_tx: reth_stages::MetricEventsSender, - prune_config: Option, + prune_config: PruneConfig, static_file_producer: StaticFileProducer>, evm_config: Evm, exex_manager_handle: ExExManagerHandle, @@ -106,8 +106,6 @@ where let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let prune_modes = prune_config.map(|prune| prune.segments).unwrap_or_default(); - let pipeline = builder .with_tip_sender(tip_tx) .with_metrics_tx(metrics_tx) @@ -120,7 +118,7 @@ where body_downloader, evm_config.clone(), stage_config.clone(), - prune_modes, + prune_config.segments, era_import_source, ) .set(ExecutionStage::new( diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 2240fa98837..e2852e01a81 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -45,22 +45,22 @@ alloy-eips.workspace = true # misc eyre.workspace = true -clap = { workspace = true, features = ["derive"] } +clap = { workspace = true, features = ["derive", "env"] } humantime.workspace = true rand.workspace = true derive_more.workspace = true toml.workspace = true serde.workspace = true strum = { workspace = true, features = ["derive"] } -thiserror.workspace = true url.workspace = true # io dirs-next.workspace = true shellexpand.workspace = true -# tracing +# obs tracing.workspace = true +reth-tracing-otlp.workspace = true # crypto secp256k1 = { workspace = true, features = ["global-context", "std", "recovery"] } @@ -77,6 +77,14 @@ tokio.workspace = true # Features for vergen to generate 
correct env vars jemalloc = ["reth-cli-util/jemalloc"] asm-keccak = ["alloy-primitives/asm-keccak"] +# Feature to enable opentelemetry export +otlp = ["reth-tracing/otlp"] + +min-error-logs = ["tracing/release_max_level_error"] +min-warn-logs = ["tracing/release_max_level_warn"] +min-info-logs = ["tracing/release_max_level_info"] +min-debug-logs = ["tracing/release_max_level_debug"] +min-trace-logs = ["tracing/release_max_level_trace"] [build-dependencies] vergen = { workspace = true, features = ["build", "cargo", "emit_and_set"] } diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index 09b8f15ef68..6384f36a806 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -6,9 +6,12 @@ use crate::version::default_client_version; use clap::{ builder::{PossibleValue, TypedValueParser}, error::ErrorKind, - Arg, Args, Command, Error, + value_parser, Arg, Args, Command, Error, +}; +use reth_db::{ + mdbx::{MaxReadTransactionDuration, SyncMode}, + ClientVersion, }; -use reth_db::{mdbx::MaxReadTransactionDuration, ClientVersion}; use reth_storage_errors::db::LogLevel; /// Parameters for database configuration @@ -22,7 +25,12 @@ pub struct DatabaseArgs { /// NFS volume. #[arg(long = "db.exclusive")] pub exclusive: Option, - /// Maximum database size (e.g., 4TB, 8MB) + /// Maximum database size (e.g., 4TB, 8TB). + /// + /// This sets the "map size" of the database. If the database grows beyond this + /// limit, the node will stop with an "environment map size limit reached" error. + /// + /// The default value is 8TB. #[arg(long = "db.max-size", value_parser = parse_byte_size)] pub max_size: Option, /// Database growth step (e.g., 4GB, 4KB) @@ -34,6 +42,12 @@ pub struct DatabaseArgs { /// Maximum number of readers allowed to access the database concurrently. #[arg(long = "db.max-readers")] pub max_readers: Option, + /// Controls how aggressively the database synchronizes data to disk. 
+ #[arg( + long = "db.sync-mode", + value_parser = value_parser!(SyncMode), + )] + pub sync_mode: Option, } impl DatabaseArgs { @@ -61,6 +75,7 @@ impl DatabaseArgs { .with_geometry_max_size(self.max_size) .with_growth_step(self.growth_step) .with_max_readers(self.max_readers) + .with_sync_mode(self.sync_mode) } } @@ -340,4 +355,36 @@ mod tests { let cmd = CommandParser::::try_parse_from(["reth"]).unwrap(); assert_eq!(cmd.args.log_level, None); } + + #[test] + fn test_command_parser_with_valid_default_sync_mode() { + let cmd = CommandParser::::try_parse_from(["reth"]).unwrap(); + assert!(cmd.args.sync_mode.is_none()); + } + + #[test] + fn test_command_parser_with_valid_sync_mode_durable() { + let cmd = + CommandParser::::try_parse_from(["reth", "--db.sync-mode", "durable"]) + .unwrap(); + assert!(matches!(cmd.args.sync_mode, Some(SyncMode::Durable))); + } + + #[test] + fn test_command_parser_with_valid_sync_mode_safe_no_sync() { + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.sync-mode", + "safe-no-sync", + ]) + .unwrap(); + assert!(matches!(cmd.args.sync_mode, Some(SyncMode::SafeNoSync))); + } + + #[test] + fn test_command_parser_with_invalid_sync_mode() { + let result = + CommandParser::::try_parse_from(["reth", "--db.sync-mode", "ultra-fast"]); + assert!(result.is_err()); + } } diff --git a/crates/node/core/src/args/debug.rs b/crates/node/core/src/args/debug.rs index 13d7685b055..b5d1fb3f7d8 100644 --- a/crates/node/core/src/args/debug.rs +++ b/crates/node/core/src/args/debug.rs @@ -101,6 +101,13 @@ pub struct DebugArgs { /// Example: `nodename:secret@host:port` #[arg(long = "ethstats", help_heading = "Debug")] pub ethstats: Option, + + /// Set the node to idle state when the backfill is not running. + /// + /// This makes the `eth_syncing` RPC return "Idle" when the node has just started or finished + /// the backfill, but did not yet receive any new blocks. + #[arg(long = "debug.startup-sync-state-idle", help_heading = "Debug")] + pub startup_sync_state_idle: bool, } impl Default for DebugArgs { @@ -119,6 +126,7 @@ impl Default for DebugArgs { invalid_block_hook: Some(InvalidBlockSelection::default()), healthy_node_rpc_url: None, ethstats: None, + startup_sync_state_idle: false, } } } diff --git a/crates/node/core/src/args/dev.rs b/crates/node/core/src/args/dev.rs index b6a01745257..d62ff1c5dce 100644 --- a/crates/node/core/src/args/dev.rs +++ b/crates/node/core/src/args/dev.rs @@ -5,8 +5,10 @@ use std::time::Duration; use clap::Args; use humantime::parse_duration; +const DEFAULT_MNEMONIC: &str = "test test test test test test test test test test test junk"; + /// Parameters for Dev testnet configuration -#[derive(Debug, Args, PartialEq, Eq, Default, Clone, Copy)] +#[derive(Debug, Args, PartialEq, Eq, Clone)] #[command(next_help_heading = "Dev testnet")] pub struct DevArgs { /// Start the node in dev mode @@ -39,6 +41,28 @@ pub struct DevArgs { verbatim_doc_comment )] pub block_time: Option, + + /// Derive dev accounts from a fixed mnemonic instead of random ones. 
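For context, the mnemonic configured by the flag declared just below feeds the dev signer wiring shown in the rpc.rs hunk earlier in this diff. A minimal sketch of that flow, with the helper names taken from that hunk (20 accounts, as there):

    // Sketch only, mirroring the rpc.rs change above rather than new API surface.
    let signers = DevSigner::from_mnemonic(config.dev.dev_mnemonic.as_str(), 20);
    registry.eth_api().signers().write().extend(signers);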
+ #[arg( + long = "dev.mnemonic", + help_heading = "Dev testnet", + value_name = "MNEMONIC", + requires = "dev", + verbatim_doc_comment, + default_value = DEFAULT_MNEMONIC + )] + pub dev_mnemonic: String, +} + +impl Default for DevArgs { + fn default() -> Self { + Self { + dev: false, + block_max_transactions: None, + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + } } #[cfg(test)] @@ -56,13 +80,37 @@ mod tests { #[test] fn test_parse_dev_args() { let args = CommandParser::::parse_from(["reth"]).args; - assert_eq!(args, DevArgs { dev: false, block_max_transactions: None, block_time: None }); + assert_eq!( + args, + DevArgs { + dev: false, + block_max_transactions: None, + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + ); let args = CommandParser::::parse_from(["reth", "--dev"]).args; - assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None }); + assert_eq!( + args, + DevArgs { + dev: true, + block_max_transactions: None, + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + ); let args = CommandParser::::parse_from(["reth", "--auto-mine"]).args; - assert_eq!(args, DevArgs { dev: true, block_max_transactions: None, block_time: None }); + assert_eq!( + args, + DevArgs { + dev: true, + block_max_transactions: None, + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + ); let args = CommandParser::::parse_from([ "reth", @@ -71,7 +119,15 @@ mod tests { "2", ]) .args; - assert_eq!(args, DevArgs { dev: true, block_max_transactions: Some(2), block_time: None }); + assert_eq!( + args, + DevArgs { + dev: true, + block_max_transactions: Some(2), + block_time: None, + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), + } + ); let args = CommandParser::::parse_from(["reth", "--dev", "--dev.block-time", "1s"]).args; @@ -80,7 +136,8 @@ mod tests { DevArgs { dev: true, block_max_transactions: None, - block_time: Some(std::time::Duration::from_secs(1)) + block_time: Some(std::time::Duration::from_secs(1)), + dev_mnemonic: DEFAULT_MNEMONIC.to_string(), } ); } diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index 88179a6b40e..29535f2c1df 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -4,8 +4,8 @@ use clap::Args; use reth_engine_primitives::{TreeConfig, DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE}; use crate::node_config::{ - DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, DEFAULT_MAX_PROOF_TASK_CONCURRENCY, - DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, + DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, + DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, }; /// Parameters for configuring the engine driver. @@ -30,9 +30,9 @@ pub struct EngineArgs { #[deprecated] pub caching_and_prewarming_enabled: bool, - /// Disable cross-block caching and parallel prewarming - #[arg(long = "engine.disable-caching-and-prewarming")] - pub caching_and_prewarming_disabled: bool, + /// Disable parallel prewarming + #[arg(long = "engine.disable-prewarming", alias = "engine.disable-caching-and-prewarming")] + pub prewarming_disabled: bool, /// CAUTION: This CLI flag has no effect anymore, use --engine.disable-parallel-sparse-trie /// if you want to disable usage of the `ParallelSparseTrie`. 
@@ -63,10 +63,6 @@ pub struct EngineArgs { #[arg(long = "engine.accept-execution-requests-hash")] pub accept_execution_requests_hash: bool, - /// Configure the maximum number of concurrent proof tasks - #[arg(long = "engine.max-proof-task-concurrency", default_value_t = DEFAULT_MAX_PROOF_TASK_CONCURRENCY)] - pub max_proof_task_concurrency: u64, - /// Whether multiproof task should chunk proof targets. #[arg(long = "engine.multiproof-chunking", default_value = "true")] pub multiproof_chunking_enabled: bool, @@ -108,6 +104,16 @@ pub struct EngineArgs { /// See `TreeConfig::unwind_canonical_header` for more details. #[arg(long = "engine.allow-unwind-canonical-header", default_value = "false")] pub allow_unwind_canonical_header: bool, + + /// Configure the number of storage proof workers in the Tokio blocking pool. + /// If not specified, defaults to 2x available parallelism, clamped between 2 and 64. + #[arg(long = "engine.storage-worker-count")] + pub storage_worker_count: Option, + + /// Configure the number of account proof workers in the Tokio blocking pool. + /// If not specified, defaults to the same count as storage workers. + #[arg(long = "engine.account-worker-count")] + pub account_worker_count: Option, } #[allow(deprecated)] @@ -119,13 +125,12 @@ impl Default for EngineArgs { legacy_state_root_task_enabled: false, state_root_task_compare_updates: false, caching_and_prewarming_enabled: true, - caching_and_prewarming_disabled: false, + prewarming_disabled: false, parallel_sparse_trie_enabled: true, parallel_sparse_trie_disabled: false, state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, accept_execution_requests_hash: false, - max_proof_task_concurrency: DEFAULT_MAX_PROOF_TASK_CONCURRENCY, multiproof_chunking_enabled: true, multiproof_chunk_size: DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE, reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES, @@ -134,6 +139,8 @@ impl Default for EngineArgs { state_root_fallback: false, always_process_payload_attributes_on_canonical_head: false, allow_unwind_canonical_header: false, + storage_worker_count: None, + account_worker_count: None, } } } @@ -141,16 +148,15 @@ impl Default for EngineArgs { impl EngineArgs { /// Creates a [`TreeConfig`] from the engine arguments. 
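When the two worker-count flags above are left unset, their doc comments say the storage count falls back to twice the available parallelism clamped to [2, 64], with the account count mirroring it. A rough illustration of that documented fallback (this is not the code path touched by this PR, only the stated rule):

    // Illustration of the documented default, not the actual implementation site.
    let storage_workers = std::thread::available_parallelism()
        .map(|n| (n.get() * 2).clamp(2, 64))
        .unwrap_or(2);
    let account_workers = storage_workers;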
pub fn tree_config(&self) -> TreeConfig { - TreeConfig::default() + let mut config = TreeConfig::default() .with_persistence_threshold(self.persistence_threshold) .with_memory_block_buffer_target(self.memory_block_buffer_target) .with_legacy_state_root(self.legacy_state_root_task_enabled) - .without_caching_and_prewarming(self.caching_and_prewarming_disabled) + .without_prewarming(self.prewarming_disabled) .with_disable_parallel_sparse_trie(self.parallel_sparse_trie_disabled) .with_state_provider_metrics(self.state_provider_metrics) .with_always_compare_trie_updates(self.state_root_task_compare_updates) .with_cross_block_cache_size(self.cross_block_cache_size * 1024 * 1024) - .with_max_proof_task_concurrency(self.max_proof_task_concurrency) .with_multiproof_chunking_enabled(self.multiproof_chunking_enabled) .with_multiproof_chunk_size(self.multiproof_chunk_size) .with_reserved_cpu_cores(self.reserved_cpu_cores) @@ -159,7 +165,17 @@ impl EngineArgs { .with_always_process_payload_attributes_on_canonical_head( self.always_process_payload_attributes_on_canonical_head, ) - .with_unwind_canonical_header(self.allow_unwind_canonical_header) + .with_unwind_canonical_header(self.allow_unwind_canonical_header); + + if let Some(count) = self.storage_worker_count { + config = config.with_storage_worker_count(count); + } + + if let Some(count) = self.account_worker_count { + config = config.with_account_worker_count(count); + } + + config } } diff --git a/crates/node/core/src/args/error.rs b/crates/node/core/src/args/error.rs deleted file mode 100644 index 163c063cd74..00000000000 --- a/crates/node/core/src/args/error.rs +++ /dev/null @@ -1,22 +0,0 @@ -use std::num::ParseIntError; - -/// Error while parsing a `ReceiptsLogPruneConfig` -#[derive(thiserror::Error, Debug)] -#[expect(clippy::enum_variant_names)] -pub(crate) enum ReceiptsLogError { - /// The format of the filter is invalid. - #[error("invalid filter format: {0}")] - InvalidFilterFormat(String), - /// Address is invalid. - #[error("address is invalid: {0}")] - InvalidAddress(String), - /// The prune mode is not one of full, distance, before. - #[error("prune mode is invalid: {0}")] - InvalidPruneMode(String), - /// The distance value supplied is invalid. - #[error("distance is invalid: {0}")] - InvalidDistance(ParseIntError), - /// The block number supplied is invalid. - #[error("block number is invalid: {0}")] - InvalidBlockNumber(ParseIntError), -} diff --git a/crates/node/core/src/args/log.rs b/crates/node/core/src/args/log.rs index 1236984fac0..20c60362d7b 100644 --- a/crates/node/core/src/args/log.rs +++ b/crates/node/core/src/args/log.rs @@ -70,6 +70,7 @@ pub struct LogArgs { default_value_t = ColorMode::Always )] pub color: ColorMode, + /// The verbosity settings for the tracer. #[command(flatten)] pub verbosity: Verbosity, @@ -138,7 +139,7 @@ impl LogArgs { pub enum ColorMode { /// Colors on Always, - /// Colors on + /// Auto-detect Auto, /// Colors off Never, diff --git a/crates/node/core/src/args/metric.rs b/crates/node/core/src/args/metric.rs new file mode 100644 index 00000000000..5ef18787a81 --- /dev/null +++ b/crates/node/core/src/args/metric.rs @@ -0,0 +1,35 @@ +use clap::Parser; +use reth_cli_util::{parse_duration_from_secs, parse_socket_address}; +use std::{net::SocketAddr, time::Duration}; + +/// Metrics configuration. +#[derive(Debug, Clone, Default, Parser)] +pub struct MetricArgs { + /// Enable Prometheus metrics. + /// + /// The metrics will be served at the given interface and port. 
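Besides the Prometheus listen address, the fields declared just below add push-gateway support. A hedged parse example (MetricArgs derives clap::Parser, so it can be parsed standalone here; the gateway URL is just a conventional Prometheus pushgateway path, not something this PR prescribes):

    use clap::Parser; // for try_parse_from
    use std::time::Duration;

    let args = MetricArgs::try_parse_from([
        "reth",
        "--metrics", "127.0.0.1:9001",
        "--metrics.prometheus.push.url", "http://localhost:9091/metrics/job/reth",
        "--metrics.prometheus.push.interval", "10",
    ])
    .unwrap();
    // parse_duration_from_secs turns the plain "10" into a 10 second interval.
    assert_eq!(args.push_gateway_interval, Duration::from_secs(10));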
+ #[arg(long="metrics", alias = "metrics.prometheus", value_name = "PROMETHEUS", value_parser = parse_socket_address, help_heading = "Metrics")] + pub prometheus: Option, + + /// URL for pushing Prometheus metrics to a push gateway. + /// + /// If set, the node will periodically push metrics to the specified push gateway URL. + #[arg( + long = "metrics.prometheus.push.url", + value_name = "PUSH_GATEWAY_URL", + help_heading = "Metrics" + )] + pub push_gateway_url: Option, + + /// Interval in seconds for pushing metrics to push gateway. + /// + /// Default: 5 seconds + #[arg( + long = "metrics.prometheus.push.interval", + default_value = "5", + value_parser = parse_duration_from_secs, + value_name = "SECONDS", + help_heading = "Metrics" + )] + pub push_gateway_interval: Duration, +} diff --git a/crates/node/core/src/args/mod.rs b/crates/node/core/src/args/mod.rs index 6799fe418dc..17584a913ce 100644 --- a/crates/node/core/src/args/mod.rs +++ b/crates/node/core/src/args/mod.rs @@ -24,6 +24,14 @@ pub use database::DatabaseArgs; mod log; pub use log::{ColorMode, LogArgs, Verbosity}; +/// `TraceArgs` for tracing and spans support +mod trace; +pub use trace::TraceArgs; + +/// `MetricArgs` to configure metrics. +mod metric; +pub use metric::MetricArgs; + /// `PayloadBuilderArgs` struct for configuring the payload builder mod payload_builder; pub use payload_builder::PayloadBuilderArgs; @@ -68,5 +76,4 @@ pub use ress_args::RessArgs; mod era; pub use era::{DefaultEraHost, EraArgs, EraSourceArgs}; -mod error; pub mod types; diff --git a/crates/node/core/src/args/network.rs b/crates/node/core/src/args/network.rs index a32f14edd41..4e57839e3ec 100644 --- a/crates/node/core/src/args/network.rs +++ b/crates/node/core/src/args/network.rs @@ -100,11 +100,11 @@ pub struct NetworkArgs { #[arg(long = "port", value_name = "PORT", default_value_t = DEFAULT_DISCOVERY_PORT)] pub port: u16, - /// Maximum number of outbound requests. default: 100 + /// Maximum number of outbound peers. default: 100 #[arg(long)] pub max_outbound_peers: Option, - /// Maximum number of inbound requests. default: 30 + /// Maximum number of inbound peers. default: 30 #[arg(long)] pub max_inbound_peers: Option, @@ -184,6 +184,10 @@ pub struct NetworkArgs { /// Peers that don't have these blocks will be filtered out. #[arg(long = "required-block-hashes", value_delimiter = ',')] pub required_block_hashes: Vec, + + /// Optional network ID to override the chain specification's network ID for P2P connections + #[arg(long)] + pub network_id: Option, } impl NetworkArgs { @@ -297,6 +301,7 @@ impl NetworkArgs { )) .disable_tx_gossip(self.disable_tx_gossip) .required_block_hashes(self.required_block_hashes.clone()) + .network_id(self.network_id) } /// If `no_persist_peers` is false then this returns the path to the persistent peers file path. @@ -371,6 +376,7 @@ impl Default for NetworkArgs { disable_tx_gossip: false, propagation_mode: TransactionPropagationMode::Sqrt, required_block_hashes: vec![], + network_id: None, } } } diff --git a/crates/node/core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs index d658241c21c..8b92e35f7ed 100644 --- a/crates/node/core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -17,7 +17,7 @@ pub struct PayloadBuilderArgs { pub extra_data: String, /// Target gas limit for built blocks. 
- #[arg(long = "builder.gaslimit", value_name = "GAS_LIMIT")] + #[arg(long = "builder.gaslimit", alias = "miner.gaslimit", value_name = "GAS_LIMIT")] pub gas_limit: Option, /// The interval at which the job should build a new payload after the last. diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index e96245350fd..0bd72e207ea 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -1,12 +1,13 @@ //! Pruning and full node arguments -use crate::{args::error::ReceiptsLogError, primitives::EthereumHardfork}; -use alloy_primitives::{Address, BlockNumber}; +use std::ops::Not; + +use crate::primitives::EthereumHardfork; +use alloy_primitives::BlockNumber; use clap::{builder::RangedU64ValueParser, Args}; use reth_chainspec::EthereumHardforks; use reth_config::config::PruneConfig; -use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE}; -use std::collections::BTreeMap; +use reth_prune_types::{PruneMode, PruneModes, MINIMUM_PRUNING_DISTANCE}; /// Parameters for pruning and full node #[derive(Debug, Clone, Args, PartialEq, Eq, Default)] @@ -17,33 +18,33 @@ pub struct PruningArgs { pub full: bool, /// Minimum pruning interval measured in blocks. - #[arg(long, value_parser = RangedU64ValueParser::::new().range(1..),)] + #[arg(long = "prune.block-interval", alias = "block-interval", value_parser = RangedU64ValueParser::::new().range(1..))] pub block_interval: Option, // Sender Recovery /// Prunes all sender recovery data. - #[arg(long = "prune.senderrecovery.full", conflicts_with_all = &["sender_recovery_distance", "sender_recovery_before"])] + #[arg(long = "prune.sender-recovery.full", alias = "prune.senderrecovery.full", conflicts_with_all = &["sender_recovery_distance", "sender_recovery_before"])] pub sender_recovery_full: bool, /// Prune sender recovery data before the `head-N` block number. In other words, keep last N + /// 1 blocks. - #[arg(long = "prune.senderrecovery.distance", value_name = "BLOCKS", conflicts_with_all = &["sender_recovery_full", "sender_recovery_before"])] + #[arg(long = "prune.sender-recovery.distance", alias = "prune.senderrecovery.distance", value_name = "BLOCKS", conflicts_with_all = &["sender_recovery_full", "sender_recovery_before"])] pub sender_recovery_distance: Option, /// Prune sender recovery data before the specified block number. The specified block number is /// not pruned. - #[arg(long = "prune.senderrecovery.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["sender_recovery_full", "sender_recovery_distance"])] + #[arg(long = "prune.sender-recovery.before", alias = "prune.senderrecovery.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["sender_recovery_full", "sender_recovery_distance"])] pub sender_recovery_before: Option, // Transaction Lookup /// Prunes all transaction lookup data. - #[arg(long = "prune.transactionlookup.full", conflicts_with_all = &["transaction_lookup_distance", "transaction_lookup_before"])] + #[arg(long = "prune.transaction-lookup.full", alias = "prune.transactionlookup.full", conflicts_with_all = &["transaction_lookup_distance", "transaction_lookup_before"])] pub transaction_lookup_full: bool, /// Prune transaction lookup data before the `head-N` block number. In other words, keep last N /// + 1 blocks. 
- #[arg(long = "prune.transactionlookup.distance", value_name = "BLOCKS", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_before"])] + #[arg(long = "prune.transaction-lookup.distance", alias = "prune.transactionlookup.distance", value_name = "BLOCKS", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_before"])] pub transaction_lookup_distance: Option, /// Prune transaction lookup data before the specified block number. The specified block number /// is not pruned. - #[arg(long = "prune.transactionlookup.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_distance"])] + #[arg(long = "prune.transaction-lookup.before", alias = "prune.transactionlookup.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_distance"])] pub transaction_lookup_before: Option, // Receipts @@ -59,36 +60,39 @@ pub struct PruningArgs { /// Prune receipts before the specified block number. The specified block number is not pruned. #[arg(long = "prune.receipts.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance"])] pub receipts_before: Option, - // Receipts Log Filter - /// Configure receipts log filter. Format: - /// <`address`>:<`prune_mode`>... where <`prune_mode`> can be 'full', 'distance:<`blocks`>', or - /// 'before:<`block_number`>' - #[arg(long = "prune.receiptslogfilter", value_name = "FILTER_CONFIG", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance", "receipts_before"], value_parser = parse_receipts_log_filter)] - pub receipts_log_filter: Option, + /// Receipts Log Filter + #[arg( + long = "prune.receipts-log-filter", + alias = "prune.receiptslogfilter", + value_name = "FILTER_CONFIG", + hide = true + )] + #[deprecated] + pub receipts_log_filter: Option, // Account History /// Prunes all account history. - #[arg(long = "prune.accounthistory.full", conflicts_with_all = &["account_history_distance", "account_history_before"])] + #[arg(long = "prune.account-history.full", alias = "prune.accounthistory.full", conflicts_with_all = &["account_history_distance", "account_history_before"])] pub account_history_full: bool, /// Prune account before the `head-N` block number. In other words, keep last N + 1 blocks. - #[arg(long = "prune.accounthistory.distance", value_name = "BLOCKS", conflicts_with_all = &["account_history_full", "account_history_before"])] + #[arg(long = "prune.account-history.distance", alias = "prune.accounthistory.distance", value_name = "BLOCKS", conflicts_with_all = &["account_history_full", "account_history_before"])] pub account_history_distance: Option, /// Prune account history before the specified block number. The specified block number is not /// pruned. - #[arg(long = "prune.accounthistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["account_history_full", "account_history_distance"])] + #[arg(long = "prune.account-history.before", alias = "prune.accounthistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["account_history_full", "account_history_distance"])] pub account_history_before: Option, // Storage History /// Prunes all storage history data. 
- #[arg(long = "prune.storagehistory.full", conflicts_with_all = &["storage_history_distance", "storage_history_before"])] + #[arg(long = "prune.storage-history.full", alias = "prune.storagehistory.full", conflicts_with_all = &["storage_history_distance", "storage_history_before"])] pub storage_history_full: bool, /// Prune storage history before the `head-N` block number. In other words, keep last N + 1 /// blocks. - #[arg(long = "prune.storagehistory.distance", value_name = "BLOCKS", conflicts_with_all = &["storage_history_full", "storage_history_before"])] + #[arg(long = "prune.storage-history.distance", alias = "prune.storagehistory.distance", value_name = "BLOCKS", conflicts_with_all = &["storage_history_full", "storage_history_before"])] pub storage_history_distance: Option, /// Prune storage history before the specified block number. The specified block number is not /// pruned. - #[arg(long = "prune.storagehistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["storage_history_full", "storage_history_distance"])] + #[arg(long = "prune.storage-history.before", alias = "prune.storagehistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["storage_history_full", "storage_history_distance"])] pub storage_history_before: Option, // Bodies @@ -107,6 +111,9 @@ pub struct PruningArgs { impl PruningArgs { /// Returns pruning configuration. + /// + /// Returns [`None`] if no parameters are specified and default pruning configuration should be + /// used. pub fn prune_config(&self, chain_spec: &ChainSpec) -> Option where ChainSpec: EthereumHardforks, @@ -124,9 +131,13 @@ impl PruningArgs { receipts: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - // TODO: set default to pre-merge block if available - bodies_history: None, - receipts_log_filter: Default::default(), + bodies_history: chain_spec + .ethereum_fork_activation(EthereumHardfork::Paris) + .block_number() + .map(PruneMode::Before), + merkle_changesets: PruneMode::Distance(MINIMUM_PRUNING_DISTANCE), + #[expect(deprecated)] + receipts_log_filter: (), }, } } @@ -153,16 +164,17 @@ impl PruningArgs { if let Some(mode) = self.storage_history_prune_mode() { config.segments.storage_history = Some(mode); } - if let Some(receipt_logs) = - self.receipts_log_filter.as_ref().filter(|c| !c.is_empty()).cloned() - { - config.segments.receipts_log_filter = receipt_logs; - // need to remove the receipts segment filter entirely because that takes precedence - // over the logs filter - config.segments.receipts.take(); + + // Log warning if receipts_log_filter is set (deprecated feature) + #[expect(deprecated)] + if self.receipts_log_filter.is_some() { + tracing::warn!( + target: "reth::cli", + "The --prune.receiptslogfilter flag is deprecated and has no effect. It will be removed in a future release." + ); } - Some(config) + config.is_default().not().then_some(config) } fn bodies_prune_mode(&self, chain_spec: &ChainSpec) -> Option @@ -247,141 +259,3 @@ impl PruningArgs { } } } - -/// Parses `,` separated pruning info into [`ReceiptsLogPruneConfig`]. -pub(crate) fn parse_receipts_log_filter( - value: &str, -) -> Result { - let mut config = BTreeMap::new(); - // Split out each of the filters. 
- let filters = value.split(','); - for filter in filters { - let parts: Vec<&str> = filter.split(':').collect(); - if parts.len() < 2 { - return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string())); - } - // Parse the address - let address = parts[0] - .parse::
() - .map_err(|_| ReceiptsLogError::InvalidAddress(parts[0].to_string()))?; - - // Parse the prune mode - let prune_mode = match parts[1] { - "full" => PruneMode::Full, - s if s.starts_with("distance") => { - if parts.len() < 3 { - return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string())); - } - let distance = - parts[2].parse::().map_err(ReceiptsLogError::InvalidDistance)?; - PruneMode::Distance(distance) - } - s if s.starts_with("before") => { - if parts.len() < 3 { - return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string())); - } - let block_number = - parts[2].parse::().map_err(ReceiptsLogError::InvalidBlockNumber)?; - PruneMode::Before(block_number) - } - _ => return Err(ReceiptsLogError::InvalidPruneMode(parts[1].to_string())), - }; - config.insert(address, prune_mode); - } - Ok(ReceiptsLogPruneConfig(config)) -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::address; - use clap::Parser; - - /// A helper type to parse Args more easily - #[derive(Parser)] - struct CommandParser { - #[command(flatten)] - args: T, - } - - #[test] - fn pruning_args_sanity_check() { - let args = CommandParser::::parse_from([ - "reth", - "--prune.receiptslogfilter", - "0x0000000000000000000000000000000000000003:before:5000000", - ]) - .args; - let mut config = ReceiptsLogPruneConfig::default(); - config.0.insert( - address!("0x0000000000000000000000000000000000000003"), - PruneMode::Before(5000000), - ); - assert_eq!(args.receipts_log_filter, Some(config)); - } - - #[test] - fn parse_receiptslogfilter() { - let default_args = PruningArgs::default(); - let args = CommandParser::::parse_from(["reth"]).args; - assert_eq!(args, default_args); - } - - #[test] - fn test_parse_receipts_log_filter() { - let filter1 = "0x0000000000000000000000000000000000000001:full"; - let filter2 = "0x0000000000000000000000000000000000000002:distance:1000"; - let filter3 = "0x0000000000000000000000000000000000000003:before:5000000"; - let filters = [filter1, filter2, filter3].join(","); - - // Args can be parsed. - let result = parse_receipts_log_filter(&filters); - assert!(result.is_ok()); - let config = result.unwrap(); - assert_eq!(config.0.len(), 3); - - // Check that the args were parsed correctly. 
- let addr1: Address = "0x0000000000000000000000000000000000000001".parse().unwrap(); - let addr2: Address = "0x0000000000000000000000000000000000000002".parse().unwrap(); - let addr3: Address = "0x0000000000000000000000000000000000000003".parse().unwrap(); - - assert_eq!(config.0.get(&addr1), Some(&PruneMode::Full)); - assert_eq!(config.0.get(&addr2), Some(&PruneMode::Distance(1000))); - assert_eq!(config.0.get(&addr3), Some(&PruneMode::Before(5000000))); - } - - #[test] - fn test_parse_receipts_log_filter_invalid_filter_format() { - let result = parse_receipts_log_filter("invalid_format"); - assert!(matches!(result, Err(ReceiptsLogError::InvalidFilterFormat(_)))); - } - - #[test] - fn test_parse_receipts_log_filter_invalid_address() { - let result = parse_receipts_log_filter("invalid_address:full"); - assert!(matches!(result, Err(ReceiptsLogError::InvalidAddress(_)))); - } - - #[test] - fn test_parse_receipts_log_filter_invalid_prune_mode() { - let result = - parse_receipts_log_filter("0x0000000000000000000000000000000000000000:invalid_mode"); - assert!(matches!(result, Err(ReceiptsLogError::InvalidPruneMode(_)))); - } - - #[test] - fn test_parse_receipts_log_filter_invalid_distance() { - let result = parse_receipts_log_filter( - "0x0000000000000000000000000000000000000000:distance:invalid_distance", - ); - assert!(matches!(result, Err(ReceiptsLogError::InvalidDistance(_)))); - } - - #[test] - fn test_parse_receipts_log_filter_invalid_block_number() { - let result = parse_receipts_log_filter( - "0x0000000000000000000000000000000000000000:before:invalid_block", - ); - assert!(matches!(result, Err(ReceiptsLogError::InvalidBlockNumber(_)))); - } -} diff --git a/crates/node/core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs index 58a1c388e4e..f4930db9f9b 100644 --- a/crates/node/core/src/args/rpc_server.rs +++ b/crates/node/core/src/args/rpc_server.rs @@ -188,6 +188,16 @@ pub struct RpcServerArgs { )] pub rpc_gas_cap: u64, + /// Maximum memory the EVM can allocate per RPC request. + #[arg( + long = "rpc.evm-memory-limit", + alias = "rpc-evm-memory-limit", + value_name = "MEMORY_LIMIT", + value_parser = MaxOr::new(RangedU64ValueParser::::new().range(1..)), + default_value_t = (1 << 32) - 1 + )] + pub rpc_evm_memory_limit: u64, + /// Maximum eth transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap) #[arg( long = "rpc.txfeecap", @@ -408,6 +418,7 @@ impl Default for RpcServerArgs { rpc_max_blocks_per_filter: constants::DEFAULT_MAX_BLOCKS_PER_FILTER.into(), rpc_max_logs_per_response: (constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64).into(), rpc_gas_cap: constants::gas_oracle::RPC_DEFAULT_GAS_CAP, + rpc_evm_memory_limit: (1 << 32) - 1, rpc_tx_fee_cap: constants::DEFAULT_TX_FEE_CAP_WEI, rpc_max_simulate_blocks: constants::DEFAULT_MAX_SIMULATE_BLOCKS, rpc_eth_proof_window: constants::DEFAULT_ETH_PROOF_WINDOW, diff --git a/crates/node/core/src/args/stage.rs b/crates/node/core/src/args/stage.rs index 337f5a4a60b..7718fb85605 100644 --- a/crates/node/core/src/args/stage.rs +++ b/crates/node/core/src/args/stage.rs @@ -38,6 +38,11 @@ pub enum StageEnum { /// /// Handles Merkle tree-related computations and data processing. Merkle, + /// The merkle changesets stage within the pipeline. + /// + /// Handles Merkle trie changesets for storage and accounts. + #[value(name = "merkle-changesets")] + MerkleChangeSets, /// The transaction lookup stage within the pipeline. /// /// Deals with the retrieval and processing of transactions. 
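One note on the `--rpc.evm-memory-limit` default introduced above: `(1 << 32) - 1` is u32::MAX bytes, i.e. just under 4 GiB of EVM memory per RPC request. A quick check:

    // The default memory cap in bytes.
    assert_eq!((1u64 << 32) - 1, 4_294_967_295); // == u32::MAX as u64, roughly 4 GiB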
diff --git a/crates/node/core/src/args/trace.rs b/crates/node/core/src/args/trace.rs new file mode 100644 index 00000000000..5b5e21502d1 --- /dev/null +++ b/crates/node/core/src/args/trace.rs @@ -0,0 +1,89 @@ +//! Opentelemetry tracing configuration through CLI args. + +use clap::Parser; +use eyre::WrapErr; +use reth_tracing::tracing_subscriber::EnvFilter; +use reth_tracing_otlp::OtlpProtocol; +use url::Url; + +/// CLI arguments for configuring `Opentelemetry` trace and span export. +#[derive(Debug, Clone, Parser)] +pub struct TraceArgs { + /// Enable `Opentelemetry` tracing export to an OTLP endpoint. + /// + /// If no value provided, defaults based on protocol: + /// - HTTP: `http://localhost:4318/v1/traces` + /// - gRPC: `http://localhost:4317` + /// + /// Example: --tracing-otlp=http://collector:4318/v1/traces + #[arg( + long = "tracing-otlp", + // Per specification. + env = "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", + global = true, + value_name = "URL", + num_args = 0..=1, + default_missing_value = "http://localhost:4318/v1/traces", + require_equals = true, + value_parser = parse_otlp_endpoint, + help_heading = "Tracing" + )] + pub otlp: Option, + + /// OTLP transport protocol to use for exporting traces. + /// + /// - `http`: expects endpoint path to end with `/v1/traces` + /// - `grpc`: expects endpoint without a path + /// + /// Defaults to HTTP if not specified. + #[arg( + long = "tracing-otlp-protocol", + env = "OTEL_EXPORTER_OTLP_PROTOCOL", + global = true, + value_name = "PROTOCOL", + default_value = "http", + help_heading = "Tracing" + )] + pub protocol: OtlpProtocol, + + /// Set a filter directive for the OTLP tracer. This controls the verbosity + /// of spans and events sent to the OTLP endpoint. It follows the same + /// syntax as the `RUST_LOG` environment variable. + /// + /// Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + /// + /// Defaults to DEBUG if not specified. + #[arg( + long = "tracing-otlp.filter", + global = true, + value_name = "FILTER", + default_value = "debug", + help_heading = "Tracing" + )] + pub otlp_filter: EnvFilter, +} + +impl Default for TraceArgs { + fn default() -> Self { + Self { + otlp: None, + protocol: OtlpProtocol::Http, + otlp_filter: EnvFilter::from_default_env(), + } + } +} + +impl TraceArgs { + /// Validate the configuration + pub fn validate(&mut self) -> eyre::Result<()> { + if let Some(url) = &mut self.otlp { + self.protocol.validate_endpoint(url)?; + } + Ok(()) + } +} + +// Parses an OTLP endpoint url.
+fn parse_otlp_endpoint(arg: &str) -> eyre::Result { + Url::parse(arg).wrap_err("Invalid URL for OTLP trace output") +} diff --git a/crates/node/core/src/cli/config.rs b/crates/node/core/src/cli/config.rs index 657b8cac1f9..8c29c4745e9 100644 --- a/crates/node/core/src/cli/config.rs +++ b/crates/node/core/src/cli/config.rs @@ -42,10 +42,9 @@ pub trait PayloadBuilderConfig { } match chain.kind() { - ChainKind::Named(NamedChain::Sepolia | NamedChain::Holesky | NamedChain::Hoodi) => { - ETHEREUM_BLOCK_GAS_LIMIT_60M - } - ChainKind::Named(NamedChain::Mainnet) => ETHEREUM_BLOCK_GAS_LIMIT_60M, + ChainKind::Named( + NamedChain::Mainnet | NamedChain::Sepolia | NamedChain::Holesky | NamedChain::Hoodi, + ) => ETHEREUM_BLOCK_GAS_LIMIT_60M, _ => ETHEREUM_BLOCK_GAS_LIMIT_36M, } } diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 96fa8cc8dfa..64b469086e7 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -10,7 +10,7 @@ use crate::{ }; use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, B256}; +use alloy_primitives::{BlockNumber, B256, U256}; use eyre::eyre; use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_config::config::PruneConfig; @@ -27,21 +27,16 @@ use reth_transaction_pool::TransactionPool; use serde::{de::DeserializeOwned, Serialize}; use std::{ fs, - net::SocketAddr, path::{Path, PathBuf}, sync::Arc, }; use tracing::*; -use crate::args::EraArgs; +use crate::args::{EraArgs, MetricArgs}; pub use reth_engine_primitives::{ - DEFAULT_MAX_PROOF_TASK_CONCURRENCY, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, - DEFAULT_RESERVED_CPU_CORES, + DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, }; -/// Triggers persistence when the number of canonical blocks in memory exceeds this threshold. -pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; - /// Default size of cross-block cache in megabytes. pub const DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB: u64 = 4 * 1024; @@ -103,10 +98,8 @@ pub struct NodeConfig { /// Possible values are either a built-in chain or the path to a chain specification file. pub chain: Arc, - /// Enable Prometheus metrics. - /// - /// The metrics will be served at the given interface and port. - pub metrics: Option, + /// Enable to configure metrics export to endpoints + pub metrics: MetricArgs, /// Add a new instance of a node. /// @@ -171,7 +164,7 @@ impl NodeConfig { Self { config: None, chain, - metrics: None, + metrics: MetricArgs::default(), instance: None, network: NetworkArgs::default(), rpc: RpcServerArgs::default(), @@ -197,6 +190,22 @@ impl NodeConfig { self } + /// Apply a function to the config. + pub fn apply(self, f: F) -> Self + where + F: FnOnce(Self) -> Self, + { + f(self) + } + + /// Applies a fallible function to the config. + pub fn try_apply(self, f: F) -> Result + where + F: FnOnce(Self) -> Result, + { + f(self) + } + /// Sets --dev mode for the node [`NodeConfig::dev`], if `dev` is true. 
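The new apply/try_apply helpers above make it easy to thread conditional tweaks through the builder-style config. A small sketch combining apply with set_dev (the enable_dev flag is hypothetical):

    let enable_dev = true;
    let config = NodeConfig::test().apply(|c| c.set_dev(enable_dev));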
pub const fn set_dev(self, dev: bool) -> Self { if dev { @@ -225,8 +234,8 @@ impl NodeConfig { } /// Set the metrics address for the node - pub const fn with_metrics(mut self, metrics: SocketAddr) -> Self { - self.metrics = Some(metrics); + pub fn with_metrics(mut self, metrics: MetricArgs) -> Self { + self.metrics = metrics; self } @@ -278,7 +287,7 @@ impl NodeConfig { } /// Set the dev args for the node - pub const fn with_dev(mut self, dev: DevArgs) -> Self { + pub fn with_dev(mut self, dev: DevArgs) -> Self { self.dev = dev; self } @@ -336,12 +345,6 @@ impl NodeConfig { .header_by_number(head)? .expect("the header for the latest block is missing, database is corrupt"); - let total_difficulty = provider - .header_td_by_number(head)? - // total difficulty is effectively deprecated, but still required in some places, e.g. - // p2p - .unwrap_or_default(); - let hash = provider .block_hash(head)? .expect("the hash for the latest block is missing, database is corrupt"); @@ -350,7 +353,7 @@ impl NodeConfig { number: head, hash, difficulty: header.difficulty(), - total_difficulty, + total_difficulty: U256::ZERO, timestamp: header.timestamp(), }) } @@ -424,6 +427,12 @@ impl NodeConfig { self } + /// Disables all discovery services for the node. + pub const fn with_disabled_discovery(mut self) -> Self { + self.network.discovery.disable_discovery = true; + self + } + /// Effectively disables the RPC state cache by setting the cache sizes to `0`. /// /// By setting the cache sizes to 0, caching of newly executed or fetched blocks will be @@ -517,7 +526,7 @@ impl Clone for NodeConfig { Self { chain: self.chain.clone(), config: self.config.clone(), - metrics: self.metrics, + metrics: self.metrics.clone(), instance: self.instance, network: self.network.clone(), rpc: self.rpc.clone(), @@ -525,7 +534,7 @@ impl Clone for NodeConfig { builder: self.builder.clone(), debug: self.debug.clone(), db: self.db, - dev: self.dev, + dev: self.dev.clone(), pruning: self.pruning.clone(), datadir: self.datadir.clone(), engine: self.engine.clone(), diff --git a/crates/node/ethstats/src/ethstats.rs b/crates/node/ethstats/src/ethstats.rs index b9fe5e47272..7592e93ae9d 100644 --- a/crates/node/ethstats/src/ethstats.rs +++ b/crates/node/ethstats/src/ethstats.rs @@ -109,10 +109,9 @@ where "Attempting to connect to EthStats server at {}", self.credentials.host ); let full_url = format!("ws://{}/api", self.credentials.host); - let url = Url::parse(&full_url) - .map_err(|e| EthStatsError::InvalidUrl(format!("Invalid URL: {full_url} - {e}")))?; + let url = Url::parse(&full_url).map_err(EthStatsError::Url)?; - match timeout(CONNECT_TIMEOUT, connect_async(url.to_string())).await { + match timeout(CONNECT_TIMEOUT, connect_async(url.as_str())).await { Ok(Ok((ws_stream, _))) => { debug!( target: "ethstats", @@ -123,7 +122,7 @@ where self.login().await?; Ok(()) } - Ok(Err(e)) => Err(EthStatsError::InvalidUrl(e.to_string())), + Ok(Err(e)) => Err(EthStatsError::WebSocket(e)), Err(_) => { debug!(target: "ethstats", "Connection to EthStats server timed out"); Err(EthStatsError::Timeout) diff --git a/crates/node/events/src/cl.rs b/crates/node/events/src/cl.rs index bdced7c97d6..99cdc1c245f 100644 --- a/crates/node/events/src/cl.rs +++ b/crates/node/events/src/cl.rs @@ -61,7 +61,7 @@ impl Stream for ConsensusLayerHealthEvents { )) } - // We never had both FCU and transition config exchange. + // We never received any forkchoice updates. 
return Poll::Ready(Some(ConsensusLayerHealthEvent::NeverSeen)) } } @@ -71,12 +71,8 @@ impl Stream for ConsensusLayerHealthEvents { /// Execution Layer point of view. #[derive(Clone, Copy, Debug)] pub enum ConsensusLayerHealthEvent { - /// Consensus Layer client was never seen. + /// Consensus Layer client was never seen (no forkchoice updates received). NeverSeen, - /// Consensus Layer client has not been seen for a while. - HasNotBeenSeenForAWhile(Duration), - /// Updates from the Consensus Layer client were never received. - NeverReceivedUpdates, - /// Updates from the Consensus Layer client have not been received for a while. + /// Forkchoice updates from the Consensus Layer client have not been received for a while. HaveNotReceivedUpdatesForAWhile(Duration), } diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 3539eae0316..20ac4394b4f 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -296,17 +296,6 @@ impl NodeState { "Post-merge network, but never seen beacon client. Please launch one to follow the chain!" ) } - ConsensusLayerHealthEvent::HasNotBeenSeenForAWhile(period) => { - warn!( - ?period, - "Post-merge network, but no beacon client seen for a while. Please launch one to follow the chain!" - ) - } - ConsensusLayerHealthEvent::NeverReceivedUpdates => { - warn!( - "Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!" - ) - } ConsensusLayerHealthEvent::HaveNotReceivedUpdatesForAWhile(period) => { warn!( ?period, @@ -607,6 +596,8 @@ impl Display for Eta { f, "{}", humantime::format_duration(Duration::from_secs(remaining.as_secs())) + .to_string() + .replace(' ', "") ) } } @@ -632,6 +623,6 @@ mod tests { } .to_string(); - assert_eq!(eta, "13m 37s"); + assert_eq!(eta, "13m37s"); } } diff --git a/crates/node/metrics/Cargo.toml b/crates/node/metrics/Cargo.toml index 39884fa73ef..9687c9c20ac 100644 --- a/crates/node/metrics/Cargo.toml +++ b/crates/node/metrics/Cargo.toml @@ -21,6 +21,7 @@ tokio.workspace = true jsonrpsee-server.workspace = true http.workspace = true tower.workspace = true +reqwest.workspace = true tracing.workspace = true eyre.workspace = true diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index c029b773718..26e9a918faa 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -8,9 +8,10 @@ use eyre::WrapErr; use http::{header::CONTENT_TYPE, HeaderValue, Response}; use metrics::describe_gauge; use metrics_process::Collector; +use reqwest::Client; use reth_metrics::metrics::Unit; use reth_tasks::TaskExecutor; -use std::{convert::Infallible, net::SocketAddr, sync::Arc}; +use std::{convert::Infallible, net::SocketAddr, sync::Arc, time::Duration}; /// Configuration for the [`MetricServer`] #[derive(Debug)] @@ -20,6 +21,8 @@ pub struct MetricServerConfig { chain_spec_info: ChainSpecInfo, task_executor: TaskExecutor, hooks: Hooks, + push_gateway_url: Option, + push_gateway_interval: Duration, } impl MetricServerConfig { @@ -31,7 +34,22 @@ impl MetricServerConfig { task_executor: TaskExecutor, hooks: Hooks, ) -> Self { - Self { listen_addr, hooks, task_executor, version_info, chain_spec_info } + Self { + listen_addr, + hooks, + task_executor, + version_info, + chain_spec_info, + push_gateway_url: None, + push_gateway_interval: Duration::from_secs(5), + } + } + + /// Set the gateway URL and interval for pushing metrics + pub fn with_push_gateway(mut self, url: Option, 
interval: Duration) -> Self { + self.push_gateway_url = url; + self.push_gateway_interval = interval; + self } } @@ -49,18 +67,35 @@ impl MetricServer { /// Spawns the metrics server pub async fn serve(&self) -> eyre::Result<()> { - let MetricServerConfig { listen_addr, hooks, task_executor, version_info, chain_spec_info } = - &self.config; + let MetricServerConfig { + listen_addr, + hooks, + task_executor, + version_info, + chain_spec_info, + push_gateway_url, + push_gateway_interval, + } = &self.config; - let hooks = hooks.clone(); + let hooks_for_endpoint = hooks.clone(); self.start_endpoint( *listen_addr, - Arc::new(move || hooks.iter().for_each(|hook| hook())), + Arc::new(move || hooks_for_endpoint.iter().for_each(|hook| hook())), task_executor.clone(), ) .await .wrap_err_with(|| format!("Could not start Prometheus endpoint at {listen_addr}"))?; + // Start push-gateway task if configured + if let Some(url) = push_gateway_url { + self.start_push_gateway_task( + url.clone(), + *push_gateway_interval, + hooks.clone(), + task_executor.clone(), + )?; + } + // Describe metrics after recorder installation describe_db_metrics(); describe_static_file_metrics(); @@ -84,6 +119,8 @@ impl MetricServer { .await .wrap_err("Could not bind to address")?; + tracing::info!(target: "reth::cli", "Starting metrics endpoint at {}", listener.local_addr().unwrap()); + task_executor.spawn_with_graceful_shutdown_signal(|mut signal| { Box::pin(async move { loop { @@ -128,6 +165,51 @@ impl MetricServer { Ok(()) } + + /// Starts a background task to push metrics to a metrics gateway + fn start_push_gateway_task( + &self, + url: String, + interval: Duration, + hooks: Hooks, + task_executor: TaskExecutor, + ) -> eyre::Result<()> { + let client = Client::builder() + .build() + .wrap_err("Could not create HTTP client to push metrics to gateway")?; + task_executor.spawn_with_graceful_shutdown_signal(move |mut signal| { + Box::pin(async move { + tracing::info!(url = %url, interval = ?interval, "Starting task to push metrics to gateway"); + let handle = install_prometheus_recorder(); + loop { + tokio::select! 
{ + _ = &mut signal => { + tracing::info!("Shutting down task to push metrics to gateway"); + break; + } + _ = tokio::time::sleep(interval) => { + hooks.iter().for_each(|hook| hook()); + let metrics = handle.handle().render(); + match client.put(&url).header("Content-Type", "text/plain").body(metrics).send().await { + Ok(response) => { + if !response.status().is_success() { + tracing::warn!( + status = %response.status(), + "Failed to push metrics to gateway" + ); + } + } + Err(err) => { + tracing::warn!(%err, "Failed to push metrics to gateway"); + } + } + } + } + } + }) + }); + Ok(()) + } } fn describe_db_metrics() { diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index daa4d11153a..b5b38f48c7d 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -11,7 +11,7 @@ use core::{fmt::Debug, marker::PhantomData}; pub use reth_primitives_traits::{ - Block, BlockBody, FullBlock, FullNodePrimitives, FullReceipt, FullSignedTx, NodePrimitives, + Block, BlockBody, FullBlock, FullReceipt, FullSignedTx, NodePrimitives, }; use reth_chainspec::EthChainSpec; diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 3733227a3aa..ef203df0fc0 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -12,7 +12,7 @@ exclude.workspace = true reth-cli-util.workspace = true reth-optimism-cli.workspace = true reth-optimism-rpc.workspace = true -reth-optimism-node = { workspace = true, features = ["js-tracer"] } +reth-optimism-node.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-evm.workspace = true @@ -27,7 +27,13 @@ tracing.workspace = true workspace = true [features] -default = ["jemalloc", "reth-optimism-evm/portable"] +default = ["jemalloc", "otlp", "reth-optimism-evm/portable", "js-tracer"] + +otlp = ["reth-optimism-cli/otlp"] + +js-tracer = [ + "reth-optimism-node/js-tracer", +] jemalloc = ["reth-cli-util/jemalloc", "reth-optimism-cli/jemalloc"] jemalloc-prof = ["reth-cli-util/jemalloc-prof"] diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index 55201164701..a4ef9263b1c 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -48,6 +48,7 @@ op-alloy-consensus.workspace = true [dev-dependencies] reth-chainspec = { workspace = true, features = ["test-utils"] } +alloy-op-hardforks.workspace = true [features] default = ["std"] @@ -85,4 +86,5 @@ serde = [ "reth-optimism-primitives/serde", "reth-primitives-traits/serde", "op-alloy-consensus/serde", + "alloy-op-hardforks/serde", ] diff --git a/crates/optimism/chainspec/res/superchain-configs.tar b/crates/optimism/chainspec/res/superchain-configs.tar index da035a32da5..80345a28438 100644 Binary files a/crates/optimism/chainspec/res/superchain-configs.tar and b/crates/optimism/chainspec/res/superchain-configs.tar differ diff --git a/crates/optimism/chainspec/res/superchain_registry_commit b/crates/optimism/chainspec/res/superchain_registry_commit index 70808136d14..d37cde1bb4a 100644 --- a/crates/optimism/chainspec/res/superchain_registry_commit +++ b/crates/optimism/chainspec/res/superchain_registry_commit @@ -1 +1 @@ -d56233c1e5254fc2fd769d5b33269502a1fe9ef8 +9e3f71cee0e4e2acb4864cb00f5fbee3555d8e9f diff --git a/crates/optimism/chainspec/src/basefee.rs b/crates/optimism/chainspec/src/basefee.rs index 0ef712dc04f..3c0dcdfd88d 100644 --- a/crates/optimism/chainspec/src/basefee.rs +++ 
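The server changes above add an optional push mode next to the existing pull endpoint: when a gateway URL is configured, a background task with graceful shutdown renders the installed Prometheus recorder on a fixed interval and PUTs the output to the gateway. A hedged usage sketch of the new builder option follows; it assumes the config types from this diff are in scope, keeps the existing `MetricServerConfig::new` argument order, and uses an illustrative gateway URL and interval rather than defaults.

```rust
use std::{net::SocketAddr, time::Duration};

// Sketch only: `VersionInfo`, `ChainSpecInfo`, `TaskExecutor`, `Hooks` and
// `MetricServerConfig` are the metrics-server types touched by this diff.
fn metrics_config_with_push(
    listen_addr: SocketAddr,
    version_info: VersionInfo,
    chain_spec_info: ChainSpecInfo,
    task_executor: TaskExecutor,
    hooks: Hooks,
) -> MetricServerConfig {
    MetricServerConfig::new(listen_addr, version_info, chain_spec_info, task_executor, hooks)
        // A `None` URL leaves the push task disabled; the URL below is hypothetical.
        .with_push_gateway(
            Some("http://pushgateway:9091/metrics/job/reth".to_string()),
            Duration::from_secs(15),
        )
}
```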
b/crates/optimism/chainspec/src/basefee.rs @@ -1,26 +1,13 @@ //! Base fee related utilities for Optimism chains. +use core::cmp::max; + use alloy_consensus::BlockHeader; +use alloy_eips::calc_next_block_base_fee; use op_alloy_consensus::{decode_holocene_extra_data, decode_jovian_extra_data, EIP1559ParamError}; use reth_chainspec::{BaseFeeParams, EthChainSpec}; use reth_optimism_forks::OpHardforks; -fn next_base_fee_params( - chain_spec: impl EthChainSpec + OpHardforks, - parent: &H, - timestamp: u64, - denominator: u32, - elasticity: u32, -) -> u64 { - let base_fee_params = if elasticity == 0 && denominator == 0 { - chain_spec.base_fee_params_at_timestamp(timestamp) - } else { - BaseFeeParams::new(denominator as u128, elasticity as u128) - }; - - parent.next_block_base_fee(base_fee_params).unwrap_or_default() -} - /// Extracts the Holocene 1599 parameters from the encoded extra data from the parent header. /// /// Caution: Caller must ensure that holocene is active in the parent header. @@ -36,7 +23,13 @@ where { let (elasticity, denominator) = decode_holocene_extra_data(parent.extra_data())?; - Ok(next_base_fee_params(chain_spec, parent, timestamp, denominator, elasticity)) + let base_fee_params = if elasticity == 0 && denominator == 0 { + chain_spec.base_fee_params_at_timestamp(timestamp) + } else { + BaseFeeParams::new(denominator as u128, elasticity as u128) + }; + + Ok(parent.next_block_base_fee(base_fee_params).unwrap_or_default()) } /// Extracts the Jovian 1599 parameters from the encoded extra data from the parent header. @@ -57,8 +50,22 @@ where { let (elasticity, denominator, min_base_fee) = decode_jovian_extra_data(parent.extra_data())?; - let next_base_fee = - next_base_fee_params(chain_spec, parent, timestamp, denominator, elasticity); + let base_fee_params = if elasticity == 0 && denominator == 0 { + chain_spec.base_fee_params_at_timestamp(timestamp) + } else { + BaseFeeParams::new(denominator as u128, elasticity as u128) + }; + + // Starting from Jovian, we use the maximum of the gas used and the blob gas used to calculate + // the next base fee. 
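As the comment above describes, Jovian feeds the larger of the parent's execution gas and its DA footprint (carried in `blob_gas_used`) into the EIP-1559 update and then clamps the result to the minimum base fee decoded from the parent's extra data. A minimal, self-contained sketch of that rule follows, with a simplified stand-in for `alloy_eips::calc_next_block_base_fee`; it assumes a non-zero elasticity, denominator, and gas target.

```rust
/// Illustrative sketch, not the crate's API: the Jovian next-base-fee rule.
fn jovian_next_base_fee_sketch(
    gas_used: u64,
    blob_gas_used: u64,
    gas_limit: u64,
    parent_base_fee: u64,
    elasticity: u64,
    denominator: u64,
    min_base_fee: u64,
) -> u64 {
    // Jovian: the EIP-1559 update sees the larger of execution gas and the DA footprint.
    let gas_used = gas_used.max(blob_gas_used);
    let gas_target = gas_limit / elasticity;
    let delta = |diff: u64| -> u64 {
        (parent_base_fee as u128 * diff as u128 / gas_target as u128 / denominator as u128) as u64
    };
    let next = if gas_used > gas_target {
        parent_base_fee + delta(gas_used - gas_target).max(1)
    } else {
        parent_base_fee.saturating_sub(delta(gas_target - gas_used))
    };
    // The result never drops below the minimum base fee carried in the extra data.
    next.max(min_base_fee)
}
```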
+ let gas_used = max(parent.gas_used(), parent.blob_gas_used().unwrap_or_default()); + + let next_base_fee = calc_next_block_base_fee( + gas_used, + parent.gas_limit(), + parent.base_fee_per_gas().unwrap_or_default(), + base_fee_params, + ); if next_base_fee < min_base_fee { return Ok(min_base_fee); @@ -66,3 +73,127 @@ where Ok(next_base_fee) } + +#[cfg(test)] +mod tests { + use alloc::sync::Arc; + + use op_alloy_consensus::encode_jovian_extra_data; + use reth_chainspec::{ChainSpec, ForkCondition, Hardfork}; + use reth_optimism_forks::OpHardfork; + + use crate::{OpChainSpec, BASE_SEPOLIA}; + + use super::*; + + const JOVIAN_TIMESTAMP: u64 = 1900000000; + + fn get_chainspec() -> Arc { + let mut base_sepolia_spec = BASE_SEPOLIA.inner.clone(); + base_sepolia_spec + .hardforks + .insert(OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(JOVIAN_TIMESTAMP)); + Arc::new(OpChainSpec { + inner: ChainSpec { + chain: base_sepolia_spec.chain, + genesis: base_sepolia_spec.genesis, + genesis_header: base_sepolia_spec.genesis_header, + ..Default::default() + }, + }) + } + + #[test] + fn test_next_base_fee_jovian_blob_gas_used_greater_than_gas_used() { + let chain_spec = get_chainspec(); + let mut parent = chain_spec.genesis_header().clone(); + let timestamp = JOVIAN_TIMESTAMP; + + const GAS_LIMIT: u64 = 10_000_000_000; + const BLOB_GAS_USED: u64 = 5_000_000_000; + const GAS_USED: u64 = 1_000_000_000; + const MIN_BASE_FEE: u64 = 100_000_000; + + parent.extra_data = + encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE) + .unwrap(); + parent.blob_gas_used = Some(BLOB_GAS_USED); + parent.gas_used = GAS_USED; + parent.gas_limit = GAS_LIMIT; + + let expected_base_fee = calc_next_block_base_fee( + BLOB_GAS_USED, + parent.gas_limit(), + parent.base_fee_per_gas().unwrap_or_default(), + BaseFeeParams::base_sepolia(), + ); + assert_eq!( + expected_base_fee, + compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap() + ); + assert_ne!( + expected_base_fee, + calc_next_block_base_fee( + GAS_USED, + parent.gas_limit(), + parent.base_fee_per_gas().unwrap_or_default(), + BaseFeeParams::base_sepolia(), + ) + ) + } + + #[test] + fn test_next_base_fee_jovian_blob_gas_used_less_than_gas_used() { + let chain_spec = get_chainspec(); + let mut parent = chain_spec.genesis_header().clone(); + let timestamp = JOVIAN_TIMESTAMP; + + const GAS_LIMIT: u64 = 10_000_000_000; + const BLOB_GAS_USED: u64 = 100_000_000; + const GAS_USED: u64 = 1_000_000_000; + const MIN_BASE_FEE: u64 = 100_000_000; + + parent.extra_data = + encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE) + .unwrap(); + parent.blob_gas_used = Some(BLOB_GAS_USED); + parent.gas_used = GAS_USED; + parent.gas_limit = GAS_LIMIT; + + let expected_base_fee = calc_next_block_base_fee( + GAS_USED, + parent.gas_limit(), + parent.base_fee_per_gas().unwrap_or_default(), + BaseFeeParams::base_sepolia(), + ); + assert_eq!( + expected_base_fee, + compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap() + ); + } + + #[test] + fn test_next_base_fee_jovian_min_base_fee() { + let chain_spec = get_chainspec(); + let mut parent = chain_spec.genesis_header().clone(); + let timestamp = JOVIAN_TIMESTAMP; + + const GAS_LIMIT: u64 = 10_000_000_000; + const BLOB_GAS_USED: u64 = 100_000_000; + const GAS_USED: u64 = 1_000_000_000; + const MIN_BASE_FEE: u64 = 5_000_000_000; + + parent.extra_data = + encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE) + .unwrap(); + parent.blob_gas_used 
= Some(BLOB_GAS_USED); + parent.gas_used = GAS_USED; + parent.gas_limit = GAS_LIMIT; + + let expected_base_fee = MIN_BASE_FEE; + assert_eq!( + expected_base_fee, + compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap() + ); + } +} diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index d5ff6d495d7..28c10adfff4 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -520,9 +520,12 @@ pub fn make_op_genesis_header(genesis: &Genesis, hardforks: &ChainHardforks) -> #[cfg(test)] mod tests { - use alloc::string::String; + use alloc::string::{String, ToString}; use alloy_genesis::{ChainConfig, Genesis}; - use alloy_primitives::b256; + use alloy_op_hardforks::{ + BASE_MAINNET_JOVIAN_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, OP_SEPOLIA_JOVIAN_TIMESTAMP, + }; + use alloy_primitives::{b256, hex}; use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind}; use reth_ethereum_forks::{EthereumHardfork, ForkCondition, ForkHash, ForkId, Head}; use reth_optimism_forks::{OpHardfork, OpHardforks}; @@ -532,7 +535,7 @@ mod tests { #[test] fn test_storage_root_consistency() { use alloy_primitives::{B256, U256}; - use std::str::FromStr; + use core::str::FromStr; let k1 = B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000001") @@ -614,13 +617,20 @@ mod tests { // Isthmus ( Head { number: 0, timestamp: 1746806401, ..Default::default() }, - ForkId { hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ + ForkId { + hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), + next: BASE_MAINNET_JOVIAN_TIMESTAMP, + }, + ), + // Jovian + ( + Head { + number: 0, + timestamp: BASE_MAINNET_JOVIAN_TIMESTAMP, + ..Default::default() + }, + BASE_MAINNET.hardfork_fork_id(OpHardfork::Jovian).unwrap(), ), - // // Jovian - // ( - // Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO: - // update timestamp when Jovian is planned */ ForkId { hash: - // ForkHash([0xef, 0x0e, 0x58, 0x33]), next: 0 }, ), ], ); } @@ -673,13 +683,20 @@ mod tests { // Isthmus ( Head { number: 0, timestamp: 1744905600, ..Default::default() }, - ForkId { hash: ForkHash([0x6c, 0x62, 0x5e, 0xe1]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ + ForkId { + hash: ForkHash([0x6c, 0x62, 0x5e, 0xe1]), + next: OP_SEPOLIA_JOVIAN_TIMESTAMP, + }, + ), + // Jovian + ( + Head { + number: 0, + timestamp: OP_SEPOLIA_JOVIAN_TIMESTAMP, + ..Default::default() + }, + OP_SEPOLIA.hardfork_fork_id(OpHardfork::Jovian).unwrap(), ), - // // Jovian - // ( - // Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO: - // update timestamp when Jovian is planned */ ForkId { hash: - // ForkHash([0x04, 0x2a, 0x5c, 0x14]), next: 0 }, ), ], ); } @@ -742,13 +759,20 @@ mod tests { // Isthmus ( Head { number: 105235063, timestamp: 1746806401, ..Default::default() }, - ForkId { hash: ForkHash([0x37, 0xbe, 0x75, 0x8f]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ + ForkId { + hash: ForkHash([0x37, 0xbe, 0x75, 0x8f]), + next: OP_MAINNET_JOVIAN_TIMESTAMP, + }, ), // Jovian - // ( - // Head { number: 105235063, timestamp: u64::MAX, ..Default::default() }, /* - // TODO: update timestamp when Jovian is planned */ ForkId { - // hash: ForkHash([0x26, 0xce, 0xa1, 0x75]), next: 0 }, ), + ( + Head { + number: 105235063, + timestamp: OP_MAINNET_JOVIAN_TIMESTAMP, + ..Default::default() + }, + OP_MAINNET.hardfork_fork_id(OpHardfork::Jovian).unwrap(), + ), ], ); } @@ -801,13 
+825,20 @@ mod tests { // Isthmus ( Head { number: 0, timestamp: 1744905600, ..Default::default() }, - ForkId { hash: ForkHash([0x06, 0x0a, 0x4d, 0x1d]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ + ForkId { + hash: ForkHash([0x06, 0x0a, 0x4d, 0x1d]), + next: OP_SEPOLIA_JOVIAN_TIMESTAMP, + }, /* TODO: update timestamp when Jovian is planned */ + ), + // Jovian + ( + Head { + number: 0, + timestamp: OP_SEPOLIA_JOVIAN_TIMESTAMP, + ..Default::default() + }, + BASE_SEPOLIA.hardfork_fork_id(OpHardfork::Jovian).unwrap(), ), - // // Jovian - // ( - // Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO: - // update timestamp when Jovian is planned */ ForkId { hash: - // ForkHash([0xcd, 0xfd, 0x39, 0x99]), next: 0 }, ), ], ); } @@ -851,7 +882,7 @@ mod tests { #[test] fn latest_base_mainnet_fork_id() { assert_eq!( - ForkId { hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), next: 0 }, + ForkId { hash: ForkHash(hex!("1cfeafc9")), next: 0 }, BASE_MAINNET.latest_fork_id() ) } @@ -860,7 +891,7 @@ mod tests { fn latest_base_mainnet_fork_id_with_builder() { let base_mainnet = OpChainSpecBuilder::base_mainnet().build(); assert_eq!( - ForkId { hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), next: 0 }, + ForkId { hash: ForkHash(hex!("1cfeafc9")), next: 0 }, base_mainnet.latest_fork_id() ) } diff --git a/crates/optimism/chainspec/src/superchain/chain_specs.rs b/crates/optimism/chainspec/src/superchain/chain_specs.rs index 1547082eca3..8a794221ea6 100644 --- a/crates/optimism/chainspec/src/superchain/chain_specs.rs +++ b/crates/optimism/chainspec/src/superchain/chain_specs.rs @@ -45,6 +45,7 @@ create_superchain_specs!( ("settlus-sepolia", "sepolia"), ("shape", "mainnet"), ("shape", "sepolia"), + ("silent-data-mainnet", "mainnet"), ("snax", "mainnet"), ("soneium", "mainnet"), ("soneium-minato", "sepolia"), diff --git a/crates/optimism/chainspec/src/superchain/configs.rs b/crates/optimism/chainspec/src/superchain/configs.rs index 53b30a2f5d9..bb1929646a0 100644 --- a/crates/optimism/chainspec/src/superchain/configs.rs +++ b/crates/optimism/chainspec/src/superchain/configs.rs @@ -87,7 +87,17 @@ fn read_file( #[cfg(test)] mod tests { use super::*; - use crate::superchain::Superchain; + use crate::{generated_chain_value_parser, superchain::Superchain, SUPPORTED_CHAINS}; + use alloy_chains::NamedChain; + use alloy_op_hardforks::{ + OpHardfork, BASE_MAINNET_CANYON_TIMESTAMP, BASE_MAINNET_ECOTONE_TIMESTAMP, + BASE_MAINNET_ISTHMUS_TIMESTAMP, BASE_MAINNET_JOVIAN_TIMESTAMP, + BASE_SEPOLIA_CANYON_TIMESTAMP, BASE_SEPOLIA_ECOTONE_TIMESTAMP, + BASE_SEPOLIA_ISTHMUS_TIMESTAMP, BASE_SEPOLIA_JOVIAN_TIMESTAMP, OP_MAINNET_CANYON_TIMESTAMP, + OP_MAINNET_ECOTONE_TIMESTAMP, OP_MAINNET_ISTHMUS_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, + OP_SEPOLIA_CANYON_TIMESTAMP, OP_SEPOLIA_ECOTONE_TIMESTAMP, OP_SEPOLIA_ISTHMUS_TIMESTAMP, + OP_SEPOLIA_JOVIAN_TIMESTAMP, + }; use reth_optimism_primitives::ADDRESS_L2_TO_L1_MESSAGE_PASSER; use tar_no_std::TarArchiveRef; @@ -150,4 +160,139 @@ mod tests { ); } } + + #[test] + fn test_hardfork_timestamps() { + for &chain in SUPPORTED_CHAINS { + let metadata = generated_chain_value_parser(chain).unwrap(); + + match metadata.chain().named() { + Some(NamedChain::Optimism) => { + assert_eq!( + metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(), + OP_MAINNET_JOVIAN_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Isthmus) + .unwrap() + .as_timestamp() + .unwrap(), + OP_MAINNET_ISTHMUS_TIMESTAMP + ); + + assert_eq!( + 
metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(), + OP_MAINNET_CANYON_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Ecotone) + .unwrap() + .as_timestamp() + .unwrap(), + OP_MAINNET_ECOTONE_TIMESTAMP + ); + } + Some(NamedChain::OptimismSepolia) => { + assert_eq!( + metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(), + OP_SEPOLIA_JOVIAN_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Isthmus) + .unwrap() + .as_timestamp() + .unwrap(), + OP_SEPOLIA_ISTHMUS_TIMESTAMP + ); + + assert_eq!( + metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(), + OP_SEPOLIA_CANYON_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Ecotone) + .unwrap() + .as_timestamp() + .unwrap(), + OP_SEPOLIA_ECOTONE_TIMESTAMP + ); + } + Some(NamedChain::Base) => { + assert_eq!( + metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(), + BASE_MAINNET_JOVIAN_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Isthmus) + .unwrap() + .as_timestamp() + .unwrap(), + BASE_MAINNET_ISTHMUS_TIMESTAMP + ); + + assert_eq!( + metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(), + BASE_MAINNET_CANYON_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Ecotone) + .unwrap() + .as_timestamp() + .unwrap(), + BASE_MAINNET_ECOTONE_TIMESTAMP + ); + } + Some(NamedChain::BaseSepolia) => { + assert_eq!( + metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(), + BASE_SEPOLIA_JOVIAN_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Isthmus) + .unwrap() + .as_timestamp() + .unwrap(), + BASE_SEPOLIA_ISTHMUS_TIMESTAMP + ); + + assert_eq!( + metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(), + BASE_SEPOLIA_CANYON_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Ecotone) + .unwrap() + .as_timestamp() + .unwrap(), + BASE_SEPOLIA_ECOTONE_TIMESTAMP + ); + } + _ => {} + } + } + } } diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 422da3b883e..aee7566de22 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -44,6 +44,7 @@ reth-optimism-evm.workspace = true reth-cli-runner.workspace = true reth-node-builder = { workspace = true, features = ["op"] } reth-tracing.workspace = true +reth-tracing-otlp.workspace = true # eth alloy-eips.workspace = true @@ -55,6 +56,7 @@ alloy-rlp.workspace = true futures-util.workspace = true derive_more.workspace = true serde.workspace = true +url.workspace = true clap = { workspace = true, features = ["derive", "env"] } tokio = { workspace = true, features = ["sync", "macros", "time", "rt-multi-thread"] } @@ -74,6 +76,11 @@ reth-stages = { workspace = true, features = ["test-utils"] } reth-optimism-chainspec = { workspace = true, features = ["std", "superchain-configs"] } [features] +default = [] + +# Opentelemtry feature to activate metrics export +otlp = ["reth-tracing/otlp", "reth-node-core/otlp"] + asm-keccak = [ "alloy-primitives/asm-keccak", "reth-node-core/asm-keccak", @@ -100,4 +107,5 @@ serde = [ "reth-optimism-primitives/serde", "reth-primitives-traits/serde", "reth-optimism-chainspec/serde", + "url/serde", ] diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index 1e9f7960ad1..8567c2b7e5a 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -9,8 +9,10 @@ use 
reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_node::{OpExecutorProvider, OpNode}; use reth_rpc_server_types::RpcModuleValidator; use reth_tracing::{FileWorkerGuard, Layers}; +use reth_tracing_otlp::OtlpProtocol; use std::{fmt, sync::Arc}; use tracing::info; +use url::Url; /// A wrapper around a parsed CLI that handles command execution. #[derive(Debug)] @@ -63,7 +65,8 @@ where self.cli.logs.log_file_directory.join(chain_spec.chain.to_string()); } - self.init_tracing()?; + self.init_tracing(&runner)?; + // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); @@ -114,12 +117,52 @@ where /// Initializes tracing with the configured options. /// /// If file logging is enabled, this function stores guard to the struct. - pub fn init_tracing(&mut self) -> Result<()> { + /// For gRPC OTLP, it requires tokio runtime context. + pub fn init_tracing(&mut self, runner: &CliRunner) -> Result<()> { if self.guard.is_none() { - let layers = self.layers.take().unwrap_or_default(); + let mut layers = self.layers.take().unwrap_or_default(); + + #[cfg(feature = "otlp")] + { + self.cli.traces.validate()?; + if let Some(endpoint) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", endpoint); + self.init_otlp_export(&mut layers, endpoint, runner)?; + } + } + self.guard = self.cli.logs.init_tracing_with_layers(layers)?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); } Ok(()) } + + /// Initialize OTLP tracing export based on protocol type. + /// + /// For gRPC, `block_on` is required because tonic's channel initialization needs + /// a tokio runtime context, even though `with_span_layer` itself is not async. + #[cfg(feature = "otlp")] + fn init_otlp_export( + &self, + layers: &mut Layers, + endpoint: &Url, + runner: &CliRunner, + ) -> Result<()> { + let endpoint = endpoint.clone(); + let protocol = self.cli.traces.protocol; + let level_filter = self.cli.traces.otlp_filter.clone(); + + match protocol { + OtlpProtocol::Grpc => { + runner.block_on(async { + layers.with_span_layer("reth".to_string(), endpoint, level_filter, protocol) + })?; + } + OtlpProtocol::Http => { + layers.with_span_layer("reth".to_string(), endpoint, level_filter, protocol)?; + } + } + + Ok(()) + } } diff --git a/crates/optimism/cli/src/commands/import.rs b/crates/optimism/cli/src/commands/import.rs index 0fd1d64ac12..74656511af1 100644 --- a/crates/optimism/cli/src/commands/import.rs +++ b/crates/optimism/cli/src/commands/import.rs @@ -71,6 +71,9 @@ impl> ImportOpCommand { .sealed_header(provider_factory.last_block_number()?)? .expect("should have genesis"); + let static_file_producer = + StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); + while let Some(mut file_client) = reader.next_chunk::>(consensus.clone(), Some(sealed_header)).await? 
{ @@ -100,7 +103,7 @@ impl> ImportOpCommand { provider_factory.clone(), &consensus, Arc::new(file_client), - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()), + static_file_producer.clone(), true, OpExecutorProvider::optimism(provider_factory.chain_spec()), )?; diff --git a/crates/optimism/cli/src/commands/init_state.rs b/crates/optimism/cli/src/commands/init_state.rs index 0d065c29442..950f60193f0 100644 --- a/crates/optimism/cli/src/commands/init_state.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -7,13 +7,13 @@ use reth_cli_commands::common::{AccessRights, CliHeader, CliNodeTypes, Environme use reth_db_common::init::init_from_state_dump; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::{ - bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD}, + bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH}, OpPrimitives, }; use reth_primitives_traits::SealedHeader; use reth_provider::{ - BlockNumReader, ChainSpecProvider, DBProvider, DatabaseProviderFactory, - StaticFileProviderFactory, StaticFileWriter, + BlockNumReader, DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, + StaticFileWriter, }; use std::{io::BufReader, sync::Arc}; use tracing::info; @@ -24,12 +24,11 @@ pub struct InitStateCommandOp { #[command(flatten)] init_state: reth_cli_commands::init_state::InitStateCommand, - /// **Optimism Mainnet Only** - /// - /// Specifies whether to initialize the state without relying on OVM historical data. + /// Specifies whether to initialize the state without relying on OVM or EVM historical data. /// /// When enabled, and before inserting the state, it creates a dummy chain up to the last OVM - /// block (#105235062) (14GB / 90 seconds). It then, appends the Bedrock block. + /// block (#105235062) (14GB / 90 seconds). It then, appends the Bedrock block. This is + /// hardcoded for OP mainnet, for other OP chains you will need to pass in a header. /// /// - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be /// ignored. @@ -40,43 +39,59 @@ pub struct InitStateCommandOp { impl> InitStateCommandOp { /// Execute the `init` command pub async fn execute>( - self, + mut self, ) -> eyre::Result<()> { - info!(target: "reth::cli", "Reth init-state starting"); + // If using --without-ovm for OP mainnet, handle the special case with hardcoded Bedrock + // header. Otherwise delegate to the base InitStateCommand implementation. + if self.without_ovm { + if self.init_state.env.chain.is_optimism_mainnet() { + return self.execute_with_bedrock_header::(); + } + + // For non-mainnet OP chains with --without-ovm, use the base implementation + // by setting the without_evm flag + self.init_state.without_evm = true; + } + + self.init_state.execute::().await + } - let Environment { config, provider_factory, .. } = - self.init_state.env.init::(AccessRights::RW)?; + /// Execute init-state with hardcoded Bedrock header for OP mainnet. + fn execute_with_bedrock_header< + N: CliNodeTypes, + >( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "Reth init-state starting for OP mainnet"); + let env = self.init_state.env.init::(AccessRights::RW)?; + let Environment { config, provider_factory, .. 
} = env; let static_file_provider = provider_factory.static_file_provider(); let provider_rw = provider_factory.database_provider_rw()?; - // OP-Mainnet may want to bootstrap a chain without OVM historical data - if provider_factory.chain_spec().is_optimism_mainnet() && self.without_ovm { - let last_block_number = provider_rw.last_block_number()?; - - if last_block_number == 0 { - reth_cli_commands::init_state::without_evm::setup_without_evm( - &provider_rw, - SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), - BEDROCK_HEADER_TTD, - |number| { - let mut header = Header::default(); - header.set_number(number); - header - }, - )?; - - // SAFETY: it's safe to commit static files, since in the event of a crash, they - // will be unwound according to database checkpoints. - // - // Necessary to commit, so the BEDROCK_HEADER is accessible to provider_rw and - // init_state_dump - static_file_provider.commit()?; - } else if last_block_number > 0 && last_block_number < BEDROCK_HEADER.number { - return Err(eyre::eyre!( - "Data directory should be empty when calling init-state with --without-ovm." - )) - } + let last_block_number = provider_rw.last_block_number()?; + + if last_block_number == 0 { + reth_cli_commands::init_state::without_evm::setup_without_evm( + &provider_rw, + SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), + |number| { + let mut header = Header::default(); + header.set_number(number); + header + }, + )?; + + // SAFETY: it's safe to commit static files, since in the event of a crash, they + // will be unwound according to database checkpoints. + // + // Necessary to commit, so the BEDROCK_HEADER is accessible to provider_rw and + // init_state_dump + static_file_provider.commit()?; + } else if last_block_number > 0 && last_block_number < BEDROCK_HEADER.number { + return Err(eyre::eyre!( + "Data directory should be empty when calling init-state with --without-ovm." + )) } info!(target: "reth::cli", "Initiating state dump"); diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index f910df244eb..d41f8cc0b60 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -48,7 +48,10 @@ use reth_cli_commands::launcher::FnLauncher; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_builder::{NodeBuilder, WithLaunchContext}; -use reth_node_core::{args::LogArgs, version::version_metadata}; +use reth_node_core::{ + args::{LogArgs, TraceArgs}, + version::version_metadata, +}; use reth_optimism_node::args::RollupArgs; // This allows us to manually enable node metrics features, required for proper jemalloc metric @@ -59,7 +62,7 @@ use reth_node_metrics as _; /// /// This is the entrypoint to the executable. #[derive(Debug, Parser)] -#[command(author, version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)] +#[command(author, name = version_metadata().name_client.as_ref(), version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)] pub struct Cli< Spec: ChainSpecParser = OpChainSpecParser, Ext: clap::Args + fmt::Debug = RollupArgs, @@ -73,6 +76,10 @@ pub struct Cli< #[command(flatten)] pub logs: LogArgs, + /// The metrics configuration for the CLI. 
+ #[command(flatten)] + pub traces: TraceArgs, + /// Type marker for the RPC module validator #[arg(skip)] _phantom: PhantomData, @@ -198,6 +205,7 @@ mod test { "10000", "--metrics", "9003", + "--tracing-otlp=http://localhost:4318/v1/traces", "--log.file.max-size", "100", "--builder.gaslimit", diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 93768dcc696..34a003bad32 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -18,9 +18,9 @@ use core::fmt::Debug; use reth_chainspec::EthChainSpec; use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; use reth_consensus_common::validation::{ - validate_against_parent_4844, validate_against_parent_eip1559_base_fee, - validate_against_parent_hash_number, validate_against_parent_timestamp, validate_cancun_gas, - validate_header_base_fee, validate_header_extra_data, validate_header_gas, + validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, + validate_against_parent_timestamp, validate_cancun_gas, validate_header_base_fee, + validate_header_extra_data, validate_header_gas, }; use reth_execution_types::BlockExecutionResult; use reth_optimism_forks::OpHardforks; @@ -65,7 +65,7 @@ where block: &RecoveredBlock, result: &BlockExecutionResult, ) -> Result<(), ConsensusError> { - validate_block_post_execution(block.header(), &self.chain_spec, &result.receipts) + validate_block_post_execution(block.header(), &self.chain_spec, result) } } @@ -111,7 +111,13 @@ where return Ok(()) } - if self.chain_spec.is_ecotone_active_at_timestamp(block.timestamp()) { + // Blob gas used validation + // In Jovian, the blob gas used computation has changed. We are moving the blob base fee + // validation to post-execution since the DA footprint calculation is stateful. + // Pre-execution we only validate that the blob gas used is present in the header. + if self.chain_spec.is_jovian_active_at_timestamp(block.timestamp()) { + block.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + } else if self.chain_spec.is_ecotone_active_at_timestamp(block.timestamp()) { validate_cancun_gas(block)?; } @@ -182,11 +188,575 @@ where &self.chain_spec, )?; - // ensure that the blob gas fields for this block - if let Some(blob_params) = self.chain_spec.blob_params_at_timestamp(header.timestamp()) { - validate_against_parent_4844(header.header(), parent.header(), blob_params)?; + // Ensure that the blob gas fields for this block are correctly set. + // In the op-stack, the excess blob gas is always 0 for all blocks after ecotone. + // The blob gas used and the excess blob gas should both be set after ecotone. + // After Jovian, the blob gas used contains the current DA footprint. + if self.chain_spec.is_ecotone_active_at_timestamp(header.timestamp()) { + let blob_gas_used = header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + + // Before Jovian and after ecotone, the blob gas used should be 0. 
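The comments above spell out the fork-dependent treatment of the blob-gas header fields on the OP stack: after Ecotone both fields must be present and `excess_blob_gas` must be zero, `blob_gas_used` must also be zero until Jovian, and from Jovian on `blob_gas_used` carries the DA footprint, which is validated against the execution result after the block runs rather than here. A condensed sketch of the header-level rule follows, with boolean fork flags and a plain error string standing in for the chain-spec queries and `ConsensusError`.

```rust
// Sketch only; the real checks return ConsensusError variants such as
// BlobGasUsedDiff and ExcessBlobGasDiff.
fn check_blob_gas_fields_sketch(
    ecotone_active: bool,
    jovian_active: bool,
    blob_gas_used: Option<u64>,
    excess_blob_gas: Option<u64>,
) -> Result<(), String> {
    if !ecotone_active {
        // Pre-Ecotone headers carry no blob-gas fields at all.
        return Ok(());
    }
    let blob_gas_used = blob_gas_used.ok_or("blob gas used missing")?;
    if !jovian_active && blob_gas_used != 0 {
        return Err(format!("blob gas used: got {blob_gas_used}, expected 0"));
    }
    let excess_blob_gas = excess_blob_gas.ok_or("excess blob gas missing")?;
    if excess_blob_gas != 0 {
        return Err(format!("excess blob gas: got {excess_blob_gas}, expected 0"));
    }
    Ok(())
}
```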
+ if !self.chain_spec.is_jovian_active_at_timestamp(header.timestamp()) && + blob_gas_used != 0 + { + return Err(ConsensusError::BlobGasUsedDiff(GotExpected { + got: blob_gas_used, + expected: 0, + })); + } + + let excess_blob_gas = + header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?; + if excess_blob_gas != 0 { + return Err(ConsensusError::ExcessBlobGasDiff { + diff: GotExpected { got: excess_blob_gas, expected: 0 }, + parent_excess_blob_gas: parent.excess_blob_gas().unwrap_or(0), + parent_blob_gas_used: parent.blob_gas_used().unwrap_or(0), + }) + } } Ok(()) } } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use alloy_consensus::{BlockBody, Eip658Value, Header, Receipt, TxEip7702, TxReceipt}; + use alloy_eips::{eip4895::Withdrawals, eip7685::Requests}; + use alloy_primitives::{Address, Bytes, Signature, U256}; + use op_alloy_consensus::{ + encode_holocene_extra_data, encode_jovian_extra_data, OpTypedTransaction, + }; + use reth_chainspec::BaseFeeParams; + use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; + use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder, OP_MAINNET}; + use reth_optimism_primitives::{OpPrimitives, OpReceipt, OpTransactionSigned}; + use reth_primitives_traits::{proofs, GotExpected, RecoveredBlock, SealedBlock, SealedHeader}; + use reth_provider::BlockExecutionResult; + + use crate::OpBeaconConsensus; + + fn mock_tx(nonce: u64) -> OpTransactionSigned { + let tx = TxEip7702 { + chain_id: 1u64, + nonce, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + input: Bytes::from(vec![1, 2]), + access_list: Default::default(), + authorization_list: Default::default(), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature) + } + + #[test] + fn test_block_blob_gas_used_validation_isthmus() { + let chain_spec = OpChainSpecBuilder::default() + .isthmus_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let header = Header { + base_fee_per_gas: Some(1337), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + timestamp: u64::MAX, + ..Default::default() + }; + let body = BlockBody { + transactions: vec![transaction], + ommers: vec![], + withdrawals: Some(Withdrawals::default()), + }; + + let block = SealedBlock::seal_slow(alloy_consensus::Block { header, body }); + + // validate blob, it should pass blob gas used validation + let pre_execution = beacon_consensus.validate_block_pre_execution(&block); + + assert!(pre_execution.is_ok()); + } + + #[test] + fn test_block_blob_gas_used_validation_failure_isthmus() { + let chain_spec = OpChainSpecBuilder::default() + .isthmus_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let header = Header { + base_fee_per_gas: Some(1337), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(10), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + 
&transaction, + )), + timestamp: u64::MAX, + ..Default::default() + }; + let body = BlockBody { + transactions: vec![transaction], + ommers: vec![], + withdrawals: Some(Withdrawals::default()), + }; + + let block = SealedBlock::seal_slow(alloy_consensus::Block { header, body }); + + // validate blob, it should fail blob gas used validation + let pre_execution = beacon_consensus.validate_block_pre_execution(&block); + + assert!(pre_execution.is_err()); + assert_eq!( + pre_execution.unwrap_err(), + ConsensusError::BlobGasUsedDiff(GotExpected { got: 10, expected: 0 }) + ); + } + + #[test] + fn test_block_blob_gas_used_validation_jovian() { + const BLOB_GAS_USED: u64 = 1000; + const GAS_USED: u64 = 10; + + let chain_spec = OpChainSpecBuilder::default() + .jovian_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt { + status: Eip658Value::success(), + cumulative_gas_used: GAS_USED, + logs: vec![], + }); + + let header = Header { + base_fee_per_gas: Some(1337), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(BLOB_GAS_USED), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + timestamp: u64::MAX, + gas_used: GAS_USED, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref( + &receipt.with_bloom_ref(), + )), + logs_bloom: receipt.bloom(), + ..Default::default() + }; + let body = BlockBody { + transactions: vec![transaction], + ommers: vec![], + withdrawals: Some(Withdrawals::default()), + }; + + let block = SealedBlock::seal_slow(alloy_consensus::Block { header, body }); + + let result = BlockExecutionResult:: { + blob_gas_used: BLOB_GAS_USED, + receipts: vec![receipt], + requests: Requests::default(), + gas_used: GAS_USED, + }; + + // validate blob, it should pass blob gas used validation + let pre_execution = beacon_consensus.validate_block_pre_execution(&block); + + assert!(pre_execution.is_ok()); + + let block = RecoveredBlock::new_sealed(block, vec![Address::default()]); + + let post_execution = as FullConsensus>::validate_block_post_execution( + &beacon_consensus, + &block, + &result + ); + + // validate blob, it should pass blob gas used validation + assert!(post_execution.is_ok()); + } + + #[test] + fn test_block_blob_gas_used_validation_failure_jovian() { + const BLOB_GAS_USED: u64 = 1000; + const GAS_USED: u64 = 10; + + let chain_spec = OpChainSpecBuilder::default() + .jovian_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt { + status: Eip658Value::success(), + cumulative_gas_used: GAS_USED, + logs: vec![], + }); + + let header = Header { + base_fee_per_gas: Some(1337), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(BLOB_GAS_USED), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: GAS_USED, + timestamp: u64::MAX, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + ..Default::default() + }; + let body = BlockBody { + transactions: vec![transaction], + ommers: vec![], + withdrawals: Some(Withdrawals::default()), + 
}; + + let block = SealedBlock::seal_slow(alloy_consensus::Block { header, body }); + + let result = BlockExecutionResult:: { + blob_gas_used: BLOB_GAS_USED + 1, + receipts: vec![receipt], + requests: Requests::default(), + gas_used: GAS_USED, + }; + + // validate blob, it should pass blob gas used validation + let pre_execution = beacon_consensus.validate_block_pre_execution(&block); + + assert!(pre_execution.is_ok()); + + let block = RecoveredBlock::new_sealed(block, vec![Address::default()]); + + let post_execution = as FullConsensus>::validate_block_post_execution( + &beacon_consensus, + &block, + &result + ); + + // validate blob, it should fail blob gas used validation post execution. + assert!(post_execution.is_err()); + assert_eq!( + post_execution.unwrap_err(), + ConsensusError::BlobGasUsedDiff(GotExpected { + got: BLOB_GAS_USED + 1, + expected: BLOB_GAS_USED, + }) + ); + } + + #[test] + fn test_header_min_base_fee_validation() { + const MIN_BASE_FEE: u64 = 1000; + + let chain_spec = OpChainSpecBuilder::default() + .jovian_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt { + status: Eip658Value::success(), + cumulative_gas_used: 0, + logs: vec![], + }); + + let parent = Header { + number: 0, + base_fee_per_gas: Some(MIN_BASE_FEE / 10), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX - 1, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + extra_data: encode_jovian_extra_data( + Default::default(), + BaseFeeParams::optimism(), + MIN_BASE_FEE, + ) + .unwrap(), + ..Default::default() + }; + let parent = SealedHeader::seal_slow(parent); + + let header = Header { + number: 1, + base_fee_per_gas: Some(MIN_BASE_FEE), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + parent_hash: parent.hash(), + ..Default::default() + }; + let header = SealedHeader::seal_slow(header); + + let result = beacon_consensus.validate_header_against_parent(&header, &parent); + + assert!(result.is_ok()); + } + + #[test] + fn test_header_min_base_fee_validation_failure() { + const MIN_BASE_FEE: u64 = 1000; + + let chain_spec = OpChainSpecBuilder::default() + .jovian_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt { + status: Eip658Value::success(), + cumulative_gas_used: 0, + logs: vec![], + }); + + let parent = Header { + number: 0, + base_fee_per_gas: Some(MIN_BASE_FEE / 10), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + 
timestamp: u64::MAX - 1, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + extra_data: encode_jovian_extra_data( + Default::default(), + BaseFeeParams::optimism(), + MIN_BASE_FEE, + ) + .unwrap(), + ..Default::default() + }; + let parent = SealedHeader::seal_slow(parent); + + let header = Header { + number: 1, + base_fee_per_gas: Some(MIN_BASE_FEE - 1), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + parent_hash: parent.hash(), + ..Default::default() + }; + let header = SealedHeader::seal_slow(header); + + let result = beacon_consensus.validate_header_against_parent(&header, &parent); + + assert!(result.is_err()); + assert_eq!( + result.unwrap_err(), + ConsensusError::BaseFeeDiff(GotExpected { + got: MIN_BASE_FEE - 1, + expected: MIN_BASE_FEE, + }) + ); + } + + #[test] + fn test_header_da_footprint_validation() { + const MIN_BASE_FEE: u64 = 100_000; + const DA_FOOTPRINT: u64 = GAS_LIMIT - 1; + const GAS_LIMIT: u64 = 100_000_000; + + let chain_spec = OpChainSpecBuilder::default() + .jovian_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt { + status: Eip658Value::success(), + cumulative_gas_used: 0, + logs: vec![], + }); + + let parent = Header { + number: 0, + base_fee_per_gas: Some(MIN_BASE_FEE), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(DA_FOOTPRINT), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX - 1, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + extra_data: encode_jovian_extra_data( + Default::default(), + BaseFeeParams::optimism(), + MIN_BASE_FEE, + ) + .unwrap(), + gas_limit: GAS_LIMIT, + ..Default::default() + }; + let parent = SealedHeader::seal_slow(parent); + + let header = Header { + number: 1, + base_fee_per_gas: Some(MIN_BASE_FEE + MIN_BASE_FEE / 10), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(DA_FOOTPRINT), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + parent_hash: parent.hash(), + ..Default::default() + }; + let header = SealedHeader::seal_slow(header); + + let result = beacon_consensus.validate_header_against_parent(&header, &parent); + + assert!(result.is_ok()); + } + + #[test] + fn test_header_isthmus_validation() { + const MIN_BASE_FEE: u64 = 100_000; + const DA_FOOTPRINT: u64 = GAS_LIMIT - 1; + const GAS_LIMIT: u64 = 100_000_000; + + let chain_spec = OpChainSpecBuilder::default() + .isthmus_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = 
OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt { + status: Eip658Value::success(), + cumulative_gas_used: 0, + logs: vec![], + }); + + let parent = Header { + number: 0, + base_fee_per_gas: Some(MIN_BASE_FEE), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(DA_FOOTPRINT), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX - 1, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + extra_data: encode_holocene_extra_data(Default::default(), BaseFeeParams::optimism()) + .unwrap(), + gas_limit: GAS_LIMIT, + ..Default::default() + }; + let parent = SealedHeader::seal_slow(parent); + + let header = Header { + number: 1, + base_fee_per_gas: Some(MIN_BASE_FEE - 2 * MIN_BASE_FEE / 100), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(DA_FOOTPRINT), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + logs_bloom: receipt.bloom(), + parent_hash: parent.hash(), + ..Default::default() + }; + let header = SealedHeader::seal_slow(header); + + let result = beacon_consensus.validate_header_against_parent(&header, &parent); + + assert!(result.is_err()); + assert_eq!( + result.unwrap_err(), + ConsensusError::BlobGasUsedDiff(GotExpected { got: DA_FOOTPRINT, expected: 0 }) + ); + } +} diff --git a/crates/optimism/consensus/src/validation/isthmus.rs b/crates/optimism/consensus/src/validation/isthmus.rs index 64d45eae5c8..4703e10869e 100644 --- a/crates/optimism/consensus/src/validation/isthmus.rs +++ b/crates/optimism/consensus/src/validation/isthmus.rs @@ -4,7 +4,6 @@ use crate::OpConsensusError; use alloy_consensus::BlockHeader; use alloy_primitives::{address, Address, B256}; use alloy_trie::EMPTY_ROOT_HASH; -use core::fmt::Debug; use reth_storage_api::{errors::ProviderResult, StorageRootProvider}; use reth_trie_common::HashedStorage; use revm::database::BundleState; @@ -72,7 +71,7 @@ pub fn verify_withdrawals_root( ) -> Result<(), OpConsensusError> where DB: StorageRootProvider, - H: BlockHeader + Debug, + H: BlockHeader, { let header_storage_root = header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?; @@ -110,7 +109,7 @@ pub fn verify_withdrawals_root_prehashed( ) -> Result<(), OpConsensusError> where DB: StorageRootProvider, - H: BlockHeader + core::fmt::Debug, + H: BlockHeader, { let header_storage_root = header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?; diff --git a/crates/optimism/consensus/src/validation/mod.rs b/crates/optimism/consensus/src/validation/mod.rs index 2dd4cea0904..c17e8429c81 100644 --- a/crates/optimism/consensus/src/validation/mod.rs +++ b/crates/optimism/consensus/src/validation/mod.rs @@ -4,6 +4,7 @@ pub mod canyon; pub mod isthmus; // Re-export the decode_holocene_base_fee function for compatibility +use reth_execution_types::BlockExecutionResult; pub use reth_optimism_chainspec::decode_holocene_base_fee; use crate::proof::calculate_receipt_root_optimism; @@ -87,8 +88,24 @@ where pub fn validate_block_post_execution( header: impl BlockHeader, chain_spec: impl OpHardforks, - receipts: &[R], + result: &BlockExecutionResult, ) -> Result<(), 
ConsensusError> { + // Validate that the blob gas used is present and correctly computed if Jovian is active. + if chain_spec.is_jovian_active_at_timestamp(header.timestamp()) { + let computed_blob_gas_used = result.blob_gas_used; + let header_blob_gas_used = + header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + + if computed_blob_gas_used != header_blob_gas_used { + return Err(ConsensusError::BlobGasUsedDiff(GotExpected { + got: computed_blob_gas_used, + expected: header_blob_gas_used, + })); + } + } + + let receipts = &result.receipts; + // Before Byzantium, receipts contained state root that would mean that expensive // operation as hashing that is required for state root got calculated in every // transaction This was replaced with is_success flag. @@ -176,19 +193,24 @@ fn compare_receipts_root_and_logs_bloom( mod tests { use super::*; use alloy_consensus::Header; + use alloy_eips::eip7685::Requests; use alloy_primitives::{b256, hex, Bytes, U256}; use op_alloy_consensus::OpTxEnvelope; use reth_chainspec::{BaseFeeParams, ChainSpec, EthChainSpec, ForkCondition, Hardfork}; use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA}; use reth_optimism_forks::{OpHardfork, BASE_SEPOLIA_HARDFORKS}; + use reth_optimism_primitives::OpReceipt; use std::sync::Arc; - const JOVIAN_TIMESTAMP: u64 = 1900000000; + const HOLOCENE_TIMESTAMP: u64 = 1700000000; + const ISTHMUS_TIMESTAMP: u64 = 1750000000; + const JOVIAN_TIMESTAMP: u64 = 1800000000; const BLOCK_TIME_SECONDS: u64 = 2; fn holocene_chainspec() -> Arc { let mut hardforks = BASE_SEPOLIA_HARDFORKS.clone(); - hardforks.insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); + hardforks + .insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(HOLOCENE_TIMESTAMP)); Arc::new(OpChainSpec { inner: ChainSpec { chain: BASE_SEPOLIA.inner.chain, @@ -208,7 +230,7 @@ mod tests { chainspec .inner .hardforks - .insert(OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1800000000)); + .insert(OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(ISTHMUS_TIMESTAMP)); chainspec } @@ -217,7 +239,7 @@ mod tests { chainspec .inner .hardforks - .insert(OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(1900000000)); + .insert(OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(JOVIAN_TIMESTAMP)); chainspec } @@ -245,14 +267,14 @@ mod tests { base_fee_per_gas: Some(1), gas_used: 15763614, gas_limit: 144000000, - timestamp: 1800000003, + timestamp: HOLOCENE_TIMESTAMP + 3, extra_data: Bytes::from_static(&[0, 0, 0, 0, 0, 0, 0, 0, 0]), ..Default::default() }; let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee( &op_chain_spec, &parent, - 1800000005, + HOLOCENE_TIMESTAMP + 5, ); assert_eq!( base_fee.unwrap(), @@ -267,14 +289,14 @@ mod tests { gas_used: 15763614, gas_limit: 144000000, extra_data: Bytes::from_static(&[0, 0, 0, 0, 8, 0, 0, 0, 8]), - timestamp: 1800000003, + timestamp: HOLOCENE_TIMESTAMP + 3, ..Default::default() }; let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee( &holocene_chainspec(), &parent, - 1800000005, + HOLOCENE_TIMESTAMP + 5, ); assert_eq!( base_fee.unwrap(), @@ -502,4 +524,52 @@ mod tests { body.withdrawals.take(); validate_body_against_header_op(&chainspec, &body, &header).unwrap_err(); } + + #[test] + fn test_jovian_blob_gas_used_validation() { + const BLOB_GAS_USED: u64 = 1000; + const GAS_USED: u64 = 5000; + + let chainspec = jovian_chainspec(); + let header = Header { + timestamp: JOVIAN_TIMESTAMP, + blob_gas_used: Some(BLOB_GAS_USED), + 
..Default::default() + }; + + let result = BlockExecutionResult:: { + blob_gas_used: BLOB_GAS_USED, + receipts: vec![], + requests: Requests::default(), + gas_used: GAS_USED, + }; + validate_block_post_execution(&header, &chainspec, &result).unwrap(); + } + + #[test] + fn test_jovian_blob_gas_used_validation_mismatched() { + const BLOB_GAS_USED: u64 = 1000; + const GAS_USED: u64 = 5000; + + let chainspec = jovian_chainspec(); + let header = Header { + timestamp: JOVIAN_TIMESTAMP, + blob_gas_used: Some(BLOB_GAS_USED + 1), + ..Default::default() + }; + + let result = BlockExecutionResult:: { + blob_gas_used: BLOB_GAS_USED, + receipts: vec![], + requests: Requests::default(), + gas_used: GAS_USED, + }; + assert_eq!( + validate_block_post_execution(&header, &chainspec, &result), + Err(ConsensusError::BlobGasUsedDiff(GotExpected { + got: BLOB_GAS_USED, + expected: BLOB_GAS_USED + 1, + })) + ); + } } diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index f2dce0a9ba0..d7bbe29330f 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -76,4 +76,4 @@ std = [ "reth-storage-errors/std", ] portable = ["reth-revm/portable"] -rpc = ["reth-rpc-eth-api"] +rpc = ["reth-rpc-eth-api", "reth-optimism-primitives/serde", "reth-optimism-primitives/reth-codec"] diff --git a/crates/optimism/evm/src/build.rs b/crates/optimism/evm/src/build.rs index 087b7f10046..b8fab18833c 100644 --- a/crates/optimism/evm/src/build.rs +++ b/crates/optimism/evm/src/build.rs @@ -14,6 +14,7 @@ use reth_optimism_consensus::{calculate_receipt_root_no_memo_optimism, isthmus}; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::DepositReceipt; use reth_primitives_traits::{Receipt, SignedTransaction}; +use revm::context::Block as _; /// Block builder for Optimism. #[derive(Debug)] @@ -45,7 +46,7 @@ impl OpBlockAssembler { evm_env, execution_ctx: ctx, transactions, - output: BlockExecutionResult { receipts, gas_used, .. }, + output: BlockExecutionResult { receipts, gas_used, blob_gas_used, requests: _ }, bundle_state, state_root, state_provider, @@ -53,7 +54,7 @@ impl OpBlockAssembler { } = input; let ctx = ctx.into(); - let timestamp = evm_env.block_env.timestamp.saturating_to(); + let timestamp = evm_env.block_env.timestamp().saturating_to(); let transactions_root = proofs::calculate_transaction_root(&transactions); let receipts_root = @@ -79,7 +80,11 @@ impl OpBlockAssembler { }; let (excess_blob_gas, blob_gas_used) = - if self.chain_spec.is_ecotone_active_at_timestamp(timestamp) { + if self.chain_spec.is_jovian_active_at_timestamp(timestamp) { + // In jovian, we're using the blob gas used field to store the current da + // footprint's value. 
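The assembler change above selects the header's `(excess_blob_gas, blob_gas_used)` pair by fork: from Jovian, `excess_blob_gas` stays zero and `blob_gas_used` is repurposed to hold the DA footprint reported by execution; Ecotone pins both to zero; earlier blocks omit them. A small sketch of that selection, with boolean fork flags in place of the chain-spec calls:

```rust
// Sketch of the header field selection described above.
fn blob_gas_header_fields_sketch(
    jovian_active: bool,
    ecotone_active: bool,
    da_footprint: u64,
) -> (Option<u64>, Option<u64>) {
    if jovian_active {
        // (excess_blob_gas, blob_gas_used): Jovian stores the DA footprint in the latter.
        (Some(0), Some(da_footprint))
    } else if ecotone_active {
        (Some(0), Some(0))
    } else {
        (None, None)
    }
}
```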
+ (Some(0), Some(*blob_gas_used)) + } else if self.chain_spec.is_ecotone_active_at_timestamp(timestamp) { (Some(0), Some(0)) } else { (None, None) @@ -88,19 +93,19 @@ impl OpBlockAssembler { let header = Header { parent_hash: ctx.parent_hash, ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: evm_env.block_env.beneficiary, + beneficiary: evm_env.block_env.beneficiary(), state_root, transactions_root, receipts_root, withdrawals_root, logs_bloom, timestamp, - mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(), + mix_hash: evm_env.block_env.prevrandao().unwrap_or_default(), nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(evm_env.block_env.basefee), - number: evm_env.block_env.number.saturating_to(), - gas_limit: evm_env.block_env.gas_limit, - difficulty: evm_env.block_env.difficulty, + base_fee_per_gas: Some(evm_env.block_env.basefee()), + number: evm_env.block_env.number().saturating_to(), + gas_limit: evm_env.block_env.gas_limit(), + difficulty: evm_env.block_env.difficulty(), gas_used: *gas_used, extra_data: ctx.extra_data, parent_beacon_block_root: ctx.parent_beacon_block_root, diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index 47ed2853d0a..6ae2a91a6cb 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -1,8 +1,6 @@ pub use alloy_op_evm::{ spec as revm_spec, spec_by_timestamp_after_bedrock as revm_spec_by_timestamp_after_bedrock, }; - -use alloy_consensus::BlockHeader; use revm::primitives::{Address, Bytes, B256}; /// Context relevant for execution of a next block w.r.t OP. @@ -23,7 +21,7 @@ pub struct OpNextBlockEnvAttributes { } #[cfg(feature = "rpc")] -impl reth_rpc_eth_api::helpers::pending_block::BuildPendingEnv +impl reth_rpc_eth_api::helpers::pending_block::BuildPendingEnv for OpNextBlockEnvAttributes { fn build_pending_env(parent: &crate::SealedHeader) -> Self { diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index 9b694243fac..1a8e76c1490 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -38,6 +38,9 @@ pub enum L1BlockInfoError { /// Operator fee constant conversion error #[error("could not convert operator fee constant")] OperatorFeeConstantConversion, + /// DA foootprint gas scalar constant conversion error + #[error("could not convert DA footprint gas scalar constant")] + DaFootprintGasScalarConversion, /// Optimism hardforks not active #[error("Optimism hardforks are not active")] HardforksNotActive, diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index a538c8d8690..2afe6e9d3a2 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -2,7 +2,7 @@ use crate::{error::L1BlockInfoError, revm_spec_by_timestamp_after_bedrock, OpBlockExecutionError}; use alloy_consensus::Transaction; -use alloy_primitives::{hex, U256}; +use alloy_primitives::{hex, U16, U256}; use op_revm::L1BlockInfo; use reth_execution_errors::BlockExecutionError; use reth_optimism_forks::OpHardforks; @@ -14,6 +14,10 @@ const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); /// The function selector of the "setL1BlockValuesIsthmus" function in the `L1Block` contract. const L1_BLOCK_ISTHMUS_SELECTOR: [u8; 4] = hex!("098999be"); +/// The function selector of the "setL1BlockValuesJovian" function in the `L1Block` contract. +/// This is the first 4 bytes of `keccak256("setL1BlockValuesJovian()")`. 
+const L1_BLOCK_JOVIAN_SELECTOR: [u8; 4] = hex!("3db6be2b"); + /// Extracts the [`L1BlockInfo`] from the L2 block. The L1 info transaction is always the first /// transaction in the L2 block. /// @@ -52,11 +56,14 @@ pub fn extract_l1_info_from_tx( /// If the input is shorter than 4 bytes. pub fn parse_l1_info(input: &[u8]) -> Result { // Parse the L1 info transaction into an L1BlockInfo struct, depending on the function selector. - // There are currently 3 variants: + // There are currently 4 variants: + // - Jovian // - Isthmus // - Ecotone // - Bedrock - if input[0..4] == L1_BLOCK_ISTHMUS_SELECTOR { + if input[0..4] == L1_BLOCK_JOVIAN_SELECTOR { + parse_l1_info_tx_jovian(input[4..].as_ref()) + } else if input[0..4] == L1_BLOCK_ISTHMUS_SELECTOR { parse_l1_info_tx_isthmus(input[4..].as_ref()) } else if input[0..4] == L1_BLOCK_ECOTONE_SELECTOR { parse_l1_info_tx_ecotone(input[4..].as_ref()) @@ -88,12 +95,12 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result Result Result { + if data.len() != 174 { + return Err(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::UnexpectedCalldataLength)); + } + + // https://github.com/ethereum-optimism/op-geth/blob/60038121c7571a59875ff9ed7679c48c9f73405d/core/types/rollup_cost.go#L317-L328 + // + // data layout assumed for Ecotone: + // offset type varname + // 0 + // 4 uint32 _basefeeScalar (start offset in this scope) + // 8 uint32 _blobBaseFeeScalar + // 12 uint64 _sequenceNumber, + // 20 uint64 _timestamp, + // 28 uint64 _l1BlockNumber + // 36 uint256 _basefee, + // 68 uint256 _blobBaseFee, + // 100 bytes32 _hash, + // 132 bytes32 _batcherHash, + // 164 uint32 _operatorFeeScalar + // 168 uint64 _operatorFeeConstant + // 176 uint16 _daFootprintGasScalar - Ok(l1block) + let l1_base_fee_scalar = U256::try_from_be_slice(&data[..4]) + .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeScalarConversion))?; + let l1_blob_base_fee_scalar = U256::try_from_be_slice(&data[4..8]).ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeScalarConversion) + })?; + let l1_base_fee = U256::try_from_be_slice(&data[32..64]) + .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeConversion))?; + let l1_blob_base_fee = U256::try_from_be_slice(&data[64..96]) + .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeConversion))?; + let operator_fee_scalar = U256::try_from_be_slice(&data[160..164]).ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeScalarConversion) + })?; + let operator_fee_constant = U256::try_from_be_slice(&data[164..172]).ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeConstantConversion) + })?; + let da_footprint_gas_scalar: u16 = U16::try_from_be_slice(&data[172..174]) + .ok_or({ + OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::DaFootprintGasScalarConversion) + })? 
+ .to(); + + Ok(L1BlockInfo { + l1_base_fee, + l1_base_fee_scalar, + l1_blob_base_fee: Some(l1_blob_base_fee), + l1_blob_base_fee_scalar: Some(l1_blob_base_fee_scalar), + operator_fee_scalar: Some(operator_fee_scalar), + operator_fee_constant: Some(operator_fee_constant), + da_footprint_gas_scalar: Some(da_footprint_gas_scalar), + ..Default::default() + }) } /// An extension trait for [`L1BlockInfo`] that allows us to calculate the L1 cost of a transaction @@ -276,6 +354,7 @@ mod tests { use super::*; use alloy_consensus::{Block, BlockBody}; use alloy_eips::eip2718::Decodable2718; + use alloy_primitives::keccak256; use reth_optimism_chainspec::OP_MAINNET; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::OpTransactionSigned; @@ -302,6 +381,12 @@ mod tests { assert_eq!(l1_info.l1_blob_base_fee_scalar, None); } + #[test] + fn test_verify_set_jovian() { + let hash = &keccak256("setL1BlockValuesJovian()")[..4]; + assert_eq!(hash, L1_BLOCK_JOVIAN_SELECTOR) + } + #[test] fn sanity_l1_block_ecotone() { // rig @@ -402,4 +487,33 @@ mod tests { assert_eq!(l1_block_info.operator_fee_scalar, operator_fee_scalar); assert_eq!(l1_block_info.operator_fee_constant, operator_fee_constant); } + + #[test] + fn parse_l1_info_jovian() { + // L1 block info from a devnet with Isthmus activated + const DATA: &[u8] = &hex!( + "3db6be2b00000558000c5fc500000000000000030000000067a9f765000000000000002900000000000000000000000000000000000000000000000000000000006a6d09000000000000000000000000000000000000000000000000000000000000000172fcc8e8886636bdbe96ba0e4baab67ea7e7811633f52b52e8cf7a5123213b6f000000000000000000000000d3f2c5afb2d76f5579f326b0cd7da5f5a4126c3500004e2000000000000001f4dead" + ); + + // expected l1 block info verified against expected l1 fee and operator fee for tx. 
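+        // Trailing bytes of DATA encode operatorFeeScalar = 0x00004e20 (20000),
+        // operatorFeeConstant = 0x00000000000001f4 (500) and daFootprintGasScalar = 0xdead,
+        // matching the expected values below.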
+ let l1_base_fee = U256::from(6974729); + let l1_base_fee_scalar = U256::from(1368); + let l1_blob_base_fee = Some(U256::from(1)); + let l1_blob_base_fee_scalar = Some(U256::from(810949)); + let operator_fee_scalar = Some(U256::from(20000)); + let operator_fee_constant = Some(U256::from(500)); + let da_footprint_gas_scalar: Option = Some(U16::from(0xdead).to()); + + // test + + let l1_block_info = parse_l1_info(DATA).unwrap(); + + assert_eq!(l1_block_info.l1_base_fee, l1_base_fee); + assert_eq!(l1_block_info.l1_base_fee_scalar, l1_base_fee_scalar); + assert_eq!(l1_block_info.l1_blob_base_fee, l1_blob_base_fee); + assert_eq!(l1_block_info.l1_blob_base_fee_scalar, l1_blob_base_fee_scalar); + assert_eq!(l1_block_info.operator_fee_scalar, operator_fee_scalar); + assert_eq!(l1_block_info.operator_fee_constant, operator_fee_constant); + assert_eq!(l1_block_info.da_footprint_gas_scalar, da_footprint_gas_scalar); + } } diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 2d598b94501..e5df16ee2e7 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -15,7 +15,7 @@ use alloc::sync::Arc; use alloy_consensus::{BlockHeader, Header}; use alloy_eips::Decodable2718; use alloy_evm::{EvmFactory, FromRecoveredTx, FromTxWithEncoded}; -use alloy_op_evm::block::receipt_builder::OpReceiptBuilder; +use alloy_op_evm::block::{receipt_builder::OpReceiptBuilder, OpTxEnv}; use alloy_primitives::U256; use core::fmt::Debug; use op_alloy_consensus::EIP1559ParamError; @@ -131,9 +131,11 @@ where EvmF: EvmFactory< Tx: FromRecoveredTx + FromTxWithEncoded - + TransactionEnv, + + TransactionEnv + + OpTxEnv, Precompiles = PrecompilesMap, Spec = OpSpecId, + BlockEnv = BlockEnv, > + Debug, Self: Send + Sync + Unpin + Clone + 'static, { diff --git a/crates/optimism/flashblocks/Cargo.toml b/crates/optimism/flashblocks/Cargo.toml index 532cd4d6962..977e28d37e1 100644 --- a/crates/optimism/flashblocks/Cargo.toml +++ b/crates/optimism/flashblocks/Cargo.toml @@ -16,17 +16,17 @@ reth-optimism-primitives = { workspace = true, features = ["serde"] } reth-optimism-evm.workspace = true reth-chain-state = { workspace = true, features = ["serde"] } reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-engine-primitives = { workspace = true, features = ["std"] } reth-execution-types = { workspace = true, features = ["serde"] } reth-evm.workspace = true reth-revm.workspace = true reth-optimism-payload-builder.workspace = true reth-rpc-eth-types.workspace = true reth-errors.workspace = true +reth-payload-primitives.workspace = true reth-storage-api.workspace = true -reth-node-api.workspace = true reth-tasks.workspace = true reth-metrics.workspace = true -reth-trie.workspace = true # alloy alloy-eips = { workspace = true, features = ["serde"] } diff --git a/crates/optimism/flashblocks/src/consensus.rs b/crates/optimism/flashblocks/src/consensus.rs index 353eddbf4cc..60314d2f6c8 100644 --- a/crates/optimism/flashblocks/src/consensus.rs +++ b/crates/optimism/flashblocks/src/consensus.rs @@ -1,7 +1,8 @@ use crate::FlashBlockCompleteSequenceRx; use alloy_primitives::B256; -use reth_node_api::{ConsensusEngineHandle, EngineApiMessageVersion}; +use reth_engine_primitives::ConsensusEngineHandle; use reth_optimism_payload_builder::OpPayloadTypes; +use reth_payload_primitives::EngineApiMessageVersion; use ringbuffer::{AllocRingBuffer, RingBuffer}; use tracing::warn; diff --git a/crates/optimism/flashblocks/src/lib.rs b/crates/optimism/flashblocks/src/lib.rs index 
e818e9cb538..7220f443cc1 100644 --- a/crates/optimism/flashblocks/src/lib.rs +++ b/crates/optimism/flashblocks/src/lib.rs @@ -1,10 +1,21 @@ //! A downstream integration of Flashblocks. +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +use reth_primitives_traits::NodePrimitives; +use std::sync::Arc; + pub use payload::{ ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashBlock, FlashBlockDecoder, Metadata, }; -pub use service::FlashBlockService; +pub use service::{FlashBlockBuildInfo, FlashBlockService}; pub use ws::{WsConnect, WsFlashBlockStream}; mod consensus; @@ -12,7 +23,7 @@ pub use consensus::FlashBlockConsensusClient; mod payload; pub use payload::PendingFlashBlock; mod sequence; -pub use sequence::FlashBlockCompleteSequence; +pub use sequence::{FlashBlockCompleteSequence, FlashBlockPendingSequence}; mod service; mod worker; @@ -28,3 +39,38 @@ pub type PendingBlockRx = tokio::sync::watch::Receiver; + +/// Receiver of received [`FlashBlock`]s from the (websocket) subscription. +/// +/// [`FlashBlock`]: crate::FlashBlock +pub type FlashBlockRx = tokio::sync::broadcast::Receiver>; + +/// Receiver that signals whether a [`FlashBlock`] is currently being built. +pub type InProgressFlashBlockRx = tokio::sync::watch::Receiver>; + +/// Container for all flashblocks-related listeners. +/// +/// Groups together the channels for flashblock-related updates. +#[derive(Debug)] +pub struct FlashblocksListeners { + /// Receiver of the most recent executed [`PendingFlashBlock`] built out of [`FlashBlock`]s. + pub pending_block_rx: PendingBlockRx, + /// Subscription channel of the complete sequences of [`FlashBlock`]s built. + pub flashblocks_sequence: tokio::sync::broadcast::Sender, + /// Receiver that signals whether a [`FlashBlock`] is currently being built. + pub in_progress_rx: InProgressFlashBlockRx, + /// Subscription channel for received flashblocks from the (websocket) connection. + pub received_flashblocks: tokio::sync::broadcast::Sender>, +} + +impl FlashblocksListeners { + /// Creates a new [`FlashblocksListeners`] with the given channels. 
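+    ///
+    /// Wiring sketch (illustrative only; assumes a running `FlashBlockService` named `service`
+    /// and an already obtained `PendingBlockRx` handle `pending_rx`):
+    /// `FlashblocksListeners::new(pending_rx, service.block_sequence_broadcaster().clone(),
+    /// service.subscribe_in_progress(), service.flashblocks_broadcaster().clone())`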
+ pub const fn new( + pending_block_rx: PendingBlockRx, + flashblocks_sequence: tokio::sync::broadcast::Sender, + in_progress_rx: InProgressFlashBlockRx, + received_flashblocks: tokio::sync::broadcast::Sender>, + ) -> Self { + Self { pending_block_rx, flashblocks_sequence, in_progress_rx, received_flashblocks } + } +} diff --git a/crates/optimism/flashblocks/src/payload.rs b/crates/optimism/flashblocks/src/payload.rs index f7d8a38c964..7469538ee3b 100644 --- a/crates/optimism/flashblocks/src/payload.rs +++ b/crates/optimism/flashblocks/src/payload.rs @@ -3,9 +3,9 @@ use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{bytes, Address, Bloom, Bytes, B256, U256}; use alloy_rpc_types_engine::PayloadId; use derive_more::Deref; -use reth_node_api::NodePrimitives; use reth_optimism_evm::OpNextBlockEnvAttributes; use reth_optimism_primitives::OpReceipt; +use reth_primitives_traits::NodePrimitives; use reth_rpc_eth_types::PendingBlock; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; @@ -41,6 +41,11 @@ impl FlashBlock { pub fn parent_hash(&self) -> Option { Some(self.base.as_ref()?.parent_hash) } + + /// Returns the receipt for the given transaction hash. + pub fn receipt_by_hash(&self, hash: &B256) -> Option<&OpReceipt> { + self.metadata.receipt_by_hash(hash) + } } /// A trait for decoding flashblocks from bytes. @@ -57,6 +62,7 @@ impl FlashBlockDecoder for () { } /// Provides metadata about the block that may be useful for indexing or analysis. +// Note: this uses mixed camel, snake case: #[derive(Default, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct Metadata { /// The number of the block in the L2 chain. @@ -69,6 +75,13 @@ pub struct Metadata { pub receipts: BTreeMap, } +impl Metadata { + /// Returns the receipt for the given transaction hash. + pub fn receipt_by_hash(&self, hash: &B256) -> Option<&OpReceipt> { + self.receipts.get(hash) + } +} + /// Represents the base configuration of an execution payload that remains constant /// throughout block construction. 
This includes fundamental block properties like /// parent hash, block number, and other header fields that are determined at @@ -168,3 +181,206 @@ impl PendingFlashBlock { self.has_computed_state_root.then_some(self.pending.block().state_root()) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_flashblock_serde_roundtrip() { + let raw = r#"{ + "diff": { + "block_hash": "0x2d902e3fcb5bd57e0bf878cbbda1386e7fb8968d518912d58678a35e58261c46", + "gas_used": "0x2907796", + "logs_bloom": "0x5c21065292452cfcd5175abfee20e796773da578307356043ba4f62692aca01204e8908f97ab9df43f1e9c57f586b1c9a7df8b66ffa7746dfeeb538617fea5eb75ad87f8b6653f597d86814dc5ad6de404e5a48aeffcc4b1e170c2bdbc7a334936c66166ba0faa6517597b676ef65c588342756f280f7d610aa3ed35c5d877449bfacbdb9b40d98c457f974ab264ec40e4edd6e9fab4c0cb794bf75f10ea20dab75a1f9fd1c441d4c365d1476841e8593f1d1b9a1c52919a0fcf9fc5eef2ef82fe80971a72d1cde1cb195db4806058a229e88acfddfe1a1308adb6f69afa3aaf67f4bd49e93e9f9532ea30bd891a8ff08de61fb645bec678db816950b47fcef0", + "receipts_root": "0x2c4203e9aa87258627bf23ab4d5f9d92da30285ea11dc0b3e140a5a8d4b63e26", + "state_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactions": [ + "0x02f8c2822105830b0c58840b677c0f840c93fb5a834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c080a07e8486ab3db9f07588a3f37bd8ffb9b349ba9bb738a2500d78a4583e1e54a6f9a068d0b3c729a6777c81dd49bd0c2dc3a079f0ceed4e778fbfe79176e8b70d68d8", + "0xf90fae820248840158a3c58307291a94bbbfd134e9b44bfb5123898ba36b01de7ab93d9880b90f443087505600000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000012000000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f403000000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f40300000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda0291300000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000000003600000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000044095ea7b30000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e22200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001243b2253c800000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000
0000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000133f4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ff3684f28c67538d4d072c2273400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000007e42213bc0b000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b000000000000000000000000ea758cac6115309b325c582fd0782d79e350217700000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000007041fff991f000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b06d9200000000000000000000000000000000000000000000000000000000000000a0d311e79cd2099f6f1f0607040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000058000000000000000000000000000000000000000000000000000000000000000e4c1fb425e000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000069073bb900000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003c438c9c147000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000002710000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c800000000000000000000000000000000000000000000000000000000000001c400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000002e4945bcec9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000220000000000000000000000000ea758cac6115309b325c582fd0782d79e35021770000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000069073bb90000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000
00000000000208f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca00000000000000000000000000000000000000000000000000000000000000027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008434ee90ca000000000000000000000000f5c4f3dc02c3fb9279495a8fef7b0741da956157000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b1a7880000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e22200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca000000000000000000000000000000000000000000000000000000000000000100000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f403000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002887696e8edbbcbd7306955512ff6f2d8426403eef4762157da3e9c5a89d78f682422da0c8d8b1aa1c9bfd1fe1e4a10c6123caa2fe582294aa5798c54546faa4c09590a9a012a1c78fca9cfefd281c1e44682de3c4420299da5cf2ae498f67d7de7dcf166c", + "0x02f8f582210582a649831db02984026c1a34833d090094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31c4289d066d04f33681f6686155c8243dff963557765630a39bdd8c54e6b7dbe5d4b689e9d536608db03163882cf005f7b5813e41d2fdec75161c8470a410c4c9201000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a088fd1a2b2e5891109afc3845b2c8b0ca76ea8306190dcb80a703a2451f7bab25a0718ae373e36c8ddb2b934ca936ed824db22c0625cfea29be3d408ff41787fc8c", + 
"0x02f9030b822105830536f9830f58ab84025c6b93833d090094c90d989d809e26b2d95fb72eb3288fef72af8c2f80b9029a00000000000069073af31c3d4d0646e102b6f958428cd8ed562efa6efb234f629b5f6ca52a15fd2e33aea76eb64fb04cae81b3e5b769dbdc681dcfd4b7a802a2cacdf1ccb65276a722c67607000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000010206777762d3eb91810b15526c2c9102864d722ef7a9ed24e77271c1dcbf0fdcba68138800000000010698c8f03094a9e65ccedc14c40130e4a5dd0ce14fb12ea58cbeac11f662b458b9271000000000000003045a9ad2bb92b0b3e5c571fdd5125114e04e02be1a0bb80000000001036e55486ea6b8691ba58224f3cae35505add86c372710000000000003681d6e4b0b020656ca04956ddaf76add7ef022f60dac00000000010003028be0fcdd7cf0b53b7b82b8f6ea8586d07c53359f2710000000000006c30e25679d5c77b257ac3a61ad08603b11e7afe77ac9222a5386c27d08b6b6c3ea6000000000010696d4b53a38337a5733179751781178a2613306063c511b78cd02684739288c0a01f400000000000002020d028b2d7a29d2e57efc6405a1dce1437180e3ce27100000000001068a71465e76d736564b0c90f5cf3d0d7b69c461c36f69250ae27dbead147cc8f80bb80000000000000206354def8b7e6b2ee04bf85c00f5e79f173d0b76d5017bab3a90c7ba62e1722699000000000000010245f3ad9e63f629be6e278cc4cf34d3b0a79a4a0b27100000000000010404b154dbcd3c75580382c2353082df4390613d93c627120000000001011500cc7d9c2b460720a48cc7444d7e7dfe43f6050bb80a03000000015c8dec5f0eedf1f8934815ef8fb8cb8198eac6520bb80a030000010286f3dd3b4d08de718d7909b0fdc16f4cbdf94ef527100000000000c001a0d4c12f6433ff6ea0573633364c030d8b46ed5764494f80eb434f27060c39f315a034df82c4ac185a666280d578992feee0c05fc75d93e3e2286726c85fba1bb0a0", + "0x02f8f68221058305c7b3830f4ef58401a5485d832dc6c094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31b777ac6b2082fc399fde92a814114b7896ca0b0503106910ea099d5e32c93bfc0013ed2850534c3f8583ab7276414416c0d15ac021126f6cb6ca1ed091ddc01eb01000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a09694b95dc893bed698ede415c188db3530ccc98a01d79bb9f11d783de7dddde9a0275b0165ab21ea0e6f721c624aa2270a3f98276ca0c95381d90e3f9d434b4881", + "0x02f8f682210583034573830f4ef58401a5485d832dc6c094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31c970da8f2adb8bafe6d254ec4428f8342508e169f75e8450f6ff8488813dfa638395e16787966f01731fddffd0e7352cde07fd24bba283bd27f1828fb2a0c700701000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a00181afe4bedab67692a9c1ff30a89fde6b3d3c8407a47a2777efcd6bdc0c39d2a022d6a4219e72eebdbc5d31ae998243ccec1b192c5c7c586308ccddb4838cd631", + "0x02f8c1822105830b0cfd830f4ed084013bce1b834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c001a0d87c4e16986db55b8846bccfe7bca824b75216e72d8f92369c46681800285cb2a00ec53251be3c2a0d19884747d123ddb0ada3c0a917b21882e297e95c2294d52a", + 
"0x02f901d58221058306361d830f4240840163efbc8301546194833589fcd6edb6e08f4c7c32d4f71b54bda0291380b90164cf092995000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b00000000000000000000000000b55712de2ce8f93be30d53c03d48ea275cd14d000000000000000000000000000000000000000000000000000000000000003e8000000000000000000000000000000000000000000000000000000006907385e0000000000000000000000000000000000000000000000000000000069073be2bef9866b70d0bb74d8763996eb5967b1b24cd48f7801f94ad80cb49431df6b1d00000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000417c9c2382c6c3f029aa3dcbf1df075366fae7bc9fba7f3729713e0bf4d518951f5340350208db96af23686d9985ce552e3588244456a23ca99ecbcae779ea11e71c00000000000000000000000000000000000000000000000000000000000000c080a0b1090c8c67ca9a49ba3591c72c8851f187bbfc39b1920dff2f6c0157ed1ada39a0265b7f704f4c1b5c2c5ca57f1a4040e1e48878c9ad5f2cca9c4e6669d12989f2", + "0x02f8c1822105830b0c98830f424084013bc18b834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c001a080a96d18ae46b58d9a470846a05b394ab4a49a2e379de1941205684e1ac291f9a01e6d4d2c6bab5bf8b89f1df2d6beb85d9f1b3f3be73ca2b72e4ad2d9da0d12d2", + "0x02f901d48221058231e0830f4240840163efbc8301544d94833589fcd6edb6e08f4c7c32d4f71b54bda0291380b90164cf0929950000000000000000000000001de8dbc2409c4bbf14445b0d404bb894f0c6cff70000000000000000000000008d8fa42584a727488eeb0e29405ad794a105bb9b0000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000006907385d0000000000000000000000000000000000000000000000000000000069073af16b129c414484e011621c44e0b32451fdbd69e63ef4919f427dde08c16cb199b100000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000041ae0a4b618c30f0e5d92d7fe99bb435413b2201711427699fd285f69666396cee76199d4e901cfb298612cb3b8ad06178cefb4136a8bc1be07c01b5fea80e5ec11b00000000000000000000000000000000000000000000000000000000000000c080a0af315068084aae367f00263dbd872908bbb9ceaefd6b792fc48dd357e6bdf8afa01e7f0e5913570394b9648939ef71fc5ac34fe320a2757ec388316731a335e69f", + "0x02f9022f82210583052d0b830f423f84025c5527833d090094c90d989d809e26b2d95fb72eb3288fef72af8c2f80b901be00000000000069073af31cf0f932cecc8c4c6ffffa554a63e8fba251434483ed3903966d2ba5a70121618a1c45bd9ee158192ab8d7e12ce0f447f2848a48aedaa89e0efa8637bb931745de05000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e2000000000000003045a9ad2bb92b0b3e5c571fdd5125114e04e02be1a0bb80000000001036e55486ea6b8691ba58224f3cae35505add86c372710000000000003681d6e4b0b020656ca04956ddaf76add7ef022f60dac0000000001010206777762d3eb91810b15526c2c9102864d722ef7a9ed24e77271c1dcbf0fdcba68138800000000010698c8f03094a9e65ccedc14c40130e4a5dd0ce14fb12ea58cbeac11f662b458b9271000000000000002005554419ccd0293d9383901f461c7c3e0c66e925f0bb80000000001028eb9437532fac8d6a7870f3f887b7978d20355fc271000000000000003035d28f920c9d23100e4a38b2ba2d8ae617c3b261501f4000000000102bc51db8aec659027ae0b0e468c0735418161a7800bb8000000000003dbc6998296caa1652a810dc8d3baf4a8294330f100500000000000c080a040000b130b1759df897a9573691a3d1cafacc6d95d0db1826f275afc30e2ff63a0400a7514f8d5383970c4412205ec8e9c6ca06acea504acabd2d3c36e9cb5003d" + ], + "withdrawals": [], + "withdrawals_root": "0x81864c23f426ad807d66c9fdde33213e1fdbac06c1b751d279901d1ce13670ac" 
+ }, + "index": 10, + "metadata": { + "block_number": 37646058, + "new_account_balances": { + "0x000000000022d473030f116ddee9f6b43ac78ba3": "0x0", + "0x0000000071727de22e5e9d8baf0edac6f37da032": "0x23281e39594556899", + "0x0000f90827f1c53a10cb7a02335b175320002935": "0x0", + "0x000f3df6d732807ef1319fb7b8bb8522d0beac02": "0x0" + }, + "receipts": { + "0x1a766690fd6d0febffc488f12fbd7385c43fbe1e07113a1316f22f176355297e": { + "Legacy": { + "cumulativeGasUsed": "0x2868d76", + "logs": [ + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b2ee6f", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x00000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f4030", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b2ee6f", + "topics": [ + "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x0000000000000000000000000000000000001ff3684f28c67538d4d072c22734" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000044095ea7b30000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x00000000000000000000000000000000000000000000000000000000000133f4", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e2220000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000133f400000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + 
"0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177" + ] + }, + { + "address": "0x8f360baf899845441eccdc46525e26bb8860752a", + "data": "0x00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000001957cc57b7a9959c0000000000000000000000000000000000000000000000001957cc57b7a9959800000000000000000000000000000000000000000000000444e308096a22c339000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000092458cc3a866f04600000000000000000000000000000000000000000000000025f3e27916e84b59000", + "topics": [ + "0x4e1d56f7310a8c32b2267f756b19ba65019b4890068ce114a25009abe54de5ba" + ] + }, + { + "address": "0xba12222222228d8ba445958a75a0704d566bf2c8", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0x2170c741c41531aec20e7c107c24eecfdd15e69c9bb0a8dd37b1840b9e0b207b", + "0x8f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd", + "0x000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "0x000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177", + "0x000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c8" + ] + }, + { + "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c8", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177" + ] + }, + { + "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": 
"0x0000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007e42213bc0b000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b000000000000000000000000ea758cac6115309b325c582fd0782d79e350217700000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000007041fff991f000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b06d9200000000000000000000000000000000000000000000000000000000000000a0d311e79cd2099f6f1f0607040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000058000000000000000000000000000000000000000000000000000000000000000e4c1fb425e000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000069073bb900000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003c438c9c147000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000002710000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c800000000000000000000000000000000000000000000000000000000000001c400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000002e4945bcec9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000220000000000000000000000000ea758cac6115309b325c582fd0782d79e35021770000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000069073bb9000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000208f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca
00000000000000000000000000000000000000000000000000000000000000027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008434ee90ca000000000000000000000000f5c4f3dc02c3fb9279495a8fef7b0741da956157000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b1a7880000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + }, + { + "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x00000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f4030" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e2220000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca000000000000000000000000000000000000000000000000000000000000000100000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f40300000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + } + ], + "status": "0x1" + } + }, + "0x2cd6b4825b5ee40b703c947e15630336dceda97825b70412da54ccc27f484496": { + "Eip1559": { + "cumulativeGasUsed": "0x28cca69", + "logs": [ + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x", + "topics": [ + "0x98de503528ee59b575ef0c0a2576a82497bfc029a5685b209e9ec333479b10a5", + "0x000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b0", + "0xbef9866b70d0bb74d8763996eb5967b1b24cd48f7801f94ad80cb49431df6b1d" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x00000000000000000000000000000000000000000000000000000000000003e8", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b0", + "0x0000000000000000000000000b55712de2ce8f93be30d53c03d48ea275cd14d0" + ] + } + ], + "status": "0x1" + } + } + } + }, + "payload_id": "0x0316ecb1aa1671b5" +}"#; + + let flashblock: FlashBlock = serde_json::from_str(raw).expect("deserialize"); + let serialized = serde_json::to_string(&flashblock).expect("serialize"); + let roundtrip: FlashBlock = serde_json::from_str(&serialized).expect("roundtrip"); + + 
assert_eq!(flashblock, roundtrip); + } +} diff --git a/crates/optimism/flashblocks/src/sequence.rs b/crates/optimism/flashblocks/src/sequence.rs index 087f97db7be..f2363207e38 100644 --- a/crates/optimism/flashblocks/src/sequence.rs +++ b/crates/optimism/flashblocks/src/sequence.rs @@ -1,6 +1,7 @@ use crate::{ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequenceRx}; use alloy_eips::eip2718::WithEncoded; use alloy_primitives::B256; +use alloy_rpc_types_engine::PayloadId; use core::mem; use eyre::{bail, OptionExt}; use reth_primitives_traits::{Recovered, SignedTransaction}; @@ -13,7 +14,7 @@ const FLASHBLOCK_SEQUENCE_CHANNEL_SIZE: usize = 128; /// An ordered B-tree keeping the track of a sequence of [`FlashBlock`]s by their indices. #[derive(Debug)] -pub(crate) struct FlashBlockPendingSequence { +pub struct FlashBlockPendingSequence { /// tracks the individual flashblocks in order /// /// With a blocktime of 2s and flashblock tick-rate of 200ms plus one extra flashblock per new @@ -29,15 +30,23 @@ impl FlashBlockPendingSequence where T: SignedTransaction, { - pub(crate) fn new() -> Self { + /// Create a new pending sequence. + pub fn new() -> Self { // Note: if the channel is full, send will not block but rather overwrite the oldest // messages. Order is preserved. let (tx, _) = broadcast::channel(FLASHBLOCK_SEQUENCE_CHANNEL_SIZE); Self { inner: BTreeMap::new(), block_broadcaster: tx, state_root: None } } + /// Returns the sender half of the [`FlashBlockCompleteSequence`] channel. + pub const fn block_sequence_broadcaster( + &self, + ) -> &broadcast::Sender { + &self.block_broadcaster + } + /// Gets a subscriber to the flashblock sequences produced. - pub(crate) fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { + pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { self.block_broadcaster.subscribe() } @@ -70,7 +79,7 @@ where /// Inserts a new block into the sequence. /// /// A [`FlashBlock`] with index 0 resets the set. - pub(crate) fn insert(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { + pub fn insert(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { if flashblock.index == 0 { trace!(number=%flashblock.block_number(), "Tracking new flashblock sequence"); @@ -81,8 +90,12 @@ where return Ok(()) } - // only insert if we previously received the same block, assume we received index 0 - if self.block_number() == Some(flashblock.metadata.block_number) { + // only insert if we previously received the same block and payload, assume we received + // index 0 + let same_block = self.block_number() == Some(flashblock.metadata.block_number); + let same_payload = self.payload_id() == Some(flashblock.payload_id); + + if same_block && same_payload { trace!(number=%flashblock.block_number(), index = %flashblock.index, block_count = self.inner.len() ,"Received followup flashblock"); self.inner.insert(flashblock.index, PreparedFlashBlock::new(flashblock)?); } else { @@ -93,7 +106,7 @@ where } /// Set state root - pub(crate) const fn set_state_root(&mut self, state_root: Option) { + pub const fn set_state_root(&mut self, state_root: Option) { self.state_root = state_root; } @@ -103,9 +116,7 @@ where /// the sequence /// /// Note: flashblocks start at `index 0`. 
- pub(crate) fn ready_transactions( - &self, - ) -> impl Iterator>> + '_ { + pub fn ready_transactions(&self) -> impl Iterator>> + '_ { self.inner .values() .enumerate() @@ -117,33 +128,49 @@ where } /// Returns the first block number - pub(crate) fn block_number(&self) -> Option { + pub fn block_number(&self) -> Option { Some(self.inner.values().next()?.block().metadata.block_number) } /// Returns the payload base of the first tracked flashblock. - pub(crate) fn payload_base(&self) -> Option { + pub fn payload_base(&self) -> Option { self.inner.values().next()?.block().base.clone() } /// Returns the number of tracked flashblocks. - pub(crate) fn count(&self) -> usize { + pub fn count(&self) -> usize { self.inner.len() } /// Returns the reference to the last flashblock. - pub(crate) fn last_flashblock(&self) -> Option<&FlashBlock> { + pub fn last_flashblock(&self) -> Option<&FlashBlock> { self.inner.last_key_value().map(|(_, b)| &b.block) } /// Returns the current/latest flashblock index in the sequence - pub(crate) fn index(&self) -> Option { + pub fn index(&self) -> Option { Some(self.inner.values().last()?.block().index) } + /// Returns the payload id of the first tracked flashblock in the current sequence. + pub fn payload_id(&self) -> Option { + Some(self.inner.values().next()?.block().payload_id) + } +} + +impl Default for FlashBlockPendingSequence +where + T: SignedTransaction, +{ + fn default() -> Self { + Self::new() + } } /// A complete sequence of flashblocks, often corresponding to a full block. -/// Ensure invariants of a complete flashblocks sequence. +/// +/// Ensures invariants of a complete flashblocks sequence. +/// If this entire sequence of flashblocks was executed on top of latest block, this also includes +/// the computed state root. #[derive(Debug, Clone)] pub struct FlashBlockCompleteSequence { inner: Vec, diff --git a/crates/optimism/flashblocks/src/service.rs b/crates/optimism/flashblocks/src/service.rs index f4cf7f18450..f5d4a4a810d 100644 --- a/crates/optimism/flashblocks/src/service.rs +++ b/crates/optimism/flashblocks/src/service.rs @@ -1,7 +1,8 @@ use crate::{ sequence::FlashBlockPendingSequence, worker::{BuildArgs, FlashBlockBuilder}, - ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequenceRx, PendingFlashBlock, + ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, + InProgressFlashBlockRx, PendingFlashBlock, }; use alloy_eips::eip2718::WithEncoded; use alloy_primitives::B256; @@ -18,10 +19,14 @@ use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskExecutor; use std::{ pin::Pin, + sync::Arc, task::{ready, Context, Poll}, time::Instant, }; -use tokio::{pin, sync::oneshot}; +use tokio::{ + pin, + sync::{oneshot, watch}, +}; use tracing::{debug, trace, warn}; pub(crate) const FB_STATE_ROOT_FROM_INDEX: usize = 9; @@ -38,6 +43,8 @@ pub struct FlashBlockService< rx: S, current: Option>, blocks: FlashBlockPendingSequence, + /// Broadcast channel to forward received flashblocks from the subscription. + received_flashblocks_tx: tokio::sync::broadcast::Sender>, rebuild: bool, builder: FlashBlockBuilder, canon_receiver: CanonStateNotifications, @@ -48,6 +55,9 @@ pub struct FlashBlockService< /// when fb received on top of the same block. Avoid redundant I/O across multiple /// executions within the same block. 
cached_state: Option<(B256, CachedReads)>, + /// Signals when a block build is in progress + in_progress_tx: watch::Sender>, + /// `FlashBlock` service's metrics metrics: FlashBlockServiceMetrics, /// Enable state root calculation from flashblock with index [`FB_STATE_ROOT_FROM_INDEX`] compute_state_root: bool, @@ -73,16 +83,20 @@ where { /// Constructs a new `FlashBlockService` that receives [`FlashBlock`]s from `rx` stream. pub fn new(rx: S, evm_config: EvmConfig, provider: Provider, spawner: TaskExecutor) -> Self { + let (in_progress_tx, _) = watch::channel(None); + let (received_flashblocks_tx, _) = tokio::sync::broadcast::channel(128); Self { rx, current: None, blocks: FlashBlockPendingSequence::new(), + received_flashblocks_tx, canon_receiver: provider.subscribe_to_canonical_state(), builder: FlashBlockBuilder::new(evm_config, provider), rebuild: false, spawner, job: None, cached_state: None, + in_progress_tx, metrics: FlashBlockServiceMetrics::default(), compute_state_root: false, } @@ -94,11 +108,30 @@ where self } + /// Returns the sender half to the received flashblocks. + pub const fn flashblocks_broadcaster( + &self, + ) -> &tokio::sync::broadcast::Sender> { + &self.received_flashblocks_tx + } + + /// Returns the sender half to the flashblock sequence. + pub const fn block_sequence_broadcaster( + &self, + ) -> &tokio::sync::broadcast::Sender { + self.blocks.block_sequence_broadcaster() + } + /// Returns a subscriber to the flashblock sequence. pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { self.blocks.subscribe_block_sequence() } + /// Returns a receiver that signals when a flashblock is being built. + pub fn subscribe_in_progress(&self) -> InProgressFlashBlockRx { + self.in_progress_tx.subscribe() + } + /// Drives the services and sends new blocks to the receiver /// /// Note: this should be spawned @@ -112,6 +145,13 @@ where warn!("Flashblock service has stopped"); } + /// Notifies all subscribers about the received flashblock + fn notify_received_flashblock(&self, flashblock: &FlashBlock) { + if self.received_flashblocks_tx.receiver_count() > 0 { + let _ = self.received_flashblocks_tx.send(Arc::new(flashblock.clone())); + } + } + /// Returns the [`BuildArgs`] made purely out of [`FlashBlock`]s that were received earlier. /// /// Returns `None` if the flashblock have no `base` or the base is not a child block of latest. 
@@ -218,6 +258,8 @@ where }; // reset job this.job.take(); + // No build in progress + let _ = this.in_progress_tx.send(None); if let Some((now, result)) = result { match result { @@ -257,6 +299,7 @@ where while let Poll::Ready(Some(result)) = this.rx.poll_next_unpin(cx) { match result { Ok(flashblock) => { + this.notify_received_flashblock(&flashblock); if flashblock.index == 0 { this.metrics.last_flashblock_length.record(this.blocks.count() as f64); } @@ -293,6 +336,13 @@ where if let Some(args) = this.build_args() { let now = Instant::now(); + let fb_info = FlashBlockBuildInfo { + parent_hash: args.base.parent_hash, + index: args.last_flashblock_index, + block_number: args.base.block_number, + }; + // Signal that a flashblock build has started with build metadata + let _ = this.in_progress_tx.send(Some(fb_info)); let (tx, rx) = oneshot::channel(); let builder = this.builder.clone(); @@ -310,6 +360,17 @@ where } } +/// Information for a flashblock currently built +#[derive(Debug, Clone, Copy)] +pub struct FlashBlockBuildInfo { + /// Parent block hash + pub parent_hash: B256, + /// Flashblock index within the current block's sequence + pub index: u64, + /// Block number of the flashblock being built. + pub block_number: u64, +} + type BuildJob = (Instant, oneshot::Receiver, CachedReads)>>>); diff --git a/crates/optimism/flashblocks/src/worker.rs b/crates/optimism/flashblocks/src/worker.rs index 68071851f43..8cf7777f6a6 100644 --- a/crates/optimism/flashblocks/src/worker.rs +++ b/crates/optimism/flashblocks/src/worker.rs @@ -124,6 +124,7 @@ where recovered_block: block.into(), execution_output: Arc::new(execution_outcome), hashed_state: Arc::new(hashed_state), + trie_updates: Arc::default(), }, ); let pending_flashblock = PendingFlashBlock::new( diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index 85152c59743..202194c63a4 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -18,6 +18,10 @@ extern crate alloc; +use alloy_op_hardforks::{ + BASE_MAINNET_JOVIAN_TIMESTAMP, BASE_SEPOLIA_JOVIAN_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, + OP_SEPOLIA_JOVIAN_TIMESTAMP, +}; // Re-export alloy-op-hardforks types. 
pub use alloy_op_hardforks::{OpHardfork, OpHardforks}; @@ -28,6 +32,7 @@ use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardf /// Dev hardforks pub static DEV_HARDFORKS: LazyLock = LazyLock::new(|| { + const JOVIAN_TIMESTAMP: ForkCondition = ForkCondition::Timestamp(1761840000); ChainHardforks::new(vec![ (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), @@ -58,7 +63,7 @@ pub static DEV_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Granite.boxed(), ForkCondition::Timestamp(0)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(0)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(0)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(0)), + (OpHardfork::Jovian.boxed(), JOVIAN_TIMESTAMP), ]) }); @@ -97,8 +102,7 @@ pub static OP_MAINNET_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1736445601)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1746806401)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1746806401)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update - // timestamp when Jovian is planned */ + (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(OP_MAINNET_JOVIAN_TIMESTAMP)), ]) }); /// Optimism Sepolia list of hardforks. @@ -136,8 +140,7 @@ pub static OP_SEPOLIA_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1744905600)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1744905600)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update - // timestamp when Jovian is planned */ + (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(OP_SEPOLIA_JOVIAN_TIMESTAMP)), ]) }); @@ -176,8 +179,7 @@ pub static BASE_SEPOLIA_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1744905600)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1744905600)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update - // timestamp when Jovian is planned */ + (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(BASE_SEPOLIA_JOVIAN_TIMESTAMP)), ]) }); @@ -216,7 +218,6 @@ pub static BASE_MAINNET_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1736445601)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1746806401)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1746806401)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update - // timestamp when Jovian is planned */ + (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(BASE_MAINNET_JOVIAN_TIMESTAMP)), ]) }); diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 162700ac0ae..0576b3897f5 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -45,7 +45,7 @@ reth-optimism-primitives = { workspace = true, features = ["serde", "serde-binco # revm with required optimism features # Note: this must be kept to ensure all features are properly enabled/forwarded -revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } +revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg", "memory_limit"] } op-revm.workspace = 
true # ethereum @@ -93,7 +93,12 @@ asm-keccak = [ "reth-node-core/asm-keccak", "revm/asm-keccak", ] -js-tracer = ["reth-node-builder/js-tracer"] +js-tracer = [ + "reth-node-builder/js-tracer", + "reth-optimism-node/js-tracer", + "reth-rpc/js-tracer", + "reth-rpc-eth-types/js-tracer", +] test-utils = [ "reth-tasks", "reth-e2e-test-utils", diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index ca4919fe63d..ea010498339 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -39,7 +39,7 @@ use reth_optimism_evm::{OpEvmConfig, OpRethReceiptBuilder}; use reth_optimism_forks::OpHardforks; use reth_optimism_payload_builder::{ builder::OpPayloadTransactions, - config::{OpBuilderConfig, OpDAConfig}, + config::{OpBuilderConfig, OpDAConfig, OpGasLimitConfig}, OpAttributes, OpBuiltPayload, OpPayloadPrimitives, }; use reth_optimism_primitives::{DepositReceipt, OpPrimitives}; @@ -118,6 +118,10 @@ pub struct OpNode { /// /// By default no throttling is applied. pub da_config: OpDAConfig, + /// Gas limit configuration for the OP builder. + /// Used to control the gas limit of the blocks produced by the OP builder (configured by the + /// batcher via the `miner_` API). + pub gas_limit_config: OpGasLimitConfig, } /// A [`ComponentsBuilder`] with its generic arguments set to a stack of Optimism specific builders. @@ -133,7 +137,11 @@ pub type OpNodeComponentBuilder = ComponentsBu impl OpNode { /// Creates a new instance of the Optimism node type. pub fn new(args: RollupArgs) -> Self { - Self { args, da_config: OpDAConfig::default() } + Self { + args, + da_config: OpDAConfig::default(), + gas_limit_config: OpGasLimitConfig::default(), + } } /// Configure the data availability configuration for the OP builder. @@ -142,6 +150,12 @@ impl OpNode { self } + /// Configure the gas limit configuration for the OP builder. + pub fn with_gas_limit_config(mut self, gas_limit_config: OpGasLimitConfig) -> Self { + self.gas_limit_config = gas_limit_config; + self + } + /// Returns the components for the given [`RollupArgs`]. pub fn components(&self) -> OpNodeComponentBuilder where @@ -161,7 +175,9 @@ impl OpNode { ) .executor(OpExecutorBuilder::default()) .payload(BasicPayloadServiceBuilder::new( - OpPayloadBuilder::new(compute_pending_block).with_da_config(self.da_config.clone()), + OpPayloadBuilder::new(compute_pending_block) + .with_da_config(self.da_config.clone()) + .with_gas_limit_config(self.gas_limit_config.clone()), )) .network(OpNetworkBuilder::new(disable_txpool_gossip, !discovery_v4)) .consensus(OpConsensusBuilder::default()) @@ -173,6 +189,7 @@ impl OpNode { .with_sequencer(self.args.sequencer.clone()) .with_sequencer_headers(self.args.sequencer_headers.clone()) .with_da_config(self.da_config.clone()) + .with_gas_limit_config(self.gas_limit_config.clone()) .with_enable_tx_conditional(self.args.enable_tx_conditional) .with_min_suggested_priority_fee(self.args.min_suggested_priority_fee) .with_historical_rpc(self.args.historical_rpc.clone()) @@ -286,6 +303,8 @@ pub struct OpAddOns< pub rpc_add_ons: RpcAddOns, /// Data availability configuration for the OP builder. pub da_config: OpDAConfig, + /// Gas limit configuration for the OP builder. + pub gas_limit_config: OpGasLimitConfig, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP /// network. pub sequencer_url: Option, @@ -306,9 +325,11 @@ where EthB: EthApiBuilder, { /// Creates a new instance from components.
+ #[allow(clippy::too_many_arguments)] pub const fn new( rpc_add_ons: RpcAddOns, da_config: OpDAConfig, + gas_limit_config: OpGasLimitConfig, sequencer_url: Option, sequencer_headers: Vec, historical_rpc: Option, @@ -318,6 +339,7 @@ where Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -368,6 +390,7 @@ where let Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -378,6 +401,7 @@ where OpAddOns::new( rpc_add_ons.with_engine_api(engine_api_builder), da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -394,6 +418,7 @@ where let Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, enable_tx_conditional, @@ -404,6 +429,7 @@ where OpAddOns::new( rpc_add_ons.with_payload_validator(payload_validator_builder), da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -423,6 +449,7 @@ where let Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, enable_tx_conditional, @@ -433,6 +460,7 @@ where OpAddOns::new( rpc_add_ons.with_rpc_middleware(rpc_middleware), da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -496,6 +524,7 @@ where let Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, enable_tx_conditional, @@ -536,7 +565,7 @@ where Box::new(ctx.node.task_executor().clone()), builder, ); - let miner_ext = OpMinerExtApi::new(da_config); + let miner_ext = OpMinerExtApi::new(da_config, gas_limit_config); let sequencer_client = if let Some(url) = sequencer_url { Some(SequencerClient::new_with_headers(url, sequencer_headers).await?) @@ -559,7 +588,7 @@ where modules.merge_if_module_configured(RethRpcModule::Debug, debug_ext.into_rpc())?; // extend the miner namespace if configured in the regular http server - modules.merge_if_module_configured( + modules.add_or_replace_if_module_configured( RethRpcModule::Miner, miner_ext.clone().into_rpc(), )?; @@ -652,6 +681,8 @@ pub struct OpAddOnsBuilder { historical_rpc: Option, /// Data availability configuration for the OP builder. da_config: Option, + /// Gas limit configuration for the OP builder. + gas_limit_config: Option, /// Enable transaction conditionals. enable_tx_conditional: bool, /// Marker for network types. @@ -673,6 +704,7 @@ impl Default for OpAddOnsBuilder { sequencer_headers: Vec::new(), historical_rpc: None, da_config: None, + gas_limit_config: None, enable_tx_conditional: false, min_suggested_priority_fee: 1_000_000, _nt: PhantomData, @@ -702,6 +734,12 @@ impl OpAddOnsBuilder { self } + /// Configure the gas limit configuration for the OP payload builder. + pub fn with_gas_limit_config(mut self, gas_limit_config: OpGasLimitConfig) -> Self { + self.gas_limit_config = Some(gas_limit_config); + self + } + /// Configure if transaction conditional should be enabled. 
pub const fn with_enable_tx_conditional(mut self, enable_tx_conditional: bool) -> Self { self.enable_tx_conditional = enable_tx_conditional; @@ -735,6 +773,7 @@ impl OpAddOnsBuilder { sequencer_headers, historical_rpc, da_config, + gas_limit_config, enable_tx_conditional, min_suggested_priority_fee, tokio_runtime, @@ -747,6 +786,7 @@ impl OpAddOnsBuilder { sequencer_headers, historical_rpc, da_config, + gas_limit_config, enable_tx_conditional, min_suggested_priority_fee, _nt, @@ -779,6 +819,7 @@ impl OpAddOnsBuilder { sequencer_url, sequencer_headers, da_config, + gas_limit_config, enable_tx_conditional, min_suggested_priority_fee, historical_rpc, @@ -802,6 +843,7 @@ impl OpAddOnsBuilder { ) .with_tokio_runtime(tokio_runtime), da_config.unwrap_or_default(), + gas_limit_config.unwrap_or_default(), sequencer_url, sequencer_headers, historical_rpc, @@ -1006,13 +1048,21 @@ pub struct OpPayloadBuilder { /// This data availability configuration specifies constraints for the payload builder /// when assembling payloads pub da_config: OpDAConfig, + /// Gas limit configuration for the OP builder. + /// This is used to configure gas limit related constraints for the payload builder. + pub gas_limit_config: OpGasLimitConfig, } impl OpPayloadBuilder { /// Create a new instance with the given `compute_pending_block` flag and data availability /// config. pub fn new(compute_pending_block: bool) -> Self { - Self { compute_pending_block, best_transactions: (), da_config: OpDAConfig::default() } + Self { + compute_pending_block, + best_transactions: (), + da_config: OpDAConfig::default(), + gas_limit_config: OpGasLimitConfig::default(), + } } /// Configure the data availability configuration for the OP payload builder. @@ -1020,14 +1070,20 @@ impl OpPayloadBuilder { self.da_config = da_config; self } + + /// Configure the gas limit configuration for the OP payload builder. + pub fn with_gas_limit_config(mut self, gas_limit_config: OpGasLimitConfig) -> Self { + self.gas_limit_config = gas_limit_config; + self + } } impl OpPayloadBuilder { /// Configures the type responsible for yielding the transactions that should be included in the /// payload. pub fn with_transactions(self, best_transactions: T) -> OpPayloadBuilder { - let Self { compute_pending_block, da_config, .. } = self; - OpPayloadBuilder { compute_pending_block, best_transactions, da_config } + let Self { compute_pending_block, da_config, gas_limit_config, .. } = self; + OpPayloadBuilder { compute_pending_block, best_transactions, da_config, gas_limit_config } } } @@ -1068,7 +1124,10 @@ where pool, ctx.provider().clone(), evm_config, - OpBuilderConfig { da_config: self.da_config.clone() }, + OpBuilderConfig { + da_config: self.da_config.clone(), + gas_limit_config: self.gas_limit_config.clone(), + }, ) .with_transactions(self.best_transactions.clone()) .set_compute_pending_block(self.compute_pending_block); @@ -1110,7 +1169,8 @@ impl OpNetworkBuilder { Node: FullNodeTypes>, NetworkP: NetworkPrimitives, { - let Self { disable_txpool_gossip, disable_discovery_v4, .. } = self.clone(); + let disable_txpool_gossip = self.disable_txpool_gossip; + let disable_discovery_v4 = self.disable_discovery_v4; let args = &ctx.config().network; let network_builder = ctx .network_config_builder()? 
diff --git a/crates/optimism/node/tests/e2e-testsuite/testsuite.rs b/crates/optimism/node/tests/e2e-testsuite/testsuite.rs index 75dff49c141..b031b3a8266 100644 --- a/crates/optimism/node/tests/e2e-testsuite/testsuite.rs +++ b/crates/optimism/node/tests/e2e-testsuite/testsuite.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{Address, B256}; +use alloy_primitives::{Address, B256, B64}; use eyre::Result; use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_e2e_test_utils::testsuite::{ @@ -53,3 +53,48 @@ async fn test_testsuite_op_assert_mine_block() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn test_testsuite_op_assert_mine_block_isthmus_activated() -> Result<()> { + reth_tracing::init_test_tracing(); + + let setup = Setup::default() + .with_chain_spec(Arc::new( + OpChainSpecBuilder::default() + .chain(OP_MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .isthmus_activated() + .build() + .into(), + )) + .with_network(NetworkSetup::single_node()); + + let test = + TestBuilder::new().with_setup(setup).with_action(AssertMineBlock::::new( + 0, + vec![], + Some(B256::ZERO), + // TODO: refactor once we have actions to generate payload attributes. + OpPayloadAttributes { + payload_attributes: alloy_rpc_types_engine::PayloadAttributes { + timestamp: std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(), + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }, + transactions: None, + no_tx_pool: None, + eip_1559_params: Some(B64::ZERO), + min_base_fee: None, + gas_limit: Some(30_000_000), + }, + )); + + test.run::().await?; + + Ok(()) +} diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index e0437a5f655..b495fdb47ce 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -19,7 +19,7 @@ use reth_optimism_node::{args::RollupArgs, OpEvmConfig, OpExecutorBuilder, OpNod use reth_optimism_primitives::OpPrimitives; use reth_provider::providers::BlockchainProvider; use revm::{ - context::{Cfg, ContextTr, TxEnv}, + context::{BlockEnv, Cfg, ContextTr, TxEnv}, context_interface::result::EVMError, inspector::NoOpInspector, interpreter::interpreter::EthInterpreter, @@ -94,6 +94,7 @@ fn test_setup_custom_precompiles() { EVMError; type HaltReason = OpHaltReason; type Spec = OpSpecId; + type BlockEnv = BlockEnv; type Precompiles = PrecompilesMap; fn create_evm( diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 8d1875fe753..0674ed7cf73 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -25,7 +25,6 @@ reth-payload-builder-primitives.workspace = true reth-payload-util.workspace = true reth-payload-primitives = { workspace = true, features = ["op"] } reth-basic-payload-builder.workspace = true -reth-chain-state.workspace = true reth-payload-validator.workspace = true # op-reth @@ -44,6 +43,7 @@ op-alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true alloy-rpc-types-debug.workspace = true alloy-consensus.workspace = true +alloy-evm.workspace = true # misc derive_more.workspace = true @@ -51,3 +51,4 @@ tracing.workspace = true thiserror.workspace = true sha2.workspace = true serde.workspace = true +either.workspace = true diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 
1d73464e178..1f7c0c00f91 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -1,23 +1,22 @@ //! Optimism payload builder implementation. - use crate::{ - config::{OpBuilderConfig, OpDAConfig}, - error::OpPayloadBuilderError, - payload::OpBuiltPayload, - OpAttributes, OpPayloadBuilderAttributes, OpPayloadPrimitives, + config::OpBuilderConfig, error::OpPayloadBuilderError, payload::OpBuiltPayload, OpAttributes, + OpPayloadBuilderAttributes, OpPayloadPrimitives, }; use alloy_consensus::{BlockHeader, Transaction, Typed2718}; +use alloy_evm::Evm as AlloyEvm; use alloy_primitives::{B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; use reth_basic_payload_builder::*; -use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ + block::BlockExecutorFor, execute::{ BlockBuilder, BlockBuilderOutcome, BlockExecutionError, BlockExecutor, BlockValidationError, }, - ConfigureEvm, Database, Evm, + op_revm::{constants::L1_BLOCK_CONTRACT, L1BlockInfo}, + ConfigureEvm, Database, }; use reth_execution_types::ExecutionOutcome; use reth_optimism_forks::OpHardforks; @@ -28,7 +27,7 @@ use reth_optimism_txpool::{ OpPooledTx, }; use reth_payload_builder_primitives::PayloadBuilderError; -use reth_payload_primitives::{BuildNextEnv, PayloadBuilderAttributes}; +use reth_payload_primitives::{BuildNextEnv, BuiltPayloadExecutedBlock, PayloadBuilderAttributes}; use reth_payload_util::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions}; use reth_primitives_traits::{ HeaderTy, NodePrimitives, SealedHeader, SealedHeaderFor, SignedTransaction, TxTy, @@ -185,7 +184,7 @@ where let ctx = OpPayloadBuilderCtx { evm_config: self.evm_config.clone(), - da_config: self.config.da_config.clone(), + builder_config: self.config.clone(), chain_spec: self.client.chain_spec(), config, cancel, @@ -221,7 +220,7 @@ where let config = PayloadConfig { parent_header: Arc::new(parent), attributes }; let ctx = OpPayloadBuilderCtx { evm_config: self.evm_config.clone(), - da_config: self.config.da_config.clone(), + builder_config: self.config.clone(), chain_spec: self.client.chain_spec(), config, cancel: Default::default(), @@ -340,6 +339,11 @@ impl OpBuilder<'_, Txs> { let mut db = State::builder().with_database(db).with_bundle_update().build(); + // Load the L1 block contract into the database cache. If the L1 block contract is not + // pre-loaded the database will panic when trying to fetch the DA footprint gas + // scalar. + db.load_cache_account(L1_BLOCK_CONTRACT).map_err(BlockExecutionError::other)?; + let mut builder = ctx.block_builder(&mut db)?; // 1. 
apply pre-execution changes @@ -379,13 +383,11 @@ impl OpBuilder<'_, Txs> { ); // create the executed block data - let executed: ExecutedBlockWithTrieUpdates = ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(block), - execution_output: Arc::new(execution_outcome), - hashed_state: Arc::new(hashed_state), - }, - trie: ExecutedTrieUpdates::Present(Arc::new(trie_updates)), + let executed: BuiltPayloadExecutedBlock = BuiltPayloadExecutedBlock { + recovered_block: Arc::new(block), + execution_output: Arc::new(execution_outcome), + hashed_state: either::Either::Left(Arc::new(hashed_state)), + trie_updates: either::Either::Left(Arc::new(trie_updates)), }; let no_tx_pool = ctx.attributes().no_tx_pool(); @@ -511,17 +513,27 @@ impl ExecutionInfo { tx_data_limit: Option, block_data_limit: Option, tx_gas_limit: u64, + da_footprint_gas_scalar: Option, ) -> bool { if tx_data_limit.is_some_and(|da_limit| tx_da_size > da_limit) { return true; } - if block_data_limit - .is_some_and(|da_limit| self.cumulative_da_bytes_used + tx_da_size > da_limit) - { + let total_da_bytes_used = self.cumulative_da_bytes_used.saturating_add(tx_da_size); + + if block_data_limit.is_some_and(|da_limit| total_da_bytes_used > da_limit) { return true; } + // Post Jovian: the tx DA footprint must be less than the block gas limit + if let Some(da_footprint_gas_scalar) = da_footprint_gas_scalar { + let tx_da_footprint = + total_da_bytes_used.saturating_mul(da_footprint_gas_scalar as u64); + if tx_da_footprint > block_gas_limit { + return true; + } + } + self.cumulative_gas_used + tx_gas_limit > block_gas_limit } } @@ -535,8 +547,8 @@ pub struct OpPayloadBuilderCtx< > { /// The type that knows how to perform system calls and configure the evm. pub evm_config: Evm, - /// The DA config for the payload builder - pub da_config: OpDAConfig, + /// Additional config for the builder/sequencer, e.g. DA and gas limit + pub builder_config: OpBuilderConfig, /// The chainspec pub chain_spec: Arc, /// How to build the payload. @@ -567,9 +579,9 @@ where } /// Returns the current fee settings for transactions from the mempool - pub fn best_transaction_attributes(&self, block_env: &BlockEnv) -> BestTransactionsAttributes { + pub fn best_transaction_attributes(&self, block_env: impl Block) -> BestTransactionsAttributes { BestTransactionsAttributes::new( - block_env.basefee, + block_env.basefee(), block_env.blob_gasprice().map(|p| p as u64), ) } @@ -588,7 +600,13 @@ where pub fn block_builder<'a, DB: Database>( &'a self, db: &'a mut State, - ) -> Result + 'a, PayloadBuilderError> { + ) -> Result< + impl BlockBuilder< + Primitives = Evm::Primitives, + Executor: BlockExecutorFor<'a, Evm::BlockExecutorFactory, DB>, + > + 'a, + PayloadBuilderError, + > { self.evm_config .builder_for_next_block( db, @@ -651,29 +669,49 @@ where /// Executes the given best transactions and updates the execution info. /// /// Returns `Ok(Some(())` if the job was cancelled. 
- pub fn execute_best_transactions( + pub fn execute_best_transactions( &self, info: &mut ExecutionInfo, - builder: &mut impl BlockBuilder, + builder: &mut Builder, mut best_txs: impl PayloadTransactions< Transaction: PoolTransaction> + OpPooledTx, >, - ) -> Result, PayloadBuilderError> { - let block_gas_limit = builder.evm_mut().block().gas_limit; - let block_da_limit = self.da_config.max_da_block_size(); - let tx_da_limit = self.da_config.max_da_tx_size(); - let base_fee = builder.evm_mut().block().basefee; + ) -> Result, PayloadBuilderError> + where + Builder: BlockBuilder, + <::Evm as AlloyEvm>::DB: Database, + { + let mut block_gas_limit = builder.evm_mut().block().gas_limit(); + if let Some(gas_limit_config) = self.builder_config.gas_limit_config.gas_limit() { + // If a gas limit is configured, use that limit as target if it's smaller, otherwise use + // the block's actual gas limit. + block_gas_limit = gas_limit_config.min(block_gas_limit); + }; + let block_da_limit = self.builder_config.da_config.max_da_block_size(); + let tx_da_limit = self.builder_config.da_config.max_da_tx_size(); + let base_fee = builder.evm_mut().block().basefee(); while let Some(tx) = best_txs.next(()) { let interop = tx.interop_deadline(); let tx_da_size = tx.estimated_da_size(); let tx = tx.into_consensus(); + + let da_footprint_gas_scalar = self + .chain_spec + .is_jovian_active_at_timestamp(self.attributes().timestamp()) + .then_some( + L1BlockInfo::fetch_da_footprint_gas_scalar(builder.evm_mut().db_mut()).expect( + "DA footprint should always be available from the database post jovian", + ), + ); + if info.is_tx_over_limits( tx_da_size, block_gas_limit, tx_da_limit, block_da_limit, tx.gas_limit(), + da_footprint_gas_scalar, ) { // we can't fit this transaction into the block, so we need to mark it as // invalid which also removes all dependent transaction from diff --git a/crates/optimism/payload/src/config.rs b/crates/optimism/payload/src/config.rs index 469bfc9fe31..c79ee0ece4b 100644 --- a/crates/optimism/payload/src/config.rs +++ b/crates/optimism/payload/src/config.rs @@ -7,12 +7,14 @@ use std::sync::{atomic::AtomicU64, Arc}; pub struct OpBuilderConfig { /// Data availability configuration for the OP builder. pub da_config: OpDAConfig, + /// Gas limit configuration for the OP builder. + pub gas_limit_config: OpGasLimitConfig, } impl OpBuilderConfig { /// Creates a new OP builder configuration with the given data availability configuration. - pub const fn new(da_config: OpDAConfig) -> Self { - Self { da_config } + pub const fn new(da_config: OpDAConfig, gas_limit_config: OpGasLimitConfig) -> Self { + Self { da_config, gas_limit_config } } /// Returns the Data Availability configuration for the OP builder, if it has configured @@ -100,6 +102,40 @@ struct OpDAConfigInner { max_da_block_size: AtomicU64, } +/// Contains the Gas Limit configuration for the OP builder. +/// +/// This type is shareable and can be used to update the Gas Limit configuration for the OP payload +/// builder. +#[derive(Debug, Clone, Default)] +pub struct OpGasLimitConfig { + /// Gas limit for a transaction + /// + /// 0 means use the default gas limit. + gas_limit: Arc, +} + +impl OpGasLimitConfig { + /// Creates a new Gas Limit configuration with the given maximum gas limit. + pub fn new(max_gas_limit: u64) -> Self { + let this = Self::default(); + this.set_gas_limit(max_gas_limit); + this + } + /// Returns the gas limit for a transaction, if any. 
+ pub fn gas_limit(&self) -> Option { + let val = self.gas_limit.load(std::sync::atomic::Ordering::Relaxed); + if val == 0 { + None + } else { + Some(val) + } + } + /// Sets the gas limit for a transaction. 0 means use the default gas limit. + pub fn set_gas_limit(&self, gas_limit: u64) { + self.gas_limit.store(gas_limit, std::sync::atomic::Ordering::Relaxed); + } +} + #[cfg(test)] mod tests { use super::*; @@ -122,4 +158,14 @@ mod tests { let config = OpBuilderConfig::default(); assert!(config.constrained_da_config().is_none()); } + + #[test] + fn test_gas_limit() { + let gas_limit = OpGasLimitConfig::default(); + assert_eq!(gas_limit.gas_limit(), None); + gas_limit.set_gas_limit(50000); + assert_eq!(gas_limit.gas_limit(), Some(50000)); + gas_limit.set_gas_limit(0); + assert_eq!(gas_limit.gas_limit(), None); + } } diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index de1705faa8f..b44f69ddb7e 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -16,12 +16,13 @@ use op_alloy_consensus::{encode_holocene_extra_data, encode_jovian_extra_data, E use op_alloy_rpc_types_engine::{ OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpExecutionPayloadV4, }; -use reth_chain_state::ExecutedBlockWithTrieUpdates; use reth_chainspec::EthChainSpec; use reth_optimism_evm::OpNextBlockEnvAttributes; use reth_optimism_forks::OpHardforks; use reth_payload_builder::{EthPayloadBuilderAttributes, PayloadBuilderError}; -use reth_payload_primitives::{BuildNextEnv, BuiltPayload, PayloadBuilderAttributes}; +use reth_payload_primitives::{ + BuildNextEnv, BuiltPayload, BuiltPayloadExecutedBlock, PayloadBuilderAttributes, +}; use reth_primitives_traits::{ NodePrimitives, SealedBlock, SealedHeader, SignedTransaction, WithEncoded, }; @@ -176,7 +177,7 @@ pub struct OpBuiltPayload { /// Sealed block pub(crate) block: Arc>, /// Block execution data for the payload, if any. 
- pub(crate) executed_block: Option>, + pub(crate) executed_block: Option>, /// The fees of the block pub(crate) fees: U256, } @@ -189,7 +190,7 @@ impl OpBuiltPayload { id: PayloadId, block: Arc>, fees: U256, - executed_block: Option>, + executed_block: Option>, ) -> Self { Self { id, block, fees, executed_block } } @@ -226,7 +227,7 @@ impl BuiltPayload for OpBuiltPayload { self.fees } - fn executed_block(&self) -> Option> { + fn executed_block(&self) -> Option> { self.executed_block.clone() } diff --git a/crates/optimism/reth/Cargo.toml b/crates/optimism/reth/Cargo.toml index 384eca45b8c..d120f04f614 100644 --- a/crates/optimism/reth/Cargo.toml +++ b/crates/optimism/reth/Cargo.toml @@ -126,7 +126,13 @@ rpc = [ "dep:reth-optimism-rpc", ] tasks = ["dep:reth-tasks"] -js-tracer = ["rpc", "reth-rpc/js-tracer"] +js-tracer = [ + "rpc", + "reth-rpc/js-tracer", + "reth-node-builder?/js-tracer", + "reth-optimism-node?/js-tracer", + "reth-rpc-eth-types?/js-tracer", +] network = ["dep:reth-network", "tasks", "dep:reth-network-api", "dep:reth-eth-wire"] provider = ["storage-api", "tasks", "dep:reth-provider", "dep:reth-db", "dep:reth-codecs"] pool = ["dep:reth-transaction-pool"] diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index acbc491f648..5d926caf159 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -28,7 +28,6 @@ reth-node-builder.workspace = true reth-chainspec.workspace = true reth-chain-state.workspace = true reth-rpc-engine-api.workspace = true -reth-rpc-convert.workspace = true # op-reth reth-optimism-evm.workspace = true @@ -84,6 +83,7 @@ metrics.workspace = true [dev-dependencies] reth-optimism-chainspec.workspace = true +alloy-op-hardforks.workspace = true [features] client = [ diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index 40d34ef7cc0..2b5962460d6 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -67,6 +67,9 @@ pub enum OpInvalidTransactionError { /// A deposit transaction halted post-regolith #[error("deposit transaction halted after regolith")] HaltedDepositPostRegolith, + /// The encoded transaction was missing during evm execution. + #[error("missing enveloped transaction bytes")] + MissingEnvelopedTx, /// Transaction conditional errors. 
#[error(transparent)] TxConditionalErr(#[from] TxConditionalErr), @@ -76,7 +79,8 @@ impl From for jsonrpsee_types::error::ErrorObject<'st fn from(err: OpInvalidTransactionError) -> Self { match err { OpInvalidTransactionError::DepositSystemTxPostRegolith | - OpInvalidTransactionError::HaltedDepositPostRegolith => { + OpInvalidTransactionError::HaltedDepositPostRegolith | + OpInvalidTransactionError::MissingEnvelopedTx => { rpc_err(EthRpcErrorCode::TransactionRejected.code(), err.to_string(), None) } OpInvalidTransactionError::TxConditionalErr(_) => err.into(), @@ -93,6 +97,7 @@ impl TryFrom for OpInvalidTransactionError { Ok(Self::DepositSystemTxPostRegolith) } OpTransactionError::HaltedDepositPostRegolith => Ok(Self::HaltedDepositPostRegolith), + OpTransactionError::MissingEnvelopedTx => Ok(Self::MissingEnvelopedTx), OpTransactionError::Base(err) => Err(err), } } diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index b7ce75c51b2..db96bda83f3 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,5 +1,4 @@ use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; -use reth_evm::{SpecFor, TxEnvFor}; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall}, FromEvmError, RpcConvert, @@ -9,12 +8,7 @@ impl EthCall for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = OpEthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } @@ -22,12 +16,7 @@ impl EstimateCall for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = OpEthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } @@ -35,12 +24,7 @@ impl Call for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = OpEthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -51,4 +35,9 @@ where fn max_simulate_blocks(&self) -> u64 { self.inner.eth_api.max_simulate_blocks() } + + #[inline] + fn evm_memory_limit(&self) -> u64 { + self.inner.eth_api.evm_memory_limit() + } } diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index fdd06d224bc..8adbee93adc 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -13,7 +13,7 @@ use crate::{ OpEthApiError, SequencerClient, }; use alloy_consensus::BlockHeader; -use alloy_primitives::U256; +use alloy_primitives::{B256, U256}; use eyre::WrapErr; use op_alloy_network::Optimism; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; @@ -23,30 +23,36 @@ use reth_evm::ConfigureEvm; use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ - ExecutionPayloadBaseV1, FlashBlockCompleteSequenceRx, FlashBlockService, PendingBlockRx, - WsFlashBlockStream, + ExecutionPayloadBaseV1, FlashBlockBuildInfo, FlashBlockCompleteSequenceRx, FlashBlockRx, + FlashBlockService, FlashblocksListeners, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; -use reth_rpc::eth::{core::EthApiInner, DevSigner}; +use reth_rpc::eth::core::EthApiInner; use reth_rpc_eth_api::{ helpers::{ - pending_block::BuildPendingEnv, AddDevSigners, EthApiSpec, EthFees, EthState, LoadFee, - LoadPendingBlock, LoadState, SpawnBlocking, Trace, + pending_block::BuildPendingEnv, 
EthApiSpec, EthFees, EthState, LoadFee, LoadPendingBlock, + LoadState, SpawnBlocking, Trace, }, EthApiTypes, FromEvmError, FullEthApiServer, RpcConvert, RpcConverter, RpcNodeCore, - RpcNodeCoreExt, RpcTypes, SignableTxRequest, + RpcNodeCoreExt, RpcTypes, }; -use reth_rpc_eth_types::{ - EthStateCache, FeeHistoryCache, GasPriceOracle, PendingBlock, PendingBlockEnvOrigin, -}; -use reth_storage_api::{ProviderHeader, ProviderTx}; +use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle, PendingBlock}; +use reth_storage_api::{BlockReaderIdExt, ProviderHeader}; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, }; -use std::{fmt, fmt::Formatter, marker::PhantomData, sync::Arc, time::Instant}; -use tokio::sync::watch; +use std::{ + fmt::{self, Formatter}, + marker::PhantomData, + sync::Arc, + time::Duration, +}; +use tokio::{sync::watch, time}; use tracing::info; +/// Maximum duration to wait for a fresh flashblock when one is being built. +const MAX_FLASHBLOCK_WAIT_DURATION: Duration = Duration::from_millis(50); + /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. pub type EthApiNodeBackend = EthApiInner; @@ -77,19 +83,22 @@ impl OpEthApi { eth_api: EthApiNodeBackend, sequencer_client: Option, min_suggested_priority_fee: U256, - pending_block_rx: Option>, - flashblock_rx: Option, + flashblocks: Option>, ) -> Self { let inner = Arc::new(OpEthApiInner { eth_api, sequencer_client, min_suggested_priority_fee, - pending_block_rx, - flashblock_rx, + flashblocks, }); Self { inner } } + /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. + pub const fn builder() -> OpEthApiBuilder { + OpEthApiBuilder::new() + } + /// Returns a reference to the [`EthApiNodeBackend`]. pub fn eth_api(&self) -> &EthApiNodeBackend { self.inner.eth_api() @@ -101,48 +110,76 @@ impl OpEthApi { /// Returns a cloned pending block receiver, if any. pub fn pending_block_rx(&self) -> Option> { - self.inner.pending_block_rx.clone() + self.inner.flashblocks.as_ref().map(|f| f.pending_block_rx.clone()) } - /// Returns a flashblock receiver, if any, by resubscribing to it. - pub fn flashblock_rx(&self) -> Option { - self.inner.flashblock_rx.as_ref().map(|rx| rx.resubscribe()) + /// Returns a new subscription to received flashblocks. + pub fn subscribe_received_flashblocks(&self) -> Option { + self.inner.flashblocks.as_ref().map(|f| f.received_flashblocks.subscribe()) } - /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. - pub const fn builder() -> OpEthApiBuilder { - OpEthApiBuilder::new() + /// Returns a new subscription to flashblock sequences. + pub fn subscribe_flashblock_sequence(&self) -> Option { + self.inner.flashblocks.as_ref().map(|f| f.flashblocks_sequence.subscribe()) + } + + /// Returns information about the flashblock currently being built, if any. + fn flashblock_build_info(&self) -> Option { + self.inner.flashblocks.as_ref().and_then(|f| *f.in_progress_rx.borrow()) + } + + /// Extracts pending block if it matches the expected parent hash. + fn extract_matching_block( + &self, + block: Option<&PendingFlashBlock>, + parent_hash: B256, + ) -> Option> { + block.filter(|b| b.block().parent_hash() == parent_hash).map(|b| b.pending.clone()) + } + + /// Awaits a fresh flashblock if one is being built, otherwise returns current. 
+ async fn flashblock( + &self, + parent_hash: B256, + ) -> eyre::Result>> { + let Some(rx) = self.inner.flashblocks.as_ref().map(|f| &f.pending_block_rx) else { + return Ok(None) + }; + + // Check if a flashblock is being built + if let Some(build_info) = self.flashblock_build_info() { + let current_index = rx.borrow().as_ref().map(|b| b.last_flashblock_index); + + // Check if this is the first flashblock or the next consecutive index + let is_next_index = current_index.is_none_or(|idx| build_info.index == idx + 1); + + // Wait only for relevant flashblocks: matching parent and next in sequence + if build_info.parent_hash == parent_hash && is_next_index { + let mut rx_clone = rx.clone(); + // Wait up to MAX_FLASHBLOCK_WAIT_DURATION for a new flashblock to arrive + let _ = time::timeout(MAX_FLASHBLOCK_WAIT_DURATION, rx_clone.changed()).await; + } + } + + // Fall back to current block + Ok(self.extract_matching_block(rx.borrow().as_ref(), parent_hash)) } /// Returns a [`PendingBlock`] that is built out of flashblocks. /// /// If flashblocks receiver is not set, then it always returns `None`. - pub fn pending_flashblock(&self) -> eyre::Result>> + /// + /// It may wait up to 50ms for a fresh flashblock if one is currently being built. + pub async fn pending_flashblock(&self) -> eyre::Result>> where OpEthApiError: FromEvmError, Rpc: RpcConvert, { - let pending = self.pending_block_env_and_cfg()?; - let parent = match pending.origin { - PendingBlockEnvOrigin::ActualPending(..) => return Ok(None), - PendingBlockEnvOrigin::DerivedFromLatest(parent) => parent, + let Some(latest) = self.provider().latest_header()? else { + return Ok(None); }; - let Some(rx) = self.inner.pending_block_rx.as_ref() else { return Ok(None) }; - let pending_block = rx.borrow(); - let Some(pending_block) = pending_block.as_ref() else { return Ok(None) }; - - let now = Instant::now(); - - // Is the pending block not expired and latest is its parent? - if pending.evm_env.block_env.number == U256::from(pending_block.block().number()) && - parent.hash() == pending_block.block().parent_hash() && - now <= pending_block.expires_at - { - return Ok(Some(pending_block.pending.clone())); - } - - Ok(None) + self.flashblock(latest.hash()).await } } @@ -252,8 +289,12 @@ where } async fn suggested_priority_fee(&self) -> Result { - let min_tip = U256::from(self.inner.min_suggested_priority_fee); - self.inner.eth_api.gas_oracle().op_suggest_tip_cap(min_tip).await.map_err(Into::into) + self.inner + .eth_api + .gas_oracle() + .op_suggest_tip_cap(self.inner.min_suggested_priority_fee) + .await + .map_err(Into::into) } } @@ -293,18 +334,6 @@ where { } -impl AddDevSigners for OpEthApi -where - N: RpcNodeCore, - Rpc: RpcConvert< - Network: RpcTypes>>, - >, -{ - fn with_dev_accounts(&self) { - *self.inner.eth_api.signers().write() = DevSigner::random_signers(20) - } -} - impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() @@ -322,14 +351,10 @@ pub struct OpEthApiInner { /// /// See also min_suggested_priority_fee: U256, - /// Pending block receiver. - /// - /// If set, then it provides current pending block based on received Flashblocks. - pending_block_rx: Option>, - /// Flashblocks receiver. + /// Flashblocks listeners. /// - /// If set, then it provides sequences of flashblock built. - flashblock_rx: Option, + /// If set, provides receivers for pending blocks, flashblock sequences, and build status. 
+ flashblocks: Option>, } impl fmt::Debug for OpEthApiInner { @@ -439,7 +464,7 @@ where NetworkT: RpcTypes, OpRpcConvert: RpcConvert, OpEthApi>: - FullEthApiServer + AddDevSigners, + FullEthApiServer, { type EthApi = OpEthApi>; @@ -465,9 +490,10 @@ where None }; - let rxs = if let Some(ws_url) = flashblocks_url { - info!(target: "reth:cli", %ws_url, "Launching flashblocks service"); - let (tx, pending_block_rx) = watch::channel(None); + let flashblocks = if let Some(ws_url) = flashblocks_url { + info!(target: "reth:cli", %ws_url, "Launching flashblocks service"); + + let (tx, pending_rx) = watch::channel(None); let stream = WsFlashBlockStream::new(ws_url); let service = FlashBlockService::new( stream, @@ -475,23 +501,30 @@ where ctx.components.provider().clone(), ctx.components.task_executor().clone(), ); - let flashblock_rx = service.subscribe_block_sequence(); + + let flashblocks_sequence = service.block_sequence_broadcaster().clone(); + let received_flashblocks = service.flashblocks_broadcaster().clone(); + let in_progress_rx = service.subscribe_in_progress(); + ctx.components.task_executor().spawn(Box::pin(service.run(tx))); - Some((pending_block_rx, flashblock_rx)) + + Some(FlashblocksListeners::new( + pending_rx, + flashblocks_sequence, + in_progress_rx, + received_flashblocks, + )) } else { None }; - let (pending_block_rx, flashblock_rx) = rxs.unzip(); - let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner(); Ok(OpEthApi::new( eth_api, sequencer_client, U256::from(min_suggested_priority_fee), - pending_block_rx, - flashblock_rx, + flashblocks, )) } } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 8857b89b021..88bf2496592 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -6,16 +6,13 @@ use alloy_eips::BlockNumberOrTag; use reth_chain_state::BlockState; use reth_rpc_eth_api::{ helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock, SpawnBlocking}, - FromEvmError, RpcConvert, RpcNodeCore, + FromEvmError, RpcConvert, RpcNodeCore, RpcNodeCoreExt, }; use reth_rpc_eth_types::{ block::BlockAndReceipts, builder::config::PendingBlockKind, error::FromEthApiError, EthApiError, PendingBlock, }; -use reth_storage_api::{ - BlockReader, BlockReaderIdExt, ReceiptProvider, StateProviderBox, StateProviderFactory, -}; -use std::sync::Arc; +use reth_storage_api::{BlockReaderIdExt, StateProviderBox, StateProviderFactory}; impl LoadPendingBlock for OpEthApi where @@ -38,39 +35,12 @@ where self.inner.eth_api.pending_block_kind() } - /// Returns the locally built pending block - async fn local_pending_block( - &self, - ) -> Result>, Self::Error> { - if let Ok(Some(pending)) = self.pending_flashblock() { - return Ok(Some(pending.into_block_and_receipts())); - } - - // See: - let latest = self - .provider() - .latest_header()? - .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; - let block_id = latest.hash().into(); - let block = self - .provider() - .recovered_block(block_id, Default::default())? - .ok_or(EthApiError::HeaderNotFound(block_id.into()))?; - - let receipts = self - .provider() - .receipts_by_block(block_id)? - .ok_or(EthApiError::ReceiptsNotFound(block_id.into()))?; - - Ok(Some(BlockAndReceipts { block: Arc::new(block), receipts: Arc::new(receipts) })) - } - /// Returns a [`StateProviderBox`] on a mem-pool built pending block overlaying latest. 
async fn local_pending_state(&self) -> Result, Self::Error> where Self: SpawnBlocking, { - let Ok(Some(pending_block)) = self.pending_flashblock() else { + let Ok(Some(pending_block)) = self.pending_flashblock().await else { return Ok(None); }; @@ -83,4 +53,27 @@ where Ok(Some(Box::new(state.state_provider(latest_historical)) as StateProviderBox)) } + + /// Returns the locally built pending block + async fn local_pending_block( + &self, + ) -> Result>, Self::Error> { + if let Ok(Some(pending)) = self.pending_flashblock().await { + return Ok(Some(pending.into_block_and_receipts())); + } + + // See: + let latest = self + .provider() + .latest_header()? + .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; + + let latest = self + .cache() + .get_block_and_receipts(latest.hash()) + .await + .map_err(Self::Error::from_eth_err)? + .map(|(block, receipts)| BlockAndReceipts { block, receipts }); + Ok(latest) + } } diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 97fe3a0b5b7..c04a4d2c72d 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -6,6 +6,7 @@ use alloy_eips::eip2718::Encodable2718; use alloy_rpc_types_eth::{Log, TransactionReceipt}; use op_alloy_consensus::{OpReceiptEnvelope, OpTransaction}; use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; +use op_revm::estimate_tx_compressed_size; use reth_chainspec::ChainSpecProvider; use reth_node_api::NodePrimitives; use reth_optimism_evm::RethL1BlockInfo; @@ -131,10 +132,14 @@ pub struct OpReceiptFieldsBuilder { pub l1_blob_base_fee: Option, /// The current L1 blob base fee scalar. pub l1_blob_base_fee_scalar: Option, + /* ---------------------------------------- Isthmus ---------------------------------------- */ /// The current operator fee scalar. pub operator_fee_scalar: Option, /// The current L1 blob base fee scalar. pub operator_fee_constant: Option, + /* ---------------------------------------- Jovian ----------------------------------------- */ + /// The current DA footprint gas scalar. 
+ pub da_footprint_gas_scalar: Option, } impl OpReceiptFieldsBuilder { @@ -154,6 +159,7 @@ impl OpReceiptFieldsBuilder { l1_blob_base_fee_scalar: None, operator_fee_scalar: None, operator_fee_constant: None, + da_footprint_gas_scalar: None, } } @@ -205,6 +211,8 @@ impl OpReceiptFieldsBuilder { l1_block_info.operator_fee_constant.map(|constant| constant.saturating_to()); } + self.da_footprint_gas_scalar = l1_block_info.da_footprint_gas_scalar; + Ok(self) } @@ -236,6 +244,7 @@ impl OpReceiptFieldsBuilder { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, + da_footprint_gas_scalar, } = self; OpTransactionReceiptFields { @@ -249,6 +258,7 @@ impl OpReceiptFieldsBuilder { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, + da_footprint_gas_scalar, }, deposit_nonce, deposit_receipt_version, @@ -278,7 +288,7 @@ impl OpReceiptBuilder { let timestamp = input.meta.timestamp; let block_number = input.meta.block_number; let tx_signed = *input.tx.inner(); - let core_receipt = build_receipt(input, None, |receipt, next_log_index, meta| { + let mut core_receipt = build_receipt(input, None, |receipt, next_log_index, meta| { let map_logs = move |receipt: alloy_consensus::Receipt| { let Receipt { status, cumulative_gas_used, logs } = receipt; let logs = Log::collect_for_receipt(next_log_index, meta, logs); @@ -297,12 +307,28 @@ impl OpReceiptBuilder { OpReceipt::Eip7702(receipt) => { OpReceiptEnvelope::Eip7702(map_logs(receipt).into_with_bloom()) } + OpReceipt::Deposit(receipt) => { OpReceiptEnvelope::Deposit(receipt.map_inner(map_logs).into_with_bloom()) } } }); + // In jovian, we're using the blob gas used field to store the current da + // footprint's value. + // We're computing the jovian blob gas used before building the receipt since the inputs get + // consumed by the `build_receipt` function. + chain_spec.is_jovian_active_at_timestamp(timestamp).then(|| { + // Estimate the size of the transaction in bytes and multiply by the DA + // footprint gas scalar. + // Jovian specs: `https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/jovian/exec-engine.md#da-footprint-block-limit` + let da_size = estimate_tx_compressed_size(tx_signed.encoded_2718().as_slice()) + .saturating_div(1_000_000) + .saturating_mul(l1_block_info.da_footprint_gas_scalar.unwrap_or_default().into()); + + core_receipt.blob_gas_used = Some(da_size); + }); + let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp, block_number) .l1_block_info(chain_spec, tx_signed, l1_block_info)? .build(); @@ -324,11 +350,16 @@ impl OpReceiptBuilder { #[cfg(test)] mod test { use super::*; - use alloy_consensus::{Block, BlockBody}; - use alloy_primitives::{hex, U256}; + use alloy_consensus::{transaction::TransactionMeta, Block, BlockBody, Eip658Value, TxEip7702}; + use alloy_op_hardforks::{ + OpChainHardforks, OP_MAINNET_ISTHMUS_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, + }; + use alloy_primitives::{hex, Address, Bytes, Signature, U256}; + use op_alloy_consensus::OpTypedTransaction; use op_alloy_network::eip2718::Decodable2718; use reth_optimism_chainspec::{BASE_MAINNET, OP_MAINNET}; - use reth_optimism_primitives::OpTransactionSigned; + use reth_optimism_primitives::{OpPrimitives, OpTransactionSigned}; + use reth_primitives_traits::Recovered; /// OP Mainnet transaction at index 0 in block 124665056. 
/// @@ -364,6 +395,7 @@ mod test { l1_blob_base_fee_scalar: Some(1014213), operator_fee_scalar: None, operator_fee_constant: None, + da_footprint_gas_scalar: None, }, deposit_nonce: None, deposit_receipt_version: None, @@ -407,6 +439,7 @@ mod test { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, + da_footprint_gas_scalar, } = receipt_meta.l1_block_info; assert_eq!( @@ -450,6 +483,11 @@ mod test { TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.operator_fee_constant, "incorrect operator fee constant" ); + assert_eq!( + da_footprint_gas_scalar, + TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.da_footprint_gas_scalar, + "incorrect da footprint gas scalar" + ); } #[test] @@ -458,10 +496,11 @@ mod test { OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) .unwrap(); - let mut l1_block_info = op_revm::L1BlockInfo::default(); - - l1_block_info.operator_fee_scalar = Some(U256::ZERO); - l1_block_info.operator_fee_constant = Some(U256::from(2)); + let mut l1_block_info = op_revm::L1BlockInfo { + operator_fee_scalar: Some(U256::ZERO), + operator_fee_constant: Some(U256::from(2)), + ..Default::default() + }; let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info) @@ -481,10 +520,11 @@ mod test { OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) .unwrap(); - let mut l1_block_info = op_revm::L1BlockInfo::default(); - - l1_block_info.operator_fee_scalar = Some(U256::ZERO); - l1_block_info.operator_fee_constant = Some(U256::ZERO); + let mut l1_block_info = op_revm::L1BlockInfo { + operator_fee_scalar: Some(U256::ZERO), + operator_fee_constant: Some(U256::ZERO), + ..Default::default() + }; let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info) @@ -535,6 +575,7 @@ mod test { l1_blob_base_fee_scalar, operator_fee_scalar, operator_fee_constant, + da_footprint_gas_scalar, } = receipt_meta.l1_block_info; assert_eq!(l1_gas_price, Some(14121491676), "incorrect l1 base fee (former gas price)"); @@ -546,5 +587,146 @@ mod test { assert_eq!(l1_blob_base_fee_scalar, Some(1055762), "incorrect l1 blob base fee scalar"); assert_eq!(operator_fee_scalar, None, "incorrect operator fee scalar"); assert_eq!(operator_fee_constant, None, "incorrect operator fee constant"); + assert_eq!(da_footprint_gas_scalar, None, "incorrect da footprint gas scalar"); + } + + #[test] + fn da_footprint_gas_scalar_included_in_receipt_post_jovian() { + const DA_FOOTPRINT_GAS_SCALAR: u16 = 10; + + let tx = TxEip7702 { + chain_id: 1u64, + nonce: 0, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + input: Bytes::from(vec![1, 2]), + access_list: Default::default(), + authorization_list: Default::default(), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + let tx = OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + + let mut l1_block_info = op_revm::L1BlockInfo { + da_footprint_gas_scalar: Some(DA_FOOTPRINT_GAS_SCALAR), + ..Default::default() + }; + + let op_hardforks = OpChainHardforks::op_mainnet(); + + let receipt = OpReceiptFieldsBuilder::new(OP_MAINNET_JOVIAN_TIMESTAMP, u64::MAX) + .l1_block_info(&op_hardforks, &tx, &mut l1_block_info) + .expect("should parse revm l1 info") + .build(); + + 
assert_eq!(receipt.l1_block_info.da_footprint_gas_scalar, Some(DA_FOOTPRINT_GAS_SCALAR)); + } + + #[test] + fn blob_gas_used_included_in_receipt_post_jovian() { + const DA_FOOTPRINT_GAS_SCALAR: u16 = 100; + let tx = TxEip7702 { + chain_id: 1u64, + nonce: 0, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + access_list: Default::default(), + authorization_list: Default::default(), + input: Bytes::from(vec![0; 1_000_000]), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + let tx = OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + + let mut l1_block_info = op_revm::L1BlockInfo { + da_footprint_gas_scalar: Some(DA_FOOTPRINT_GAS_SCALAR), + ..Default::default() + }; + + let op_hardforks = OpChainHardforks::op_mainnet(); + + let op_receipt = OpReceiptBuilder::new( + &op_hardforks, + ConvertReceiptInput:: { + tx: Recovered::new_unchecked(&tx, Address::default()), + receipt: OpReceipt::Eip7702(Receipt { + status: Eip658Value::Eip658(true), + cumulative_gas_used: 100, + logs: vec![], + }), + gas_used: 100, + next_log_index: 0, + meta: TransactionMeta { + timestamp: OP_MAINNET_JOVIAN_TIMESTAMP, + ..Default::default() + }, + }, + &mut l1_block_info, + ) + .unwrap(); + + let expected_blob_gas_used = estimate_tx_compressed_size(tx.encoded_2718().as_slice()) + .saturating_div(1_000_000) + .saturating_mul(DA_FOOTPRINT_GAS_SCALAR.into()); + + assert_eq!(op_receipt.core_receipt.blob_gas_used, Some(expected_blob_gas_used)); + } + + #[test] + fn blob_gas_used_not_included_in_receipt_post_isthmus() { + const DA_FOOTPRINT_GAS_SCALAR: u16 = 100; + let tx = TxEip7702 { + chain_id: 1u64, + nonce: 0, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + access_list: Default::default(), + authorization_list: Default::default(), + input: Bytes::from(vec![0; 1_000_000]), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + let tx = OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + + let mut l1_block_info = op_revm::L1BlockInfo { + da_footprint_gas_scalar: Some(DA_FOOTPRINT_GAS_SCALAR), + ..Default::default() + }; + + let op_hardforks = OpChainHardforks::op_mainnet(); + + let op_receipt = OpReceiptBuilder::new( + &op_hardforks, + ConvertReceiptInput:: { + tx: Recovered::new_unchecked(&tx, Address::default()), + receipt: OpReceipt::Eip7702(Receipt { + status: Eip658Value::Eip658(true), + cumulative_gas_used: 100, + logs: vec![], + }), + gas_used: 100, + next_log_index: 0, + meta: TransactionMeta { + timestamp: OP_MAINNET_ISTHMUS_TIMESTAMP, + ..Default::default() + }, + }, + &mut l1_block_info, + ) + .unwrap(); + + assert_eq!(op_receipt.core_receipt.blob_gas_used, None); } } diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index fb98569db10..58d367012f1 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,27 +1,22 @@ //! Loads and formats OP transaction RPC response. 
use crate::{OpEthApi, OpEthApiError, SequencerClient}; -use alloy_consensus::TxReceipt as _; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_eth::TransactionInfo; use futures::StreamExt; use op_alloy_consensus::{transaction::OpTransactionInfo, OpTransaction}; use reth_chain_state::CanonStateSubscriptions; use reth_optimism_primitives::DepositReceipt; -use reth_primitives_traits::{BlockBody, SignedTransaction, SignerRecoverable}; -use reth_rpc_convert::transaction::ConvertReceiptInput; +use reth_primitives_traits::{BlockBody, Recovered, SignedTransaction, WithEncoded}; use reth_rpc_eth_api::{ - helpers::{ - receipt::calculate_gas_used_and_next_log_index, spec::SignersForRpc, EthTransactions, - LoadReceipt, LoadTransaction, - }, + helpers::{spec::SignersForRpc, EthTransactions, LoadReceipt, LoadTransaction}, try_into_op_tx_info, EthApiTypes as _, FromEthApiError, FromEvmError, RpcConvert, RpcNodeCore, RpcReceipt, TxInfoMapper, }; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; +use reth_rpc_eth_types::EthApiError; use reth_storage_api::{errors::ProviderError, ReceiptProvider}; use reth_transaction_pool::{ - AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, + AddedTransactionOutcome, PoolPooledTx, PoolTransaction, TransactionOrigin, TransactionPool, }; use std::{ fmt::{Debug, Formatter}, @@ -44,11 +39,11 @@ where self.inner.eth_api.send_raw_transaction_sync_timeout() } - /// Decodes and recovers the transaction and submits it to the pool. - /// - /// Returns the hash of the transaction. - async fn send_raw_transaction(&self, tx: Bytes) -> Result { - let recovered = recover_raw_transaction(&tx)?; + async fn send_transaction( + &self, + tx: WithEncoded>>, + ) -> Result { + let (tx, recovered) = tx.split(); // broadcast raw transaction to subscribers if there is any. self.eth_api().broadcast_raw_transaction(tx.clone()); @@ -88,21 +83,35 @@ where fn send_raw_transaction_sync( &self, tx: Bytes, - ) -> impl Future, Self::Error>> + Send - where - Self: LoadReceipt + 'static, - { + ) -> impl Future, Self::Error>> + Send { let this = self.clone(); let timeout_duration = self.send_raw_transaction_sync_timeout(); async move { - let hash = EthTransactions::send_raw_transaction(&this, tx).await?; let mut canonical_stream = this.provider().canonical_state_stream(); - let flashblock_rx = this.pending_block_rx(); - let mut flashblock_stream = flashblock_rx.map(WatchStream::new); + let hash = EthTransactions::send_raw_transaction(&this, tx).await?; + let mut flashblock_stream = this.pending_block_rx().map(WatchStream::new); tokio::time::timeout(timeout_duration, async { loop { tokio::select! 
{ + biased; + // check if the tx was preconfirmed in a new flashblock + flashblock = async { + if let Some(stream) = &mut flashblock_stream { + stream.next().await + } else { + futures::future::pending().await + } + } => { + if let Some(flashblock) = flashblock.flatten() { + // if flashblocks are supported, attempt to find id from the pending block + if let Some(receipt) = flashblock + .find_and_convert_transaction_receipt(hash, this.tx_resp_builder()) + { + return receipt; + } + } + } // Listen for regular canonical block updates for inclusion canonical_notification = canonical_stream.next() => { if let Some(notification) = canonical_notification { @@ -118,23 +127,6 @@ where break; } } - // check if the tx was preconfirmed in a new flashblock - _flashblock_update = async { - if let Some(ref mut stream) = flashblock_stream { - stream.next().await - } else { - futures::future::pending().await - } - } => { - // Check flashblocks for faster confirmation (Optimism-specific) - if let Ok(Some(pending_block)) = this.pending_flashblock() { - let block_and_receipts = pending_block.into_block_and_receipts(); - if block_and_receipts.block.body().contains_transaction(&hash) - && let Some(receipt) = this.transaction_receipt(hash).await? { - return Ok(receipt); - } - } - } } } Err(Self::Error::from_eth_err(EthApiError::TransactionConfirmationTimeout { @@ -168,42 +160,11 @@ where if tx_receipt.is_none() { // if flashblocks are supported, attempt to find id from the pending block - if let Ok(Some(pending_block)) = this.pending_flashblock() { - let block_and_receipts = pending_block.into_block_and_receipts(); - if let Some((tx, receipt)) = - block_and_receipts.find_transaction_and_receipt_by_hash(hash) - { - // Build tx receipt from pending block and receipts directly inline. - // This avoids canonical cache lookup that would be done by the - // `build_transaction_receipt` which would result in a block not found - // issue. See: https://github.com/paradigmxyz/reth/issues/18529 - let meta = tx.meta(); - let all_receipts = &block_and_receipts.receipts; - - let (gas_used, next_log_index) = - calculate_gas_used_and_next_log_index(meta.index, all_receipts); - - return Ok(Some( - this.tx_resp_builder() - .convert_receipts_with_block( - vec![ConvertReceiptInput { - tx: tx - .tx() - .clone() - .try_into_recovered_unchecked() - .map_err(Self::Error::from_eth_err)? - .as_recovered_ref(), - gas_used: receipt.cumulative_gas_used() - gas_used, - receipt: receipt.clone(), - next_log_index, - meta, - }], - block_and_receipts.sealed_block(), - )? 
- .pop() - .unwrap(), - )) - } + if let Ok(Some(pending_block)) = this.pending_flashblock().await && + let Some(Ok(receipt)) = pending_block + .find_and_convert_transaction_receipt(hash, this.tx_resp_builder()) + { + return Ok(Some(receipt)); } } let Some((tx, meta, receipt)) = tx_receipt else { return Ok(None) }; diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index 1c9b5d1c39e..10f8ad5dccd 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -12,6 +12,7 @@ pub mod engine; pub mod error; pub mod eth; pub mod historical; +pub mod metrics; pub mod miner; pub mod sequencer; pub mod witness; @@ -21,4 +22,5 @@ pub use engine::OpEngineApiClient; pub use engine::{OpEngineApi, OpEngineApiServer, OP_ENGINE_CAPABILITIES}; pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError}; pub use eth::{OpEthApi, OpEthApiBuilder, OpReceiptBuilder}; +pub use metrics::SequencerMetrics; pub use sequencer::SequencerClient; diff --git a/crates/optimism/rpc/src/metrics.rs b/crates/optimism/rpc/src/metrics.rs new file mode 100644 index 00000000000..5aa5e3eff3d --- /dev/null +++ b/crates/optimism/rpc/src/metrics.rs @@ -0,0 +1,21 @@ +//! RPC metrics unique for OP-stack. + +use core::time::Duration; +use metrics::Histogram; +use reth_metrics::Metrics; + +/// Optimism sequencer metrics +#[derive(Metrics, Clone)] +#[metrics(scope = "optimism_rpc.sequencer")] +pub struct SequencerMetrics { + /// How long it takes to forward a transaction to the sequencer + pub(crate) sequencer_forward_latency: Histogram, +} + +impl SequencerMetrics { + /// Records the duration it took to forward a transaction + #[inline] + pub fn record_forward_latency(&self, duration: Duration) { + self.sequencer_forward_latency.record(duration.as_secs_f64()); + } +} diff --git a/crates/optimism/rpc/src/miner.rs b/crates/optimism/rpc/src/miner.rs index a4de556ea13..f8780f37e82 100644 --- a/crates/optimism/rpc/src/miner.rs +++ b/crates/optimism/rpc/src/miner.rs @@ -4,7 +4,7 @@ use alloy_primitives::U64; use jsonrpsee_core::{async_trait, RpcResult}; pub use op_alloy_rpc_jsonrpsee::traits::MinerApiExtServer; use reth_metrics::{metrics::Gauge, Metrics}; -use reth_optimism_payload_builder::config::OpDAConfig; +use reth_optimism_payload_builder::config::{OpDAConfig, OpGasLimitConfig}; use tracing::debug; /// Miner API extension for OP, exposes settings for the data availability configuration via the @@ -12,14 +12,15 @@ use tracing::debug; #[derive(Debug, Clone)] pub struct OpMinerExtApi { da_config: OpDAConfig, + gas_limit_config: OpGasLimitConfig, metrics: OpMinerMetrics, } impl OpMinerExtApi { /// Instantiate the miner API extension with the given, sharable data availability /// configuration. 
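As a brief illustration of the new `SequencerMetrics` type introduced in `crates/optimism/rpc/src/metrics.rs` above (a sketch only; the forwarding helper shown here is hypothetical and not part of this change), recording the forward latency amounts to timing the request and feeding the elapsed duration into the histogram:

```rust
use std::time::Instant;

use reth_optimism_rpc::SequencerMetrics;

// Hypothetical helper, for illustration only: time a forward to the sequencer
// and record the elapsed duration on the new histogram.
fn forward_and_record(metrics: &SequencerMetrics) {
    let started = Instant::now();
    // ... forward the raw transaction to the sequencer here (omitted) ...
    metrics.record_forward_latency(started.elapsed());
}
```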
- pub fn new(da_config: OpDAConfig) -> Self { - Self { da_config, metrics: OpMinerMetrics::default() } + pub fn new(da_config: OpDAConfig, gas_limit_config: OpGasLimitConfig) -> Self { + Self { da_config, gas_limit_config, metrics: OpMinerMetrics::default() } } } @@ -35,6 +36,13 @@ impl MinerApiExtServer for OpMinerExtApi { Ok(true) } + + async fn set_gas_limit(&self, gas_limit: U64) -> RpcResult { + debug!(target: "rpc", "Setting gas limit: {}", gas_limit); + self.gas_limit_config.set_gas_limit(gas_limit.to()); + self.metrics.set_gas_limit(gas_limit.to()); + Ok(true) + } } /// Optimism miner metrics @@ -45,6 +53,8 @@ pub struct OpMinerMetrics { max_da_tx_size: Gauge, /// Max DA block size set on the miner max_da_block_size: Gauge, + /// Gas limit set on the miner + gas_limit: Gauge, } impl OpMinerMetrics { @@ -59,4 +69,10 @@ impl OpMinerMetrics { pub fn set_max_da_block_size(&self, size: u64) { self.max_da_block_size.set(size as f64); } + + /// Sets the gas limit gauge value + #[inline] + pub fn set_gas_limit(&self, gas_limit: u64) { + self.gas_limit.set(gas_limit as f64); + } } diff --git a/crates/optimism/rpc/src/sequencer.rs b/crates/optimism/rpc/src/sequencer.rs index 2e66a30275f..8fc8c1b389d 100644 --- a/crates/optimism/rpc/src/sequencer.rs +++ b/crates/optimism/rpc/src/sequencer.rs @@ -1,12 +1,11 @@ //! Helpers for optimism specific RPC implementations. -use crate::SequencerClientError; +use crate::{SequencerClientError, SequencerMetrics}; use alloy_json_rpc::{RpcRecv, RpcSend}; use alloy_primitives::{hex, B256}; use alloy_rpc_client::{BuiltInConnectionString, ClientBuilder, RpcClient as Client}; use alloy_rpc_types_eth::erc4337::TransactionConditional; use alloy_transport_http::Http; -use reth_optimism_txpool::supervisor::metrics::SequencerMetrics; use std::{str::FromStr, sync::Arc, time::Instant}; use thiserror::Error; use tracing::warn; diff --git a/crates/optimism/txpool/src/supervisor/client.rs b/crates/optimism/txpool/src/supervisor/client.rs index b362fae2e10..a49704ac50a 100644 --- a/crates/optimism/txpool/src/supervisor/client.rs +++ b/crates/optimism/txpool/src/supervisor/client.rs @@ -1,7 +1,6 @@ //! This is our custom implementation of validator struct use crate::{ - interop::MaybeInteropTransaction, supervisor::{ metrics::SupervisorMetrics, parse_access_list_items_to_inbox_entries, ExecutingDescriptor, InteropTxValidatorError, @@ -139,8 +138,7 @@ impl SupervisorClient { where InputIter: IntoIterator + Send + 'a, InputIter::IntoIter: Send + 'a, - TItem: - MaybeInteropTransaction + PoolTransaction + Transaction + Clone + Send + Sync + 'static, + TItem: PoolTransaction + Transaction + Send, { stream::iter(txs_to_revalidate.into_iter().map(move |tx_item| { let client_for_async_task = self.clone(); diff --git a/crates/optimism/txpool/src/supervisor/metrics.rs b/crates/optimism/txpool/src/supervisor/metrics.rs index 23eec843025..cb51a52bfc5 100644 --- a/crates/optimism/txpool/src/supervisor/metrics.rs +++ b/crates/optimism/txpool/src/supervisor/metrics.rs @@ -1,4 +1,4 @@ -//! Optimism supervisor and sequencer metrics +//! 
Optimism supervisor metrics use crate::supervisor::InteropTxValidatorError; use op_alloy_rpc_types::SuperchainDAError; @@ -70,19 +70,3 @@ impl SupervisorMetrics { } } } - -/// Optimism sequencer metrics -#[derive(Metrics, Clone)] -#[metrics(scope = "optimism_transaction_pool.sequencer")] -pub struct SequencerMetrics { - /// How long it takes to forward a transaction to the sequencer - pub(crate) sequencer_forward_latency: Histogram, -} - -impl SequencerMetrics { - /// Records the duration it took to forward a transaction - #[inline] - pub fn record_forward_latency(&self, duration: Duration) { - self.sequencer_forward_latency.record(duration.as_secs_f64()); - } -} diff --git a/crates/optimism/txpool/src/validator.rs b/crates/optimism/txpool/src/validator.rs index 631c4255942..fd4710b8a4e 100644 --- a/crates/optimism/txpool/src/validator.rs +++ b/crates/optimism/txpool/src/validator.rs @@ -28,8 +28,6 @@ pub struct OpL1BlockInfo { l1_block_info: RwLock, /// Current block timestamp. timestamp: AtomicU64, - /// Current block number. - number: AtomicU64, } impl OpL1BlockInfo { @@ -103,7 +101,6 @@ where // so that we will accept txs into the pool before the first block if block.header().number() == 0 { this.block_info.timestamp.store(block.header().timestamp(), Ordering::Relaxed); - this.block_info.number.store(block.header().number(), Ordering::Relaxed); } else { this.update_l1_block_info(block.header(), block.body().transactions().first()); } @@ -141,10 +138,9 @@ where T: Transaction, { self.block_info.timestamp.store(header.timestamp(), Ordering::Relaxed); - self.block_info.number.store(header.number(), Ordering::Relaxed); - if let Some(Ok(cost_addition)) = tx.map(reth_optimism_evm::extract_l1_info_from_tx) { - *self.block_info.l1_block_info.write() = cost_addition; + if let Some(Ok(l1_block_info)) = tx.map(reth_optimism_evm::extract_l1_info_from_tx) { + *self.block_info.l1_block_info.write() = l1_block_info; } if self.chain_spec().is_interop_active_at_timestamp(header.timestamp()) { diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index f9530d003f5..f3f1b03ab2e 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -512,7 +512,7 @@ where f.debug_tuple("BestPayload").field(&f0).field(&f1).finish() } Self::PayloadTimestamp(f0, f1) => { - f.debug_tuple("PayloadAttributes").field(&f0).field(&f1).finish() + f.debug_tuple("PayloadTimestamp").field(&f0).field(&f1).finish() } Self::Resolve(f0, f1, _f2) => f.debug_tuple("Resolve").field(&f0).field(&f1).finish(), Self::Subscribe(f0) => f.debug_tuple("Subscribe").field(&f0).finish(), diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index e1b2bb61793..0efaa91214b 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -17,6 +17,8 @@ reth-primitives-traits.workspace = true reth-chainspec.workspace = true reth-errors.workspace = true reth-chain-state.workspace = true +reth-execution-types.workspace = true +reth-trie-common.workspace = true # alloy alloy-eips.workspace = true @@ -39,6 +41,8 @@ assert_matches.workspace = true default = ["std"] std = [ "reth-chainspec/std", + "reth-execution-types/std", + "reth-trie-common/std", "alloy-eips/std", "alloy-primitives/std", "alloy-rpc-types-engine/std", diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index ca3cccda883..1c32bb3fa3d 100644 --- a/crates/payload/primitives/src/lib.rs +++ 
b/crates/payload/primitives/src/lib.rs @@ -26,8 +26,8 @@ pub use error::{ mod traits; pub use traits::{ - BuildNextEnv, BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, - PayloadBuilderAttributes, + BuildNextEnv, BuiltPayload, BuiltPayloadExecutedBlock, PayloadAttributes, + PayloadAttributesBuilder, PayloadBuilderAttributes, }; mod payload; diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 70007ef200e..845a56a2ade 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,7 +1,7 @@ //! Core traits for working with execution payloads. use crate::PayloadBuilderError; -use alloc::{boxed::Box, vec::Vec}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, eip7685::Requests, @@ -9,8 +9,60 @@ use alloy_eips::{ use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}; use core::fmt; -use reth_chain_state::ExecutedBlockWithTrieUpdates; -use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader}; +use either::Either; +use reth_execution_types::ExecutionOutcome; +use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; +use reth_trie_common::{ + updates::{TrieUpdates, TrieUpdatesSorted}, + HashedPostState, HashedPostStateSorted, +}; + +/// Represents an executed block for payload building purposes. +/// +/// This type captures the complete execution state of a built block, +/// including the recovered block, execution outcome, hashed state, and trie updates. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct BuiltPayloadExecutedBlock { + /// Recovered Block + pub recovered_block: Arc>, + /// Block's execution outcome. + pub execution_output: Arc>, + /// Block's hashed state. + /// + /// Supports both unsorted and sorted variants so payload builders can avoid cloning in order + /// to convert from one to the other when it's not necessary. + pub hashed_state: Either, Arc>, + /// Trie updates that result from calculating the state root for the block. + /// + /// Supports both unsorted and sorted variants so payload builders can avoid cloning in order + /// to convert from one to the other when it's not necessary. + pub trie_updates: Either, Arc>, +} + +impl BuiltPayloadExecutedBlock { + /// Converts this into an [`reth_chain_state::ExecutedBlock`]. + /// + /// If the hashed state or trie updates are in sorted form, they will be converted + /// back to their unsorted representations. + pub fn into_executed_payload(self) -> reth_chain_state::ExecutedBlock { + let hashed_state = match self.hashed_state { + Either::Left(unsorted) => unsorted, + Either::Right(sorted) => Arc::new(Arc::unwrap_or_clone(sorted).into()), + }; + + let trie_updates = match self.trie_updates { + Either::Left(unsorted) => unsorted, + Either::Right(sorted) => Arc::new(Arc::unwrap_or_clone(sorted).into()), + }; + + reth_chain_state::ExecutedBlock { + recovered_block: self.recovered_block, + execution_output: self.execution_output, + hashed_state, + trie_updates, + } + } +} /// Represents a successfully built execution payload (block). /// @@ -30,7 +82,7 @@ pub trait BuiltPayload: Send + Sync + fmt::Debug { /// Returns the complete execution result including state updates. /// /// Returns `None` if execution data is not available or not tracked. 
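A minimal sketch of how the new `BuiltPayloadExecutedBlock` is meant to be used, assuming the struct is generic over `N: NodePrimitives` as in the rest of the payload primitives (the helper function itself is hypothetical): a payload builder that already holds sorted trie data can store it in the `Either::Right` variants without converting, and the unsorted conversion is deferred to `into_executed_payload`.

```rust
use std::sync::Arc;

use either::Either;
use reth_execution_types::ExecutionOutcome;
use reth_payload_primitives::BuiltPayloadExecutedBlock;
use reth_primitives_traits::{NodePrimitives, RecoveredBlock};
use reth_trie_common::{updates::TrieUpdatesSorted, HashedPostStateSorted};

// Hypothetical constructor used by a payload builder that already has sorted data.
fn executed_from_sorted<N: NodePrimitives>(
    block: Arc<RecoveredBlock<N::Block>>,
    output: Arc<ExecutionOutcome<N::Receipt>>,
    sorted_state: Arc<HashedPostStateSorted>,
    sorted_trie: Arc<TrieUpdatesSorted>,
) -> BuiltPayloadExecutedBlock<N> {
    BuiltPayloadExecutedBlock {
        recovered_block: block,
        execution_output: output,
        // No clone/convert here; `into_executed_payload` converts back to the
        // unsorted form only if a consumer actually needs it.
        hashed_state: Either::Right(sorted_state),
        trie_updates: Either::Right(sorted_trie),
    }
}
```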
- fn executed_block(&self) -> Option> { + fn executed_block(&self) -> Option> { None } @@ -170,7 +222,7 @@ where } } -impl PayloadAttributesBuilder for either::Either +impl PayloadAttributesBuilder for Either where L: PayloadAttributesBuilder, R: PayloadAttributesBuilder, diff --git a/crates/primitives-traits/src/block/error.rs b/crates/primitives-traits/src/block/error.rs index f61d352bba4..ccb727ce88a 100644 --- a/crates/primitives-traits/src/block/error.rs +++ b/crates/primitives-traits/src/block/error.rs @@ -3,6 +3,37 @@ use crate::transaction::signed::RecoveryError; /// Type alias for [`BlockRecoveryError`] with a [`SealedBlock`](crate::SealedBlock) value. +/// +/// This error type is specifically used when recovering a sealed block fails. +/// It contains the original sealed block that could not be recovered, allowing +/// callers to inspect the problematic block or attempt recovery with different +/// parameters. +/// +/// # Example +/// +/// ```rust +/// use alloy_consensus::{Block, BlockBody, Header, Signed, TxEnvelope, TxLegacy}; +/// use alloy_primitives::{Signature, B256}; +/// use reth_primitives_traits::{block::error::SealedBlockRecoveryError, SealedBlock}; +/// +/// // Create a simple block for demonstration +/// let header = Header::default(); +/// let tx = TxLegacy::default(); +/// let signed_tx = Signed::new_unchecked(tx, Signature::test_signature(), B256::ZERO); +/// let envelope = TxEnvelope::Legacy(signed_tx); +/// let body = BlockBody { transactions: vec![envelope], ommers: vec![], withdrawals: None }; +/// let block = Block::new(header, body); +/// let sealed_block = SealedBlock::new_unchecked(block, B256::ZERO); +/// +/// // Simulate a block recovery operation that fails +/// let block_recovery_result: Result<(), SealedBlockRecoveryError<_>> = +/// Err(SealedBlockRecoveryError::new(sealed_block)); +/// +/// // When block recovery fails, you get the error with the original block +/// let error = block_recovery_result.unwrap_err(); +/// let failed_block = error.into_inner(); +/// // Now you can inspect the failed block or try recovery again +/// ``` pub type SealedBlockRecoveryError = BlockRecoveryError>; /// Error when recovering a block from [`SealedBlock`](crate::SealedBlock) to diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 2aeade9bc17..7705512d633 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -50,17 +50,9 @@ pub mod serde_bincode_compat { } /// Helper trait that unifies all behaviour required by block to support full node operations. -pub trait FullBlock: - Block + alloy_rlp::Encodable + alloy_rlp::Decodable -{ -} +pub trait FullBlock: Block {} -impl FullBlock for T where - T: Block - + alloy_rlp::Encodable - + alloy_rlp::Decodable -{ -} +impl FullBlock for T where T: Block {} /// Helper trait to access [`BlockBody::Transaction`] given a [`Block`]. 
pub type BlockTx = <::Body as BlockBody>::Transaction; diff --git a/crates/primitives-traits/src/extended.rs b/crates/primitives-traits/src/extended.rs index 4cba4b7d52d..da2bbc533aa 100644 --- a/crates/primitives-traits/src/extended.rs +++ b/crates/primitives-traits/src/extended.rs @@ -142,8 +142,8 @@ where impl SignerRecoverable for Extended where - B: SignedTransaction + IsTyped2718, - T: SignedTransaction, + B: SignerRecoverable, + T: SignerRecoverable, { fn recover_signer(&self) -> Result { delegate!(self => tx.recover_signer()) diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 1cc56ce2cb9..5400f52a204 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -148,6 +148,7 @@ pub use block::{ Block, FullBlock, RecoveredBlock, SealedBlock, }; +#[cfg(test)] mod withdrawal; pub use alloy_eips::eip2718::WithEncoded; @@ -156,6 +157,7 @@ pub mod crypto; mod error; pub use error::{GotExpected, GotExpectedBoxed}; +#[cfg(test)] mod log; pub use alloy_primitives::{logs_bloom, Log, LogData}; @@ -188,7 +190,7 @@ pub use size::InMemorySize; /// Node traits pub mod node; -pub use node::{BlockTy, BodyTy, FullNodePrimitives, HeaderTy, NodePrimitives, ReceiptTy, TxTy}; +pub use node::{BlockTy, BodyTy, HeaderTy, NodePrimitives, ReceiptTy, TxTy}; /// Helper trait that requires de-/serialize implementation since `serde` feature is enabled. #[cfg(feature = "serde")] diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 1f5bfed139e..f23ff222ab6 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -1,6 +1,5 @@ use crate::{ - Block, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, FullSignedTx, - MaybeSerdeBincodeCompat, Receipt, + FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, FullSignedTx, MaybeSerdeBincodeCompat, }; use core::fmt; @@ -13,7 +12,8 @@ pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static { /// Block primitive. - type Block: Block
+ MaybeSerdeBincodeCompat; + type Block: FullBlock
+ + MaybeSerdeBincodeCompat; /// Block header primitive. type BlockHeader: FullBlockHeader; /// Block body primitive. @@ -24,30 +24,7 @@ pub trait NodePrimitives: /// format that includes the signature and can be included in a block. type SignedTx: FullSignedTx; /// A receipt. - type Receipt: Receipt; -} -/// Helper trait that sets trait bounds on [`NodePrimitives`]. -pub trait FullNodePrimitives -where - Self: NodePrimitives< - Block: FullBlock
, - BlockHeader: FullBlockHeader, - BlockBody: FullBlockBody, - SignedTx: FullSignedTx, - Receipt: FullReceipt, - >, -{ -} - -impl FullNodePrimitives for T where - T: NodePrimitives< - Block: FullBlock
, - BlockHeader: FullBlockHeader, - BlockBody: FullBlockBody, - SignedTx: FullSignedTx, - Receipt: FullReceipt, - > -{ + type Receipt: FullReceipt; } /// Helper adapter type for accessing [`NodePrimitives`] block header types. diff --git a/crates/primitives-traits/src/transaction/access_list.rs b/crates/primitives-traits/src/transaction/access_list.rs index 06c033e36b0..e4d5638f562 100644 --- a/crates/primitives-traits/src/transaction/access_list.rs +++ b/crates/primitives-traits/src/transaction/access_list.rs @@ -8,22 +8,11 @@ mod tests { use proptest::proptest; use proptest_arbitrary_interop::arb; use reth_codecs::{add_arbitrary_tests, Compact}; - use serde::{Deserialize, Serialize}; /// This type is kept for compatibility tests after the codec support was added to alloy-eips /// `AccessList` type natively #[derive( - Clone, - Debug, - PartialEq, - Eq, - Hash, - Default, - RlpDecodableWrapper, - RlpEncodableWrapper, - Serialize, - Deserialize, - Compact, + Clone, Debug, PartialEq, Eq, Default, RlpDecodableWrapper, RlpEncodableWrapper, Compact, )] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact, rlp)] @@ -36,22 +25,9 @@ mod tests { } // This - #[derive( - Clone, - Debug, - PartialEq, - Eq, - Hash, - Default, - RlpDecodable, - RlpEncodable, - Serialize, - Deserialize, - Compact, - )] + #[derive(Clone, Debug, PartialEq, Eq, Default, RlpDecodable, RlpEncodable, Compact)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact, rlp)] - #[serde(rename_all = "camelCase")] struct RethAccessListItem { /// Account address that would be loaded at the start of execution address: Address, diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 665dcab9a88..1717cc6ec3f 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-ethereum-primitives = { workspace = true, features = ["serde"] } -reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-primitives-traits.workspace = true reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true diff --git a/crates/prune/db/Cargo.toml b/crates/prune/db/Cargo.toml new file mode 100644 index 00000000000..269a87bf7b6 --- /dev/null +++ b/crates/prune/db/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "reth-prune-db" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true +description = "Database integration with prune implementation" + +[dependencies] + +[lints] +workspace = true diff --git a/crates/prune/db/src/lib.rs b/crates/prune/db/src/lib.rs new file mode 100644 index 00000000000..ef777085e54 --- /dev/null +++ b/crates/prune/db/src/lib.rs @@ -0,0 +1 @@ +//! An integration of `reth-prune` with `reth-db`. 
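To illustrate the intent of the `NodePrimitives` consolidation above (removing the `FullNodePrimitives` helper and tightening the associated-type bounds), a hedged sketch: generic code that previously had to name the helper trait can now bound on `NodePrimitives` alone, since the associated types already carry the `Full*` requirements. The function below is a hypothetical downstream example, not part of this change.

```rust
use reth_primitives_traits::NodePrimitives;

// Hypothetical downstream helper, for illustration only.
fn receipt_count<N: NodePrimitives>(receipts: &[N::Receipt]) -> usize {
    // `N::Receipt: FullReceipt` (and the block/body/tx bounds) are now implied
    // by `NodePrimitives` itself, so no extra `FullNodePrimitives` bound is needed.
    receipts.len()
}
```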
diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index 094570b873f..615a793bb89 100644 --- a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth -reth-chainspec.workspace = true reth-exex-types.workspace = true reth-db-api.workspace = true reth-errors.workspace = true @@ -25,7 +24,6 @@ reth-primitives-traits.workspace = true reth-static-file-types.workspace = true # ethereum -alloy-consensus.workspace = true alloy-eips.workspace = true # metrics diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index 1987c500da7..f61aa6bd46d 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -1,13 +1,12 @@ use crate::{segments::SegmentSet, Pruner}; use alloy_eips::eip2718::Encodable2718; -use reth_chainspec::MAINNET_PRUNE_DELETE_LIMIT; use reth_config::PruneConfig; use reth_db_api::{table::Value, transaction::DbTxMut}; use reth_exex_types::FinishedExExHeight; use reth_primitives_traits::NodePrimitives; use reth_provider::{ - providers::StaticFileProvider, BlockReader, DBProvider, DatabaseProviderFactory, - NodePrimitivesProvider, PruneCheckpointReader, PruneCheckpointWriter, + providers::StaticFileProvider, BlockReader, ChainStateBlockReader, DBProvider, + DatabaseProviderFactory, NodePrimitivesProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; @@ -30,9 +29,6 @@ pub struct PrunerBuilder { } impl PrunerBuilder { - /// Default timeout for a prune run. - pub const DEFAULT_TIMEOUT: Duration = Duration::from_millis(100); - /// Creates a new [`PrunerBuilder`] from the given [`PruneConfig`]. pub fn new(pruner_config: PruneConfig) -> Self { Self::default() @@ -47,7 +43,7 @@ impl PrunerBuilder { } /// Sets the configuration for every part of the data that can be pruned. - pub fn segments(mut self, segments: PruneModes) -> Self { + pub const fn segments(mut self, segments: PruneModes) -> Self { self.segments = segments; self } @@ -83,6 +79,7 @@ impl PrunerBuilder { ProviderRW: PruneCheckpointWriter + PruneCheckpointReader + BlockReader + + ChainStateBlockReader + StaticFileProviderFactory< Primitives: NodePrimitives, >, @@ -113,6 +110,7 @@ impl PrunerBuilder { Primitives: NodePrimitives, > + DBProvider + BlockReader + + ChainStateBlockReader + PruneCheckpointWriter + PruneCheckpointReader, { @@ -132,8 +130,8 @@ impl Default for PrunerBuilder { fn default() -> Self { Self { block_interval: 5, - segments: PruneModes::none(), - delete_limit: MAINNET_PRUNE_DELETE_LIMIT, + segments: PruneModes::default(), + delete_limit: usize::MAX, timeout: None, finished_exex_height: watch::channel(FinishedExExHeight::NoExExs).1, } diff --git a/crates/prune/prune/src/limiter.rs b/crates/prune/prune/src/limiter.rs index d347ecddbd5..a32e6ab2437 100644 --- a/crates/prune/prune/src/limiter.rs +++ b/crates/prune/prune/src/limiter.rs @@ -96,7 +96,7 @@ impl PruneLimiter { /// Returns the number of deleted entries left before the limit is reached. pub fn deleted_entries_limit_left(&self) -> Option { - self.deleted_entries_limit.as_ref().map(|limit| limit.limit - limit.deleted) + self.deleted_entries_limit.as_ref().map(|limit| limit.limit.saturating_sub(limit.deleted)) } /// Returns the limit on the number of deleted entries (rows in the database). 
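The `saturating_sub` change in `deleted_entries_limit_left` above matters because a prune run can end up with more deleted entries than the configured limit (for example when the limit is floored to a multiple of the per-step table count, or lowered after deletions were counted); plain subtraction would underflow there, panicking in debug builds. A minimal standalone illustration follows; the new tests in the next hunk exercise the same cases through `PruneLimiter` itself.

```rust
// Mirrors the behaviour of `deleted_entries_limit_left`: saturate instead of
// underflowing when more entries were deleted than the configured limit.
fn remaining(limit: usize, deleted: usize) -> usize {
    limit.saturating_sub(deleted)
}

fn main() {
    assert_eq!(remaining(7, 3), 4);
    assert_eq!(remaining(10, 12), 0); // plain `10 - 12` would panic in a debug build
}
```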
@@ -411,4 +411,35 @@ mod tests { sleep(Duration::new(0, 10_000_000)); // 10 milliseconds assert!(limiter.is_limit_reached(), "Limit should be reached when time limit is reached"); } + + #[test] + fn test_deleted_entries_limit_left_saturation_and_normal() { + // less than limit → no saturation + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); + limiter.increment_deleted_entries_count_by(3); + assert_eq!(limiter.deleted_entries_limit_left(), Some(7)); + + // equal to limit → saturates to 0 + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(3); + limiter.increment_deleted_entries_count_by(3); + assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); + + // overrun past limit → saturates to 0 + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); + limiter.increment_deleted_entries_count_by(12); + assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); + + // lowering limit via set → saturates to 0 if below deleted + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(20); + limiter.increment_deleted_entries_count_by(15); + let limiter = limiter.set_deleted_entries_limit(10); + assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); + + // lowering limit via floor → saturates to 0 if below deleted + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(15); + limiter.increment_deleted_entries_count_by(14); + let denominator = NonZeroUsize::new(8).unwrap(); + let limiter = limiter.floor_deleted_entries_limit_to_multiple_of(denominator); + assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); + } } diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index 1daade01358..f4df3d2a0dd 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -1,6 +1,5 @@ mod receipts; mod set; -mod static_file; mod user; use crate::{PruneLimiter, PrunerError}; @@ -8,15 +7,11 @@ use alloy_primitives::{BlockNumber, TxNumber}; use reth_provider::{errors::provider::ProviderResult, BlockReader, PruneCheckpointWriter}; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; pub use set::SegmentSet; -pub use static_file::{ - Headers as StaticFileHeaders, Receipts as StaticFileReceipts, - Transactions as StaticFileTransactions, -}; use std::{fmt::Debug, ops::RangeInclusive}; use tracing::error; pub use user::{ - AccountHistory, Receipts as UserReceipts, ReceiptsByLogs, SenderRecovery, StorageHistory, - TransactionLookup, + AccountHistory, Bodies, MerkleChangeSets, Receipts as UserReceipts, SenderRecovery, + StorageHistory, TransactionLookup, }; /// A segment represents a pruning of some portion of the data. diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index 12ad6e2c203..68a12552013 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -1,9 +1,7 @@ -//! Common receipts pruning logic shared between user and static file pruning segments. +//! Common receipts pruning logic. //! //! - [`crate::segments::user::Receipts`] is responsible for pruning receipts according to the //! user-configured settings (for example, on a full node or with a custom prune config) -//! - [`crate::segments::static_file::Receipts`] is responsible for pruning receipts on an archive -//! 
node after static file producer has finished use crate::{db_ext::DbTxPruneExt, segments::PruneInput, PrunerError}; use reth_db_api::{table::Value, tables, transaction::DbTxMut}; diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 08e41bcdf75..acd71f52e1b 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -1,18 +1,16 @@ use crate::segments::{ - AccountHistory, ReceiptsByLogs, Segment, SenderRecovery, StorageHistory, TransactionLookup, - UserReceipts, + AccountHistory, Bodies, MerkleChangeSets, Segment, SenderRecovery, StorageHistory, + TransactionLookup, UserReceipts, }; use alloy_eips::eip2718::Encodable2718; use reth_db_api::{table::Value, transaction::DbTxMut}; use reth_primitives_traits::NodePrimitives; use reth_provider::{ - providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointReader, - PruneCheckpointWriter, StaticFileProviderFactory, + providers::StaticFileProvider, BlockReader, ChainStateBlockReader, DBProvider, + PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; -use super::{StaticFileHeaders, StaticFileReceipts, StaticFileTransactions}; - /// Collection of [`Segment`]. Thread-safe, allocated on the heap. #[derive(Debug)] pub struct SegmentSet { @@ -52,42 +50,38 @@ where > + DBProvider + PruneCheckpointWriter + PruneCheckpointReader - + BlockReader, + + BlockReader + + ChainStateBlockReader, { /// Creates a [`SegmentSet`] from an existing components, such as [`StaticFileProvider`] and /// [`PruneModes`]. pub fn from_components( - static_file_provider: StaticFileProvider, + _static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { + #[expect(deprecated)] let PruneModes { sender_recovery, transaction_lookup, receipts, account_history, storage_history, - bodies_history: _, - receipts_log_filter, + bodies_history, + merkle_changesets, + receipts_log_filter: (), } = prune_modes; Self::default() - // Static file headers - .segment(StaticFileHeaders::new(static_file_provider.clone())) - // Static file transactions - .segment(StaticFileTransactions::new(static_file_provider.clone())) - // Static file receipts - .segment(StaticFileReceipts::new(static_file_provider)) + // Bodies - run first since file deletion is fast + .segment_opt(bodies_history.map(Bodies::new)) + // Merkle changesets + .segment(MerkleChangeSets::new(merkle_changesets)) // Account history .segment_opt(account_history.map(AccountHistory::new)) // Storage history .segment_opt(storage_history.map(StorageHistory::new)) // User receipts .segment_opt(receipts.map(UserReceipts::new)) - // Receipts by logs - .segment_opt( - (!receipts_log_filter.is_empty()) - .then(|| ReceiptsByLogs::new(receipts_log_filter.clone())), - ) // Transaction lookup .segment_opt(transaction_lookup.map(TransactionLookup::new)) // Sender recovery diff --git a/crates/prune/prune/src/segments/static_file/headers.rs b/crates/prune/prune/src/segments/static_file/headers.rs deleted file mode 100644 index 9f3c291bf44..00000000000 --- a/crates/prune/prune/src/segments/static_file/headers.rs +++ /dev/null @@ -1,359 +0,0 @@ -use crate::{ - db_ext::DbTxPruneExt, - segments::{PruneInput, Segment}, - PruneLimiter, PrunerError, -}; -use alloy_primitives::BlockNumber; -use itertools::Itertools; -use reth_db_api::{ - cursor::{DbCursorRO, RangeWalker}, - table::Value, - tables, - transaction::DbTxMut, -}; -use reth_primitives_traits::NodePrimitives; -use 
reth_provider::{providers::StaticFileProvider, DBProvider, StaticFileProviderFactory}; -use reth_prune_types::{ - PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, -}; -use reth_static_file_types::StaticFileSegment; -use std::num::NonZeroUsize; -use tracing::trace; - -/// Number of header tables to prune in one step -const HEADER_TABLES_TO_PRUNE: usize = 3; - -#[derive(Debug)] -pub struct Headers { - static_file_provider: StaticFileProvider, -} - -impl Headers { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { - Self { static_file_provider } - } -} - -impl Segment for Headers -where - Provider: StaticFileProviderFactory> - + DBProvider, -{ - fn segment(&self) -> PruneSegment { - PruneSegment::Headers - } - - fn mode(&self) -> Option { - self.static_file_provider - .get_highest_static_file_block(StaticFileSegment::Headers) - .map(PruneMode::before_inclusive) - } - - fn purpose(&self) -> PrunePurpose { - PrunePurpose::StaticFile - } - - fn prune(&self, provider: &Provider, input: PruneInput) -> Result { - let (block_range_start, block_range_end) = match input.get_next_block_range() { - Some(range) => (*range.start(), *range.end()), - None => { - trace!(target: "pruner", "No headers to prune"); - return Ok(SegmentOutput::done()) - } - }; - - let last_pruned_block = - if block_range_start == 0 { None } else { Some(block_range_start - 1) }; - - let range = last_pruned_block.map_or(0, |block| block + 1)..=block_range_end; - - // let mut headers_cursor = provider.tx_ref().cursor_write::()?; - let mut headers_cursor = provider - .tx_ref() - .cursor_write::::BlockHeader>>( - )?; - - let mut header_tds_cursor = - provider.tx_ref().cursor_write::()?; - let mut canonical_headers_cursor = - provider.tx_ref().cursor_write::()?; - - let mut limiter = input.limiter.floor_deleted_entries_limit_to_multiple_of( - NonZeroUsize::new(HEADER_TABLES_TO_PRUNE).unwrap(), - ); - - let tables_iter = HeaderTablesIter::new( - provider, - &mut limiter, - headers_cursor.walk_range(range.clone())?, - header_tds_cursor.walk_range(range.clone())?, - canonical_headers_cursor.walk_range(range)?, - ); - - let mut last_pruned_block: Option = None; - let mut pruned = 0; - for res in tables_iter { - let HeaderTablesIterItem { pruned_block, entries_pruned } = res?; - last_pruned_block = Some(pruned_block); - pruned += entries_pruned; - } - - let done = last_pruned_block == Some(block_range_end); - let progress = limiter.progress(done); - - Ok(SegmentOutput { - progress, - pruned, - checkpoint: Some(SegmentOutputCheckpoint { - block_number: last_pruned_block, - tx_number: None, - }), - }) - } -} -type Walker<'a, Provider, T> = - RangeWalker<'a, T, <::Tx as DbTxMut>::CursorMut>; - -#[allow(missing_debug_implementations)] -struct HeaderTablesIter<'a, Provider> -where - Provider: StaticFileProviderFactory> - + DBProvider, -{ - provider: &'a Provider, - limiter: &'a mut PruneLimiter, - headers_walker: Walker< - 'a, - Provider, - tables::Headers<::BlockHeader>, - >, - header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>, - canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>, -} - -struct HeaderTablesIterItem { - pruned_block: BlockNumber, - entries_pruned: usize, -} - -impl<'a, Provider> HeaderTablesIter<'a, Provider> -where - Provider: StaticFileProviderFactory> - + DBProvider, -{ - const fn new( - provider: &'a Provider, - limiter: &'a mut PruneLimiter, - headers_walker: Walker< - 'a, - Provider, - tables::Headers<::BlockHeader>, - >, - 
header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>, - canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>, - ) -> Self { - Self { provider, limiter, headers_walker, header_tds_walker, canonical_headers_walker } - } -} - -impl Iterator for HeaderTablesIter<'_, Provider> -where - Provider: StaticFileProviderFactory> - + DBProvider, -{ - type Item = Result; - fn next(&mut self) -> Option { - if self.limiter.is_limit_reached() { - return None - } - - let mut pruned_block_headers = None; - let mut pruned_block_td = None; - let mut pruned_block_canonical = None; - - if let Err(err) = self.provider.tx_ref().prune_table_with_range_step( - &mut self.headers_walker, - self.limiter, - &mut |_| false, - &mut |row| pruned_block_headers = Some(row.0), - ) { - return Some(Err(err.into())) - } - - if let Err(err) = self.provider.tx_ref().prune_table_with_range_step( - &mut self.header_tds_walker, - self.limiter, - &mut |_| false, - &mut |row| pruned_block_td = Some(row.0), - ) { - return Some(Err(err.into())) - } - - if let Err(err) = self.provider.tx_ref().prune_table_with_range_step( - &mut self.canonical_headers_walker, - self.limiter, - &mut |_| false, - &mut |row| pruned_block_canonical = Some(row.0), - ) { - return Some(Err(err.into())) - } - - if ![pruned_block_headers, pruned_block_td, pruned_block_canonical].iter().all_equal() { - return Some(Err(PrunerError::InconsistentData( - "All headers-related tables should be pruned up to the same height", - ))) - } - - pruned_block_headers.map(move |block| { - Ok(HeaderTablesIterItem { pruned_block: block, entries_pruned: HEADER_TABLES_TO_PRUNE }) - }) - } -} - -#[cfg(test)] -mod tests { - use crate::segments::{ - static_file::headers::HEADER_TABLES_TO_PRUNE, PruneInput, PruneLimiter, Segment, - SegmentOutput, - }; - use alloy_primitives::{BlockNumber, B256, U256}; - use assert_matches::assert_matches; - use reth_db_api::{tables, transaction::DbTx}; - use reth_provider::{ - DBProvider, DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, - StaticFileProviderFactory, - }; - use reth_prune_types::{ - PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment, - SegmentOutputCheckpoint, - }; - use reth_stages::test_utils::TestStageDB; - use reth_testing_utils::{generators, generators::random_header_range}; - use tracing::trace; - - #[test] - fn prune() { - reth_tracing::init_test_tracing(); - - let db = TestStageDB::default(); - let mut rng = generators::rng(); - - let headers = random_header_range(&mut rng, 0..100, B256::ZERO); - let tx = db.factory.provider_rw().unwrap().into_tx(); - for header in &headers { - TestStageDB::insert_header(None, &tx, header, U256::ZERO).unwrap(); - } - tx.commit().unwrap(); - - assert_eq!(db.table::().unwrap().len(), headers.len()); - assert_eq!(db.table::().unwrap().len(), headers.len()); - assert_eq!(db.table::().unwrap().len(), headers.len()); - - let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| { - let segment = super::Headers::new(db.factory.static_file_provider()); - let prune_mode = PruneMode::Before(to_block); - let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); - let input = PruneInput { - previous_checkpoint: db - .factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::Headers) - .unwrap(), - to_block, - limiter: limiter.clone(), - }; - - let next_block_number_to_prune = db - .factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::Headers) - 
.unwrap() - .and_then(|checkpoint| checkpoint.block_number) - .map(|block_number| block_number + 1) - .unwrap_or_default(); - - let provider = db.factory.database_provider_rw().unwrap(); - let result = segment.prune(&provider, input.clone()).unwrap(); - limiter.increment_deleted_entries_count_by(result.pruned); - trace!(target: "pruner::test", - expected_prune_progress=?expected_result.0, - expected_pruned=?expected_result.1, - result=?result, - "SegmentOutput" - ); - - assert_matches!( - result, - SegmentOutput {progress, pruned, checkpoint: Some(_)} - if (progress, pruned) == expected_result - ); - provider - .save_prune_checkpoint( - PruneSegment::Headers, - result.checkpoint.unwrap().as_prune_checkpoint(prune_mode), - ) - .unwrap(); - provider.commit().expect("commit"); - - let last_pruned_block_number = to_block.min( - next_block_number_to_prune + - (input.limiter.deleted_entries_limit().unwrap() / HEADER_TABLES_TO_PRUNE - 1) - as u64, - ); - - assert_eq!( - db.table::().unwrap().len(), - headers.len() - (last_pruned_block_number + 1) as usize - ); - assert_eq!( - db.table::().unwrap().len(), - headers.len() - (last_pruned_block_number + 1) as usize - ); - assert_eq!( - db.table::().unwrap().len(), - headers.len() - (last_pruned_block_number + 1) as usize - ); - assert_eq!( - db.factory.provider().unwrap().get_prune_checkpoint(PruneSegment::Headers).unwrap(), - Some(PruneCheckpoint { - block_number: Some(last_pruned_block_number), - tx_number: None, - prune_mode - }) - ); - }; - - test_prune( - 3, - (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 9), - ); - test_prune(3, (PruneProgress::Finished, 3)); - } - - #[test] - fn prune_cannot_be_done() { - let db = TestStageDB::default(); - - let limiter = PruneLimiter::default().set_deleted_entries_limit(0); - - let input = PruneInput { - previous_checkpoint: None, - to_block: 1, - // Less than total number of tables for `Headers` segment - limiter, - }; - - let provider = db.factory.database_provider_rw().unwrap(); - let segment = super::Headers::new(db.factory.static_file_provider()); - let result = segment.prune(&provider, input).unwrap(); - assert_eq!( - result, - SegmentOutput::not_done( - PruneInterruptReason::DeletedEntriesLimitReached, - Some(SegmentOutputCheckpoint::default()) - ) - ); - } -} diff --git a/crates/prune/prune/src/segments/static_file/mod.rs b/crates/prune/prune/src/segments/static_file/mod.rs deleted file mode 100644 index cb9dc79c6cd..00000000000 --- a/crates/prune/prune/src/segments/static_file/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod headers; -mod receipts; -mod transactions; - -pub use headers::Headers; -pub use receipts::Receipts; -pub use transactions::Transactions; diff --git a/crates/prune/prune/src/segments/static_file/receipts.rs b/crates/prune/prune/src/segments/static_file/receipts.rs deleted file mode 100644 index 6a84cce9c41..00000000000 --- a/crates/prune/prune/src/segments/static_file/receipts.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::{ - segments::{PruneInput, Segment}, - PrunerError, -}; -use reth_db_api::{table::Value, transaction::DbTxMut}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{ - errors::provider::ProviderResult, providers::StaticFileProvider, BlockReader, DBProvider, - PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, -}; -use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; -use reth_static_file_types::StaticFileSegment; - -#[derive(Debug)] -pub struct Receipts { - 
static_file_provider: StaticFileProvider, -} - -impl Receipts { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { - Self { static_file_provider } - } -} - -impl Segment for Receipts -where - Provider: StaticFileProviderFactory> - + DBProvider - + PruneCheckpointWriter - + TransactionsProvider - + BlockReader, -{ - fn segment(&self) -> PruneSegment { - PruneSegment::Receipts - } - - fn mode(&self) -> Option { - self.static_file_provider - .get_highest_static_file_block(StaticFileSegment::Receipts) - .map(PruneMode::before_inclusive) - } - - fn purpose(&self) -> PrunePurpose { - PrunePurpose::StaticFile - } - - fn prune(&self, provider: &Provider, input: PruneInput) -> Result { - crate::segments::receipts::prune(provider, input) - } - - fn save_checkpoint( - &self, - provider: &Provider, - checkpoint: PruneCheckpoint, - ) -> ProviderResult<()> { - crate::segments::receipts::save_checkpoint(provider, checkpoint) - } -} diff --git a/crates/prune/prune/src/segments/static_file/transactions.rs b/crates/prune/prune/src/segments/static_file/transactions.rs deleted file mode 100644 index 115ee2ca39a..00000000000 --- a/crates/prune/prune/src/segments/static_file/transactions.rs +++ /dev/null @@ -1,225 +0,0 @@ -use crate::{ - db_ext::DbTxPruneExt, - segments::{PruneInput, Segment}, - PrunerError, -}; -use reth_db_api::{table::Value, tables, transaction::DbTxMut}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{ - providers::StaticFileProvider, BlockReader, DBProvider, StaticFileProviderFactory, - TransactionsProvider, -}; -use reth_prune_types::{ - PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, -}; -use reth_static_file_types::StaticFileSegment; -use tracing::trace; - -/// The type responsible for pruning transactions in the database and history expiry. -#[derive(Debug)] -pub struct Transactions { - static_file_provider: StaticFileProvider, -} - -impl Transactions { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { - Self { static_file_provider } - } -} - -impl Segment for Transactions -where - Provider: DBProvider - + TransactionsProvider - + BlockReader - + StaticFileProviderFactory>, -{ - fn segment(&self) -> PruneSegment { - PruneSegment::Transactions - } - - fn mode(&self) -> Option { - self.static_file_provider - .get_highest_static_file_block(StaticFileSegment::Transactions) - .map(PruneMode::before_inclusive) - } - - fn purpose(&self) -> PrunePurpose { - PrunePurpose::StaticFile - } - - fn prune(&self, provider: &Provider, input: PruneInput) -> Result { - let tx_range = match input.get_next_tx_num_range(provider)? { - Some(range) => range, - None => { - trace!(target: "pruner", "No transactions to prune"); - return Ok(SegmentOutput::done()) - } - }; - - let mut limiter = input.limiter; - - let mut last_pruned_transaction = *tx_range.end(); - let (pruned, done) = provider.tx_ref().prune_table_with_range::::SignedTx, - >>( - tx_range, - &mut limiter, - |_| false, - |row| last_pruned_transaction = row.0, - )?; - trace!(target: "pruner", %pruned, %done, "Pruned transactions"); - - let last_pruned_block = provider - .transaction_block(last_pruned_transaction)? - .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? - // If there's more transactions to prune, set the checkpoint block number to previous, - // so we could finish pruning its transactions on the next run. 
- .checked_sub(if done { 0 } else { 1 }); - - let progress = limiter.progress(done); - - Ok(SegmentOutput { - progress, - pruned, - checkpoint: Some(SegmentOutputCheckpoint { - block_number: last_pruned_block, - tx_number: Some(last_pruned_transaction), - }), - }) - } -} - -#[cfg(test)] -mod tests { - use crate::segments::{PruneInput, PruneLimiter, Segment}; - use alloy_primitives::{BlockNumber, TxNumber, B256}; - use assert_matches::assert_matches; - use itertools::{ - FoldWhile::{Continue, Done}, - Itertools, - }; - use reth_db_api::tables; - use reth_provider::{ - DBProvider, DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, - StaticFileProviderFactory, - }; - use reth_prune_types::{ - PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment, - SegmentOutput, - }; - use reth_stages::test_utils::{StorageKind, TestStageDB}; - use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; - use std::ops::Sub; - - #[test] - fn prune() { - let db = TestStageDB::default(); - let mut rng = generators::rng(); - - let blocks = random_block_range( - &mut rng, - 1..=100, - BlockRangeParams { parent: Some(B256::ZERO), tx_count: 2..3, ..Default::default() }, - ); - db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks"); - - let transactions = - blocks.iter().flat_map(|block| &block.body().transactions).collect::>(); - - assert_eq!(db.table::().unwrap().len(), transactions.len()); - - let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| { - let segment = super::Transactions::new(db.factory.static_file_provider()); - let prune_mode = PruneMode::Before(to_block); - let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); - let input = PruneInput { - previous_checkpoint: db - .factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::Transactions) - .unwrap(), - to_block, - limiter: limiter.clone(), - }; - - let next_tx_number_to_prune = db - .factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::Transactions) - .unwrap() - .and_then(|checkpoint| checkpoint.tx_number) - .map(|tx_number| tx_number + 1) - .unwrap_or_default(); - - let provider = db.factory.database_provider_rw().unwrap(); - let result = segment.prune(&provider, input.clone()).unwrap(); - limiter.increment_deleted_entries_count_by(result.pruned); - - assert_matches!( - result, - SegmentOutput {progress, pruned, checkpoint: Some(_)} - if (progress, pruned) == expected_result - ); - - provider - .save_prune_checkpoint( - PruneSegment::Transactions, - result.checkpoint.unwrap().as_prune_checkpoint(prune_mode), - ) - .unwrap(); - provider.commit().expect("commit"); - - let last_pruned_tx_number = blocks - .iter() - .take(to_block as usize) - .map(|block| block.transaction_count()) - .sum::() - .min( - next_tx_number_to_prune as usize + - input.limiter.deleted_entries_limit().unwrap(), - ) - .sub(1); - - let last_pruned_block_number = blocks - .iter() - .fold_while((0, 0), |(_, mut tx_count), block| { - tx_count += block.transaction_count(); - - if tx_count > last_pruned_tx_number { - Done((block.number, tx_count)) - } else { - Continue((block.number, tx_count)) - } - }) - .into_inner() - .0 - .checked_sub(if result.progress.is_finished() { 0 } else { 1 }); - - assert_eq!( - db.table::().unwrap().len(), - transactions.len() - (last_pruned_tx_number + 1) - ); - assert_eq!( - db.factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::Transactions) - .unwrap(), - 
Some(PruneCheckpoint { - block_number: last_pruned_block_number, - tx_number: Some(last_pruned_tx_number as TxNumber), - prune_mode - }) - ); - }; - - test_prune( - 6, - (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 10), - ); - test_prune(6, (PruneProgress::Finished, 2)); - } -} diff --git a/crates/prune/prune/src/segments/user/account_history.rs b/crates/prune/prune/src/segments/user/account_history.rs index 3c18cd1befc..317337f050e 100644 --- a/crates/prune/prune/src/segments/user/account_history.rs +++ b/crates/prune/prune/src/segments/user/account_history.rs @@ -45,7 +45,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let range = match input.get_next_block_range() { Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/bodies.rs b/crates/prune/prune/src/segments/user/bodies.rs new file mode 100644 index 00000000000..0a6a432754b --- /dev/null +++ b/crates/prune/prune/src/segments/user/bodies.rs @@ -0,0 +1,327 @@ +use crate::{ + segments::{PruneInput, Segment}, + PrunerError, +}; +use reth_provider::{BlockReader, StaticFileProviderFactory}; +use reth_prune_types::{ + PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, +}; +use reth_static_file_types::StaticFileSegment; + +/// Segment responsible for pruning transactions in static files. +/// +/// This segment is controlled by the `bodies_history` configuration. +#[derive(Debug)] +pub struct Bodies { + mode: PruneMode, +} + +impl Bodies { + /// Creates a new [`Bodies`] segment with the given prune mode. + pub const fn new(mode: PruneMode) -> Self { + Self { mode } + } +} + +impl Segment for Bodies +where + Provider: StaticFileProviderFactory + BlockReader, +{ + fn segment(&self) -> PruneSegment { + PruneSegment::Bodies + } + + fn mode(&self) -> Option { + Some(self.mode) + } + + fn purpose(&self) -> PrunePurpose { + PrunePurpose::User + } + + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { + let deleted_headers = provider + .static_file_provider() + .delete_segment_below_block(StaticFileSegment::Transactions, input.to_block + 1)?; + + if deleted_headers.is_empty() { + return Ok(SegmentOutput::done()) + } + + let tx_ranges = deleted_headers.iter().filter_map(|header| header.tx_range()); + + let pruned = tx_ranges.clone().map(|range| range.len()).sum::() as usize; + + Ok(SegmentOutput { + progress: PruneProgress::Finished, + pruned, + checkpoint: Some(SegmentOutputCheckpoint { + block_number: Some(input.to_block), + tx_number: tx_ranges.map(|range| range.end()).max(), + }), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::Pruner; + use alloy_primitives::BlockNumber; + use reth_exex_types::FinishedExExHeight; + use reth_provider::{ + test_utils::{create_test_provider_factory, MockNodeTypesWithDB}, + ProviderFactory, StaticFileWriter, + }; + use reth_prune_types::{PruneMode, PruneProgress, PruneSegment}; + use reth_static_file_types::{ + SegmentHeader, SegmentRangeInclusive, StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE, + }; + + /// Creates empty static file jars at 500k block intervals up to the tip block. + /// + /// Each jar contains sequential transaction ranges for testing deletion logic. 
+ fn setup_static_file_jars(provider: &P, tip_block: u64) { + let num_jars = (tip_block + 1) / DEFAULT_BLOCKS_PER_STATIC_FILE; + let txs_per_jar = 1000; + let static_file_provider = provider.static_file_provider(); + + let mut writer = + static_file_provider.latest_writer(StaticFileSegment::Transactions).unwrap(); + + for jar_idx in 0..num_jars { + let block_start = jar_idx * DEFAULT_BLOCKS_PER_STATIC_FILE; + let block_end = ((jar_idx + 1) * DEFAULT_BLOCKS_PER_STATIC_FILE - 1).min(tip_block); + + let tx_start = jar_idx * txs_per_jar; + let tx_end = tx_start + txs_per_jar - 1; + + *writer.user_header_mut() = SegmentHeader::new( + SegmentRangeInclusive::new(block_start, block_end), + Some(SegmentRangeInclusive::new(block_start, block_end)), + Some(SegmentRangeInclusive::new(tx_start, tx_end)), + StaticFileSegment::Transactions, + ); + + writer.inner().set_dirty(); + writer.commit().expect("commit empty jar"); + + if jar_idx < num_jars - 1 { + writer.increment_block(block_end + 1).expect("increment block"); + } + } + + static_file_provider.initialize_index().expect("initialize index"); + } + + struct PruneTestCase { + prune_mode: PruneMode, + expected_pruned: usize, + expected_lowest_block: Option, + } + + fn run_prune_test( + factory: &ProviderFactory, + finished_exex_height_rx: &tokio::sync::watch::Receiver, + test_case: PruneTestCase, + tip: BlockNumber, + ) { + let bodies = Bodies::new(test_case.prune_mode); + let segments: Vec>> = vec![Box::new(bodies)]; + + let mut pruner = Pruner::new_with_factory( + factory.clone(), + segments, + 5, + 10000, + None, + finished_exex_height_rx.clone(), + ); + + let result = pruner.run(tip).expect("pruner run"); + + assert_eq!(result.progress, PruneProgress::Finished); + assert_eq!(result.segments.len(), 1); + + let (segment, output) = &result.segments[0]; + assert_eq!(*segment, PruneSegment::Bodies); + assert_eq!(output.pruned, test_case.expected_pruned); + + let static_provider = factory.static_file_provider(); + assert_eq!( + static_provider.get_lowest_static_file_block(StaticFileSegment::Transactions), + test_case.expected_lowest_block + ); + assert_eq!( + static_provider.get_highest_static_file_block(StaticFileSegment::Transactions), + Some(tip) + ); + } + + #[test] + fn bodies_prune_through_pruner() { + let factory = create_test_provider_factory(); + let tip = 2_499_999; + setup_static_file_jars(&factory, tip); + + let (_, finished_exex_height_rx) = tokio::sync::watch::channel(FinishedExExHeight::NoExExs); + + let test_cases = vec![ + // Test 1: PruneMode::Before(750_000) → deletes jar 1 (0-499_999) + PruneTestCase { + prune_mode: PruneMode::Before(750_000), + expected_pruned: 1000, + expected_lowest_block: Some(999_999), + }, + // Test 2: PruneMode::Before(850_000) → no deletion (jar 2: 500_000-999_999 contains + // target) + PruneTestCase { + prune_mode: PruneMode::Before(850_000), + expected_pruned: 0, + expected_lowest_block: Some(999_999), + }, + // Test 3: PruneMode::Before(1_599_999) → deletes jar 2 (500_000-999_999) and jar 3 + // (1_000_000-1_499_999) + PruneTestCase { + prune_mode: PruneMode::Before(1_599_999), + expected_pruned: 2000, + expected_lowest_block: Some(1_999_999), + }, + // Test 4: PruneMode::Distance(500_000) with tip=2_499_999 → deletes jar 4 + // (1_500_000-1_999_999) + PruneTestCase { + prune_mode: PruneMode::Distance(500_000), + expected_pruned: 1000, + expected_lowest_block: Some(2_499_999), + }, + // Test 5: PruneMode::Before(2_300_000) → no deletion (jar 5: 2_000_000-2_499_999 + // contains target) + PruneTestCase { + 
prune_mode: PruneMode::Before(2_300_000), + expected_pruned: 0, + expected_lowest_block: Some(2_499_999), + }, + ]; + + for test_case in test_cases { + run_prune_test(&factory, &finished_exex_height_rx, test_case, tip); + } + } + + #[test] + fn min_block_updated_on_sync() { + // Regression test: update_index must update min_block to prevent stale values + // that can cause pruner to incorrectly delete static files when PruneMode::Before(0) is + // used. + + struct MinBlockTestCase { + // Block range + initial_range: Option, + updated_range: SegmentRangeInclusive, + // Min block + expected_before_update: Option, + expected_after_update: BlockNumber, + // Test delete_segment_below_block with this value + delete_below_block: BlockNumber, + // Expected number of deleted segments + expected_deleted: usize, + } + + let test_cases = vec![ + // Test 1: Empty initial state (None) -> syncs to block 100 + MinBlockTestCase { + initial_range: None, + updated_range: SegmentRangeInclusive::new(0, 100), + expected_before_update: None, + expected_after_update: 100, + delete_below_block: 1, + expected_deleted: 0, + }, + // Test 2: Genesis state [0..=0] -> syncs to block 100 (eg. op-reth node after op-reth + // init-state) + MinBlockTestCase { + initial_range: Some(SegmentRangeInclusive::new(0, 0)), + updated_range: SegmentRangeInclusive::new(0, 100), + expected_before_update: Some(0), + expected_after_update: 100, + delete_below_block: 1, + expected_deleted: 0, + }, + // Test 3: Existing state [0..=50] -> syncs to block 200 + MinBlockTestCase { + initial_range: Some(SegmentRangeInclusive::new(0, 50)), + updated_range: SegmentRangeInclusive::new(0, 200), + expected_before_update: Some(50), + expected_after_update: 200, + delete_below_block: 150, + expected_deleted: 0, + }, + ]; + + for ( + idx, + MinBlockTestCase { + initial_range, + updated_range, + expected_before_update, + expected_after_update, + delete_below_block, + expected_deleted, + }, + ) in test_cases.into_iter().enumerate() + { + let factory = create_test_provider_factory(); + let static_provider = factory.static_file_provider(); + + let mut writer = + static_provider.latest_writer(StaticFileSegment::Transactions).unwrap(); + + // Set up initial state if provided + if let Some(initial_range) = initial_range { + *writer.user_header_mut() = SegmentHeader::new( + initial_range, + Some(initial_range), + Some(initial_range), + StaticFileSegment::Transactions, + ); + writer.inner().set_dirty(); + writer.commit().unwrap(); + static_provider.initialize_index().unwrap(); + } + + // Verify initial state + assert_eq!( + static_provider.get_lowest_static_file_block(StaticFileSegment::Transactions), + expected_before_update, + "Test case {}: Initial min_block mismatch", + idx + ); + + // Update to new range + *writer.user_header_mut() = SegmentHeader::new( + updated_range, + Some(updated_range), + Some(updated_range), + StaticFileSegment::Transactions, + ); + writer.inner().set_dirty(); + writer.commit().unwrap(); // update_index is called inside + + // Verify min_block was updated (not stuck at stale value) + assert_eq!( + static_provider.get_lowest_static_file_block(StaticFileSegment::Transactions), + Some(expected_after_update), + "Test case {}: min_block should be updated to {} (not stuck at stale value)", + idx, + expected_after_update + ); + + // Verify delete_segment_below_block behaves correctly with updated min_block + let deleted = static_provider + .delete_segment_below_block(StaticFileSegment::Transactions, delete_below_block) + .unwrap(); + + 
assert_eq!(deleted.len(), expected_deleted); + } + } +} diff --git a/crates/prune/prune/src/segments/user/merkle_change_sets.rs b/crates/prune/prune/src/segments/user/merkle_change_sets.rs new file mode 100644 index 00000000000..89cc4567b7d --- /dev/null +++ b/crates/prune/prune/src/segments/user/merkle_change_sets.rs @@ -0,0 +1,116 @@ +use crate::{ + db_ext::DbTxPruneExt, + segments::{PruneInput, Segment}, + PrunerError, +}; +use alloy_primitives::B256; +use reth_db_api::{models::BlockNumberHashedAddress, table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + errors::provider::ProviderResult, BlockReader, ChainStateBlockReader, DBProvider, + NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider, +}; +use reth_prune_types::{ + PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, +}; +use tracing::{instrument, trace}; + +#[derive(Debug)] +pub struct MerkleChangeSets { + mode: PruneMode, +} + +impl MerkleChangeSets { + pub const fn new(mode: PruneMode) -> Self { + Self { mode } + } +} + +impl Segment for MerkleChangeSets +where + Provider: DBProvider + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader + + ChainStateBlockReader + + NodePrimitivesProvider>, +{ + fn segment(&self) -> PruneSegment { + PruneSegment::MerkleChangeSets + } + + fn mode(&self) -> Option { + Some(self.mode) + } + + fn purpose(&self) -> PrunePurpose { + PrunePurpose::User + } + + #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { + let Some(block_range) = input.get_next_block_range() else { + trace!(target: "pruner", "No change sets to prune"); + return Ok(SegmentOutput::done()) + }; + + let block_range_end = *block_range.end(); + let mut limiter = input.limiter; + + // Create range for StoragesTrieChangeSets which uses BlockNumberHashedAddress as key + let storage_range_start: BlockNumberHashedAddress = + (*block_range.start(), B256::ZERO).into(); + let storage_range_end: BlockNumberHashedAddress = + (*block_range.end() + 1, B256::ZERO).into(); + let storage_range = storage_range_start..storage_range_end; + + let mut last_storages_pruned_block = None; + let (storages_pruned, done) = + provider.tx_ref().prune_table_with_range::( + storage_range, + &mut limiter, + |_| false, + |(BlockNumberHashedAddress((block_number, _)), _)| { + last_storages_pruned_block = Some(block_number); + }, + )?; + + trace!(target: "pruner", %storages_pruned, %done, "Pruned storages change sets"); + + let mut last_accounts_pruned_block = block_range_end; + let last_storages_pruned_block = last_storages_pruned_block + // If there's more storage changesets to prune, set the checkpoint block number to + // previous, so we could finish pruning its storage changesets on the next run. 
+ .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) + .unwrap_or(block_range_end); + + let (accounts_pruned, done) = + provider.tx_ref().prune_table_with_range::( + block_range, + &mut limiter, + |_| false, + |row| last_accounts_pruned_block = row.0, + )?; + + trace!(target: "pruner", %accounts_pruned, %done, "Pruned accounts change sets"); + + let progress = limiter.progress(done); + + Ok(SegmentOutput { + progress, + pruned: accounts_pruned + storages_pruned, + checkpoint: Some(SegmentOutputCheckpoint { + block_number: Some(last_storages_pruned_block.min(last_accounts_pruned_block)), + tx_number: None, + }), + }) + } + + fn save_checkpoint( + &self, + provider: &Provider, + checkpoint: PruneCheckpoint, + ) -> ProviderResult<()> { + provider.save_prune_checkpoint(PruneSegment::MerkleChangeSets, checkpoint) + } +} diff --git a/crates/prune/prune/src/segments/user/mod.rs b/crates/prune/prune/src/segments/user/mod.rs index 0b787d14dae..ef7ae05a9d5 100644 --- a/crates/prune/prune/src/segments/user/mod.rs +++ b/crates/prune/prune/src/segments/user/mod.rs @@ -1,14 +1,16 @@ mod account_history; +mod bodies; mod history; +mod merkle_change_sets; mod receipts; -mod receipts_by_logs; mod sender_recovery; mod storage_history; mod transaction_lookup; pub use account_history::AccountHistory; +pub use bodies::Bodies; +pub use merkle_change_sets::MerkleChangeSets; pub use receipts::Receipts; -pub use receipts_by_logs::ReceiptsByLogs; pub use sender_recovery::SenderRecovery; pub use storage_history::StorageHistory; pub use transaction_lookup::TransactionLookup; diff --git a/crates/prune/prune/src/segments/user/receipts.rs b/crates/prune/prune/src/segments/user/receipts.rs index ecb0f3423be..03faddc1d5b 100644 --- a/crates/prune/prune/src/segments/user/receipts.rs +++ b/crates/prune/prune/src/segments/user/receipts.rs @@ -42,7 +42,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { crate::segments::receipts::prune(provider, input) } diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs deleted file mode 100644 index 0849db52518..00000000000 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ /dev/null @@ -1,364 +0,0 @@ -use crate::{ - db_ext::DbTxPruneExt, - segments::{PruneInput, Segment}, - PrunerError, -}; -use alloy_consensus::TxReceipt; -use reth_db_api::{table::Value, tables, transaction::DbTxMut}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{ - BlockReader, DBProvider, NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider, -}; -use reth_prune_types::{ - PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, SegmentOutput, - MINIMUM_PRUNING_DISTANCE, -}; -use tracing::{instrument, trace}; -#[derive(Debug)] -pub struct ReceiptsByLogs { - config: ReceiptsLogPruneConfig, -} - -impl ReceiptsByLogs { - pub const fn new(config: ReceiptsLogPruneConfig) -> Self { - Self { config } - } -} - -impl Segment for ReceiptsByLogs -where - Provider: DBProvider - + PruneCheckpointWriter - + TransactionsProvider - + BlockReader - + NodePrimitivesProvider>, -{ - fn segment(&self) -> PruneSegment { - PruneSegment::ContractLogs - } - - fn mode(&self) -> Option { - None - } - - fn purpose(&self) -> PrunePurpose { - PrunePurpose::User - } 
- - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] - fn prune(&self, provider: &Provider, input: PruneInput) -> Result { - // Contract log filtering removes every receipt possible except the ones in the list. So, - // for the other receipts it's as if they had a `PruneMode::Distance()` of - // `MINIMUM_PRUNING_DISTANCE`. - let to_block = PruneMode::Distance(MINIMUM_PRUNING_DISTANCE) - .prune_target_block(input.to_block, PruneSegment::ContractLogs, PrunePurpose::User)? - .map(|(bn, _)| bn) - .unwrap_or_default(); - - // Get status checkpoint from latest run - let mut last_pruned_block = - input.previous_checkpoint.and_then(|checkpoint| checkpoint.block_number); - - let initial_last_pruned_block = last_pruned_block; - - let mut from_tx_number = match initial_last_pruned_block { - Some(block) => provider - .block_body_indices(block)? - .map(|block| block.last_tx_num() + 1) - .unwrap_or(0), - None => 0, - }; - - // Figure out what receipts have already been pruned, so we can have an accurate - // `address_filter` - let address_filter = self.config.group_by_block(input.to_block, last_pruned_block)?; - - // Splits all transactions in different block ranges. Each block range will have its own - // filter address list and will check it while going through the table - // - // Example: - // For an `address_filter` such as: - // { block9: [a1, a2], block20: [a3, a4, a5] } - // - // The following structures will be created in the exact order as showed: - // `block_ranges`: [ - // (block0, block8, 0 addresses), - // (block9, block19, 2 addresses), - // (block20, to_block, 5 addresses) - // ] - // `filtered_addresses`: [a1, a2, a3, a4, a5] - // - // The first range will delete all receipts between block0 - block8 - // The second range will delete all receipts between block9 - 19, except the ones with - // emitter logs from these addresses: [a1, a2]. - // The third range will delete all receipts between block20 - to_block, except the ones with - // emitter logs from these addresses: [a1, a2, a3, a4, a5] - let mut block_ranges = vec![]; - let mut blocks_iter = address_filter.iter().peekable(); - let mut filtered_addresses = vec![]; - - while let Some((start_block, addresses)) = blocks_iter.next() { - filtered_addresses.extend_from_slice(addresses); - - // This will clear all receipts before the first appearance of a contract log or since - // the block after the last pruned one. - if block_ranges.is_empty() { - let init = last_pruned_block.map(|b| b + 1).unwrap_or_default(); - if init < *start_block { - block_ranges.push((init, *start_block - 1, 0)); - } - } - - let end_block = - blocks_iter.peek().map(|(next_block, _)| *next_block - 1).unwrap_or(to_block); - - // Addresses in lower block ranges, are still included in the inclusion list for future - // ranges. - block_ranges.push((*start_block, end_block, filtered_addresses.len())); - } - - trace!( - target: "pruner", - ?block_ranges, - ?filtered_addresses, - "Calculated block ranges and filtered addresses", - ); - - let mut limiter = input.limiter; - - let mut done = true; - let mut pruned = 0; - let mut last_pruned_transaction = None; - for (start_block, end_block, num_addresses) in block_ranges { - let block_range = start_block..=end_block; - - // Calculate the transaction range from this block range - let tx_range_end = match provider.block_body_indices(end_block)? { - Some(body) => body.last_tx_num(), - None => { - trace!( - target: "pruner", - ?block_range, - "No receipts to prune." 
- ); - continue - } - }; - let tx_range = from_tx_number..=tx_range_end; - - // Delete receipts, except the ones in the inclusion list - let mut last_skipped_transaction = 0; - let deleted; - (deleted, done) = provider.tx_ref().prune_table_with_range::::Receipt, - >>( - tx_range, - &mut limiter, - |(tx_num, receipt)| { - let skip = num_addresses > 0 && - receipt.logs().iter().any(|log| { - filtered_addresses[..num_addresses].contains(&&log.address) - }); - - if skip { - last_skipped_transaction = *tx_num; - } - skip - }, - |row| last_pruned_transaction = Some(row.0), - )?; - - trace!(target: "pruner", %deleted, %done, ?block_range, "Pruned receipts"); - - pruned += deleted; - - // For accurate checkpoints we need to know that we have checked every transaction. - // Example: we reached the end of the range, and the last receipt is supposed to skip - // its deletion. - let last_pruned_transaction = *last_pruned_transaction - .insert(last_pruned_transaction.unwrap_or_default().max(last_skipped_transaction)); - - last_pruned_block = Some( - provider - .transaction_block(last_pruned_transaction)? - .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? - // If there's more receipts to prune, set the checkpoint block number to - // previous, so we could finish pruning its receipts on the - // next run. - .saturating_sub(if done { 0 } else { 1 }), - ); - - if limiter.is_limit_reached() { - done &= end_block == to_block; - break - } - - from_tx_number = last_pruned_transaction + 1; - } - - // If there are contracts using `PruneMode::Distance(_)` there will be receipts before - // `to_block` that become eligible to be pruned in future runs. Therefore, our checkpoint is - // not actually `to_block`, but the `lowest_block_with_distance` from any contract. - // This ensures that in future pruner runs we can prune all these receipts between the - // previous `lowest_block_with_distance` and the new one using - // `get_next_tx_num_range_from_checkpoint`. - // - // Only applies if we were able to prune everything intended for this run, otherwise the - // checkpoint is the `last_pruned_block`. - let prune_mode_block = self - .config - .lowest_block_with_distance(input.to_block, initial_last_pruned_block)? 
- .unwrap_or(to_block); - - provider.save_prune_checkpoint( - PruneSegment::ContractLogs, - PruneCheckpoint { - block_number: Some(prune_mode_block.min(last_pruned_block.unwrap_or(u64::MAX))), - tx_number: last_pruned_transaction, - prune_mode: PruneMode::Before(prune_mode_block), - }, - )?; - - let progress = limiter.progress(done); - - Ok(SegmentOutput { progress, pruned, checkpoint: None }) - } -} - -#[cfg(test)] -mod tests { - use crate::segments::{PruneInput, PruneLimiter, ReceiptsByLogs, Segment}; - use alloy_primitives::B256; - use assert_matches::assert_matches; - use reth_db_api::{cursor::DbCursorRO, tables, transaction::DbTx}; - use reth_primitives_traits::InMemorySize; - use reth_provider::{ - DBProvider, DatabaseProviderFactory, PruneCheckpointReader, TransactionsProvider, - }; - use reth_prune_types::{PruneMode, PruneSegment, ReceiptsLogPruneConfig}; - use reth_stages::test_utils::{StorageKind, TestStageDB}; - use reth_testing_utils::generators::{ - self, random_block_range, random_eoa_account, random_log, random_receipt, BlockRangeParams, - }; - use std::collections::BTreeMap; - - #[test] - fn prune_receipts_by_logs() { - reth_tracing::init_test_tracing(); - - let db = TestStageDB::default(); - let mut rng = generators::rng(); - - let tip = 20000; - let blocks = [ - random_block_range( - &mut rng, - 0..=100, - BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..5, ..Default::default() }, - ), - random_block_range( - &mut rng, - (100 + 1)..=(tip - 100), - BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, - ), - random_block_range( - &mut rng, - (tip - 100 + 1)..=tip, - BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..5, ..Default::default() }, - ), - ] - .concat(); - db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks"); - - let mut receipts = Vec::new(); - - let (deposit_contract_addr, _) = random_eoa_account(&mut rng); - for block in &blocks { - receipts.reserve_exact(block.body().size()); - for (txi, transaction) in block.body().transactions.iter().enumerate() { - let mut receipt = random_receipt(&mut rng, transaction, Some(1), None); - receipt.logs.push(random_log( - &mut rng, - (txi == (block.transaction_count() - 1)).then_some(deposit_contract_addr), - Some(1), - )); - receipts.push((receipts.len() as u64, receipt)); - } - } - db.insert_receipts(receipts).expect("insert receipts"); - - assert_eq!( - db.table::().unwrap().len(), - blocks.iter().map(|block| block.transaction_count()).sum::() - ); - assert_eq!( - db.table::().unwrap().len(), - db.table::().unwrap().len() - ); - - let run_prune = || { - let provider = db.factory.database_provider_rw().unwrap(); - - let prune_before_block: usize = 20; - let prune_mode = PruneMode::Before(prune_before_block as u64); - let receipts_log_filter = - ReceiptsLogPruneConfig(BTreeMap::from([(deposit_contract_addr, prune_mode)])); - - let limiter = PruneLimiter::default().set_deleted_entries_limit(10); - - let result = ReceiptsByLogs::new(receipts_log_filter).prune( - &provider, - PruneInput { - previous_checkpoint: db - .factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::ContractLogs) - .unwrap(), - to_block: tip, - limiter, - }, - ); - provider.commit().expect("commit"); - - assert_matches!(result, Ok(_)); - let output = result.unwrap(); - - let (pruned_block, pruned_tx) = db - .factory - .provider() - .unwrap() - .get_prune_checkpoint(PruneSegment::ContractLogs) - .unwrap() - .map(|checkpoint| (checkpoint.block_number.unwrap(), 
checkpoint.tx_number.unwrap())) - .unwrap_or_default(); - - // All receipts are in the end of the block - let unprunable = pruned_block.saturating_sub(prune_before_block as u64 - 1); - - assert_eq!( - db.table::().unwrap().len(), - blocks.iter().map(|block| block.transaction_count()).sum::() - - ((pruned_tx + 1) - unprunable) as usize - ); - - output.progress.is_finished() - }; - - while !run_prune() {} - - let provider = db.factory.provider().unwrap(); - let mut cursor = provider.tx_ref().cursor_read::().unwrap(); - let walker = cursor.walk(None).unwrap(); - for receipt in walker { - let (tx_num, receipt) = receipt.unwrap(); - - // Either we only find our contract, or the receipt is part of the unprunable receipts - // set by tip - 128 - assert!( - receipt.logs.iter().any(|l| l.address == deposit_contract_addr) || - provider.transaction_block(tx_num).unwrap().unwrap() > tip - 128, - ); - } - } -} diff --git a/crates/prune/prune/src/segments/user/sender_recovery.rs b/crates/prune/prune/src/segments/user/sender_recovery.rs index 35ee487203a..9fbad8c428c 100644 --- a/crates/prune/prune/src/segments/user/sender_recovery.rs +++ b/crates/prune/prune/src/segments/user/sender_recovery.rs @@ -37,7 +37,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let tx_range = match input.get_next_tx_num_range(provider)? { Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/storage_history.rs b/crates/prune/prune/src/segments/user/storage_history.rs index ee7447c37da..a4ad37bf789 100644 --- a/crates/prune/prune/src/segments/user/storage_history.rs +++ b/crates/prune/prune/src/segments/user/storage_history.rs @@ -47,7 +47,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let range = match input.get_next_block_range() { Some(range) => range, diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index e218f623ed5..fed90d84f2d 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -6,8 +6,11 @@ use crate::{ use alloy_eips::eip2718::Encodable2718; use rayon::prelude::*; use reth_db_api::{tables, transaction::DbTxMut}; -use reth_provider::{BlockReader, DBProvider, PruneCheckpointReader}; -use reth_prune_types::{PruneMode, PrunePurpose, PruneSegment, SegmentOutputCheckpoint}; +use reth_provider::{BlockReader, DBProvider, PruneCheckpointReader, StaticFileProviderFactory}; +use reth_prune_types::{ + PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutputCheckpoint, +}; +use reth_static_file_types::StaticFileSegment; use tracing::{debug, instrument, trace}; #[derive(Debug)] @@ -23,8 +26,10 @@ impl TransactionLookup { impl Segment for TransactionLookup where - Provider: - DBProvider + BlockReader + PruneCheckpointReader, + Provider: DBProvider + + BlockReader + + PruneCheckpointReader + + StaticFileProviderFactory, { fn segment(&self) -> PruneSegment { PruneSegment::TransactionLookup @@ -38,7 +43,7 @@ where PrunePurpose::User } - #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] + 
#[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] fn prune( &self, provider: &Provider, @@ -47,18 +52,26 @@ where // It is not possible to prune TransactionLookup data for which we don't have transaction // data. If the TransactionLookup checkpoint is lagging behind (which can happen e.g. when // pre-merge history is dropped and then later tx lookup pruning is enabled) then we can - // only prune from the tx checkpoint and onwards. - if let Some(txs_checkpoint) = provider.get_prune_checkpoint(PruneSegment::Transactions)? && + // only prune from the lowest static file. + if let Some(lowest_range) = + provider.static_file_provider().get_lowest_range(StaticFileSegment::Transactions) && input .previous_checkpoint - .is_none_or(|checkpoint| checkpoint.block_number < txs_checkpoint.block_number) + .is_none_or(|checkpoint| checkpoint.block_number < Some(lowest_range.start())) { - input.previous_checkpoint = Some(txs_checkpoint); - debug!( - target: "pruner", - transactions_checkpoint = ?input.previous_checkpoint, - "No TransactionLookup checkpoint found, using Transactions checkpoint as fallback" - ); + let new_checkpoint = lowest_range.start().saturating_sub(1); + if let Some(body_indices) = provider.block_body_indices(new_checkpoint)? { + input.previous_checkpoint = Some(PruneCheckpoint { + block_number: Some(new_checkpoint), + tx_number: Some(body_indices.last_tx_num()), + prune_mode: self.mode, + }); + debug!( + target: "pruner", + static_file_checkpoint = ?input.previous_checkpoint, + "Using static file transaction checkpoint as TransactionLookup starting point" + ); + } } let (start, end) = match input.get_next_tx_num_range(provider)? { diff --git a/crates/prune/types/Cargo.toml b/crates/prune/types/Cargo.toml index b60621b331a..30adbb14d91 100644 --- a/crates/prune/types/Cargo.toml +++ b/crates/prune/types/Cargo.toml @@ -16,6 +16,7 @@ reth-codecs = { workspace = true, optional = true } alloy-primitives.workspace = true derive_more.workspace = true +strum = { workspace = true, features = ["derive"] } thiserror.workspace = true modular-bitfield = { workspace = true, optional = true } @@ -42,6 +43,7 @@ std = [ "serde?/std", "serde_json/std", "thiserror/std", + "strum/std", ] test-utils = [ "std", diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 315063278b2..a588693892a 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -18,10 +18,6 @@ mod pruner; mod segment; mod target; -use alloc::{collections::BTreeMap, vec::Vec}; -use alloy_primitives::{Address, BlockNumber}; -use core::ops::Deref; - pub use checkpoint::PruneCheckpoint; pub use event::PrunerEvent; pub use mode::PruneMode; @@ -31,300 +27,3 @@ pub use pruner::{ }; pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError}; pub use target::{PruneModes, UnwindTargetPrunedError, MINIMUM_PRUNING_DISTANCE}; - -/// Configuration for pruning receipts not associated with logs emitted by the specified contracts. -#[derive(Debug, Clone, PartialEq, Eq, Default)] -#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] -pub struct ReceiptsLogPruneConfig(pub BTreeMap); - -impl ReceiptsLogPruneConfig { - /// Checks if the configuration is empty - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Given the `tip` block number, consolidates the structure so it can easily be queried for - /// filtering across a range of blocks. 
- /// - /// Example: - /// - /// `{ addrA: Before(872), addrB: Before(500), addrC: Distance(128) }` - /// - /// for `tip: 1000`, gets transformed to a map such as: - /// - /// `{ 500: [addrB], 872: [addrA, addrC] }` - /// - /// The [`BlockNumber`] key of the new map should be viewed as `PruneMode::Before(block)`, which - /// makes the previous result equivalent to - /// - /// `{ Before(500): [addrB], Before(872): [addrA, addrC] }` - pub fn group_by_block( - &self, - tip: BlockNumber, - pruned_block: Option, - ) -> Result>, PruneSegmentError> { - let mut map = BTreeMap::new(); - let base_block = pruned_block.unwrap_or_default() + 1; - - for (address, mode) in &self.0 { - // Getting `None`, means that there is nothing to prune yet, so we need it to include in - // the BTreeMap (block = 0), otherwise it will be excluded. - // Reminder that this BTreeMap works as an inclusion list that excludes (prunes) all - // other receipts. - // - // Reminder, that we increment because the [`BlockNumber`] key of the new map should be - // viewed as `PruneMode::Before(block)` - let block = base_block.max( - mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)? - .map(|(block, _)| block) - .unwrap_or_default() + - 1, - ); - - map.entry(block).or_insert_with(Vec::new).push(address) - } - Ok(map) - } - - /// Returns the lowest block where we start filtering logs which use `PruneMode::Distance(_)`. - pub fn lowest_block_with_distance( - &self, - tip: BlockNumber, - pruned_block: Option, - ) -> Result, PruneSegmentError> { - let pruned_block = pruned_block.unwrap_or_default(); - let mut lowest = None; - - for mode in self.values() { - if mode.is_distance() && - let Some((block, _)) = - mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)? 
- { - lowest = Some(lowest.unwrap_or(u64::MAX).min(block)); - } - } - - Ok(lowest.map(|lowest| lowest.max(pruned_block))) - } -} - -impl Deref for ReceiptsLogPruneConfig { - type Target = BTreeMap; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_group_by_block_empty_config() { - let config = ReceiptsLogPruneConfig(BTreeMap::new()); - let tip = 1000; - let pruned_block = None; - - let result = config.group_by_block(tip, pruned_block).unwrap(); - assert!(result.is_empty(), "The result should be empty when the config is empty"); - } - - #[test] - fn test_group_by_block_single_entry() { - let mut config_map = BTreeMap::new(); - let address = Address::new([1; 20]); - let prune_mode = PruneMode::Before(500); - config_map.insert(address, prune_mode); - - let config = ReceiptsLogPruneConfig(config_map); - // Big tip to have something to prune for the target block - let tip = 3000000; - let pruned_block = Some(400); - - let result = config.group_by_block(tip, pruned_block).unwrap(); - - // Expect one entry with block 500 and the corresponding address - assert_eq!(result.len(), 1); - assert_eq!(result[&500], vec![&address], "Address should be grouped under block 500"); - - // Tip smaller than the target block, so that we have nothing to prune for the block - let tip = 300; - let pruned_block = Some(400); - - let result = config.group_by_block(tip, pruned_block).unwrap(); - - // Expect one entry with block 400 and the corresponding address - assert_eq!(result.len(), 1); - assert_eq!(result[&401], vec![&address], "Address should be grouped under block 400"); - } - - #[test] - fn test_group_by_block_multiple_entries() { - let mut config_map = BTreeMap::new(); - let address1 = Address::new([1; 20]); - let address2 = Address::new([2; 20]); - let prune_mode1 = PruneMode::Before(600); - let prune_mode2 = PruneMode::Before(800); - config_map.insert(address1, prune_mode1); - config_map.insert(address2, prune_mode2); - - let config = ReceiptsLogPruneConfig(config_map); - let tip = 900000; - let pruned_block = Some(400); - - let result = config.group_by_block(tip, pruned_block).unwrap(); - - // Expect two entries: one for block 600 and another for block 800 - assert_eq!(result.len(), 2); - assert_eq!(result[&600], vec![&address1], "Address1 should be grouped under block 600"); - assert_eq!(result[&800], vec![&address2], "Address2 should be grouped under block 800"); - } - - #[test] - fn test_group_by_block_with_distance_prune_mode() { - let mut config_map = BTreeMap::new(); - let address = Address::new([1; 20]); - let prune_mode = PruneMode::Distance(100000); - config_map.insert(address, prune_mode); - - let config = ReceiptsLogPruneConfig(config_map); - let tip = 100100; - // Pruned block is smaller than the target block - let pruned_block = Some(50); - - let result = config.group_by_block(tip, pruned_block).unwrap(); - - // Expect the entry to be grouped under block 100 (tip - distance) - assert_eq!(result.len(), 1); - assert_eq!(result[&101], vec![&address], "Address should be grouped under block 100"); - - let tip = 100100; - // Pruned block is larger than the target block - let pruned_block = Some(800); - - let result = config.group_by_block(tip, pruned_block).unwrap(); - - // Expect the entry to be grouped under block 800 which is larger than tip - distance - assert_eq!(result.len(), 1); - assert_eq!(result[&801], vec![&address], "Address should be grouped under block 800"); - } - - #[test] - fn 
test_lowest_block_with_distance_empty_config() { - let config = ReceiptsLogPruneConfig(BTreeMap::new()); - let tip = 1000; - let pruned_block = None; - - let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); - assert_eq!(result, None, "The result should be None when the config is empty"); - } - - #[test] - fn test_lowest_block_with_distance_no_distance_mode() { - let mut config_map = BTreeMap::new(); - let address = Address::new([1; 20]); - let prune_mode = PruneMode::Before(500); - config_map.insert(address, prune_mode); - - let config = ReceiptsLogPruneConfig(config_map); - let tip = 1000; - let pruned_block = None; - - let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); - assert_eq!(result, None, "The result should be None when there are no Distance modes"); - } - - #[test] - fn test_lowest_block_with_distance_single_entry() { - let mut config_map = BTreeMap::new(); - let address = Address::new([1; 20]); - let prune_mode = PruneMode::Distance(100000); - config_map.insert(address, prune_mode); - - let config = ReceiptsLogPruneConfig(config_map); - - let tip = 100100; - let pruned_block = Some(400); - - // Expect the lowest block to be 400 as 400 > 100100 - 100000 (tip - distance) - assert_eq!( - config.lowest_block_with_distance(tip, pruned_block).unwrap(), - Some(400), - "The lowest block should be 400" - ); - - let tip = 100100; - let pruned_block = Some(50); - - // Expect the lowest block to be 100 as 100 > 50 (pruned block) - assert_eq!( - config.lowest_block_with_distance(tip, pruned_block).unwrap(), - Some(100), - "The lowest block should be 100" - ); - } - - #[test] - fn test_lowest_block_with_distance_multiple_entries_last() { - let mut config_map = BTreeMap::new(); - let address1 = Address::new([1; 20]); - let address2 = Address::new([2; 20]); - let prune_mode1 = PruneMode::Distance(100100); - let prune_mode2 = PruneMode::Distance(100300); - config_map.insert(address1, prune_mode1); - config_map.insert(address2, prune_mode2); - - let config = ReceiptsLogPruneConfig(config_map); - let tip = 200300; - let pruned_block = Some(100); - - // The lowest block should be 200300 - 100300 = 100000: - // - First iteration will return 100200 => 200300 - 100100 = 100200 - // - Second iteration will return 100000 => 200300 - 100300 = 100000 < 100200 - // - Final result is 100000 - assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); - } - - #[test] - fn test_lowest_block_with_distance_multiple_entries_first() { - let mut config_map = BTreeMap::new(); - let address1 = Address::new([1; 20]); - let address2 = Address::new([2; 20]); - let prune_mode1 = PruneMode::Distance(100400); - let prune_mode2 = PruneMode::Distance(100300); - config_map.insert(address1, prune_mode1); - config_map.insert(address2, prune_mode2); - - let config = ReceiptsLogPruneConfig(config_map); - let tip = 200300; - let pruned_block = Some(100); - - // The lowest block should be 200300 - 100400 = 99900: - // - First iteration, lowest block is 200300 - 100400 = 99900 - // - Second iteration, lowest block is still 99900 < 200300 - 100300 = 100000 - // - Final result is 99900 - assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(99900)); - } - - #[test] - fn test_lowest_block_with_distance_multiple_entries_pruned_block() { - let mut config_map = BTreeMap::new(); - let address1 = Address::new([1; 20]); - let address2 = Address::new([2; 20]); - let prune_mode1 = PruneMode::Distance(100400); - let prune_mode2 = 
PruneMode::Distance(100300); - config_map.insert(address1, prune_mode1); - config_map.insert(address2, prune_mode2); - - let config = ReceiptsLogPruneConfig(config_map); - let tip = 200300; - let pruned_block = Some(100000); - - // The lowest block should be 100000 because: - // - Lowest is 200300 - 100400 = 99900 < 200300 - 100300 = 100000 - // - Lowest is compared to the pruned block 100000: 100000 > 99900 - // - Finally the lowest block is 100000 - assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); - } -} diff --git a/crates/prune/types/src/mode.rs b/crates/prune/types/src/mode.rs index 4c09ccfa639..0565087673d 100644 --- a/crates/prune/types/src/mode.rs +++ b/crates/prune/types/src/mode.rs @@ -129,7 +129,11 @@ mod tests { // Test for a scenario where there are no minimum blocks and Full can be used assert_eq!( - PruneMode::Full.prune_target_block(tip, PruneSegment::Transactions, PrunePurpose::User), + PruneMode::Full.prune_target_block( + tip, + PruneSegment::TransactionLookup, + PrunePurpose::User + ), Ok(Some((tip, PruneMode::Full))), ); } diff --git a/crates/prune/types/src/segment.rs b/crates/prune/types/src/segment.rs index e131f353fe3..36e39fcb585 100644 --- a/crates/prune/types/src/segment.rs +++ b/crates/prune/types/src/segment.rs @@ -1,9 +1,16 @@ +#![allow(deprecated)] // necessary to all defining deprecated `PruneSegment` variants + use crate::MINIMUM_PRUNING_DISTANCE; use derive_more::Display; +use strum::{EnumIter, IntoEnumIterator}; use thiserror::Error; /// Segment of the data that can be pruned. -#[derive(Debug, Display, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] +/// +/// VERY IMPORTANT NOTE: new variants must be added to the end of this enum, and old variants which +/// are no longer used must not be removed from this enum. The variant index is encoded directly +/// when writing to the `PruneCheckpoint` table, so changing the order here will corrupt the table. +#[derive(Debug, Display, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, EnumIter)] #[cfg_attr(test, derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] @@ -21,11 +28,19 @@ pub enum PruneSegment { AccountHistory, /// Prune segment responsible for the `StorageChangeSets` and `StoragesHistory` tables. StorageHistory, - /// Prune segment responsible for the `CanonicalHeaders`, `Headers` and - /// `HeaderTerminalDifficulties` tables. + #[deprecated = "Variant indexes cannot be changed"] + #[strum(disabled)] + /// Prune segment responsible for the `CanonicalHeaders`, `Headers` tables. Headers, + #[deprecated = "Variant indexes cannot be changed"] + #[strum(disabled)] /// Prune segment responsible for the `Transactions` table. Transactions, + /// Prune segment responsible for all rows in `AccountsTrieChangeSets` and + /// `StoragesTrieChangeSets` table. + MerkleChangeSets, + /// Prune segment responsible for bodies (transactions in static files). + Bodies, } #[cfg(test)] @@ -37,17 +52,28 @@ impl Default for PruneSegment { } impl PruneSegment { + /// Returns an iterator over all variants of [`PruneSegment`]. + /// + /// Excludes deprecated variants that are no longer used, but can still be found in the + /// database. + pub fn variants() -> impl Iterator { + Self::iter() + } + /// Returns minimum number of blocks to keep in the database for this segment. 
pub const fn min_blocks(&self, purpose: PrunePurpose) -> u64 { match self { - Self::SenderRecovery | Self::TransactionLookup | Self::Headers | Self::Transactions => { - 0 - } + Self::SenderRecovery | Self::TransactionLookup => 0, Self::Receipts if purpose.is_static_file() => 0, - Self::ContractLogs | Self::AccountHistory | Self::StorageHistory => { - MINIMUM_PRUNING_DISTANCE - } + Self::ContractLogs | + Self::AccountHistory | + Self::StorageHistory | + Self::MerkleChangeSets | + Self::Bodies | Self::Receipts => MINIMUM_PRUNING_DISTANCE, + #[expect(deprecated)] + #[expect(clippy::match_same_arms)] + Self::Headers | Self::Transactions => 0, } } @@ -90,3 +116,20 @@ pub enum PruneSegmentError { #[error("the configuration provided for {0} is invalid")] Configuration(PruneSegment), } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_prune_segment_iter_excludes_deprecated() { + let segments: Vec = PruneSegment::variants().collect(); + + // Verify deprecated variants are not included derived iter + #[expect(deprecated)] + { + assert!(!segments.contains(&PruneSegment::Headers)); + assert!(!segments.contains(&PruneSegment::Transactions)); + } + } +} diff --git a/crates/prune/types/src/target.rs b/crates/prune/types/src/target.rs index 574a0e2e555..bb61c006cdc 100644 --- a/crates/prune/types/src/target.rs +++ b/crates/prune/types/src/target.rs @@ -2,7 +2,7 @@ use alloy_primitives::BlockNumber; use derive_more::Display; use thiserror::Error; -use crate::{PruneCheckpoint, PruneMode, PruneSegment, ReceiptsLogPruneConfig}; +use crate::{PruneCheckpoint, PruneMode, PruneSegment}; /// Minimum distance from the tip necessary for the node to work correctly: /// 1. Minimum 2 epochs (32 blocks per epoch) required to handle any reorg according to the @@ -36,8 +36,13 @@ pub enum HistoryType { StorageHistory, } +/// Default pruning mode for merkle changesets +const fn default_merkle_changesets_mode() -> PruneMode { + PruneMode::Distance(MINIMUM_PRUNING_DISTANCE) +} + /// Pruning configuration for every segment of the data that can be pruned. -#[derive(Debug, Clone, Default, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq)] #[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "serde"), serde(default))] pub struct PruneModes { @@ -84,22 +89,41 @@ pub struct PruneModes { ) )] pub bodies_history: Option, - /// Receipts pruning configuration by retaining only those receipts that contain logs emitted - /// by the specified addresses, discarding others. This setting is overridden by `receipts`. - /// - /// The [`BlockNumber`](`crate::BlockNumber`) represents the starting block from which point - /// onwards the receipts are preserved. - pub receipts_log_filter: ReceiptsLogPruneConfig, + /// Merkle Changesets pruning configuration for `AccountsTrieChangeSets` and + /// `StoragesTrieChangeSets`. + #[cfg_attr( + any(test, feature = "serde"), + serde( + default = "default_merkle_changesets_mode", + deserialize_with = "deserialize_prune_mode_with_min_blocks::" + ) + )] + pub merkle_changesets: PruneMode, + /// Receipts log filtering has been deprecated and will be removed in a future release. + #[deprecated] + #[cfg_attr(any(test, feature = "serde"), serde(skip))] + pub receipts_log_filter: (), } -impl PruneModes { - /// Sets pruning to no target. 
- pub fn none() -> Self { - Self::default() +impl Default for PruneModes { + fn default() -> Self { + Self { + sender_recovery: None, + transaction_lookup: None, + receipts: None, + account_history: None, + storage_history: None, + bodies_history: None, + merkle_changesets: default_merkle_changesets_mode(), + #[expect(deprecated)] + receipts_log_filter: (), + } } +} +impl PruneModes { /// Sets pruning to all targets. - pub fn all() -> Self { + pub const fn all() -> Self { Self { sender_recovery: Some(PruneMode::Full), transaction_lookup: Some(PruneMode::Full), @@ -107,18 +131,15 @@ impl PruneModes { account_history: Some(PruneMode::Full), storage_history: Some(PruneMode::Full), bodies_history: Some(PruneMode::Full), - receipts_log_filter: Default::default(), + merkle_changesets: PruneMode::Full, + #[expect(deprecated)] + receipts_log_filter: (), } } /// Returns whether there is any kind of receipt pruning configuration. - pub fn has_receipts_pruning(&self) -> bool { - self.receipts.is_some() || !self.receipts_log_filter.is_empty() - } - - /// Returns true if all prune modes are set to [`None`]. - pub fn is_empty(&self) -> bool { - self == &Self::none() + pub const fn has_receipts_pruning(&self) -> bool { + self.receipts.is_some() } /// Returns an error if we can't unwind to the targeted block because the target block is @@ -170,6 +191,28 @@ impl PruneModes { } } +/// Deserializes [`PruneMode`] and validates that the value is not less than the const +/// generic parameter `MIN_BLOCKS`. This parameter represents the number of blocks that needs to be +/// left in database after the pruning. +/// +/// 1. For [`PruneMode::Full`], it fails if `MIN_BLOCKS > 0`. +/// 2. For [`PruneMode::Distance`], it fails if `distance < MIN_BLOCKS + 1`. `+ 1` is needed because +/// `PruneMode::Distance(0)` means that we leave zero blocks from the latest, meaning we have one +/// block in the database. +#[cfg(any(test, feature = "serde"))] +fn deserialize_prune_mode_with_min_blocks< + 'de, + const MIN_BLOCKS: u64, + D: serde::Deserializer<'de>, +>( + deserializer: D, +) -> Result { + use serde::Deserialize; + let prune_mode = PruneMode::deserialize(deserializer)?; + serde_deserialize_validate::(&prune_mode)?; + Ok(prune_mode) +} + /// Deserializes [`Option`] and validates that the value is not less than the const /// generic parameter `MIN_BLOCKS`. This parameter represents the number of blocks that needs to be /// left in database after the pruning. 
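A minimal sketch (not part of the diff) of how the new `merkle_changesets` field and the struct-level `serde(default)` above interact. It assumes the crate's `serde` feature plus `serde_json` as a dev-dependency, and only uses items that are public in `reth-prune-types` and visible in this diff; the exact `MIN_BLOCKS` value wired into the field's `deserialize_with` is elided above, so no rejection case is asserted here.

```rust
use reth_prune_types::{PruneMode, PruneModes, MINIMUM_PRUNING_DISTANCE};

fn main() {
    // Omitting every field falls back to the struct-level `serde(default)`.
    let modes: PruneModes = serde_json::from_str("{}").expect("defaults deserialize");

    // `merkle_changesets` is no longer optional: when missing it takes
    // `default_merkle_changesets_mode()`, i.e. `Distance(MINIMUM_PRUNING_DISTANCE)`.
    assert_eq!(modes.merkle_changesets, PruneMode::Distance(MINIMUM_PRUNING_DISTANCE));

    // With `PruneModes::none()` removed, `Default::default()` now expresses
    // "no user pruning targets configured".
    assert_eq!(modes, PruneModes::default());
}
```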
@@ -186,12 +229,21 @@ fn deserialize_opt_prune_mode_with_min_blocks< >( deserializer: D, ) -> Result, D::Error> { - use alloc::format; use serde::Deserialize; let prune_mode = Option::::deserialize(deserializer)?; + if let Some(prune_mode) = prune_mode.as_ref() { + serde_deserialize_validate::(prune_mode)?; + } + Ok(prune_mode) +} +#[cfg(any(test, feature = "serde"))] +fn serde_deserialize_validate<'a, 'de, const MIN_BLOCKS: u64, D: serde::Deserializer<'de>>( + prune_mode: &'a PruneMode, +) -> Result<(), D::Error> { + use alloc::format; match prune_mode { - Some(PruneMode::Full) if MIN_BLOCKS > 0 => { + PruneMode::Full if MIN_BLOCKS > 0 => { Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str("full"), // This message should have "expected" wording @@ -199,15 +251,15 @@ fn deserialize_opt_prune_mode_with_min_blocks< .as_str(), )) } - Some(PruneMode::Distance(distance)) if distance < MIN_BLOCKS => { + PruneMode::Distance(distance) if *distance < MIN_BLOCKS => { Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Unsigned(distance), + serde::de::Unexpected::Unsigned(*distance), // This message should have "expected" wording &format!("prune mode that leaves at least {MIN_BLOCKS} blocks in the database") .as_str(), )) } - _ => Ok(prune_mode), + _ => Ok(()), } } @@ -240,7 +292,7 @@ mod tests { #[test] fn test_unwind_target_unpruned() { // Test case 1: No pruning configured - should always succeed - let prune_modes = PruneModes::none(); + let prune_modes = PruneModes::default(); assert!(prune_modes.ensure_unwind_target_unpruned(1000, 500, &[]).is_ok()); assert!(prune_modes.ensure_unwind_target_unpruned(1000, 0, &[]).is_ok()); diff --git a/crates/ress/protocol/src/lib.rs b/crates/ress/protocol/src/lib.rs index 50db2a3191c..82820cc5a31 100644 --- a/crates/ress/protocol/src/lib.rs +++ b/crates/ress/protocol/src/lib.rs @@ -1,5 +1,30 @@ -//! `ress` protocol is an `RLPx` subprotocol for stateless nodes. -//! following [RLPx specs](https://github.com/ethereum/devp2p/blob/master/rlpx.md) +//! RESS protocol for stateless Ethereum nodes. +//! +//! Enables stateless nodes to fetch execution witnesses, bytecode, and block data from +//! stateful peers for minimal on-disk state with full execution capability. +//! +//! ## Node Types +//! +//! - **Stateless**: Minimal state, requests data on-demand +//! - **Stateful**: Full Ethereum nodes providing state data +//! +//! Valid connections: Stateless ↔ Stateless ✅, Stateless ↔ Stateful ✅, Stateful ↔ Stateful ❌ +//! +//! ## Messages +//! +//! - `NodeType (0x00)`: Handshake +//! - `GetHeaders/Headers (0x01/0x02)`: Block headers +//! - `GetBlockBodies/BlockBodies (0x03/0x04)`: Block bodies +//! - `GetBytecode/Bytecode (0x05/0x06)`: Contract bytecode +//! - `GetWitness/Witness (0x07/0x08)`: Execution witnesses +//! +//! ## Flow +//! +//! 1. Exchange `NodeType` for compatibility +//! 2. Download ancestor blocks via headers/bodies +//! 3. For new payloads: request witness → get missing bytecode → execute +//! +//! 
Protocol version: `ress/1` #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/ress/provider/src/lib.rs b/crates/ress/provider/src/lib.rs index da3c5190902..0d88dcc0f25 100644 --- a/crates/ress/provider/src/lib.rs +++ b/crates/ress/provider/src/lib.rs @@ -11,9 +11,7 @@ use alloy_consensus::BlockHeader as _; use alloy_primitives::{Bytes, B256}; use parking_lot::Mutex; -use reth_chain_state::{ - ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, MemoryOverlayStateProvider, -}; +use reth_chain_state::{ExecutedBlock, MemoryOverlayStateProvider}; use reth_errors::{ProviderError, ProviderResult}; use reth_ethereum_primitives::{Block, BlockBody, EthPrimitives}; use reth_evm::{execute::Executor, ConfigureEvm}; @@ -125,10 +123,8 @@ where self.pending_state.invalid_recovered_block(&ancestor_hash) { trace!(target: "reth::ress_provider", %block_hash, %ancestor_hash, "Using invalid ancestor block for witness construction"); - executed = Some(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { recovered_block: invalid, ..Default::default() }, - trie: ExecutedTrieUpdates::empty(), - }); + executed = + Some(ExecutedBlock { recovered_block: invalid, ..Default::default() }); } let Some(executed) = executed else { @@ -162,14 +158,8 @@ where let witness_state_provider = self.provider.state_by_block_hash(ancestor_hash)?; let mut trie_input = TrieInput::default(); for block in executed_ancestors.into_iter().rev() { - if let Some(trie_updates) = block.trie.as_ref() { - trie_input.append_cached_ref(trie_updates, &block.hashed_state); - } else { - trace!(target: "reth::ress_provider", ancestor = ?block.recovered_block().num_hash(), "Missing trie updates for ancestor block"); - return Err(ProviderError::TrieWitnessError( - "missing trie updates for ancestor".to_owned(), - )); - } + let trie_updates = block.trie_updates.as_ref(); + trie_input.append_cached_ref(trie_updates, &block.hashed_state); } let mut hashed_state = db.into_state(); hashed_state.extend(record.hashed_state); diff --git a/crates/ress/provider/src/pending_state.rs b/crates/ress/provider/src/pending_state.rs index e1a84661fc2..f536acdb60a 100644 --- a/crates/ress/provider/src/pending_state.rs +++ b/crates/ress/provider/src/pending_state.rs @@ -5,7 +5,7 @@ use alloy_primitives::{ }; use futures::StreamExt; use parking_lot::RwLock; -use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_chain_state::ExecutedBlock; use reth_ethereum_primitives::EthPrimitives; use reth_node_api::{ConsensusEngineEvent, NodePrimitives}; use reth_primitives_traits::{Bytecode, RecoveredBlock}; @@ -20,14 +20,14 @@ pub struct PendingState(Arc>>); #[derive(Default, Debug)] struct PendingStateInner { - blocks_by_hash: B256Map>, + blocks_by_hash: B256Map>, invalid_blocks_by_hash: B256Map>>, block_hashes_by_number: BTreeMap, } impl PendingState { /// Insert executed block with trie updates. - pub fn insert_block(&self, block: ExecutedBlockWithTrieUpdates) { + pub fn insert_block(&self, block: ExecutedBlock) { let mut this = self.0.write(); let block_hash = block.recovered_block.hash(); this.block_hashes_by_number @@ -46,13 +46,13 @@ impl PendingState { } /// Returns only valid executed blocks by hash. - pub fn executed_block(&self, hash: &B256) -> Option> { + pub fn executed_block(&self, hash: &B256) -> Option> { self.0.read().blocks_by_hash.get(hash).cloned() } /// Returns valid recovered block. 
pub fn recovered_block(&self, hash: &B256) -> Option>> { - self.executed_block(hash).map(|b| b.recovered_block.clone()) + self.executed_block(hash).map(|b| b.recovered_block) } /// Returns invalid recovered block. diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 488a685b382..92036e39085 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -68,3 +68,4 @@ optional-checks = [ "optional-eip3607", "optional-no-base-fee", ] +memory_limit = ["revm/memory_limit"] diff --git a/crates/revm/src/cached.rs b/crates/revm/src/cached.rs index bf4bd6d5d1b..d40e814c12a 100644 --- a/crates/revm/src/cached.rs +++ b/crates/revm/src/cached.rs @@ -146,11 +146,11 @@ impl Database for CachedReadsDbMut<'_, DB> { } fn block_hash(&mut self, number: u64) -> Result { - let code = match self.cached.block_hashes.entry(number) { + let hash = match self.cached.block_hashes.entry(number) { Entry::Occupied(entry) => *entry.get(), Entry::Vacant(entry) => *entry.insert(self.db.block_hash_ref(number)?), }; - Ok(code) + Ok(hash) } } diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index 19992ead498..fda19c7cb31 100644 --- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -27,7 +27,7 @@ pub(crate) struct Batch { // Batch responses must be sent back as a single message so we read the results from each // request in the batch and read the results off of a new channel, `rx_batch`, and then send the // complete batch response back to the client over `tx`. -#[instrument(name = "batch", skip(b), level = "TRACE")] +#[instrument(name = "batch", skip(b))] pub(crate) async fn process_batch_request( b: Batch, max_response_body_size: usize, @@ -98,7 +98,7 @@ where } } -#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service), level = "TRACE")] +#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service))] pub(crate) async fn execute_call_with_tracing<'a, S>( req: Request<'a>, rpc_service: &S, diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index b6114938d2b..75431b915a5 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -391,7 +391,7 @@ where fn call(&mut self, request: String) -> Self::Future { trace!("{:?}", request); - let cfg = RpcServiceCfg::CallsAndSubscriptions { + let cfg = RpcServiceCfg { bounded_subscriptions: BoundedSubscriptions::new( self.inner.server_cfg.max_subscriptions_per_connection, ), @@ -443,7 +443,7 @@ struct ProcessConnection<'a, HttpMiddleware, RpcMiddleware> { } /// Spawns the IPC connection onto a new task -#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id), level = "INFO")] +#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id))] fn process_connection( params: ProcessConnection<'_, HttpMiddleware, RpcMiddleware>, ) where diff --git a/crates/rpc/ipc/src/server/rpc_service.rs b/crates/rpc/ipc/src/server/rpc_service.rs index 75bd53ad6d5..f7fcdace4c4 100644 --- a/crates/rpc/ipc/src/server/rpc_service.rs +++ b/crates/rpc/ipc/src/server/rpc_service.rs @@ -25,17 +25,11 @@ pub struct RpcService { } /// Configuration of the `RpcService`. -#[allow(dead_code)] #[derive(Clone, Debug)] -pub(crate) enum RpcServiceCfg { - /// The server supports only calls. - OnlyCalls, - /// The server supports both method calls and subscriptions. 
- CallsAndSubscriptions { - bounded_subscriptions: BoundedSubscriptions, - sink: MethodSink, - id_provider: Arc, - }, +pub(crate) struct RpcServiceCfg { + pub(crate) bounded_subscriptions: BoundedSubscriptions, + pub(crate) sink: MethodSink, + pub(crate) id_provider: Arc, } impl RpcService { @@ -82,30 +76,20 @@ impl RpcServiceT for RpcService { ResponseFuture::future(fut) } MethodCallback::Subscription(callback) => { - let RpcServiceCfg::CallsAndSubscriptions { - bounded_subscriptions, - sink, - id_provider, - } = &self.cfg - else { - tracing::warn!(id = ?id, method = %name, "Attempted subscription on a service not configured for subscriptions."); - let rp = - MethodResponse::error(id, ErrorObject::from(ErrorCode::InternalError)); - return ResponseFuture::ready(rp); - }; - - if let Some(p) = bounded_subscriptions.acquire() { + let cfg = &self.cfg; + + if let Some(p) = cfg.bounded_subscriptions.acquire() { let conn_state = SubscriptionState { conn_id, - id_provider: &**id_provider, + id_provider: &*cfg.id_provider, subscription_permit: p, }; let fut = - callback(id.clone(), params, sink.clone(), conn_state, extensions); + callback(id.clone(), params, cfg.sink.clone(), conn_state, extensions); ResponseFuture::future(fut) } else { - let max = bounded_subscriptions.max(); + let max = cfg.bounded_subscriptions.max(); let rp = MethodResponse::error(id, reject_too_many_subscriptions(max)); ResponseFuture::ready(rp) } @@ -114,13 +98,6 @@ impl RpcServiceT for RpcService { // Don't adhere to any resource or subscription limits; always let unsubscribing // happen! - let RpcServiceCfg::CallsAndSubscriptions { .. } = self.cfg else { - tracing::warn!(id = ?id, method = %name, "Attempted unsubscription on a service not configured for subscriptions."); - let rp = - MethodResponse::error(id, ErrorObject::from(ErrorCode::InternalError)); - return ResponseFuture::ready(rp); - }; - let rp = callback(id, params, conn_id, max_response_body_size, extensions); ResponseFuture::ready(rp) } diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 5dd7401782f..0fca5f18457 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -222,7 +222,7 @@ pub trait DebugApi { /// Returns the raw value of a key stored in the database. #[method(name = "dbGet")] - async fn debug_db_get(&self, key: String) -> RpcResult<()>; + async fn debug_db_get(&self, key: String) -> RpcResult>; /// Retrieves the state that corresponds to the block number and returns a list of accounts /// (including storage and code). 
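A hedged sketch (not part of the diff) of a client-side key for the reworked `debug_dbGet` endpoint above. The shape — 33 bytes, a `0x63` prefix byte followed by a 32-byte value, hex encoded — is taken from the `test_debug_db_get` cases added later in this diff; the helper name and the meaning of the trailing 32 bytes are illustrative assumptions.

```rust
use alloy_primitives::{hex, B256};

/// Illustrative helper: a `debug_dbGet` key in the shape the new test accepts,
/// i.e. one `0x63` prefix byte followed by a 32-byte value, hex encoded.
fn debug_db_get_key(value: B256) -> String {
    let mut key = Vec::with_capacity(33);
    key.push(0x63); // prefix required by the test ("Key prefix must be 0x63" otherwise)
    key.extend_from_slice(value.as_slice());
    format!("0x{}", hex::encode(key))
}

fn main() {
    let key = debug_db_get_key(B256::ZERO);
    assert_eq!(key.len(), 2 + 33 * 2); // "0x" + 66 hex characters
    println!("{key}");
}
```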
diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index 011e24d468b..4d57bdec7d8 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -105,6 +105,7 @@ impl RethRpcServerConfig for RpcServerArgs { .proof_permits(self.rpc_proof_permits) .pending_block_kind(self.rpc_pending_block) .raw_tx_forwarder(self.rpc_forwarder.clone()) + .rpc_evm_memory_limit(self.rpc_evm_memory_limit) } fn flashbots_config(&self) -> ValidationApiConfig { diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ed8114e7e91..6bd4223f60f 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -34,7 +34,7 @@ use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_consensus::{ConsensusError, FullConsensus}; use reth_evm::ConfigureEvm; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; -use reth_primitives_traits::NodePrimitives; +use reth_primitives_traits::{NodePrimitives, TxTy}; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthApi, EthApiBuilder, EthBundle, MinerApi, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, TxPoolApi, ValidationApiConfig, Web3Api, @@ -670,6 +670,7 @@ where RpcBlock, RpcReceipt, RpcHeader, + TxTy, > + EthApiTypes, EvmConfig: ConfigureEvm + 'static, { @@ -691,7 +692,7 @@ where /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn register_ots(&mut self) -> &mut Self where - EthApi: TraceExt + EthTransactions, + EthApi: TraceExt + EthTransactions, { let otterscan_api = self.otterscan_api(); self.modules.insert(RethRpcModule::Ots, otterscan_api.into_rpc().into()); @@ -1965,6 +1966,25 @@ impl TransportRpcModules { self.add_or_replace_ipc(other)?; Ok(()) } + /// Adds or replaces the given [`Methods`] in the transport modules where the specified + /// [`RethRpcModule`] is configured. + pub fn add_or_replace_if_module_configured( + &mut self, + module: RethRpcModule, + other: impl Into, + ) -> Result<(), RegisterMethodError> { + let other = other.into(); + if self.module_config().contains_http(&module) { + self.add_or_replace_http(other.clone())?; + } + if self.module_config().contains_ws(&module) { + self.add_or_replace_ws(other.clone())?; + } + if self.module_config().contains_ipc(&module) { + self.add_or_replace_ipc(other)?; + } + Ok(()) + } } /// Returns the methods installed in the given module that match the given filter. 
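A hedged usage sketch (not part of the diff) for the new `TransportRpcModules::add_or_replace_if_module_configured` helper added in the previous hunk: extra methods are installed only on transports where the given module is already enabled. The helper name, method name, and import paths below are illustrative assumptions; the unit test added in the next hunk demonstrates the replace-vs-skip behaviour in detail.

```rust
use jsonrpsee::RpcModule;
use reth_rpc_builder::TransportRpcModules;
use reth_rpc_server_types::RethRpcModule;

/// Illustrative helper: register an extra method only on transports that already
/// expose the `eth` module; transports without it are left untouched.
fn install_eth_extension(
    modules: &mut TransportRpcModules,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut ext = RpcModule::new(());
    ext.register_method("eth_exampleExtension", |_, _, _| "ok")?;
    modules.add_or_replace_if_module_configured(RethRpcModule::Eth, ext)?;
    Ok(())
}
```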
@@ -2521,4 +2541,56 @@ mod tests { assert!(modules.ipc.as_ref().unwrap().method("anything").is_some()); assert!(modules.ws.as_ref().unwrap().method("anything").is_some()); } + + #[test] + fn test_add_or_replace_if_module_configured() { + // Create a config that enables RethRpcModule::Eth for HTTP and WS, but NOT IPC + let config = TransportRpcModuleConfig::default() + .with_http([RethRpcModule::Eth]) + .with_ws([RethRpcModule::Eth]); + + // Create HTTP module with an existing method (to test "replace") + let mut http_module = RpcModule::new(()); + http_module.register_method("eth_existing", |_, _, _| "original").unwrap(); + + // Create WS module with the same existing method + let mut ws_module = RpcModule::new(()); + ws_module.register_method("eth_existing", |_, _, _| "original").unwrap(); + + // Create IPC module (empty, to ensure no changes) + let ipc_module = RpcModule::new(()); + + // Set up TransportRpcModules with the config and modules + let mut modules = TransportRpcModules { + config, + http: Some(http_module), + ws: Some(ws_module), + ipc: Some(ipc_module), + }; + + // Create new methods: one to replace an existing method, one to add a new one + let mut new_module = RpcModule::new(()); + new_module.register_method("eth_existing", |_, _, _| "replaced").unwrap(); // Replace + new_module.register_method("eth_new", |_, _, _| "added").unwrap(); // Add + let new_methods: Methods = new_module.into(); + + // Call the function for RethRpcModule::Eth + let result = modules.add_or_replace_if_module_configured(RethRpcModule::Eth, new_methods); + assert!(result.is_ok(), "Function should succeed"); + + // Verify HTTP: existing method still exists (replaced), new method added + let http = modules.http.as_ref().unwrap(); + assert!(http.method("eth_existing").is_some()); + assert!(http.method("eth_new").is_some()); + + // Verify WS: existing method still exists (replaced), new method added + let ws = modules.ws.as_ref().unwrap(); + assert!(ws.method("eth_existing").is_some()); + assert!(ws.method("eth_new").is_some()); + + // Verify IPC: no changes (Eth not configured for IPC) + let ipc = modules.ipc.as_ref().unwrap(); + assert!(ipc.method("eth_existing").is_none()); + assert!(ipc.method("eth_new").is_none()); + } } diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index a790253d266..6be4d5d965d 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -18,7 +18,7 @@ use jsonrpsee::{ rpc_params, types::error::ErrorCode, }; -use reth_ethereum_primitives::Receipt; +use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_network_peers::NodeRecord; use reth_rpc_api::{ clients::{AdminApiClient, EthApiClient}, @@ -176,38 +176,38 @@ where .unwrap(); // Implemented - EthApiClient::::protocol_version( + EthApiClient::::protocol_version( client, ) .await .unwrap(); - EthApiClient::::chain_id(client) + EthApiClient::::chain_id(client) .await .unwrap(); - EthApiClient::::accounts(client) + EthApiClient::::accounts(client) .await .unwrap(); - EthApiClient::::get_account( + EthApiClient::::get_account( client, address, block_number.into(), ) .await .unwrap(); - EthApiClient::::block_number(client) + EthApiClient::::block_number(client) .await .unwrap(); - EthApiClient::::get_code( + EthApiClient::::get_code( client, address, None, ) .await .unwrap(); - EthApiClient::::send_raw_transaction( + EthApiClient::::send_raw_transaction( client, tx, ) .await .unwrap(); - EthApiClient::::fee_history( + 
EthApiClient::::fee_history( client, U64::from(0), block_number, @@ -215,17 +215,17 @@ where ) .await .unwrap(); - EthApiClient::::balance( + EthApiClient::::balance( client, address, None, ) .await .unwrap(); - EthApiClient::::transaction_count( + EthApiClient::::transaction_count( client, address, None, ) .await .unwrap(); - EthApiClient::::storage_at( + EthApiClient::::storage_at( client, address, U256::default().into(), @@ -233,80 +233,80 @@ where ) .await .unwrap(); - EthApiClient::::block_by_hash( + EthApiClient::::block_by_hash( client, hash, false, ) .await .unwrap(); - EthApiClient::::block_by_number( + EthApiClient::::block_by_number( client, block_number, false, ) .await .unwrap(); - EthApiClient::::block_transaction_count_by_number( + EthApiClient::::block_transaction_count_by_number( client, block_number, ) .await .unwrap(); - EthApiClient::::block_transaction_count_by_hash( + EthApiClient::::block_transaction_count_by_hash( client, hash, ) .await .unwrap(); - EthApiClient::::block_uncles_count_by_hash(client, hash) + EthApiClient::::block_uncles_count_by_hash(client, hash) .await .unwrap(); - EthApiClient::::block_uncles_count_by_number( + EthApiClient::::block_uncles_count_by_number( client, block_number, ) .await .unwrap(); - EthApiClient::::uncle_by_block_hash_and_index( + EthApiClient::::uncle_by_block_hash_and_index( client, hash, index, ) .await .unwrap(); - EthApiClient::::uncle_by_block_number_and_index( + EthApiClient::::uncle_by_block_number_and_index( client, block_number, index, ) .await .unwrap(); - EthApiClient::::sign( + EthApiClient::::sign( client, address, bytes.clone(), ) .await .unwrap_err(); - EthApiClient::::sign_typed_data( + EthApiClient::::sign_typed_data( client, address, typed_data, ) .await .unwrap_err(); - EthApiClient::::transaction_by_hash( + EthApiClient::::transaction_by_hash( client, tx_hash, ) .await .unwrap(); - EthApiClient::::transaction_by_block_hash_and_index( + EthApiClient::::transaction_by_block_hash_and_index( client, hash, index, ) .await .unwrap(); - EthApiClient::::transaction_by_block_number_and_index( + EthApiClient::::transaction_by_block_number_and_index( client, block_number, index, ) .await .unwrap(); - EthApiClient::::create_access_list( + EthApiClient::::create_access_list( client, call_request.clone(), Some(block_number.into()), @@ -314,7 +314,7 @@ where ) .await .unwrap_err(); - EthApiClient::::estimate_gas( + EthApiClient::::estimate_gas( client, call_request.clone(), Some(block_number.into()), @@ -322,7 +322,7 @@ where ) .await .unwrap_err(); - EthApiClient::::call( + EthApiClient::::call( client, call_request.clone(), Some(block_number.into()), @@ -331,38 +331,38 @@ where ) .await .unwrap_err(); - EthApiClient::::syncing(client) + EthApiClient::::syncing(client) .await .unwrap(); - EthApiClient::::send_transaction( + EthApiClient::::send_transaction( client, transaction_request.clone(), ) .await .unwrap_err(); - EthApiClient::::sign_transaction( + EthApiClient::::sign_transaction( client, transaction_request, ) .await .unwrap_err(); - EthApiClient::::hashrate(client) + EthApiClient::::hashrate(client) .await .unwrap(); - EthApiClient::::submit_hashrate( + EthApiClient::::submit_hashrate( client, U256::default(), B256::default(), ) .await .unwrap(); - EthApiClient::::gas_price(client) + EthApiClient::::gas_price(client) .await .unwrap_err(); - EthApiClient::::max_priority_fee_per_gas(client) + EthApiClient::::max_priority_fee_per_gas(client) .await .unwrap_err(); - EthApiClient::::get_proof( + 
EthApiClient::::get_proof( client, address, vec![], @@ -372,35 +372,66 @@ where .unwrap(); // Unimplemented - assert!(is_unimplemented( - EthApiClient::::author(client) + assert!( + is_unimplemented( + EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::author(client) .await .err() .unwrap() - )); - assert!(is_unimplemented( - EthApiClient::::is_mining(client) + ) + ); + assert!( + is_unimplemented( + EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::is_mining(client) .await .err() .unwrap() - )); - assert!(is_unimplemented( - EthApiClient::::get_work(client) + ) + ); + assert!( + is_unimplemented( + EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::get_work(client) .await .err() .unwrap() - )); - assert!(is_unimplemented( - EthApiClient::::submit_work( - client, - B64::default(), - B256::default(), - B256::default() ) - .await - .err() - .unwrap() - )); + ); + assert!( + is_unimplemented( + EthApiClient::< + TransactionRequest, + Transaction, + Block, + Receipt, + Header, + TransactionSigned, + >::submit_work(client, B64::default(), B256::default(), B256::default()) + .await + .err() + .unwrap() + ) + ); EthCallBundleApiClient::call_bundle(client, Default::default()).await.unwrap_err(); } @@ -1663,3 +1694,47 @@ async fn test_eth_fee_history_raw() { ) .await; } + +#[tokio::test(flavor = "multi_thread")] +async fn test_debug_db_get() { + reth_tracing::init_test_tracing(); + + let handle = launch_http(vec![RethRpcModule::Debug]).await; + let client = handle.http_client().unwrap(); + + let valid_test_cases = [ + "0x630000000000000000000000000000000000000000000000000000000000000000", + "c00000000000000000000000000000000", + ]; + + for key in valid_test_cases { + DebugApiClient::<()>::debug_db_get(&client, key.into()).await.unwrap(); + } + + // Invalid test cases + let test_cases = [ + ("0x0000", "Key must be 33 bytes, got 2"), + ("00", "Key must be 33 bytes, got 2"), + ( + "0x000000000000000000000000000000000000000000000000000000000000000000", + "Key prefix must be 0x63", + ), + ("000000000000000000000000000000000", "Key prefix must be 0x63"), + ("0xc0000000000000000000000000000000000000000000000000000000000000000", "Invalid hex key"), + ]; + + let match_error_msg = |err: jsonrpsee::core::client::Error, expected: String| -> bool { + match err { + jsonrpsee::core::client::Error::Call(error_obj) => { + error_obj.code() == ErrorCode::InvalidParams.code() && + error_obj.message() == expected + } + _ => false, + } + }; + + for (key, expected) in test_cases { + let err = DebugApiClient::<()>::debug_db_get(&client, key.into()).await.unwrap_err(); + assert!(match_error_msg(err, expected.into())); + } +} diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs index 60541a57c39..9a70356bcac 100644 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -5,6 +5,7 @@ use jsonrpsee::{ server::middleware::rpc::RpcServiceT, types::Request, }; +use reth_ethereum_primitives::TransactionSigned; use reth_rpc_builder::{RpcServerConfig, TransportRpcModuleConfig}; use reth_rpc_eth_api::EthApiClient; use reth_rpc_server_types::RpcModuleSelection; @@ -85,7 +86,7 @@ async fn test_rpc_middleware() { .unwrap(); let client = handle.http_client().unwrap(); - EthApiClient::::protocol_version( + EthApiClient::::protocol_version( &client, ) .await diff 
--git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index 5b0a4b1af51..2cdb27dc254 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -1,8 +1,7 @@ //! Compatibility functions for rpc `Transaction` type. - use crate::{ fees::{CallFees, CallFeesError}, - RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, + RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, SignableTxRequest, }; use alloy_consensus::{ error::ValueError, transaction::Recovered, EthereumTxEnvelope, Sealable, TxEip4844, @@ -17,7 +16,7 @@ use core::error; use dyn_clone::DynClone; use reth_evm::{ revm::context_interface::{either::Either, Block}, - ConfigureEvm, SpecFor, TxEnvFor, + BlockEnvFor, ConfigureEvm, EvmEnvFor, TxEnvFor, }; use reth_primitives_traits::{ BlockTy, HeaderTy, NodePrimitives, SealedBlock, SealedHeader, SealedHeaderFor, TransactionMeta, @@ -123,19 +122,16 @@ pub trait RpcConvert: Send + Sync + Unpin + Debug + DynClone + 'static { /// Associated lower layer consensus types to convert from and into types of [`Self::Network`]. type Primitives: NodePrimitives; + /// The EVM configuration. + type Evm: ConfigureEvm; + /// Associated upper layer JSON-RPC API network requests and responses to convert from and into /// types of [`Self::Primitives`]. - type Network: RpcTypes + Send + Sync + Unpin + Clone + Debug; - - /// A set of variables for executing a transaction. - type TxEnv; + type Network: RpcTypes>>; /// An associated RPC conversion error. type Error: error::Error + Into>; - /// The EVM specification identifier. - type Spec; - /// Wrapper for `fill()` with default `TransactionInfo` /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. @@ -169,9 +165,8 @@ pub trait RpcConvert: Send + Sync + Unpin + Debug + DynClone + 'static { fn tx_env( &self, request: RpcTxReq, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result; + evm_env: &EvmEnvFor, + ) -> Result, Self::Error>; /// Converts a set of primitive receipts to RPC representations. It is guaranteed that all /// receipts are from the same block. @@ -199,8 +194,8 @@ pub trait RpcConvert: Send + Sync + Unpin + Debug + DynClone + 'static { } dyn_clone::clone_trait_object!( - - RpcConvert + + RpcConvert ); /// Converts `self` into `T`. The opposite of [`FromConsensusTx`]. @@ -439,7 +434,7 @@ impl TryIntoSimTx> for TransactionRequest { /// implementation for free, thanks to the blanket implementation, unless the conversion requires /// more context. For example, some configuration parameters or access handles to database, network, /// etc. -pub trait TxEnvConverter: +pub trait TxEnvConverter: Debug + Send + Sync + Unpin + Clone + 'static { /// An associated error that can occur during conversion. 
@@ -451,31 +446,30 @@ pub trait TxEnvConverter: fn convert_tx_env( &self, tx_req: TxReq, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result; + evm_env: &EvmEnvFor, + ) -> Result, Self::Error>; } -impl TxEnvConverter for () +impl TxEnvConverter for () where - TxReq: TryIntoTxEnv, + TxReq: TryIntoTxEnv, BlockEnvFor>, + Evm: ConfigureEvm, { type Error = TxReq::Err; fn convert_tx_env( &self, tx_req: TxReq, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result { - tx_req.try_into_tx_env(cfg_env, block_env) + evm_env: &EvmEnvFor, + ) -> Result, Self::Error> { + tx_req.try_into_tx_env(&evm_env.cfg_env, &evm_env.block_env) } } /// Converts rpc transaction requests into transaction environment using a closure. -impl TxEnvConverter for F +impl TxEnvConverter for F where - F: Fn(TxReq, &CfgEnv, &BlockEnv) -> Result + F: Fn(TxReq, &EvmEnvFor) -> Result, E> + Debug + Send + Sync @@ -483,6 +477,7 @@ where + Clone + 'static, TxReq: Clone, + Evm: ConfigureEvm, E: error::Error + Send + Sync + 'static, { type Error = E; @@ -490,17 +485,16 @@ where fn convert_tx_env( &self, tx_req: TxReq, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result { - self(tx_req, cfg_env, block_env) + evm_env: &EvmEnvFor, + ) -> Result, Self::Error> { + self(tx_req, evm_env) } } /// Converts `self` into `T`. /// /// Should create an executable transaction environment using [`TransactionRequest`]. -pub trait TryIntoTxEnv { +pub trait TryIntoTxEnv { /// An associated error that can occur during the conversion. type Err; @@ -836,7 +830,6 @@ impl } /// Converts `self` into a boxed converter. - #[expect(clippy::type_complexity)] pub fn erased( self, ) -> Box< @@ -844,8 +837,7 @@ impl Primitives = ::Primitives, Network = ::Network, Error = ::Error, - TxEnv = ::TxEnv, - Spec = ::Spec, + Evm = ::Evm, >, > where @@ -908,7 +900,7 @@ impl RpcConvert for RpcConverter where N: NodePrimitives, - Network: RpcTypes + Send + Sync + Unpin + Clone + Debug, + Network: RpcTypes>, Evm: ConfigureEvm + 'static, Receipt: ReceiptConverter< N, @@ -933,13 +925,12 @@ where SimTx: SimTxConverter, TxTy>, RpcTx: RpcTxConverter, Network::TransactionResponse, >>::Out>, - TxEnv: TxEnvConverter, TxEnvFor, SpecFor>, + TxEnv: TxEnvConverter, Evm>, { type Primitives = N; + type Evm = Evm; type Network = Network; - type TxEnv = TxEnvFor; type Error = Receipt::Error; - type Spec = SpecFor; fn fill( &self, @@ -965,10 +956,9 @@ where fn tx_env( &self, request: RpcTxReq, - cfg_env: &CfgEnv>, - block_env: &BlockEnv, - ) -> Result { - self.tx_env_converter.convert_tx_env(request, cfg_env, block_env).map_err(Into::into) + evm_env: &EvmEnvFor, + ) -> Result, Self::Error> { + self.tx_env_converter.convert_tx_env(request, evm_env).map_err(Into::into) } fn convert_receipts( diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 6aeadeecba5..8902a111f27 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -21,8 +21,7 @@ use reth_chainspec::EthereumHardforks; use reth_engine_primitives::{ConsensusEngineHandle, EngineApiValidator, EngineTypes}; use reth_payload_builder::PayloadStore; use reth_payload_primitives::{ - validate_payload_timestamp, EngineApiMessageVersion, ExecutionPayload, PayloadOrAttributes, - PayloadTypes, + validate_payload_timestamp, EngineApiMessageVersion, PayloadOrAttributes, PayloadTypes, }; use reth_primitives_traits::{Block, BlockBody}; use reth_rpc_api::{EngineApiServer, IntoEngineApiRpcModule}; @@ -161,12 +160,9 @@ where payload: 
PayloadT::ExecutionData, ) -> EngineApiResult { let start = Instant::now(); - let gas_used = payload.gas_used(); - let res = Self::new_payload_v1(self, payload).await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v1.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); res } @@ -197,12 +193,9 @@ where payload: PayloadT::ExecutionData, ) -> EngineApiResult { let start = Instant::now(); - let gas_used = payload.gas_used(); - let res = Self::new_payload_v2(self, payload).await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v2.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); res } @@ -234,12 +227,10 @@ where payload: PayloadT::ExecutionData, ) -> RpcResult { let start = Instant::now(); - let gas_used = payload.gas_used(); let res = Self::new_payload_v3(self, payload).await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v3.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); Ok(res?) } @@ -271,13 +262,10 @@ where payload: PayloadT::ExecutionData, ) -> RpcResult { let start = Instant::now(); - let gas_used = payload.gas_used(); - let res = Self::new_payload_v4(self, payload).await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v4.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); Ok(res?) } @@ -320,7 +308,6 @@ where let start = Instant::now(); let res = Self::fork_choice_updated_v1(self, state, payload_attrs).await; self.inner.metrics.latency.fork_choice_updated_v1.record(start.elapsed()); - self.inner.metrics.fcu_response.update_response_metrics(&res); res } @@ -346,7 +333,6 @@ where let start = Instant::now(); let res = Self::fork_choice_updated_v2(self, state, payload_attrs).await; self.inner.metrics.latency.fork_choice_updated_v2.record(start.elapsed()); - self.inner.metrics.fcu_response.update_response_metrics(&res); res } @@ -372,7 +358,6 @@ where let start = Instant::now(); let res = Self::fork_choice_updated_v3(self, state, payload_attrs).await; self.inner.metrics.latency.fork_choice_updated_v3.record(start.elapsed()); - self.inner.metrics.fcu_response.update_response_metrics(&res); res } diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 2578b2f44e5..6155c004c36 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{B256, U256}; +use alloy_primitives::B256; use alloy_rpc_types_engine::{ ForkchoiceUpdateError, INVALID_FORK_CHOICE_STATE_ERROR, INVALID_FORK_CHOICE_STATE_ERROR_MSG, INVALID_PAYLOAD_ATTRIBUTES_ERROR, INVALID_PAYLOAD_ATTRIBUTES_ERROR_MSG, @@ -59,17 +59,6 @@ pub enum EngineApiError { /// Requested number of items count: u64, }, - /// Terminal total difficulty mismatch during transition configuration exchange. - #[error( - "invalid transition terminal total difficulty: \ - execution: {execution}, consensus: {consensus}" - )] - TerminalTD { - /// Execution terminal total difficulty value. - execution: U256, - /// Consensus terminal total difficulty value. - consensus: U256, - }, /// Terminal block hash mismatch during transition configuration exchange. 
#[error( "invalid transition terminal block hash: \ @@ -202,7 +191,6 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { } }, // Any other server error - EngineApiError::TerminalTD { .. } | EngineApiError::TerminalBlockHash { .. } | EngineApiError::NewPayload(_) | EngineApiError::Internal(_) | diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index 95156e490b7..19f8a1520b5 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs @@ -1,8 +1,4 @@ -use std::time::Duration; - -use crate::EngineApiError; -use alloy_rpc_types_engine::{ForkchoiceUpdated, PayloadStatus, PayloadStatusEnum}; -use metrics::{Counter, Gauge, Histogram}; +use metrics::{Counter, Histogram}; use reth_metrics::Metrics; /// All beacon consensus engine metrics @@ -10,10 +6,6 @@ use reth_metrics::Metrics; pub(crate) struct EngineApiMetrics { /// Engine API latency metrics pub(crate) latency: EngineApiLatencyMetrics, - /// Engine API forkchoiceUpdated response type metrics - pub(crate) fcu_response: ForkchoiceUpdatedResponseMetrics, - /// Engine API newPayload response type metrics - pub(crate) new_payload_response: NewPayloadStatusResponseMetrics, /// Blob-related metrics pub(crate) blob_metrics: BlobMetrics, } @@ -58,58 +50,6 @@ pub(crate) struct EngineApiLatencyMetrics { pub(crate) get_blobs_v2: Histogram, } -/// Metrics for engine API forkchoiceUpdated responses. -#[derive(Metrics)] -#[metrics(scope = "engine.rpc")] -pub(crate) struct ForkchoiceUpdatedResponseMetrics { - /// The total count of forkchoice updated messages received. - pub(crate) forkchoice_updated_messages: Counter, - /// The total count of forkchoice updated messages that we responded to with - /// [`Invalid`](alloy_rpc_types_engine::PayloadStatusEnum#Invalid). - pub(crate) forkchoice_updated_invalid: Counter, - /// The total count of forkchoice updated messages that we responded to with - /// [`Valid`](alloy_rpc_types_engine::PayloadStatusEnum#Valid). - pub(crate) forkchoice_updated_valid: Counter, - /// The total count of forkchoice updated messages that we responded to with - /// [`Syncing`](alloy_rpc_types_engine::PayloadStatusEnum#Syncing). - pub(crate) forkchoice_updated_syncing: Counter, - /// The total count of forkchoice updated messages that we responded to with - /// [`Accepted`](alloy_rpc_types_engine::PayloadStatusEnum#Accepted). - pub(crate) forkchoice_updated_accepted: Counter, - /// The total count of forkchoice updated messages that were unsuccessful, i.e. we responded - /// with an error type that is not a [`PayloadStatusEnum`]. - pub(crate) forkchoice_updated_error: Counter, -} - -/// Metrics for engine API newPayload responses. -#[derive(Metrics)] -#[metrics(scope = "engine.rpc")] -pub(crate) struct NewPayloadStatusResponseMetrics { - /// The total count of new payload messages received. - pub(crate) new_payload_messages: Counter, - /// The total count of new payload messages that we responded to with - /// [Invalid](alloy_rpc_types_engine::PayloadStatusEnum#Invalid). - pub(crate) new_payload_invalid: Counter, - /// The total count of new payload messages that we responded to with - /// [Valid](alloy_rpc_types_engine::PayloadStatusEnum#Valid). - pub(crate) new_payload_valid: Counter, - /// The total count of new payload messages that we responded to with - /// [Syncing](alloy_rpc_types_engine::PayloadStatusEnum#Syncing). 
- pub(crate) new_payload_syncing: Counter, - /// The total count of new payload messages that we responded to with - /// [Accepted](alloy_rpc_types_engine::PayloadStatusEnum#Accepted). - pub(crate) new_payload_accepted: Counter, - /// The total count of new payload messages that were unsuccessful, i.e. we responded with an - /// error type that is not a [`PayloadStatusEnum`]. - pub(crate) new_payload_error: Counter, - /// The total gas of valid new payload messages received. - pub(crate) new_payload_total_gas: Histogram, - /// The gas per second of valid new payload messages received. - pub(crate) new_payload_gas_per_second: Histogram, - /// Latency for the last `engine_newPayloadV*` call - pub(crate) new_payload_last: Gauge, -} - #[derive(Metrics)] #[metrics(scope = "engine.rpc.blobs")] pub(crate) struct BlobMetrics { @@ -126,48 +66,3 @@ pub(crate) struct BlobMetrics { /// Number of times getBlobsV2 responded with “miss” pub(crate) get_blobs_requests_failure_total: Counter, } - -impl NewPayloadStatusResponseMetrics { - /// Increment the newPayload counter based on the given rpc result - pub(crate) fn update_response_metrics( - &self, - result: &Result, - gas_used: u64, - time: Duration, - ) { - self.new_payload_last.set(time); - match result { - Ok(status) => match status.status { - PayloadStatusEnum::Valid => { - self.new_payload_valid.increment(1); - self.new_payload_total_gas.record(gas_used as f64); - self.new_payload_gas_per_second.record(gas_used as f64 / time.as_secs_f64()); - } - PayloadStatusEnum::Syncing => self.new_payload_syncing.increment(1), - PayloadStatusEnum::Accepted => self.new_payload_accepted.increment(1), - PayloadStatusEnum::Invalid { .. } => self.new_payload_invalid.increment(1), - }, - Err(_) => self.new_payload_error.increment(1), - } - self.new_payload_messages.increment(1); - } -} - -impl ForkchoiceUpdatedResponseMetrics { - /// Increment the forkchoiceUpdated counter based on the given rpc result - pub(crate) fn update_response_metrics( - &self, - result: &Result, - ) { - match result { - Ok(status) => match status.payload_status.status { - PayloadStatusEnum::Valid => self.forkchoice_updated_valid.increment(1), - PayloadStatusEnum::Syncing => self.forkchoice_updated_syncing.increment(1), - PayloadStatusEnum::Accepted => self.forkchoice_updated_accepted.increment(1), - PayloadStatusEnum::Invalid { .. 
} => self.forkchoice_updated_invalid.increment(1), - }, - Err(_) => self.forkchoice_updated_error.increment(1), - } - self.forkchoice_updated_messages.increment(1); - } -} diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index a2293b46309..fc3f6e1a12e 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee"] } +revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee", "optional_fee_charge", "memory_limit"] } reth-chain-state.workspace = true revm-inspectors.workspace = true reth-primitives-traits = { workspace = true, features = ["rpc-compat"] } diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index ed05f9d373b..4e0afbf6ab9 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -16,7 +16,9 @@ use alloy_rpc_types_eth::{ }; use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_primitives_traits::TxTy; use reth_rpc_convert::RpcTxReq; +use reth_rpc_eth_types::FillTransactionResult; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use tracing::trace; @@ -29,6 +31,7 @@ pub trait FullEthApiServer: RpcBlock, RpcReceipt, RpcHeader, + TxTy, > + FullEthApi + Clone { @@ -41,6 +44,7 @@ impl FullEthApiServer for T where RpcBlock, RpcReceipt, RpcHeader, + TxTy, > + FullEthApi + Clone { @@ -49,7 +53,15 @@ impl FullEthApiServer for T where /// Eth rpc interface: #[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] -pub trait EthApi { +pub trait EthApi< + TxReq: RpcObject, + T: RpcObject, + B: RpcObject, + R: RpcObject, + H: RpcObject, + RawTx: RpcObject, +> +{ /// Returns the protocol version encoded as a string. #[method(name = "protocolVersion")] async fn protocol_version(&self) -> RpcResult; @@ -228,6 +240,10 @@ pub trait EthApi>, ) -> RpcResult; + /// Fills the defaults on a given unsigned transaction. + #[method(name = "fillTransaction")] + async fn fill_transaction(&self, request: TxReq) -> RpcResult>; + /// Simulate arbitrary number of transactions at an arbitrary blockchain index, with the /// optionality of state overrides #[method(name = "callMany")] @@ -388,6 +404,7 @@ impl RpcBlock, RpcReceipt, RpcHeader, + TxTy, > for T where T: FullEthApi, @@ -682,6 +699,15 @@ where .await?) } + /// Handler for: `eth_fillTransaction` + async fn fill_transaction( + &self, + request: RpcTxReq, + ) -> RpcResult>> { + trace!(target: "rpc::eth", ?request, "Serving eth_fillTransaction"); + Ok(EthTransactions::fill_transaction(self, request).await?) + } + /// Handler for: `eth_callMany` async fn call_many( &self, @@ -801,7 +827,7 @@ where /// Handler for: `eth_sendTransaction` async fn send_transaction(&self, request: RpcTxReq) -> RpcResult { trace!(target: "rpc::eth", ?request, "Serving eth_sendTransaction"); - Ok(EthTransactions::send_transaction(self, request).await?) + Ok(EthTransactions::send_transaction_request(self, request).await?) 
} /// Handler for: `eth_sendRawTransaction` diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index b96dab882a0..05f0de87464 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -20,15 +20,12 @@ use alloy_rpc_types_eth::{ use futures::Future; use reth_errors::{ProviderError, RethError}; use reth_evm::{ - ConfigureEvm, Evm, EvmEnv, EvmEnvFor, HaltReasonFor, InspectorFor, SpecFor, TransactionEnv, - TxEnvFor, + env::BlockEnvironment, ConfigureEvm, Evm, EvmEnvFor, HaltReasonFor, InspectorFor, + TransactionEnv, TxEnvFor, }; use reth_node_api::BlockBody; use reth_primitives_traits::Recovered; -use reth_revm::{ - database::StateProviderDatabase, - db::{CacheDB, State}, -}; +use reth_revm::{database::StateProviderDatabase, db::State}; use reth_rpc_convert::{RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, @@ -38,6 +35,7 @@ use reth_rpc_eth_types::{ }; use reth_storage_api::{BlockIdReader, ProviderTx}; use revm::{ + context::Block, context_interface::{ result::{ExecutionResult, ResultAndState}, Transaction, @@ -115,7 +113,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA evm_env.cfg_env.disable_nonce_check = true; evm_env.cfg_env.disable_base_fee = true; evm_env.cfg_env.tx_gas_limit_cap = Some(u64::MAX); - evm_env.block_env.basefee = 0; + evm_env.block_env.inner_mut().basefee = 0; } let SimBlock { block_overrides, state_overrides, calls } = block; @@ -123,19 +121,23 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA if let Some(block_overrides) = block_overrides { // ensure we don't allow uncapped gas limit per block if let Some(gas_limit_override) = block_overrides.gas_limit && - gas_limit_override > evm_env.block_env.gas_limit && + gas_limit_override > evm_env.block_env.gas_limit() && gas_limit_override > this.call_gas_limit() { return Err(EthApiError::other(EthSimulateError::GasLimitReached).into()) } - apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env); + apply_block_overrides( + block_overrides, + &mut db, + evm_env.block_env.inner_mut(), + ); } if let Some(state_overrides) = state_overrides { apply_state_overrides(state_overrides, &mut db) .map_err(Self::Error::from_eth_err)?; } - let block_gas_limit = evm_env.block_env.gas_limit; + let block_gas_limit = evm_env.block_env.gas_limit(); let chain_id = evm_env.cfg_env.chain_id; let default_gas_limit = { @@ -281,7 +283,8 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let this = self.clone(); self.spawn_with_state_at_block(at.into(), move |state| { let mut all_results = Vec::with_capacity(bundles.len()); - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); if replay_block_txs { // only need to replay the transactions in the block if not all transactions are @@ -295,7 +298,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } // transact all bundles - for bundle in bundles { + for (bundle_index, bundle) in bundles.into_iter().enumerate() { let Bundle { transactions, block_override } = bundle; if transactions.is_empty() { // Skip empty bundles @@ -306,15 +309,30 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let block_overrides = block_override.map(Box::new); // transact all 
transactions in the bundle - for tx in transactions { + for (tx_index, tx) in transactions.into_iter().enumerate() { // Apply overrides, state overrides are only applied for the first tx in the // request let overrides = EvmOverrides::new(state_override.take(), block_overrides.clone()); - let (current_evm_env, prepared_tx) = - this.prepare_call_env(evm_env.clone(), tx, &mut db, overrides)?; - let res = this.transact(&mut db, current_evm_env, prepared_tx)?; + let (current_evm_env, prepared_tx) = this + .prepare_call_env(evm_env.clone(), tx, &mut db, overrides) + .map_err(|err| { + Self::Error::from_eth_err(EthApiError::call_many_error( + bundle_index, + tx_index, + err.into(), + )) + })?; + let res = this.transact(&mut db, current_evm_env, prepared_tx).map_err( + |err| { + Self::Error::from_eth_err(EthApiError::call_many_error( + bundle_index, + tx_index, + err.into(), + )) + }, + )?; match ensure_success::<_, Self::Error>(res.result) { Ok(output) => { @@ -379,7 +397,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA { self.spawn_blocking_io_fut(move |this| async move { let state = this.state_at_block_id(at).await?; - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = State::builder().with_database(StateProviderDatabase::new(state)).build(); if let Some(state_overrides) = state_override { apply_state_overrides(state_overrides, &mut db) @@ -404,7 +422,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let cap = this.caller_gas_allowance(&mut db, &evm_env, &tx_env)?; // no gas limit was provided in the request, so we need to cap the request's gas // limit - tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit)); + tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit())); } // can consume the list since we're not using the request anymore @@ -461,7 +479,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// Executes code on state. pub trait Call: LoadState< - RpcConvert: RpcConvert, Spec = SpecFor>, + RpcConvert: RpcConvert, Error: FromEvmError + From<::Error> + From, @@ -475,6 +493,9 @@ pub trait Call: /// Returns the maximum number of blocks accepted for `eth_simulateV1`. fn max_simulate_blocks(&self) -> u64; + /// Returns the maximum memory the EVM can allocate per RPC request. + fn evm_memory_limit(&self) -> u64; + /// Returns the max gas limit that the caller can afford given a transaction environment. fn caller_gas_allowance( &self, @@ -520,7 +541,7 @@ pub trait Call: Ok(res) } - /// Executes the [`EvmEnv`] against the given [Database] without committing state + /// Executes the [`reth_evm::EvmEnv`] against the given [Database] without committing state /// changes. fn transact_with_inspector( &self, @@ -574,7 +595,7 @@ pub trait Call: /// Prepares the state and env for the given [`RpcTxReq`] at the given [`BlockId`] and /// executes the closure on a new task returning the result of the closure. /// - /// This returns the configured [`EvmEnv`] for the given [`RpcTxReq`] at + /// This returns the configured [`reth_evm::EvmEnv`] for the given [`RpcTxReq`] at /// the given [`BlockId`] and with configured call settings: `prepare_call_env`. /// /// This is primarily used by `eth_call`. 
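This revision also threads a per-request EVM memory cap through the call helpers: the new `Call::evm_memory_limit()` is applied in `prepare_call_env` below (`evm_env.cfg_env.memory_limit = self.evm_memory_limit()`), the same value is exposed on `EthConfig` as `rpc_evm_memory_limit` with a default of `(1 << 32) - 1` bytes, and it is wired from `RpcServerArgs` in the `config.rs` hunk at the top of this diff. A small sketch of that default, using an illustrative `CallConfig` struct rather than the real `EthConfig`:

/// Illustrative config holder; the real field lives on reth's `EthConfig`.
struct CallConfig {
    /// Maximum memory the EVM may allocate while serving a single RPC call, in bytes.
    evm_memory_limit: u64,
}

impl Default for CallConfig {
    fn default() -> Self {
        // Matches the default added in this diff: (1 << 32) - 1 bytes, i.e. just under 4 GiB.
        Self { evm_memory_limit: (1u64 << 32) - 1 }
    }
}

fn main() {
    let cfg = CallConfig::default();
    assert_eq!(cfg.evm_memory_limit, 4_294_967_295);
    println!(
        "per-request EVM memory limit: {} bytes (~{:.1} GiB)",
        cfg.evm_memory_limit,
        cfg.evm_memory_limit as f64 / (1u64 << 30) as f64
    );
}

Exceeding the limit now surfaces as the new `RpcInvalidTransactionError::MemoryLimitOutOfGas` variant introduced further down in `error/mod.rs`.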
@@ -609,8 +630,9 @@ pub trait Call: let this = self.clone(); self.spawn_blocking_io_fut(move |_| async move { let state = this.state_at_block_id(at).await?; - let mut db = - CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); + let mut db = State::builder() + .with_database(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))) + .build(); let (evm_env, tx_env) = this.prepare_call_env(evm_env, request, &mut db, overrides)?; @@ -661,7 +683,8 @@ pub trait Call: let this = self.clone(); self.spawn_with_state_at_block(parent_block.into(), move |state| { - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); let block_txs = block.transactions_recovered(); // replay all transactions prior to the targeted transaction @@ -680,7 +703,7 @@ pub trait Call: /// Replays all the transactions until the target transaction is found. /// /// All transactions before the target transaction are executed and their changes are written to - /// the _runtime_ db ([`CacheDB`]). + /// the _runtime_ db ([`State`]). /// /// Note: This assumes the target transaction is in the given iterator. /// Returns the index of the target transaction in the given iterator. @@ -712,10 +735,10 @@ pub trait Call: /// /// All `TxEnv` fields are derived from the given [`RpcTxReq`], if fields are - /// `None`, they fall back to the [`EvmEnv`]'s settings. + /// `None`, they fall back to the [`reth_evm::EvmEnv`]'s settings. fn create_txn_env( &self, - evm_env: &EvmEnv>, + evm_env: &EvmEnvFor, mut request: RpcTxReq<::Network>, mut db: impl Database>, ) -> Result, Self::Error> { @@ -728,10 +751,10 @@ pub trait Call: request.as_mut().set_nonce(nonce); } - Ok(self.tx_resp_builder().tx_env(request, &evm_env.cfg_env, &evm_env.block_env)?) + Ok(self.tx_resp_builder().tx_env(request, evm_env)?) } - /// Prepares the [`EvmEnv`] for execution of calls. + /// Prepares the [`reth_evm::EvmEnv`] for execution of calls. /// /// Does not commit any changes to the underlying database. /// @@ -786,11 +809,18 @@ pub trait Call: // Disable EIP-7825 transaction gas limit to support larger transactions evm_env.cfg_env.tx_gas_limit_cap = Some(u64::MAX); + // Disable additional fee charges, e.g. 
opstack operator fee charge + // See: + // + evm_env.cfg_env.disable_fee_charge = true; + + evm_env.cfg_env.memory_limit = self.evm_memory_limit(); + // set nonce to None so that the correct nonce is chosen by the EVM request.as_mut().take_nonce(); if let Some(block_overrides) = overrides.block { - apply_block_overrides(*block_overrides, db, &mut evm_env.block_env); + apply_block_overrides(*block_overrides, db, evm_env.block_env.inner_mut()); } if let Some(state_overrides) = overrides.state { apply_state_overrides(state_overrides, db) @@ -801,7 +831,7 @@ pub trait Call: // lower the basefee to 0 to avoid breaking EVM invariants (basefee < gasprice): if tx_env.gas_price() == 0 { - evm_env.block_env.basefee = 0; + evm_env.block_env.inner_mut().basefee = 0; } if !request_has_gas_limit { @@ -811,7 +841,7 @@ pub trait Call: trace!(target: "rpc::eth::call", ?tx_env, "Applying gas limit cap with caller allowance"); let cap = self.caller_gas_allowance(db, &evm_env, &tx_env)?; // ensure we cap gas_limit to the block's - tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit)); + tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit())); } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index cca674e9739..6c14f96049c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -10,7 +10,7 @@ use futures::Future; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_errors::ProviderError; use reth_evm::{ConfigureEvm, Database, Evm, EvmEnvFor, EvmFor, TransactionEnv, TxEnvFor}; -use reth_revm::{database::StateProviderDatabase, db::CacheDB}; +use reth_revm::{database::StateProviderDatabase, db::State}; use reth_rpc_convert::{RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ error::{api::FromEvmHalt, FromEvmError}, @@ -18,7 +18,10 @@ use reth_rpc_eth_types::{ }; use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO}; use reth_storage_api::StateProvider; -use revm::context_interface::{result::ExecutionResult, Transaction}; +use revm::{ + context::Block, + context_interface::{result::ExecutionResult, Transaction}, +}; use tracing::trace; /// Gas execution estimates @@ -60,10 +63,10 @@ pub trait EstimateCall: Call { let tx_request_gas_limit = request.as_ref().gas_limit(); let tx_request_gas_price = request.as_ref().gas_price(); // the gas limit of the corresponding block - let max_gas_limit = evm_env - .cfg_env - .tx_gas_limit_cap - .map_or(evm_env.block_env.gas_limit, |cap| cap.min(evm_env.block_env.gas_limit)); + let max_gas_limit = evm_env.cfg_env.tx_gas_limit_cap.map_or_else( + || evm_env.block_env.gas_limit(), + |cap| cap.min(evm_env.block_env.gas_limit()), + ); // Determine the highest possible gas limit, considering both the request's specified limit // and the block's limit. @@ -78,7 +81,7 @@ pub trait EstimateCall: Call { .unwrap_or(max_gas_limit); // Configure the evm env - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = State::builder().with_database(StateProviderDatabase::new(state)).build(); // Apply any state overrides if specified. 
if let Some(state_override) = state_override { @@ -90,7 +93,7 @@ pub trait EstimateCall: Call { // Check if this is a basic transfer (no input data to account with no code) let is_basic_transfer = if tx_env.input().is_empty() && let TxKind::Call(to) = tx_env.kind() && - let Ok(code) = db.db.account_code(&to) + let Ok(code) = db.database.account_code(&to) { code.map(|code| code.is_empty()).unwrap_or(true) } else { @@ -231,9 +234,8 @@ pub trait EstimateCall: Call { // An estimation error is allowed once the current gas limit range used in the binary // search is small enough (less than 1.5% of the highest gas limit) // Result< - PendingBlockEnv< - ProviderBlock, - ProviderReceipt, - SpecFor, - >, - Self::Error, - > { + fn pending_block_env_and_cfg(&self) -> Result, Self::Error> { if let Some(block) = self.provider().pending_block().map_err(Self::Error::from_eth_err)? && let Some(receipts) = self .provider() @@ -166,7 +156,7 @@ pub trait LoadPendingBlock: // Is the pending block cached? if let Some(pending_block) = lock.as_ref() { // Is the cached block not expired and latest is its parent? - if pending.evm_env.block_env.number == U256::from(pending_block.block().number()) && + if pending.evm_env.block_env.number() == U256::from(pending_block.block().number()) && parent.hash() == pending_block.block().parent_hash() && now <= pending_block.expires_at { @@ -265,14 +255,14 @@ pub trait LoadPendingBlock: .unwrap_or_else(BlobParams::cancun); let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; - let block_gas_limit: u64 = block_env.gas_limit; + let block_gas_limit: u64 = block_env.gas_limit(); // Only include transactions if not configured as Empty if !self.pending_block_kind().is_empty() { let mut best_txs = self .pool() .best_transactions_with_attributes(BestTransactionsAttributes::new( - block_env.basefee, + block_env.basefee(), block_env.blob_gasprice().map(|gasprice| gasprice as u64), )) // freeze to get a block as fast as possible @@ -369,7 +359,7 @@ pub trait LoadPendingBlock: } } - let BlockBuilderOutcome { execution_result, block, hashed_state, .. 
} = + let BlockBuilderOutcome { execution_result, block, hashed_state, trie_updates } = builder.finish(NoopProvider::default()).map_err(Self::Error::from_eth_err)?; let execution_outcome = ExecutionOutcome::new( @@ -383,6 +373,7 @@ pub trait LoadPendingBlock: recovered_block: block.into(), execution_output: Arc::new(execution_outcome), hashed_state: Arc::new(hashed_state), + trie_updates: Arc::new(trie_updates), }) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 58c3e8897dc..12215fbff1e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -6,27 +6,11 @@ use alloy_consensus::{transaction::TransactionMeta, TxReceipt}; use futures::Future; use reth_primitives_traits::SignerRecoverable; use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert}; -use reth_rpc_eth_types::{error::FromEthApiError, EthApiError}; +use reth_rpc_eth_types::{ + error::FromEthApiError, utils::calculate_gas_used_and_next_log_index, EthApiError, +}; use reth_storage_api::{ProviderReceipt, ProviderTx}; -/// Calculates the gas used and next log index for a transaction at the given index -pub fn calculate_gas_used_and_next_log_index( - tx_index: u64, - all_receipts: &[impl TxReceipt], -) -> (u64, usize) { - let mut gas_used = 0; - let mut next_log_index = 0; - - if tx_index > 0 { - for receipt in all_receipts.iter().take(tx_index as usize) { - gas_used = receipt.cumulative_gas_used(); - next_log_index += receipt.logs().len(); - } - } - - (gas_used, next_log_index) -} - /// Assembles transaction receipt data w.r.t to network. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs index 4060be138e0..c54c8943c0a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/signer.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs @@ -32,11 +32,3 @@ pub trait EthSigner: Send + Sync + DynClone { } dyn_clone::clone_trait_object!( EthSigner); - -/// Adds 20 random dev signers for access via the API. Used in dev mode. -#[auto_impl::auto_impl(&)] -pub trait AddDevSigners { - /// Generates 20 random developer accounts. - /// Used in DEV mode. - fn with_dev_accounts(&self); -} diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index a3c79416cfe..30ba12165ea 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -13,20 +13,20 @@ use reth_evm::{ Evm, EvmEnvFor, EvmFor, HaltReasonFor, InspectorFor, TxEnvFor, }; use reth_primitives_traits::{BlockBody, Recovered, RecoveredBlock}; -use reth_revm::{database::StateProviderDatabase, db::CacheDB}; +use reth_revm::{database::StateProviderDatabase, db::State}; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, EthApiError, }; use reth_storage_api::{ProviderBlock, ProviderTx}; -use revm::{context_interface::result::ResultAndState, DatabaseCommit}; +use revm::{context::Block, context_interface::result::ResultAndState, DatabaseCommit}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use std::sync::Arc; /// Executes CPU heavy tasks. pub trait Trace: LoadState> { - /// Executes the [`TxEnvFor`] with [`EvmEnvFor`] against the given [Database] without committing - /// state changes. 
+ /// Executes the [`TxEnvFor`] with [`reth_evm::EvmEnv`] against the given [Database] without + /// committing state changes. fn inspect( &self, db: DB, @@ -68,7 +68,7 @@ pub trait Trace: LoadState> { + 'static, { self.with_state_at_block(at, move |this, state| { - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = State::builder().with_database(StateProviderDatabase::new(state)).build(); let mut inspector = TracingInspector::new(config); let res = this.inspect(&mut db, evm_env, tx_env, &mut inspector)?; f(inspector, res) @@ -103,7 +103,7 @@ pub trait Trace: LoadState> { { let this = self.clone(); self.spawn_with_state_at_block(at, move |state| { - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = State::builder().with_database(StateProviderDatabase::new(state)).build(); let mut inspector = TracingInspector::new(config); let res = this.inspect(&mut db, evm_env, tx_env, &mut inspector)?; f(inspector, res, db) @@ -184,7 +184,8 @@ pub trait Trace: LoadState> { let this = self.clone(); self.spawn_with_state_at_block(parent_block.into(), move |state| { - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); let block_txs = block.transactions_recovered(); this.apply_pre_execution_changes(&block, &mut db, &evm_env)?; @@ -301,13 +302,14 @@ pub trait Trace: LoadState> { let state_at = block.parent_hash(); let block_hash = block.hash(); - let block_number = evm_env.block_env.number.saturating_to(); - let base_fee = evm_env.block_env.basefee; + let block_number = evm_env.block_env.number().saturating_to(); + let base_fee = evm_env.block_env.basefee(); // now get the state let state = this.state_at_block_id(state_at.into()).await?; - let mut db = - CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); + let mut db = State::builder() + .with_database(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))) + .build(); this.apply_pre_execution_changes(&block, &mut db, &evm_env)?; diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 81909b3f36e..2b1f3d05332 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -1,7 +1,7 @@ //! Database access for `eth_` transaction RPC methods. Loads transaction and receipt data w.r.t. //! network. 
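Further down in this file, the new `fill_transaction` helper (backing `eth_fillTransaction`) fills any missing defaults on an unsigned request: value, nonce, chain id, blob fee, gas limit, and finally the EIP-1559 fee fields, where the max fee is derived as the latest base fee plus the caller's priority tip (or the suggested tip if none was given). A self-contained sketch of just that fee arithmetic; the `FeeFields` and `fill_fees` names are illustrative and not part of reth:

/// Illustrative subset of an EIP-1559 request being filled.
#[derive(Default, Debug)]
struct FeeFields {
    max_priority_fee_per_gas: Option<u128>,
    max_fee_per_gas: Option<u128>,
}

/// Mirrors the defaulting logic: reuse the caller's tip if present, otherwise fall back to a
/// suggested tip, then derive `max_fee_per_gas` from the latest base fee plus that tip.
fn fill_fees(req: &mut FeeFields, suggested_tip: u128, latest_base_fee: u64) {
    let tip = match req.max_priority_fee_per_gas {
        Some(tip) => tip,
        None => {
            req.max_priority_fee_per_gas = Some(suggested_tip);
            suggested_tip
        }
    };
    if req.max_fee_per_gas.is_none() {
        req.max_fee_per_gas = Some(latest_base_fee as u128 + tip);
    }
}

fn main() {
    let mut req = FeeFields::default();
    fill_fees(&mut req, 2_000_000_000, 15_000_000_000); // 2 gwei tip, 15 gwei base fee
    assert_eq!(req.max_fee_per_gas, Some(17_000_000_000));
    println!("{req:?}");
}

In the real helper this step only runs when no legacy `gas_price` was provided; the filled request is then converted via `build_simulate_v1_transaction` and returned as a `FillTransactionResult` carrying both the EIP-2718 encoded bytes and the typed transaction.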
-use super::{EthApiSpec, EthSigner, LoadBlock, LoadReceipt, LoadState, SpawnBlocking}; +use super::{EthApiSpec, EthSigner, LoadBlock, LoadFee, LoadReceipt, LoadState, SpawnBlocking}; use crate::{ helpers::{estimate::EstimateCall, spec::SignersForRpc}, FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, RpcReceipt, @@ -13,24 +13,25 @@ use alloy_consensus::{ }; use alloy_dyn_abi::TypedData; use alloy_eips::{eip2718::Encodable2718, BlockId}; -use alloy_network::TransactionBuilder; -use alloy_primitives::{Address, Bytes, TxHash, B256}; +use alloy_network::{TransactionBuilder, TransactionBuilder4844}; +use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionInfo}; use futures::{Future, StreamExt}; use reth_chain_state::CanonStateSubscriptions; use reth_node_api::BlockBody; -use reth_primitives_traits::{RecoveredBlock, SignedTransaction}; +use reth_primitives_traits::{Recovered, RecoveredBlock, SignedTransaction, TxTy, WithEncoded}; use reth_rpc_convert::{transaction::RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ - utils::binary_search, EthApiError, EthApiError::TransactionConfirmationTimeout, SignError, - TransactionSource, + utils::{binary_search, recover_raw_transaction}, + EthApiError::{self, TransactionConfirmationTimeout}, + FillTransactionResult, SignError, TransactionSource, }; use reth_storage_api::{ BlockNumReader, BlockReaderIdExt, ProviderBlock, ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider, }; use reth_transaction_pool::{ - AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, + AddedTransactionOutcome, PoolPooledTx, PoolTransaction, TransactionOrigin, TransactionPool, }; use std::{sync::Arc, time::Duration}; @@ -76,6 +77,17 @@ pub trait EthTransactions: LoadTransaction { fn send_raw_transaction( &self, tx: Bytes, + ) -> impl Future> + Send { + async move { + let recovered = recover_raw_transaction::>(&tx)?; + self.send_transaction(WithEncoded::new(tx, recovered)).await + } + } + + /// Submits the transaction to the pool. + fn send_transaction( + &self, + tx: WithEncoded>>, ) -> impl Future> + Send; /// Decodes and recovers the transaction and submits it to the pool. @@ -91,8 +103,8 @@ pub trait EthTransactions: LoadTransaction { let this = self.clone(); let timeout_duration = self.send_raw_transaction_sync_timeout(); async move { - let hash = EthTransactions::send_raw_transaction(&this, tx).await?; let mut stream = this.provider().canonical_state_stream(); + let hash = EthTransactions::send_raw_transaction(&this, tx).await?; tokio::time::timeout(timeout_duration, async { while let Some(notification) = stream.next().await { let chain = notification.committed(); @@ -307,10 +319,8 @@ pub trait EthTransactions: LoadTransaction { return Ok(Some(self.tx_resp_builder().fill_pending(transaction)?)); } - // Check if the sender is a contract - if !self.get_code(sender, None).await?.is_empty() { - return Ok(None); - } + // Note: we can't optimize for contracts (account with code) and cannot shortcircuit if + // the address has code, because with 7702 EOAs can also have code let highest = self.transaction_count(sender, None).await?.saturating_to::(); @@ -386,7 +396,7 @@ pub trait EthTransactions: LoadTransaction { /// Signs transaction with a matching signer, if any and submits the transaction to the pool. /// Returns the hash of the signed transaction. 
- fn send_transaction( + fn send_transaction_request( &self, mut request: RpcTxReq, ) -> impl Future> + Send @@ -436,6 +446,75 @@ pub trait EthTransactions: LoadTransaction { } } + /// Fills the defaults on a given unsigned transaction. + fn fill_transaction( + &self, + mut request: RpcTxReq, + ) -> impl Future>, Self::Error>> + Send + where + Self: EthApiSpec + LoadBlock + EstimateCall + LoadFee, + { + async move { + let from = match request.as_ref().from() { + Some(from) => from, + None => return Err(SignError::NoAccount.into_eth_err()), + }; + + if request.as_ref().value().is_none() { + request.as_mut().set_value(U256::ZERO); + } + + if request.as_ref().nonce().is_none() { + let nonce = self.next_available_nonce(from).await?; + request.as_mut().set_nonce(nonce); + } + + let chain_id = self.chain_id(); + request.as_mut().set_chain_id(chain_id.to()); + + if request.as_ref().has_eip4844_fields() && + request.as_ref().max_fee_per_blob_gas().is_none() + { + let blob_fee = self.blob_base_fee().await?; + request.as_mut().set_max_fee_per_blob_gas(blob_fee.to()); + } + + if request.as_ref().blob_sidecar().is_some() && + request.as_ref().blob_versioned_hashes.is_none() + { + request.as_mut().populate_blob_hashes(); + } + + if request.as_ref().gas_limit().is_none() { + let estimated_gas = + self.estimate_gas_at(request.clone(), BlockId::pending(), None).await?; + request.as_mut().set_gas_limit(estimated_gas.to()); + } + + if request.as_ref().gas_price().is_none() { + let tip = if let Some(tip) = request.as_ref().max_priority_fee_per_gas() { + tip + } else { + let tip = self.suggested_priority_fee().await?.to::(); + request.as_mut().set_max_priority_fee_per_gas(tip); + tip + }; + if request.as_ref().max_fee_per_gas().is_none() { + let header = + self.provider().latest_header().map_err(Self::Error::from_eth_err)?; + let base_fee = header.and_then(|h| h.base_fee_per_gas()).unwrap_or_default(); + request.as_mut().set_max_fee_per_gas(base_fee as u128 + tip); + } + } + + let tx = self.tx_resp_builder().build_simulate_v1_transaction(request)?; + + let raw = tx.encoded_2718().into(); + + Ok(FillTransactionResult { raw, tx }) + } + } + /// Signs a transaction, with configured signers. fn sign_request( &self, diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 22100520016..ed4fcfa5c80 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -2,11 +2,9 @@ use crate::{AsEthApiError, FromEthApiError, RpcNodeCore}; use alloy_rpc_types_eth::Block; -use reth_chain_state::CanonStateSubscriptions; -use reth_rpc_convert::RpcConvert; +use reth_rpc_convert::{RpcConvert, SignableTxRequest}; pub use reth_rpc_convert::{RpcTransaction, RpcTxReq, RpcTypes}; -use reth_storage_api::{ProviderTx, ReceiptProvider, TransactionsProvider}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use reth_storage_api::ProviderTx; use std::{ error::Error, fmt::{self}, @@ -52,12 +50,11 @@ pub type RpcError = ::Error; /// Helper trait holds necessary trait bounds on [`EthApiTypes`] to implement `eth` API. 
pub trait FullEthApiTypes where - Self: RpcNodeCore< - Provider: TransactionsProvider + ReceiptProvider + CanonStateSubscriptions, - Pool: TransactionPool< - Transaction: PoolTransaction>, + Self: RpcNodeCore + + EthApiTypes< + NetworkTypes: RpcTypes< + TransactionRequest: SignableTxRequest>, >, - > + EthApiTypes< RpcConvert: RpcConvert< Primitives = Self::Primitives, Network = Self::NetworkTypes, @@ -68,12 +65,11 @@ where } impl FullEthApiTypes for T where - T: RpcNodeCore< - Provider: TransactionsProvider + ReceiptProvider + CanonStateSubscriptions, - Pool: TransactionPool< - Transaction: PoolTransaction>, + T: RpcNodeCore + + EthApiTypes< + NetworkTypes: RpcTypes< + TransactionRequest: SignableTxRequest>, >, - > + EthApiTypes< RpcConvert: RpcConvert< Primitives = ::Primitives, Network = Self::NetworkTypes, diff --git a/crates/rpc/rpc-eth-types/src/builder/config.rs b/crates/rpc/rpc-eth-types/src/builder/config.rs index 47f15ae5ae7..ded50ab4a83 100644 --- a/crates/rpc/rpc-eth-types/src/builder/config.rs +++ b/crates/rpc/rpc-eth-types/src/builder/config.rs @@ -95,6 +95,8 @@ pub struct EthConfig { pub raw_tx_forwarder: ForwardConfig, /// Timeout duration for `send_raw_transaction_sync` RPC method. pub send_raw_transaction_sync_timeout: Duration, + /// Maximum memory the EVM can allocate per RPC request. + pub rpc_evm_memory_limit: u64, } impl EthConfig { @@ -126,6 +128,7 @@ impl Default for EthConfig { pending_block_kind: PendingBlockKind::Full, raw_tx_forwarder: ForwardConfig::default(), send_raw_transaction_sync_timeout: RPC_DEFAULT_SEND_RAW_TX_SYNC_TIMEOUT_SECS, + rpc_evm_memory_limit: (1 << 32) - 1, } } } @@ -216,6 +219,12 @@ impl EthConfig { self.send_raw_transaction_sync_timeout = timeout; self } + + /// Configures the maximum memory the EVM can allocate per RPC request. 
+ pub const fn rpc_evm_memory_limit(mut self, memory_limit: u64) -> Self { + self.rpc_evm_memory_limit = memory_limit; + self + } } /// Config for the filter diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index abb8983485a..8209af0fa53 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -8,14 +8,14 @@ use reth_revm::{database::StateProviderDatabase, DatabaseRef}; use reth_storage_api::{BytecodeReader, HashedPostStateProvider, StateProvider}; use reth_trie::{HashedStorage, MultiProofTargets}; use revm::{ - database::{BundleState, CacheDB}, + database::{BundleState, State}, primitives::HashMap, state::{AccountInfo, Bytecode}, Database, DatabaseCommit, }; -/// Helper alias type for the state's [`CacheDB`] -pub type StateCacheDb<'a> = CacheDB>>; +/// Helper alias type for the state's [`State`] +pub type StateCacheDb<'a> = State>>; /// Hack to get around 'higher-ranked lifetime error', see /// diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index 1f3ee7dd6dd..b8814785478 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -69,7 +69,7 @@ pub enum EthApiError { InvalidTransactionSignature, /// Errors related to the transaction pool #[error(transparent)] - PoolError(RpcPoolError), + PoolError(#[from] RpcPoolError), /// Header not found for block hash/number/tag #[error("header not found")] HeaderNotFound(BlockId), @@ -186,6 +186,16 @@ pub enum EthApiError { /// Error thrown when batch tx send channel fails #[error("Batch transaction sender channel closed")] BatchTxSendError, + /// Error that occurred during `call_many` execution with bundle and transaction context + #[error("call_many error in bundle {bundle_index} and transaction {tx_index}: {}", .error.message())] + CallManyError { + /// Bundle index where the error occurred + bundle_index: usize, + /// Transaction index within the bundle where the error occurred + tx_index: usize, + /// The underlying error object + error: jsonrpsee_types::ErrorObject<'static>, + }, /// Any other error #[error("{0}")] Other(Box), @@ -197,6 +207,15 @@ impl EthApiError { Self::Other(Box::new(err)) } + /// Creates a new [`EthApiError::CallManyError`] variant. 
+ pub const fn call_many_error( + bundle_index: usize, + tx_index: usize, + error: jsonrpsee_types::ErrorObject<'static>, + ) -> Self { + Self::CallManyError { bundle_index, tx_index, error } + } + /// Returns `true` if error is [`RpcInvalidTransactionError::GasTooHigh`] pub const fn is_gas_too_high(&self) -> bool { matches!( @@ -304,6 +323,16 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { EthApiError::BatchTxSendError => { internal_rpc_err("Batch transaction sender channel closed".to_string()) } + EthApiError::CallManyError { bundle_index, tx_index, error } => { + jsonrpsee_types::error::ErrorObject::owned( + error.code(), + format!( + "call_many error in bundle {bundle_index} and transaction {tx_index}: {}", + error.message() + ), + error.data(), + ) + } } } } @@ -433,7 +462,6 @@ impl From for EthApiError { } ProviderError::BestBlockNotFound => Self::HeaderNotFound(BlockId::latest()), ProviderError::BlockNumberForTransactionIndexNotFound => Self::UnknownBlockOrTxIndex, - ProviderError::TotalDifficultyNotFound(num) => Self::HeaderNotFound(num.into()), ProviderError::FinalizedBlockNotFound => Self::HeaderNotFound(BlockId::finalized()), ProviderError::SafeBlockNotFound => Self::HeaderNotFound(BlockId::safe()), err => Self::Internal(err.into()), @@ -591,6 +619,9 @@ pub enum RpcInvalidTransactionError { /// Contains the gas limit. #[error("out of gas: gas exhausted during memory expansion: {0}")] MemoryOutOfGas(u64), + /// Memory limit was exceeded during memory expansion. + #[error("out of memory: memory limit exceeded during memory expansion")] + MemoryLimitOutOfGas, /// Gas limit was exceeded during precompile execution. /// Contains the gas limit. #[error("out of gas: gas exhausted during precompiled contract execution: {0}")] @@ -681,7 +712,7 @@ impl RpcInvalidTransactionError { /// Converts the halt error /// /// Takes the configured gas limit of the transaction which is attached to the error - pub const fn halt(reason: HaltReason, gas_limit: u64) -> Self { + pub fn halt(reason: HaltReason, gas_limit: u64) -> Self { match reason { HaltReason::OutOfGas(err) => Self::out_of_gas(err, gas_limit), HaltReason::NonceOverflow => Self::NonceMaxValue, @@ -695,7 +726,8 @@ impl RpcInvalidTransactionError { OutOfGasError::Basic | OutOfGasError::ReentrancySentry => { Self::BasicOutOfGas(gas_limit) } - OutOfGasError::Memory | OutOfGasError::MemoryLimit => Self::MemoryOutOfGas(gas_limit), + OutOfGasError::Memory => Self::MemoryOutOfGas(gas_limit), + OutOfGasError::MemoryLimit => Self::MemoryLimitOutOfGas, OutOfGasError::Precompile => Self::PrecompileOutOfGas(gas_limit), OutOfGasError::InvalidOperand => Self::InvalidOperandOutOfGas(gas_limit), } @@ -762,7 +794,7 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::BlobVersionedHashesNotSupported => { Self::BlobVersionedHashesNotSupported } - InvalidTransaction::BlobGasPriceGreaterThanMax => Self::BlobFeeCapTooLow, + InvalidTransaction::BlobGasPriceGreaterThanMax { .. } => Self::BlobFeeCapTooLow, InvalidTransaction::EmptyBlobs => Self::BlobTransactionMissingBlobHashes, InvalidTransaction::BlobVersionNotSupported => Self::BlobHashVersionMismatch, InvalidTransaction::TooManyBlobs { have, .. 
} => Self::TooManyBlobs { have }, @@ -780,6 +812,7 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::Eip7873MissingTarget => { Self::other(internal_rpc_err(err.to_string())) } + InvalidTransaction::Str(_) => Self::other(internal_rpc_err(err.to_string())), } } } @@ -901,8 +934,13 @@ pub enum RpcPoolError { #[error("negative value")] NegativeValue, /// When oversized data is encountered - #[error("oversized data")] - OversizedData, + #[error("oversized data: transaction size {size}, limit {limit}")] + OversizedData { + /// Size of the transaction/input data that exceeded the limit. + size: usize, + /// Configured limit that was exceeded. + limit: usize, + }, /// When the max initcode size is exceeded #[error("max initcode size exceeded")] ExceedsMaxInitCodeSize, @@ -944,7 +982,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { RpcPoolError::MaxTxGasLimitExceeded | RpcPoolError::ExceedsFeeCap { .. } | RpcPoolError::NegativeValue | - RpcPoolError::OversizedData | + RpcPoolError::OversizedData { .. } | RpcPoolError::ExceedsMaxInitCodeSize | RpcPoolError::PoolTransactionError(_) | RpcPoolError::Eip4844(_) | @@ -988,7 +1026,9 @@ impl From for RpcPoolError { InvalidPoolTransactionError::IntrinsicGasTooLow => { Self::Invalid(RpcInvalidTransactionError::GasTooLow) } - InvalidPoolTransactionError::OversizedData(_, _) => Self::OversizedData, + InvalidPoolTransactionError::OversizedData { size, limit } => { + Self::OversizedData { size, limit } + } InvalidPoolTransactionError::Underpriced => Self::Underpriced, InvalidPoolTransactionError::Eip2681 => { Self::Invalid(RpcInvalidTransactionError::NonceMaxValue) diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs index 9c603e4864e..7378ad99629 100644 --- a/crates/rpc/rpc-eth-types/src/lib.rs +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -35,5 +35,5 @@ pub use gas_oracle::{ }; pub use id_provider::EthSubscriptionIdProvider; pub use pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; -pub use transaction::TransactionSource; +pub use transaction::{FillTransactionResult, TransactionSource}; pub use tx_forward::ForwardConfig; diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 05ad6fb4e27..3150fffdc56 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -4,27 +4,26 @@ use std::{sync::Arc, time::Instant}; -use crate::block::BlockAndReceipts; -use alloy_consensus::BlockHeader; +use crate::{block::BlockAndReceipts, utils::calculate_gas_used_and_next_log_index}; +use alloy_consensus::{BlockHeader, TxReceipt}; use alloy_eips::{BlockId, BlockNumberOrTag}; -use alloy_primitives::{BlockHash, B256}; +use alloy_primitives::{BlockHash, TxHash, B256}; use derive_more::Constructor; -use reth_chain_state::{ - BlockState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, -}; +use reth_chain_state::{BlockState, ExecutedBlock}; use reth_ethereum_primitives::Receipt; -use reth_evm::EvmEnv; +use reth_evm::{ConfigureEvm, EvmEnvFor}; use reth_primitives_traits::{ - Block, BlockTy, NodePrimitives, ReceiptTy, RecoveredBlock, SealedHeader, + Block, BlockTy, IndexedTx, NodePrimitives, ReceiptTy, RecoveredBlock, SealedHeader, }; +use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert, RpcTypes}; -/// Configured [`EvmEnv`] for a pending block. +/// Configured [`reth_evm::EvmEnv`] for a pending block. 
#[derive(Debug, Clone, Constructor)] -pub struct PendingBlockEnv { - /// Configured [`EvmEnv`] for the pending block. - pub evm_env: EvmEnv, +pub struct PendingBlockEnv { + /// Configured [`reth_evm::EvmEnv`] for the pending block. + pub evm_env: EvmEnvFor, /// Origin block for the config - pub origin: PendingBlockEnvOrigin, + pub origin: PendingBlockEnvOrigin, ReceiptTy>, } /// The origin for a configured [`PendingBlockEnv`] @@ -131,15 +130,56 @@ impl PendingBlock { pub fn parent_hash(&self) -> BlockHash { self.executed_block.recovered_block().parent_hash() } + + /// Finds a transaction by hash and returns it along with its corresponding receipt. + /// + /// Returns `None` if the transaction is not found in this block. + pub fn find_transaction_and_receipt_by_hash( + &self, + tx_hash: TxHash, + ) -> Option<(IndexedTx<'_, N::Block>, &N::Receipt)> { + let indexed_tx = self.executed_block.recovered_block().find_indexed(tx_hash)?; + let receipt = self.receipts.get(indexed_tx.index())?; + Some((indexed_tx, receipt)) + } + + /// Returns the rpc transaction receipt for the given transaction hash if it exists. + /// + /// This uses the given converter to turn [`Self::find_transaction_and_receipt_by_hash`] into + /// the rpc format. + pub fn find_and_convert_transaction_receipt( + &self, + tx_hash: TxHash, + converter: &C, + ) -> Option::Receipt, C::Error>> + where + C: RpcConvert, + { + let (tx, receipt) = self.find_transaction_and_receipt_by_hash(tx_hash)?; + let meta = tx.meta(); + let all_receipts = &self.receipts; + + let (gas_used, next_log_index) = + calculate_gas_used_and_next_log_index(meta.index, all_receipts); + + converter + .convert_receipts_with_block( + vec![ConvertReceiptInput { + tx: tx.recovered_tx(), + gas_used: receipt.cumulative_gas_used() - gas_used, + receipt: receipt.clone(), + next_log_index, + meta, + }], + self.executed_block.sealed_block(), + ) + .map(|mut receipts| receipts.pop()) + .transpose() + } } impl From> for BlockState { fn from(pending_block: PendingBlock) -> Self { - Self::new(ExecutedBlockWithTrieUpdates::::new( - pending_block.executed_block.recovered_block, - pending_block.executed_block.execution_output, - pending_block.executed_block.hashed_state, - ExecutedTrieUpdates::Missing, - )) + Self::new(pending_block.executed_block) } } diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 5492e127b77..ec63443da3d 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -24,6 +24,7 @@ use reth_rpc_convert::{RpcBlock, RpcConvert, RpcTxReq}; use reth_rpc_server_types::result::rpc_err; use reth_storage_api::noop::NoopProvider; use revm::{ + context::Block, context_interface::result::ExecutionResult, primitives::{Address, Bytes, TxKind}, Database, @@ -88,7 +89,7 @@ where let tx = resolve_transaction( call, default_gas_limit, - builder.evm().block().basefee, + builder.evm().block().basefee(), chain_id, builder.evm_mut().db_mut(), tx_resp_builder, diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index de3323d61e6..3d099f01188 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -2,11 +2,21 @@ //! //! Transaction wrapper that labels transaction with its origin. 
-use alloy_primitives::B256; +use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_eth::TransactionInfo; use reth_ethereum_primitives::TransactionSigned; use reth_primitives_traits::{NodePrimitives, Recovered, SignedTransaction}; use reth_rpc_convert::{RpcConvert, RpcTransaction}; +use serde::{Deserialize, Serialize}; + +/// Response type for `eth_fillTransaction` RPC method. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FillTransactionResult { + /// RLP-encoded transaction bytes + pub raw: Bytes, + /// Filled transaction object + pub tx: T, +} /// Represents from where a transaction was fetched. #[derive(Debug, Clone, Eq, PartialEq)] diff --git a/crates/rpc/rpc-eth-types/src/utils.rs b/crates/rpc/rpc-eth-types/src/utils.rs index 69f9833af5e..4a613c1915b 100644 --- a/crates/rpc/rpc-eth-types/src/utils.rs +++ b/crates/rpc/rpc-eth-types/src/utils.rs @@ -1,9 +1,28 @@ //! Commonly used code snippets use super::{EthApiError, EthResult}; +use alloy_consensus::TxReceipt; use reth_primitives_traits::{Recovered, SignedTransaction}; use std::future::Future; +/// Calculates the gas used and next log index for a transaction at the given index +pub fn calculate_gas_used_and_next_log_index( + tx_index: u64, + all_receipts: &[impl TxReceipt], +) -> (u64, usize) { + let mut gas_used = 0; + let mut next_log_index = 0; + + if tx_index > 0 { + for receipt in all_receipts.iter().take(tx_index as usize) { + gas_used = receipt.cumulative_gas_used(); + next_log_index += receipt.logs().len(); + } + } + + (gas_used, next_log_index) +} + /// Recovers a [`SignedTransaction`] from an enveloped encoded byte stream. /// /// This is a helper function that returns the appropriate RPC-specific error if the input data is diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index 4f91e7e63c0..65fc3e86e02 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -15,7 +15,7 @@ use alloy_rpc_types_trace::{ }; use futures::{Stream, StreamExt}; use jsonrpsee::core::client::Error as RpcError; -use reth_ethereum_primitives::Receipt; +use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_rpc_api::{clients::DebugApiClient, EthApiClient}; const NOOP_TRACER: &str = include_str!("../assets/noop-tracer.js"); @@ -77,7 +77,7 @@ pub trait DebugApiExt { impl DebugApiExt for T where - T: EthApiClient + T: EthApiClient + DebugApiClient + Sync, { diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs index 301d65a820b..19e0b202dc6 100644 --- a/crates/rpc/rpc-testing-util/tests/it/trace.rs +++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs @@ -8,7 +8,7 @@ use alloy_rpc_types_trace::{ use futures::StreamExt; use jsonrpsee::http_client::HttpClientBuilder; use jsonrpsee_http_client::HttpClient; -use reth_ethereum_primitives::Receipt; +use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_rpc_api_testing_util::{debug::DebugApiExt, trace::TraceApiExt, utils::parse_env_url}; use reth_rpc_eth_api::EthApiClient; use std::time::Instant; @@ -118,6 +118,7 @@ async fn debug_trace_block_entire_chain() { Block, Receipt, Header, + TransactionSigned, >>::block_number(&client) .await .unwrap() @@ -152,6 +153,7 @@ async fn debug_trace_block_opcodes_entire_chain() { Block, Receipt, Header, + TransactionSigned, >>::block_number(&client) .await .unwrap() diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 8fc801b2a54..22745826307 100644 --- 
a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -45,7 +45,7 @@ reth-trie-common.workspace = true alloy-evm = { workspace = true, features = ["overrides"] } alloy-consensus.workspace = true alloy-signer.workspace = true -alloy-signer-local.workspace = true +alloy-signer-local = { workspace = true, features = ["mnemonic"] } alloy-eips = { workspace = true, features = ["kzg"] } alloy-dyn-abi.workspace = true alloy-genesis.workspace = true @@ -63,7 +63,7 @@ alloy-rpc-types-txpool.workspace = true alloy-rpc-types-admin.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["kzg"] } alloy-serde.workspace = true -revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee"] } +revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee", "memory_limit"] } revm-primitives = { workspace = true, features = ["serde"] } # scroll @@ -109,5 +109,9 @@ rand.workspace = true jsonrpsee = { workspace = true, features = ["client"] } [features] -js-tracer = ["revm-inspectors/js-tracer", "reth-rpc-eth-types/js-tracer"] +js-tracer = [ + "revm-inspectors/js-tracer", + "reth-rpc-eth-types/js-tracer", + "reth-rpc-eth-api/js-tracer", +] scroll = ["reth-scroll-evm"] diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index ce548230864..af5e1ae2ef9 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -14,6 +14,7 @@ use reth_network_types::PeerKind; use reth_rpc_api::AdminApiServer; use reth_rpc_server_types::ToRpcResult; use reth_transaction_pool::TransactionPool; +use revm_primitives::keccak256; /// `admin` API implementation. /// @@ -74,34 +75,25 @@ where let mut infos = Vec::with_capacity(peers.len()); for peer in peers { - if let Ok(pk) = id2pk(peer.remote_id) { - infos.push(PeerInfo { - id: pk.to_string(), - name: peer.client_version.to_string(), - enode: peer.enode, - enr: peer.enr, - caps: peer - .capabilities - .capabilities() - .iter() - .map(|cap| cap.to_string()) - .collect(), - network: PeerNetworkInfo { - remote_address: peer.remote_addr, - local_address: peer.local_addr.unwrap_or_else(|| self.network.local_addr()), - inbound: peer.direction.is_incoming(), - trusted: peer.kind.is_trusted(), - static_node: peer.kind.is_static(), - }, - protocols: PeerProtocolInfo { - eth: Some(EthPeerInfo::Info(EthInfo { - version: peer.status.version as u64, - })), - snap: None, - other: Default::default(), - }, - }) - } + infos.push(PeerInfo { + id: keccak256(peer.remote_id.as_slice()).to_string(), + name: peer.client_version.to_string(), + enode: peer.enode, + enr: peer.enr, + caps: peer.capabilities.capabilities().iter().map(|cap| cap.to_string()).collect(), + network: PeerNetworkInfo { + remote_address: peer.remote_addr, + local_address: peer.local_addr.unwrap_or_else(|| self.network.local_addr()), + inbound: peer.direction.is_incoming(), + trusted: peer.kind.is_trusted(), + static_node: peer.kind.is_static(), + }, + protocols: PeerProtocolInfo { + eth: Some(EthPeerInfo::Info(EthInfo { version: peer.status.version as u64 })), + snap: None, + other: Default::default(), + }, + }) } Ok(infos) diff --git a/crates/rpc/rpc/src/aliases.rs b/crates/rpc/rpc/src/aliases.rs index 4e317305ca4..8854f1b607d 100644 --- a/crates/rpc/rpc/src/aliases.rs +++ b/crates/rpc/rpc/src/aliases.rs @@ -1,4 +1,4 @@ -use reth_evm::{ConfigureEvm, SpecFor, TxEnvFor}; +use reth_evm::ConfigureEvm; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_types::EthApiError; @@ -8,7 
+8,6 @@ pub type DynRpcConverter = Box< Primitives = ::Primitives, Network = Network, Error = Error, - TxEnv = TxEnvFor, - Spec = SpecFor, + Evm = Evm, >, >; diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index bca7a85c9dc..b6c42594d68 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -3,8 +3,9 @@ use alloy_consensus::{ BlockHeader, }; use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; +use alloy_evm::env::BlockEnvironment; use alloy_genesis::ChainConfig; -use alloy_primitives::{uint, Address, Bytes, B256}; +use alloy_primitives::{hex::decode, uint, Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_eth::{ @@ -21,11 +22,7 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_errors::RethError; use reth_evm::{execute::Executor, ConfigureEvm, EvmEnvFor, TxEnvFor}; use reth_primitives_traits::{Block as _, BlockBody, ReceiptWithBloom, RecoveredBlock}; -use reth_revm::{ - database::StateProviderDatabase, - db::{CacheDB, State}, - witness::ExecutionWitnessRecord, -}; +use reth_revm::{database::StateProviderDatabase, db::State, witness::ExecutionWitnessRecord}; use reth_rpc_api::DebugApiServer; use reth_rpc_convert::RpcTxReq; use reth_rpc_eth_api::{ @@ -40,7 +37,7 @@ use reth_storage_api::{ }; use reth_tasks::pool::BlockingTaskGuard; use reth_trie_common::{updates::TrieUpdates, HashedPostState}; -use revm::{context_interface::Transaction, state::EvmState, DatabaseCommit}; +use revm::{context::Block, context_interface::Transaction, state::EvmState, DatabaseCommit}; use revm_inspectors::tracing::{ FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, TransactionContext, }; @@ -99,7 +96,8 @@ where self.eth_api() .spawn_with_state_at_block(block.parent_hash().into(), move |state| { let mut results = Vec::with_capacity(block.body().transactions().len()); - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); this.eth_api().apply_pre_execution_changes(&block, &mut db, &evm_env)?; @@ -168,8 +166,6 @@ where .iter() .map(|tx| tx.recover_signer().map_err(Eth::Error::from_eth_err)) .collect::, _>>()? - .into_iter() - .collect() } else { block .body() @@ -177,8 +173,6 @@ where .iter() .map(|tx| tx.recover_signer_unchecked().map_err(Eth::Error::from_eth_err)) .collect::, _>>()? 
- .into_iter() - .collect() }; self.trace_block(Arc::new(block.into_recovered_with_signers(senders)), evm_env, opts).await @@ -233,7 +227,8 @@ where // configure env for the target transaction let tx = transaction.into_recovered(); - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); this.eth_api().apply_pre_execution_changes(&block, &mut db, &evm_env)?; @@ -372,8 +367,8 @@ where let db = db.0; let tx_info = TransactionInfo { - block_number: Some(evm_env.block_env.number.saturating_to()), - base_fee: Some(evm_env.block_env.basefee), + block_number: Some(evm_env.block_env.number().saturating_to()), + base_fee: Some(evm_env.block_env.basefee()), hash: None, block_hash: None, index: None, @@ -419,6 +414,11 @@ where Ok(frame.into()) } + _ => { + // Note: this match is non-exhaustive in case we need to add support for + // additional tracers + Err(EthApiError::Unsupported("unsupported tracer").into()) + } }, #[cfg(not(feature = "js-tracer"))] GethDebugTracerType::JsTracer(_) => { @@ -533,7 +533,8 @@ where .spawn_with_state_at_block(at.into(), move |state| { // the outer vec for the bundles let mut all_bundles = Vec::with_capacity(bundles.len()); - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); if replay_block_txs { // only need to replay the transactions in the block if not all transactions are @@ -589,8 +590,8 @@ where results.push(trace); } // Increment block_env number and timestamp for the next bundle - evm_env.block_env.number += uint!(1_U256); - evm_env.block_env.timestamp += uint!(12_U256); + evm_env.block_env.inner_mut().number += uint!(1_U256); + evm_env.block_env.inner_mut().timestamp += uint!(12_U256); all_bundles.push(results); } @@ -748,8 +749,8 @@ where .map(|c| c.tx_index.map(|i| i as u64)) .unwrap_or_default(), block_hash: transaction_context.as_ref().map(|c| c.block_hash).unwrap_or_default(), - block_number: Some(evm_env.block_env.number.saturating_to()), - base_fee: Some(evm_env.block_env.basefee), + block_number: Some(evm_env.block_env.number().saturating_to()), + base_fee: Some(evm_env.block_env.basefee()), }; if let Some(tracer) = tracer { @@ -845,6 +846,11 @@ where return Ok((frame.into(), res.state)); } + _ => { + // Note: this match is non-exhaustive in case we need to add support for + // additional tracers + Err(EthApiError::Unsupported("unsupported tracer").into()) + } }, #[cfg(not(feature = "js-tracer"))] GethDebugTracerType::JsTracer(_) => { @@ -1144,8 +1150,38 @@ where Ok(()) } - async fn debug_db_get(&self, _key: String) -> RpcResult<()> { - Ok(()) + /// `debug_db_get` - database key lookup + /// + /// Currently supported: + /// * Contract bytecode associated with a code hash. The key format is: `<0x63 prefix><32-byte code hash>` + /// * Prefix byte: 0x63 (required) + /// * Code hash: 32 bytes + /// Must be provided as either: + /// * Hex string: "0x63..." (66 hex characters after 0x) + /// * Raw byte string: 33 raw bytes (prefix byte followed by the code hash) + /// See Geth impl: + async fn debug_db_get(&self, key: String) -> RpcResult> { + let key_bytes = if key.starts_with("0x") { + decode(&key).map_err(|_| EthApiError::InvalidParams("Invalid hex key".to_string()))?
+ } else { + key.into_bytes() + }; + + if key_bytes.len() != 33 { + return Err(EthApiError::InvalidParams(format!( + "Key must be 33 bytes, got {}", + key_bytes.len() + )) + .into()); + } + if key_bytes[0] != 0x63 { + return Err(EthApiError::InvalidParams("Key prefix must be 0x63".to_string()).into()); + } + + let code_hash = B256::from_slice(&key_bytes[1..33]); + + // No block ID is provided, so it defaults to the latest block + self.debug_code_by_hash(code_hash, None).await.map_err(Into::into) } async fn debug_dump_block(&self, _number: BlockId) -> RpcResult<()> { diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index a0e0bd30931..b7e62fadb75 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -5,6 +5,7 @@ use alloy_rpc_types_eth::{ }; use alloy_serde::JsonStorageKey; use jsonrpsee::core::RpcResult as Result; +use reth_primitives_traits::TxTy; use reth_rpc_api::{EngineEthApiServer, EthApiServer}; use reth_rpc_convert::RpcTxReq; /// Re-export for convenience @@ -16,7 +17,7 @@ use tracing_futures::Instrument; macro_rules! engine_span { () => { - tracing::trace_span!(target: "rpc", "engine") + tracing::info_span!(target: "rpc", "engine") }; } @@ -49,6 +50,7 @@ where RpcBlock, RpcReceipt, RpcHeader, + TxTy, > + FullEthApiTypes, EthFilter: EngineEthFilter, { diff --git a/crates/rpc/rpc/src/eth/builder.rs b/crates/rpc/rpc/src/eth/builder.rs index c34d268d64a..ff01903736b 100644 --- a/crates/rpc/rpc/src/eth/builder.rs +++ b/crates/rpc/rpc/src/eth/builder.rs @@ -44,6 +44,7 @@ pub struct EthApiBuilder { pending_block_kind: PendingBlockKind, raw_tx_forwarder: ForwardConfig, send_raw_transaction_sync_timeout: Duration, + evm_memory_limit: u64, } impl @@ -94,6 +95,7 @@ impl EthApiBuilder { pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } = self; EthApiBuilder { components, @@ -114,6 +116,7 @@ impl EthApiBuilder { pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } } } @@ -145,6 +148,7 @@ where pending_block_kind: PendingBlockKind::Full, raw_tx_forwarder: ForwardConfig::default(), send_raw_transaction_sync_timeout: Duration::from_secs(30), + evm_memory_limit: (1 << 32) - 1, } } } @@ -183,6 +187,7 @@ where pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } = self; EthApiBuilder { components, @@ -203,6 +208,7 @@ where pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } } @@ -230,6 +236,7 @@ where pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } = self; EthApiBuilder { components, @@ -250,6 +257,7 @@ where pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } } @@ -477,6 +485,7 @@ where pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } = self; let provider = components.provider().clone(); @@ -517,6 +526,7 @@ where pending_block_kind, raw_tx_forwarder.forwarder_client(), send_raw_transaction_sync_timeout, + evm_memory_limit, ) } @@ -541,4 +551,10 @@ where self.send_raw_transaction_sync_timeout = timeout; self } + + /// Sets the maximum memory the EVM can allocate per RPC request. 
+ pub const fn evm_memory_limit(mut self, memory_limit: u64) -> Self { + self.evm_memory_limit = memory_limit; + self + } } diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 0303c78e4be..aa58ea014a8 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -2,13 +2,13 @@ use alloy_consensus::{transaction::TxHashRef, EnvKzgSettings, Transaction as _}; use alloy_eips::eip7840::BlobParams; +use alloy_evm::env::BlockEnvironment; use alloy_primitives::{uint, Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ConfigureEvm, Evm}; - -use reth_revm::{database::StateProviderDatabase, db::CacheDB}; +use reth_revm::{database::StateProviderDatabase, State}; use reth_rpc_eth_api::{ helpers::{Call, EthTransactions, LoadPendingBlock}, EthCallBundleApiServer, FromEthApiError, FromEvmError, @@ -18,7 +18,9 @@ use reth_tasks::pool::BlockingTaskGuard; use reth_transaction_pool::{ EthBlobTransactionSidecar, EthPoolTransaction, PoolPooledTx, PoolTransaction, TransactionPool, }; -use revm::{context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef}; +use revm::{ + context::Block, context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef, +}; use std::sync::Arc; /// `Eth` bundle implementation. @@ -89,18 +91,18 @@ where let (mut evm_env, at) = self.eth_api().evm_env_at(block_id).await?; if let Some(coinbase) = coinbase { - evm_env.block_env.beneficiary = coinbase; + evm_env.block_env.inner_mut().beneficiary = coinbase; } // need to adjust the timestamp for the next block if let Some(timestamp) = timestamp { - evm_env.block_env.timestamp = U256::from(timestamp); + evm_env.block_env.inner_mut().timestamp = U256::from(timestamp); } else { - evm_env.block_env.timestamp += uint!(12_U256); + evm_env.block_env.inner_mut().timestamp += uint!(12_U256); } if let Some(difficulty) = difficulty { - evm_env.block_env.difficulty = U256::from(difficulty); + evm_env.block_env.inner_mut().difficulty = U256::from(difficulty); } // Validate that the bundle does not contain more than MAX_BLOB_NUMBER_PER_BLOCK blob @@ -111,7 +113,7 @@ where .eth_api() .provider() .chain_spec() - .blob_params_at_timestamp(evm_env.block_env.timestamp.saturating_to()) + .blob_params_at_timestamp(evm_env.block_env.timestamp().saturating_to()) .unwrap_or_else(BlobParams::cancun); if transactions.iter().filter_map(|tx| tx.blob_gas_used()).sum::() > blob_params.max_blob_gas_per_block() @@ -125,31 +127,31 @@ where } // default to call gas limit unless user requests a smaller limit - evm_env.block_env.gas_limit = self.inner.eth_api.call_gas_limit(); + evm_env.block_env.inner_mut().gas_limit = self.inner.eth_api.call_gas_limit(); if let Some(gas_limit) = gas_limit { - if gas_limit > evm_env.block_env.gas_limit { + if gas_limit > evm_env.block_env.gas_limit() { return Err( EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh).into() ) } - evm_env.block_env.gas_limit = gas_limit; + evm_env.block_env.inner_mut().gas_limit = gas_limit; } if let Some(base_fee) = base_fee { - evm_env.block_env.basefee = base_fee.try_into().unwrap_or(u64::MAX); + evm_env.block_env.inner_mut().basefee = base_fee.try_into().unwrap_or(u64::MAX); } - let state_block_number = evm_env.block_env.number; + let state_block_number = evm_env.block_env.number(); // use the block number of the request - 
evm_env.block_env.number = U256::from(block_number); + evm_env.block_env.inner_mut().number = U256::from(block_number); let eth_api = self.eth_api().clone(); self.eth_api() .spawn_with_state_at_block(at, move |state| { - let coinbase = evm_env.block_env.beneficiary; - let basefee = evm_env.block_env.basefee; - let db = CacheDB::new(StateProviderDatabase::new(state)); + let coinbase = evm_env.block_env.beneficiary(); + let basefee = evm_env.block_env.basefee(); + let db = State::builder().with_database(StateProviderDatabase::new(state)).build(); let initial_coinbase = db .basic_ref(coinbase) diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 61082f4f929..4084168c4f6 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -30,8 +30,8 @@ use reth_tasks::{ TaskSpawner, TokioTaskExecutor, }; use reth_transaction_pool::{ - noop::NoopTransactionPool, AddedTransactionOutcome, BatchTxProcessor, BatchTxRequest, - TransactionPool, + blobstore::BlobSidecarConverter, noop::NoopTransactionPool, AddedTransactionOutcome, + BatchTxProcessor, BatchTxRequest, TransactionPool, }; use tokio::sync::{broadcast, mpsc, Mutex}; @@ -155,6 +155,7 @@ where pending_block_kind: PendingBlockKind, raw_tx_forwarder: ForwardConfig, send_raw_transaction_sync_timeout: Duration, + evm_memory_limit: u64, ) -> Self { let inner = EthApiInner::new( components, @@ -173,6 +174,7 @@ where pending_block_kind, raw_tx_forwarder.forwarder_client(), send_raw_transaction_sync_timeout, + evm_memory_limit, ); Self { inner: Arc::new(inner) } @@ -315,6 +317,12 @@ pub struct EthApiInner { /// Timeout duration for `send_raw_transaction_sync` RPC method. send_raw_transaction_sync_timeout: Duration, + + /// Blob sidecar converter + blob_sidecar_converter: BlobSidecarConverter, + + /// Maximum memory the EVM can allocate per RPC request. + evm_memory_limit: u64, } impl EthApiInner @@ -341,6 +349,7 @@ where pending_block_kind: PendingBlockKind, raw_tx_forwarder: Option, send_raw_transaction_sync_timeout: Duration, + evm_memory_limit: u64, ) -> Self { let signers = parking_lot::RwLock::new(Default::default()); // get the block number of the latest block @@ -382,6 +391,8 @@ where tx_batch_sender, pending_block_kind, send_raw_transaction_sync_timeout, + blob_sidecar_converter: BlobSidecarConverter::new(), + evm_memory_limit, } } } @@ -553,6 +564,18 @@ where pub const fn send_raw_transaction_sync_timeout(&self) -> Duration { self.send_raw_transaction_sync_timeout } + + /// Returns a handle to the blob sidecar converter. + #[inline] + pub const fn blob_sidecar_converter(&self) -> &BlobSidecarConverter { + &self.blob_sidecar_converter + } + + /// Returns the EVM memory limit. 
+ #[inline] + pub const fn evm_memory_limit(&self) -> u64 { + self.evm_memory_limit + } } #[cfg(test)] @@ -701,7 +724,7 @@ mod tests { /// Invalid block range #[tokio::test] async fn test_fee_history_empty() { - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _, _>>::fee_history( &build_test_eth_api(NoopProvider::default()), U64::from(1), BlockNumberOrTag::Latest, @@ -723,7 +746,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _, _>>::fee_history( ð_api, U64::from(newest_block + 1), newest_block.into(), @@ -746,7 +769,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _, _>>::fee_history( ð_api, U64::from(1), (newest_block + 1000).into(), @@ -769,7 +792,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _, _>>::fee_history( ð_api, U64::from(0), newest_block.into(), diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 4c129546af2..ce0cdcf4160 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -1,6 +1,7 @@ //! `eth_` `Filter` RPC handler implementation use alloy_consensus::BlockHeader; +use alloy_eips::BlockNumberOrTag; use alloy_primitives::{Sealable, TxHash}; use alloy_rpc_types_eth::{ BlockNumHash, Filter, FilterBlockOption, FilterChanges, FilterId, Log, @@ -17,6 +18,7 @@ use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_errors::ProviderError; use reth_primitives_traits::{NodePrimitives, SealedHeader}; use reth_rpc_eth_api::{ + helpers::{EthBlocks, LoadReceipt}, EngineEthFilter, EthApiTypes, EthFilterApiServer, FullEthApiTypes, QueryLimits, RpcConvert, RpcNodeCoreExt, RpcTransaction, }; @@ -48,7 +50,11 @@ use tracing::{debug, error, trace}; impl EngineEthFilter for EthFilter where - Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, + Eth: FullEthApiTypes + + RpcNodeCoreExt + + LoadReceipt + + EthBlocks + + 'static, { /// Returns logs matching given filter object, no query limits fn logs( @@ -194,7 +200,11 @@ where impl EthFilter where - Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, + Eth: FullEthApiTypes + + RpcNodeCoreExt + + LoadReceipt + + EthBlocks + + 'static, { /// Access the underlying provider. 
fn provider(&self) -> &Eth::Provider { @@ -289,11 +299,12 @@ where /// Handler for `eth_getFilterLogs` pub async fn filter_logs(&self, id: FilterId) -> Result, EthFilterError> { let filter = { - let filters = self.inner.active_filters.inner.lock().await; - if let FilterKind::Log(ref filter) = - filters.get(&id).ok_or_else(|| EthFilterError::FilterNotFound(id.clone()))?.kind - { - *filter.clone() + let mut filters = self.inner.active_filters.inner.lock().await; + let filter = + filters.get_mut(&id).ok_or_else(|| EthFilterError::FilterNotFound(id.clone()))?; + if let FilterKind::Log(ref inner_filter) = filter.kind { + filter.last_poll_timestamp = Instant::now(); + *inner_filter.clone() } else { // Not a log filter return Err(EthFilterError::FilterNotFound(id)) @@ -316,7 +327,7 @@ where #[async_trait] impl EthFilterApiServer> for EthFilter where - Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, + Eth: FullEthApiTypes + RpcNodeCoreExt + LoadReceipt + EthBlocks + 'static, { /// Handler for `eth_newFilter` async fn new_filter(&self, filter: Filter) -> RpcResult { @@ -357,8 +368,6 @@ where } }; - //let filter = FilterKind::PendingTransaction(transaction_kind); - // Install the filter and propagate any errors self.inner.install_filter(transaction_kind).await } @@ -435,6 +444,8 @@ impl EthFilterInner where Eth: RpcNodeCoreExt + EthApiTypes + + LoadReceipt + + EthBlocks + 'static, { /// Access the underlying provider. @@ -488,10 +499,43 @@ where Ok(all_logs) } FilterBlockOption::Range { from_block, to_block } => { - // compute the range - let info = self.provider().chain_info()?; + // Handle special case where from block is pending + if from_block.is_some_and(|b| b.is_pending()) { + let to_block = to_block.unwrap_or(BlockNumberOrTag::Pending); + if !(to_block.is_pending() || to_block.is_number()) { + // always empty range + return Ok(Vec::new()); + } + // Try to get pending block and receipts + if let Ok(Some(pending_block)) = self.eth_api.local_pending_block().await { + if let BlockNumberOrTag::Number(to_block) = to_block && + to_block < pending_block.block.number() + { + // this block range is empty based on the user input + return Ok(Vec::new()); + } + + let info = self.provider().chain_info()?; + if pending_block.block.number() > info.best_number { + // only consider the pending block if it is ahead of the chain + let mut all_logs = Vec::new(); + let timestamp = pending_block.block.timestamp(); + let block_num_hash = pending_block.block.num_hash(); + append_matching_block_logs( + &mut all_logs, + ProviderOrBlock::::Block(pending_block.block), + &filter, + block_num_hash, + &pending_block.receipts, + false, // removed = false for pending blocks + timestamp, + )?; + return Ok(all_logs); + } + } + } - // we start at the most recent block if unset in filter + let info = self.provider().chain_info()?; let start_block = info.best_number; let from = from_block .map(|num| self.provider().convert_block_number(num)) @@ -913,7 +957,11 @@ where /// Represents different modes for processing block ranges when filtering logs enum RangeMode< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > { /// Use cache-based processing for recent blocks Cached(CachedMode), @@ -922,7 +970,11 @@ enum RangeMode< } impl< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > RangeMode { /// Creates a new `RangeMode`. 
@@ -994,14 +1046,22 @@ impl< /// Mode for processing blocks using cache optimization for recent blocks struct CachedMode< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > { filter_inner: Arc>, headers_iter: std::vec::IntoIter::Header>>, } impl< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > CachedMode { async fn next(&mut self) -> Result>, EthFilterError> { @@ -1028,7 +1088,11 @@ type ReceiptFetchFuture
= /// Mode for processing blocks using range queries for older blocks struct RangeBlockMode< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > { filter_inner: Arc>, iter: Peekable::Header>>>, @@ -1039,7 +1103,11 @@ struct RangeBlockMode< } impl< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > RangeBlockMode { async fn next(&mut self) -> Result>, EthFilterError> { diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index a76e146042d..ad9f020bd0c 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -1,7 +1,6 @@ //! Contains RPC handler implementations specific to endpoints that call/execute within evm. use crate::EthApi; -use reth_evm::{SpecFor, TxEnvFor}; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall}, @@ -13,12 +12,7 @@ impl EthCall for EthApi where N: RpcNodeCore, EthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = EthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } @@ -26,12 +20,7 @@ impl Call for EthApi where N: RpcNodeCore, EthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = EthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -42,17 +31,17 @@ where fn max_simulate_blocks(&self) -> u64 { self.inner.max_simulate_blocks() } + + #[inline] + fn evm_memory_limit(&self) -> u64 { + self.inner.evm_memory_limit() + } } impl EstimateCall for EthApi where N: RpcNodeCore, EthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = EthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index 60d6a151f9b..2c18245d542 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -1,33 +1,14 @@ //! An abstraction over ethereum signers. -use std::collections::HashMap; - -use crate::EthApi; use alloy_dyn_abi::TypedData; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{eip191_hash_message, Address, Signature, B256}; use alloy_signer::SignerSync; -use alloy_signer_local::PrivateKeySigner; -use reth_rpc_convert::{RpcConvert, RpcTypes, SignableTxRequest}; -use reth_rpc_eth_api::{ - helpers::{signer::Result, AddDevSigners, EthSigner}, - FromEvmError, RpcNodeCore, -}; -use reth_rpc_eth_types::{EthApiError, SignError}; -use reth_storage_api::ProviderTx; - -impl AddDevSigners for EthApi -where - N: RpcNodeCore, - EthApiError: FromEvmError, - Rpc: RpcConvert< - Network: RpcTypes>>, - >, -{ - fn with_dev_accounts(&self) { - *self.inner.signers().write() = DevSigner::random_signers(20) - } -} +use alloy_signer_local::{coins_bip39::English, MnemonicBuilder, PrivateKeySigner}; +use reth_rpc_convert::SignableTxRequest; +use reth_rpc_eth_api::helpers::{signer::Result, EthSigner}; +use reth_rpc_eth_types::SignError; +use std::collections::HashMap; /// Holds developer keys #[derive(Debug, Clone)] @@ -55,6 +36,32 @@ impl DevSigner { signers } + /// Generates dev signers deterministically from a fixed mnemonic. 
+ /// Uses the Ethereum derivation path: `m/44'/60'/0'/0/{index}` + pub fn from_mnemonic>( + mnemonic: &str, + num: u32, + ) -> Vec + 'static>> { + let mut signers = Vec::with_capacity(num as usize); + + for i in 0..num { + let sk = MnemonicBuilder::::default() + .phrase(mnemonic) + .index(i) + .expect("invalid derivation path") + .build() + .expect("failed to build signer from mnemonic"); + + let address = sk.address(); + let addresses = vec![address]; + let accounts = HashMap::from([(address, sk)]); + + signers.push(Box::new(Self { addresses, accounts }) as Box>); + } + + signers + } + fn get_key(&self, account: Address) -> Result<&PrivateKeySigner> { self.accounts.get(&account).ok_or(SignError::NoAccount) } diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 4fa39112166..8f2c5bf93ef 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -3,14 +3,22 @@ use std::time::Duration; use crate::EthApi; -use alloy_primitives::{hex, Bytes, B256}; +use alloy_consensus::BlobTransactionValidationError; +use alloy_eips::{eip7594::BlobTransactionSidecarVariant, BlockId, Typed2718}; +use alloy_primitives::{hex, B256}; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_primitives_traits::{AlloyBlockHeader, Recovered, WithEncoded}; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction}, FromEvmError, RpcNodeCore, }; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; -use reth_transaction_pool::{AddedTransactionOutcome, PoolTransaction, TransactionPool}; +use reth_rpc_eth_types::{error::RpcPoolError, EthApiError}; +use reth_storage_api::BlockReaderIdExt; +use reth_transaction_pool::{ + error::Eip4844PoolTransactionError, AddedTransactionOutcome, EthBlobTransactionSidecar, + EthPoolTransaction, PoolPooledTx, PoolTransaction, TransactionPool, +}; impl EthTransactions for EthApi where @@ -28,13 +36,61 @@ where self.inner.send_raw_transaction_sync_timeout() } - /// Decodes and recovers the transaction and submits it to the pool. - /// - /// Returns the hash of the transaction. - async fn send_raw_transaction(&self, tx: Bytes) -> Result { - let recovered = recover_raw_transaction(&tx)?; + async fn send_transaction( + &self, + tx: WithEncoded>>, + ) -> Result { + let (tx, recovered) = tx.split(); + let mut pool_transaction = + ::Transaction::from_pooled(recovered); + + // TODO: remove this after Osaka transition + // Convert legacy blob sidecars to EIP-7594 format + if pool_transaction.is_eip4844() { + let EthBlobTransactionSidecar::Present(sidecar) = pool_transaction.take_blob() else { + return Err(EthApiError::PoolError(RpcPoolError::Eip4844( + Eip4844PoolTransactionError::MissingEip4844BlobSidecar, + ))); + }; - let pool_transaction = ::Transaction::from_pooled(recovered); + let sidecar = match sidecar { + BlobTransactionSidecarVariant::Eip4844(sidecar) => { + let latest = self + .provider() + .latest_header()? 
+ .ok_or(EthApiError::HeaderNotFound(BlockId::latest()))?; + // Convert to EIP-7594 if next block is Osaka + if self + .provider() + .chain_spec() + .is_osaka_active_at_timestamp(latest.timestamp().saturating_add(12)) + { + BlobTransactionSidecarVariant::Eip7594( + self.blob_sidecar_converter().convert(sidecar).await.ok_or_else( + || { + RpcPoolError::Eip4844( + Eip4844PoolTransactionError::InvalidEip4844Blob( + BlobTransactionValidationError::InvalidProof, + ), + ) + }, + )?, + ) + } else { + BlobTransactionSidecarVariant::Eip4844(sidecar) + } + } + sidecar => sidecar, + }; + + pool_transaction = + EthPoolTransaction::try_from_eip4844(pool_transaction.into_consensus(), sidecar) + .ok_or_else(|| { + RpcPoolError::Eip4844( + Eip4844PoolTransactionError::MissingEip4844BlobSidecar, + ) + })?; + } // forward the transaction to the specific endpoint if configured. if let Some(client) = self.raw_tx_forwarder() { @@ -77,25 +133,55 @@ where #[cfg(test)] mod tests { use super::*; - use alloy_primitives::{hex_literal::hex, Bytes}; - use reth_chainspec::ChainSpecProvider; + use crate::eth::helpers::types::EthRpcConverter; + use alloy_consensus::{Block, Header, SidecarBuilder, SimpleCoder, Transaction}; + use alloy_primitives::{Address, U256}; + use alloy_rpc_types_eth::request::TransactionRequest; + use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_provider::test_utils::NoopProvider; - use reth_rpc_eth_api::helpers::EthTransactions; - use reth_transaction_pool::{test_utils::testing_pool, TransactionPool}; + use reth_provider::{ + test_utils::{ExtendedAccount, MockEthProvider}, + ChainSpecProvider, + }; + use reth_rpc_eth_api::node::RpcNodeCoreAdapter; + use reth_transaction_pool::test_utils::{testing_pool, TestPool}; + use revm_primitives::Bytes; + use std::collections::HashMap; - #[tokio::test] - async fn send_raw_transaction() { - let noop_provider = NoopProvider::default(); - let noop_network_provider = NoopNetwork::default(); + fn mock_eth_api( + accounts: HashMap, + ) -> EthApi< + RpcNodeCoreAdapter, + EthRpcConverter, + > { + let mock_provider = MockEthProvider::default() + .with_chain_spec(ChainSpecBuilder::mainnet().cancun_activated().build()); + mock_provider.extend_accounts(accounts); + let evm_config = EthEvmConfig::new(mock_provider.chain_spec()); let pool = testing_pool(); - let evm_config = EthEvmConfig::new(noop_provider.chain_spec()); - let eth_api = - EthApi::builder(noop_provider.clone(), pool.clone(), noop_network_provider, evm_config) - .build(); + let genesis_header = Header { + number: 0, + gas_limit: 30_000_000, + timestamp: 1, + excess_blob_gas: Some(0), + base_fee_per_gas: Some(1000000000), + blob_gas_used: Some(0), + ..Default::default() + }; + + let genesis_hash = B256::ZERO; + mock_provider.add_block(genesis_hash, Block::new(genesis_header, Default::default())); + + EthApi::builder(mock_provider, pool, NoopNetwork::default(), evm_config).build() + } + + #[tokio::test] + async fn send_raw_transaction() { + let eth_api = mock_eth_api(Default::default()); + let pool = eth_api.pool(); // https://etherscan.io/tx/0xa694b71e6c128a2ed8e2e0f6770bddbe52e3bb8f10e8472f9a79ab81497a8b5d let tx_1 = Bytes::from(hex!( @@ -126,4 +212,205 @@ mod tests { assert!(pool.get(&tx_1_result).is_some(), "tx1 not found in the pool"); assert!(pool.get(&tx_2_result).is_some(), "tx2 not found in the pool"); } + + #[tokio::test] + async fn test_fill_transaction_fills_chain_id() { + let address = Address::random(); + 
let accounts = HashMap::from([( + address, + ExtendedAccount::new(0, U256::from(10_000_000_000_000_000_000u64)), // 10 ETH + )]); + + let eth_api = mock_eth_api(accounts); + + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + gas: Some(21_000), + ..Default::default() + }; + + let filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + // Should fill with the chain id from provider + assert!(filled.tx.chain_id().is_some()); + } + + #[tokio::test] + async fn test_fill_transaction_fills_nonce() { + let address = Address::random(); + let nonce = 42u64; + + let accounts = HashMap::from([( + address, + ExtendedAccount::new(nonce, U256::from(1_000_000_000_000_000_000u64)), // 1 ETH + )]); + + let eth_api = mock_eth_api(accounts); + + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + value: Some(U256::from(1000)), + gas: Some(21_000), + ..Default::default() + }; + + let filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + assert_eq!(filled.tx.nonce(), nonce); + } + + #[tokio::test] + async fn test_fill_transaction_preserves_provided_fields() { + let address = Address::random(); + let provided_nonce = 100u64; + let provided_gas_limit = 50_000u64; + + let accounts = HashMap::from([( + address, + ExtendedAccount::new(42, U256::from(10_000_000_000_000_000_000u64)), + )]); + + let eth_api = mock_eth_api(accounts); + + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + value: Some(U256::from(1000)), + nonce: Some(provided_nonce), + gas: Some(provided_gas_limit), + ..Default::default() + }; + + let filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + // Should preserve the provided nonce and gas limit + assert_eq!(filled.tx.nonce(), provided_nonce); + assert_eq!(filled.tx.gas_limit(), provided_gas_limit); + } + + #[tokio::test] + async fn test_fill_transaction_fills_all_missing_fields() { + let address = Address::random(); + + let balance = U256::from(100u128) * U256::from(1_000_000_000_000_000_000u128); + let accounts = HashMap::from([(address, ExtendedAccount::new(5, balance))]); + + let eth_api = mock_eth_api(accounts); + + // Create a simple transfer transaction + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + ..Default::default() + }; + + let filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + assert!(filled.tx.is_eip1559()); + } + + #[tokio::test] + async fn test_fill_transaction_eip4844_blob_fee() { + let address = Address::random(); + let accounts = HashMap::from([( + address, + ExtendedAccount::new(0, U256::from(10_000_000_000_000_000_000u64)), + )]); + + let eth_api = mock_eth_api(accounts); + + let mut builder = SidecarBuilder::::new(); + builder.ingest(b"dummy blob"); + + // EIP-4844 blob transaction with versioned hashes but no blob fee + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + sidecar: Some(builder.build().unwrap()), + ..Default::default() + }; + + let filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + // Blob transaction should have max_fee_per_blob_gas filled + assert!( + filled.tx.max_fee_per_blob_gas().is_some(), + "max_fee_per_blob_gas should be filled for blob tx" + ); + assert!( + filled.tx.blob_versioned_hashes().is_some(), 
+ "blob_versioned_hashes should be preserved" + ); + } + + #[tokio::test] + async fn test_fill_transaction_eip4844_preserves_blob_fee() { + let address = Address::random(); + let accounts = HashMap::from([( + address, + ExtendedAccount::new(0, U256::from(10_000_000_000_000_000_000u64)), + )]); + + let eth_api = mock_eth_api(accounts); + + let provided_blob_fee = 5000000u128; + + let mut builder = SidecarBuilder::::new(); + builder.ingest(b"dummy blob"); + + // EIP-4844 blob transaction with blob fee already set + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + transaction_type: Some(3), // EIP-4844 + sidecar: Some(builder.build().unwrap()), + max_fee_per_blob_gas: Some(provided_blob_fee), // Already set + ..Default::default() + }; + + let filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + // Should preserve the provided blob fee + assert_eq!( + filled.tx.max_fee_per_blob_gas(), + Some(provided_blob_fee), + "should preserve provided max_fee_per_blob_gas" + ); + } + + #[tokio::test] + async fn test_fill_transaction_non_blob_tx_no_blob_fee() { + let address = Address::random(); + let accounts = HashMap::from([( + address, + ExtendedAccount::new(0, U256::from(10_000_000_000_000_000_000u64)), + )]); + + let eth_api = mock_eth_api(accounts); + + // EIP-1559 transaction without blob fields + let tx_req = TransactionRequest { + from: Some(address), + to: Some(Address::random().into()), + transaction_type: Some(2), // EIP-1559 + ..Default::default() + }; + + let filled = + eth_api.fill_transaction(tx_req).await.expect("fill_transaction should succeed"); + + // Non-blob transaction should NOT have blob fee filled + assert!( + filled.tx.max_fee_per_blob_gas().is_none(), + "max_fee_per_blob_gas should not be set for non-blob tx" + ); + } } diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 1c7982f80fd..985cdf3129e 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -101,6 +101,7 @@ where kind: SubscriptionKind, params: Option, ) -> Result<(), ErrorObject<'static>> { + #[allow(unreachable_patterns)] match kind { SubscriptionKind::NewHeads => { pipe_from_stream(accepted_sink, self.new_headers_stream()).await @@ -199,6 +200,10 @@ where Ok(()) } + _ => { + // TODO: implement once https://github.com/alloy-rs/alloy/pull/2974 is released + Err(invalid_params_rpc_err("Unsupported subscription kind")) + } } } } diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 8c7d382c173..e085b0d1545 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -2,7 +2,7 @@ use alloy_consensus::{transaction::TxHashRef, BlockHeader}; use alloy_eips::BlockNumberOrTag; -use alloy_evm::overrides::apply_block_overrides; +use alloy_evm::{env::BlockEnvironment, overrides::apply_block_overrides}; use alloy_primitives::U256; use alloy_rpc_types_eth::BlockId; use alloy_rpc_types_mev::{ @@ -12,7 +12,7 @@ use alloy_rpc_types_mev::{ use jsonrpsee::core::RpcResult; use reth_evm::{ConfigureEvm, Evm}; use reth_primitives_traits::Recovered; -use reth_revm::{database::StateProviderDatabase, db::CacheDB}; +use reth_revm::{database::StateProviderDatabase, State}; use reth_rpc_api::MevSimApiServer; use reth_rpc_eth_api::{ helpers::{block::LoadBlock, Call, EthTransactions}, @@ -22,7 +22,9 @@ use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_storage_api::ProviderTx; use 
reth_tasks::pool::BlockingTaskGuard; use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool}; -use revm::{context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef}; +use revm::{ + context::Block, context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef, +}; use std::{sync::Arc, time::Duration}; use tracing::trace; @@ -243,12 +245,13 @@ where .spawn_with_state_at_block(current_block_id, move |state| { // Setup environment let current_block_number = current_block.number(); - let coinbase = evm_env.block_env.beneficiary; - let basefee = evm_env.block_env.basefee; - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let coinbase = evm_env.block_env.beneficiary(); + let basefee = evm_env.block_env.basefee(); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); // apply overrides - apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env); + apply_block_overrides(block_overrides, &mut db, evm_env.block_env.inner_mut()); let initial_coinbase_balance = DatabaseRef::basic_ref(&db, coinbase) .map_err(EthApiError::from_eth_err)? @@ -425,7 +428,7 @@ where let timeout = override_timeout .map(Duration::from_secs) - .filter(|&custom_duration| custom_duration <= MAX_SIM_TIMEOUT) + .map(|d| d.min(MAX_SIM_TIMEOUT)) .unwrap_or(DEFAULT_SIM_TIMEOUT); let bundle_res = diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 92698e6eca2..334e8d7dea4 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -12,6 +12,7 @@ use alloy_rpc_types_trace::{ }; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, types::ErrorObjectOwned}; +use reth_primitives_traits::TxTy; use reth_rpc_api::{EthApiServer, OtterscanServer}; use reth_rpc_convert::RpcTxReq; use reth_rpc_eth_api::{ @@ -73,6 +74,7 @@ where RpcBlock, RpcReceipt, RpcHeader, + TxTy, > + EthTransactions + TraceExt + 'static, diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 767082cc700..daa05c9416b 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -20,7 +20,7 @@ use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardfork, MAINNET, SEPOLIA}; use reth_evm::ConfigureEvm; use reth_primitives_traits::{BlockBody, BlockHeader}; -use reth_revm::{database::StateProviderDatabase, db::CacheDB}; +use reth_revm::{database::StateProviderDatabase, State}; use reth_rpc_api::TraceApiServer; use reth_rpc_convert::RpcTxReq; use reth_rpc_eth_api::{ @@ -159,7 +159,8 @@ where self.eth_api() .spawn_with_state_at_block(at, move |state| { let mut results = Vec::with_capacity(calls.len()); - let mut db = CacheDB::new(StateProviderDatabase::new(state)); + let mut db = + State::builder().with_database(StateProviderDatabase::new(state)).build(); let mut calls = calls.into_iter().peekable(); @@ -363,7 +364,7 @@ where ) -> Result, Eth::Error> { // We'll reuse the matcher across multiple blocks that are traced in parallel let matcher = Arc::new(filter.matcher()); - let TraceFilter { from_block, to_block, after, count, .. } = filter; + let TraceFilter { from_block, to_block, mut after, count, .. } = filter; let start = from_block.unwrap_or(0); let latest_block = self.provider().best_block_number().map_err(Eth::Error::from_eth_err)?; @@ -389,80 +390,97 @@ where .into()) } - // fetch all blocks in that range - let blocks = self - .provider() - .recovered_block_range(start..=end) - .map_err(Eth::Error::from_eth_err)? 
- .into_iter() - .map(Arc::new) - .collect::>(); - - // trace all blocks - let mut block_traces = Vec::with_capacity(blocks.len()); - for block in &blocks { - let matcher = matcher.clone(); - let traces = self.eth_api().trace_block_until( - block.hash().into(), - Some(block.clone()), - None, - TracingInspectorConfig::default_parity(), - move |tx_info, mut ctx| { - let mut traces = ctx - .take_inspector() - .into_parity_builder() - .into_localized_transaction_traces(tx_info); - traces.retain(|trace| matcher.matches(&trace.trace)); - Ok(Some(traces)) - }, - ); - block_traces.push(traces); - } - - let block_traces = futures::future::try_join_all(block_traces).await?; - let mut all_traces = block_traces - .into_iter() - .flatten() - .flat_map(|traces| traces.into_iter().flatten().flat_map(|traces| traces.into_iter())) - .collect::>(); - - // add reward traces for all blocks - for block in &blocks { - if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? { - all_traces.extend( - self.extract_reward_traces( - block.header(), - block.body().ommers(), - base_block_reward, - ) - .into_iter() - .filter(|trace| matcher.matches(&trace.trace)), + let mut all_traces = Vec::new(); + let mut block_traces = Vec::with_capacity(self.inner.eth_config.max_tracing_requests); + for chunk_start in (start..end).step_by(self.inner.eth_config.max_tracing_requests) { + let chunk_end = + std::cmp::min(chunk_start + self.inner.eth_config.max_tracing_requests as u64, end); + + // fetch all blocks in that chunk + let blocks = self + .eth_api() + .spawn_blocking_io(move |this| { + Ok(this + .provider() + .recovered_block_range(chunk_start..=chunk_end) + .map_err(Eth::Error::from_eth_err)? + .into_iter() + .map(Arc::new) + .collect::>()) + }) + .await?; + + // trace all blocks + for block in &blocks { + let matcher = matcher.clone(); + let traces = self.eth_api().trace_block_until( + block.hash().into(), + Some(block.clone()), + None, + TracingInspectorConfig::default_parity(), + move |tx_info, mut ctx| { + let mut traces = ctx + .take_inspector() + .into_parity_builder() + .into_localized_transaction_traces(tx_info); + traces.retain(|trace| matcher.matches(&trace.trace)); + Ok(Some(traces)) + }, ); - } else { - // no block reward, means we're past the Paris hardfork and don't expect any rewards - // because the blocks in ascending order - break + block_traces.push(traces); } - } - // Skips the first `after` number of matching traces. - // If `after` is greater than or equal to the number of matched traces, it returns an empty - // array. - if let Some(after) = after.map(|a| a as usize) { - if after < all_traces.len() { - all_traces.drain(..after); - } else { - return Ok(vec![]) + #[allow(clippy::iter_with_drain)] + let block_traces = futures::future::try_join_all(block_traces.drain(..)).await?; + all_traces.extend(block_traces.into_iter().flatten().flat_map(|traces| { + traces.into_iter().flatten().flat_map(|traces| traces.into_iter()) + })); + + // add reward traces for all blocks + for block in &blocks { + if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? 
{ + all_traces.extend( + self.extract_reward_traces( + block.header(), + block.body().ommers(), + base_block_reward, + ) + .into_iter() + .filter(|trace| matcher.matches(&trace.trace)), + ); + } else { + // no block reward, means we're past the Paris hardfork and don't expect any + // rewards because the blocks in ascending order + break + } } - } - // Return at most `count` of traces - if let Some(count) = count { - let count = count as usize; - if count < all_traces.len() { - all_traces.truncate(count); + // Skips the first `after` number of matching traces. + if let Some(cutoff) = after.map(|a| a as usize) && + cutoff < all_traces.len() + { + all_traces.drain(..cutoff); + // we removed the first `after` traces + after = None; } - }; + + // Return at most `count` of traces + if let Some(count) = count { + let count = count as usize; + if count < all_traces.len() { + all_traces.truncate(count); + return Ok(all_traces) + } + }; + } + + // If `after` is greater than or equal to the number of matched traces, it returns an + // empty array. + if let Some(cutoff) = after.map(|a| a as usize) && + cutoff >= all_traces.len() + { + return Ok(vec![]) + } Ok(all_traces) } @@ -692,6 +710,7 @@ where /// # Limitations /// This currently requires block filter fields, since reth does not have address indices yet. async fn trace_filter(&self, filter: TraceFilter) -> RpcResult> { + let _permit = self.inner.blocking_task_guard.clone().acquire_many_owned(2).await; Ok(Self::trace_filter(self, filter).await.map_err(Into::into)?) } diff --git a/crates/scroll/alloy/evm/src/block/mod.rs b/crates/scroll/alloy/evm/src/block/mod.rs index 6b221ff1506..610f67122ea 100644 --- a/crates/scroll/alloy/evm/src/block/mod.rs +++ b/crates/scroll/alloy/evm/src/block/mod.rs @@ -28,7 +28,7 @@ use alloy_primitives::{B256, U256}; use revm::{ context::{ result::{InvalidTransaction, ResultAndState}, - TxEnv, + Block, TxEnv, }, database::State, handler::PrecompileProvider, @@ -154,7 +154,7 @@ where fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> { // set state clear flag if the block is after the Spurious Dragon hardfork. let state_clear_flag = - self.spec.is_spurious_dragon_active_at_block(self.evm.block().number.to()); + self.spec.is_spurious_dragon_active_at_block(self.evm.block().number().to()); self.evm.db_mut().set_state_clear_flag(state_clear_flag); // load the l1 gas oracle contract in cache. @@ -169,7 +169,7 @@ where if self .spec .scroll_fork_activation(ScrollHardfork::Curie) - .transitions_at_block(self.evm.block().number.to()) + .transitions_at_block(self.evm.block().number().to()) { if let Err(err) = apply_curie_hard_fork(self.evm.db_mut()) { return Err(BlockExecutionError::msg(format!( @@ -183,7 +183,7 @@ where if self .spec .scroll_fork_activation(ScrollHardfork::Feynman) - .active_at_timestamp(self.evm.block().timestamp.to()) + .active_at_timestamp(self.evm.block().timestamp().to()) { if let Err(err) = apply_feynman_hard_fork(self.evm.db_mut()) { return Err(BlockExecutionError::msg(format!( @@ -206,7 +206,7 @@ where let is_l1_message = tx.tx().ty() == L1_MESSAGE_TRANSACTION_TYPE; // The sum of the transaction’s gas limit and the gas utilized in this block prior, // must be no greater than the block’s gasLimit. 
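Aside: the gas-accounting rule stated in the comment above can be read as a simple remaining-budget test. The sketch below is illustrative only; the function name and parameters are hypothetical and not part of this patch or the executor's API.

// Sketch only: a transaction fits if its gas limit does not exceed the
// block's remaining budget (block gas limit minus gas already used).
fn fits_in_block(block_gas_limit: u64, gas_used: u64, tx_gas_limit: u64) -> bool {
    tx_gas_limit <= block_gas_limit.saturating_sub(gas_used)
}

// A 600k-gas transaction does not fit into a 30M-gas block that has
// already used 29.5M gas:
// assert!(!fits_in_block(30_000_000, 29_500_000, 600_000));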
- let block_available_gas = self.evm.block().gas_limit - self.gas_used; + let block_available_gas = self.evm.block().gas_limit() - self.gas_used; if tx.tx().gas_limit() > block_available_gas { return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { transaction_gas_limit: tx.tx().gas_limit(), @@ -219,14 +219,14 @@ where let block = self.evm.block(); // verify the transaction type is accepted by the current fork. - if tx.tx().is_eip2930() && !chain_spec.is_curie_active_at_block(block.number.to()) { + if tx.tx().is_eip2930() && !chain_spec.is_curie_active_at_block(block.number().to()) { return Err(BlockValidationError::InvalidTx { hash, error: Box::new(InvalidTransaction::Eip2930NotSupported), } .into()) } - if tx.tx().is_eip1559() && !chain_spec.is_curie_active_at_block(block.number.to()) { + if tx.tx().is_eip1559() && !chain_spec.is_curie_active_at_block(block.number().to()) { return Err(BlockValidationError::InvalidTx { hash, error: Box::new(InvalidTransaction::Eip1559NotSupported), @@ -241,7 +241,7 @@ where .into()) } if tx.tx().is_eip7702() && - !chain_spec.is_euclid_v2_active_at_timestamp(block.timestamp.to()) + !chain_spec.is_euclid_v2_active_at_timestamp(block.timestamp().to()) { return Err(BlockValidationError::InvalidTx { hash, @@ -296,6 +296,7 @@ where receipts: self.receipts, requests: Default::default(), gas_used: self.gas_used, + blob_gas_used: 0, }, )) } diff --git a/crates/scroll/alloy/evm/src/lib.rs b/crates/scroll/alloy/evm/src/lib.rs index f8a7fda6db6..a76fb55f46f 100644 --- a/crates/scroll/alloy/evm/src/lib.rs +++ b/crates/scroll/alloy/evm/src/lib.rs @@ -112,6 +112,7 @@ where type Error = EVMError; type HaltReason = HaltReason; type Spec = ScrollSpecId; + type BlockEnv = BlockEnv; type Precompiles = P; type Inspector = I; @@ -207,6 +208,7 @@ impl EvmFactory for ScrollEvmFactory

{ type Error = EVMError; type HaltReason = HaltReason; type Spec = ScrollSpecId; + type BlockEnv = BlockEnv; type Precompiles = PrecompilesMap; fn create_evm( diff --git a/crates/scroll/alloy/evm/src/system_caller.rs b/crates/scroll/alloy/evm/src/system_caller.rs index f57d3473b72..12f87c4d4a0 100644 --- a/crates/scroll/alloy/evm/src/system_caller.rs +++ b/crates/scroll/alloy/evm/src/system_caller.rs @@ -6,7 +6,10 @@ use alloy_evm::{ Evm, }; use alloy_primitives::B256; -use revm::{context::result::ResultAndState, DatabaseCommit}; +use revm::{ + context::{result::ResultAndState, Block}, + DatabaseCommit, +}; use scroll_alloy_hardforks::ScrollHardforks; /// An ephemeral helper type for executing system calls. @@ -62,13 +65,13 @@ fn transact_blockhashes_contract_call( evm: &mut impl Evm, ) -> Result>, BlockExecutionError> { // if Feynman is not active at timestamp then no system transaction occurs. - if !spec.is_feynman_active_at_timestamp(evm.block().timestamp.to()) { + if !spec.is_feynman_active_at_timestamp(evm.block().timestamp().to()) { return Ok(None); } // if the block number is zero (genesis block) then no system transaction may occur as per // EIP-2935 - if evm.block().number.to::() == 0u64 { + if evm.block().number().to::() == 0u64 { return Ok(None); } diff --git a/crates/scroll/engine-primitives/src/payload/built.rs b/crates/scroll/engine-primitives/src/payload/built.rs index cea13302aa1..4fa87053c16 100644 --- a/crates/scroll/engine-primitives/src/payload/built.rs +++ b/crates/scroll/engine-primitives/src/payload/built.rs @@ -10,8 +10,7 @@ use alloy_rpc_types_engine::{ ExecutionPayloadEnvelopeV4, ExecutionPayloadFieldV2, ExecutionPayloadV1, ExecutionPayloadV3, PayloadId, }; -use reth_chain_state::ExecutedBlockWithTrieUpdates; -use reth_payload_primitives::BuiltPayload; +use reth_payload_primitives::{BuiltPayload, BuiltPayloadExecutedBlock}; use reth_primitives_traits::SealedBlock; use reth_scroll_primitives::{ScrollBlock, ScrollPrimitives}; @@ -23,7 +22,7 @@ pub struct ScrollBuiltPayload { /// Sealed block pub(crate) block: Arc>, /// Block execution data for the payload - pub(crate) executed_block: Option>, + pub(crate) executed_block: Option>, /// The fees of the block pub(crate) fees: U256, } @@ -33,7 +32,7 @@ impl ScrollBuiltPayload { pub const fn new( id: PayloadId, block: Arc>, - executed_block: Option>, + executed_block: Option>, fees: U256, ) -> Self { Self { id, block, executed_block, fees } @@ -71,7 +70,7 @@ impl BuiltPayload for ScrollBuiltPayload { self.fees } - fn executed_block(&self) -> Option> { + fn executed_block(&self) -> Option> { self.executed_block.clone() } diff --git a/crates/scroll/evm/src/build.rs b/crates/scroll/evm/src/build.rs index 2645dad697b..ad39ff5a4ec 100644 --- a/crates/scroll/evm/src/build.rs +++ b/crates/scroll/evm/src/build.rs @@ -7,6 +7,7 @@ use reth_evm::execute::{BlockAssembler, BlockAssemblerInput}; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::SignedTransaction; use reth_scroll_primitives::ScrollReceipt; +use revm::context::Block; use scroll_alloy_evm::ScrollBlockExecutionCtx; use scroll_alloy_hardforks::ScrollHardforks; @@ -53,7 +54,7 @@ where .. 
} = input; - let timestamp = evm_env.block_env.timestamp; + let timestamp = evm_env.block_env.timestamp(); let transactions_root = proofs::calculate_transaction_root(&transactions); let receipts_root = ScrollReceipt::calculate_receipt_root_no_memo(receipts); @@ -69,15 +70,15 @@ where withdrawals_root: None, logs_bloom, timestamp: timestamp.to(), - mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(), + mix_hash: evm_env.block_env.prevrandao().unwrap_or_default(), nonce: BEACON_NONCE.into(), base_fee_per_gas: self .chain_spec - .is_curie_active_at_block(evm_env.block_env.number.to()) - .then_some(evm_env.block_env.basefee), - number: evm_env.block_env.number.to(), - gas_limit: evm_env.block_env.gas_limit, - difficulty: evm_env.block_env.difficulty, + .is_curie_active_at_block(evm_env.block_env.number().to()) + .then_some(evm_env.block_env.basefee()), + number: evm_env.block_env.number().to(), + gas_limit: evm_env.block_env.gas_limit(), + difficulty: evm_env.block_env.difficulty(), gas_used: *gas_used, extra_data: Default::default(), parent_beacon_block_root: None, diff --git a/crates/scroll/openvm-compat/Cargo.toml b/crates/scroll/openvm-compat/Cargo.toml index e7df83724da..a4a8c0eb396 100644 --- a/crates/scroll/openvm-compat/Cargo.toml +++ b/crates/scroll/openvm-compat/Cargo.toml @@ -28,4 +28,4 @@ scroll-alloy-consensus = { path = "../alloy/consensus", default-features = false scroll-alloy-rpc-types = { path = "../alloy/rpc-types", default-features = false } [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm" } +revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v97" } diff --git a/crates/scroll/payload/src/builder.rs b/crates/scroll/payload/src/builder.rs index ca754cbadb3..812953dad37 100644 --- a/crates/scroll/payload/src/builder.rs +++ b/crates/scroll/payload/src/builder.rs @@ -3,7 +3,7 @@ use super::ScrollPayloadBuilderError; use crate::config::{PayloadBuildingBreaker, ScrollBuilderConfig}; -use alloy_consensus::{Transaction, Typed2718}; +use alloy_consensus::{BlockHeader, Transaction, Typed2718}; use alloy_primitives::U256; use alloy_rlp::Encodable; use core::fmt::Debug; @@ -11,7 +11,6 @@ use reth_basic_payload_builder::{ is_better_payload, BuildArguments, BuildOutcome, BuildOutcomeKind, MissingPayloadBehaviour, PayloadBuilder, PayloadConfig, }; -use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ block::{BlockExecutionError, BlockValidationError}, @@ -20,17 +19,21 @@ use reth_evm::{ }; use reth_execution_types::ExecutionOutcome; use reth_payload_builder::PayloadId; -use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; +use reth_payload_primitives::{ + BuiltPayloadExecutedBlock, PayloadBuilderAttributes, PayloadBuilderError, +}; use reth_payload_util::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions}; use reth_primitives_traits::{RecoveredBlock, SealedHeader, SignedTransaction, TxTy}; -use reth_revm::{cancelled::CancelOnDrop, database::StateProviderDatabase, db::State}; +use reth_revm::{ + cancelled::CancelOnDrop, context::either, database::StateProviderDatabase, db::State, +}; use reth_scroll_chainspec::{ChainConfig, ScrollChainConfig}; use reth_scroll_engine_primitives::{ScrollBuiltPayload, ScrollPayloadBuilderAttributes}; use reth_scroll_evm::{ScrollBaseFeeProvider, ScrollNextBlockEnvAttributes}; use reth_scroll_primitives::{ScrollPrimitives, ScrollTransactionSigned}; use 
reth_storage_api::{BaseFeeProvider, StateProvider, StateProviderFactory}; use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool}; -use revm::context::{Block, BlockEnv}; +use revm::context::Block; use scroll_alloy_hardforks::ScrollHardforks; use std::{boxed::Box, sync::Arc, vec, vec::Vec}; @@ -308,20 +311,17 @@ impl ScrollBuilder<'_, Txs> { let execution_outcome = ExecutionOutcome::new( db.take_bundle(), vec![execution_result.receipts], - block.number, + block.number(), Vec::new(), ); // create the executed block data - let executed: ExecutedBlockWithTrieUpdates = - ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(block), - execution_output: Arc::new(execution_outcome), - hashed_state: Arc::new(hashed_state), - }, - trie: ExecutedTrieUpdates::Present(Arc::new(trie_updates)), - }; + let executed: BuiltPayloadExecutedBlock = BuiltPayloadExecutedBlock { + recovered_block: Arc::new(block), + execution_output: Arc::new(execution_outcome), + hashed_state: either::Either::Left(Arc::new(hashed_state)), + trie_updates: either::Either::Left(Arc::new(trie_updates)), + }; let no_tx_pool = ctx.attributes().no_tx_pool; @@ -376,9 +376,9 @@ where } /// Returns the current fee settings for transactions from the mempool - pub fn best_transaction_attributes(&self, block_env: &BlockEnv) -> BestTransactionsAttributes { + pub fn best_transaction_attributes(&self, block_env: impl Block) -> BestTransactionsAttributes { BestTransactionsAttributes::new( - block_env.basefee, + block_env.basefee(), block_env.blob_gasprice().map(|p| p as u64), ) } @@ -435,7 +435,7 @@ where builder: &mut impl BlockBuilder, ) -> Result { let mut info = ExecutionInfo::new(); - let block_gas_limit = builder.evm().block().gas_limit; + let block_gas_limit = builder.evm().block().gas_limit(); let mut gas_spent_by_transactions = Vec::new(); for sequencer_tx in &self.attributes().transactions { @@ -506,8 +506,8 @@ where builder_config: &ScrollBuilderConfig, breaker: PayloadBuildingBreaker, ) -> Result, PayloadBuilderError> { - let block_gas_limit = builder.evm_mut().block().gas_limit; - let base_fee = builder.evm_mut().block().basefee; + let block_gas_limit = builder.evm_mut().block().gas_limit(); + let base_fee = builder.evm_mut().block().basefee(); while let Some(tx) = best_txs.next(()) { let tx = tx.into_consensus(); diff --git a/crates/scroll/rpc/src/eth/mod.rs b/crates/scroll/rpc/src/eth/mod.rs index ae75327a23b..ebe58a23ef1 100644 --- a/crates/scroll/rpc/src/eth/mod.rs +++ b/crates/scroll/rpc/src/eth/mod.rs @@ -16,8 +16,8 @@ use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_convert::{RpcConvert, RpcConverter, RpcTypes, SignableTxRequest}; use reth_rpc_eth_api::{ helpers::{ - pending_block::BuildPendingEnv, AddDevSigners, EthApiSpec, EthState, LoadFee, - LoadPendingBlock, LoadState, SpawnBlocking, Trace, + pending_block::BuildPendingEnv, EthApiSpec, EthState, LoadFee, LoadPendingBlock, LoadState, + SpawnBlocking, Trace, }, EthApiTypes, FullEthApiServer, RpcNodeCore, RpcNodeCoreExt, }; diff --git a/crates/stages/api/src/metrics/listener.rs b/crates/stages/api/src/metrics/listener.rs index 8c0707d1bea..2ae367eb364 100644 --- a/crates/stages/api/src/metrics/listener.rs +++ b/crates/stages/api/src/metrics/listener.rs @@ -52,17 +52,7 @@ impl MetricsListener { trace!(target: "sync::metrics", ?event, "Metric event received"); match event { MetricEvent::SyncHeight { height } => { - for stage_id in StageId::ALL { - self.handle_event(MetricEvent::StageCheckpoint { - 
stage_id, - checkpoint: StageCheckpoint { - block_number: height, - stage_checkpoint: None, - }, - max_block_number: Some(height), - elapsed: Duration::default(), - }); - } + self.update_all_stages_height(height); } MetricEvent::StageCheckpoint { stage_id, checkpoint, max_block_number, elapsed } => { let stage_metrics = self.sync_metrics.get_stage_metrics(stage_id); @@ -83,6 +73,17 @@ impl MetricsListener { } } } + + /// Updates all stage checkpoints to the given height efficiently. + fn update_all_stages_height(&mut self, height: BlockNumber) { + for stage_id in StageId::ALL { + let stage_metrics = self.sync_metrics.get_stage_metrics(stage_id); + let height_f64 = height as f64; + stage_metrics.checkpoint.set(height_f64); + stage_metrics.entities_processed.set(height_f64); + stage_metrics.entities_total.set(height_f64); + } + } } impl Future for MetricsListener { diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 0a9aaef73de..e8542c36da6 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -572,14 +572,18 @@ impl Pipeline { // FIXME: When handling errors, we do not commit the database transaction. This // leads to the Merkle stage not clearing its checkpoint, and restarting from an // invalid place. - let provider_rw = self.provider_factory.database_provider_rw()?; - provider_rw.save_stage_checkpoint_progress(StageId::MerkleExecute, vec![])?; - provider_rw.save_stage_checkpoint( - StageId::MerkleExecute, - prev_checkpoint.unwrap_or_default(), - )?; + // Only reset MerkleExecute checkpoint if MerkleExecute itself failed + if stage_id == StageId::MerkleExecute { + let provider_rw = self.provider_factory.database_provider_rw()?; + provider_rw + .save_stage_checkpoint_progress(StageId::MerkleExecute, vec![])?; + provider_rw.save_stage_checkpoint( + StageId::MerkleExecute, + prev_checkpoint.unwrap_or_default(), + )?; - provider_rw.commit()?; + provider_rw.commit()?; + } // We unwind because of a validation error. If the unwind itself // fails, we bail entirely, diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index bd1fb59ebe9..b6010dd6f39 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -1,12 +1,7 @@ #![expect(unreachable_pub)] -use alloy_primitives::{Address, B256, U256}; +use alloy_primitives::{Address, B256}; use itertools::concat; use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv}; -use reth_db_api::{ - cursor::DbCursorRO, - tables, - transaction::{DbTx, DbTxMut}, -}; use reth_primitives_traits::{Account, SealedBlock, SealedHeader}; use reth_provider::{ test_utils::MockNodeTypesWithDB, DBProvider, DatabaseProvider, DatabaseProviderFactory, @@ -165,7 +160,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { db.insert_changesets(transitions, None).unwrap(); let provider_rw = db.factory.provider_rw().unwrap(); - provider_rw.write_trie_updates(&updates).unwrap(); + provider_rw.write_trie_updates(updates).unwrap(); provider_rw.commit().unwrap(); let (transitions, final_state) = random_changeset_range( @@ -198,13 +193,6 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { ); db.insert_blocks(blocks.iter(), StorageKind::Static).unwrap(); - - // initialize TD - db.commit(|tx| { - let (head, _) = tx.cursor_read::()?.first()?.unwrap_or_default(); - Ok(tx.put::(head, U256::from(0).into())?) 
- }) - .unwrap(); } db diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index 30dbd9281dd..f81f4972427 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -39,9 +39,9 @@ use crate::{ stages::{ AccountHashingStage, BodyStage, EraImportSource, EraStage, ExecutionStage, FinishStage, - HeaderStage, IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, - PruneSenderRecoveryStage, PruneStage, SenderRecoveryStage, StorageHashingStage, - TransactionLookupStage, + HeaderStage, IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleChangeSets, + MerkleStage, PruneSenderRecoveryStage, PruneStage, SenderRecoveryStage, + StorageHashingStage, TransactionLookupStage, }, StageSet, StageSetBuilder, }; @@ -54,7 +54,7 @@ use reth_primitives_traits::{Block, NodePrimitives}; use reth_provider::HeaderSyncGapProvider; use reth_prune_types::PruneModes; use reth_stages_api::Stage; -use std::{ops::Not, sync::Arc}; +use std::sync::Arc; use tokio::sync::watch; /// A set containing all stages to run a fully syncing instance of reth. @@ -75,6 +75,7 @@ use tokio::sync::watch; /// - [`AccountHashingStage`] /// - [`StorageHashingStage`] /// - [`MerkleStage`] (execute) +/// - [`MerkleChangeSets`] /// - [`TransactionLookupStage`] /// - [`IndexStorageHistoryStage`] /// - [`IndexAccountHistoryStage`] @@ -269,8 +270,14 @@ where Stage, { fn builder(self) -> StageSetBuilder { - StageSetBuilder::default() - .add_stage(EraStage::new(self.era_import_source, self.stages_config.etl.clone())) + let mut builder = StageSetBuilder::default(); + + if self.era_import_source.is_some() { + builder = builder + .add_stage(EraStage::new(self.era_import_source, self.stages_config.etl.clone())); + } + + builder .add_stage(HeaderStage::new( self.provider, self.header_downloader, @@ -339,12 +346,12 @@ where stages_config: self.stages_config.clone(), prune_modes: self.prune_modes.clone(), }) - // If any prune modes are set, add the prune stage. - .add_stage_opt(self.prune_modes.is_empty().not().then(|| { - // Prune stage should be added after all hashing stages, because otherwise it will - // delete - PruneStage::new(self.prune_modes.clone(), self.stages_config.prune.commit_threshold) - })) + // Prune stage should be added after all hashing stages, because otherwise it will + // delete + .add_stage(PruneStage::new( + self.prune_modes.clone(), + self.stages_config.prune.commit_threshold, + )) } } @@ -390,6 +397,13 @@ where } /// A set containing all stages that hash account state. +/// +/// This includes: +/// - [`MerkleStage`] (unwind) +/// - [`AccountHashingStage`] +/// - [`StorageHashingStage`] +/// - [`MerkleStage`] (execute) +/// - [`MerkleChangeSets`] #[derive(Debug)] #[non_exhaustive] pub struct HashingStages { @@ -405,6 +419,7 @@ where MerkleStage

: Stage, AccountHashingStage: Stage, StorageHashingStage: Stage, + MerkleChangeSets: Stage, { fn builder(self) -> StageSetBuilder { StageSetBuilder::default() @@ -422,6 +437,7 @@ where self.stages_config.merkle.incremental_threshold, self.consensus, )) + .add_stage(MerkleChangeSets::new()) } } diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index d1386dded4b..7b6090ca86b 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -580,7 +580,7 @@ mod tests { ..Default::default() }, ); - self.db.insert_headers_with_td(blocks.iter().map(|block| block.sealed_header()))?; + self.db.insert_headers(blocks.iter().map(|block| block.sealed_header()))?; if let Some(progress) = blocks.get(start as usize) { // Insert last progress data { diff --git a/crates/stages/stages/src/stages/era.rs b/crates/stages/stages/src/stages/era.rs index 436ee769659..7af667dce78 100644 --- a/crates/stages/stages/src/stages/era.rs +++ b/crates/stages/stages/src/stages/era.rs @@ -4,18 +4,17 @@ use futures_util::{Stream, StreamExt}; use reqwest::{Client, Url}; use reth_config::config::EtlConfig; use reth_db_api::{table::Value, transaction::DbTxMut}; -use reth_era::{era1_file::Era1Reader, era_file_ops::StreamReader}; +use reth_era::{common::file_ops::StreamReader, era1::file::Era1Reader}; use reth_era_downloader::{read_dir, EraClient, EraMeta, EraStream, EraStreamConfig}; use reth_era_utils as era; use reth_etl::Collector; use reth_primitives_traits::{FullBlockBody, FullBlockHeader, NodePrimitives}; use reth_provider::{ - BlockReader, BlockWriter, DBProvider, HeaderProvider, StageCheckpointWriter, - StaticFileProviderFactory, StaticFileWriter, + BlockReader, BlockWriter, DBProvider, StageCheckpointWriter, StaticFileProviderFactory, + StaticFileWriter, }; use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_static_file_types::StaticFileSegment; -use reth_storage_errors::ProviderError; use std::{ fmt::{Debug, Formatter}, iter, @@ -176,11 +175,6 @@ where .get_highest_static_file_block(StaticFileSegment::Headers) .unwrap_or_default(); - // Find the latest total difficulty - let mut td = static_file_provider - .header_td_by_number(last_header_number)? - .ok_or(ProviderError::TotalDifficultyNotFound(last_header_number))?; - // Although headers were downloaded in reverse order, the collector iterates it in // ascending order let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; @@ -190,7 +184,6 @@ where &mut writer, provider, &mut self.hash_collector, - &mut td, last_header_number..=input.target(), ) .map_err(|e| StageError::Fatal(e.into()))?; @@ -211,10 +204,27 @@ where height } else { - input.target() + // No era files to process. Return the highest block we're aware of to avoid + // limiting subsequent stages with an outdated checkpoint. + // + // This can happen when: + // 1. Era import is complete (all pre-merge blocks imported) + // 2. No era import source was configured + // + // We return max(checkpoint, highest_header, target) to ensure we don't return + // a stale checkpoint that could limit subsequent stages like Headers. 
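Aside: the fallback described in the comment above amounts to taking the maximum of three block numbers. A minimal sketch, using hypothetical free-standing parameters instead of the stage's actual provider calls:

// Sketch only: when there are no era files to import, report the most
// advanced block already known so later stages are not capped by a stale
// checkpoint.
fn era_fallback_height(checkpoint: u64, highest_static_header: u64, target: Option<u64>) -> u64 {
    checkpoint.max(highest_static_header).max(target.unwrap_or(checkpoint))
}

// assert_eq!(era_fallback_height(0, 1_000, Some(500)), 1_000);
// assert_eq!(era_fallback_height(0, 0, Some(2_000)), 2_000);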
+ let highest_header = provider + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::Headers) + .unwrap_or_default(); + + let checkpoint = input.checkpoint().block_number; + let from_target = input.target.unwrap_or(checkpoint); + + checkpoint.max(highest_header).max(from_target) }; - Ok(ExecOutput { checkpoint: StageCheckpoint::new(height), done: height == input.target() }) + Ok(ExecOutput { checkpoint: StageCheckpoint::new(height), done: height >= input.target() }) } fn unwind( @@ -330,7 +340,7 @@ mod tests { }; use reth_ethereum_primitives::TransactionSigned; use reth_primitives_traits::{SealedBlock, SealedHeader}; - use reth_provider::{BlockNumReader, TransactionsProvider}; + use reth_provider::{BlockNumReader, HeaderProvider, TransactionsProvider}; use reth_testing_utils::generators::{ random_block_range, random_signed_tx, BlockRangeParams, }; @@ -385,7 +395,7 @@ mod tests { ..Default::default() }, ); - self.db.insert_headers_with_td(blocks.iter().map(|block| block.sealed_header()))?; + self.db.insert_headers(blocks.iter().map(|block| block.sealed_header()))?; if let Some(progress) = blocks.get(start as usize) { // Insert last progress data { @@ -441,9 +451,6 @@ mod tests { match output { Some(output) if output.checkpoint.block_number > initial_checkpoint => { let provider = self.db.factory.provider()?; - let mut td = provider - .header_td_by_number(initial_checkpoint.saturating_sub(1))? - .unwrap_or_default(); for block_num in initial_checkpoint.. output @@ -463,10 +470,6 @@ mod tests { assert!(header.is_some()); let header = SealedHeader::seal_slow(header.unwrap()); assert_eq!(header.hash(), hash); - - // validate the header total difficulty - td += header.difficulty; - assert_eq!(provider.header_td_by_number(block_num)?, Some(td)); } self.validate_db_blocks( @@ -507,10 +510,6 @@ mod tests { .ensure_no_entry_above_by_value::(block, |val| val)?; self.db.ensure_no_entry_above::(block, |key| key)?; self.db.ensure_no_entry_above::(block, |key| key)?; - self.db.ensure_no_entry_above::( - block, - |num| num, - )?; Ok(()) } diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 3736fa523cb..adfc87c5ccc 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -39,7 +39,6 @@ use super::missing_static_data_error; /// Input tables: /// - [`tables::CanonicalHeaders`] get next block to execute. /// - [`tables::Headers`] get for revm environment variables. -/// - [`tables::HeaderTerminalDifficulties`] /// - [`tables::BlockBodyIndices`] to get tx number /// - [`tables::Transactions`] to execute /// @@ -661,7 +660,7 @@ where mod tests { use super::*; use crate::{stages::MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD, test_utils::TestStageDB}; - use alloy_primitives::{address, hex_literal::hex, keccak256, Address, B256, U256}; + use alloy_primitives::{address, hex_literal::hex, keccak256, B256, U256}; use alloy_rlp::Decodable; use assert_matches::assert_matches; use reth_chainspec::ChainSpecBuilder; @@ -678,9 +677,7 @@ mod tests { DatabaseProviderFactory, ReceiptProvider, StaticFileProviderFactory, }; use reth_prune::PruneModes; - use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig}; use reth_stages_api::StageUnitCheckpoint; - use std::collections::BTreeMap; fn stage() -> ExecutionStage { let evm_config = @@ -896,21 +893,12 @@ mod tests { // If there is a pruning configuration, then it's forced to use the database. // This way we test both cases. 
- let modes = [None, Some(PruneModes::none())]; - let random_filter = ReceiptsLogPruneConfig(BTreeMap::from([( - Address::random(), - PruneMode::Distance(100000), - )])); + let modes = [None, Some(PruneModes::default())]; // Tests node with database and node with static files - for mut mode in modes { + for mode in modes { let mut provider = factory.database_provider_rw().unwrap(); - if let Some(mode) = &mut mode { - // Simulating a full node where we write receipts to database - mode.receipts_log_filter = random_filter.clone(); - } - let mut execution_stage = stage(); provider.set_prune_modes(mode.clone().unwrap_or_default()); @@ -1033,19 +1021,10 @@ mod tests { // If there is a pruning configuration, then it's forced to use the database. // This way we test both cases. - let modes = [None, Some(PruneModes::none())]; - let random_filter = ReceiptsLogPruneConfig(BTreeMap::from([( - Address::random(), - PruneMode::Before(100000), - )])); + let modes = [None, Some(PruneModes::default())]; // Tests node with database and node with static files - for mut mode in modes { - if let Some(mode) = &mut mode { - // Simulating a full node where we write receipts to database - mode.receipts_log_filter = random_filter.clone(); - } - + for mode in modes { // Test Execution let mut execution_stage = stage(); provider.set_prune_modes(mode.clone().unwrap_or_default()); diff --git a/crates/stages/stages/src/stages/finish.rs b/crates/stages/stages/src/stages/finish.rs index 1b9e624b41b..8d676c35b99 100644 --- a/crates/stages/stages/src/stages/finish.rs +++ b/crates/stages/stages/src/stages/finish.rs @@ -72,7 +72,7 @@ mod tests { let start = input.checkpoint().block_number; let mut rng = generators::rng(); let head = random_header(&mut rng, start, None); - self.db.insert_headers_with_td(std::iter::once(&head))?; + self.db.insert_headers(std::iter::once(&head))?; // use previous progress as seed size let end = input.target.unwrap_or_default() + 1; @@ -82,7 +82,7 @@ mod tests { } let mut headers = random_header_range(&mut rng, start + 1..end, head.hash()); - self.db.insert_headers_with_td(headers.iter())?; + self.db.insert_headers(headers.iter())?; headers.insert(0, head); Ok(headers) } diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index cc86db14d38..1e48f2d38e0 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -64,7 +64,7 @@ impl AccountHashingStage { opts: SeedOpts, ) -> Result, StageError> where - N::Primitives: reth_primitives_traits::FullNodePrimitives< + N::Primitives: reth_primitives_traits::NodePrimitives< Block = reth_ethereum_primitives::Block, BlockHeader = reth_primitives_traits::Header, >, diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index d3e690dc516..8ad39be5eb8 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -16,15 +16,14 @@ use reth_network_p2p::headers::{ }; use reth_primitives_traits::{serde_bincode_compat, FullBlockHeader, NodePrimitives, SealedHeader}; use reth_provider::{ - providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderProvider, - HeaderSyncGapProvider, StaticFileProviderFactory, + providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderSyncGapProvider, + StaticFileProviderFactory, }; use reth_stages_api::{ CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput, HeadersCheckpoint, Stage, 
StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, }; use reth_static_file_types::StaticFileSegment; -use reth_storage_errors::provider::ProviderError; use std::task::{ready, Context, Poll}; use tokio::sync::watch; @@ -107,11 +106,6 @@ where .get_highest_static_file_block(StaticFileSegment::Headers) .unwrap_or_default(); - // Find the latest total difficulty - let mut td = static_file_provider - .header_td_by_number(last_header_number)? - .ok_or(ProviderError::TotalDifficultyNotFound(last_header_number))?; - // Although headers were downloaded in reverse order, the collector iterates it in ascending // order let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; @@ -134,11 +128,8 @@ where } last_header_number = header.number(); - // Increase total difficulty - td += header.difficulty(); - // Append to Headers segment - writer.append_header(header, td, header_hash)?; + writer.append_header(header, header_hash)?; } info!(target: "sync::stages::headers", total = total_headers, "Writing headers hash index"); @@ -342,9 +333,6 @@ where (input.unwind_to + 1).., )?; provider.tx_ref().unwind_table_by_num::(input.unwind_to)?; - provider - .tx_ref() - .unwind_table_by_num::(input.unwind_to)?; let unfinalized_headers_unwound = provider.tx_ref().unwind_table_by_num::(input.unwind_to)?; @@ -415,7 +403,7 @@ mod tests { ReverseHeadersDownloader, ReverseHeadersDownloaderBuilder, }; use reth_network_p2p::test_utils::{TestHeaderDownloader, TestHeadersClient}; - use reth_provider::{test_utils::MockNodeTypesWithDB, BlockNumReader}; + use reth_provider::{test_utils::MockNodeTypesWithDB, BlockNumReader, HeaderProvider}; use tokio::sync::watch; pub(crate) struct HeadersTestRunner { @@ -469,7 +457,7 @@ mod tests { let start = input.checkpoint().block_number; let headers = random_header_range(&mut rng, 0..start + 1, B256::ZERO); let head = headers.last().cloned().unwrap(); - self.db.insert_headers_with_td(headers.iter())?; + self.db.insert_headers(headers.iter())?; // use previous checkpoint as seed size let end = input.target.unwrap_or_default() + 1; @@ -493,9 +481,6 @@ mod tests { match output { Some(output) if output.checkpoint.block_number > initial_checkpoint => { let provider = self.db.factory.provider()?; - let mut td = provider - .header_td_by_number(initial_checkpoint.saturating_sub(1))? 
- .unwrap_or_default(); for block_num in initial_checkpoint..output.checkpoint.block_number { // look up the header hash @@ -509,10 +494,6 @@ mod tests { assert!(header.is_some()); let header = SealedHeader::seal_slow(header.unwrap()); assert_eq!(header.hash(), hash); - - // validate the header total difficulty - td += header.difficulty; - assert_eq!(provider.header_td_by_number(block_num)?, Some(td)); } } _ => self.check_no_header_entry_above(initial_checkpoint)?, @@ -567,10 +548,6 @@ mod tests { .ensure_no_entry_above_by_value::(block, |val| val)?; self.db.ensure_no_entry_above::(block, |key| key)?; self.db.ensure_no_entry_above::(block, |key| key)?; - self.db.ensure_no_entry_above::( - block, - |num| num, - )?; Ok(()) } @@ -635,16 +612,7 @@ mod tests { let static_file_provider = provider.static_file_provider(); let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers).unwrap(); for header in sealed_headers { - let ttd = if header.number() == 0 { - header.difficulty() - } else { - let parent_block_number = header.number() - 1; - let parent_ttd = - provider.header_td_by_number(parent_block_number).unwrap().unwrap_or_default(); - parent_ttd + header.difficulty() - }; - - writer.append_header(header.header(), ttd, &header.hash()).unwrap(); + writer.append_header(header.header(), &header.hash()).unwrap(); } drop(writer); diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index e6aa166d2a2..c216b9958d4 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -280,7 +280,7 @@ where })?; match progress { StateRootProgress::Progress(state, hashed_entries_walked, updates) => { - provider.write_trie_updates(&updates)?; + provider.write_trie_updates(updates)?; let mut checkpoint = MerkleCheckpoint::new( to_block, @@ -323,7 +323,7 @@ where }) } StateRootProgress::Complete(root, hashed_entries_walked, updates) => { - provider.write_trie_updates(&updates)?; + provider.write_trie_updates(updates)?; entities_checkpoint.processed += hashed_entries_walked as u64; @@ -350,7 +350,7 @@ where error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "Incremental state root failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); StageError::Fatal(Box::new(e)) })?; - provider.write_trie_updates(&updates)?; + provider.write_trie_updates(updates)?; final_root = Some(root); } @@ -445,7 +445,7 @@ where )?; // Validation passed, apply unwind changes to the database. 
- provider.write_trie_updates(&updates)?; + provider.write_trie_updates(updates)?; // Update entities checkpoint to reflect the unwind operation // Since we're unwinding, we need to recalculate the total entities at the target block @@ -782,7 +782,7 @@ mod tests { let hash = last_header.hash_slow(); writer.prune_headers(1).unwrap(); writer.commit().unwrap(); - writer.append_header(&last_header, U256::ZERO, &hash).unwrap(); + writer.append_header(&last_header, &hash).unwrap(); writer.commit().unwrap(); Ok(blocks) diff --git a/crates/stages/stages/src/stages/merkle_changesets.rs b/crates/stages/stages/src/stages/merkle_changesets.rs new file mode 100644 index 00000000000..dd4d8cf2017 --- /dev/null +++ b/crates/stages/stages/src/stages/merkle_changesets.rs @@ -0,0 +1,401 @@ +use crate::stages::merkle::INVALID_STATE_ROOT_ERROR_MESSAGE; +use alloy_consensus::BlockHeader; +use alloy_primitives::BlockNumber; +use reth_consensus::ConsensusError; +use reth_primitives_traits::{GotExpected, SealedHeader}; +use reth_provider::{ + ChainStateBlockReader, DBProvider, HeaderProvider, ProviderError, PruneCheckpointReader, + PruneCheckpointWriter, StageCheckpointReader, TrieWriter, +}; +use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; +use reth_stages_api::{ + BlockErrorKind, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, + UnwindInput, UnwindOutput, +}; +use reth_trie::{updates::TrieUpdates, HashedPostState, KeccakKeyHasher, StateRoot, TrieInput}; +use reth_trie_db::{DatabaseHashedPostState, DatabaseStateRoot}; +use std::ops::Range; +use tracing::{debug, error}; + +/// The `MerkleChangeSets` stage. +/// +/// This stage processes and maintains trie changesets from the finalized block to the latest block. +#[derive(Debug, Clone)] +pub struct MerkleChangeSets { + /// The number of blocks to retain changesets for, used as a fallback when the finalized block + /// is not found. Defaults to 64 (2 epochs in beacon chain). + retention_blocks: u64, +} + +impl MerkleChangeSets { + /// Creates a new `MerkleChangeSets` stage with default retention blocks of 64. + pub const fn new() -> Self { + Self { retention_blocks: 64 } + } + + /// Creates a new `MerkleChangeSets` stage with a custom finalized block height. + pub const fn with_retention_blocks(retention_blocks: u64) -> Self { + Self { retention_blocks } + } + + /// Returns the range of blocks which are already computed. Will return an empty range if none + /// have been computed. + fn computed_range( + provider: &Provider, + checkpoint: Option, + ) -> Result, StageError> + where + Provider: PruneCheckpointReader, + { + let to = checkpoint.map(|chk| chk.block_number).unwrap_or_default(); + + // Get the prune checkpoint for MerkleChangeSets to use as the lower bound. If there's no + // prune checkpoint or if the pruned block number is None, return empty range + let Some(from) = provider + .get_prune_checkpoint(PruneSegment::MerkleChangeSets)? + .and_then(|chk| chk.block_number) + // prune checkpoint indicates the last block pruned, so the block after is the start of + // the computed data + .map(|block_number| block_number + 1) + else { + return Ok(0..0) + }; + + Ok(from..to + 1) + } + + /// Determines the target range for changeset computation based on the checkpoint and provider + /// state. + /// + /// Returns the target range (exclusive end) to compute changesets for. 
+ fn determine_target_range( + &self, + provider: &Provider, + ) -> Result, StageError> + where + Provider: StageCheckpointReader + ChainStateBlockReader, + { + // Get merkle checkpoint which represents our target end block + let merkle_checkpoint = provider + .get_stage_checkpoint(StageId::MerkleExecute)? + .map(|checkpoint| checkpoint.block_number) + .unwrap_or(0); + + let target_end = merkle_checkpoint + 1; // exclusive + + // Calculate the target range based on the finalized block and the target block. + // We maintain changesets from the finalized block to the latest block. + let finalized_block = provider.last_finalized_block_number()?; + + // Calculate the fallback start position based on retention blocks + let retention_based_start = merkle_checkpoint.saturating_sub(self.retention_blocks); + + // If the finalized block was way in the past then we don't want to generate changesets for + // all of those past blocks; we only care about the recent history. + // + // Use maximum of finalized_block and retention_based_start if finalized_block exists, + // otherwise just use retention_based_start. + let mut target_start = finalized_block + .map(|finalized| finalized.saturating_add(1).max(retention_based_start)) + .unwrap_or(retention_based_start); + + // We cannot revert the genesis block; target_start must be >0 + target_start = target_start.max(1); + + Ok(target_start..target_end) + } + + /// Calculates the trie updates given a [`TrieInput`], asserting that the resulting state root + /// matches the expected one for the block. + fn calculate_block_trie_updates( + provider: &Provider, + block_number: BlockNumber, + input: TrieInput, + ) -> Result { + let (root, trie_updates) = + StateRoot::overlay_root_from_nodes_with_updates(provider.tx_ref(), input).map_err( + |e| { + error!( + target: "sync::stages::merkle_changesets", + %e, + ?block_number, + "Incremental state root failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); + StageError::Fatal(Box::new(e)) + }, + )?; + + let block = provider + .header_by_number(block_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; + + let (got, expected) = (root, block.state_root()); + if got != expected { + // Only seal the header when we need it for the error + let header = SealedHeader::seal_slow(block); + error!( + target: "sync::stages::merkle_changesets", + ?block_number, + ?got, + ?expected, + "Failed to verify block state root! {INVALID_STATE_ROOT_ERROR_MESSAGE}", + ); + return Err(StageError::Block { + error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff( + GotExpected { got, expected }.into(), + )), + block: Box::new(header.block_with_parent()), + }) + } + + Ok(trie_updates) + } + + fn populate_range( + provider: &Provider, + target_range: Range, + ) -> Result<(), StageError> + where + Provider: StageCheckpointReader + + TrieWriter + + DBProvider + + HeaderProvider + + ChainStateBlockReader, + { + let target_start = target_range.start; + let target_end = target_range.end; + debug!( + target: "sync::stages::merkle_changesets", + ?target_range, + "Starting trie changeset computation", + ); + + // We need to distinguish a cumulative revert and a per-block revert. A cumulative revert + // reverts changes starting at db tip all the way to a block. A per-block revert only + // reverts a block's changes. + // + // We need to calculate the cumulative HashedPostState reverts for every block in the + // target range. 
The cumulative HashedPostState revert for block N can be calculated as: + // + // + // ``` + // // where `extend` overwrites any shared keys + // cumulative_state_revert(N) = cumulative_state_revert(N + 1).extend(get_block_state_revert(N)) + // ``` + // + // We need per-block reverts to calculate the prefix set for each individual block. By + // using the per-block reverts to calculate cumulative reverts on-the-fly we can save a + // bunch of memory. + debug!( + target: "sync::stages::merkle_changesets", + ?target_range, + "Computing per-block state reverts", + ); + let mut per_block_state_reverts = Vec::new(); + for block_number in target_range.clone() { + per_block_state_reverts.push(HashedPostState::from_reverts::( + provider.tx_ref(), + block_number..=block_number, + )?); + } + + // Helper to retrieve state revert data for a specific block from the pre-computed array + let get_block_state_revert = |block_number: BlockNumber| -> &HashedPostState { + let index = (block_number - target_start) as usize; + &per_block_state_reverts[index] + }; + + // Helper to accumulate state reverts from a given block to the target end + let compute_cumulative_state_revert = |block_number: BlockNumber| -> HashedPostState { + let mut cumulative_revert = HashedPostState::default(); + for n in (block_number..target_end).rev() { + cumulative_revert.extend_ref(get_block_state_revert(n)) + } + cumulative_revert + }; + + // To calculate the changeset for a block, we first need the TrieUpdates which are + // generated as a result of processing the block. To get these we need: + // 1) The TrieUpdates which revert the db's trie to _prior_ to the block + // 2) The HashedPostState to revert the db's state to _after_ the block + // + // To get (1) for `target_start` we need to do a big state root calculation which takes + // into account all changes between that block and db tip. For each block after the + // `target_start` we can update (1) using the TrieUpdates which were output by the previous + // block, only targeting the state changes of that block. + debug!( + target: "sync::stages::merkle_changesets", + ?target_start, + "Computing trie state at starting block", + ); + let mut input = TrieInput::default(); + input.state = compute_cumulative_state_revert(target_start); + input.prefix_sets = input.state.construct_prefix_sets(); + // target_start will be >= 1, see `determine_target_range`. + input.nodes = + Self::calculate_block_trie_updates(provider, target_start - 1, input.clone())?; + + for block_number in target_range { + debug!( + target: "sync::stages::merkle_changesets", + ?block_number, + "Computing trie updates for block", + ); + // Revert the state so that this block has been just processed, meaning we take the + // cumulative revert of the subsequent block. + input.state = compute_cumulative_state_revert(block_number + 1); + + // Construct prefix sets from only this block's `HashedPostState`, because we only care + // about trie updates which occurred as a result of this block being processed. + input.prefix_sets = get_block_state_revert(block_number).construct_prefix_sets(); + + // Calculate the trie updates for this block, then apply those updates to the reverts. + // We calculate the overlay which will be passed into the next step using the trie + // reverts prior to them being updated. 
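Aside: the cumulative-revert recurrence described in the comments above can be illustrated with an ordinary map standing in for `HashedPostState`. Everything in this sketch (the `Revert` alias and `cumulative_revert` helper) is hypothetical, not the reth types:

use std::collections::HashMap;

// Stand-in for a per-block state revert: key -> value the key had before the block.
type Revert = HashMap<&'static str, u64>;

// cumulative(N) = cumulative(N + 1) extended with per_block(N), where the
// extension for an older block overwrites shared keys, so the oldest
// (pre-block-N) value wins.
fn cumulative_revert(per_block: &[Revert], from: usize) -> Revert {
    let mut acc = Revert::new();
    // Iterate newest -> oldest so the entry inserted last (the oldest block's
    // revert) overwrites shared keys, mirroring `extend_ref` in the loop above.
    for revert in per_block[from..].iter().rev() {
        acc.extend(revert.iter().map(|(k, v)| (*k, *v)));
    }
    acc
}

// If block 0 reverts "a" to 1 and block 1 reverts "a" to 2, the cumulative
// revert starting at block 0 restores "a" to 1 (its value before block 0).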
+ let this_trie_updates = + Self::calculate_block_trie_updates(provider, block_number, input.clone())?; + + let trie_overlay = input.nodes.clone().into_sorted(); + input.nodes.extend_ref(&this_trie_updates); + let this_trie_updates = this_trie_updates.into_sorted(); + + // Write the changesets to the DB using the trie updates produced by the block, and the + // trie reverts as the overlay. + debug!( + target: "sync::stages::merkle_changesets", + ?block_number, + "Writing trie changesets for block", + ); + provider.write_trie_changesets( + block_number, + &this_trie_updates, + Some(&trie_overlay), + )?; + } + + Ok(()) + } +} + +impl Default for MerkleChangeSets { + fn default() -> Self { + Self::new() + } +} + +impl Stage for MerkleChangeSets +where + Provider: StageCheckpointReader + + TrieWriter + + DBProvider + + HeaderProvider + + ChainStateBlockReader + + PruneCheckpointReader + + PruneCheckpointWriter, +{ + fn id(&self) -> StageId { + StageId::MerkleChangeSets + } + + fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { + // Get merkle checkpoint and assert that the target is the same. + let merkle_checkpoint = provider + .get_stage_checkpoint(StageId::MerkleExecute)? + .map(|checkpoint| checkpoint.block_number) + .unwrap_or(0); + + if input.target.is_none_or(|target| merkle_checkpoint != target) { + return Err(StageError::Fatal(eyre::eyre!("Cannot sync stage to block {:?} when MerkleExecute is at block {merkle_checkpoint:?}", input.target).into())) + } + + let mut target_range = self.determine_target_range(provider)?; + + // Get the previously computed range. This will be updated to reflect the populating of the + // target range. + let mut computed_range = Self::computed_range(provider, input.checkpoint)?; + debug!( + target: "sync::stages::merkle_changesets", + ?computed_range, + ?target_range, + "Got computed and target ranges", + ); + + // We want the target range to not include any data already computed previously, if + // possible, so we start the target range from the end of the computed range if that is + // greater. + // + // ------------------------------> Block # + // |------computed-----| + // |-----target-----| + // |--actual--| + // + // However, if the target start is less than the previously computed start, we don't want to + // do this, as it would leave a gap of data at `target_range.start..=computed_range.start`. + // + // ------------------------------> Block # + // |---computed---| + // |-------target-------| + // |-------actual-------| + // + if target_range.start >= computed_range.start { + target_range.start = target_range.start.max(computed_range.end); + } + + // If target range is empty (target_start >= target_end), stage is already successfully + // executed. + if target_range.start >= target_range.end { + return Ok(ExecOutput::done(StageCheckpoint::new(target_range.end.saturating_sub(1)))); + } + + // If our target range is a continuation of the already computed range then we can keep the + // already computed data. + if target_range.start == computed_range.end { + // Clear from target_start onwards to ensure no stale data exists + provider.clear_trie_changesets_from(target_range.start)?; + computed_range.end = target_range.end; + } else { + // If our target range is not a continuation of the already computed range then we + // simply clear the computed data, to make sure there's no gaps or conflicts. 
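Aside: the two range diagrams above boil down to a small reconciliation rule. A sketch assuming half-open `Range<u64>` block ranges; the helper name and the bool return are illustrative, not this stage's API:

use std::ops::Range;

// Returns the range that still needs computing and whether previously
// computed changesets can be kept.
fn reconcile(computed: &Range<u64>, mut target: Range<u64>) -> (Range<u64>, bool) {
    if target.start >= computed.start {
        // Never recompute blocks that are already covered.
        target.start = target.start.max(computed.end);
    }
    // Existing data is only kept if the (possibly trimmed) target range is a
    // direct continuation of it; anything else is recomputed from scratch to
    // avoid gaps or conflicts.
    let keep_existing = target.start == computed.end;
    (target, keep_existing)
}

// assert_eq!(reconcile(&(10..20), 15..30), (20..30, true));  // continuation
// assert_eq!(reconcile(&(10..20), 5..30), (5..30, false));   // would leave a gap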
+ provider.clear_trie_changesets()?; + computed_range = target_range.clone(); + } + + // Populate the target range with changesets + Self::populate_range(provider, target_range)?; + + // Update the prune checkpoint to reflect that all data before `computed_range.start` + // is not available. + provider.save_prune_checkpoint( + PruneSegment::MerkleChangeSets, + PruneCheckpoint { + block_number: Some(computed_range.start.saturating_sub(1)), + tx_number: None, + prune_mode: PruneMode::Before(computed_range.start), + }, + )?; + + // `computed_range.end` is exclusive. + let checkpoint = StageCheckpoint::new(computed_range.end.saturating_sub(1)); + + Ok(ExecOutput::done(checkpoint)) + } + + fn unwind( + &mut self, + provider: &Provider, + input: UnwindInput, + ) -> Result { + // Unwinding is trivial; just clear everything after the target block. + provider.clear_trie_changesets_from(input.unwind_to + 1)?; + + let mut computed_range = Self::computed_range(provider, Some(input.checkpoint))?; + computed_range.end = input.unwind_to + 1; + if computed_range.start > computed_range.end { + computed_range.start = computed_range.end; + } + + // `computed_range.end` is exclusive + let checkpoint = StageCheckpoint::new(computed_range.end.saturating_sub(1)); + + Ok(UnwindOutput { checkpoint }) + } +} diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index f9b2312f5ab..58fa7cfb324 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -16,6 +16,8 @@ mod index_account_history; mod index_storage_history; /// Stage for computing state root. mod merkle; +/// Stage for computing merkle changesets. +mod merkle_changesets; mod prune; /// The sender recovery stage. mod sender_recovery; @@ -32,6 +34,7 @@ pub use headers::*; pub use index_account_history::*; pub use index_storage_history::*; pub use merkle::*; +pub use merkle_changesets::*; pub use prune::*; pub use sender_recovery::*; pub use tx_lookup::*; @@ -225,7 +228,7 @@ mod tests { // In an unpruned configuration there is 1 receipt, 3 changed accounts and 1 changed // storage. - let mut prune = PruneModes::none(); + let mut prune = PruneModes::default(); check_pruning(test_db.factory.clone(), prune.clone(), 1, 3, 1).await; prune.receipts = Some(PruneMode::Full); diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index f62259dcfdd..f6fb7f90ae1 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -1,7 +1,7 @@ use reth_db_api::{table::Value, transaction::DbTxMut}; use reth_primitives_traits::NodePrimitives; use reth_provider::{ - BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, + BlockReader, ChainStateBlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, }; use reth_prune::{ @@ -42,6 +42,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader + + ChainStateBlockReader + StaticFileProviderFactory< Primitives: NodePrimitives, >, @@ -102,9 +103,18 @@ where // We cannot recover the data that was pruned in `execute`, so we just update the // checkpoints. 
let prune_checkpoints = provider.get_prune_checkpoints()?; + let unwind_to_last_tx = + provider.block_body_indices(input.unwind_to)?.map(|i| i.last_tx_num()); + for (segment, mut checkpoint) in prune_checkpoints { - checkpoint.block_number = Some(input.unwind_to); - provider.save_prune_checkpoint(segment, checkpoint)?; + // Only update the checkpoint if unwind_to is lower than the existing checkpoint. + if let Some(block) = checkpoint.block_number && + input.unwind_to < block + { + checkpoint.block_number = Some(input.unwind_to); + checkpoint.tx_number = unwind_to_last_tx; + provider.save_prune_checkpoint(segment, checkpoint)?; + } } Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) }) } @@ -121,7 +131,7 @@ impl PruneSenderRecoveryStage { /// Create new prune sender recovery stage with the given prune mode and commit threshold. pub fn new(prune_mode: PruneMode, commit_threshold: usize) -> Self { Self(PruneStage::new( - PruneModes { sender_recovery: Some(prune_mode), ..PruneModes::none() }, + PruneModes { sender_recovery: Some(prune_mode), ..PruneModes::default() }, commit_threshold, )) } @@ -133,6 +143,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader + + ChainStateBlockReader + StaticFileProviderFactory< Primitives: NodePrimitives, >, diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index f38f77b2247..5df7707df0f 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{keccak256, Address, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{keccak256, Address, BlockNumber, TxHash, TxNumber, B256}; use reth_chainspec::MAINNET; use reth_db::{ test_utils::{create_test_rw_db, create_test_rw_db_with_path, create_test_static_files_dir}, @@ -44,7 +44,8 @@ impl Default for TestStageDB { create_test_rw_db(), MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), - ), + ) + .expect("failed to create test provider factory"), } } } @@ -59,7 +60,8 @@ impl TestStageDB { create_test_rw_db_with_path(path), MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), - ), + ) + .expect("failed to create test provider factory"), } } @@ -150,7 +152,6 @@ impl TestStageDB { writer: Option<&mut StaticFileProviderRWRefMut<'_, EthPrimitives>>, tx: &TX, header: &SealedHeader, - td: U256, ) -> ProviderResult<()> { if let Some(writer) = writer { // Backfill: some tests start at a forward block number, but static files require no @@ -160,14 +161,13 @@ impl TestStageDB { for block_number in 0..header.number { let mut prev = header.clone_header(); prev.number = block_number; - writer.append_header(&prev, U256::ZERO, &B256::ZERO)?; + writer.append_header(&prev, &B256::ZERO)?; } } - writer.append_header(header.header(), td, &header.hash())?; + writer.append_header(header.header(), &header.hash())?; } else { tx.put::(header.number, header.hash())?; - tx.put::(header.number, td.into())?; tx.put::(header.number, header.header().clone())?; } @@ -175,20 +175,16 @@ impl TestStageDB { Ok(()) } - fn insert_headers_inner<'a, I, const TD: bool>(&self, headers: I) -> ProviderResult<()> + fn insert_headers_inner<'a, I>(&self, headers: I) -> ProviderResult<()> where I: IntoIterator, { let provider = self.factory.static_file_provider(); let mut writer = provider.latest_writer(StaticFileSegment::Headers)?; let tx = self.factory.provider_rw()?.into_tx(); - let mut td = U256::ZERO; for header in 
headers { - if TD { - td += header.difficulty; - } - Self::insert_header(Some(&mut writer), &tx, header, td)?; + Self::insert_header(Some(&mut writer), &tx, header)?; } writer.commit()?; @@ -203,17 +199,7 @@ impl TestStageDB { where I: IntoIterator, { - self.insert_headers_inner::(headers) - } - - /// Inserts total difficulty of headers into the corresponding static file and tables. - /// - /// Superset functionality of [`TestStageDB::insert_headers`]. - pub fn insert_headers_with_td<'a, I>(&self, headers: I) -> ProviderResult<()> - where - I: IntoIterator, - { - self.insert_headers_inner::(headers) + self.insert_headers_inner::(headers) } /// Insert ordered collection of [`SealedBlock`] into corresponding tables. @@ -240,7 +226,7 @@ impl TestStageDB { .then(|| provider.latest_writer(StaticFileSegment::Headers).unwrap()); blocks.iter().try_for_each(|block| { - Self::insert_header(headers_writer.as_mut(), &tx, block.sealed_header(), U256::ZERO) + Self::insert_header(headers_writer.as_mut(), &tx, block.sealed_header()) })?; if let Some(mut writer) = headers_writer { diff --git a/crates/stages/types/Cargo.toml b/crates/stages/types/Cargo.toml index 19e15304896..6e70fbe26a0 100644 --- a/crates/stages/types/Cargo.toml +++ b/crates/stages/types/Cargo.toml @@ -24,12 +24,14 @@ modular-bitfield = { workspace = true, optional = true } [dev-dependencies] reth-codecs.workspace = true +reth-trie-common = { workspace = true, features = ["reth-codec"] } alloy-primitives = { workspace = true, features = ["arbitrary", "rand"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true rand.workspace = true bytes.workspace = true +modular-bitfield.workspace = true [features] default = ["std"] diff --git a/crates/stages/types/src/checkpoints.rs b/crates/stages/types/src/checkpoints.rs index 61c399d9ac3..04f4123c9f7 100644 --- a/crates/stages/types/src/checkpoints.rs +++ b/crates/stages/types/src/checkpoints.rs @@ -1,4 +1,6 @@ use super::StageId; +#[cfg(test)] +use alloc::vec; use alloc::{format, string::String, vec::Vec}; use alloy_primitives::{Address, BlockNumber, B256, U256}; use core::ops::RangeInclusive; @@ -287,6 +289,17 @@ pub struct IndexHistoryCheckpoint { pub progress: EntitiesCheckpoint, } +/// Saves the progress of `MerkleChangeSets` stage. +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct MerkleChangeSetsCheckpoint { + /// Block range which this checkpoint is valid for. + pub block_range: CheckpointBlockRange, +} + /// Saves the progress of abstract stage iterating over or downloading entities. #[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] #[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] @@ -386,6 +399,9 @@ impl StageCheckpoint { StageId::IndexStorageHistory | StageId::IndexAccountHistory => { StageUnitCheckpoint::IndexHistory(IndexHistoryCheckpoint::default()) } + StageId::MerkleChangeSets => { + StageUnitCheckpoint::MerkleChangeSets(MerkleChangeSetsCheckpoint::default()) + } _ => return self, }); _ = self.stage_checkpoint.map(|mut checkpoint| checkpoint.set_block_range(from, to)); @@ -411,6 +427,7 @@ impl StageCheckpoint { progress: entities, .. 
}) => Some(entities), + StageUnitCheckpoint::MerkleChangeSets(_) => None, } } } @@ -436,6 +453,8 @@ pub enum StageUnitCheckpoint { Headers(HeadersCheckpoint), /// Saves the progress of Index History stage. IndexHistory(IndexHistoryCheckpoint), + /// Saves the progress of `MerkleChangeSets` stage. + MerkleChangeSets(MerkleChangeSetsCheckpoint), } impl StageUnitCheckpoint { @@ -446,7 +465,8 @@ impl StageUnitCheckpoint { Self::Account(AccountHashingCheckpoint { block_range, .. }) | Self::Storage(StorageHashingCheckpoint { block_range, .. }) | Self::Execution(ExecutionCheckpoint { block_range, .. }) | - Self::IndexHistory(IndexHistoryCheckpoint { block_range, .. }) => { + Self::IndexHistory(IndexHistoryCheckpoint { block_range, .. }) | + Self::MerkleChangeSets(MerkleChangeSetsCheckpoint { block_range, .. }) => { let old_range = *block_range; *block_range = CheckpointBlockRange { from, to }; @@ -544,6 +564,15 @@ stage_unit_checkpoints!( index_history_stage_checkpoint, /// Sets the stage checkpoint to index history. with_index_history_stage_checkpoint + ), + ( + 6, + MerkleChangeSets, + MerkleChangeSetsCheckpoint, + /// Returns the merkle changesets stage checkpoint, if any. + merkle_changesets_stage_checkpoint, + /// Sets the stage checkpoint to merkle changesets. + with_merkle_changesets_stage_checkpoint ) ); diff --git a/crates/stages/types/src/id.rs b/crates/stages/types/src/id.rs index 78d7e0ec1b6..8c0a91c8731 100644 --- a/crates/stages/types/src/id.rs +++ b/crates/stages/types/src/id.rs @@ -25,6 +25,7 @@ pub enum StageId { TransactionLookup, IndexStorageHistory, IndexAccountHistory, + MerkleChangeSets, Prune, Finish, /// Other custom stage with a provided string identifier. @@ -39,7 +40,7 @@ static ENCODED_STAGE_IDS: OnceLock>> = OnceLock::new(); impl StageId { /// All supported Stages - pub const ALL: [Self; 15] = [ + pub const ALL: [Self; 16] = [ Self::Era, Self::Headers, Self::Bodies, @@ -53,6 +54,7 @@ impl StageId { Self::TransactionLookup, Self::IndexStorageHistory, Self::IndexAccountHistory, + Self::MerkleChangeSets, Self::Prune, Self::Finish, ]; @@ -88,6 +90,7 @@ impl StageId { Self::TransactionLookup => "TransactionLookup", Self::IndexAccountHistory => "IndexAccountHistory", Self::IndexStorageHistory => "IndexStorageHistory", + Self::MerkleChangeSets => "MerkleChangeSets", Self::Prune => "Prune", Self::Finish => "Finish", Self::Other(s) => s, diff --git a/crates/stages/types/src/lib.rs b/crates/stages/types/src/lib.rs index 4e30ce27cd7..83585fee7ce 100644 --- a/crates/stages/types/src/lib.rs +++ b/crates/stages/types/src/lib.rs @@ -18,8 +18,8 @@ pub use id::StageId; mod checkpoints; pub use checkpoints::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, - HeadersCheckpoint, IndexHistoryCheckpoint, MerkleCheckpoint, StageCheckpoint, - StageUnitCheckpoint, StorageHashingCheckpoint, StorageRootMerkleCheckpoint, + HeadersCheckpoint, IndexHistoryCheckpoint, MerkleChangeSetsCheckpoint, MerkleCheckpoint, + StageCheckpoint, StageUnitCheckpoint, StorageHashingCheckpoint, StorageRootMerkleCheckpoint, }; mod execution; diff --git a/crates/stateless/Cargo.toml b/crates/stateless/Cargo.toml index 36a891ac3d2..8adbae28ae3 100644 --- a/crates/stateless/Cargo.toml +++ b/crates/stateless/Cargo.toml @@ -36,3 +36,11 @@ thiserror.workspace = true itertools.workspace = true serde.workspace = true serde_with.workspace = true + +k256 = { workspace = true, optional = true } +secp256k1 = { workspace = true, optional = true } + +[features] +default = ["k256"] +k256 = 
["dep:k256"] +secp256k1 = ["dep:secp256k1"] diff --git a/crates/stateless/src/lib.rs b/crates/stateless/src/lib.rs index 1e858b9f9fb..6813638485e 100644 --- a/crates/stateless/src/lib.rs +++ b/crates/stateless/src/lib.rs @@ -35,9 +35,12 @@ extern crate alloc; +mod recover_block; /// Sparse trie implementation for stateless validation pub mod trie; +#[doc(inline)] +pub use recover_block::UncompressedPublicKey; #[doc(inline)] pub use trie::StatelessTrie; #[doc(inline)] diff --git a/crates/stateless/src/recover_block.rs b/crates/stateless/src/recover_block.rs new file mode 100644 index 00000000000..15db1fe55e1 --- /dev/null +++ b/crates/stateless/src/recover_block.rs @@ -0,0 +1,143 @@ +use crate::validation::StatelessValidationError; +use alloc::vec::Vec; +use alloy_consensus::BlockHeader; +use alloy_primitives::{Address, Signature, B256}; +use core::ops::Deref; +use reth_chainspec::EthereumHardforks; +use reth_ethereum_primitives::{Block, TransactionSigned}; +use reth_primitives_traits::{Block as _, RecoveredBlock}; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, Bytes}; + +#[cfg(all(feature = "k256", feature = "secp256k1"))] +use k256 as _; + +/// Serialized uncompressed public key +#[serde_as] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UncompressedPublicKey(#[serde_as(as = "Bytes")] pub [u8; 65]); + +impl Deref for UncompressedPublicKey { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +/// Verifies all transactions in a block against a list of public keys and signatures. +/// +/// Returns a `RecoveredBlock` +pub(crate) fn recover_block_with_public_keys( + block: Block, + public_keys: Vec, + chain_spec: &ChainSpec, +) -> Result, StatelessValidationError> +where + ChainSpec: EthereumHardforks, +{ + if block.body().transactions.len() != public_keys.len() { + return Err(StatelessValidationError::Custom( + "Number of public keys must match number of transactions", + )); + } + + // Determine if we're in the Homestead fork for signature validation + let is_homestead = chain_spec.is_homestead_active_at_block(block.header().number()); + + // Verify each transaction signature against its corresponding public key + let senders = public_keys + .iter() + .zip(block.body().transactions()) + .map(|(vk, tx)| verify_and_compute_sender(vk, tx, is_homestead)) + .collect::, _>>()?; + + // Create RecoveredBlock with verified senders + let block_hash = block.hash_slow(); + Ok(RecoveredBlock::new(block, senders, block_hash)) +} + +/// Verifies a transaction using its signature and the given public key. +/// +/// Note: If the signature or the public key is incorrect, then this method +/// will return an error. +/// +/// Returns the address derived from the public key. 
+fn verify_and_compute_sender( + vk: &UncompressedPublicKey, + tx: &TransactionSigned, + is_homestead: bool, +) -> Result { + let sig = tx.signature(); + + // non-normalized signatures are only valid pre-homestead + let sig_is_normalized = sig.normalize_s().is_none(); + if is_homestead && !sig_is_normalized { + return Err(StatelessValidationError::HomesteadSignatureNotNormalized); + } + let sig_hash = tx.signature_hash(); + #[cfg(all(feature = "k256", feature = "secp256k1"))] + { + let _ = verify_and_compute_sender_unchecked_k256; + } + #[cfg(feature = "secp256k1")] + { + verify_and_compute_sender_unchecked_secp256k1(vk, sig, sig_hash) + } + #[cfg(all(feature = "k256", not(feature = "secp256k1")))] + { + verify_and_compute_sender_unchecked_k256(vk, sig, sig_hash) + } + #[cfg(not(any(feature = "secp256k1", feature = "k256")))] + { + let _ = vk; + let _ = tx; + let _: B256 = sig_hash; + let _: &Signature = sig; + + unimplemented!("Must choose either k256 or secp256k1 feature") + } +} +#[cfg(feature = "k256")] +fn verify_and_compute_sender_unchecked_k256( + vk: &UncompressedPublicKey, + sig: &Signature, + sig_hash: B256, +) -> Result { + use k256::ecdsa::{signature::hazmat::PrehashVerifier, VerifyingKey}; + + let vk = + VerifyingKey::from_sec1_bytes(vk).map_err(|_| StatelessValidationError::SignerRecovery)?; + + sig.to_k256() + .and_then(|sig| vk.verify_prehash(sig_hash.as_slice(), &sig)) + .map_err(|_| StatelessValidationError::SignerRecovery)?; + + Ok(Address::from_public_key(&vk)) +} + +#[cfg(feature = "secp256k1")] +fn verify_and_compute_sender_unchecked_secp256k1( + vk: &UncompressedPublicKey, + sig: &Signature, + sig_hash: B256, +) -> Result { + use secp256k1::{ecdsa::Signature as SecpSignature, Message, PublicKey, SECP256K1}; + + let public_key = + PublicKey::from_slice(vk).map_err(|_| StatelessValidationError::SignerRecovery)?; + + let mut sig_bytes = [0u8; 64]; + sig_bytes[0..32].copy_from_slice(&sig.r().to_be_bytes::<32>()); + sig_bytes[32..64].copy_from_slice(&sig.s().to_be_bytes::<32>()); + + let signature = SecpSignature::from_compact(&sig_bytes) + .map_err(|_| StatelessValidationError::SignerRecovery)?; + + let message = Message::from_digest(sig_hash.0); + SECP256K1 + .verify_ecdsa(&message, &signature, &public_key) + .map_err(|_| StatelessValidationError::SignerRecovery)?; + + Ok(Address::from_raw_public_key(&vk[1..])) +} diff --git a/crates/stateless/src/validation.rs b/crates/stateless/src/validation.rs index 23308bcfa55..db5f317ab22 100644 --- a/crates/stateless/src/validation.rs +++ b/crates/stateless/src/validation.rs @@ -1,4 +1,5 @@ use crate::{ + recover_block::{recover_block_with_public_keys, UncompressedPublicKey}, trie::{StatelessSparseTrie, StatelessTrie}, witness_db::WitnessDatabase, ExecutionWitness, @@ -16,11 +17,17 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::{Consensus, HeaderValidator}; use reth_errors::ConsensusError; use reth_ethereum_consensus::{validate_block_post_execution, EthBeaconConsensus}; -use reth_ethereum_primitives::{Block, EthPrimitives}; -use reth_evm::{execute::Executor, ConfigureEvm}; +use reth_ethereum_primitives::{Block, EthPrimitives, EthereumReceipt}; +use reth_evm::{ + execute::{BlockExecutionOutput, Executor}, + ConfigureEvm, +}; use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_trie_common::{HashedPostState, KeccakKeyHasher}; +/// BLOCKHASH ancestor lookup window limit per EVM (number of most recent blocks accessible). 
+const BLOCKHASH_ANCESTOR_LIMIT: usize = 256; + /// Errors that can occur during stateless validation. #[derive(Debug, thiserror::Error)] pub enum StatelessValidationError { @@ -86,6 +93,14 @@ pub enum StatelessValidationError { expected: B256, }, + /// Error during signer recovery. + #[error("signer recovery failed")] + SignerRecovery, + + /// Error when signature has non-normalized s value in homestead block. + #[error("signature s value not normalized for homestead block")] + HomesteadSignatureNotNormalized, + /// Custom error. #[error("{0}")] Custom(&'static str), @@ -127,17 +142,19 @@ pub enum StatelessValidationError { /// If all steps succeed the function returns `Some` containing the hash of the validated /// `current_block`. pub fn stateless_validation( - current_block: RecoveredBlock, + current_block: Block, + public_keys: Vec, witness: ExecutionWitness, chain_spec: Arc, evm_config: E, -) -> Result +) -> Result<(B256, BlockExecutionOutput), StatelessValidationError> where ChainSpec: Send + Sync + EthChainSpec

+ EthereumHardforks + Debug, E: ConfigureEvm + Clone + 'static, { stateless_validation_with_trie::( current_block, + public_keys, witness, chain_spec, evm_config, @@ -151,16 +168,19 @@ where /// /// See `stateless_validation` for detailed documentation of the validation process. pub fn stateless_validation_with_trie( - current_block: RecoveredBlock, + current_block: Block, + public_keys: Vec, witness: ExecutionWitness, chain_spec: Arc, evm_config: E, -) -> Result +) -> Result<(B256, BlockExecutionOutput), StatelessValidationError> where T: StatelessTrie, ChainSpec: Send + Sync + EthChainSpec
+ EthereumHardforks + Debug, E: ConfigureEvm + Clone + 'static, { + let current_block = recover_block_with_public_keys(current_block, public_keys, &*chain_spec)?; + let mut ancestor_headers: Vec<_> = witness .headers .iter() @@ -175,6 +195,15 @@ where // ascending order. ancestor_headers.sort_by_key(|header| header.number()); + // Enforce BLOCKHASH ancestor headers limit (256 most recent blocks) + let count = ancestor_headers.len(); + if count > BLOCKHASH_ANCESTOR_LIMIT { + return Err(StatelessValidationError::AncestorHeaderLimitExceeded { + count, + limit: BLOCKHASH_ANCESTOR_LIMIT, + }); + } + // Check that the ancestor headers form a contiguous chain and are not just random headers. let ancestor_hashes = compute_ancestor_hashes(¤t_block, &ancestor_headers)?; @@ -216,7 +245,7 @@ where } // Return block hash - Ok(current_block.hash_slow()) + Ok((current_block.hash_slow(), output)) } /// Performs consensus validation checks on a block without execution or state validation. diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs deleted file mode 100644 index 5232061caaf..00000000000 --- a/crates/static-file/static-file/src/segments/headers.rs +++ /dev/null @@ -1,62 +0,0 @@ -use crate::segments::Segment; -use alloy_primitives::BlockNumber; -use reth_codecs::Compact; -use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{providers::StaticFileWriter, DBProvider, StaticFileProviderFactory}; -use reth_static_file_types::StaticFileSegment; -use reth_storage_errors::provider::ProviderResult; -use std::ops::RangeInclusive; - -/// Static File segment responsible for [`StaticFileSegment::Headers`] part of data. 
-#[derive(Debug, Default)] -pub struct Headers; - -impl Segment for Headers -where - Provider: StaticFileProviderFactory> - + DBProvider, -{ - fn segment(&self) -> StaticFileSegment { - StaticFileSegment::Headers - } - - fn copy_to_static_files( - &self, - provider: Provider, - block_range: RangeInclusive, - ) -> ProviderResult<()> { - let static_file_provider = provider.static_file_provider(); - let mut static_file_writer = - static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Headers)?; - - let mut headers_cursor = provider - .tx_ref() - .cursor_read::::BlockHeader>>( - )?; - let headers_walker = headers_cursor.walk_range(block_range.clone())?; - - let mut header_td_cursor = - provider.tx_ref().cursor_read::()?; - let header_td_walker = header_td_cursor.walk_range(block_range.clone())?; - - let mut canonical_headers_cursor = - provider.tx_ref().cursor_read::()?; - let canonical_headers_walker = canonical_headers_cursor.walk_range(block_range)?; - - for ((header_entry, header_td_entry), canonical_header_entry) in - headers_walker.zip(header_td_walker).zip(canonical_headers_walker) - { - let (header_block, header) = header_entry?; - let (header_td_block, header_td) = header_td_entry?; - let (canonical_header_block, canonical_header) = canonical_header_entry?; - - debug_assert_eq!(header_block, header_td_block); - debug_assert_eq!(header_td_block, canonical_header_block); - - static_file_writer.append_header(&header, header_td.0, &canonical_header)?; - } - - Ok(()) - } -} diff --git a/crates/static-file/static-file/src/segments/mod.rs b/crates/static-file/static-file/src/segments/mod.rs index fc79effdd5a..a1499a2eaa8 100644 --- a/crates/static-file/static-file/src/segments/mod.rs +++ b/crates/static-file/static-file/src/segments/mod.rs @@ -1,11 +1,5 @@ //! `StaticFile` segment implementations and utilities. -mod transactions; -pub use transactions::Transactions; - -mod headers; -pub use headers::Headers; - mod receipts; pub use receipts::Receipts; diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs deleted file mode 100644 index 74cb58ed708..00000000000 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ /dev/null @@ -1,60 +0,0 @@ -use crate::segments::Segment; -use alloy_primitives::BlockNumber; -use reth_codecs::Compact; -use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{ - providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory, -}; -use reth_static_file_types::StaticFileSegment; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use std::ops::RangeInclusive; - -/// Static File segment responsible for [`StaticFileSegment::Transactions`] part of data. -#[derive(Debug, Default)] -pub struct Transactions; - -impl Segment for Transactions -where - Provider: StaticFileProviderFactory> - + DBProvider - + BlockReader, -{ - fn segment(&self) -> StaticFileSegment { - StaticFileSegment::Transactions - } - - /// Write transactions from database table [`tables::Transactions`] to static files with segment - /// [`StaticFileSegment::Transactions`] for the provided block range. 
- fn copy_to_static_files( - &self, - provider: Provider, - block_range: RangeInclusive, - ) -> ProviderResult<()> { - let static_file_provider = provider.static_file_provider(); - let mut static_file_writer = static_file_provider - .get_writer(*block_range.start(), StaticFileSegment::Transactions)?; - - for block in block_range { - static_file_writer.increment_block(block)?; - - let block_body_indices = provider - .block_body_indices(block)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; - - let mut transactions_cursor = provider.tx_ref().cursor_read::::SignedTx, - >>()?; - let transactions_walker = - transactions_cursor.walk_range(block_body_indices.tx_num_range())?; - - for entry in transactions_walker { - let (tx_number, transaction) = entry?; - - static_file_writer.append_transaction(tx_number, &transaction)?; - } - } - - Ok(()) - } -} diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index b6d205a42e1..2e7aa4b9df4 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -131,12 +131,6 @@ where let mut segments = Vec::<(Box>, RangeInclusive)>::new(); - if let Some(block_range) = targets.transactions.clone() { - segments.push((Box::new(segments::Transactions), block_range)); - } - if let Some(block_range) = targets.headers.clone() { - segments.push((Box::new(segments::Headers), block_range)); - } if let Some(block_range) = targets.receipts.clone() { segments.push((Box::new(segments::Receipts), block_range)); } @@ -178,16 +172,11 @@ where /// Returns highest block numbers for all static file segments. pub fn copy_to_static_files(&self) -> ProviderResult { let provider = self.provider.database_provider_ro()?; - let stages_checkpoints = [StageId::Headers, StageId::Execution, StageId::Bodies] - .into_iter() + let stages_checkpoints = std::iter::once(StageId::Execution) .map(|stage| provider.get_stage_checkpoint(stage).map(|c| c.map(|c| c.block_number))) .collect::, _>>()?; - let highest_static_files = HighestStaticFiles { - headers: stages_checkpoints[0], - receipts: stages_checkpoints[1], - transactions: stages_checkpoints[2], - }; + let highest_static_files = HighestStaticFiles { receipts: stages_checkpoints[0] }; let targets = self.get_static_file_targets(highest_static_files)?; self.run(targets)?; @@ -204,13 +193,8 @@ where let highest_static_files = self.provider.static_file_provider().get_highest_static_files(); let targets = StaticFileTargets { - headers: finalized_block_numbers.headers.and_then(|finalized_block_number| { - self.get_static_file_target(highest_static_files.headers, finalized_block_number) - }), // StaticFile receipts only if they're not pruned according to the user configuration - receipts: if self.prune_modes.receipts.is_none() && - self.prune_modes.receipts_log_filter.is_empty() - { + receipts: if self.prune_modes.receipts.is_none() { finalized_block_numbers.receipts.and_then(|finalized_block_number| { self.get_static_file_target( highest_static_files.receipts, @@ -220,12 +204,6 @@ where } else { None }, - transactions: finalized_block_numbers.transactions.and_then(|finalized_block_number| { - self.get_static_file_target( - highest_static_files.transactions, - finalized_block_number, - ) - }), }; trace!( @@ -315,69 +293,36 @@ mod tests { StaticFileProducerInner::new(provider_factory.clone(), PruneModes::default()); let targets = static_file_producer - 
.get_static_file_targets(HighestStaticFiles { - headers: Some(1), - receipts: Some(1), - transactions: Some(1), - }) + .get_static_file_targets(HighestStaticFiles { receipts: Some(1) }) .expect("get static file targets"); - assert_eq!( - targets, - StaticFileTargets { - headers: Some(0..=1), - receipts: Some(0..=1), - transactions: Some(0..=1) - } - ); + assert_eq!(targets, StaticFileTargets { receipts: Some(0..=1) }); assert_matches!(static_file_producer.run(targets), Ok(_)); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(1), receipts: Some(1), transactions: Some(1) } + HighestStaticFiles { receipts: Some(1) } ); let targets = static_file_producer - .get_static_file_targets(HighestStaticFiles { - headers: Some(3), - receipts: Some(3), - transactions: Some(3), - }) + .get_static_file_targets(HighestStaticFiles { receipts: Some(3) }) .expect("get static file targets"); - assert_eq!( - targets, - StaticFileTargets { - headers: Some(2..=3), - receipts: Some(2..=3), - transactions: Some(2..=3) - } - ); + assert_eq!(targets, StaticFileTargets { receipts: Some(2..=3) }); assert_matches!(static_file_producer.run(targets), Ok(_)); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) } + HighestStaticFiles { receipts: Some(3) } ); let targets = static_file_producer - .get_static_file_targets(HighestStaticFiles { - headers: Some(4), - receipts: Some(4), - transactions: Some(4), - }) + .get_static_file_targets(HighestStaticFiles { receipts: Some(4) }) .expect("get static file targets"); - assert_eq!( - targets, - StaticFileTargets { - headers: Some(4..=4), - receipts: Some(4..=4), - transactions: Some(4..=4) - } - ); + assert_eq!(targets, StaticFileTargets { receipts: Some(4..=4) }); assert_matches!( static_file_producer.run(targets), Err(ProviderError::BlockBodyIndicesNotFound(4)) ); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) } + HighestStaticFiles { receipts: Some(3) } ); } @@ -401,11 +346,7 @@ mod tests { std::thread::sleep(Duration::from_millis(100)); } let targets = locked_producer - .get_static_file_targets(HighestStaticFiles { - headers: Some(1), - receipts: Some(1), - transactions: Some(1), - }) + .get_static_file_targets(HighestStaticFiles { receipts: Some(1) }) .expect("get static file targets"); assert_matches!(locked_producer.run(targets.clone()), Ok(_)); tx.send(targets).unwrap(); diff --git a/crates/static-file/types/Cargo.toml b/crates/static-file/types/Cargo.toml index e2cd90c2686..18d85a37c20 100644 --- a/crates/static-file/types/Cargo.toml +++ b/crates/static-file/types/Cargo.toml @@ -21,6 +21,7 @@ strum = { workspace = true, features = ["derive"] } [dev-dependencies] reth-nippy-jar.workspace = true +serde_json.workspace = true [features] default = ["std"] @@ -29,5 +30,6 @@ std = [ "derive_more/std", "serde/std", "strum/std", + "serde_json/std", ] clap = ["dep:clap"] diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 53be4f6d1c1..73d0ffe0506 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -27,39 +27,15 @@ pub const DEFAULT_BLOCKS_PER_STATIC_FILE: u64 = 500_000; /// Highest static file block numbers, per data segment. 
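With only the receipts segment tracked, the minimum and maximum block numbers reported by this struct coincide; a small illustrative check using the helpers exercised in the tests below:

let files = HighestStaticFiles { receipts: Some(100) };
assert_eq!(files.min_block_num(), Some(100));
assert_eq!(files.max_block_num(), Some(100));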
#[derive(Debug, Clone, Copy, Default, Eq, PartialEq)] pub struct HighestStaticFiles { - /// Highest static file block of headers, inclusive. - /// If [`None`], no static file is available. - pub headers: Option, /// Highest static file block of receipts, inclusive. /// If [`None`], no static file is available. pub receipts: Option, - /// Highest static file block of transactions, inclusive. - /// If [`None`], no static file is available. - pub transactions: Option, } impl HighestStaticFiles { - /// Returns the highest static file if it exists for a segment - pub const fn highest(&self, segment: StaticFileSegment) -> Option { - match segment { - StaticFileSegment::Headers => self.headers, - StaticFileSegment::Transactions => self.transactions, - StaticFileSegment::Receipts => self.receipts, - } - } - - /// Returns a mutable reference to a static file segment - pub const fn as_mut(&mut self, segment: StaticFileSegment) -> &mut Option { - match segment { - StaticFileSegment::Headers => &mut self.headers, - StaticFileSegment::Transactions => &mut self.transactions, - StaticFileSegment::Receipts => &mut self.receipts, - } - } - /// Returns an iterator over all static file segments fn iter(&self) -> impl Iterator> { - [self.headers, self.transactions, self.receipts].into_iter() + [self.receipts].into_iter() } /// Returns the minimum block of all segments. @@ -76,41 +52,33 @@ impl HighestStaticFiles { /// Static File targets, per data segment, measured in [`BlockNumber`]. #[derive(Debug, Clone, Eq, PartialEq)] pub struct StaticFileTargets { - /// Targeted range of headers. - pub headers: Option>, /// Targeted range of receipts. pub receipts: Option>, - /// Targeted range of transactions. - pub transactions: Option>, } impl StaticFileTargets { /// Returns `true` if any of the targets are [Some]. pub const fn any(&self) -> bool { - self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some() + self.receipts.is_some() } /// Returns `true` if all targets are either [`None`] or has beginning of the range equal to the /// highest static file. pub fn is_contiguous_to_highest_static_files(&self, static_files: HighestStaticFiles) -> bool { - [ - (self.headers.as_ref(), static_files.headers), - (self.receipts.as_ref(), static_files.receipts), - (self.transactions.as_ref(), static_files.transactions), - ] - .iter() - .all(|(target_block_range, highest_static_file_block)| { - target_block_range.is_none_or(|target_block_range| { - *target_block_range.start() == - highest_static_file_block - .map_or(0, |highest_static_file_block| highest_static_file_block + 1) - }) - }) + core::iter::once(&(self.receipts.as_ref(), static_files.receipts)).all( + |(target_block_range, highest_static_file_block)| { + target_block_range.is_none_or(|target_block_range| { + *target_block_range.start() == + highest_static_file_block + .map_or(0, |highest_static_file_block| highest_static_file_block + 1) + }) + }, + ) } } /// Each static file has a fixed number of blocks. This gives out the range where the requested -/// block is positioned. Used for segment filename. +/// block is positioned, according to the specified number of blocks per static file. 
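A worked example of the range computation described above, assuming the default of 500_000 blocks per static file:

// Block 600_123 lands in the second file, which covers blocks 500_000..=999_999.
let range = find_fixed_range(600_123, DEFAULT_BLOCKS_PER_STATIC_FILE);
assert_eq!((range.start(), range.end()), (500_000, 999_999));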
pub const fn find_fixed_range( block: BlockNumber, blocks_per_static_file: u64, @@ -123,42 +91,9 @@ pub const fn find_fixed_range( mod tests { use super::*; - #[test] - fn test_highest_static_files_highest() { - let files = - HighestStaticFiles { headers: Some(100), receipts: Some(200), transactions: None }; - - // Test for headers segment - assert_eq!(files.highest(StaticFileSegment::Headers), Some(100)); - - // Test for receipts segment - assert_eq!(files.highest(StaticFileSegment::Receipts), Some(200)); - - // Test for transactions segment - assert_eq!(files.highest(StaticFileSegment::Transactions), None); - } - - #[test] - fn test_highest_static_files_as_mut() { - let mut files = HighestStaticFiles::default(); - - // Modify headers value - *files.as_mut(StaticFileSegment::Headers) = Some(150); - assert_eq!(files.headers, Some(150)); - - // Modify receipts value - *files.as_mut(StaticFileSegment::Receipts) = Some(250); - assert_eq!(files.receipts, Some(250)); - - // Modify transactions value - *files.as_mut(StaticFileSegment::Transactions) = Some(350); - assert_eq!(files.transactions, Some(350)); - } - #[test] fn test_highest_static_files_min() { - let files = - HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: None }; + let files = HighestStaticFiles { receipts: Some(100) }; // Minimum value among the available segments assert_eq!(files.min_block_num(), Some(100)); @@ -170,11 +105,10 @@ mod tests { #[test] fn test_highest_static_files_max() { - let files = - HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: Some(500) }; + let files = HighestStaticFiles { receipts: Some(100) }; // Maximum value among the available segments - assert_eq!(files.max_block_num(), Some(500)); + assert_eq!(files.max_block_num(), Some(100)); let empty_files = HighestStaticFiles::default(); // No values, should return None diff --git a/crates/static-file/types/src/segment.rs b/crates/static-file/types/src/segment.rs index 0458bea1678..be72510fbb4 100644 --- a/crates/static-file/types/src/segment.rs +++ b/crates/static-file/types/src/segment.rs @@ -7,7 +7,7 @@ use alloy_primitives::TxNumber; use core::{ops::RangeInclusive, str::FromStr}; use derive_more::Display; use serde::{Deserialize, Serialize}; -use strum::{AsRefStr, EnumString}; +use strum::{EnumIs, EnumString}; #[derive( Debug, @@ -21,20 +21,18 @@ use strum::{AsRefStr, EnumString}; Deserialize, Serialize, EnumString, - AsRefStr, Display, + EnumIs, )] +#[strum(serialize_all = "kebab-case")] #[cfg_attr(feature = "clap", derive(clap::ValueEnum))] /// Segment of the data that can be moved to static files. pub enum StaticFileSegment { - #[strum(serialize = "headers")] /// Static File segment responsible for the `CanonicalHeaders`, `Headers`, /// `HeaderTerminalDifficulties` tables. Headers, - #[strum(serialize = "transactions")] /// Static File segment responsible for the `Transactions` table. Transactions, - #[strum(serialize = "receipts")] /// Static File segment responsible for the `Receipts` table. Receipts, } @@ -42,6 +40,8 @@ pub enum StaticFileSegment { impl StaticFileSegment { /// Returns the segment as a string. pub const fn as_str(&self) -> &'static str { + // `strum` doesn't generate a doc comment for `into_str` when using `IntoStaticStr` derive + // macro, so we need to manually implement it. 
match self { Self::Headers => "headers", Self::Transactions => "transactions", @@ -72,7 +72,7 @@ impl StaticFileSegment { pub fn filename(&self, block_range: &SegmentRangeInclusive) -> String { // ATTENTION: if changing the name format, be sure to reflect those changes in // [`Self::parse_filename`]. - format!("static_file_{}_{}_{}", self.as_ref(), block_range.start(), block_range.end()) + format!("static_file_{}_{}_{}", self.as_str(), block_range.start(), block_range.end()) } /// Returns file name for the provided segment and range, alongside filters, compression. @@ -122,16 +122,6 @@ impl StaticFileSegment { Some((segment, SegmentRangeInclusive::new(block_start, block_end))) } - /// Returns `true` if the segment is `StaticFileSegment::Headers`. - pub const fn is_headers(&self) -> bool { - matches!(self, Self::Headers) - } - - /// Returns `true` if the segment is `StaticFileSegment::Receipts`. - pub const fn is_receipts(&self) -> bool { - matches!(self, Self::Receipts) - } - /// Returns `true` if a segment row is linked to a transaction. pub const fn is_tx_based(&self) -> bool { matches!(self, Self::Receipts | Self::Transactions) @@ -144,7 +134,7 @@ impl StaticFileSegment { } /// A segment header that contains information common to all segments. Used for storage. -#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Hash, Clone)] +#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Hash, Clone, Copy)] pub struct SegmentHeader { /// Defines the expected block range for a static file segment. This attribute is crucial for /// scenarios where the file contains no data, allowing for a representation beyond a @@ -175,14 +165,19 @@ impl SegmentHeader { self.segment } + /// Returns the expected block range. + pub const fn expected_block_range(&self) -> SegmentRangeInclusive { + self.expected_block_range + } + /// Returns the block range. - pub const fn block_range(&self) -> Option<&SegmentRangeInclusive> { - self.block_range.as_ref() + pub const fn block_range(&self) -> Option { + self.block_range } /// Returns the transaction range. - pub const fn tx_range(&self) -> Option<&SegmentRangeInclusive> { - self.tx_range.as_ref() + pub const fn tx_range(&self) -> Option { + self.tx_range } /// The expected block start of the segment. @@ -217,12 +212,12 @@ impl SegmentHeader { /// Number of transactions. pub fn tx_len(&self) -> Option { - self.tx_range.as_ref().map(|r| (r.end() + 1) - r.start()) + self.tx_range.as_ref().map(|r| r.len()) } /// Number of blocks. pub fn block_len(&self) -> Option { - self.block_range.as_ref().map(|r| (r.end() + 1) - r.start()) + self.block_range.as_ref().map(|r| r.len()) } /// Increments block end range depending on segment @@ -329,6 +324,16 @@ impl SegmentRangeInclusive { pub const fn end(&self) -> u64 { self.end } + + /// Returns the length of the inclusive range. + pub const fn len(&self) -> u64 { + self.end.saturating_sub(self.start).saturating_add(1) + } + + /// Returns true if the range is empty. 
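A quick illustration of the inclusive length semantics added here (both endpoints count):

assert_eq!(SegmentRangeInclusive::new(2, 5).len(), 4); // blocks 2, 3, 4, 5
assert_eq!(SegmentRangeInclusive::new(7, 7).len(), 1); // a single block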
+ pub const fn is_empty(&self) -> bool { + self.start > self.end + } } impl core::fmt::Display for SegmentRangeInclusive { @@ -467,4 +472,36 @@ mod tests { ); } } + + /// Used in filename writing/parsing + #[test] + fn test_static_file_segment_str_roundtrip() { + for segment in StaticFileSegment::iter() { + let static_str = segment.as_str(); + assert_eq!(StaticFileSegment::from_str(static_str).unwrap(), segment); + + let expected_str = match segment { + StaticFileSegment::Headers => "headers", + StaticFileSegment::Transactions => "transactions", + StaticFileSegment::Receipts => "receipts", + }; + assert_eq!(static_str, expected_str); + } + } + + /// Used in segment headers serialize/deserialize + #[test] + fn test_static_file_segment_serde_roundtrip() { + for segment in StaticFileSegment::iter() { + let ser = serde_json::to_string(&segment).unwrap(); + assert_eq!(serde_json::from_str::(&ser).unwrap(), segment); + + let expected_str = match segment { + StaticFileSegment::Headers => "Headers", + StaticFileSegment::Transactions => "Transactions", + StaticFileSegment::Receipts => "Receipts", + }; + assert_eq!(ser, format!("\"{expected_str}\"")); + } + } } diff --git a/crates/storage/codecs/derive/src/compact/flags.rs b/crates/storage/codecs/derive/src/compact/flags.rs index b6bad462917..c3e0b988cf6 100644 --- a/crates/storage/codecs/derive/src/compact/flags.rs +++ b/crates/storage/codecs/derive/src/compact/flags.rs @@ -51,7 +51,7 @@ pub(crate) fn generate_flag_struct( quote! { buf.get_u8(), }; - total_bytes.into() + total_bytes ]; let docs = format!( @@ -64,11 +64,11 @@ pub(crate) fn generate_flag_struct( impl<'a> #ident<'a> { #[doc = #bitflag_encoded_bytes] pub const fn bitflag_encoded_bytes() -> usize { - #total_bytes as usize + #total_bytes } #[doc = #bitflag_unused_bits] pub const fn bitflag_unused_bits() -> usize { - #unused_bits as usize + #unused_bits } } } @@ -77,11 +77,11 @@ pub(crate) fn generate_flag_struct( impl #ident { #[doc = #bitflag_encoded_bytes] pub const fn bitflag_encoded_bytes() -> usize { - #total_bytes as usize + #total_bytes } #[doc = #bitflag_unused_bits] pub const fn bitflag_unused_bits() -> usize { - #unused_bits as usize + #unused_bits } } } @@ -123,8 +123,8 @@ fn build_struct_field_flags( fields: Vec<&StructFieldDescriptor>, field_flags: &mut Vec, is_zstd: bool, -) -> u8 { - let mut total_bits = 0; +) -> usize { + let mut total_bits: usize = 0; // Find out the adequate bit size for the length of each field, if applicable. for field in fields { @@ -138,7 +138,7 @@ fn build_struct_field_flags( let name = format_ident!("{name}_len"); let bitsize = get_bit_size(ftype); let bsize = format_ident!("B{bitsize}"); - total_bits += bitsize; + total_bits += bitsize as usize; field_flags.push(quote! { pub #name: #bsize , @@ -170,7 +170,7 @@ fn build_struct_field_flags( /// skipped field. /// /// Returns the total number of bytes used by the flags struct and how many unused bits. 
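A worked example of that padding contract, sketched with the arithmetic only rather than the actual helper:

// 10 flag bits round up to 2 encoded bytes with 6 bits left unused for future fields;
// a multiple of 8 needs no padding at all.
let total_bits: usize = 10;
let unused = (8 - total_bits % 8) % 8;
assert_eq!(((total_bits + unused) / 8, unused), (2, 6));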
-fn pad_flag_struct(total_bits: u8, field_flags: &mut Vec) -> (u8, u8) { +fn pad_flag_struct(total_bits: usize, field_flags: &mut Vec) -> (usize, usize) { let remaining = 8 - total_bits % 8; if remaining == 8 { (total_bits / 8, 0) diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index 78be372c61c..83fc9f5052b 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -82,7 +82,7 @@ pub fn get_fields(data: &Data) -> FieldList { ); load_field(&data_fields.unnamed[0], &mut fields, false); } - syn::Fields::Unit => todo!(), + syn::Fields::Unit => unimplemented!("Compact does not support unit structs"), }, Data::Enum(data) => { for variant in &data.variants { @@ -106,7 +106,7 @@ pub fn get_fields(data: &Data) -> FieldList { } } } - Data::Union(_) => todo!(), + Data::Union(_) => unimplemented!("Compact does not support union types"), } fields @@ -176,7 +176,8 @@ fn should_use_alt_impl(ftype: &str, segment: &syn::PathSegment) -> bool { let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.args.last() && let (Some(path), 1) = (arg_path.path.segments.first(), arg_path.path.segments.len()) && ["B256", "Address", "Address", "Bloom", "TxHash", "BlockHash", "CompactPlaceholder"] - .contains(&path.ident.to_string().as_str()) + .iter() + .any(|&s| path.ident == s) { return true } @@ -237,11 +238,11 @@ mod tests { impl TestStruct { #[doc = "Used bytes by [`TestStructFlags`]"] pub const fn bitflag_encoded_bytes() -> usize { - 2u8 as usize + 2usize } #[doc = "Unused bits for new fields by [`TestStructFlags`]"] pub const fn bitflag_unused_bits() -> usize { - 1u8 as usize + 1usize } } diff --git a/crates/storage/codecs/derive/src/compact/structs.rs b/crates/storage/codecs/derive/src/compact/structs.rs index f8ebda33499..4bafe730624 100644 --- a/crates/storage/codecs/derive/src/compact/structs.rs +++ b/crates/storage/codecs/derive/src/compact/structs.rs @@ -155,7 +155,7 @@ impl<'a> StructHandler<'a> { let (#name, new_buf) = #ident_type::#from_compact_ident(buf, flags.#len() as usize); }); } else { - todo!() + unreachable!("flag-type fields are always compact in Compact derive") } self.lines.push(quote! { buf = new_buf; diff --git a/crates/storage/codecs/src/alloy/transaction/eip1559.rs b/crates/storage/codecs/src/alloy/transaction/eip1559.rs index 6d910a6900c..f13422a2dea 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip1559.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip1559.rs @@ -53,7 +53,8 @@ impl Compact for AlloyTxEip1559 { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxEip1559::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. 
+ let (tx, remaining) = TxEip1559::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, @@ -67,6 +68,6 @@ impl Compact for AlloyTxEip1559 { input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/eip2930.rs b/crates/storage/codecs/src/alloy/transaction/eip2930.rs index aeb08f361be..a5c25a84d4f 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip2930.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip2930.rs @@ -52,7 +52,8 @@ impl Compact for AlloyTxEip2930 { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxEip2930::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. + let (tx, remaining) = TxEip2930::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, nonce: tx.nonce, @@ -63,6 +64,6 @@ impl Compact for AlloyTxEip2930 { access_list: tx.access_list, input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/eip4844.rs b/crates/storage/codecs/src/alloy/transaction/eip4844.rs index 6367f3e08e7..6ea1927f7d5 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip4844.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip4844.rs @@ -68,7 +68,8 @@ impl Compact for AlloyTxEip4844 { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxEip4844::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. + let (tx, remaining) = TxEip4844::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, nonce: tx.nonce, @@ -82,7 +83,7 @@ impl Compact for AlloyTxEip4844 { max_fee_per_blob_gas: tx.max_fee_per_blob_gas, input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/eip7702.rs b/crates/storage/codecs/src/alloy/transaction/eip7702.rs index eab10af0b66..95de81c3804 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip7702.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip7702.rs @@ -57,7 +57,8 @@ impl Compact for AlloyTxEip7702 { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxEip7702::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. + let (tx, remaining) = TxEip7702::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, nonce: tx.nonce, @@ -70,6 +71,6 @@ impl Compact for AlloyTxEip7702 { access_list: tx.access_list, authorization_list: tx.authorization_list, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/legacy.rs b/crates/storage/codecs/src/alloy/transaction/legacy.rs index 1667893dc33..c4caf97ac38 100644 --- a/crates/storage/codecs/src/alloy/transaction/legacy.rs +++ b/crates/storage/codecs/src/alloy/transaction/legacy.rs @@ -67,7 +67,8 @@ impl Compact for AlloyTxLegacy { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxLegacy::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. 
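To see why propagating the remaining slice matters, here is a minimal sketch (the `decode_pair` helper is hypothetical) of two values decoded back-to-back:

use reth_codecs::Compact;

fn decode_pair<A: Compact, B: Compact>(buf: &[u8], a_len: usize, b_len: usize) -> (A, B) {
    let (a, rest) = A::from_compact(buf, a_len);
    // If the first call handed back `buf` instead of `rest`, this second read would
    // start over at `a`'s bytes and decode garbage.
    let (b, _) = B::from_compact(rest, b_len);
    (a, b)
}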
+ let (tx, remaining) = TxLegacy::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, @@ -79,6 +80,6 @@ impl Compact for AlloyTxLegacy { input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/optimism.rs b/crates/storage/codecs/src/alloy/transaction/optimism.rs index 40333ce9889..7f9c318e6a1 100644 --- a/crates/storage/codecs/src/alloy/transaction/optimism.rs +++ b/crates/storage/codecs/src/alloy/transaction/optimism.rs @@ -66,7 +66,8 @@ impl Compact for AlloyTxDeposit { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxDeposit::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. + let (tx, remaining) = TxDeposit::from_compact(buf, len); let alloy_tx = Self { source_hash: tx.source_hash, from: tx.from, @@ -77,7 +78,7 @@ impl Compact for AlloyTxDeposit { is_system_transaction: tx.is_system_transaction, input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 67e5f32b07c..1ac37966c2e 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -312,10 +312,9 @@ where return (None, buf) } - let (len, mut buf) = decode_varuint(buf); + let (len, buf) = decode_varuint(buf); - let (element, _) = T::from_compact(&buf[..len], len); - buf.advance(len); + let (element, buf) = T::from_compact(buf, len); (Some(element), buf) } diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 900dd525f4a..f8536bbb1ce 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -31,7 +31,7 @@ alloy-consensus.workspace = true reth-scroll-primitives = { workspace = true, optional = true } # optimism -reth-optimism-primitives = { workspace = true, optional = true } +reth-optimism-primitives = { workspace = true, optional = true, features = ["serde", "reth-codec"] } # codecs modular-bitfield.workspace = true diff --git a/crates/storage/db-api/src/cursor.rs b/crates/storage/db-api/src/cursor.rs index 3aeee949ea1..068b64a3c97 100644 --- a/crates/storage/db-api/src/cursor.rs +++ b/crates/storage/db-api/src/cursor.rs @@ -87,7 +87,7 @@ pub trait DbDupCursorRO { /// | `key` | `subkey` | **Equivalent starting position** | /// |--------|----------|-----------------------------------------| /// | `None` | `None` | [`DbCursorRO::first()`] | - /// | `Some` | `None` | [`DbCursorRO::seek()`] | + /// | `Some` | `None` | [`DbCursorRO::seek_exact()`] | /// | `None` | `Some` | [`DbDupCursorRO::seek_by_key_subkey()`] | /// | `Some` | `Some` | [`DbDupCursorRO::seek_by_key_subkey()`] | fn walk_dup( diff --git a/crates/storage/db-api/src/mock.rs b/crates/storage/db-api/src/mock.rs index 4a8440cb950..60f69ae8f0d 100644 --- a/crates/storage/db-api/src/mock.rs +++ b/crates/storage/db-api/src/mock.rs @@ -1,4 +1,7 @@ -//! Mock database +//! Mock database implementation for testing and development. +//! +//! Provides lightweight mock implementations of database traits. All operations +//! are no-ops that return default values without persisting data. use crate::{ common::{IterPairResult, PairResult, ValueOnlyResult}, @@ -15,20 +18,35 @@ use crate::{ use core::ops::Bound; use std::{collections::BTreeMap, ops::RangeBounds}; -/// Mock database used for testing with inner `BTreeMap` structure +/// Mock database implementation for testing and development. 
+/// +/// Provides a lightweight implementation of the [`Database`] trait suitable +/// for testing scenarios where actual database operations are not required. #[derive(Clone, Debug, Default)] pub struct DatabaseMock { - /// Main data. TODO (Make it table aware) + /// Internal data storage using a `BTreeMap`. + /// + /// TODO: Make the mock database table-aware by properly utilizing + /// this data structure to simulate realistic database behavior during testing. pub data: BTreeMap, Vec>, } impl Database for DatabaseMock { type TX = TxMock; type TXMut = TxMock; + + /// Creates a new read-only transaction. + /// + /// This always succeeds and returns a default [`TxMock`] instance. + /// The mock transaction doesn't actually perform any database operations. fn tx(&self) -> Result { Ok(TxMock::default()) } + /// Creates a new read-write transaction. + /// + /// This always succeeds and returns a default [`TxMock`] instance. + /// The mock transaction doesn't actually perform any database operations. fn tx_mut(&self) -> Result { Ok(TxMock::default()) } @@ -36,10 +54,14 @@ impl Database for DatabaseMock { impl DatabaseMetrics for DatabaseMock {} -/// Mock read only tx +/// Mock transaction implementation for testing and development. +/// +/// Implements both [`DbTx`] and [`DbTxMut`] traits. All operations are no-ops +/// that return success or default values, suitable for testing database operations +/// without side effects. #[derive(Debug, Clone, Default)] pub struct TxMock { - /// Table representation + /// Internal table representation (currently unused). _table: BTreeMap, Vec>, } @@ -47,10 +69,20 @@ impl DbTx for TxMock { type Cursor = CursorMock; type DupCursor = CursorMock; + /// Retrieves a value by key from the specified table. + /// + /// **Mock behavior**: Always returns `None` regardless of the key. + /// This simulates a table with no data, which is typical for testing + /// scenarios where you want to verify that read operations are called + /// correctly without actually storing data. fn get(&self, _key: T::Key) -> Result, DatabaseError> { Ok(None) } + /// Retrieves a value by encoded key from the specified table. + /// + /// **Mock behavior**: Always returns `None` regardless of the encoded key. + /// This is equivalent to [`Self::get`] but works with pre-encoded keys. fn get_by_encoded_key( &self, _key: &::Encoded, @@ -58,24 +90,48 @@ impl DbTx for TxMock { Ok(None) } + /// Commits the transaction. + /// + /// **Mock behavior**: Always returns `Ok(true)`, indicating successful commit. + /// No actual data is persisted since this is a mock implementation. fn commit(self) -> Result { Ok(true) } + /// Aborts the transaction. + /// + /// **Mock behavior**: No-op. Since no data is actually stored in the mock, + /// there's nothing to rollback. fn abort(self) {} + /// Creates a read-only cursor for the specified table. + /// + /// **Mock behavior**: Returns a default [`CursorMock`] that will not + /// iterate over any data (all cursor operations return `None`). fn cursor_read(&self) -> Result, DatabaseError> { Ok(CursorMock { _cursor: 0 }) } + /// Creates a read-only duplicate cursor for the specified duplicate sort table. + /// + /// **Mock behavior**: Returns a default [`CursorMock`] that will not + /// iterate over any data (all cursor operations return `None`). fn cursor_dup_read(&self) -> Result, DatabaseError> { Ok(CursorMock { _cursor: 0 }) } + /// Returns the number of entries in the specified table. 
+ /// + /// **Mock behavior**: Returns the length of the internal `_table` `BTreeMap`, + /// which is typically 0 since no data is actually stored. fn entries(&self) -> Result { Ok(self._table.len()) } + /// Disables long read transaction safety checks. + /// + /// **Mock behavior**: No-op. This is a performance optimization that + /// doesn't apply to the mock implementation. fn disable_long_read_transaction_safety(&mut self) {} } @@ -83,10 +139,19 @@ impl DbTxMut for TxMock { type CursorMut = CursorMock; type DupCursorMut = CursorMock; + /// Inserts or updates a key-value pair in the specified table. + /// + /// **Mock behavior**: Always returns `Ok(())` without actually storing + /// the data. This allows tests to verify that write operations are called + /// correctly without side effects. fn put(&self, _key: T::Key, _value: T::Value) -> Result<(), DatabaseError> { Ok(()) } + /// Deletes a key-value pair from the specified table. + /// + /// **Mock behavior**: Always returns `Ok(true)`, indicating successful + /// deletion, without actually removing any data. fn delete( &self, _key: T::Key, @@ -95,14 +160,26 @@ impl DbTxMut for TxMock { Ok(true) } + /// Clears all entries from the specified table. + /// + /// **Mock behavior**: Always returns `Ok(())` without actually clearing + /// any data. This simulates successful table clearing for testing purposes. fn clear(&self) -> Result<(), DatabaseError> { Ok(()) } + /// Creates a write cursor for the specified table. + /// + /// **Mock behavior**: Returns a default [`CursorMock`] that will not + /// iterate over any data and all write operations will be no-ops. fn cursor_write(&self) -> Result, DatabaseError> { Ok(CursorMock { _cursor: 0 }) } + /// Creates a write duplicate cursor for the specified duplicate sort table. + /// + /// **Mock behavior**: Returns a default [`CursorMock`] that will not + /// iterate over any data and all write operations will be no-ops. fn cursor_dup_write(&self) -> Result, DatabaseError> { Ok(CursorMock { _cursor: 0 }) } @@ -110,41 +187,61 @@ impl DbTxMut for TxMock { impl TableImporter for TxMock {} -/// Cursor that iterates over table +/// Mock cursor implementation for testing and development. +/// +/// Implements all cursor traits. All operations are no-ops that return empty +/// results, suitable for testing cursor operations without side effects. #[derive(Debug)] pub struct CursorMock { + /// Internal cursor position (currently unused). _cursor: u32, } impl DbCursorRO for CursorMock { + /// Moves to the first entry in the table. + /// **Mock behavior**: Always returns `None`. fn first(&mut self) -> PairResult { Ok(None) } + /// Seeks to an exact key match. + /// **Mock behavior**: Always returns `None`. fn seek_exact(&mut self, _key: T::Key) -> PairResult { Ok(None) } + /// Seeks to the first key greater than or equal to the given key. + /// **Mock behavior**: Always returns `None`. fn seek(&mut self, _key: T::Key) -> PairResult { Ok(None) } + /// Moves to the next entry. + /// **Mock behavior**: Always returns `None`. fn next(&mut self) -> PairResult { Ok(None) } + /// Moves to the previous entry. + /// **Mock behavior**: Always returns `None`. fn prev(&mut self) -> PairResult { Ok(None) } + /// Moves to the last entry in the table. + /// **Mock behavior**: Always returns `None`. fn last(&mut self) -> PairResult { Ok(None) } + /// Returns the current entry without moving the cursor. + /// **Mock behavior**: Always returns `None`. 
fn current(&mut self) -> PairResult { Ok(None) } + /// Creates a forward walker starting from the given key. + /// **Mock behavior**: Returns an empty walker that won't iterate over any data. fn walk(&mut self, start_key: Option) -> Result, DatabaseError> { let start: IterPairResult = match start_key { Some(key) => >::seek(self, key).transpose(), @@ -154,6 +251,8 @@ impl DbCursorRO for CursorMock { Ok(Walker::new(self, start)) } + /// Creates a range walker for the specified key range. + /// **Mock behavior**: Returns an empty walker that won't iterate over any data. fn walk_range( &mut self, range: impl RangeBounds, @@ -176,6 +275,8 @@ impl DbCursorRO for CursorMock { Ok(RangeWalker::new(self, start, end_key)) } + /// Creates a backward walker starting from the given key. + /// **Mock behavior**: Returns an empty walker that won't iterate over any data. fn walk_back( &mut self, start_key: Option, @@ -189,18 +290,26 @@ impl DbCursorRO for CursorMock { } impl DbDupCursorRO for CursorMock { + /// Moves to the next duplicate entry. + /// **Mock behavior**: Always returns `None`. fn next_dup(&mut self) -> PairResult { Ok(None) } + /// Moves to the next entry with a different key. + /// **Mock behavior**: Always returns `None`. fn next_no_dup(&mut self) -> PairResult { Ok(None) } + /// Moves to the next duplicate value. + /// **Mock behavior**: Always returns `None`. fn next_dup_val(&mut self) -> ValueOnlyResult { Ok(None) } + /// Seeks to a specific key-subkey combination. + /// **Mock behavior**: Always returns `None`. fn seek_by_key_subkey( &mut self, _key: ::Key, @@ -209,6 +318,8 @@ impl DbDupCursorRO for CursorMock { Ok(None) } + /// Creates a duplicate walker for the specified key and subkey. + /// **Mock behavior**: Returns an empty walker that won't iterate over any data. fn walk_dup( &mut self, _key: Option<::Key>, @@ -219,6 +330,8 @@ impl DbDupCursorRO for CursorMock { } impl DbCursorRW for CursorMock { + /// Inserts or updates a key-value pair at the current cursor position. + /// **Mock behavior**: Always succeeds without modifying any data. fn upsert( &mut self, _key: ::Key, @@ -227,6 +340,8 @@ impl DbCursorRW for CursorMock { Ok(()) } + /// Inserts a key-value pair at the current cursor position. + /// **Mock behavior**: Always succeeds without modifying any data. fn insert( &mut self, _key: ::Key, @@ -235,6 +350,8 @@ impl DbCursorRW for CursorMock { Ok(()) } + /// Appends a key-value pair at the end of the table. + /// **Mock behavior**: Always succeeds without modifying any data. fn append( &mut self, _key: ::Key, @@ -243,16 +360,22 @@ impl DbCursorRW for CursorMock { Ok(()) } + /// Deletes the entry at the current cursor position. + /// **Mock behavior**: Always succeeds without modifying any data. fn delete_current(&mut self) -> Result<(), DatabaseError> { Ok(()) } } impl DbDupCursorRW for CursorMock { + /// Deletes all duplicate entries at the current cursor position. + /// **Mock behavior**: Always succeeds without modifying any data. fn delete_current_duplicates(&mut self) -> Result<(), DatabaseError> { Ok(()) } + /// Appends a duplicate key-value pair. + /// **Mock behavior**: Always succeeds without modifying any data. 
fn append_dup(&mut self, _key: ::Key, _value: ::Value) -> Result<(), DatabaseError> { Ok(()) } diff --git a/crates/storage/db-api/src/models/accounts.rs b/crates/storage/db-api/src/models/accounts.rs index 263e362cc6a..41a11e1c7e5 100644 --- a/crates/storage/db-api/src/models/accounts.rs +++ b/crates/storage/db-api/src/models/accounts.rs @@ -176,7 +176,11 @@ impl Decode for AddressStorageKey { } } -impl_fixed_arbitrary!((BlockNumberAddress, 28), (AddressStorageKey, 52)); +impl_fixed_arbitrary!( + (BlockNumberAddress, 28), + (BlockNumberHashedAddress, 40), + (AddressStorageKey, 52) +); #[cfg(test)] mod tests { @@ -209,6 +213,31 @@ mod tests { assert_eq!(bytes, Encode::encode(key)); } + #[test] + fn test_block_number_hashed_address() { + let num = 1u64; + let hash = B256::from_slice(&[0xba; 32]); + let key = BlockNumberHashedAddress((num, hash)); + + let mut bytes = [0u8; 40]; + bytes[..8].copy_from_slice(&num.to_be_bytes()); + bytes[8..].copy_from_slice(hash.as_slice()); + + let encoded = Encode::encode(key); + assert_eq!(encoded, bytes); + + let decoded: BlockNumberHashedAddress = Decode::decode(&encoded).unwrap(); + assert_eq!(decoded, key); + } + + #[test] + fn test_block_number_hashed_address_rand() { + let mut bytes = [0u8; 40]; + rng().fill(bytes.as_mut_slice()); + let key = BlockNumberHashedAddress::arbitrary(&mut Unstructured::new(&bytes)).unwrap(); + assert_eq!(bytes, Encode::encode(key)); + } + #[test] fn test_address_storage_key() { let storage_key = StorageKey::random(); diff --git a/crates/storage/db-api/src/models/metadata.rs b/crates/storage/db-api/src/models/metadata.rs new file mode 100644 index 00000000000..9c2fe405ebb --- /dev/null +++ b/crates/storage/db-api/src/models/metadata.rs @@ -0,0 +1,39 @@ +//! Storage metadata models. + +use reth_codecs::{add_arbitrary_tests, Compact}; +use serde::{Deserialize, Serialize}; + +/// Storage configuration settings for this node. +/// +/// These should be set during `init_genesis` or `init_db` depending on whether we want dictate +/// behaviour of new or old nodes respectively. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize, Compact)] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +#[add_arbitrary_tests(compact)] +pub struct StorageSettings { + /// Whether this node always writes receipts to static files. + /// + /// If this is set to FALSE AND receipt pruning IS ENABLED, all receipts should be written to DB. Otherwise, they should be written to static files. This ensures that older nodes do not need to migrate their current DB tables to static files. For more, read: + pub receipts_in_static_files: bool, +} + +impl StorageSettings { + /// Creates a new `StorageSettings` with default values. + pub const fn new() -> Self { + Self { receipts_in_static_files: false } + } + + /// Creates `StorageSettings` for legacy nodes. + /// + /// This explicitly sets `receipts_in_static_files` to `false`, ensuring older nodes + /// continue writing receipts to the database when receipt pruning is enabled. + pub const fn legacy() -> Self { + Self { receipts_in_static_files: false } + } + + /// Sets the `receipts_static_files` flag to true. 
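A rough usage sketch for the `StorageSettings` constructors above (the `with_receipts_in_static_files` builder it documents is completed just below); the `reth_db_api::models` import path assumes the re-export added later in this diff:

```rust
// Sketch only; field and method names follow metadata.rs as added above.
use reth_db_api::models::StorageSettings;

fn main() {
    // New nodes can opt in to writing receipts to static files.
    let fresh = StorageSettings::new().with_receipts_in_static_files();
    assert!(fresh.receipts_in_static_files);

    // Legacy nodes keep writing receipts to the database when receipt pruning is enabled.
    let legacy = StorageSettings::legacy();
    assert!(!legacy.receipts_in_static_files);
}
```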
+ pub const fn with_receipts_in_static_files(mut self) -> Self { + self.receipts_in_static_files = true; + self + } +} diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 1100a80daa1..31853c9689d 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -12,18 +12,22 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned, TxType}; use reth_primitives_traits::{Account, Bytecode, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; -use reth_trie_common::{StoredNibbles, StoredNibblesSubKey, *}; +use reth_trie_common::{ + StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, TrieChangeSetsEntry, *, +}; use serde::{Deserialize, Serialize}; pub mod accounts; pub mod blocks; pub mod integer_list; +pub mod metadata; pub mod sharded_key; pub mod storage_sharded_key; pub use accounts::*; pub use blocks::*; pub use integer_list::IntegerList; +pub use metadata::*; pub use reth_db_models::{ AccountBeforeTx, ClientVersion, StaticFileBlockWithdrawals, StoredBlockBodyIndices, StoredBlockWithdrawals, @@ -219,6 +223,7 @@ impl_compression_for_compact!( TxType, StorageEntry, BranchNodeCompact, + TrieChangeSetsEntry, StoredNibbles, StoredNibblesSubKey, StorageTrieEntry, diff --git a/crates/storage/db-api/src/table.rs b/crates/storage/db-api/src/table.rs index 5715852a5dd..54517908de7 100644 --- a/crates/storage/db-api/src/table.rs +++ b/crates/storage/db-api/src/table.rs @@ -139,6 +139,9 @@ pub trait TableImporter: DbTxMut { } /// Imports table data from another transaction within a range. + /// + /// This method works correctly with both regular and `DupSort` tables. For `DupSort` tables, + /// all duplicate entries within the range are preserved during import. fn import_table_with_range( &self, source_tx: &R, diff --git a/crates/storage/db-api/src/tables/mod.rs b/crates/storage/db-api/src/tables/mod.rs index a5cb5ff477d..483048383ab 100644 --- a/crates/storage/db-api/src/tables/mod.rs +++ b/crates/storage/db-api/src/tables/mod.rs @@ -21,8 +21,8 @@ use crate::{ accounts::BlockNumberAddress, blocks::{HeaderHash, StoredBlockOmmers}, storage_sharded_key::StorageShardedKey, - AccountBeforeTx, ClientVersion, CompactU256, IntegerList, ShardedKey, - StoredBlockBodyIndices, StoredBlockWithdrawals, + AccountBeforeTx, BlockNumberHashedAddress, ClientVersion, CompactU256, IntegerList, + ShardedKey, StoredBlockBodyIndices, StoredBlockWithdrawals, }, table::{Decode, DupSort, Encode, Table, TableInfo}, }; @@ -32,7 +32,9 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_primitives_traits::{Account, Bytecode, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; -use reth_trie_common::{BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey}; +use reth_trie_common::{ + BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, TrieChangeSetsEntry, +}; use serde::{Deserialize, Serialize}; use std::fmt; @@ -306,7 +308,8 @@ tables! { type Value = HeaderHash; } - /// Stores the total difficulty from a block header. + /// Stores the total difficulty from block headers. + /// Note: Deprecated. table HeaderTerminalDifficulties { type Key = BlockNumber; type Value = CompactU256; @@ -486,6 +489,20 @@ tables! { type SubKey = StoredNibblesSubKey; } + /// Stores the state of a node in the accounts trie prior to a particular block being executed. 
+ table AccountsTrieChangeSets { + type Key = BlockNumber; + type Value = TrieChangeSetsEntry; + type SubKey = StoredNibblesSubKey; + } + + /// Stores the state of a node in a storage trie prior to a particular block being executed. + table StoragesTrieChangeSets { + type Key = BlockNumberHashedAddress; + type Value = TrieChangeSetsEntry; + type SubKey = StoredNibblesSubKey; + } + /// Stores the transaction sender for each canonical transaction. /// It is needed to speed up execution stage and allows fetching signer without doing /// transaction signed recovery @@ -523,6 +540,13 @@ tables! { type Key = ChainStateKey; type Value = BlockNumber; } + + /// Stores generic node metadata as key-value pairs. + /// Can store feature flags, configuration markers, and other node-specific data. + table Metadata { + type Key = String; + type Value = Vec; + } } /// Keys for the `ChainState` table. @@ -531,7 +555,7 @@ pub enum ChainStateKey { /// Last finalized block key LastFinalizedBlock, /// Last safe block key - LastSafeBlockBlock, + LastSafeBlock, } impl Encode for ChainStateKey { @@ -540,7 +564,7 @@ impl Encode for ChainStateKey { fn encode(self) -> Self::Encoded { match self { Self::LastFinalizedBlock => [0], - Self::LastSafeBlockBlock => [1], + Self::LastSafeBlock => [1], } } } @@ -549,7 +573,7 @@ impl Decode for ChainStateKey { fn decode(value: &[u8]) -> Result { match value { [0] => Ok(Self::LastFinalizedBlock), - [1] => Ok(Self::LastSafeBlockBlock), + [1] => Ok(Self::LastSafeBlock), _ => Err(crate::DatabaseError::Decode), } } diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 48442aab381..3579d5360d6 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -9,13 +9,15 @@ use reth_config::config::EtlConfig; use reth_db_api::{tables, transaction::DbTxMut, DatabaseError}; use reth_etl::Collector; use reth_execution_errors::StateRootError; -use reth_primitives_traits::{Account, Bytecode, GotExpected, NodePrimitives, StorageEntry}; +use reth_primitives_traits::{ + Account, Bytecode, GotExpected, NodePrimitives, SealedHeader, StorageEntry, +}; use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, - HashingWriter, HeaderProvider, HistoryWriter, OriginalValuesKnown, ProviderError, RevertsInit, - StageCheckpointReader, StageCheckpointWriter, StateWriter, StaticFileProviderFactory, - TrieWriter, + HashingWriter, HeaderProvider, HistoryWriter, MetadataWriter, OriginalValuesKnown, + ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, StateWriter, + StaticFileProviderFactory, StorageSettings, StorageSettingsCache, TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -88,7 +90,8 @@ where + StaticFileProviderFactory> + ChainSpecProvider + StageCheckpointReader - + BlockHashReader, + + BlockHashReader + + StorageSettingsCache, PF::ProviderRW: StaticFileProviderFactory + StageCheckpointWriter + HistoryWriter @@ -96,6 +99,7 @@ where + HashingWriter + StateWriter + TrieWriter + + MetadataWriter + AsRef, PF::ChainSpec: EthChainSpec
::BlockHeader>, { @@ -159,9 +163,14 @@ where static_file_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(0)?; static_file_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(0)?; + // Behaviour reserved only for new nodes should be set here. + let storage_settings = StorageSettings::new(); + provider_rw.write_storage_settings(storage_settings)?; + // `commit_unwind`` will first commit the DB and then the static file provider, which is // necessary on `init_genesis`. provider_rw.commit()?; + factory.set_storage_settings_cache(storage_settings); Ok(hash) } @@ -345,9 +354,8 @@ where match static_file_provider.block_hash(0) { Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => { - let (difficulty, hash) = (header.difficulty(), block_hash); let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; - writer.append_header(header, difficulty, &hash)?; + writer.append_header(header, &block_hash)?; } Ok(Some(_)) => {} Err(e) => return Err(e), @@ -389,13 +397,16 @@ where } let block = provider_rw.last_block_number()?; + let hash = provider_rw .block_hash(block)? .ok_or_else(|| eyre::eyre!("Block hash not found for block {}", block))?; - let expected_state_root = provider_rw + let header = provider_rw .header_by_number(block)? - .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))? - .state_root(); + .map(SealedHeader::seal_slow) + .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))?; + + let expected_state_root = header.state_root(); // first line can be state root let dump_state_root = parse_state_root(&mut reader)?; @@ -403,6 +414,7 @@ where error!(target: "reth::cli", ?dump_state_root, ?expected_state_root, + header=?header.num_hash(), "State root from state dump does not match state root in current header." ); return Err(InitStorageError::StateRootMismatch(GotExpected { @@ -602,7 +614,7 @@ where match state_root.root_with_progress()? 
{ StateRootProgress::Progress(state, _, updates) => { - let updated_len = provider.write_trie_updates(&updates)?; + let updated_len = provider.write_trie_updates(updates)?; total_flushed_updates += updated_len; trace!(target: "reth::cli", @@ -622,7 +634,7 @@ where } } StateRootProgress::Complete(root, _, updates) => { - let updated_len = provider.write_trie_updates(&updates)?; + let updated_len = provider.write_trie_updates(updates)?; total_flushed_updates += updated_len; trace!(target: "reth::cli", @@ -721,11 +733,14 @@ mod tests { init_genesis(&factory).unwrap(); // Try to init db with a different genesis block - let genesis_hash = init_genesis(&ProviderFactory::::new( - factory.into_db(), - MAINNET.clone(), - static_file_provider, - )); + let genesis_hash = init_genesis( + &ProviderFactory::::new( + factory.into_db(), + MAINNET.clone(), + static_file_provider, + ) + .unwrap(), + ); assert!(matches!( genesis_hash.unwrap_err(), diff --git a/crates/storage/db/benches/criterion.rs b/crates/storage/db/benches/criterion.rs index 64d6fbdbfdf..7d62384c164 100644 --- a/crates/storage/db/benches/criterion.rs +++ b/crates/storage/db/benches/criterion.rs @@ -31,7 +31,6 @@ pub fn db(c: &mut Criterion) { group.warm_up_time(std::time::Duration::from_millis(200)); measure_table_db::(&mut group); - measure_table_db::(&mut group); measure_table_db::(&mut group); measure_table_db::(&mut group); measure_table_db::(&mut group); @@ -48,7 +47,6 @@ pub fn serialization(c: &mut Criterion) { group.warm_up_time(std::time::Duration::from_millis(200)); measure_table_serialization::(&mut group); - measure_table_serialization::(&mut group); measure_table_serialization::(&mut group); measure_table_serialization::(&mut group); measure_table_serialization::(&mut group); diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index 0bbb75ce4b5..5ca6eacb6c7 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -345,3 +345,110 @@ impl DbDupCursorRW for Cursor { ) } } + +#[cfg(test)] +mod tests { + use crate::{ + mdbx::{DatabaseArguments, DatabaseEnv, DatabaseEnvKind}, + tables::StorageChangeSets, + Database, + }; + use alloy_primitives::{address, Address, B256, U256}; + use reth_db_api::{ + cursor::{DbCursorRO, DbDupCursorRW}, + models::{BlockNumberAddress, ClientVersion}, + table::TableImporter, + transaction::{DbTx, DbTxMut}, + }; + use reth_primitives_traits::StorageEntry; + use std::sync::Arc; + use tempfile::TempDir; + + fn create_test_db() -> Arc { + let path = TempDir::new().unwrap(); + let mut db = DatabaseEnv::open( + path.path(), + DatabaseEnvKind::RW, + DatabaseArguments::new(ClientVersion::default()), + ) + .unwrap(); + db.create_tables().unwrap(); + Arc::new(db) + } + + #[test] + fn test_import_table_with_range_works_on_dupsort() { + let addr1 = address!("0000000000000000000000000000000000000001"); + let addr2 = address!("0000000000000000000000000000000000000002"); + let addr3 = address!("0000000000000000000000000000000000000003"); + let source_db = create_test_db(); + let target_db = create_test_db(); + let test_data = vec![ + ( + BlockNumberAddress((100, addr1)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(100) }, + ), + ( + BlockNumberAddress((100, addr1)), + StorageEntry { key: B256::with_last_byte(2), value: U256::from(200) }, + ), + ( + BlockNumberAddress((100, addr1)), + StorageEntry { key: B256::with_last_byte(3), value: U256::from(300) }, + ), + ( 
+ BlockNumberAddress((101, addr1)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(400) }, + ), + ( + BlockNumberAddress((101, addr2)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(500) }, + ), + ( + BlockNumberAddress((101, addr2)), + StorageEntry { key: B256::with_last_byte(2), value: U256::from(600) }, + ), + ( + BlockNumberAddress((102, addr3)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(700) }, + ), + ]; + + // setup data + let tx = source_db.tx_mut().unwrap(); + { + let mut cursor = tx.cursor_dup_write::().unwrap(); + for (key, value) in &test_data { + cursor.append_dup(*key, *value).unwrap(); + } + } + tx.commit().unwrap(); + + // import data from source db to target + let source_tx = source_db.tx().unwrap(); + let target_tx = target_db.tx_mut().unwrap(); + + target_tx + .import_table_with_range::( + &source_tx, + Some(BlockNumberAddress((100, Address::ZERO))), + BlockNumberAddress((102, Address::repeat_byte(0xff))), + ) + .unwrap(); + target_tx.commit().unwrap(); + + // fetch all data from target db + let verify_tx = target_db.tx().unwrap(); + let mut cursor = verify_tx.cursor_dup_read::().unwrap(); + let copied: Vec<_> = cursor.walk(None).unwrap().collect::, _>>().unwrap(); + + // verify each entry matches the test data + assert_eq!(copied.len(), test_data.len(), "Should copy all entries including duplicates"); + for ((copied_key, copied_value), (expected_key, expected_value)) in + copied.iter().zip(test_data.iter()) + { + assert_eq!(copied_key, expected_key); + assert_eq!(copied_value, expected_value); + } + } +} diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index def7c90ca42..b00bfd3c9a5 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -103,6 +103,22 @@ pub struct DatabaseArguments { /// MDBX allows up to 32767 readers (`MDBX_READERS_LIMIT`). This arg is to configure the max /// readers. max_readers: Option, + /// Defines the synchronization strategy used by the MDBX database when writing data to disk. + /// + /// This determines how aggressively MDBX ensures data durability versus prioritizing + /// performance. The available modes are: + /// + /// - [`SyncMode::Durable`]: Ensures all transactions are fully flushed to disk before they are + /// considered committed. This provides the highest level of durability and crash safety + /// but may have a performance cost. + /// - [`SyncMode::SafeNoSync`]: Skips certain fsync operations to improve write performance. + /// This mode still maintains database integrity but may lose the most recent transactions if + /// the system crashes unexpectedly. + /// + /// Choose `Durable` if consistency and crash safety are critical (e.g., production + /// environments). Choose `SafeNoSync` if performance is more important and occasional data + /// loss is acceptable (e.g., testing or ephemeral data). + sync_mode: SyncMode, } impl Default for DatabaseArguments { @@ -126,6 +142,7 @@ impl DatabaseArguments { max_read_transaction_duration: None, exclusive: None, max_readers: None, + sync_mode: SyncMode::Durable, } } @@ -137,6 +154,15 @@ impl DatabaseArguments { self } + /// Sets the database sync mode. + pub const fn with_sync_mode(mut self, sync_mode: Option) -> Self { + if let Some(sync_mode) = sync_mode { + self.sync_mode = sync_mode; + } + + self + } + /// Configures the database growth step in bytes. 
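For the new `sync_mode` argument documented above, a short sketch of how a caller might choose between the two modes. The `SyncMode` import path is an assumption; `DatabaseArguments::new(ClientVersion::default())` follows the test code elsewhere in this diff:

```rust
// Sketch only: the exact re-export path of `SyncMode` is assumed here.
use reth_db::mdbx::DatabaseArguments;
use reth_db_api::models::ClientVersion;
use reth_libmdbx::SyncMode;

fn main() {
    // SafeNoSync trades crash durability for faster writes (e.g. throwaway test databases).
    let fast = DatabaseArguments::new(ClientVersion::default())
        .with_sync_mode(Some(SyncMode::SafeNoSync));

    // Passing `None` keeps the default, fully durable mode.
    let durable = DatabaseArguments::new(ClientVersion::default()).with_sync_mode(None);

    let _ = (fast, durable);
}
```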
pub const fn with_growth_step(mut self, growth_step: Option) -> Self { if let Some(growth_step) = growth_step { @@ -329,7 +355,7 @@ impl DatabaseEnv { DatabaseEnvKind::RW => { // enable writemap mode in RW mode inner_env.write_map(); - Mode::ReadWrite { sync_mode: SyncMode::Durable } + Mode::ReadWrite { sync_mode: args.sync_mode } } }; diff --git a/crates/storage/db/src/static_file/mod.rs b/crates/storage/db/src/static_file/mod.rs index f2c9ce45fbc..6292020dd53 100644 --- a/crates/storage/db/src/static_file/mod.rs +++ b/crates/storage/db/src/static_file/mod.rs @@ -1,9 +1,6 @@ //! reth's static file database table import and access -use std::{ - collections::{hash_map::Entry, HashMap}, - path::Path, -}; +use std::{collections::HashMap, path::Path}; mod cursor; pub use cursor::StaticFileCursor; @@ -17,12 +14,11 @@ pub use masks::*; use reth_static_file_types::{SegmentHeader, SegmentRangeInclusive, StaticFileSegment}; /// Alias type for a map of [`StaticFileSegment`] and sorted lists of existing static file ranges. -type SortedStaticFiles = - HashMap)>>; +type SortedStaticFiles = HashMap>; /// Given the `static_files` directory path, it returns a list over the existing `static_files` /// organized by [`StaticFileSegment`]. Each segment has a sorted list of block ranges and -/// transaction ranges as presented in the file configuration. +/// segment headers as presented in the file configuration. pub fn iter_static_files(path: &Path) -> Result { if !path.exists() { reth_fs_util::create_dir_all(path).map_err(|err| NippyJarError::Custom(err.to_string()))?; @@ -39,25 +35,18 @@ pub fn iter_static_files(path: &Path) -> Result::load(&entry.path())?; - let (block_range, tx_range) = - (jar.user_header().block_range().copied(), jar.user_header().tx_range().copied()); - - if let Some(block_range) = block_range { - match static_files.entry(segment) { - Entry::Occupied(mut entry) => { - entry.get_mut().push((block_range, tx_range)); - } - Entry::Vacant(entry) => { - entry.insert(vec![(block_range, tx_range)]); - } - } + if let Some(block_range) = jar.user_header().block_range() { + static_files + .entry(segment) + .and_modify(|headers| headers.push((block_range, *jar.user_header()))) + .or_insert_with(|| vec![(block_range, *jar.user_header())]); } } } for range_list in static_files.values_mut() { // Sort by block end range. - range_list.sort_by_key(|(r, _)| r.end()); + range_list.sort_by_key(|(block_range, _)| block_range.end()); } Ok(static_files) diff --git a/crates/storage/errors/src/lib.rs b/crates/storage/errors/src/lib.rs index 1a09d745140..eca6cd47a45 100644 --- a/crates/storage/errors/src/lib.rs +++ b/crates/storage/errors/src/lib.rs @@ -21,8 +21,5 @@ pub mod lockfile; pub mod provider; pub use provider::{ProviderError, ProviderResult}; -/// Writer error -pub mod writer; - /// Any error pub mod any; diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index c27587690ba..ed5230c18fb 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -1,4 +1,4 @@ -use crate::{any::AnyError, db::DatabaseError, writer::UnifiedStorageWriterError}; +use crate::{any::AnyError, db::DatabaseError}; use alloc::{boxed::Box, string::String}; use alloy_eips::{BlockHashOrNumber, HashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256}; @@ -58,9 +58,6 @@ pub enum ProviderError { /// The account address. address: Address, }, - /// The total difficulty for a block is missing. 
- #[error("total difficulty not found for block #{_0}")] - TotalDifficultyNotFound(BlockNumber), /// When required header related data was not found but was required. #[error("no header found for {_0:?}")] HeaderNotFound(BlockHashOrNumber), @@ -128,15 +125,20 @@ pub enum ProviderError { /// Consistent view error. #[error("failed to initialize consistent view: {_0}")] ConsistentView(Box), - /// Storage writer error. - #[error(transparent)] - UnifiedStorageWriterError(#[from] UnifiedStorageWriterError), /// Received invalid output from configured storage implementation. #[error("received invalid output from storage")] InvalidStorageOutput, /// Missing trie updates. #[error("missing trie updates for block {0}")] MissingTrieUpdates(B256), + /// Insufficient changesets to revert to the requested block. + #[error("insufficient changesets to revert to block #{requested}. Available changeset range: {available:?}")] + InsufficientChangesets { + /// The block number requested for reversion + requested: BlockNumber, + /// The available range of blocks with changesets + available: core::ops::RangeInclusive, + }, /// Any other error type wrapped into a cloneable [`AnyError`]. #[error(transparent)] Other(#[from] AnyError), diff --git a/crates/storage/errors/src/writer.rs b/crates/storage/errors/src/writer.rs deleted file mode 100644 index 52a5ba06e5e..00000000000 --- a/crates/storage/errors/src/writer.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::db::DatabaseError; -use reth_static_file_types::StaticFileSegment; - -/// `UnifiedStorageWriter` related errors -#[derive(Clone, Debug, derive_more::Display, PartialEq, Eq, derive_more::Error)] -pub enum UnifiedStorageWriterError { - /// Database writer is missing - #[display("Database writer is missing")] - MissingDatabaseWriter, - /// Static file writer is missing - #[display("Static file writer is missing")] - MissingStaticFileWriter, - /// Static file writer is of wrong segment - #[display("Static file writer is of wrong segment: got {_0}, expected {_1}")] - IncorrectStaticFileWriter(StaticFileSegment, StaticFileSegment), - /// Database-related errors. - Database(DatabaseError), -} - -impl From for UnifiedStorageWriterError { - fn from(error: DatabaseError) -> Self { - Self::Database(error) - } -} diff --git a/crates/storage/libmdbx-rs/README.md b/crates/storage/libmdbx-rs/README.md index df115ee69a0..f6989efa419 100644 --- a/crates/storage/libmdbx-rs/README.md +++ b/crates/storage/libmdbx-rs/README.md @@ -1,7 +1,7 @@ # libmdbx-rs -Rust bindings for [libmdbx](https://libmdbx.dqdkfa.ru). +Rust bindings for [libmdbx](https://github.com/erthink/libmdbx). Forked from an earlier Apache licenced version of the `libmdbx-rs` crate, before it changed licence to GPL. NOTE: Most of the repo came from [lmdb-rs bindings](https://github.com/mozilla/lmdb-rs). @@ -9,7 +9,7 @@ NOTE: Most of the repo came from [lmdb-rs bindings](https://github.com/mozilla/l ## Updating the libmdbx Version To update the libmdbx version you must clone it and copy the `dist/` folder in `mdbx-sys/`. -Make sure to follow the [building steps](https://libmdbx.dqdkfa.ru/usage.html#getting). +Make sure to follow the [building steps](https://github.com/erthink/libmdbx#building). 
```bash # clone libmdbx to a repository outside at specific tag diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs index 71bd77b55d2..6aefab57b19 100644 --- a/crates/storage/libmdbx-rs/src/flags.rs +++ b/crates/storage/libmdbx-rs/src/flags.rs @@ -1,8 +1,10 @@ +use std::str::FromStr; + use bitflags::bitflags; use ffi::*; /// MDBX sync mode -#[derive(Clone, Copy, Debug, Default)] +#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)] pub enum SyncMode { /// Default robust and durable sync mode. /// Metadata is written and flushed to disk after a data is written and flushed, which @@ -119,6 +121,21 @@ impl From for EnvironmentFlags { } } +impl FromStr for SyncMode { + type Err = String; + + fn from_str(s: &str) -> Result { + let val = s.trim().to_ascii_lowercase(); + match val.as_str() { + "durable" => Ok(Self::Durable), + "safe-no-sync" | "safenosync" | "safe_no_sync" => Ok(Self::SafeNoSync), + _ => Err(format!( + "invalid value '{s}' for sync mode. valid values: durable, safe-no-sync" + )), + } + } +} + #[derive(Clone, Copy, Debug, Default)] pub struct EnvironmentFlags { pub no_sub_dir: bool, diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 1b47d595c4f..8653c112c66 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -200,6 +200,9 @@ impl NippyJar { // Read [`Self`] located at the data file. let config_path = path.with_extension(CONFIG_FILE_EXTENSION); let config_file = File::open(&config_path) + .inspect_err(|e| { + warn!( ?path, %e, "Failed to load static file jar"); + }) .map_err(|err| reth_fs_util::FsPathError::open(err, config_path))?; let mut obj = Self::load_from_reader(config_file)?; diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 82a3726c43e..b67064d0fff 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -29,7 +29,6 @@ reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true reth-codecs.workspace = true -reth-evm.workspace = true reth-chain-state.workspace = true reth-node-types.workspace = true reth-static-file-types.workspace = true @@ -71,6 +70,7 @@ reth-trie = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true reth-ethereum-engine-primitives.workspace = true reth-ethereum-primitives.workspace = true +reth-tracing.workspace = true revm-database-interface.workspace = true revm-state.workspace = true @@ -90,7 +90,6 @@ test-utils = [ "reth-ethereum-engine-primitives", "reth-ethereum-primitives/test-utils", "reth-chainspec/test-utils", - "reth-evm/test-utils", "reth-primitives-traits/test-utils", "reth-codecs/test-utils", "reth-db-api/test-utils", diff --git a/crates/storage/provider/src/bundle_state/mod.rs b/crates/storage/provider/src/bundle_state/mod.rs deleted file mode 100644 index 58b76f1eacf..00000000000 --- a/crates/storage/provider/src/bundle_state/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! Bundle state module. -//! This module contains all the logic related to bundle state. - -mod state_reverts; -pub use state_reverts::StorageRevertsIter; diff --git a/crates/storage/provider/src/changesets_utils/mod.rs b/crates/storage/provider/src/changesets_utils/mod.rs new file mode 100644 index 00000000000..3b65825264b --- /dev/null +++ b/crates/storage/provider/src/changesets_utils/mod.rs @@ -0,0 +1,7 @@ +//! 
This module contains helpful utilities related to populating changesets tables. + +mod state_reverts; +pub use state_reverts::StorageRevertsIter; + +mod trie; +pub use trie::*; diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/changesets_utils/state_reverts.rs similarity index 100% rename from crates/storage/provider/src/bundle_state/state_reverts.rs rename to crates/storage/provider/src/changesets_utils/state_reverts.rs diff --git a/crates/storage/provider/src/changesets_utils/trie.rs b/crates/storage/provider/src/changesets_utils/trie.rs new file mode 100644 index 00000000000..f4365aab103 --- /dev/null +++ b/crates/storage/provider/src/changesets_utils/trie.rs @@ -0,0 +1,147 @@ +use itertools::{merge_join_by, EitherOrBoth}; +use reth_db_api::DatabaseError; +use reth_trie::{trie_cursor::TrieCursor, BranchNodeCompact, Nibbles}; +use std::cmp::{Ord, Ordering}; + +/// Combines a sorted iterator of trie node paths and a storage trie cursor into a new +/// iterator which produces the current values of all given paths in the same order. +#[derive(Debug)] +pub struct StorageTrieCurrentValuesIter<'cursor, P, C> { + /// Sorted iterator of node paths which we want the values of. + paths: P, + /// Storage trie cursor. + cursor: &'cursor mut C, + /// Current value at the cursor, allows us to treat the cursor as a peekable iterator. + cursor_current: Option<(Nibbles, BranchNodeCompact)>, +} + +impl<'cursor, P, C> StorageTrieCurrentValuesIter<'cursor, P, C> +where + P: Iterator, + C: TrieCursor, +{ + /// Instantiate a [`StorageTrieCurrentValuesIter`] from a sorted paths iterator and a cursor. + pub fn new(paths: P, cursor: &'cursor mut C) -> Result { + let mut new_self = Self { paths, cursor, cursor_current: None }; + new_self.seek_cursor(Nibbles::default())?; + Ok(new_self) + } + + fn seek_cursor(&mut self, path: Nibbles) -> Result<(), DatabaseError> { + self.cursor_current = self.cursor.seek(path)?; + Ok(()) + } +} + +impl<'cursor, P, C> Iterator for StorageTrieCurrentValuesIter<'cursor, P, C> +where + P: Iterator, + C: TrieCursor, +{ + type Item = Result<(Nibbles, Option), DatabaseError>; + + fn next(&mut self) -> Option { + let Some(curr_path) = self.paths.next() else { + // If there are no more paths then there is no further possible output. + return None + }; + + // If the path is ahead of the cursor then seek the cursor forward to catch up. The cursor + // will seek either to `curr_path` or beyond it. + if self.cursor_current.as_ref().is_some_and(|(cursor_path, _)| curr_path > *cursor_path) && + let Err(err) = self.seek_cursor(curr_path) + { + return Some(Err(err)) + } + + // If there is a path but the cursor is empty then that path has no node. + if self.cursor_current.is_none() { + return Some(Ok((curr_path, None))) + } + + let (cursor_path, cursor_node) = + self.cursor_current.as_mut().expect("already checked for None"); + + // There is both a path and a cursor value, compare their paths. + match curr_path.cmp(cursor_path) { + Ordering::Less => { + // If the path is behind the cursor then there is no value for that + // path, produce None. + Some(Ok((curr_path, None))) + } + Ordering::Equal => { + // If the target path and cursor's path match then there is a value for that path, + // return the value. We don't seek the cursor here, that will be handled on the + // next call to `next` after checking that `paths` isn't None. 
+ let cursor_node = core::mem::take(cursor_node); + Some(Ok((*cursor_path, Some(cursor_node)))) + } + Ordering::Greater => { + panic!("cursor was seeked to {curr_path:?}, but produced a node at a lower path {cursor_path:?}") + } + } + } +} + +/// Returns an iterator which produces the values to be inserted into the `StoragesTrieChangeSets` +/// table for an account whose storage was wiped during a block. It is expected that this is called +/// prior to inserting the block's trie updates. +/// +/// ## Arguments +/// +/// - `curr_values_of_changed` is an iterator over the current values of all trie nodes modified by +/// the block, ordered by path. +/// - `all_nodes` is an iterator over all existing trie nodes for the account, ordered by path. +/// +/// ## Returns +/// +/// An iterator of trie node paths and a `Some(node)` (indicating the node was wiped) or a `None` +/// (indicating the node was modified in the block but didn't previously exist. The iterator's +/// results will be ordered by path. +pub fn storage_trie_wiped_changeset_iter( + curr_values_of_changed: impl Iterator< + Item = Result<(Nibbles, Option), DatabaseError>, + >, + all_nodes: impl Iterator>, +) -> Result< + impl Iterator), DatabaseError>>, + DatabaseError, +> { + let all_nodes = all_nodes.map(|e| e.map(|(nibbles, node)| (nibbles, Some(node)))); + + let merged = merge_join_by(curr_values_of_changed, all_nodes, |a, b| match (a, b) { + (Err(_), _) => Ordering::Less, + (_, Err(_)) => Ordering::Greater, + (Ok(a), Ok(b)) => a.0.cmp(&b.0), + }); + + Ok(merged.map(|either_or| match either_or { + EitherOrBoth::Left(changed) => { + // A path of a changed node (given in `paths`) which was not found in the database (or + // there's an error). The current value of this path must be None, otherwise it would + // have also been returned by the `all_nodes` iter. + debug_assert!( + changed.as_ref().is_err() || changed.as_ref().is_ok_and(|(_, node)| node.is_none()), + "changed node is Some but wasn't returned by `all_nodes` iterator: {changed:?}", + ); + changed + } + EitherOrBoth::Right(wiped) => { + // A node was found in the db (indicating it was wiped) but was not given in `paths`. + // Return it as-is. + wiped + } + EitherOrBoth::Both(changed, _wiped) => { + // A path of a changed node (given in `paths`) was found with a previous value in the + // database. The changed node must have a value which is equal to the one found by the + // `all_nodes` iterator. If the changed node had no previous value (None) it wouldn't + // be returned by `all_nodes` and so would be in the Left branch. + // + // Due to the ordering closure passed to `merge_join_by` it's not possible for either + // value to be an error here. + debug_assert!(changed.is_ok(), "unreachable error condition: {changed:?}"); + debug_assert_eq!(changed, _wiped); + changed + } + })) +} diff --git a/crates/storage/provider/src/either_writer.rs b/crates/storage/provider/src/either_writer.rs new file mode 100644 index 00000000000..5c50141f651 --- /dev/null +++ b/crates/storage/provider/src/either_writer.rs @@ -0,0 +1,43 @@ +//! Generic writer abstraction for writing to either database tables or static files. + +use crate::providers::StaticFileProviderRWRefMut; +use alloy_primitives::{BlockNumber, TxNumber}; +use reth_db::table::Value; +use reth_db_api::{cursor::DbCursorRW, tables}; +use reth_node_types::NodePrimitives; +use reth_storage_errors::provider::ProviderResult; + +/// Represents a destination for writing data, either to database or static files. 
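The wiped-changeset helper above leans on `itertools::merge_join_by` over two path-sorted streams. A self-contained sketch with plain integers and strings in place of `Nibbles`/`BranchNodeCompact`, showing how the `Left`/`Right`/`Both` arms map onto "changed but previously absent", "wiped only", and "changed with a prior value":

```rust
use itertools::{merge_join_by, EitherOrBoth};

fn main() {
    // Paths modified by the block, with their current (pre-block) values if any.
    let changed = vec![(1u8, None::<&str>), (3, Some("c3"))];
    // Every node that currently exists for the wiped account.
    let all = vec![(2u8, "c2"), (3, "c3")];

    let merged: Vec<(u8, Option<&str>)> = merge_join_by(changed, all, |l, r| l.0.cmp(&r.0))
        .map(|entry| match entry {
            // Changed but absent from the db: record `None` as the prior state.
            EitherOrBoth::Left((path, prev)) => (path, prev),
            // Present in the db but untouched by the block: it was wiped, keep it as-is.
            EitherOrBoth::Right((path, node)) => (path, Some(node)),
            // Changed and present: both sides agree on the prior value.
            EitherOrBoth::Both((path, prev), _) => (path, prev),
        })
        .collect();

    assert_eq!(merged, vec![(1, None), (2, Some("c2")), (3, Some("c3"))]);
}
```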
+#[derive(Debug)] +pub enum EitherWriter<'a, CURSOR, N> { + /// Write to database table via cursor + Database(CURSOR), + /// Write to static file + StaticFile(StaticFileProviderRWRefMut<'a, N>), +} + +impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> { + /// Increment the block number. + /// + /// Relevant only for [`Self::StaticFile`]. It is a no-op for [`Self::Database`]. + pub fn increment_block(&mut self, expected_block_number: BlockNumber) -> ProviderResult<()> { + match self { + Self::Database(_) => Ok(()), + Self::StaticFile(writer) => writer.increment_block(expected_block_number), + } + } +} + +impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> +where + N::Receipt: Value, + CURSOR: DbCursorRW>, +{ + /// Append a transaction receipt. + pub fn append_receipt(&mut self, tx_num: TxNumber, receipt: &N::Receipt) -> ProviderResult<()> { + match self { + Self::Database(cursor) => Ok(cursor.append(tx_num, receipt)?), + Self::StaticFile(writer) => writer.append_receipt(tx_num, receipt), + } + } +} diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index c281f117908..84e1a4f8b46 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -21,35 +21,34 @@ pub mod providers; pub use providers::{ DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW, HistoricalStateProvider, HistoricalStateProviderRef, LatestStateProvider, LatestStateProviderRef, ProviderFactory, - StaticFileAccess, StaticFileWriter, + StaticFileAccess, StaticFileProviderBuilder, StaticFileWriter, }; +pub mod changesets_utils; + #[cfg(any(test, feature = "test-utils"))] /// Common test helpers for mocking the Provider. pub mod test_utils; -/// Re-export provider error. -pub use reth_storage_errors::provider::{ProviderError, ProviderResult}; - -pub use reth_static_file_types as static_file; -pub use static_file::StaticFileSegment; -pub use reth_execution_types::*; - -pub mod bundle_state; - -/// Re-export `OriginalValuesKnown` -pub use revm_database::states::OriginalValuesKnown; - -/// Writer standalone type. -pub mod writer; +pub mod either_writer; +pub use either_writer::*; pub use reth_chain_state::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, CanonStateNotifications, CanonStateSubscriptions, }; - +pub use reth_execution_types::*; +/// Re-export `OriginalValuesKnown` +pub use revm_database::states::OriginalValuesKnown; // reexport traits to avoid breaking changes -pub use reth_storage_api::{HistoryWriter, StatsReader}; +pub use reth_static_file_types as static_file; +pub use reth_storage_api::{ + HistoryWriter, MetadataProvider, MetadataWriter, StatsReader, StorageSettings, + StorageSettingsCache, +}; +/// Re-export provider error. 
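The `EitherWriter` introduced above keeps a single call site while dispatching to two very different backends, with `increment_block` meaningful only for the static-file path. A standalone sketch of that shape, using plain Rust stand-ins rather than the reth cursor and static-file writer types:

```rust
/// Stand-in for `EitherWriter`: one enum hides whether rows land in a
/// database-style sink or a static-file-style sink.
enum EitherSink {
    Database(Vec<(u64, String)>),                         // stands in for a table cursor
    StaticFile { block: u64, rows: Vec<(u64, String)> },  // stands in for the static file writer
}

impl EitherSink {
    /// Mirrors `increment_block`: meaningful for static files, a no-op for the database.
    fn increment_block(&mut self, expected: u64) {
        if let Self::StaticFile { block, .. } = self {
            *block = expected;
        }
    }

    /// Mirrors `append_receipt`: same call, different destination.
    fn append(&mut self, tx_num: u64, receipt: &str) {
        match self {
            Self::Database(rows) | Self::StaticFile { rows, .. } => {
                rows.push((tx_num, receipt.to_owned()))
            }
        }
    }
}

fn main() {
    let mut file_sink = EitherSink::StaticFile { block: 0, rows: Vec::new() };
    file_sink.increment_block(1);
    file_sink.append(0, "receipt-0");

    let mut db_sink = EitherSink::Database(Vec::new());
    db_sink.increment_block(1); // no-op for the database path
    db_sink.append(1, "receipt-1");
}
```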
+pub use reth_storage_errors::provider::{ProviderError, ProviderResult}; +pub use static_file::StaticFileSegment; pub(crate) fn to_range>(bounds: R) -> std::ops::Range { let start = match bounds.start_bound() { diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 75e276b3c42..9dbbed9e88c 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1,48 +1,34 @@ -#![allow(unused)] use crate::{ providers::{ConsistentProvider, ProviderNodeTypes, StaticFileProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, - ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProvider, - DatabaseProviderFactory, FullProvider, HashedPostStateProvider, HeaderProvider, ProviderError, - ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, - StaticFileProviderFactory, TransactionVariant, TransactionsProvider, + ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, + HashedPostStateProvider, HeaderProvider, ProviderError, ProviderFactory, PruneCheckpointReader, + ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, + StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, + TransactionsProvider, TrieReader, }; -use alloy_consensus::{transaction::TransactionMeta, Header}; -use alloy_eips::{ - eip4895::{Withdrawal, Withdrawals}, - BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, -}; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use alloy_consensus::transaction::TransactionMeta; +use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ BlockState, CanonicalInMemoryState, ForkChoiceNotifications, ForkChoiceSubscriptions, MemoryOverlayStateProvider, }; -use reth_chainspec::{ChainInfo, EthereumHardforks}; -use reth_db_api::{ - models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices}, - transaction::DbTx, - Database, -}; -use reth_ethereum_primitives::{Block, EthPrimitives, Receipt, TransactionSigned}; -use reth_evm::{ConfigureEvm, EvmEnv}; +use reth_chainspec::ChainInfo; +use reth_db_api::models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices}; use reth_execution_types::ExecutionOutcome; use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; -use reth_primitives_traits::{ - Account, BlockBody, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, StorageEntry, -}; +use reth_primitives_traits::{Account, RecoveredBlock, SealedHeader, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{ - BlockBodyIndicesProvider, DBProvider, NodePrimitivesProvider, StorageChangeSetReader, -}; +use reth_storage_api::{BlockBodyIndicesProvider, NodePrimitivesProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderResult; -use reth_trie::{HashedPostState, KeccakKeyHasher}; +use reth_trie::{updates::TrieUpdatesSorted, HashedPostState, 
KeccakKeyHasher}; use revm_database::BundleState; use std::{ - ops::{Add, RangeBounds, RangeInclusive, Sub}, + ops::{RangeBounds, RangeInclusive}, sync::Arc, time::Instant, }; @@ -190,14 +176,6 @@ impl HeaderProvider for BlockchainProvider { self.consistent_provider()?.header_by_number(num) } - fn header_td(&self, hash: BlockHash) -> ProviderResult> { - self.consistent_provider()?.header_td(hash) - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - self.consistent_provider()?.header_td_by_number(number) - } - fn headers_range( &self, range: impl RangeBounds, @@ -716,6 +694,14 @@ impl ChangeSetReader for BlockchainProvider { ) -> ProviderResult> { self.consistent_provider()?.account_block_changeset(block_number) } + + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult> { + self.consistent_provider()?.get_account_before_block(block_number, address) + } } impl AccountReader for BlockchainProvider { @@ -745,6 +731,19 @@ impl StateReader for BlockchainProvider { } } +impl TrieReader for BlockchainProvider { + fn trie_reverts(&self, from: BlockNumber) -> ProviderResult { + self.consistent_provider()?.trie_reverts(from) + } + + fn get_block_trie_updates( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + self.consistent_provider()?.get_block_trie_updates(block_number) + } +} + #[cfg(test)] mod tests { use crate::{ @@ -753,8 +752,7 @@ mod tests { create_test_provider_factory, create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, }, - BlockWriter, CanonChainTracker, ProviderFactory, StaticFileProviderFactory, - StaticFileWriter, + BlockWriter, CanonChainTracker, ProviderFactory, }; use alloy_eips::{BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, TxNumber, B256}; @@ -762,25 +760,14 @@ mod tests { use rand::Rng; use reth_chain_state::{ test_utils::TestBlockBuilder, CanonStateNotification, CanonStateSubscriptions, - CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, - NewCanonicalChain, - }; - use reth_chainspec::{ - ChainSpec, ChainSpecBuilder, ChainSpecProvider, EthereumHardfork, MAINNET, - }; - use reth_db_api::{ - cursor::DbCursorRO, - models::{AccountBeforeTx, StoredBlockBodyIndices}, - tables, - transaction::DbTx, + CanonicalInMemoryState, ExecutedBlock, NewCanonicalChain, }; + use reth_chainspec::{ChainSpec, MAINNET}; + use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_errors::ProviderError; - use reth_ethereum_primitives::{Block, EthPrimitives, Receipt}; + use reth_ethereum_primitives::{Block, Receipt}; use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_primitives_traits::{ - BlockBody, RecoveredBlock, SealedBlock, SignedTransaction, SignerRecoverable, - }; - use reth_static_file_types::StaticFileSegment; + use reth_primitives_traits::{RecoveredBlock, SealedBlock, SignerRecoverable}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DBProvider, DatabaseProviderFactory, @@ -793,9 +780,8 @@ mod tests { }; use revm_database::{BundleState, OriginalValuesKnown}; use std::{ - ops::{Bound, Deref, Range, RangeBounds}, + ops::{Bound, Range, RangeBounds}, sync::Arc, - time::Instant, }; const TEST_BLOCKS_COUNT: usize = 5; @@ -900,12 +886,14 @@ mod tests { let execution_outcome = ExecutionOutcome { receipts: vec![block_receipts], ..Default::default() }; - 
ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed(block.clone(), senders)), - execution_outcome.into(), - Default::default(), - ExecutedTrieUpdates::empty(), - ) + ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + block.clone(), + senders, + )), + execution_output: execution_outcome.into(), + ..Default::default() + } }) .collect(), }; @@ -1027,15 +1015,13 @@ mod tests { let in_memory_block_senders = first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { - new: vec![ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed( + new: vec![ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( first_in_mem_block.clone(), in_memory_block_senders, )), - Default::default(), - Default::default(), - ExecutedTrieUpdates::empty(), - )], + ..Default::default() + }], }; provider.canonical_in_memory_state.update_chain(chain); @@ -1063,16 +1049,12 @@ mod tests { assert_eq!(provider.find_block_by_hash(first_db_block.hash(), BlockSource::Pending)?, None); // Insert the last block into the pending state - provider.canonical_in_memory_state.set_pending_block(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(RecoveredBlock::new_sealed( - last_in_mem_block.clone(), - Default::default(), - )), - execution_output: Default::default(), - hashed_state: Default::default(), - }, - trie: ExecutedTrieUpdates::empty(), + provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + last_in_mem_block.clone(), + Default::default(), + )), + ..Default::default() }); // Now the last block should be found in memory @@ -1123,15 +1105,13 @@ mod tests { let in_memory_block_senders = first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { - new: vec![ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed( + new: vec![ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( first_in_mem_block.clone(), in_memory_block_senders, )), - Default::default(), - Default::default(), - ExecutedTrieUpdates::empty(), - )], + ..Default::default() + }], }; provider.canonical_in_memory_state.update_chain(chain); @@ -1177,16 +1157,12 @@ mod tests { ); // Set the block as pending - provider.canonical_in_memory_state.set_pending_block(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(RecoveredBlock::new_sealed( - block.clone(), - block.senders().unwrap(), - )), - execution_output: Default::default(), - hashed_state: Default::default(), - }, - trie: ExecutedTrieUpdates::empty(), + provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + block.clone(), + block.senders().unwrap(), + )), + ..Default::default() }); // Assertions related to the pending block @@ -1224,15 +1200,13 @@ mod tests { let in_memory_block_senders = first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { - new: vec![ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed( + new: vec![ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( first_in_mem_block.clone(), in_memory_block_senders, )), - Default::default(), - Default::default(), - ExecutedTrieUpdates::empty(), - )], + ..Default::default() + }], }; provider.canonical_in_memory_state.update_chain(chain); @@ -1298,24 +1272,12 @@ mod 
tests { BlockRangeParams::default(), )?; - let database_block = database_blocks.first().unwrap().clone(); - let in_memory_block = in_memory_blocks.last().unwrap().clone(); // make sure that the finalized block is on db let finalized_block = database_blocks.get(database_blocks.len() - 3).unwrap(); provider.set_finalized(finalized_block.clone_sealed_header()); let blocks = [database_blocks, in_memory_blocks].concat(); - assert_eq!( - provider.header_td_by_number(database_block.number)?, - Some(database_block.difficulty) - ); - - assert_eq!( - provider.header_td_by_number(in_memory_block.number)?, - Some(in_memory_block.difficulty) - ); - assert_eq!( provider.sealed_headers_while(0..=10, |header| header.number <= 8)?, blocks @@ -1704,9 +1666,12 @@ mod tests { .first() .map(|block| { let senders = block.senders().expect("failed to recover senders"); - ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed(block.clone(), senders)), - Arc::new(ExecutionOutcome { + ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + block.clone(), + senders, + )), + execution_output: Arc::new(ExecutionOutcome { bundle: BundleState::new( in_memory_state.into_iter().map(|(address, (account, _))| { (address, None, Some(account.into()), Default::default()) @@ -1719,9 +1684,8 @@ mod tests { first_block: first_in_memory_block, ..Default::default() }), - Default::default(), - ExecutedTrieUpdates::empty(), - ) + ..Default::default() + } }) .unwrap()], }; @@ -1839,19 +1803,13 @@ mod tests { // adding a pending block to state can test pending() and pending_state_by_hash() function let pending_block = database_blocks[database_blocks.len() - 1].clone(); - only_database_provider.canonical_in_memory_state.set_pending_block( - ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(RecoveredBlock::new_sealed( - pending_block.clone(), - Default::default(), - )), - execution_output: Default::default(), - hashed_state: Default::default(), - }, - trie: ExecutedTrieUpdates::empty(), - }, - ); + only_database_provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + pending_block.clone(), + Default::default(), + )), + ..Default::default() + }); assert_eq!( pending_block.hash(), @@ -1937,16 +1895,12 @@ mod tests { // Set the pending block in memory let pending_block = in_memory_blocks.last().unwrap(); - provider.canonical_in_memory_state.set_pending_block(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(RecoveredBlock::new_sealed( - pending_block.clone(), - Default::default(), - )), - execution_output: Default::default(), - hashed_state: Default::default(), - }, - trie: ExecutedTrieUpdates::empty(), + provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + pending_block.clone(), + Default::default(), + )), + ..Default::default() }); // Set the safe block in memory @@ -2586,14 +2540,15 @@ mod tests { persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[1].number); let to_be_persisted_tx = in_memory_blocks[1].body().transactions[0].clone(); - assert!(matches!( + assert_eq!( correct_transaction_hash_fn( *to_be_persisted_tx.tx_hash(), provider.canonical_in_memory_state(), provider.database - ), - Ok(Some(to_be_persisted_tx)) - )); + ) + .unwrap(), + Some(to_be_persisted_tx) + ); } Ok(()) diff --git a/crates/storage/provider/src/providers/consistent.rs 
b/crates/storage/provider/src/providers/consistent.rs index 03615d5357b..67113fc5c0c 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -4,7 +4,7 @@ use crate::{ BlockReader, BlockReaderIdExt, BlockSource, ChainSpecProvider, ChangeSetReader, HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, - TransactionsProvider, + TransactionsProvider, TrieReader, }; use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; use alloy_eips::{ @@ -13,7 +13,7 @@ use alloy_eips::{ }; use alloy_primitives::{ map::{hash_map, HashMap}, - Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256, + Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, }; use reth_chain_state::{BlockState, CanonicalInMemoryState, MemoryOverlayStateProviderRef}; use reth_chainspec::ChainInfo; @@ -28,6 +28,7 @@ use reth_storage_api::{ StorageChangeSetReader, TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::ProviderResult; +use reth_trie::updates::TrieUpdatesSorted; use revm_database::states::PlainStorageRevert; use std::{ ops::{Add, Bound, RangeBounds, RangeInclusive, Sub}, @@ -662,37 +663,6 @@ impl HeaderProvider for ConsistentProvider { ) } - fn header_td(&self, hash: BlockHash) -> ProviderResult> { - if let Some(num) = self.block_number(hash)? { - self.header_td_by_number(num) - } else { - Ok(None) - } - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - let number = if self.head_block.as_ref().map(|b| b.block_on_chain(number.into())).is_some() - { - // If the block exists in memory, we should return a TD for it. - // - // The canonical in memory state should only store post-merge blocks. Post-merge blocks - // have zero difficulty. This means we can use the total difficulty for the last - // finalized block number if present (so that we are not affected by reorgs), if not the - // last number in the database will be used. - if let Some(last_finalized_num_hash) = - self.canonical_in_memory_state.get_finalized_num_hash() - { - last_finalized_num_hash.number - } else { - self.last_block_number()? - } - } else { - // Otherwise, return what we have on disk for the input block - number - }; - self.storage_provider.header_td_by_number(number) - } - fn headers_range( &self, range: impl RangeBounds, @@ -1422,6 +1392,52 @@ impl ChangeSetReader for ConsistentProvider { self.storage_provider.account_block_changeset(block_number) } } + + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + // Search in-memory state for the account changeset + let changeset = state + .block_ref() + .execution_output + .bundle + .reverts + .clone() + .to_plain_state_reverts() + .accounts + .into_iter() + .flatten() + .find(|(addr, _)| addr == &address) + .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }); + Ok(changeset) + } else { + // Perform checks on whether or not changesets exist for the block. + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let account_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::AccountHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. 
+ // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. + checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !account_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + // Delegate to the storage provider for database lookups + self.storage_provider.get_account_before_block(block_number, address) + } + } } impl AccountReader for ConsistentProvider { @@ -1458,6 +1474,19 @@ impl StateReader for ConsistentProvider { } } +impl TrieReader for ConsistentProvider { + fn trie_reverts(&self, from: BlockNumber) -> ProviderResult { + self.storage_provider.trie_reverts(from) + } + + fn get_block_trie_updates( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + self.storage_provider.get_block_trie_updates(block_number) + } +} + #[cfg(test)] mod tests { use crate::{ @@ -1468,9 +1497,7 @@ mod tests { use alloy_primitives::B256; use itertools::Itertools; use rand::Rng; - use reth_chain_state::{ - ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, NewCanonicalChain, - }; + use reth_chain_state::{ExecutedBlock, NewCanonicalChain}; use reth_db_api::models::AccountBeforeTx; use reth_ethereum_primitives::Block; use reth_execution_types::ExecutionOutcome; @@ -1573,15 +1600,13 @@ mod tests { let in_memory_block_senders = first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { - new: vec![ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed( + new: vec![ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( first_in_mem_block.clone(), in_memory_block_senders, )), - Default::default(), - Default::default(), - ExecutedTrieUpdates::empty(), - )], + ..Default::default() + }], }; consistent_provider.canonical_in_memory_state.update_chain(chain); let consistent_provider = provider.consistent_provider()?; @@ -1615,16 +1640,12 @@ mod tests { ); // Insert the last block into the pending state - provider.canonical_in_memory_state.set_pending_block(ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(RecoveredBlock::new_sealed( - last_in_mem_block.clone(), - Default::default(), - )), - execution_output: Default::default(), - hashed_state: Default::default(), - }, - trie: ExecutedTrieUpdates::empty(), + provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( + last_in_mem_block.clone(), + Default::default(), + )), + ..Default::default() }); // Now the last block should be found in memory @@ -1683,15 +1704,13 @@ mod tests { let in_memory_block_senders = first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { - new: vec![ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed( + new: vec![ExecutedBlock { + recovered_block: Arc::new(RecoveredBlock::new_sealed( first_in_mem_block.clone(), in_memory_block_senders, )), - Default::default(), - Default::default(), - ExecutedTrieUpdates::empty(), - )], + ..Default::default() + }], }; consistent_provider.canonical_in_memory_state.update_chain(chain); @@ -1788,9 +1807,12 @@ mod tests { .first() .map(|block| { let senders = block.senders().expect("failed to recover senders"); - ExecutedBlockWithTrieUpdates::new( - Arc::new(RecoveredBlock::new_sealed(block.clone(), senders)), - Arc::new(ExecutionOutcome { + ExecutedBlock { + recovered_block: 
Arc::new(RecoveredBlock::new_sealed( + block.clone(), + senders, + )), + execution_output: Arc::new(ExecutionOutcome { bundle: BundleState::new( in_memory_state.into_iter().map(|(address, (account, _))| { (address, None, Some(account.into()), Default::default()) @@ -1803,9 +1825,8 @@ mod tests { first_block: first_in_memory_block, ..Default::default() }), - Default::default(), - ExecutedTrieUpdates::empty(), - ) + ..Default::default() + } }) .unwrap()], }; diff --git a/crates/storage/provider/src/providers/database/builder.rs b/crates/storage/provider/src/providers/database/builder.rs index 4bc8569432e..bcd61f188f9 100644 --- a/crates/storage/provider/src/providers/database/builder.rs +++ b/crates/storage/provider/src/providers/database/builder.rs @@ -3,13 +3,17 @@ //! This also includes general purpose staging types that provide builder style functions that lead //! up to the intended build target. -use crate::{providers::StaticFileProvider, ProviderFactory}; +use crate::{ + providers::{NodeTypesForProvider, StaticFileProvider}, + ProviderFactory, +}; use reth_db::{ mdbx::{DatabaseArguments, MaxReadTransactionDuration}, open_db_read_only, DatabaseEnv, }; use reth_db_api::{database_metrics::DatabaseMetrics, Database}; use reth_node_types::{NodeTypes, NodeTypesWithDBAdapter}; +use reth_storage_errors::provider::ProviderResult; use std::{ marker::PhantomData, path::{Path, PathBuf}, @@ -48,10 +52,9 @@ impl ProviderFactoryBuilder { /// /// ```no_run /// use reth_chainspec::MAINNET; - /// use reth_node_types::NodeTypes; - /// use reth_provider::providers::ProviderFactoryBuilder; + /// use reth_provider::providers::{NodeTypesForProvider, ProviderFactoryBuilder}; /// - /// fn demo>() { + /// fn demo>() { /// let provider_factory = ProviderFactoryBuilder::::default() /// .open_read_only(MAINNET.clone(), "datadir") /// .unwrap(); @@ -64,11 +67,9 @@ impl ProviderFactoryBuilder { /// /// ```no_run /// use reth_chainspec::MAINNET; - /// use reth_node_types::NodeTypes; - /// - /// use reth_provider::providers::{ProviderFactoryBuilder, ReadOnlyConfig}; + /// use reth_provider::providers::{NodeTypesForProvider, ProviderFactoryBuilder, ReadOnlyConfig}; /// - /// fn demo>() { + /// fn demo>() { /// let provider_factory = ProviderFactoryBuilder::::default() /// .open_read_only(MAINNET.clone(), ReadOnlyConfig::from_datadir("datadir").no_watch()) /// .unwrap(); @@ -84,11 +85,9 @@ impl ProviderFactoryBuilder { /// /// ```no_run /// use reth_chainspec::MAINNET; - /// use reth_node_types::NodeTypes; - /// - /// use reth_provider::providers::{ProviderFactoryBuilder, ReadOnlyConfig}; + /// use reth_provider::providers::{NodeTypesForProvider, ProviderFactoryBuilder, ReadOnlyConfig}; /// - /// fn demo>() { + /// fn demo>() { /// let provider_factory = ProviderFactoryBuilder::::default() /// .open_read_only( /// MAINNET.clone(), @@ -103,15 +102,15 @@ impl ProviderFactoryBuilder { config: impl Into, ) -> eyre::Result>>> where - N: NodeTypes, + N: NodeTypesForProvider, { let ReadOnlyConfig { db_dir, db_args, static_files_dir, watch_static_files } = config.into(); - Ok(self - .db(Arc::new(open_db_read_only(db_dir, db_args)?)) + self.db(Arc::new(open_db_read_only(db_dir, db_args)?)) .chainspec(chainspec) .static_file(StaticFileProvider::read_only(static_files_dir, watch_static_files)?) 
- .build_provider_factory()) + .build_provider_factory() + .map_err(Into::into) } } @@ -320,11 +319,13 @@ impl TypesAnd3 { impl TypesAnd3, StaticFileProvider> where - N: NodeTypes, + N: NodeTypesForProvider, DB: Database + DatabaseMetrics + Clone + Unpin + 'static, { /// Creates the [`ProviderFactory`]. - pub fn build_provider_factory(self) -> ProviderFactory> { + pub fn build_provider_factory( + self, + ) -> ProviderResult>> { let Self { _types, val_1, val_2, val_3 } = self; ProviderFactory::new(val_1, val_2, val_3) } diff --git a/crates/storage/provider/src/providers/database/chain.rs b/crates/storage/provider/src/providers/database/chain.rs index 2da32d9a05f..9ce3861eb3c 100644 --- a/crates/storage/provider/src/providers/database/chain.rs +++ b/crates/storage/provider/src/providers/database/chain.rs @@ -1,12 +1,12 @@ use crate::{providers::NodeTypesForProvider, DatabaseProvider}; use reth_db_api::transaction::{DbTx, DbTxMut}; -use reth_node_types::FullNodePrimitives; +use reth_node_types::NodePrimitives; use reth_primitives_traits::{FullBlockHeader, FullSignedTx}; use reth_storage_api::{ChainStorageReader, ChainStorageWriter, EmptyBodyStorage, EthStorage}; /// Trait that provides access to implementations of [`ChainStorage`] -pub trait ChainStorage: Send + Sync { +pub trait ChainStorage: Send + Sync { /// Provides access to the chain reader. fn reader(&self) -> impl ChainStorageReader, Primitives> where @@ -24,7 +24,7 @@ impl ChainStorage for EthStorage where T: FullSignedTx, H: FullBlockHeader, - N: FullNodePrimitives< + N: NodePrimitives< Block = alloy_consensus::Block, BlockHeader = H, BlockBody = alloy_consensus::BlockBody, @@ -52,7 +52,7 @@ impl ChainStorage for EmptyBodyStorage where T: FullSignedTx, H: FullBlockHeader, - N: FullNodePrimitives< + N: NodePrimitives< Block = alloy_consensus::Block, BlockHeader = H, BlockBody = alloy_consensus::BlockBody, diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index 4923b51db37..4daac3dfddb 100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ b/crates/storage/provider/src/providers/database/metrics.rs @@ -45,7 +45,6 @@ pub(crate) enum Action { InsertBlockBodyIndices, InsertTransactionBlocks, GetNextTxNum, - GetParentTD, } /// Database provider metrics @@ -71,8 +70,6 @@ struct DatabaseProviderMetrics { insert_tx_blocks: Histogram, /// Duration of get next tx num get_next_tx_num: Histogram, - /// Duration of get parent TD - get_parent_td: Histogram, } impl DatabaseProviderMetrics { @@ -88,7 +85,6 @@ impl DatabaseProviderMetrics { Action::InsertBlockBodyIndices => self.insert_block_body_indices.record(duration), Action::InsertTransactionBlocks => self.insert_tx_blocks.record(duration), Action::GetNextTxNum => self.get_next_tx_num.record(duration), - Action::GetParentTD => self.get_parent_td.record(duration), } } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 54642a94757..03c5ee417b9 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -1,29 +1,31 @@ use crate::{ - providers::{state::latest::LatestStateProvider, StaticFileProvider}, + providers::{state::latest::LatestStateProvider, NodeTypesForProvider, StaticFileProvider}, to_range, traits::{BlockSource, ReceiptProvider}, BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, - 
HashedPostStateProvider, HeaderProvider, HeaderSyncGapProvider, ProviderError, - PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, + HashedPostStateProvider, HeaderProvider, HeaderSyncGapProvider, MetadataProvider, + ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, }; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use core::fmt; +use parking_lot::RwLock; use reth_chainspec::ChainInfo; use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_node_types::{ - BlockTy, HeaderTy, NodeTypes, NodeTypesWithDB, NodeTypesWithDBAdapter, ReceiptTy, TxTy, + BlockTy, HeaderTy, NodeTypesWithDB, NodeTypesWithDBAdapter, ReceiptTy, TxTy, }; use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ - BlockBodyIndicesProvider, NodePrimitivesProvider, TryIntoHistoricalStateProvider, + BlockBodyIndicesProvider, NodePrimitivesProvider, StorageSettings, StorageSettingsCache, + TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::HashedPostState; @@ -64,39 +66,54 @@ pub struct ProviderFactory { prune_modes: PruneModes, /// The node storage handler. storage: Arc, + /// Storage configuration settings for this node + storage_settings: Arc>, } -impl ProviderFactory>> { +impl ProviderFactory>> { /// Instantiates the builder for this type pub fn builder() -> ProviderFactoryBuilder { ProviderFactoryBuilder::default() } } -impl ProviderFactory { +impl ProviderFactory { /// Create new database provider factory. pub fn new( db: N::DB, chain_spec: Arc, static_file_provider: StaticFileProvider, - ) -> Self { - Self { + ) -> ProviderResult { + // Load storage settings from database at init time. Creates a temporary provider + // to read persisted settings, falling back to legacy defaults if none exist. + // + // Both factory and all providers it creates should share these cached settings. + let legacy_settings = StorageSettings::legacy(); + let storage_settings = DatabaseProvider::<_, N>::new( + db.tx()?, + chain_spec.clone(), + static_file_provider.clone(), + Default::default(), + Default::default(), + Arc::new(RwLock::new(legacy_settings)), + ) + .storage_settings()? + .unwrap_or(legacy_settings); + + Ok(Self { db, chain_spec, static_file_provider, - prune_modes: PruneModes::none(), + prune_modes: PruneModes::default(), storage: Default::default(), - } - } - - /// Enables metrics on the static file provider. - pub fn with_static_files_metrics(mut self) -> Self { - self.static_file_provider = self.static_file_provider.with_metrics(); - self + storage_settings: Arc::new(RwLock::new(storage_settings)), + }) } +} +impl ProviderFactory { /// Sets the pruning configuration for an existing [`ProviderFactory`]. 
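    /// A minimal usage sketch (illustrative only; `factory` stands for an existing
    /// [`ProviderFactory`] and the chosen modes are placeholders):
    ///
    /// ```ignore
    /// use reth_prune_types::PruneModes;
    ///
    /// // Segments left unset in `PruneModes` are not pruned.
    /// let factory = factory.with_prune_modes(PruneModes::default());
    /// ```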
- pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + pub const fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { self.prune_modes = prune_modes; self } @@ -113,7 +130,17 @@ impl ProviderFactory { } } -impl>> ProviderFactory { +impl StorageSettingsCache for ProviderFactory { + fn cached_storage_settings(&self) -> StorageSettings { + *self.storage_settings.read() + } + + fn set_storage_settings_cache(&self, settings: StorageSettings) { + *self.storage_settings.write() = settings; + } +} + +impl>> ProviderFactory { /// Create new database provider by passing a path. [`ProviderFactory`] will own the database /// instance. pub fn new_with_database_path>( @@ -122,13 +149,12 @@ impl>> ProviderFactory { args: DatabaseArguments, static_file_provider: StaticFileProvider, ) -> RethResult { - Ok(Self { - db: Arc::new(init_db(path, args).map_err(RethError::msg)?), + Self::new( + Arc::new(init_db(path, args).map_err(RethError::msg)?), chain_spec, static_file_provider, - prune_modes: PruneModes::none(), - storage: Default::default(), - }) + ) + .map_err(RethError::Provider) } } @@ -147,6 +173,7 @@ impl ProviderFactory { self.static_file_provider.clone(), self.prune_modes.clone(), self.storage.clone(), + self.storage_settings.clone(), )) } @@ -162,6 +189,7 @@ impl ProviderFactory { self.static_file_provider.clone(), self.prune_modes.clone(), self.storage.clone(), + self.storage_settings.clone(), ))) } @@ -242,14 +270,6 @@ impl HeaderProvider for ProviderFactory { self.static_file_provider.header_by_number(num) } - fn header_td(&self, hash: BlockHash) -> ProviderResult> { - self.provider()?.header_td(hash) - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - self.static_file_provider.header_td_by_number(number) - } - fn headers_range( &self, range: impl RangeBounds, @@ -553,13 +573,15 @@ where N: NodeTypesWithDB, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let Self { db, chain_spec, static_file_provider, prune_modes, storage } = self; + let Self { db, chain_spec, static_file_provider, prune_modes, storage, storage_settings } = + self; f.debug_struct("ProviderFactory") .field("db", &db) .field("chain_spec", &chain_spec) .field("static_file_provider", &static_file_provider) .field("prune_modes", &prune_modes) .field("storage", &storage) + .field("storage_settings", &*storage_settings.read()) .finish() } } @@ -572,6 +594,7 @@ impl Clone for ProviderFactory { static_file_provider: self.static_file_provider.clone(), prune_modes: self.prune_modes.clone(), storage: self.storage.clone(), + storage_settings: self.storage_settings.clone(), } } } @@ -585,7 +608,7 @@ mod tests { BlockHashReader, BlockNumReader, BlockWriter, DBProvider, HeaderSyncGapProvider, TransactionsProvider, }; - use alloy_primitives::{TxNumber, B256, U256}; + use alloy_primitives::{TxNumber, B256}; use assert_matches::assert_matches; use reth_chainspec::ChainSpecBuilder; use reth_db::{ @@ -665,7 +688,7 @@ mod tests { let prune_modes = PruneModes { sender_recovery: Some(PruneMode::Full), transaction_lookup: Some(PruneMode::Full), - ..PruneModes::none() + ..PruneModes::default() }; let factory = create_test_provider_factory(); let provider = factory.with_prune_modes(prune_modes).provider_rw().unwrap(); @@ -730,7 +753,7 @@ mod tests { let static_file_provider = provider.static_file_provider(); let mut static_file_writer = static_file_provider.latest_writer(StaticFileSegment::Headers).unwrap(); - static_file_writer.append_header(head.header(), U256::ZERO, 
&head.hash()).unwrap(); + static_file_writer.append_header(head.header(), &head.hash()).unwrap(); static_file_writer.commit().unwrap(); drop(static_file_writer); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8fbd71e2ace..5ab3d1cf285 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1,5 +1,7 @@ use crate::{ - bundle_state::StorageRevertsIter, + changesets_utils::{ + storage_trie_wiped_changeset_iter, StorageRevertsIter, StorageTrieCurrentValuesIter, + }, providers::{ database::{chain::ChainStorage, metrics}, static_file::StaticFileWriter, @@ -11,33 +13,34 @@ use crate::{ }, AccountReader, BlockBodyWriter, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, - DBProvider, HashingWriter, HeaderProvider, HeaderSyncGapProvider, HistoricalStateProvider, - HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, - StageCheckpointReader, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader, - StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, TrieWriter, + DBProvider, EitherWriter, HashingWriter, HeaderProvider, HeaderSyncGapProvider, + HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, + LatestStateProviderRef, OriginalValuesKnown, ProviderError, PruneCheckpointReader, + PruneCheckpointWriter, RevertsInit, StageCheckpointReader, StateProviderBox, StateWriter, + StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, + TransactionsProvider, TransactionsProviderExt, TrieReader, TrieWriter, }; use alloy_consensus::{ transaction::{SignerRecoverable, TransactionMeta, TxHashRef}, - BlockHeader, TxReceipt, + BlockHeader, }; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{ keccak256, map::{hash_map, B256Map, HashMap, HashSet}, - Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256, + Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, }; use itertools::Itertools; +use parking_lot::RwLock; use rayon::slice::ParallelSliceMut; -use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates}; -use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, EthereumHardforks}; +use reth_chain_state::ExecutedBlock; +use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, - ShardedKey, StoredBlockBodyIndices, + BlockNumberHashedAddress, ShardedKey, StorageSettings, StoredBlockBodyIndices, }, table::Table, tables, @@ -55,16 +58,23 @@ use reth_prune_types::{ use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ - BlockBodyIndicesProvider, BlockBodyReader, NodePrimitivesProvider, StateProvider, - StorageChangeSetReader, TryIntoHistoricalStateProvider, + BlockBodyIndicesProvider, BlockBodyReader, MetadataProvider, MetadataWriter, + NodePrimitivesProvider, StateProvider, StorageChangeSetReader, StorageSettingsCache, + TryIntoHistoricalStateProvider, }; use 
reth_storage_errors::provider::ProviderResult; use reth_trie::{ - prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, - updates::{StorageTrieUpdates, TrieUpdates}, - HashedPostStateSorted, Nibbles, StateRoot, StoredNibbles, + trie_cursor::{ + InMemoryTrieCursor, InMemoryTrieCursorFactory, TrieCursor, TrieCursorFactory, + TrieCursorIter, + }, + updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, + BranchNodeCompact, HashedPostStateSorted, Nibbles, StoredNibbles, StoredNibblesSubKey, + TrieChangeSetsEntry, +}; +use reth_trie_db::{ + DatabaseAccountTrieCursor, DatabaseStorageTrieCursor, DatabaseTrieCursorFactory, }; -use reth_trie_db::{DatabaseStateRoot, DatabaseStorageTrieCursor}; use revm_database::states::{ PlainStateReverts, PlainStorageChangeset, PlainStorageRevert, StateChangeset, }; @@ -72,7 +82,7 @@ use std::{ cmp::Ordering, collections::{BTreeMap, BTreeSet}, fmt::Debug, - ops::{Deref, DerefMut, Not, Range, RangeBounds, RangeInclusive}, + ops::{Deref, DerefMut, Range, RangeBounds, RangeFrom, RangeInclusive}, sync::Arc, }; use tracing::{debug, trace}; @@ -145,6 +155,8 @@ pub struct DatabaseProvider { prune_modes: PruneModes, /// Node storage handler. storage: Arc, + /// Storage configuration settings for this node + storage_settings: Arc>, } impl DatabaseProvider { @@ -206,7 +218,7 @@ impl DatabaseProvider { #[cfg(feature = "test-utils")] /// Sets the prune modes for provider. - pub fn set_prune_modes(&mut self, prune_modes: PruneModes) { + pub const fn set_prune_modes(&mut self, prune_modes: PruneModes) { self.prune_modes = prune_modes; } } @@ -240,8 +252,9 @@ impl DatabaseProvider { static_file_provider: StaticFileProvider, prune_modes: PruneModes, storage: Arc, + storage_settings: Arc>, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes, storage } + Self { tx, chain_spec, static_file_provider, prune_modes, storage, storage_settings } } } @@ -253,10 +266,7 @@ impl AsRef for DatabaseProvider { impl DatabaseProvider { /// Writes executed blocks and state to storage. - pub fn save_blocks( - &self, - blocks: Vec>, - ) -> ProviderResult<()> { + pub fn save_blocks(&self, blocks: Vec>) -> ProviderResult<()> { if blocks.is_empty() { debug!(target: "providers::db", "Attempted to write empty block range"); return Ok(()) @@ -280,12 +290,10 @@ impl DatabaseProvider DatabaseProvider DatabaseProvider, - ) -> ProviderResult<()> { + pub fn unwind_trie_state_from(&self, from: BlockNumber) -> ProviderResult<()> { let changed_accounts = self .tx .cursor_read::()? - .walk_range(range.clone())? + .walk_range(from..)? .collect::, _>>()?; - // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; - let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, account) in hashed_addresses { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - if account.is_none() { - destroyed_accounts.insert(hashed_address); - } - } + // Unwind account hashes. + self.unwind_account_hashing(changed_accounts.iter())?; // Unwind account history indices. self.unwind_account_history_indices(changed_accounts.iter())?; - let storage_range = BlockNumberAddress::range(range); + let storage_start = BlockNumberAddress((from, Address::ZERO)); let changed_storages = self .tx .cursor_read::()? - .walk_range(storage_range)? + .walk_range(storage_start..)? .collect::, _>>()?; - // Unwind storage hashes. 
Add changed account and storage keys to corresponding prefix - // sets. - let mut storage_prefix_sets = B256Map::::default(); - let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; - for (hashed_address, hashed_slots) in storage_entries { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); - for slot in hashed_slots { - storage_prefix_set.insert(Nibbles::unpack(slot)); - } - storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); - } + // Unwind storage hashes. + self.unwind_storage_hashing(changed_storages.iter().copied())?; // Unwind storage history indices. self.unwind_storage_history_indices(changed_storages.iter().copied())?; - // Calculate the reverted merkle root. - // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets - // are pre-loaded. - let prefix_sets = TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - storage_prefix_sets, - destroyed_accounts, - }; - let (_, trie_updates) = StateRoot::from_tx(&self.tx) - .with_prefix_sets(prefix_sets) - .root_with_updates() - .map_err(reth_db_api::DatabaseError::from)?; + // Unwind accounts/storages trie tables using the revert. + let trie_revert = self.trie_reverts(from)?; + self.write_trie_updates_sorted(&trie_revert)?; - self.write_trie_updates(&trie_updates)?; + // Clear trie changesets which have been unwound. + self.clear_trie_changesets_from(from)?; Ok(()) } @@ -518,8 +499,9 @@ impl DatabaseProvider { static_file_provider: StaticFileProvider, prune_modes: PruneModes, storage: Arc, + storage_settings: Arc>, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes, storage } + Self { tx, chain_spec, static_file_provider, prune_modes, storage, storage_settings } } /// Consume `DbTx` or `DbTxMut`. @@ -921,6 +903,19 @@ impl ChangeSetReader for DatabaseProvider { }) .collect() } + + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult> { + self.tx + .cursor_dup_read::()? + .seek_by_key_subkey(block_number, address)? + .filter(|acc| acc.address == address) + .map(Ok) + .transpose() + } } impl HeaderSyncGapProvider @@ -983,26 +978,6 @@ impl HeaderProvider for DatabasePro self.static_file_provider.header_by_number(num) } - fn header_td(&self, block_hash: BlockHash) -> ProviderResult> { - if let Some(num) = self.block_number(block_hash)? 
{ - self.header_td_by_number(num) - } else { - Ok(None) - } - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - if self.chain_spec.is_paris_active_at_block(number) && - let Some(td) = self.chain_spec.final_paris_total_difficulty() - { - // if this block is higher than the final paris(merge) block, return the final paris - // difficulty - return Ok(Some(td)) - } - - self.static_file_provider.header_td_by_number(number) - } - fn headers_range( &self, range: impl RangeBounds, @@ -1637,45 +1612,30 @@ impl StateWriter )); } - let has_receipts_pruning = self.prune_modes.has_receipts_pruning(); - - // Prepare receipts cursor if we are going to write receipts to the database - // - // We are writing to database if requested or if there's any kind of receipt pruning - // configured - let mut receipts_cursor = self.tx.cursor_write::>()?; - - // Prepare receipts static writer if we are going to write receipts to static files - // - // We are writing to static files if requested and if there's no receipt pruning configured - let mut receipts_static_writer = has_receipts_pruning - .not() - .then(|| self.static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)) - .transpose()?; - - let has_contract_log_filter = !self.prune_modes.receipts_log_filter.is_empty(); - let contract_log_pruner = self.prune_modes.receipts_log_filter.group_by_block(tip, None)?; + // Write receipts to static files only if they're explicitly enabled or we don't have + // receipts pruning + let mut receipts_writer = if self.storage_settings.read().receipts_in_static_files || + !self.prune_modes.has_receipts_pruning() + { + EitherWriter::StaticFile( + self.static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?, + ) + } else { + EitherWriter::Database(self.tx.cursor_write::>()?) + }; // All receipts from the last 128 blocks are required for blockchain tree, even with // [`PruneSegment::ContractLogs`]. let prunable_receipts = PruneMode::Distance(MINIMUM_PRUNING_DISTANCE).should_prune(first_block, tip); - // Prepare set of addresses which logs should not be pruned. - let mut allowed_addresses: HashSet = HashSet::new(); - for (_, addresses) in contract_log_pruner.range(..first_block) { - allowed_addresses.extend(addresses.iter().copied()); - } - for (idx, (receipts, first_tx_index)) in execution_outcome.receipts.iter().zip(block_indices).enumerate() { let block_number = first_block + idx as u64; // Increment block number for receipts static file writer - if let Some(writer) = receipts_static_writer.as_mut() { - writer.increment_block(block_number)?; - } + receipts_writer.increment_block(block_number)?; // Skip writing receipts if pruning configuration requires us to. 
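            // Illustrative note (not from this patch): `prunable_receipts` is derived from
            // `PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)`, so receipts for the most recent
            // 128 blocks are always written even when receipt pruning is configured; only blocks
            // far enough below `tip` can be skipped here.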
if prunable_receipts && @@ -1686,27 +1646,10 @@ impl StateWriter continue } - // If there are new addresses to retain after this block number, track them - if let Some(new_addresses) = contract_log_pruner.get(&block_number) { - allowed_addresses.extend(new_addresses.iter().copied()); - } - for (idx, receipt) in receipts.iter().enumerate() { let receipt_idx = first_tx_index + idx as u64; - // Skip writing receipt if log filter is active and it does not have any logs to - // retain - if prunable_receipts && - has_contract_log_filter && - !receipt.logs().iter().any(|log| allowed_addresses.contains(&log.address)) - { - continue - } - - if let Some(writer) = &mut receipts_static_writer { - writer.append_receipt(receipt_idx, receipt)?; - } - receipts_cursor.append(receipt_idx, receipt)?; + receipts_writer.append_receipt(receipt_idx, receipt)?; } } @@ -1742,6 +1685,10 @@ impl StateWriter // If we are writing the primary storage wipe transition, the pre-existing plain // storage state has to be taken from the database and written to storage history. // See [StorageWipe::Primary] for more details. + // + // TODO(mediocregopher): This could be rewritten in a way which doesn't require + // collecting wiped entries into a Vec like this, see + // `write_storage_trie_changesets`. let mut wiped_storage = Vec::new(); if wiped { tracing::trace!(?address, "Wiping storage"); @@ -1950,7 +1897,6 @@ impl StateWriter for (storage_key, (old_storage_value, _new_storage_value)) in storage { let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; // delete previous value - // TODO: This does not use dupsort features if plain_storage_cursor .seek_by_key_subkey(*address, *storage_key)? .filter(|s| s.key == *storage_key) @@ -2049,7 +1995,6 @@ impl StateWriter for (storage_key, (old_storage_value, _new_storage_value)) in storage { let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; // delete previous value - // TODO: This does not use dupsort features if plain_storage_cursor .seek_by_key_subkey(*address, *storage_key)? .filter(|s| s.key == *storage_key) @@ -2114,8 +2059,10 @@ impl StateWriter } impl TrieWriter for DatabaseProvider { - /// Writes trie updates. Returns the number of entries modified. - fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult { + /// Writes trie updates to the database with already sorted updates. + /// + /// Returns the number of entries modified. + fn write_trie_updates_sorted(&self, trie_updates: &TrieUpdatesSorted) -> ProviderResult { if trie_updates.is_empty() { return Ok(0) } @@ -2123,23 +2070,11 @@ impl TrieWriter for DatabaseProvider // Track the number of inserted entries. let mut num_entries = 0; - // Merge updated and removed nodes. Updated nodes must take precedence. - let mut account_updates = trie_updates - .removed_nodes_ref() - .iter() - .filter_map(|n| { - (!trie_updates.account_nodes_ref().contains_key(n)).then_some((n, None)) - }) - .collect::>(); - account_updates.extend( - trie_updates.account_nodes_ref().iter().map(|(nibbles, node)| (nibbles, Some(node))), - ); - // Sort trie node updates. 
- account_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); - let tx = self.tx_ref(); let mut account_trie_cursor = tx.cursor_write::()?; - for (key, updated_node) in account_updates { + + // Process sorted account nodes + for (key, updated_node) in trie_updates.account_nodes_ref() { let nibbles = StoredNibbles(*key); match updated_node { Some(node) => { @@ -2157,18 +2092,230 @@ impl TrieWriter for DatabaseProvider } } - num_entries += self.write_storage_trie_updates(trie_updates.storage_tries_ref().iter())?; + num_entries += + self.write_storage_trie_updates_sorted(trie_updates.storage_tries_ref().iter())?; + + Ok(num_entries) + } + + /// Records the current values of all trie nodes which will be updated using the `TrieUpdates` + /// into the trie changesets tables. + /// + /// The intended usage of this method is to call it _prior_ to calling `write_trie_updates` with + /// the same `TrieUpdates`. + /// + /// Returns the number of keys written. + fn write_trie_changesets( + &self, + block_number: BlockNumber, + trie_updates: &TrieUpdatesSorted, + updates_overlay: Option<&TrieUpdatesSorted>, + ) -> ProviderResult { + let mut num_entries = 0; + + let mut changeset_cursor = + self.tx_ref().cursor_dup_write::()?; + let curr_values_cursor = self.tx_ref().cursor_read::()?; + + // Wrap the cursor in DatabaseAccountTrieCursor + let mut db_account_cursor = DatabaseAccountTrieCursor::new(curr_values_cursor); + + // Static empty array for when updates_overlay is None + static EMPTY_ACCOUNT_UPDATES: Vec<(Nibbles, Option)> = Vec::new(); + + // Get the overlay updates for account trie, or use an empty array + let account_overlay_updates = updates_overlay + .map(|overlay| overlay.account_nodes_ref()) + .unwrap_or(&EMPTY_ACCOUNT_UPDATES); + + // Wrap the cursor in InMemoryTrieCursor with the overlay + let mut in_memory_account_cursor = + InMemoryTrieCursor::new(Some(&mut db_account_cursor), account_overlay_updates); + + for (path, _) in trie_updates.account_nodes_ref() { + num_entries += 1; + let node = in_memory_account_cursor.seek_exact(*path)?.map(|(_, node)| node); + changeset_cursor.append_dup( + block_number, + TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(*path), node }, + )?; + } + + let mut storage_updates = trie_updates.storage_tries_ref().iter().collect::>(); + storage_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); + + num_entries += self.write_storage_trie_changesets( + block_number, + storage_updates.into_iter(), + updates_overlay, + )?; Ok(num_entries) } + + fn clear_trie_changesets(&self) -> ProviderResult<()> { + let tx = self.tx_ref(); + tx.clear::()?; + tx.clear::()?; + Ok(()) + } + + fn clear_trie_changesets_from(&self, from: BlockNumber) -> ProviderResult<()> { + let tx = self.tx_ref(); + { + let range = from..; + let mut cursor = tx.cursor_dup_write::()?; + let mut walker = cursor.walk_range(range)?; + + while walker.next().transpose()?.is_some() { + walker.delete_current()?; + } + } + + { + let range: RangeFrom = (from, B256::ZERO).into()..; + let mut cursor = tx.cursor_dup_write::()?; + let mut walker = cursor.walk_range(range)?; + + while walker.next().transpose()?.is_some() { + walker.delete_current()?; + } + } + + Ok(()) + } +} + +impl TrieReader for DatabaseProvider { + fn trie_reverts(&self, from: BlockNumber) -> ProviderResult { + let tx = self.tx_ref(); + + // Read account trie changes directly into a Vec - data is already sorted by nibbles + // within each block, and we want the oldest (first) version of each node sorted by path. 
+ let mut account_nodes = Vec::new(); + let mut seen_account_keys = HashSet::new(); + let mut accounts_cursor = tx.cursor_dup_read::()?; + + for entry in accounts_cursor.walk_range(from..)? { + let (_, TrieChangeSetsEntry { nibbles, node }) = entry?; + // Only keep the first (oldest) version of each node + if seen_account_keys.insert(nibbles.0) { + account_nodes.push((nibbles.0, node)); + } + } + + account_nodes.sort_by_key(|(path, _)| *path); + + // Read storage trie changes - data is sorted by (block, hashed_address, nibbles) + // Keep track of seen (address, nibbles) pairs to only keep the oldest version per address, + // sorted by path. + let mut storage_tries = B256Map::>::default(); + let mut seen_storage_keys = HashSet::new(); + let mut storages_cursor = tx.cursor_dup_read::()?; + + // Create storage range starting from `from` block + let storage_range_start = BlockNumberHashedAddress((from, B256::ZERO)); + + for entry in storages_cursor.walk_range(storage_range_start..)? { + let ( + BlockNumberHashedAddress((_, hashed_address)), + TrieChangeSetsEntry { nibbles, node }, + ) = entry?; + + // Only keep the first (oldest) version of each node for this address + if seen_storage_keys.insert((hashed_address, nibbles.0)) { + storage_tries.entry(hashed_address).or_default().push((nibbles.0, node)); + } + } + + // Convert to StorageTrieUpdatesSorted + let storage_tries = storage_tries + .into_iter() + .map(|(address, mut nodes)| { + nodes.sort_by_key(|(path, _)| *path); + (address, StorageTrieUpdatesSorted { storage_nodes: nodes, is_deleted: false }) + }) + .collect(); + + Ok(TrieUpdatesSorted::new(account_nodes, storage_tries)) + } + + fn get_block_trie_updates( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + let tx = self.tx_ref(); + + // Step 1: Get the trie reverts for the state after the target block + let reverts = self.trie_reverts(block_number + 1)?; + + // Step 2: Create an InMemoryTrieCursorFactory with the reverts + // This gives us the trie state as it was after the target block was processed + let db_cursor_factory = DatabaseTrieCursorFactory::new(tx); + let cursor_factory = InMemoryTrieCursorFactory::new(db_cursor_factory, &reverts); + + // Step 3: Collect all account trie nodes that changed in the target block + let mut account_nodes = Vec::new(); + + // Walk through all account trie changes for this block + let mut accounts_trie_cursor = tx.cursor_dup_read::()?; + let mut account_cursor = cursor_factory.account_trie_cursor()?; + + for entry in accounts_trie_cursor.walk_dup(Some(block_number), None)? { + let (_, TrieChangeSetsEntry { nibbles, .. }) = entry?; + // Look up the current value of this trie node using the overlay cursor + let node_value = account_cursor.seek_exact(nibbles.0)?.map(|(_, node)| node); + account_nodes.push((nibbles.0, node_value)); + } + + // Step 4: Collect all storage trie nodes that changed in the target block + let mut storage_tries = B256Map::default(); + let mut storages_trie_cursor = tx.cursor_dup_read::()?; + let storage_range_start = BlockNumberHashedAddress((block_number, B256::ZERO)); + let storage_range_end = BlockNumberHashedAddress((block_number + 1, B256::ZERO)); + + let mut current_hashed_address = None; + let mut storage_cursor = None; + + for entry in storages_trie_cursor.walk_range(storage_range_start..storage_range_end)? { + let ( + BlockNumberHashedAddress((_, hashed_address)), + TrieChangeSetsEntry { nibbles, .. 
}, + ) = entry?; + + // Check if we need to create a new storage cursor for a different account + if current_hashed_address != Some(hashed_address) { + storage_cursor = Some(cursor_factory.storage_trie_cursor(hashed_address)?); + current_hashed_address = Some(hashed_address); + } + + // Look up the current value of this storage trie node + let cursor = + storage_cursor.as_mut().expect("storage_cursor was just initialized above"); + let node_value = cursor.seek_exact(nibbles.0)?.map(|(_, node)| node); + storage_tries + .entry(hashed_address) + .or_insert_with(|| StorageTrieUpdatesSorted { + storage_nodes: Vec::new(), + is_deleted: false, + }) + .storage_nodes + .push((nibbles.0, node_value)); + } + + Ok(TrieUpdatesSorted::new(account_nodes, storage_tries)) + } } impl StorageTrieWriter for DatabaseProvider { - /// Writes storage trie updates from the given storage trie map. First sorts the storage trie - /// updates by the hashed address, writing in sorted order. - fn write_storage_trie_updates<'a>( + /// Writes storage trie updates from the given storage trie map with already sorted updates. + /// + /// Expects the storage trie updates to already be sorted by the hashed address key. + /// + /// Returns the number of entries modified. + fn write_storage_trie_updates_sorted<'a>( &self, - storage_tries: impl Iterator, + storage_tries: impl Iterator, ) -> ProviderResult { let mut num_entries = 0; let mut storage_tries = storage_tries.collect::>(); @@ -2178,12 +2325,110 @@ impl StorageTrieWriter for DatabaseP let mut db_storage_trie_cursor = DatabaseStorageTrieCursor::new(cursor, *hashed_address); num_entries += - db_storage_trie_cursor.write_storage_trie_updates(storage_trie_updates)?; + db_storage_trie_cursor.write_storage_trie_updates_sorted(storage_trie_updates)?; cursor = db_storage_trie_cursor.cursor; } Ok(num_entries) } + + /// Records the current values of all trie nodes which will be updated using the + /// `StorageTrieUpdates` into the storage trie changesets table. + /// + /// The intended usage of this method is to call it _prior_ to calling + /// `write_storage_trie_updates` with the same set of `StorageTrieUpdates`. + /// + /// Returns the number of keys written. + fn write_storage_trie_changesets<'a>( + &self, + block_number: BlockNumber, + storage_tries: impl Iterator, + updates_overlay: Option<&TrieUpdatesSorted>, + ) -> ProviderResult { + let mut num_written = 0; + + let mut changeset_cursor = + self.tx_ref().cursor_dup_write::()?; + + // We hold two cursors to the same table because we use them simultaneously when an + // account's storage is wiped. We keep them outside the for-loop so they can be re-used + // between accounts. + let changed_curr_values_cursor = self.tx_ref().cursor_dup_read::()?; + let wiped_nodes_cursor = self.tx_ref().cursor_dup_read::()?; + + // DatabaseStorageTrieCursor requires ownership of the cursor. The easiest way to deal with + // this is to create this outer variable with an initial dummy account, and overwrite it on + // every loop for every real account. 
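        // Illustrative detail (not from this patch): the wrapper exposes the raw cursor again
        // through its public `cursor` field, so each iteration of the loop below rebinds the same
        // underlying dup-cursor to the next account instead of opening a new one, e.g.
        //
        //     changed_curr_values_cursor =
        //         DatabaseStorageTrieCursor::new(changed_curr_values_cursor.cursor, *hashed_address);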
+ let mut changed_curr_values_cursor = DatabaseStorageTrieCursor::new( + changed_curr_values_cursor, + B256::default(), // Will be set per iteration + ); + let mut wiped_nodes_cursor = DatabaseStorageTrieCursor::new( + wiped_nodes_cursor, + B256::default(), // Will be set per iteration + ); + + // Static empty array for when updates_overlay is None + static EMPTY_UPDATES: Vec<(Nibbles, Option)> = Vec::new(); + + for (hashed_address, storage_trie_updates) in storage_tries { + let changeset_key = BlockNumberHashedAddress((block_number, *hashed_address)); + + // Update the hashed address for the cursors + changed_curr_values_cursor = + DatabaseStorageTrieCursor::new(changed_curr_values_cursor.cursor, *hashed_address); + + // Get the overlay updates for this storage trie, or use an empty array + let overlay_updates = updates_overlay + .and_then(|overlay| overlay.storage_tries_ref().get(hashed_address)) + .map(|updates| updates.storage_nodes_ref()) + .unwrap_or(&EMPTY_UPDATES); + + // Wrap the cursor in InMemoryTrieCursor with the overlay + let mut in_memory_changed_cursor = + InMemoryTrieCursor::new(Some(&mut changed_curr_values_cursor), overlay_updates); + + // Create an iterator which produces the current values of all updated paths, or None if + // they are currently unset. + let curr_values_of_changed = StorageTrieCurrentValuesIter::new( + storage_trie_updates.storage_nodes.iter().map(|e| e.0), + &mut in_memory_changed_cursor, + )?; + + if storage_trie_updates.is_deleted() { + // Create an iterator that starts from the beginning of the storage trie for this + // account + wiped_nodes_cursor = + DatabaseStorageTrieCursor::new(wiped_nodes_cursor.cursor, *hashed_address); + + // Wrap the wiped nodes cursor in InMemoryTrieCursor with the overlay + let mut in_memory_wiped_cursor = + InMemoryTrieCursor::new(Some(&mut wiped_nodes_cursor), overlay_updates); + + let all_nodes = TrieCursorIter::new(&mut in_memory_wiped_cursor); + + for wiped in storage_trie_wiped_changeset_iter(curr_values_of_changed, all_nodes)? 
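                // Illustrative note (not from this patch): for a wiped storage trie this iterator
                // yields every node the trie currently contains, i.e. the updated paths with
                // their pre-update values (or `None` if absent) plus all untouched nodes, so the
                // recorded changeset should be sufficient to restore the whole storage trie on
                // unwind.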
{ + let (path, node) = wiped?; + num_written += 1; + changeset_cursor.append_dup( + changeset_key, + TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(path), node }, + )?; + } + } else { + for curr_value in curr_values_of_changed { + let (path, node) = curr_value?; + num_written += 1; + changeset_cursor.append_dup( + changeset_key, + TrieChangeSetsEntry { nibbles: StoredNibblesSubKey(path), node }, + )?; + } + } + } + + Ok(num_written) + } } impl HashingWriter for DatabaseProvider { @@ -2478,7 +2723,7 @@ impl BlockExecu ) -> ProviderResult> { let range = block + 1..=self.last_block_number()?; - self.unwind_trie_state_range(range.clone())?; + self.unwind_trie_state_from(block + 1)?; // get execution res let execution_state = self.take_state_above(block)?; @@ -2496,9 +2741,7 @@ impl BlockExecu } fn remove_block_and_execution_above(&self, block: BlockNumber) -> ProviderResult<()> { - let range = block + 1..=self.last_block_number()?; - - self.unwind_trie_state_range(range)?; + self.unwind_trie_state_from(block + 1)?; // remove execution res self.remove_state_above(block)?; @@ -2524,7 +2767,6 @@ impl BlockWrite /// tables: /// * [`StaticFileSegment::Headers`] /// * [`tables::HeaderNumbers`] - /// * [`tables::HeaderTerminalDifficulties`] /// * [`tables::BlockBodyIndices`] /// /// If there are transactions in the block, the following static file segments and tables will @@ -2549,19 +2791,9 @@ impl BlockWrite let mut durations_recorder = metrics::DurationsRecorder::default(); - // total difficulty - let ttd = if block_number == 0 { - block.header().difficulty() - } else { - let parent_block_number = block_number - 1; - let parent_ttd = self.header_td_by_number(parent_block_number)?.unwrap_or_default(); - durations_recorder.record_relative(metrics::Action::GetParentTD); - parent_ttd + block.header().difficulty() - }; - self.static_file_provider .get_writer(block_number, StaticFileSegment::Headers)? - .append_header(block.header(), ttd, &block.hash())?; + .append_header(block.header(), &block.hash())?; self.tx.put::(block.hash(), block_number)?; durations_recorder.record_relative(metrics::Action::InsertHeaderNumbers); @@ -2789,10 +3021,13 @@ impl PruneCheckpointReader for DatabaseProvide } fn get_prune_checkpoints(&self) -> ProviderResult> { - Ok(self - .tx - .cursor_read::()? - .walk(None)? + Ok(PruneSegment::variants() + .filter_map(|segment| { + self.tx + .get::(segment) + .transpose() + .map(|chk| chk.map(|chk| (segment, chk))) + }) .collect::>()?) } } @@ -2837,7 +3072,7 @@ impl ChainStateBlockReader for DatabaseProvide let mut finalized_blocks = self .tx .cursor_read::()? - .walk(Some(tables::ChainStateKey::LastSafeBlockBlock))? + .walk(Some(tables::ChainStateKey::LastSafeBlock))? .take(1) .collect::, _>>()?; @@ -2854,9 +3089,7 @@ impl ChainStateBlockWriter for DatabaseProvider ProviderResult<()> { - Ok(self - .tx - .put::(tables::ChainStateKey::LastSafeBlockBlock, block_number)?) + Ok(self.tx.put::(tables::ChainStateKey::LastSafeBlock, block_number)?) 
} } @@ -2897,6 +3130,28 @@ impl DBProvider for DatabaseProvider } } +impl MetadataProvider for DatabaseProvider { + fn get_metadata(&self, key: &str) -> ProviderResult>> { + self.tx.get::(key.to_string()).map_err(Into::into) + } +} + +impl MetadataWriter for DatabaseProvider { + fn write_metadata(&self, key: &str, value: Vec) -> ProviderResult<()> { + self.tx.put::(key.to_string(), value).map_err(Into::into) + } +} + +impl StorageSettingsCache for DatabaseProvider { + fn cached_storage_settings(&self) -> StorageSettings { + *self.storage_settings.read() + } + + fn set_storage_settings_cache(&self, settings: StorageSettings) { + *self.storage_settings.write() = settings; + } +} + #[cfg(test)] mod tests { use super::*; @@ -3112,4 +3367,1272 @@ mod tests { assert_eq!(range_result, individual_results); } + + #[test] + fn test_write_trie_changesets() { + use reth_db_api::models::BlockNumberHashedAddress; + use reth_trie::{BranchNodeCompact, StorageTrieEntry}; + + let factory = create_test_provider_factory(); + let provider_rw = factory.provider_rw().unwrap(); + + let block_number = 1u64; + + // Create some test nibbles and nodes + let account_nibbles1 = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); + let account_nibbles2 = Nibbles::from_nibbles([0x5, 0x6, 0x7, 0x8]); + + let node1 = BranchNodeCompact::new( + 0b1111_1111_1111_1111, // state_mask + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask + vec![], // hashes + None, // root hash + ); + + // Pre-populate AccountsTrie with a node that will be updated (for account_nibbles1) + { + let mut cursor = provider_rw.tx_ref().cursor_write::().unwrap(); + cursor.insert(StoredNibbles(account_nibbles1), &node1).unwrap(); + } + + // Create account trie updates: one Some (update) and one None (removal) + let account_nodes = vec![ + (account_nibbles1, Some(node1.clone())), // This will update existing node + (account_nibbles2, None), // This will be a removal (no existing node) + ]; + + // Create storage trie updates + let storage_address1 = B256::from([1u8; 32]); // Normal storage trie + let storage_address2 = B256::from([2u8; 32]); // Wiped storage trie + + let storage_nibbles1 = Nibbles::from_nibbles([0xa, 0xb]); + let storage_nibbles2 = Nibbles::from_nibbles([0xc, 0xd]); + let storage_nibbles3 = Nibbles::from_nibbles([0xe, 0xf]); + + let storage_node1 = BranchNodeCompact::new( + 0b1111_0000_0000_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + let storage_node2 = BranchNodeCompact::new( + 0b0000_1111_0000_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Create an old version of storage_node1 to prepopulate + let storage_node1_old = BranchNodeCompact::new( + 0b1010_0000_0000_0000, // Different mask to show it's an old value + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Pre-populate StoragesTrie for normal storage (storage_address1) + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + // Add node that will be updated (storage_nibbles1) with old value + let entry = StorageTrieEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: storage_node1_old.clone(), + }; + cursor.upsert(storage_address1, &entry).unwrap(); + } + + // Pre-populate StoragesTrie for wiped storage (storage_address2) + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + // Add node that will be updated (storage_nibbles1) + let entry1 = StorageTrieEntry { + nibbles: 
StoredNibblesSubKey(storage_nibbles1), + node: storage_node1.clone(), + }; + cursor.upsert(storage_address2, &entry1).unwrap(); + // Add node that won't be updated but exists (storage_nibbles3) + let entry3 = StorageTrieEntry { + nibbles: StoredNibblesSubKey(storage_nibbles3), + node: storage_node2.clone(), + }; + cursor.upsert(storage_address2, &entry3).unwrap(); + } + + // Normal storage trie: one Some (update) and one None (new) + let storage_trie1 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1.clone())), // This will update existing node + (storage_nibbles2, None), // This is a new node + ], + }; + + // Wiped storage trie + let storage_trie2 = StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1.clone())), // Updated node already in db + (storage_nibbles2, Some(storage_node2.clone())), /* Updated node not in db + * storage_nibbles3 is in db + * but not updated */ + ], + }; + + let mut storage_tries = B256Map::default(); + storage_tries.insert(storage_address1, storage_trie1); + storage_tries.insert(storage_address2, storage_trie2); + + let trie_updates = TrieUpdatesSorted::new(account_nodes, storage_tries); + + // Write the changesets + let num_written = + provider_rw.write_trie_changesets(block_number, &trie_updates, None).unwrap(); + + // Verify number of entries written + // Account changesets: 2 (one update, one removal) + // Storage changesets: + // - Normal storage: 2 (one update, one removal) + // - Wiped storage: 3 (two updated, one existing not updated) + // Total: 2 + 2 + 3 = 7 + assert_eq!(num_written, 7); + + // Verify account changesets were written correctly + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_read::().unwrap(); + + // Get all entries for this block to see what was written + let all_entries = cursor + .walk_dup(Some(block_number), None) + .unwrap() + .collect::, _>>() + .unwrap(); + + // Assert the full value of all_entries in a single assert_eq + assert_eq!( + all_entries, + vec![ + ( + block_number, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles1), + node: Some(node1), + } + ), + ( + block_number, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles2), + node: None, + } + ), + ] + ); + } + + // Verify storage changesets were written correctly + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_read::().unwrap(); + + // Check normal storage trie changesets + let key1 = BlockNumberHashedAddress((block_number, storage_address1)); + let entries1 = + cursor.walk_dup(Some(key1), None).unwrap().collect::, _>>().unwrap(); + + assert_eq!( + entries1, + vec![ + ( + key1, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: Some(storage_node1_old), // Old value that was prepopulated + } + ), + ( + key1, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: None, // New node, no previous value + } + ), + ] + ); + + // Check wiped storage trie changesets + let key2 = BlockNumberHashedAddress((block_number, storage_address2)); + let entries2 = + cursor.walk_dup(Some(key2), None).unwrap().collect::, _>>().unwrap(); + + assert_eq!( + entries2, + vec![ + ( + key2, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: Some(storage_node1), // Was in db, so has old value + } + ), + ( + key2, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: None, // Was not in db + } + ), + ( + key2, + 
TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles3), + node: Some(storage_node2), // Existing node in wiped storage + } + ), + ] + ); + } + + provider_rw.commit().unwrap(); + } + + #[test] + fn test_write_trie_changesets_with_overlay() { + use reth_db_api::models::BlockNumberHashedAddress; + use reth_trie::BranchNodeCompact; + + let factory = create_test_provider_factory(); + let provider_rw = factory.provider_rw().unwrap(); + + let block_number = 1u64; + + // Create some test nibbles and nodes + let account_nibbles1 = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); + let account_nibbles2 = Nibbles::from_nibbles([0x5, 0x6, 0x7, 0x8]); + + let node1 = BranchNodeCompact::new( + 0b1111_1111_1111_1111, // state_mask + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask + vec![], // hashes + None, // root hash + ); + + // NOTE: Unlike the previous test, we're NOT pre-populating the database + // All node values will come from the overlay + + // Create the overlay with existing values that would normally be in the DB + let node1_old = BranchNodeCompact::new( + 0b1010_1010_1010_1010, // Different mask to show it's the overlay "existing" value + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Create overlay account nodes + let overlay_account_nodes = vec![ + (account_nibbles1, Some(node1_old.clone())), // This simulates existing node in overlay + ]; + + // Create account trie updates: one Some (update) and one None (removal) + let account_nodes = vec![ + (account_nibbles1, Some(node1)), // This will update overlay node + (account_nibbles2, None), // This will be a removal (no existing node) + ]; + + // Create storage trie updates + let storage_address1 = B256::from([1u8; 32]); // Normal storage trie + let storage_address2 = B256::from([2u8; 32]); // Wiped storage trie + + let storage_nibbles1 = Nibbles::from_nibbles([0xa, 0xb]); + let storage_nibbles2 = Nibbles::from_nibbles([0xc, 0xd]); + let storage_nibbles3 = Nibbles::from_nibbles([0xe, 0xf]); + + let storage_node1 = BranchNodeCompact::new( + 0b1111_0000_0000_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + let storage_node2 = BranchNodeCompact::new( + 0b0000_1111_0000_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Create old versions for overlay + let storage_node1_old = BranchNodeCompact::new( + 0b1010_0000_0000_0000, // Different mask to show it's an old value + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Create overlay storage nodes + let mut overlay_storage_tries = B256Map::default(); + + // Overlay for normal storage (storage_address1) + let overlay_storage_trie1 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1_old.clone())), /* Simulates existing in + * overlay */ + ], + }; + + // Overlay for wiped storage (storage_address2) + let overlay_storage_trie2 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1.clone())), // Existing in overlay + (storage_nibbles3, Some(storage_node2.clone())), // Also existing in overlay + ], + }; + + overlay_storage_tries.insert(storage_address1, overlay_storage_trie1); + overlay_storage_tries.insert(storage_address2, overlay_storage_trie2); + + let overlay = TrieUpdatesSorted::new(overlay_account_nodes, overlay_storage_tries); + + // Normal storage trie: one Some (update) and one None (new) + let storage_trie1 = 
StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1.clone())), // This will update overlay node + (storage_nibbles2, None), // This is a new node + ], + }; + + // Wiped storage trie + let storage_trie2 = StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![ + (storage_nibbles1, Some(storage_node1.clone())), // Updated node from overlay + (storage_nibbles2, Some(storage_node2.clone())), /* Updated node not in overlay + * storage_nibbles3 is in + * overlay + * but not updated */ + ], + }; + + let mut storage_tries = B256Map::default(); + storage_tries.insert(storage_address1, storage_trie1); + storage_tries.insert(storage_address2, storage_trie2); + + let trie_updates = TrieUpdatesSorted::new(account_nodes, storage_tries); + + // Write the changesets WITH OVERLAY + let num_written = + provider_rw.write_trie_changesets(block_number, &trie_updates, Some(&overlay)).unwrap(); + + // Verify number of entries written + // Account changesets: 2 (one update from overlay, one removal) + // Storage changesets: + // - Normal storage: 2 (one update from overlay, one new) + // - Wiped storage: 3 (two updated, one existing from overlay not updated) + // Total: 2 + 2 + 3 = 7 + assert_eq!(num_written, 7); + + // Verify account changesets were written correctly + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_read::().unwrap(); + + // Get all entries for this block to see what was written + let all_entries = cursor + .walk_dup(Some(block_number), None) + .unwrap() + .collect::, _>>() + .unwrap(); + + // Assert the full value of all_entries in a single assert_eq + assert_eq!( + all_entries, + vec![ + ( + block_number, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles1), + node: Some(node1_old), // Value from overlay, not DB + } + ), + ( + block_number, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles2), + node: None, + } + ), + ] + ); + } + + // Verify storage changesets were written correctly + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_read::().unwrap(); + + // Check normal storage trie changesets + let key1 = BlockNumberHashedAddress((block_number, storage_address1)); + let entries1 = + cursor.walk_dup(Some(key1), None).unwrap().collect::, _>>().unwrap(); + + assert_eq!( + entries1, + vec![ + ( + key1, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: Some(storage_node1_old), // Old value from overlay + } + ), + ( + key1, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: None, // New node, no previous value + } + ), + ] + ); + + // Check wiped storage trie changesets + let key2 = BlockNumberHashedAddress((block_number, storage_address2)); + let entries2 = + cursor.walk_dup(Some(key2), None).unwrap().collect::, _>>().unwrap(); + + assert_eq!( + entries2, + vec![ + ( + key2, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: Some(storage_node1), // Value from overlay + } + ), + ( + key2, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: None, // Was not in overlay + } + ), + ( + key2, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles3), + node: Some(storage_node2), /* Existing node from overlay in wiped + * storage */ + } + ), + ] + ); + } + + provider_rw.commit().unwrap(); + } + + #[test] + fn test_clear_trie_changesets_from() { + use alloy_primitives::hex_literal::hex; + use reth_db_api::models::BlockNumberHashedAddress; + 
use reth_trie::{BranchNodeCompact, StoredNibblesSubKey, TrieChangeSetsEntry}; + + let factory = create_test_provider_factory(); + + // Create some test data for different block numbers + let block1 = 100u64; + let block2 = 101u64; + let block3 = 102u64; + let block4 = 103u64; + let block5 = 104u64; + + // Create test addresses for storage changesets + let storage_address1 = + B256::from(hex!("1111111111111111111111111111111111111111111111111111111111111111")); + let storage_address2 = + B256::from(hex!("2222222222222222222222222222222222222222222222222222222222222222")); + + // Create test nibbles + let nibbles1 = StoredNibblesSubKey(Nibbles::from_nibbles([0x1, 0x2, 0x3])); + let nibbles2 = StoredNibblesSubKey(Nibbles::from_nibbles([0x4, 0x5, 0x6])); + let nibbles3 = StoredNibblesSubKey(Nibbles::from_nibbles([0x7, 0x8, 0x9])); + + // Create test nodes + let node1 = BranchNodeCompact::new( + 0b1111_1111_1111_1111, + 0b1111_1111_1111_1111, + 0b0000_0000_0000_0001, + vec![B256::from(hex!( + "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" + ))], + None, + ); + let node2 = BranchNodeCompact::new( + 0b1111_1111_1111_1110, + 0b1111_1111_1111_1110, + 0b0000_0000_0000_0010, + vec![B256::from(hex!( + "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890" + ))], + Some(B256::from(hex!( + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" + ))), + ); + + // Populate AccountsTrieChangeSets with data across multiple blocks + { + let provider_rw = factory.provider_rw().unwrap(); + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + + // Block 100: 2 entries (will be kept - before start block) + cursor + .upsert( + block1, + &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node1.clone()) }, + ) + .unwrap(); + cursor + .upsert(block1, &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: None }) + .unwrap(); + + // Block 101: 3 entries with duplicates (will be deleted - from this block onwards) + cursor + .upsert( + block2, + &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node2.clone()) }, + ) + .unwrap(); + cursor + .upsert( + block2, + &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node1.clone()) }, + ) + .unwrap(); // duplicate key + cursor + .upsert(block2, &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: None }) + .unwrap(); + + // Block 102: 2 entries (will be deleted - after start block) + cursor + .upsert( + block3, + &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node1.clone()) }, + ) + .unwrap(); + cursor + .upsert( + block3, + &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: Some(node2.clone()) }, + ) + .unwrap(); + + // Block 103: 1 entry (will be deleted - after start block) + cursor + .upsert(block4, &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: None }) + .unwrap(); + + // Block 104: 2 entries (will be deleted - after start block) + cursor + .upsert( + block5, + &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node2.clone()) }, + ) + .unwrap(); + cursor + .upsert(block5, &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: None }) + .unwrap(); + + provider_rw.commit().unwrap(); + } + + // Populate StoragesTrieChangeSets with data across multiple blocks + { + let provider_rw = factory.provider_rw().unwrap(); + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + + // Block 100, address1: 2 entries (will be kept - before start block) + let key1_block1 = BlockNumberHashedAddress((block1, storage_address1)); + cursor 
+ .upsert( + key1_block1, + &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node1.clone()) }, + ) + .unwrap(); + cursor + .upsert(key1_block1, &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: None }) + .unwrap(); + + // Block 101, address1: 3 entries with duplicates (will be deleted - from this block + // onwards) + let key1_block2 = BlockNumberHashedAddress((block2, storage_address1)); + cursor + .upsert( + key1_block2, + &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: Some(node2.clone()) }, + ) + .unwrap(); + cursor + .upsert(key1_block2, &TrieChangeSetsEntry { nibbles: nibbles1.clone(), node: None }) + .unwrap(); // duplicate key + cursor + .upsert( + key1_block2, + &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node1.clone()) }, + ) + .unwrap(); + + // Block 102, address2: 2 entries (will be deleted - after start block) + let key2_block3 = BlockNumberHashedAddress((block3, storage_address2)); + cursor + .upsert( + key2_block3, + &TrieChangeSetsEntry { nibbles: nibbles2.clone(), node: Some(node2.clone()) }, + ) + .unwrap(); + cursor + .upsert(key2_block3, &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: None }) + .unwrap(); + + // Block 103, address1: 2 entries with duplicate (will be deleted - after start block) + let key1_block4 = BlockNumberHashedAddress((block4, storage_address1)); + cursor + .upsert( + key1_block4, + &TrieChangeSetsEntry { nibbles: nibbles3.clone(), node: Some(node1) }, + ) + .unwrap(); + cursor + .upsert( + key1_block4, + &TrieChangeSetsEntry { nibbles: nibbles3, node: Some(node2.clone()) }, + ) + .unwrap(); // duplicate key + + // Block 104, address2: 2 entries (will be deleted - after start block) + let key2_block5 = BlockNumberHashedAddress((block5, storage_address2)); + cursor + .upsert(key2_block5, &TrieChangeSetsEntry { nibbles: nibbles1, node: None }) + .unwrap(); + cursor + .upsert(key2_block5, &TrieChangeSetsEntry { nibbles: nibbles2, node: Some(node2) }) + .unwrap(); + + provider_rw.commit().unwrap(); + } + + // Clear all changesets from block 101 onwards + { + let provider_rw = factory.provider_rw().unwrap(); + provider_rw.clear_trie_changesets_from(block2).unwrap(); + provider_rw.commit().unwrap(); + } + + // Verify AccountsTrieChangeSets after clearing + { + let provider = factory.provider().unwrap(); + let mut cursor = + provider.tx_ref().cursor_dup_read::().unwrap(); + + // Block 100 should still exist (before range) + let block1_entries = cursor + .walk_dup(Some(block1), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(block1_entries.len(), 2, "Block 100 entries should be preserved"); + assert_eq!(block1_entries[0].0, block1); + assert_eq!(block1_entries[1].0, block1); + + // Blocks 101-104 should be deleted + let block2_entries = cursor + .walk_dup(Some(block2), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block2_entries.is_empty(), "Block 101 entries should be deleted"); + + let block3_entries = cursor + .walk_dup(Some(block3), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block3_entries.is_empty(), "Block 102 entries should be deleted"); + + let block4_entries = cursor + .walk_dup(Some(block4), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block4_entries.is_empty(), "Block 103 entries should be deleted"); + + // Block 104 should also be deleted + let block5_entries = cursor + .walk_dup(Some(block5), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block5_entries.is_empty(), "Block 104 entries should be deleted"); + } + + // 
Verify StoragesTrieChangeSets after clearing + { + let provider = factory.provider().unwrap(); + let mut cursor = + provider.tx_ref().cursor_dup_read::().unwrap(); + + // Block 100 entries should still exist (before range) + let key1_block1 = BlockNumberHashedAddress((block1, storage_address1)); + let block1_entries = cursor + .walk_dup(Some(key1_block1), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(block1_entries.len(), 2, "Block 100 storage entries should be preserved"); + + // Blocks 101-104 entries should be deleted + let key1_block2 = BlockNumberHashedAddress((block2, storage_address1)); + let block2_entries = cursor + .walk_dup(Some(key1_block2), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block2_entries.is_empty(), "Block 101 storage entries should be deleted"); + + let key2_block3 = BlockNumberHashedAddress((block3, storage_address2)); + let block3_entries = cursor + .walk_dup(Some(key2_block3), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block3_entries.is_empty(), "Block 102 storage entries should be deleted"); + + let key1_block4 = BlockNumberHashedAddress((block4, storage_address1)); + let block4_entries = cursor + .walk_dup(Some(key1_block4), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block4_entries.is_empty(), "Block 103 storage entries should be deleted"); + + // Block 104 entries should also be deleted + let key2_block5 = BlockNumberHashedAddress((block5, storage_address2)); + let block5_entries = cursor + .walk_dup(Some(key2_block5), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert!(block5_entries.is_empty(), "Block 104 storage entries should be deleted"); + } + } + + #[test] + fn test_write_trie_updates_sorted() { + use reth_trie::{ + updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, + BranchNodeCompact, StorageTrieEntry, + }; + + let factory = create_test_provider_factory(); + let provider_rw = factory.provider_rw().unwrap(); + + // Pre-populate account trie with data that will be deleted + { + let tx = provider_rw.tx_ref(); + let mut cursor = tx.cursor_write::().unwrap(); + + // Add account node that will be deleted + let to_delete = StoredNibbles(Nibbles::from_nibbles([0x3, 0x4])); + cursor + .upsert( + to_delete, + &BranchNodeCompact::new( + 0b1010_1010_1010_1010, // state_mask + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask + vec![], + None, + ), + ) + .unwrap(); + + // Add account node that will be updated + let to_update = StoredNibbles(Nibbles::from_nibbles([0x1, 0x2])); + cursor + .upsert( + to_update, + &BranchNodeCompact::new( + 0b0101_0101_0101_0101, // old state_mask (will be updated) + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask + vec![], + None, + ), + ) + .unwrap(); + } + + // Pre-populate storage tries with data + let storage_address1 = B256::from([1u8; 32]); + let storage_address2 = B256::from([2u8; 32]); + { + let tx = provider_rw.tx_ref(); + let mut storage_cursor = tx.cursor_dup_write::().unwrap(); + + // Add storage nodes for address1 (one will be deleted) + storage_cursor + .upsert( + storage_address1, + &StorageTrieEntry { + nibbles: StoredNibblesSubKey(Nibbles::from_nibbles([0x2, 0x0])), + node: BranchNodeCompact::new( + 0b0011_0011_0011_0011, // will be deleted + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ), + }, + ) + .unwrap(); + + // Add storage nodes for address2 (will be wiped) + storage_cursor + .upsert( + storage_address2, + &StorageTrieEntry { + nibbles: 
StoredNibblesSubKey(Nibbles::from_nibbles([0xa, 0xb])), + node: BranchNodeCompact::new( + 0b1100_1100_1100_1100, // will be wiped + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ), + }, + ) + .unwrap(); + storage_cursor + .upsert( + storage_address2, + &StorageTrieEntry { + nibbles: StoredNibblesSubKey(Nibbles::from_nibbles([0xc, 0xd])), + node: BranchNodeCompact::new( + 0b0011_1100_0011_1100, // will be wiped + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ), + }, + ) + .unwrap(); + } + + // Create sorted account trie updates + let account_nodes = vec![ + ( + Nibbles::from_nibbles([0x1, 0x2]), + Some(BranchNodeCompact::new( + 0b1111_1111_1111_1111, // state_mask (updated) + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask (no hashes) + vec![], + None, + )), + ), + (Nibbles::from_nibbles([0x3, 0x4]), None), // Deletion + ( + Nibbles::from_nibbles([0x5, 0x6]), + Some(BranchNodeCompact::new( + 0b1111_1111_1111_1111, // state_mask + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask (no hashes) + vec![], + None, + )), + ), + ]; + + // Create sorted storage trie updates + let storage_trie1 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + ( + Nibbles::from_nibbles([0x1, 0x0]), + Some(BranchNodeCompact::new( + 0b1111_0000_0000_0000, // state_mask + 0b0000_0000_0000_0000, // tree_mask + 0b0000_0000_0000_0000, // hash_mask (no hashes) + vec![], + None, + )), + ), + (Nibbles::from_nibbles([0x2, 0x0]), None), // Deletion of existing node + ], + }; + + let storage_trie2 = StorageTrieUpdatesSorted { + is_deleted: true, // Wipe all storage for this address + storage_nodes: vec![], + }; + + let mut storage_tries = B256Map::default(); + storage_tries.insert(storage_address1, storage_trie1); + storage_tries.insert(storage_address2, storage_trie2); + + let trie_updates = TrieUpdatesSorted::new(account_nodes, storage_tries); + + // Write the sorted trie updates + let num_entries = provider_rw.write_trie_updates_sorted(&trie_updates).unwrap(); + + // We should have 2 account insertions + 1 account deletion + 1 storage insertion + 1 + // storage deletion = 5 + assert_eq!(num_entries, 5); + + // Verify account trie updates were written correctly + let tx = provider_rw.tx_ref(); + let mut cursor = tx.cursor_read::().unwrap(); + + // Check first account node was updated + let nibbles1 = StoredNibbles(Nibbles::from_nibbles([0x1, 0x2])); + let entry1 = cursor.seek_exact(nibbles1).unwrap(); + assert!(entry1.is_some(), "Updated account node should exist"); + let expected_mask = reth_trie::TrieMask::new(0b1111_1111_1111_1111); + assert_eq!( + entry1.unwrap().1.state_mask, + expected_mask, + "Account node should have updated state_mask" + ); + + // Check deleted account node no longer exists + let nibbles2 = StoredNibbles(Nibbles::from_nibbles([0x3, 0x4])); + let entry2 = cursor.seek_exact(nibbles2).unwrap(); + assert!(entry2.is_none(), "Deleted account node should not exist"); + + // Check new account node exists + let nibbles3 = StoredNibbles(Nibbles::from_nibbles([0x5, 0x6])); + let entry3 = cursor.seek_exact(nibbles3).unwrap(); + assert!(entry3.is_some(), "New account node should exist"); + + // Verify storage trie updates were written correctly + let mut storage_cursor = tx.cursor_dup_read::().unwrap(); + + // Check storage for address1 + let storage_entries1: Vec<_> = storage_cursor + .walk_dup(Some(storage_address1), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!( + 
storage_entries1.len(), + 1, + "Storage address1 should have 1 entry after deletion" + ); + assert_eq!( + storage_entries1[0].1.nibbles.0, + Nibbles::from_nibbles([0x1, 0x0]), + "Remaining entry should be [0x1, 0x0]" + ); + + // Check storage for address2 was wiped + let storage_entries2: Vec<_> = storage_cursor + .walk_dup(Some(storage_address2), None) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(storage_entries2.len(), 0, "Storage address2 should be empty after wipe"); + + provider_rw.commit().unwrap(); + } + + #[test] + fn test_get_block_trie_updates() { + use reth_db_api::models::BlockNumberHashedAddress; + use reth_trie::{BranchNodeCompact, StorageTrieEntry}; + + let factory = create_test_provider_factory(); + let provider_rw = factory.provider_rw().unwrap(); + + let target_block = 2u64; + let next_block = 3u64; + + // Create test nibbles and nodes for accounts + let account_nibbles1 = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); + let account_nibbles2 = Nibbles::from_nibbles([0x5, 0x6, 0x7, 0x8]); + let account_nibbles3 = Nibbles::from_nibbles([0x9, 0xa, 0xb, 0xc]); + + let node1 = BranchNodeCompact::new( + 0b1111_1111_0000_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + let node2 = BranchNodeCompact::new( + 0b0000_0000_1111_1111, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + let node3 = BranchNodeCompact::new( + 0b1010_1010_1010_1010, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Pre-populate AccountsTrie with nodes that will be the final state + { + let mut cursor = provider_rw.tx_ref().cursor_write::().unwrap(); + cursor.insert(StoredNibbles(account_nibbles1), &node1).unwrap(); + cursor.insert(StoredNibbles(account_nibbles2), &node2).unwrap(); + // account_nibbles3 will be deleted (not in final state) + } + + // Insert trie changesets for target_block + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + // nibbles1 was updated in target_block (old value stored) + cursor + .append_dup( + target_block, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles1), + node: Some(BranchNodeCompact::new( + 0b1111_0000_0000_0000, // old value + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + )), + }, + ) + .unwrap(); + // nibbles2 was created in target_block (no old value) + cursor + .append_dup( + target_block, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles2), + node: None, + }, + ) + .unwrap(); + } + + // Insert trie changesets for next_block (to test overlay) + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + // nibbles3 was deleted in next_block (old value stored) + cursor + .append_dup( + next_block, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(account_nibbles3), + node: Some(node3), + }, + ) + .unwrap(); + } + + // Storage trie updates + let storage_address1 = B256::from([1u8; 32]); + let storage_nibbles1 = Nibbles::from_nibbles([0xa, 0xb]); + let storage_nibbles2 = Nibbles::from_nibbles([0xc, 0xd]); + + let storage_node1 = BranchNodeCompact::new( + 0b1111_1111_1111_0000, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + let storage_node2 = BranchNodeCompact::new( + 0b0101_0101_0101_0101, + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + ); + + // Pre-populate StoragesTrie with final state + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + cursor + .upsert( + storage_address1, + 
&StorageTrieEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: storage_node1.clone(), + }, + ) + .unwrap(); + // storage_nibbles2 was deleted in next_block, so it's not in final state + } + + // Insert storage trie changesets for target_block + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + let key = BlockNumberHashedAddress((target_block, storage_address1)); + + // storage_nibbles1 was updated + cursor + .append_dup( + key, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles1), + node: Some(BranchNodeCompact::new( + 0b0000_0000_1111_1111, // old value + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + )), + }, + ) + .unwrap(); + + // storage_nibbles2 was created + cursor + .append_dup( + key, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: None, + }, + ) + .unwrap(); + } + + // Insert storage trie changesets for next_block (to test overlay) + { + let mut cursor = + provider_rw.tx_ref().cursor_dup_write::().unwrap(); + let key = BlockNumberHashedAddress((next_block, storage_address1)); + + // storage_nibbles2 was deleted in next_block + cursor + .append_dup( + key, + TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey(storage_nibbles2), + node: Some(BranchNodeCompact::new( + 0b0101_0101_0101_0101, // value that was deleted + 0b0000_0000_0000_0000, + 0b0000_0000_0000_0000, + vec![], + None, + )), + }, + ) + .unwrap(); + } + + provider_rw.commit().unwrap(); + + // Now test get_block_trie_updates + let provider = factory.provider().unwrap(); + let result = provider.get_block_trie_updates(target_block).unwrap(); + + // Verify account trie updates + assert_eq!(result.account_nodes_ref().len(), 2, "Should have 2 account trie updates"); + + // Check nibbles1 - should have the current value (node1) + let nibbles1_update = result + .account_nodes_ref() + .iter() + .find(|(n, _)| n == &account_nibbles1) + .expect("Should find nibbles1"); + assert!(nibbles1_update.1.is_some(), "nibbles1 should have a value"); + assert_eq!( + nibbles1_update.1.as_ref().unwrap().state_mask, + node1.state_mask, + "nibbles1 should have current value" + ); + + // Check nibbles2 - should have the current value (node2) + let nibbles2_update = result + .account_nodes_ref() + .iter() + .find(|(n, _)| n == &account_nibbles2) + .expect("Should find nibbles2"); + assert!(nibbles2_update.1.is_some(), "nibbles2 should have a value"); + assert_eq!( + nibbles2_update.1.as_ref().unwrap().state_mask, + node2.state_mask, + "nibbles2 should have current value" + ); + + // nibbles3 should NOT be in the result (it was changed in next_block, not target_block) + assert!( + !result.account_nodes_ref().iter().any(|(n, _)| n == &account_nibbles3), + "nibbles3 should not be in target_block updates" + ); + + // Verify storage trie updates + assert_eq!(result.storage_tries_ref().len(), 1, "Should have 1 storage trie"); + let storage_updates = result + .storage_tries_ref() + .get(&storage_address1) + .expect("Should have storage updates for address1"); + + assert_eq!(storage_updates.storage_nodes.len(), 2, "Should have 2 storage node updates"); + + // Check storage_nibbles1 - should have current value + let storage1_update = storage_updates + .storage_nodes + .iter() + .find(|(n, _)| n == &storage_nibbles1) + .expect("Should find storage_nibbles1"); + assert!(storage1_update.1.is_some(), "storage_nibbles1 should have a value"); + assert_eq!( + storage1_update.1.as_ref().unwrap().state_mask, + storage_node1.state_mask, + 
"storage_nibbles1 should have current value" + ); + + // Check storage_nibbles2 - was created in target_block, will be deleted in next_block + // So it should have a value (the value that will be deleted) + let storage2_update = storage_updates + .storage_nodes + .iter() + .find(|(n, _)| n == &storage_nibbles2) + .expect("Should find storage_nibbles2"); + assert!( + storage2_update.1.is_some(), + "storage_nibbles2 should have a value (the node that will be deleted in next block)" + ); + assert_eq!( + storage2_update.1.as_ref().unwrap().state_mask, + storage_node2.state_mask, + "storage_nibbles2 should have the value that was created and will be deleted" + ); + } } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 56d27ea3361..aba604ee7e9 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -2,22 +2,22 @@ use reth_chainspec::EthereumCapabilities; use reth_db_api::table::Value; -use reth_node_types::{FullNodePrimitives, NodeTypes, NodeTypesWithDB}; +use reth_node_types::{NodePrimitives, NodeTypes, NodeTypesWithDB}; mod database; pub use database::*; mod static_file; pub use static_file::{ - StaticFileAccess, StaticFileJarProvider, StaticFileProvider, StaticFileProviderRW, - StaticFileProviderRWRefMut, StaticFileWriter, + StaticFileAccess, StaticFileJarProvider, StaticFileProvider, StaticFileProviderBuilder, + StaticFileProviderRW, StaticFileProviderRWRefMut, StaticFileWriter, }; mod state; pub use state::{ historical::{HistoricalStateProvider, HistoricalStateProviderRef, LowestAvailableBlocks}, latest::{LatestStateProvider, LatestStateProviderRef}, - overlay::OverlayStateProvider, + overlay::{OverlayStateProvider, OverlayStateProviderFactory}, }; mod consistent_view; @@ -36,7 +36,7 @@ where Self: NodeTypes< ChainSpec: EthereumCapabilities, Storage: ChainStorage, - Primitives: FullNodePrimitives, + Primitives: NodePrimitives, >, { } @@ -45,7 +45,7 @@ impl NodeTypesForProvider for T where T: NodeTypes< ChainSpec: EthereumCapabilities, Storage: ChainStorage, - Primitives: FullNodePrimitives, + Primitives: NodePrimitives, > { } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 9a22a527ccb..666138fae7b 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -1,6 +1,6 @@ use crate::{ providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, - HashedPostStateProvider, ProviderError, StateProvider, StateRootProvider, + ChangeSetReader, HashedPostStateProvider, ProviderError, StateProvider, StateRootProvider, }; use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; @@ -241,23 +241,23 @@ impl HistoricalStateProviderRef<'_, Provi } } -impl AccountReader +impl AccountReader for HistoricalStateProviderRef<'_, Provider> { /// Get basic account information. fn basic_account(&self, address: &Address) -> ProviderResult> { match self.account_history_lookup(*address)? { HistoryInfo::NotYetWritten => Ok(None), - HistoryInfo::InChangeset(changeset_block_number) => Ok(self - .tx() - .cursor_dup_read::()? - .seek_by_key_subkey(changeset_block_number, *address)? - .filter(|acc| &acc.address == address) - .ok_or(ProviderError::AccountChangesetNotFound { - block_number: changeset_block_number, - address: *address, - })? 
- .info), + HistoryInfo::InChangeset(changeset_block_number) => { + // Use ChangeSetReader trait method to get the account from changesets + self.provider + .get_account_before_block(changeset_block_number, *address)? + .ok_or(ProviderError::AccountChangesetNotFound { + block_number: changeset_block_number, + address: *address, + }) + .map(|account_before| account_before.info) + } HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => { Ok(self.tx().get_by_encoded_key::(address)?) } @@ -368,7 +368,8 @@ impl StateProofProvider slots: &[B256], ) -> ProviderResult { input.prepend(self.revert_state()?); - Proof::overlay_account_proof(self.tx(), input, address, slots).map_err(ProviderError::from) + let proof = as DatabaseProof>::from_tx(self.tx()); + proof.overlay_account_proof(input, address, slots).map_err(ProviderError::from) } fn multiproof( @@ -377,7 +378,8 @@ impl StateProofProvider targets: MultiProofTargets, ) -> ProviderResult { input.prepend(self.revert_state()?); - Proof::overlay_multiproof(self.tx(), input, targets).map_err(ProviderError::from) + let proof = as DatabaseProof>::from_tx(self.tx()); + proof.overlay_multiproof(input, targets).map_err(ProviderError::from) } fn witness(&self, mut input: TrieInput, target: HashedPostState) -> ProviderResult> { @@ -394,7 +396,7 @@ impl HashedPostStateProvider for HistoricalStateProviderRef<'_, } } -impl StateProvider +impl StateProvider for HistoricalStateProviderRef<'_, Provider> { /// Get storage. @@ -485,7 +487,7 @@ impl HistoricalStateProvider { } // Delegates all provider impls to [HistoricalStateProviderRef] -delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader ]); +delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. 
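For reference, the hunk above replaces the direct account-changeset cursor lookup with the `ChangeSetReader` trait method, which is also why the `delegate_provider_impls!` bound (and the test helper below) now requires `ChangeSetReader`. A minimal sketch of the equivalent lookup, assuming these import paths and a hypothetical helper name; `get_account_before_block` and the `AccountChangesetNotFound` variant are taken from the hunk itself:

use alloy_primitives::{Address, BlockNumber};
use reth_primitives_traits::Account;
use reth_storage_api::ChangeSetReader;
use reth_storage_errors::provider::{ProviderError, ProviderResult};

// Hypothetical helper: fetch the value `address` had before the change recorded at
// `changeset_block`, mirroring the `HistoryInfo::InChangeset` arm above.
fn account_from_changeset<P: ChangeSetReader>(
    provider: &P,
    changeset_block: BlockNumber,
    address: Address,
) -> ProviderResult<Option<Account>> {
    provider
        .get_account_before_block(changeset_block, address)?
        .ok_or(ProviderError::AccountChangesetNotFound { block_number: changeset_block, address })
        .map(|before| before.info)
}

Because any provider satisfying the bound can serve the changeset read, the historical state provider no longer needs to know which table (or static file) backs the account changesets.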
@@ -530,7 +532,9 @@ mod tests { BlockNumberList, }; use reth_primitives_traits::{Account, StorageEntry}; - use reth_storage_api::{BlockHashReader, BlockNumReader, DBProvider, DatabaseProviderFactory}; + use reth_storage_api::{ + BlockHashReader, BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory, + }; use reth_storage_errors::provider::ProviderError; const ADDRESS: Address = address!("0x0000000000000000000000000000000000000001"); @@ -540,7 +544,9 @@ mod tests { const fn assert_state_provider() {} #[expect(dead_code)] - const fn assert_historical_state_provider() { + const fn assert_historical_state_provider< + T: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader, + >() { assert_state_provider::>(); } diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index de8eef2cc9c..092feb37c43 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -124,7 +124,8 @@ impl StateProofProvider for LatestStateProviderRef< address: Address, slots: &[B256], ) -> ProviderResult { - Proof::overlay_account_proof(self.tx(), input, address, slots).map_err(ProviderError::from) + let proof = as DatabaseProof>::from_tx(self.tx()); + proof.overlay_account_proof(input, address, slots).map_err(ProviderError::from) } fn multiproof( @@ -132,7 +133,8 @@ impl StateProofProvider for LatestStateProviderRef< input: TrieInput, targets: MultiProofTargets, ) -> ProviderResult { - Proof::overlay_multiproof(self.tx(), input, targets).map_err(ProviderError::from) + let proof = as DatabaseProof>::from_tx(self.tx()); + proof.overlay_multiproof(input, targets).map_err(ProviderError::from) } fn witness(&self, input: TrieInput, target: HashedPostState) -> ProviderResult> { diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 7e6a40efef2..d3ef87e6c49 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -1,111 +1,325 @@ -use alloy_primitives::B256; +use alloy_primitives::{BlockNumber, B256}; use reth_db_api::DatabaseError; -use reth_storage_api::DBProvider; +use reth_errors::{ProviderError, ProviderResult}; +use reth_prune_types::PruneSegment; +use reth_stages_types::StageId; +use reth_storage_api::{ + BlockNumReader, DBProvider, DatabaseProviderFactory, DatabaseProviderROFactory, + PruneCheckpointReader, StageCheckpointReader, TrieReader, +}; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdatesSorted, - HashedPostStateSorted, + HashedPostState, HashedPostStateSorted, KeccakKeyHasher, +}; +use reth_trie_db::{ + DatabaseHashedCursorFactory, DatabaseHashedPostState, DatabaseTrieCursorFactory, }; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use std::sync::Arc; +use tracing::debug; + +/// Factory for creating overlay state providers with optional reverts and overlays. +/// +/// This factory allows building an `OverlayStateProvider` whose DB state has been reverted to a +/// particular block, and/or with additional overlay information added on top. 
+#[derive(Debug, Clone)] +pub struct OverlayStateProviderFactory { + /// The underlying database provider factory + factory: F, + /// Optional block hash for collecting reverts + block_hash: Option, + /// Optional trie overlay + trie_overlay: Option>, + /// Optional hashed state overlay + hashed_state_overlay: Option>, +} + +impl OverlayStateProviderFactory { + /// Create a new overlay state provider factory + pub const fn new(factory: F) -> Self { + Self { factory, block_hash: None, trie_overlay: None, hashed_state_overlay: None } + } + + /// Set the block hash for collecting reverts. All state will be reverted to the point + /// _after_ this block has been processed. + pub const fn with_block_hash(mut self, block_hash: Option) -> Self { + self.block_hash = block_hash; + self + } + + /// Set the trie overlay. + /// + /// This overlay will be applied on top of any reverts applied via `with_block_hash`. + pub fn with_trie_overlay(mut self, trie_overlay: Option>) -> Self { + self.trie_overlay = trie_overlay; + self + } + + /// Set the hashed state overlay + /// + /// This overlay will be applied on top of any reverts applied via `with_block_hash`. + pub fn with_hashed_state_overlay( + mut self, + hashed_state_overlay: Option>, + ) -> Self { + self.hashed_state_overlay = hashed_state_overlay; + self + } +} + +impl OverlayStateProviderFactory +where + F: DatabaseProviderFactory, + F::Provider: TrieReader + StageCheckpointReader + PruneCheckpointReader + BlockNumReader, +{ + /// Returns the block number for [`Self`]'s `block_hash` field, if any. + fn get_block_number(&self, provider: &F::Provider) -> ProviderResult> { + if let Some(block_hash) = self.block_hash { + Ok(Some( + provider + .convert_hash_or_number(block_hash.into())? + .ok_or_else(|| ProviderError::BlockHashNotFound(block_hash))?, + )) + } else { + Ok(None) + } + } + + /// Returns whether or not it is required to collect reverts, and validates that there are + /// sufficient changesets to revert to the requested block number if so. + /// + /// Returns an error if the `MerkleChangeSets` checkpoint doesn't cover the requested block. + /// Takes into account both the stage checkpoint and the prune checkpoint to determine the + /// available data range. + fn reverts_required( + &self, + provider: &F::Provider, + requested_block: BlockNumber, + ) -> ProviderResult { + // Get the MerkleChangeSets stage and prune checkpoints. + let stage_checkpoint = provider.get_stage_checkpoint(StageId::MerkleChangeSets)?; + let prune_checkpoint = provider.get_prune_checkpoint(PruneSegment::MerkleChangeSets)?; + + // Get the upper bound from stage checkpoint + let upper_bound = + stage_checkpoint.as_ref().map(|chk| chk.block_number).ok_or_else(|| { + ProviderError::InsufficientChangesets { + requested: requested_block, + available: 0..=0, + } + })?; + + // If the requested block is the DB tip (determined by the MerkleChangeSets stage + // checkpoint) then there won't be any reverts necessary, and we can simply return Ok. + if upper_bound == requested_block { + return Ok(false) + } + + // Extract the lower bound from prune checkpoint if available. + // + // If not available we assume pruning has never ran and so there is no lower bound. This + // should not generally happen, since MerkleChangeSets always have pruning enabled, but when + // starting a new node from scratch (e.g. in a test case or benchmark) it can surface. 
+ // + // The prune checkpoint's block_number is the highest pruned block, so data is available + // starting from the next block + let lower_bound = prune_checkpoint + .and_then(|chk| chk.block_number) + .map(|block_number| block_number + 1) + .unwrap_or_default(); + + let available_range = lower_bound..=upper_bound; + + // Check if the requested block is within the available range + if !available_range.contains(&requested_block) { + return Err(ProviderError::InsufficientChangesets { + requested: requested_block, + available: available_range, + }); + } + + Ok(true) + } +} + +impl DatabaseProviderROFactory for OverlayStateProviderFactory +where + F: DatabaseProviderFactory, + F::Provider: TrieReader + StageCheckpointReader + PruneCheckpointReader + BlockNumReader, +{ + type Provider = OverlayStateProvider; + + /// Create a read-only [`OverlayStateProvider`]. + fn database_provider_ro(&self) -> ProviderResult> { + // Get a read-only provider + let provider = self.factory.database_provider_ro()?; + + // If block_hash is provided, collect reverts + let (trie_updates, hashed_state) = if let Some(from_block) = + self.get_block_number(&provider)? && + self.reverts_required(&provider, from_block)? + { + // Collect trie reverts + let mut trie_reverts = provider.trie_reverts(from_block + 1)?; + + // Collect state reverts + // + // TODO(mediocregopher) make from_reverts return sorted + // https://github.com/paradigmxyz/reth/issues/19382 + let mut hashed_state_reverts = HashedPostState::from_reverts::( + provider.tx_ref(), + from_block + 1.., + )? + .into_sorted(); + + // Extend with overlays if provided. If the reverts are empty we should just use the + // overlays directly, because `extend_ref` will actually clone the overlay. + let trie_updates = match self.trie_overlay.as_ref() { + Some(trie_overlay) if trie_reverts.is_empty() => Arc::clone(trie_overlay), + Some(trie_overlay) => { + trie_reverts.extend_ref(trie_overlay); + Arc::new(trie_reverts) + } + None => Arc::new(trie_reverts), + }; + + let hashed_state_updates = match self.hashed_state_overlay.as_ref() { + Some(hashed_state_overlay) if hashed_state_reverts.is_empty() => { + Arc::clone(hashed_state_overlay) + } + Some(hashed_state_overlay) => { + hashed_state_reverts.extend_ref(hashed_state_overlay); + Arc::new(hashed_state_reverts) + } + None => Arc::new(hashed_state_reverts), + }; + + debug!( + target: "providers::state::overlay", + block_hash = ?self.block_hash, + ?from_block, + num_trie_updates = ?trie_updates.total_len(), + num_state_updates = ?hashed_state_updates.total_len(), + "Reverted to target block", + ); + + (trie_updates, hashed_state_updates) + } else { + // If no block_hash, use overlays directly or defaults + let trie_updates = + self.trie_overlay.clone().unwrap_or_else(|| Arc::new(TrieUpdatesSorted::default())); + let hashed_state = self + .hashed_state_overlay + .clone() + .unwrap_or_else(|| Arc::new(HashedPostStateSorted::default())); + + (trie_updates, hashed_state) + }; + + Ok(OverlayStateProvider::new(provider, trie_updates, hashed_state)) + } +} /// State provider with in-memory overlay from trie updates and hashed post state. /// /// This provider uses in-memory trie updates and hashed post state as an overlay /// on top of a database provider, implementing [`TrieCursorFactory`] and [`HashedCursorFactory`] /// using the in-memory overlay factories. -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct OverlayStateProvider { - /// The in-memory trie cursor factory that wraps the database cursor factory. 
- trie_cursor_factory: - InMemoryTrieCursorFactory, Arc>, - /// The hashed cursor factory that wraps the database cursor factory. - hashed_cursor_factory: HashedPostStateCursorFactory< - DatabaseHashedCursorFactory, - Arc, - >, + provider: Provider, + trie_updates: Arc, + hashed_post_state: Arc, } impl OverlayStateProvider where - Provider: DBProvider + Clone, + Provider: DBProvider, { /// Create new overlay state provider. The `Provider` must be cloneable, which generally means /// it should be wrapped in an `Arc`. - pub fn new( + pub const fn new( provider: Provider, trie_updates: Arc, hashed_post_state: Arc, ) -> Self { - // Create the trie cursor factory - let db_trie_cursor_factory = DatabaseTrieCursorFactory::new(provider.clone().into_tx()); - let trie_cursor_factory = - InMemoryTrieCursorFactory::new(db_trie_cursor_factory, trie_updates); - - // Create the hashed cursor factory - let db_hashed_cursor_factory = DatabaseHashedCursorFactory::new(provider.into_tx()); - let hashed_cursor_factory = - HashedPostStateCursorFactory::new(db_hashed_cursor_factory, hashed_post_state); - - Self { trie_cursor_factory, hashed_cursor_factory } + Self { provider, trie_updates, hashed_post_state } } } impl TrieCursorFactory for OverlayStateProvider where - Provider: DBProvider + Clone, - InMemoryTrieCursorFactory, Arc>: - TrieCursorFactory, + Provider: DBProvider, { - type AccountTrieCursor = , - Arc, - > as TrieCursorFactory>::AccountTrieCursor; - - type StorageTrieCursor = , - Arc, - > as TrieCursorFactory>::StorageTrieCursor; - - fn account_trie_cursor(&self) -> Result { - self.trie_cursor_factory.account_trie_cursor() + type AccountTrieCursor<'a> + = , + &'a TrieUpdatesSorted, + > as TrieCursorFactory>::AccountTrieCursor<'a> + where + Self: 'a; + + type StorageTrieCursor<'a> + = , + &'a TrieUpdatesSorted, + > as TrieCursorFactory>::StorageTrieCursor<'a> + where + Self: 'a; + + fn account_trie_cursor(&self) -> Result, DatabaseError> { + let db_trie_cursor_factory = DatabaseTrieCursorFactory::new(self.provider.tx_ref()); + let trie_cursor_factory = + InMemoryTrieCursorFactory::new(db_trie_cursor_factory, self.trie_updates.as_ref()); + trie_cursor_factory.account_trie_cursor() } fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result { - self.trie_cursor_factory.storage_trie_cursor(hashed_address) + ) -> Result, DatabaseError> { + let db_trie_cursor_factory = DatabaseTrieCursorFactory::new(self.provider.tx_ref()); + let trie_cursor_factory = + InMemoryTrieCursorFactory::new(db_trie_cursor_factory, self.trie_updates.as_ref()); + trie_cursor_factory.storage_trie_cursor(hashed_address) } } impl HashedCursorFactory for OverlayStateProvider where - Provider: DBProvider + Clone, - HashedPostStateCursorFactory< - DatabaseHashedCursorFactory, - Arc, - >: HashedCursorFactory, + Provider: DBProvider, { - type AccountCursor = , - Arc, - > as HashedCursorFactory>::AccountCursor; - - type StorageCursor = , - Arc, - > as HashedCursorFactory>::StorageCursor; - - fn hashed_account_cursor(&self) -> Result { - self.hashed_cursor_factory.hashed_account_cursor() + type AccountCursor<'a> + = , + &'a Arc, + > as HashedCursorFactory>::AccountCursor<'a> + where + Self: 'a; + + type StorageCursor<'a> + = , + &'a Arc, + > as HashedCursorFactory>::StorageCursor<'a> + where + Self: 'a; + + fn hashed_account_cursor(&self) -> Result, DatabaseError> { + let db_hashed_cursor_factory = DatabaseHashedCursorFactory::new(self.provider.tx_ref()); + let hashed_cursor_factory = + 
HashedPostStateCursorFactory::new(db_hashed_cursor_factory, &self.hashed_post_state); + hashed_cursor_factory.hashed_account_cursor() } fn hashed_storage_cursor( &self, hashed_address: B256, - ) -> Result { - self.hashed_cursor_factory.hashed_storage_cursor(hashed_address) + ) -> Result, DatabaseError> { + let db_hashed_cursor_factory = DatabaseHashedCursorFactory::new(self.provider.tx_ref()); + let hashed_cursor_factory = + HashedPostStateCursorFactory::new(db_hashed_cursor_factory, &self.hashed_post_state); + hashed_cursor_factory.hashed_storage_cursor(hashed_address) } } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 9906583f900..2cd7ec98ae9 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -8,11 +8,10 @@ use crate::{ }; use alloy_consensus::transaction::{SignerRecoverable, TransactionMeta}; use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; -use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use reth_chainspec::ChainInfo; use reth_db::static_file::{ - BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, TDWithHashMask, - TotalDifficultyMask, TransactionMask, + BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, TransactionMask, }; use reth_db_api::table::{Decompress, Value}; use reth_node_types::NodePrimitives; @@ -101,18 +100,6 @@ impl> HeaderProvider for StaticFileJarProv self.cursor()?.get_one::>(num.into()) } - fn header_td(&self, block_hash: BlockHash) -> ProviderResult> { - Ok(self - .cursor()? - .get_two::((&block_hash).into())? 
- .filter(|(_, hash)| hash == &block_hash) - .map(|(td, _)| td.into())) - } - - fn header_td_by_number(&self, num: BlockNumber) -> ProviderResult> { - Ok(self.cursor()?.get_one::(num.into())?.map(Into::into)) - } - fn headers_range( &self, range: impl RangeBounds, diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index e93b4fe10df..52e7cad9afd 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -12,9 +12,7 @@ use alloy_consensus::{ Header, }; use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; -use alloy_primitives::{ - b256, keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256, -}; +use alloy_primitives::{b256, keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use dashmap::DashMap; use notify::{RecommendedWatcher, RecursiveMode, Watcher}; use parking_lot::RwLock; @@ -23,7 +21,7 @@ use reth_db::{ lockfile::StorageLock, static_file::{ iter_static_files, BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, - StaticFileCursor, TDWithHashMask, TransactionMask, + StaticFileCursor, TransactionMask, }, }; use reth_db_api::{ @@ -35,7 +33,7 @@ use reth_db_api::{ }; use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; -use reth_node_types::{FullNodePrimitives, NodePrimitives}; +use reth_node_types::NodePrimitives; use reth_primitives_traits::{RecoveredBlock, SealedHeader, SignedTransaction}; use reth_stages_types::{PipelineTarget, StageId}; use reth_static_file_types::{ @@ -45,19 +43,17 @@ use reth_static_file_types::{ use reth_storage_api::{BlockBodyIndicesProvider, DBProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{BTreeMap, HashMap}, fmt::Debug, - marker::PhantomData, ops::{Deref, Range, RangeBounds, RangeInclusive}, path::{Path, PathBuf}, sync::{atomic::AtomicU64, mpsc, Arc}, }; use tracing::{debug, info, trace, warn}; -/// Alias type for a map that can be queried for block ranges from a transaction -/// segment respectively. It uses `TxNumber` to represent the transaction end of a static file -/// range. -type SegmentRanges = HashMap>; +/// Alias type for a map that can be queried for block or transaction ranges. It uses `u64` to +/// represent either a block or a transaction number end of a static file range. +type SegmentRanges = HashMap>; /// Access mode on a static file provider. RO/RW. #[derive(Debug, Default, PartialEq, Eq)] @@ -98,14 +94,56 @@ impl Clone for StaticFileProvider { } } -impl StaticFileProvider { - /// Creates a new [`StaticFileProvider`] with the given [`StaticFileAccess`]. - fn new(path: impl AsRef, access: StaticFileAccess) -> ProviderResult { - let provider = Self(Arc::new(StaticFileProviderInner::new(path, access)?)); +/// Builder for [`StaticFileProvider`] that allows configuration before initialization. +#[derive(Debug)] +pub struct StaticFileProviderBuilder { + inner: StaticFileProviderInner, +} + +impl StaticFileProviderBuilder { + /// Creates a new builder with read-write access. + pub fn read_write(path: impl AsRef) -> ProviderResult { + StaticFileProviderInner::new(path, StaticFileAccess::RW).map(|inner| Self { inner }) + } + + /// Creates a new builder with read-only access. 
+ pub fn read_only(path: impl AsRef) -> ProviderResult { + StaticFileProviderInner::new(path, StaticFileAccess::RO).map(|inner| Self { inner }) + } + + /// Set a custom number of blocks per file for all segments. + pub fn with_blocks_per_file(mut self, blocks_per_file: u64) -> Self { + for segment in StaticFileSegment::iter() { + self.inner.blocks_per_file.insert(segment, blocks_per_file); + } + self + } + + /// Set a custom number of blocks per file for a specific segment. + pub fn with_blocks_per_file_for_segment( + mut self, + segment: StaticFileSegment, + blocks_per_file: u64, + ) -> Self { + self.inner.blocks_per_file.insert(segment, blocks_per_file); + self + } + + /// Enables metrics on the [`StaticFileProvider`]. + pub fn with_metrics(mut self) -> Self { + self.inner.metrics = Some(Arc::new(StaticFileProviderMetrics::default())); + self + } + + /// Builds the final [`StaticFileProvider`] and initializes the index. + pub fn build(self) -> ProviderResult> { + let provider = StaticFileProvider(Arc::new(self.inner)); provider.initialize_index()?; Ok(provider) } +} +impl StaticFileProvider { /// Creates a new [`StaticFileProvider`] with read-only access. /// /// Set `watch_directory` to `true` to track the most recent changes in static files. Otherwise, @@ -116,7 +154,7 @@ impl StaticFileProvider { /// /// See also [`StaticFileProvider::watch_directory`]. pub fn read_only(path: impl AsRef, watch_directory: bool) -> ProviderResult { - let provider = Self::new(path, StaticFileAccess::RO)?; + let provider = StaticFileProviderBuilder::read_only(path)?.build()?; if watch_directory { provider.watch_directory(); @@ -127,7 +165,7 @@ impl StaticFileProvider { /// Creates a new [`StaticFileProvider`] with read-write access. pub fn read_write(path: impl AsRef) -> ProviderResult { - Self::new(path, StaticFileAccess::RW) + StaticFileProviderBuilder::read_write(path)?.build() } /// Watches the directory for changes and updates the in-memory index when modifications @@ -231,7 +269,7 @@ pub struct StaticFileProviderInner { map: DashMap<(BlockNumber, StaticFileSegment), LoadedJar>, /// Min static file range for each segment. /// This index is initialized on launch to keep track of the lowest, non-expired static file - /// per segment. + /// per segment and gets updated on `Self::update_index()`. /// /// This tracks the lowest static file per segment together with the block range in that /// file. E.g. static file is batched in 500k block intervals then the lowest static file @@ -244,7 +282,7 @@ pub struct StaticFileProviderInner { /// `expired_history_height + 1`. /// /// This is effectively the transaction range that has been expired: - /// [`StaticFileProvider::delete_transactions_below`] and mirrors + /// [`StaticFileProvider::delete_segment_below_block`] and mirrors /// `static_files_min_block[transactions] - blocks_per_file`. /// /// This additional tracker exists for more efficient lookups because the node must be aware of @@ -252,7 +290,18 @@ pub struct StaticFileProviderInner { earliest_history_height: AtomicU64, /// Max static file block for each segment static_files_max_block: RwLock>, - /// Available static file block ranges on disk indexed by max transactions. + /// Expected on disk static file block ranges indexed by max expected blocks. + /// + /// For example, a static file for expected block range `0..=499_000` may have only block range + /// `0..=1000` contained in it, as it wasn't fully filled yet. This index maps the max expected + /// block to the expected range, i.e. 
block `499_000` to block range `0..=499_000`. + static_files_expected_block_index: RwLock, + /// Available on disk static file block ranges indexed by max transactions. + /// + /// For example, a static file for block range `0..=499_000` may only have block range + /// `0..=1000` and transaction range `0..=2000` contained in it. This index maps the max + /// available transaction to the available block range, i.e. transaction `2000` to block range + /// `0..=1000`. static_files_tx_index: RwLock, /// Directory where `static_files` are located path: PathBuf, @@ -262,12 +311,10 @@ pub struct StaticFileProviderInner { metrics: Option>, /// Access rights of the provider. access: StaticFileAccess, - /// Number of blocks per file. - blocks_per_file: u64, + /// Number of blocks per file, per segment. + blocks_per_file: HashMap, /// Write lock for when access is [`StaticFileAccess::RW`]. _lock_file: Option, - /// Node primitives - _pd: PhantomData, } impl StaticFileProviderInner { @@ -279,19 +326,24 @@ impl StaticFileProviderInner { None }; + let mut blocks_per_file = HashMap::new(); + for segment in StaticFileSegment::iter() { + blocks_per_file.insert(segment, DEFAULT_BLOCKS_PER_STATIC_FILE); + } + let provider = Self { map: Default::default(), writers: Default::default(), static_files_min_block: Default::default(), earliest_history_height: Default::default(), static_files_max_block: Default::default(), + static_files_expected_block_index: Default::default(), static_files_tx_index: Default::default(), path: path.as_ref().to_path_buf(), metrics: None, access, - blocks_per_file: DEFAULT_BLOCKS_PER_STATIC_FILE, + blocks_per_file, _lock_file, - _pd: Default::default(), }; Ok(provider) @@ -303,42 +355,83 @@ impl StaticFileProviderInner { /// Each static file has a fixed number of blocks. This gives out the range where the requested /// block is positioned. - pub const fn find_fixed_range(&self, block: BlockNumber) -> SegmentRangeInclusive { - find_fixed_range(block, self.blocks_per_file) - } -} + /// + /// If the specified block falls into one of the ranges of already initialized static files, + /// this function will return that range. + /// + /// If no matching file exists, this function will derive a new range from the end of the last + /// existing file, if any. + pub fn find_fixed_range_with_block_index( + &self, + segment: StaticFileSegment, + block_index: Option<&BTreeMap>, + block: BlockNumber, + ) -> SegmentRangeInclusive { + let blocks_per_file = + self.blocks_per_file.get(&segment).copied().unwrap_or(DEFAULT_BLOCKS_PER_STATIC_FILE); -impl StaticFileProvider { - /// Set a custom number of blocks per file. - #[cfg(any(test, feature = "test-utils"))] - pub fn with_custom_blocks_per_file(self, blocks_per_file: u64) -> Self { - let mut provider = - Arc::try_unwrap(self.0).expect("should be called when initializing only"); - provider.blocks_per_file = blocks_per_file; - Self(Arc::new(provider)) + if let Some(block_index) = block_index { + // Find first block range that contains the requested block + if let Some((_, range)) = block_index.iter().find(|(max_block, _)| block <= **max_block) + { + // Found matching range for an existing file using block index + return *range + } else if let Some((_, range)) = block_index.last_key_value() { + // Didn't find matching range for an existing file, derive a new range from the end + // of the last existing file range. 
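+                // For example, with the default 500_000 blocks per file, a last existing range of
+                // `0..=999_999` and a requested block of `1_200_000` derive the next range
+                // `1_000_000..=1_499_999`.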
+ // + // `block` is always higher than `range.end()` here, because we iterated over all + // `block_index` ranges above and didn't find one that contains our block + let blocks_after_last_range = block - range.end(); + let segments_to_skip = (blocks_after_last_range - 1) / blocks_per_file; + let start = range.end() + 1 + segments_to_skip * blocks_per_file; + return SegmentRangeInclusive::new(start, start + blocks_per_file - 1) + } + } + // No block index is available, derive a new range using the fixed number of blocks, + // starting from the beginning. + find_fixed_range(block, blocks_per_file) } - /// Enables metrics on the [`StaticFileProvider`]. - pub fn with_metrics(self) -> Self { - let mut provider = - Arc::try_unwrap(self.0).expect("should be called when initializing only"); - provider.metrics = Some(Arc::new(StaticFileProviderMetrics::default())); - Self(Arc::new(provider)) + /// Each static file has a fixed number of blocks. This gives out the range where the requested + /// block is positioned. + /// + /// If the specified block falls into one of the ranges of already initialized static files, + /// this function will return that range. + /// + /// If no matching file exists, this function will derive a new range from the end of the last + /// existing file, if any. + /// + /// This function will block indefinitely if a write lock for + /// [`Self::static_files_expected_block_index`] is acquired. In that case, use + /// [`Self::find_fixed_range_with_block_index`]. + pub fn find_fixed_range( + &self, + segment: StaticFileSegment, + block: BlockNumber, + ) -> SegmentRangeInclusive { + self.find_fixed_range_with_block_index( + segment, + self.static_files_expected_block_index.read().get(&segment), + block, + ) } +} +impl StaticFileProvider { /// Reports metrics for the static files. pub fn report_metrics(&self) -> ProviderResult<()> { let Some(metrics) = &self.metrics else { return Ok(()) }; let static_files = iter_static_files(&self.path).map_err(ProviderError::other)?; - for (segment, ranges) in static_files { + for (segment, headers) in static_files { let mut entries = 0; let mut size = 0; - for (block_range, _) in &ranges { - let fixed_block_range = self.find_fixed_range(block_range.start()); + for (block_range, _) in &headers { + let fixed_block_range = self.find_fixed_range(segment, block_range.start()); let jar_provider = self - .get_segment_provider(segment, || Some(fixed_block_range), None)? + .get_segment_provider_for_range(segment, || Some(fixed_block_range), None)? .ok_or_else(|| { ProviderError::MissingStaticFileBlock(segment, block_range.start()) })?; @@ -361,20 +454,34 @@ impl StaticFileProvider { size += data_size + index_size + offsets_size + config_size; } - metrics.record_segment(segment, size, ranges.len(), entries); + metrics.record_segment(segment, size, headers.len(), entries); } Ok(()) } + /// Gets the [`StaticFileJarProvider`] of the requested segment and start index that can be + /// either block or transaction. + pub fn get_segment_provider( + &self, + segment: StaticFileSegment, + start: u64, + ) -> ProviderResult> { + if segment.is_block_based() { + self.get_segment_provider_for_block(segment, start, None) + } else { + self.get_segment_provider_for_transaction(segment, start, None) + } + } + /// Gets the [`StaticFileJarProvider`] of the requested segment and block. 
- pub fn get_segment_provider_from_block( + pub fn get_segment_provider_for_block( &self, segment: StaticFileSegment, block: BlockNumber, path: Option<&Path>, ) -> ProviderResult> { - self.get_segment_provider( + self.get_segment_provider_for_range( segment, || self.get_segment_ranges_from_block(segment, block), path, @@ -383,13 +490,13 @@ impl StaticFileProvider { } /// Gets the [`StaticFileJarProvider`] of the requested segment and transaction. - pub fn get_segment_provider_from_transaction( + pub fn get_segment_provider_for_transaction( &self, segment: StaticFileSegment, tx: TxNumber, path: Option<&Path>, ) -> ProviderResult> { - self.get_segment_provider( + self.get_segment_provider_for_range( segment, || self.get_segment_ranges_from_transaction(segment, tx), path, @@ -400,7 +507,7 @@ impl StaticFileProvider { /// Gets the [`StaticFileJarProvider`] of the requested segment and block or transaction. /// /// `fn_range` should make sure the range goes through `find_fixed_range`. - pub fn get_segment_provider( + pub fn get_segment_provider_for_range( &self, segment: StaticFileSegment, fn_range: impl Fn() -> Option, @@ -445,43 +552,59 @@ impl StaticFileProvider { self.map.remove(&(fixed_block_range_end, segment)); } - /// This handles history expiry by deleting all transaction static files below the given block. + /// This handles history expiry by deleting all static files for the given segment below the + /// given block. /// /// For example if block is 1M and the blocks per file are 500K this will delete all individual /// files below 1M, so 0-499K and 500K-999K. /// /// This will not delete the file that contains the block itself, because files can only be /// removed entirely. - pub fn delete_transactions_below(&self, block: BlockNumber) -> ProviderResult<()> { + /// + /// # Safety + /// + /// This method will never delete the highest static file for the segment, even if the + /// requested block is higher than the highest block in static files. This ensures we always + /// maintain at least one static file if any exist. + /// + /// Returns a list of `SegmentHeader`s from the deleted jars. + pub fn delete_segment_below_block( + &self, + segment: StaticFileSegment, + block: BlockNumber, + ) -> ProviderResult> { // Nothing to delete if block is 0. if block == 0 { - return Ok(()) + return Ok(Vec::new()) } + let highest_block = self.get_highest_static_file_block(segment); + let mut deleted_headers = Vec::new(); + loop { - let Some(block_height) = - self.get_lowest_static_file_block(StaticFileSegment::Transactions) - else { - return Ok(()) + let Some(block_height) = self.get_lowest_static_file_block(segment) else { + return Ok(deleted_headers) }; - if block_height >= block { - return Ok(()) + // Stop if we've reached the target block or the highest static file + if block_height >= block || Some(block_height) == highest_block { + return Ok(deleted_headers) } debug!( target: "provider::static_file", + ?segment, ?block_height, - "Deleting transaction static file below block" + "Deleting static file below block" ); // now we need to wipe the static file, this will take care of updating the index and - // advance the lowest tracked block height for the transactions segment. - self.delete_jar(StaticFileSegment::Transactions, block_height) - .inspect_err(|err| { - warn!( target: "provider::static_file", %block_height, ?err, "Failed to delete transaction static file below block") - }) - ?; + // advance the lowest tracked block height for the segment. 
+ let header = self.delete_jar(segment, block_height).inspect_err(|err| { + warn!( target: "provider::static_file", ?segment, %block_height, ?err, "Failed to delete static file below block") + })?; + + deleted_headers.push(header); } } @@ -490,8 +613,14 @@ impl StaticFileProvider { /// CAUTION: destructive. Deletes files on disk. /// /// This will re-initialize the index after deletion, so all files are tracked. - pub fn delete_jar(&self, segment: StaticFileSegment, block: BlockNumber) -> ProviderResult<()> { - let fixed_block_range = self.find_fixed_range(block); + /// + /// Returns the `SegmentHeader` of the deleted jar. + pub fn delete_jar( + &self, + segment: StaticFileSegment, + block: BlockNumber, + ) -> ProviderResult { + let fixed_block_range = self.find_fixed_range(segment, block); let key = (fixed_block_range.end(), segment); let jar = if let Some((_, jar)) = self.map.remove(&key) { jar.jar @@ -507,11 +636,14 @@ impl StaticFileProvider { NippyJar::::load(&file).map_err(ProviderError::other)? }; + let header = *jar.user_header(); jar.delete().map_err(ProviderError::other)?; + // SAFETY: this is currently necessary to ensure that certain indexes like + // `static_files_min_block` have the correct values after pruning. self.initialize_index()?; - Ok(()) + Ok(header) } /// Given a segment and block range it returns a cached @@ -553,7 +685,7 @@ impl StaticFileProvider { .read() .get(&segment) .filter(|max| **max >= block) - .map(|_| self.find_fixed_range(block)) + .map(|_| self.find_fixed_range(segment, block)) } /// Gets a static file segment's fixed block range from the provider inner @@ -577,7 +709,7 @@ impl StaticFileProvider { } let tx_start = static_files_rev_iter.peek().map(|(tx_end, _)| *tx_end + 1).unwrap_or(0); if tx_start <= tx { - return Some(self.find_fixed_range(block_range.end())) + return Some(self.find_fixed_range(segment, block_range.end())) } } None @@ -594,28 +726,72 @@ impl StaticFileProvider { segment: StaticFileSegment, segment_max_block: Option, ) -> ProviderResult<()> { + let mut min_block = self.static_files_min_block.write(); let mut max_block = self.static_files_max_block.write(); + let mut expected_block_index = self.static_files_expected_block_index.write(); let mut tx_index = self.static_files_tx_index.write(); match segment_max_block { Some(segment_max_block) => { // Update the max block for the segment max_block.insert(segment, segment_max_block); - let fixed_range = self.find_fixed_range(segment_max_block); + let fixed_range = self.find_fixed_range_with_block_index( + segment, + expected_block_index.get(&segment), + segment_max_block, + ); let jar = NippyJar::::load( &self.path.join(segment.filename(&fixed_range)), ) .map_err(ProviderError::other)?; + // Update min_block to track the lowest block range of the segment. + // This is initially set by initialize_index() on node startup, but must be updated + // as the file grows to prevent stale values. + // + // Without this update, min_block can remain at genesis (e.g. Some([0..=0]) or None) + // even after syncing to higher blocks (e.g. [0..=100]). A stale + // min_block causes get_lowest_static_file_block() to return the + // wrong end value, which breaks pruning logic that relies on it for + // safety checks. + // + // Example progression: + // 1. Node starts, initialize_index() sets min_block = [0..=0] + // 2. Sync to block 100, this update sets min_block = [0..=100] + // 3. Pruner calls get_lowest_static_file_block() -> returns 100 (correct). 
Without + // this update, it would incorrectly return 0 (stale) + if let Some(current_block_range) = jar.user_header().block_range() { + min_block + .entry(segment) + .and_modify(|current_min| { + // delete_jar WILL ALWAYS re-initialize all indexes, so we are always + // sure that current_min is always the lowest. + if current_block_range.start() == current_min.start() { + *current_min = current_block_range; + } + }) + .or_insert(current_block_range); + } + + // Update the expected block index + expected_block_index + .entry(segment) + .and_modify(|index| { + index.retain(|_, block_range| block_range.start() < fixed_range.start()); + + index.insert(fixed_range.end(), fixed_range); + }) + .or_insert_with(|| BTreeMap::from([(fixed_range.end(), fixed_range)])); + // Updates the tx index by first removing all entries which have a higher // block_start than our current static file. if let Some(tx_range) = jar.user_header().tx_range() { - let tx_end = tx_range.end(); - // Current block range has the same block start as `fixed_range``, but block end // might be different if we are still filling this static file. - if let Some(current_block_range) = jar.user_header().block_range().copied() { + if let Some(current_block_range) = jar.user_header().block_range() { + let tx_end = tx_range.end(); + // Considering that `update_index` is called when we either append/truncate, // we are sure that we are handling the latest data // points. @@ -655,8 +831,10 @@ impl StaticFileProvider { self.map.retain(|(end, seg), _| !(*seg == segment && *end > fixed_range.end())); } None => { - tx_index.remove(&segment); max_block.remove(&segment); + min_block.remove(&segment); + expected_block_index.remove(&segment); + tx_index.remove(&segment); } }; @@ -667,34 +845,46 @@ impl StaticFileProvider { pub fn initialize_index(&self) -> ProviderResult<()> { let mut min_block = self.static_files_min_block.write(); let mut max_block = self.static_files_max_block.write(); + let mut expected_block_index = self.static_files_expected_block_index.write(); let mut tx_index = self.static_files_tx_index.write(); min_block.clear(); max_block.clear(); tx_index.clear(); - for (segment, ranges) in iter_static_files(&self.path).map_err(ProviderError::other)? { + for (segment, headers) in iter_static_files(&self.path).map_err(ProviderError::other)? 
{ // Update first and last block for each segment - if let Some((first_block_range, _)) = ranges.first() { - min_block.insert(segment, *first_block_range); + if let Some((block_range, _)) = headers.first() { + min_block.insert(segment, *block_range); } - if let Some((last_block_range, _)) = ranges.last() { - max_block.insert(segment, last_block_range.end()); + if let Some((block_range, _)) = headers.last() { + max_block.insert(segment, block_range.end()); } - // Update tx -> block_range index - for (block_range, tx_range) in ranges { - if let Some(tx_range) = tx_range { + for (block_range, header) in headers { + // Update max expected block -> expected_block_range index + expected_block_index + .entry(segment) + .and_modify(|index| { + index.insert(header.expected_block_end(), header.expected_block_range()); + }) + .or_insert_with(|| { + BTreeMap::from([( + header.expected_block_end(), + header.expected_block_range(), + )]) + }); + + // Update max tx -> block_range index + if let Some(tx_range) = header.tx_range() { let tx_end = tx_range.end(); - match tx_index.entry(segment) { - Entry::Occupied(mut index) => { - index.get_mut().insert(tx_end, block_range); - } - Entry::Vacant(index) => { - index.insert(BTreeMap::from([(tx_end, block_range)])); - } - }; + tx_index + .entry(segment) + .and_modify(|index| { + index.insert(tx_end, block_range); + }) + .or_insert_with(|| BTreeMap::from([(tx_end, block_range)])); } } } @@ -776,19 +966,23 @@ impl StaticFileProvider { }; for segment in StaticFileSegment::iter() { - if has_receipt_pruning && segment.is_receipts() { - // Pruned nodes (including full node) do not store receipts as static files. - continue - } + match segment { + StaticFileSegment::Headers | StaticFileSegment::Transactions => {} + StaticFileSegment::Receipts => { + if has_receipt_pruning { + // Pruned nodes (including full node) do not store receipts as static files. + continue + } - if segment.is_receipts() && - (NamedChain::Gnosis == provider.chain_spec().chain_id() || - NamedChain::Chiado == provider.chain_spec().chain_id()) - { - // Gnosis and Chiado's historical import is broken and does not work with this - // check. They are importing receipts along with importing - // headers/bodies. - continue; + if NamedChain::Gnosis == provider.chain_spec().chain_id() || + NamedChain::Chiado == provider.chain_spec().chain_id() + { + // Gnosis and Chiado's historical import is broken and does not work with + // this check. They are importing receipts along + // with importing headers/bodies. + continue; + } + } } let initial_highest_block = self.get_highest_static_file_block(segment); @@ -894,8 +1088,9 @@ impl StaticFileProvider { /// Read-only. pub fn check_segment_consistency(&self, segment: StaticFileSegment) -> ProviderResult<()> { if let Some(latest_block) = self.get_highest_static_file_block(segment) { - let file_path = - self.directory().join(segment.filename(&self.find_fixed_range(latest_block))); + let file_path = self + .directory() + .join(segment.filename(&self.find_fixed_range(segment, latest_block))); let jar = NippyJar::::load(&file_path).map_err(ProviderError::other)?; @@ -1044,6 +1239,13 @@ impl StaticFileProvider { self.static_files_min_block.read().get(&segment).map(|range| range.end()) } + /// Gets the lowest static file's block range if it exists for a static file segment. + /// + /// If there is nothing on disk for the given segment, this will return [`None`]. 
+ pub fn get_lowest_range(&self, segment: StaticFileSegment) -> Option { + self.static_files_min_block.read().get(&segment).copied() + } + /// Gets the highest static file's block height if it exists for a static file segment. /// /// If there is nothing on disk for the given segment, this will return [`None`]. @@ -1064,9 +1266,7 @@ impl StaticFileProvider { /// Gets the highest static file block for all segments. pub fn get_highest_static_files(&self) -> HighestStaticFiles { HighestStaticFiles { - headers: self.get_highest_static_file_block(StaticFileSegment::Headers), receipts: self.get_highest_static_file_block(StaticFileSegment::Receipts), - transactions: self.get_highest_static_file_block(StaticFileSegment::Transactions), } } @@ -1077,16 +1277,12 @@ impl StaticFileProvider { segment: StaticFileSegment, func: impl Fn(StaticFileJarProvider<'_, N>) -> ProviderResult>, ) -> ProviderResult> { - if let Some(highest_block) = self.get_highest_static_file_block(segment) { - let mut range = self.find_fixed_range(highest_block); - while range.end() > 0 { - if let Some(res) = func(self.get_or_create_jar_provider(segment, &range)?)? { + if let Some(ranges) = self.static_files_expected_block_index.read().get(&segment) { + // Iterate through all ranges in reverse order (highest to lowest) + for range in ranges.values().rev() { + if let Some(res) = func(self.get_or_create_jar_provider(segment, range)?)? { return Ok(Some(res)) } - range = SegmentRangeInclusive::new( - range.start().saturating_sub(self.blocks_per_file), - range.end().saturating_sub(self.blocks_per_file), - ); } } @@ -1116,13 +1312,7 @@ impl StaticFileProvider { /// If the static file is missing, the `result` is returned. macro_rules! get_provider { ($number:expr) => {{ - let provider = if segment.is_block_based() { - self.get_segment_provider_from_block(segment, $number, None) - } else { - self.get_segment_provider_from_transaction(segment, $number, None) - }; - - match provider { + match self.get_segment_provider(segment, $number) { Ok(provider) => provider, Err( ProviderError::MissingStaticFileBlock(_, _) | @@ -1187,15 +1377,7 @@ impl StaticFileProvider { F: Fn(&mut StaticFileCursor<'_>, u64) -> ProviderResult> + 'a, T: std::fmt::Debug, { - let get_provider = move |start: u64| { - if segment.is_block_based() { - self.get_segment_provider_from_block(segment, start, None) - } else { - self.get_segment_provider_from_transaction(segment, start, None) - } - }; - - let mut provider = Some(get_provider(range.start)?); + let mut provider = Some(self.get_segment_provider(segment, range.start)?); Ok(range.filter_map(move |number| { match get_fn(&mut provider.as_ref().expect("qed").cursor().ok()?, number).transpose() { Some(result) => Some(result), @@ -1205,7 +1387,7 @@ impl StaticFileProvider { // we don't drop the current provider before requesting the // next one. 
provider.take(); - provider = Some(get_provider(number).ok()?); + provider = Some(self.get_segment_provider(segment, number).ok()?); get_fn(&mut provider.as_ref().expect("qed").cursor().ok()?, number).transpose() } } @@ -1315,6 +1497,12 @@ impl StaticFileProvider { pub fn tx_index(&self) -> &RwLock { &self.static_files_tx_index } + + /// Returns `static_files` expected block index + #[cfg(any(test, feature = "test-utils"))] + pub fn expected_block_index(&self) -> &RwLock { + &self.static_files_expected_block_index + } } /// Helper trait to manage different [`StaticFileProviderRW`] of an `Arc> HeaderProvider for StaticFileProvide } fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) + self.get_segment_provider_for_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.header_by_number(num)) .or_else(|err| { if let ProviderError::MissingStaticFileBlock(_, _) = err { @@ -1406,27 +1594,6 @@ impl> HeaderProvider for StaticFileProvide }) } - fn header_td(&self, block_hash: BlockHash) -> ProviderResult> { - self.find_static_file(StaticFileSegment::Headers, |jar_provider| { - Ok(jar_provider - .cursor()? - .get_two::((&block_hash).into())? - .and_then(|(td, hash)| (hash == block_hash).then_some(td.0))) - }) - } - - fn header_td_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) - .and_then(|provider| provider.header_td_by_number(num)) - .or_else(|err| { - if let ProviderError::MissingStaticFileBlock(_, _) = err { - Ok(None) - } else { - Err(err) - } - }) - } - fn headers_range( &self, range: impl RangeBounds, @@ -1443,7 +1610,7 @@ impl> HeaderProvider for StaticFileProvide &self, num: BlockNumber, ) -> ProviderResult>> { - self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) + self.get_segment_provider_for_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.sealed_header(num)) .or_else(|err| { if let ProviderError::MissingStaticFileBlock(_, _) = err { @@ -1474,7 +1641,7 @@ impl> HeaderProvider for StaticFileProvide impl BlockHashReader for StaticFileProvider { fn block_hash(&self, num: u64) -> ProviderResult> { - self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) + self.get_segment_provider_for_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.block_hash(num)) .or_else(|err| { if let ProviderError::MissingStaticFileBlock(_, _) = err { @@ -1505,7 +1672,7 @@ impl> Rec type Receipt = N::Receipt; fn receipt(&self, num: TxNumber) -> ProviderResult> { - self.get_segment_provider_from_transaction(StaticFileSegment::Receipts, num, None) + self.get_segment_provider_for_transaction(StaticFileSegment::Receipts, num, None) .and_then(|provider| provider.receipt(num)) .or_else(|err| { if let ProviderError::MissingStaticFileTx(_, _) = err { @@ -1550,8 +1717,8 @@ impl> Rec } } -impl> - TransactionsProviderExt for StaticFileProvider +impl> TransactionsProviderExt + for StaticFileProvider { fn transaction_hashes_by_range( &self, @@ -1634,7 +1801,7 @@ impl> TransactionsPr } fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { - self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) + self.get_segment_provider_for_transaction(StaticFileSegment::Transactions, num, None) .and_then(|provider| provider.transaction_by_id(num)) .or_else(|err| { if let ProviderError::MissingStaticFileTx(_, _) = 
err { @@ -1649,7 +1816,7 @@ impl> TransactionsPr &self, num: TxNumber, ) -> ProviderResult> { - self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) + self.get_segment_provider_for_transaction(StaticFileSegment::Transactions, num, None) .and_then(|provider| provider.transaction_by_id_unhashed(num)) .or_else(|err| { if let ProviderError::MissingStaticFileTx(_, _) = err { @@ -1749,7 +1916,7 @@ impl BlockNumReader for StaticFileProvider { /* Cannot be successfully implemented but must exist for trait requirements */ -impl> BlockReader +impl> BlockReader for StaticFileProvider { type Block = N::Block; @@ -1872,3 +2039,135 @@ where tx.encode_2718(rlp_buf); Ok((keccak256(rlp_buf), tx_id)) } + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use reth_chain_state::EthPrimitives; + use reth_db::test_utils::create_test_static_files_dir; + use reth_static_file_types::{SegmentRangeInclusive, StaticFileSegment}; + + use crate::StaticFileProviderBuilder; + + #[test] + fn test_find_fixed_range_with_block_index() -> eyre::Result<()> { + let (static_dir, _) = create_test_static_files_dir(); + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir)? + .with_blocks_per_file(100) + .build()?; + + let segment = StaticFileSegment::Headers; + + // Test with None - should use default behavior + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, None, 0), + SegmentRangeInclusive::new(0, 99) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, None, 250), + SegmentRangeInclusive::new(200, 299) + ); + + // Test with empty index - should fall back to default behavior + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&BTreeMap::new()), 150), + SegmentRangeInclusive::new(100, 199) + ); + + // Create block index with existing ranges + let block_index = BTreeMap::from_iter([ + (99, SegmentRangeInclusive::new(0, 99)), + (199, SegmentRangeInclusive::new(100, 199)), + (299, SegmentRangeInclusive::new(200, 299)), + ]); + + // Test blocks within existing ranges - should return the matching range + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 0), + SegmentRangeInclusive::new(0, 99) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 50), + SegmentRangeInclusive::new(0, 99) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 99), + SegmentRangeInclusive::new(0, 99) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 100), + SegmentRangeInclusive::new(100, 199) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 150), + SegmentRangeInclusive::new(100, 199) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 199), + SegmentRangeInclusive::new(100, 199) + ); + + // Test blocks beyond existing ranges - should derive new ranges from the last range + // Block 300 is exactly one segment after the last range + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 300), + SegmentRangeInclusive::new(300, 399) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 350), + SegmentRangeInclusive::new(300, 399) + ); + + // Block 500 skips one segment (300-399) + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 500), + SegmentRangeInclusive::new(500, 599) + ); + + // Block 1000 skips many segments + assert_eq!( + 
sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 1000), + SegmentRangeInclusive::new(1000, 1099) + ); + + // Test with block index having different sizes than blocks_per_file setting + // This simulates the scenario where blocks_per_file was changed between runs + let mixed_size_index = BTreeMap::from_iter([ + (49, SegmentRangeInclusive::new(0, 49)), // 50 blocks + (149, SegmentRangeInclusive::new(50, 149)), // 100 blocks + (349, SegmentRangeInclusive::new(150, 349)), // 200 blocks + ]); + + // Blocks within existing ranges should return those ranges regardless of size + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 25), + SegmentRangeInclusive::new(0, 49) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 100), + SegmentRangeInclusive::new(50, 149) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 200), + SegmentRangeInclusive::new(150, 349) + ); + + // Block after the last range should derive using current blocks_per_file (100) + // from the end of the last range (349) + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 350), + SegmentRangeInclusive::new(350, 449) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 450), + SegmentRangeInclusive::new(450, 549) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 550), + SegmentRangeInclusive::new(550, 649) + ); + + Ok(()) + } +} diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 1c3bfd58a79..85544e7a45a 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -1,5 +1,7 @@ mod manager; -pub use manager::{StaticFileAccess, StaticFileProvider, StaticFileWriter}; +pub use manager::{ + StaticFileAccess, StaticFileProvider, StaticFileProviderBuilder, StaticFileWriter, +}; mod jar; pub use jar::StaticFileJarProvider; @@ -55,22 +57,21 @@ impl Deref for LoadedJar { mod tests { use super::*; use crate::{ + providers::static_file::manager::StaticFileProviderBuilder, test_utils::create_test_provider_factory, HeaderProvider, StaticFileProviderFactory, }; use alloy_consensus::{Header, SignableTransaction, Transaction, TxLegacy}; - use alloy_primitives::{BlockHash, Signature, TxNumber, B256, U256}; + use alloy_primitives::{map::HashMap, BlockHash, Signature, TxNumber, B256}; use rand::seq::SliceRandom; use reth_db::test_utils::create_test_static_files_dir; - use reth_db_api::{ - transaction::DbTxMut, CanonicalHeaders, HeaderNumbers, HeaderTerminalDifficulties, Headers, - }; + use reth_db_api::{transaction::DbTxMut, CanonicalHeaders, HeaderNumbers, Headers}; use reth_ethereum_primitives::{EthPrimitives, Receipt, TransactionSigned}; use reth_static_file_types::{ find_fixed_range, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }; use reth_storage_api::{ReceiptProvider, TransactionsProvider}; use reth_testing_utils::generators::{self, random_header_range}; - use std::{fmt::Debug, fs, ops::Range, path::Path}; + use std::{collections::BTreeMap, fmt::Debug, fs, ops::Range, path::Path}; fn assert_eyre(got: T, expected: T, msg: &str) -> eyre::Result<()> { if got != expected { @@ -80,7 +81,7 @@ mod tests { } #[test] - fn test_snap() { + fn test_static_files() { // Ranges let row_count = 100u64; let range = 0..=(row_count - 1); 
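For reference, a minimal usage sketch (not part of the patch) of the segment-generic expiry API introduced above. The builder setup mirrors the tests in this hunk; the `SegmentHeader` return value is taken from the method's doc comment.

use crate::StaticFileProviderBuilder;
use reth_chain_state::EthPrimitives;
use reth_db::test_utils::create_test_static_files_dir;
use reth_static_file_types::StaticFileSegment;

fn expire_transaction_files_example() -> eyre::Result<()> {
    let (static_dir, _) = create_test_static_files_dir();
    let sf_rw = StaticFileProviderBuilder::<EthPrimitives>::read_write(&static_dir)?
        .with_blocks_per_file(100)
        .build()?;

    // Deletes every complete `Transactions` file strictly below block 1_000. The file
    // that still contains block 1_000, and the highest file of the segment, are kept.
    let deleted = sf_rw.delete_segment_below_block(StaticFileSegment::Transactions, 1_000)?;

    // One `SegmentHeader` is returned per removed jar; nothing was written here, so
    // nothing gets deleted.
    assert!(deleted.is_empty());
    Ok(())
}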
@@ -102,14 +103,11 @@ mod tests { let mut provider_rw = factory.provider_rw().unwrap(); let tx = provider_rw.tx_mut(); - let mut td = U256::ZERO; for header in headers.clone() { - td += header.header().difficulty; let hash = header.hash(); tx.put::(header.number, hash).unwrap(); tx.put::(header.number, header.clone_header()).unwrap(); - tx.put::(header.number, td.into()).unwrap(); tx.put::(hash, header.number).unwrap(); } provider_rw.commit().unwrap(); @@ -118,12 +116,10 @@ mod tests { { let manager = factory.static_file_provider(); let mut writer = manager.latest_writer(StaticFileSegment::Headers).unwrap(); - let mut td = U256::ZERO; for header in headers.clone() { - td += header.header().difficulty; let hash = header.hash(); - writer.append_header(&header.unseal(), td, &hash).unwrap(); + writer.append_header(&header.unseal(), &hash).unwrap(); } writer.commit().unwrap(); } @@ -133,7 +129,7 @@ mod tests { let db_provider = factory.provider().unwrap(); let manager = db_provider.static_file_provider(); let jar_provider = manager - .get_segment_provider_from_block(StaticFileSegment::Headers, 0, Some(&static_file)) + .get_segment_provider_for_block(StaticFileSegment::Headers, 0, Some(&static_file)) .unwrap(); assert!(!headers.is_empty()); @@ -148,12 +144,6 @@ mod tests { // Compare Header assert_eq!(header, db_provider.header(header_hash).unwrap().unwrap()); assert_eq!(header, jar_provider.header_by_number(header.number).unwrap().unwrap()); - - // Compare HeaderTerminalDifficulties - assert_eq!( - db_provider.header_td(header_hash).unwrap().unwrap(), - jar_provider.header_td_by_number(header.number).unwrap().unwrap() - ); } } } @@ -170,9 +160,11 @@ mod tests { // [ Headers Creation and Commit ] { - let sf_rw = StaticFileProvider::::read_write(&static_dir) - .expect("Failed to create static file provider") - .with_custom_blocks_per_file(blocks_per_file); + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir) + .expect("Failed to create static file provider builder") + .with_blocks_per_file(blocks_per_file) + .build() + .expect("Failed to build static file provider"); let mut header_writer = sf_rw.latest_writer(StaticFileSegment::Headers).unwrap(); @@ -180,9 +172,7 @@ mod tests { let mut header = Header::default(); for num in 0..=tip { header.number = num; - header_writer - .append_header(&header, U256::default(), &BlockHash::default()) - .unwrap(); + header_writer.append_header(&header, &BlockHash::default()).unwrap(); } header_writer.commit().unwrap(); } @@ -266,9 +256,11 @@ mod tests { // Test cases execution { - let sf_rw = StaticFileProvider::read_write(&static_dir) - .expect("Failed to create static file provider") - .with_custom_blocks_per_file(blocks_per_file); + let sf_rw = StaticFileProviderBuilder::read_write(&static_dir) + .expect("Failed to create static file provider builder") + .with_blocks_per_file(blocks_per_file) + .build() + .expect("Failed to build static file provider"); assert_eq!(sf_rw.get_highest_static_file_block(StaticFileSegment::Headers), Some(tip)); assert_eq!( @@ -386,11 +378,11 @@ mod tests { block_ranges.iter().zip(expected_tx_ranges).for_each(|(block_range, expected_tx_range)| { assert_eq!( sf_rw - .get_segment_provider_from_block(segment, block_range.start, None) + .get_segment_provider_for_block(segment, block_range.start, None) .unwrap() .user_header() .tx_range(), - expected_tx_range.as_ref() + expected_tx_range ); }); @@ -481,15 +473,19 @@ mod tests { for segment in segments { let (static_dir, _) = create_test_static_files_dir(); - let sf_rw = 
StaticFileProvider::read_write(&static_dir) - .expect("Failed to create static file provider") - .with_custom_blocks_per_file(blocks_per_file); + let sf_rw = StaticFileProviderBuilder::read_write(&static_dir) + .expect("Failed to create static file provider builder") + .with_blocks_per_file(blocks_per_file) + .build() + .expect("Failed to build static file provider"); setup_tx_based_scenario(&sf_rw, segment, blocks_per_file); - let sf_rw = StaticFileProvider::read_write(&static_dir) - .expect("Failed to create static file provider") - .with_custom_blocks_per_file(blocks_per_file); + let sf_rw = StaticFileProviderBuilder::read_write(&static_dir) + .expect("Failed to create static file provider builder") + .with_blocks_per_file(blocks_per_file) + .build() + .expect("Failed to build static file provider"); let highest_tx = sf_rw.get_highest_static_file_tx(segment).unwrap(); // Test cases @@ -562,4 +558,93 @@ mod tests { Ok(count) } + + #[test] + fn test_dynamic_size() -> eyre::Result<()> { + let (static_dir, _) = create_test_static_files_dir(); + + { + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir)? + .with_blocks_per_file(10) + .build()?; + let mut header_writer = sf_rw.latest_writer(StaticFileSegment::Headers)?; + + let mut header = Header::default(); + for num in 0..=15 { + header.number = num; + header_writer.append_header(&header, &BlockHash::default()).unwrap(); + } + header_writer.commit().unwrap(); + + assert_eq!(sf_rw.headers_range(0..=15)?.len(), 16); + assert_eq!( + sf_rw.expected_block_index().read().deref(), + &HashMap::from([( + StaticFileSegment::Headers, + BTreeMap::from([ + (9, SegmentRangeInclusive::new(0, 9)), + (19, SegmentRangeInclusive::new(10, 19)) + ]) + )]) + ) + } + + { + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir)? + .with_blocks_per_file(5) + .build()?; + let mut header_writer = sf_rw.latest_writer(StaticFileSegment::Headers)?; + + let mut header = Header::default(); + for num in 16..=22 { + header.number = num; + header_writer.append_header(&header, &BlockHash::default()).unwrap(); + } + header_writer.commit().unwrap(); + + assert_eq!(sf_rw.headers_range(0..=22)?.len(), 23); + assert_eq!( + sf_rw.expected_block_index().read().deref(), + &HashMap::from([( + StaticFileSegment::Headers, + BTreeMap::from([ + (9, SegmentRangeInclusive::new(0, 9)), + (19, SegmentRangeInclusive::new(10, 19)), + (24, SegmentRangeInclusive::new(20, 24)) + ]) + )]) + ) + } + + { + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir)? 
+ .with_blocks_per_file(15) + .build()?; + let mut header_writer = sf_rw.latest_writer(StaticFileSegment::Headers)?; + + let mut header = Header::default(); + for num in 23..=40 { + header.number = num; + header_writer.append_header(&header, &BlockHash::default()).unwrap(); + } + header_writer.commit().unwrap(); + + assert_eq!(sf_rw.headers_range(0..=40)?.len(), 41); + assert_eq!( + sf_rw.expected_block_index().read().deref(), + &HashMap::from([( + StaticFileSegment::Headers, + BTreeMap::from([ + (9, SegmentRangeInclusive::new(0, 9)), + (19, SegmentRangeInclusive::new(10, 19)), + (24, SegmentRangeInclusive::new(20, 24)), + (39, SegmentRangeInclusive::new(25, 39)), + (54, SegmentRangeInclusive::new(40, 54)) + ]) + )]) + ) + } + + Ok(()) + } } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index b9c17f82920..2fc4ba61fc7 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -161,8 +161,8 @@ impl StaticFileProviderRW { let static_file_provider = Self::upgrade_provider_to_strong_reference(&reader); - let block_range = static_file_provider.find_fixed_range(block); - let (jar, path) = match static_file_provider.get_segment_provider_from_block( + let block_range = static_file_provider.find_fixed_range(segment, block); + let (jar, path) = match static_file_provider.get_segment_provider_for_block( segment, block_range.start(), None, @@ -351,7 +351,7 @@ impl StaticFileProviderRW { self.data_path = data_path; *self.writer.user_header_mut() = SegmentHeader::new( - self.reader().find_fixed_range(last_block + 1), + self.reader().find_fixed_range(segment, last_block + 1), None, None, segment, @@ -531,7 +531,20 @@ impl StaticFileProviderRW { /// blocks. /// /// Returns the current [`BlockNumber`] as seen in the static file. - pub fn append_header( + pub fn append_header(&mut self, header: &N::BlockHeader, hash: &BlockHash) -> ProviderResult<()> + where + N::BlockHeader: Compact, + { + self.append_header_with_td(header, U256::ZERO, hash) + } + + /// Appends header to static file with a specified total difficulty. + /// + /// It **CALLS** `increment_block()` since the number of headers is equal to the number of + /// blocks. + /// + /// Returns the current [`BlockNumber`] as seen in the static file. 
+ pub fn append_header_with_td( &mut self, header: &N::BlockHeader, total_difficulty: U256, diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 818b97e0c15..0b27c5dc992 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -31,10 +31,6 @@ pub fn assert_genesis_block( assert_eq!(tx.table::().unwrap(), vec![(h, n)]); assert_eq!(tx.table::().unwrap(), vec![(n, h)]); - assert_eq!( - tx.table::().unwrap(), - vec![(n, g.difficulty.into())] - ); assert_eq!( tx.table::().unwrap(), vec![(0, StoredBlockBodyIndices::default())] diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index d5e3fe4da7b..4022efd9a95 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,9 +1,9 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - ChainSpecProvider, ChangeSetReader, HeaderProvider, ReceiptProviderIdExt, StateProvider, - StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, - TransactionsProvider, + ChainSpecProvider, ChangeSetReader, HeaderProvider, PruneCheckpointReader, + ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, StateReader, + StateRootProvider, TransactionVariant, TransactionsProvider, }; use alloy_consensus::{ constants::EMPTY_ROOT_HASH, @@ -29,17 +29,18 @@ use reth_primitives_traits::{ Account, Block, BlockBody, Bytecode, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, SignerRecoverable, }; -use reth_prune_types::PruneModes; +use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, BytecodeReader, DBProvider, DatabaseProviderFactory, HashedPostStateProvider, NodePrimitivesProvider, StageCheckpointReader, StateProofProvider, - StorageRootProvider, + StorageRootProvider, TrieReader, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, - MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, + updates::{TrieUpdates, TrieUpdatesSorted}, + AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, + StorageProof, TrieInput, }; use std::{ collections::BTreeMap, @@ -117,7 +118,6 @@ impl MockEthProvider { /// Add multiple blocks to local block store pub fn extend_blocks(&self, iter: impl IntoIterator) { for (hash, block) in iter { - self.add_header(hash, block.header().clone()); self.add_block(hash, block) } } @@ -291,24 +291,6 @@ impl HeaderP Ok(lock.values().find(|h| h.number() == num).cloned()) } - fn header_td(&self, hash: BlockHash) -> ProviderResult> { - let lock = self.headers.lock(); - Ok(lock.get(&hash).map(|target| { - lock.values() - .filter(|h| h.number() < target.number()) - .fold(target.difficulty(), |td, h| td + h.difficulty()) - })) - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - let lock = self.headers.lock(); - let sum = lock - .values() - .filter(|h| h.number() <= number) - .fold(U256::ZERO, |td, h| td + h.difficulty()); - Ok(Some(sum)) - } - fn headers_range( &self, range: impl RangeBounds, @@ -773,6 +755,21 @@ impl StageCheckpointReader } } 
+impl PruneCheckpointReader + for MockEthProvider +{ + fn get_prune_checkpoint( + &self, + _segment: PruneSegment, + ) -> ProviderResult> { + Ok(None) + } + + fn get_prune_checkpoints(&self) -> ProviderResult> { + Ok(vec![]) + } +} + impl StateRootProvider for MockEthProvider where T: NodePrimitives, @@ -984,6 +981,14 @@ impl ChangeSetReader for MockEthProvi ) -> ProviderResult> { Ok(Vec::default()) } + + fn get_account_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + ) -> ProviderResult> { + Ok(None) + } } impl StateReader for MockEthProvider { @@ -997,6 +1002,19 @@ impl StateReader for MockEthProvider< } } +impl TrieReader for MockEthProvider { + fn trie_reverts(&self, _from: BlockNumber) -> ProviderResult { + Ok(TrieUpdatesSorted::default()) + } + + fn get_block_trie_updates( + &self, + _block_number: BlockNumber, + ) -> ProviderResult { + Ok(TrieUpdatesSorted::default()) + } +} + impl CanonStateSubscriptions for MockEthProvider { diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index d65655de8bf..5530c7411c7 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -1,5 +1,5 @@ use crate::{ - providers::{ProviderNodeTypes, StaticFileProvider}, + providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider}, HashingWriter, ProviderFactory, TrieWriter, }; use alloy_primitives::B256; @@ -10,7 +10,7 @@ use reth_db::{ }; use reth_errors::ProviderResult; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_node_types::{NodeTypes, NodeTypesWithDBAdapter}; +use reth_node_types::NodeTypesWithDBAdapter; use reth_primitives_traits::{Account, StorageEntry}; use reth_trie::StateRoot; use reth_trie_db::DatabaseStateRoot; @@ -50,7 +50,7 @@ pub fn create_test_provider_factory_with_chain_spec( } /// Creates test provider factory with provided chain spec. -pub fn create_test_provider_factory_with_node_types( +pub fn create_test_provider_factory_with_node_types( chain_spec: Arc, ) -> ProviderFactory>>> { let (static_dir, _) = create_test_static_files_dir(); @@ -60,6 +60,7 @@ pub fn create_test_provider_factory_with_node_types( chain_spec, StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"), ) + .expect("failed to create test provider factory") } /// Inserts the genesis alloc from the provided chain spec into the trie. 
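A short illustrative sketch (not part of the patch) of the writer API split above: `append_header` no longer takes a total difficulty, while `append_header_with_td` remains for callers that still need to persist one. Import paths and the provider parameter are indicative only.

use alloy_consensus::Header;
use alloy_primitives::{BlockHash, U256};
use reth_ethereum_primitives::EthPrimitives;
use reth_static_file_types::StaticFileSegment;

use crate::{StaticFileProvider, StaticFileWriter};

fn write_headers_example(sf_rw: &StaticFileProvider<EthPrimitives>) -> eyre::Result<()> {
    let mut writer = sf_rw.latest_writer(StaticFileSegment::Headers)?;
    let mut header = Header::default();

    // Genesis with an explicit total difficulty, using the pre-merge style call.
    writer.append_header_with_td(&header, U256::ZERO, &BlockHash::default())?;

    // Subsequent headers via the new two-argument call; internally the TD defaults to zero.
    for num in 1..10 {
        header.number = num;
        writer.append_header(&header, &BlockHash::default())?;
    }
    writer.commit()?;
    Ok(())
}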
@@ -89,7 +90,7 @@ pub fn insert_genesis>( let (root, updates) = StateRoot::from_tx(provider.tx_ref()) .root_with_updates() .map_err(reth_db::DatabaseError::from)?; - provider.write_trie_updates(&updates).unwrap(); + provider.write_trie_updates(updates).unwrap(); provider.commit()?; diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 374a35f473c..6fe88a6640a 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -2,8 +2,8 @@ use crate::{ AccountReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, - DatabaseProviderFactory, HashedPostStateProvider, StageCheckpointReader, StateProviderFactory, - StateReader, StaticFileProviderFactory, + DatabaseProviderFactory, HashedPostStateProvider, PruneCheckpointReader, StageCheckpointReader, + StateProviderFactory, StateReader, StaticFileProviderFactory, TrieReader, }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; @@ -12,8 +12,10 @@ use std::fmt::Debug; /// Helper trait to unify all provider traits for simplicity. pub trait FullProvider: - DatabaseProviderFactory - + NodePrimitivesProvider + DatabaseProviderFactory< + DB = N::DB, + Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, + > + NodePrimitivesProvider + StaticFileProviderFactory + BlockReaderIdExt< Transaction = TxTy, @@ -37,8 +39,10 @@ pub trait FullProvider: } impl FullProvider for T where - T: DatabaseProviderFactory - + NodePrimitivesProvider + T: DatabaseProviderFactory< + DB = N::DB, + Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, + > + NodePrimitivesProvider + StaticFileProviderFactory + BlockReaderIdExt< Transaction = TxTy, diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 1151990f97b..6d990e17a49 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -909,7 +909,7 @@ mod tests { } let (_, updates) = StateRoot::from_tx(tx).root_with_updates().unwrap(); - provider_rw.write_trie_updates(&updates).unwrap(); + provider_rw.write_trie_updates(updates).unwrap(); let mut state = State::builder().with_bundle_update().build(); @@ -1127,7 +1127,10 @@ mod tests { assert_eq!(storage_root, storage_root_prehashed(init_storage.storage)); assert!(!storage_updates.is_empty()); provider_rw - .write_storage_trie_updates(core::iter::once((&hashed_address, &storage_updates))) + .write_storage_trie_updates_sorted(core::iter::once(( + &hashed_address, + &storage_updates.into_sorted(), + ))) .unwrap(); // destroy the storage and re-create with new slots diff --git a/crates/storage/rpc-provider/README.md b/crates/storage/rpc-provider/README.md index 7180d41840d..f1b51a95749 100644 --- a/crates/storage/rpc-provider/README.md +++ b/crates/storage/rpc-provider/README.md @@ -65,7 +65,7 @@ This provider implements the same traits as the local `BlockchainProvider`, maki Licensed under either of: -- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) -- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) +- Apache License, Version 2.0, ([LICENSE-APACHE](../../../LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) +- MIT license ([LICENSE-MIT](../../../LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. 
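A minimal sketch (not part of the patch) of the `TrieWriter` ownership change exercised in the test hunk above: `write_trie_updates` now consumes the updates and forwards to the new sorted variant. Trait and import paths here are indicative, and the entry-count return value is simply discarded.

use reth_storage_api::TrieWriter;
use reth_storage_errors::provider::ProviderResult;
use reth_trie_common::updates::TrieUpdates;

fn persist_trie<P: TrieWriter>(provider: &P, updates: TrieUpdates) -> ProviderResult<()> {
    // The default method sorts internally and delegates to `write_trie_updates_sorted`.
    provider.write_trie_updates(updates)?;
    Ok(())
}

fn persist_trie_presorted<P: TrieWriter>(provider: &P, updates: TrieUpdates) -> ProviderResult<()> {
    // Sorting once up front avoids re-sorting when the sorted form is reused elsewhere.
    let sorted = updates.into_sorted();
    provider.write_trie_updates_sorted(&sorted)?;
    Ok(())
}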
diff --git a/crates/storage/rpc-provider/src/lib.rs b/crates/storage/rpc-provider/src/lib.rs index 76e511d52d4..6e5bd17218b 100644 --- a/crates/storage/rpc-provider/src/lib.rs +++ b/crates/storage/rpc-provider/src/lib.rs @@ -364,18 +364,6 @@ where Ok(Some(sealed_header.into_header())) } - fn header_td(&self, hash: BlockHash) -> ProviderResult> { - let header = self.header(hash).map_err(ProviderError::other)?; - - Ok(header.map(|b| b.difficulty())) - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - let header = self.header_by_number(number).map_err(ProviderError::other)?; - - Ok(header.map(|b| b.difficulty())) - } - fn headers_range( &self, _range: impl RangeBounds, @@ -1674,14 +1662,6 @@ where Err(ProviderError::UnsupportedProvider) } - fn header_td(&self, _hash: BlockHash) -> Result, ProviderError> { - Err(ProviderError::UnsupportedProvider) - } - - fn header_td_by_number(&self, _number: BlockNumber) -> Result, ProviderError> { - Err(ProviderError::UnsupportedProvider) - } - fn headers_range( &self, _range: impl RangeBounds, @@ -1764,6 +1744,14 @@ where ) -> Result, ProviderError> { Err(ProviderError::UnsupportedProvider) } + + fn get_account_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + ) -> ProviderResult> { + Err(ProviderError::UnsupportedProvider) + } } impl StateProviderFactory for RpcBlockchainStateProvider diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index a62193a5dd8..83cbbbd714e 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -32,6 +32,7 @@ alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true auto_impl.workspace = true +serde_json = { workspace = true, optional = true } [features] default = ["std"] @@ -50,10 +51,12 @@ std = [ "reth-storage-errors/std", "reth-db-models/std", "reth-trie-common/std", + "serde_json?/std", ] db-api = [ "dep:reth-db-api", + "dep:serde_json", ] serde = [ diff --git a/crates/storage/storage-api/src/account.rs b/crates/storage/storage-api/src/account.rs index 1692c4c21f4..270bfd1226c 100644 --- a/crates/storage/storage-api/src/account.rs +++ b/crates/storage/storage-api/src/account.rs @@ -54,4 +54,13 @@ pub trait ChangeSetReader { &self, block_number: BlockNumber, ) -> ProviderResult>; + + /// Search the block's changesets for the given address, and return the result. + /// + /// Returns `None` if the account was not changed in this block. + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult>; } diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs index a30fd8d4a8a..846f59ca442 100644 --- a/crates/storage/storage-api/src/chain.rs +++ b/crates/storage/storage-api/src/chain.rs @@ -14,7 +14,7 @@ use reth_db_api::{ use reth_db_models::StoredBlockWithdrawals; use reth_ethereum_primitives::TransactionSigned; use reth_primitives_traits::{ - Block, BlockBody, FullBlockHeader, FullNodePrimitives, SignedTransaction, + Block, BlockBody, FullBlockHeader, NodePrimitives, SignedTransaction, }; use reth_storage_errors::provider::ProviderResult; @@ -40,11 +40,11 @@ pub trait BlockBodyWriter { } /// Trait that implements how chain-specific types are written to the storage. 
-pub trait ChainStorageWriter: +pub trait ChainStorageWriter: BlockBodyWriter::Body> { } -impl ChainStorageWriter for T where +impl ChainStorageWriter for T where T: BlockBodyWriter::Body> { } @@ -73,11 +73,11 @@ pub trait BlockBodyReader { } /// Trait that implements how chain-specific types are read from storage. -pub trait ChainStorageReader: +pub trait ChainStorageReader: BlockBodyReader { } -impl ChainStorageReader for T where +impl ChainStorageReader for T where T: BlockBodyReader { } diff --git a/crates/storage/storage-api/src/database_provider.rs b/crates/storage/storage-api/src/database_provider.rs index c0e94a044bf..8b5d8281f42 100644 --- a/crates/storage/storage-api/src/database_provider.rs +++ b/crates/storage/storage-api/src/database_provider.rs @@ -160,6 +160,29 @@ pub trait DatabaseProviderFactory: Send + Sync { /// Helper type alias to get the associated transaction type from a [`DatabaseProviderFactory`]. pub type FactoryTx = <::DB as Database>::TX; +/// A trait which can be used to describe any factory-like type which returns a read-only provider. +pub trait DatabaseProviderROFactory { + /// Provider type returned by this factory. + /// + /// This type is intentionally left unconstrained; constraints can be added as-needed when this + /// is used. + type Provider; + + /// Creates and returns a Provider. + fn database_provider_ro(&self) -> ProviderResult; +} + +impl DatabaseProviderROFactory for T +where + T: DatabaseProviderFactory, +{ + type Provider = T::Provider; + + fn database_provider_ro(&self) -> ProviderResult { + ::database_provider_ro(self) + } +} + fn range_size_hint(range: &impl RangeBounds) -> Option { let start = match range.start_bound().cloned() { Bound::Included(start) => start, diff --git a/crates/storage/storage-api/src/header.rs b/crates/storage/storage-api/src/header.rs index 7e3133ec712..39b2eef9031 100644 --- a/crates/storage/storage-api/src/header.rs +++ b/crates/storage/storage-api/src/header.rs @@ -1,6 +1,6 @@ use alloc::vec::Vec; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockHash, BlockNumber, U256}; +use alloy_primitives::{BlockHash, BlockNumber}; use core::ops::RangeBounds; use reth_primitives_traits::{BlockHeader, SealedHeader}; use reth_storage_errors::provider::ProviderResult; @@ -44,12 +44,6 @@ pub trait HeaderProvider: Send + Sync { } } - /// Get total difficulty by block hash. - fn header_td(&self, hash: BlockHash) -> ProviderResult>; - - /// Get total difficulty by block number. - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult>; - /// Get headers in range of block numbers fn headers_range( &self, diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 49dcfd56582..086fefce11e 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -97,5 +97,12 @@ pub use state_writer::*; mod header_sync_gap; pub use header_sync_gap::HeaderSyncGapProvider; +#[cfg(feature = "db-api")] +pub mod metadata; +#[cfg(feature = "db-api")] +pub use metadata::{MetadataProvider, MetadataWriter, StorageSettingsCache}; +#[cfg(feature = "db-api")] +pub use reth_db_api::models::StorageSettings; + mod full; pub use full::*; diff --git a/crates/storage/storage-api/src/metadata.rs b/crates/storage/storage-api/src/metadata.rs new file mode 100644 index 00000000000..2ff48f73385 --- /dev/null +++ b/crates/storage/storage-api/src/metadata.rs @@ -0,0 +1,53 @@ +//! Metadata provider trait for reading and writing node metadata. 
+ +use reth_db_api::models::StorageSettings; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; + +/// Metadata keys. +pub mod keys { + /// Storage configuration settings for this node. + pub const STORAGE_SETTINGS: &str = "storage_settings"; +} + +/// Client trait for reading node metadata from the database. +#[auto_impl::auto_impl(&, Arc)] +pub trait MetadataProvider: Send + Sync { + /// Get a metadata value by key + fn get_metadata(&self, key: &str) -> ProviderResult>>; + + /// Get storage settings for this node + fn storage_settings(&self) -> ProviderResult> { + self.get_metadata(keys::STORAGE_SETTINGS)? + .map(|bytes| serde_json::from_slice(&bytes).map_err(ProviderError::other)) + .transpose() + } +} + +/// Client trait for writing node metadata to the database. +pub trait MetadataWriter: Send + Sync { + /// Write a metadata value + fn write_metadata(&self, key: &str, value: Vec) -> ProviderResult<()>; + + /// Write storage settings for this node + /// + /// Be sure to update provider factory cache with + /// [`StorageSettingsCache::set_storage_settings_cache`]. + fn write_storage_settings(&self, settings: StorageSettings) -> ProviderResult<()> { + self.write_metadata( + keys::STORAGE_SETTINGS, + serde_json::to_vec(&settings).map_err(ProviderError::other)?, + ) + } +} + +/// Trait for caching storage settings on a provider factory. +pub trait StorageSettingsCache: Send + Sync { + /// Gets the cached storage settings. + fn cached_storage_settings(&self) -> StorageSettings; + + /// Sets the storage settings of this `ProviderFactory`. + /// + /// IMPORTANT: It does not save settings in storage, that should be done by + /// [`MetadataWriter::write_storage_settings`] + fn set_storage_settings_cache(&self, settings: StorageSettings); +} diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 44e499ae006..e538e1216e8 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -6,7 +6,7 @@ use crate::{ HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProofProvider, StateProvider, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, - StorageRootProvider, TransactionVariant, TransactionsProvider, + StorageRootProvider, TransactionVariant, TransactionsProvider, TrieReader, }; #[cfg(feature = "db-api")] @@ -15,7 +15,7 @@ use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{ - Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, + Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, }; use core::{ fmt::Debug, @@ -35,8 +35,9 @@ use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie_common::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, - MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, + updates::{TrieUpdates, TrieUpdatesSorted}, + AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, + StorageProof, TrieInput, }; /// Supports various api interfaces for testing purposes. 
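For illustration only (not part of the patch), a minimal in-memory implementation of the new metadata traits. The `Vec<u8>` payload type is an assumption inferred from the `serde_json` calls above, and these exports sit behind the `db-api` feature.

use std::{collections::HashMap, sync::Mutex};

use reth_storage_api::{MetadataProvider, MetadataWriter};
use reth_storage_errors::provider::ProviderResult;

#[derive(Default)]
struct InMemoryMetadata(Mutex<HashMap<String, Vec<u8>>>);

impl MetadataProvider for InMemoryMetadata {
    // Payload type assumed to be raw bytes, matching the serde_json round-trip above.
    fn get_metadata(&self, key: &str) -> ProviderResult<Option<Vec<u8>>> {
        Ok(self.0.lock().unwrap().get(key).cloned())
    }
}

impl MetadataWriter for InMemoryMetadata {
    fn write_metadata(&self, key: &str, value: Vec<u8>) -> ProviderResult<()> {
        self.0.lock().unwrap().insert(key.to_owned(), value);
        Ok(())
    }
}

// With only these two methods implemented, the provided defaults handle the
// `StorageSettings` JSON round-trip via `storage_settings()` / `write_storage_settings()`.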
@@ -59,7 +60,7 @@ impl NoopProvider { #[cfg(feature = "db-api")] tx: TxMock::default(), #[cfg(feature = "db-api")] - prune_modes: PruneModes::none(), + prune_modes: PruneModes::default(), _phantom: Default::default(), } } @@ -73,7 +74,7 @@ impl NoopProvider { #[cfg(feature = "db-api")] tx: TxMock::default(), #[cfg(feature = "db-api")] - prune_modes: PruneModes::none(), + prune_modes: PruneModes::default(), _phantom: Default::default(), } } @@ -355,14 +356,6 @@ impl HeaderProvider for NoopProvider { Ok(None) } - fn header_td(&self, _hash: BlockHash) -> ProviderResult> { - Ok(None) - } - - fn header_td_by_number(&self, _number: BlockNumber) -> ProviderResult> { - Ok(None) - } - fn headers_range( &self, _range: impl RangeBounds, @@ -399,6 +392,14 @@ impl ChangeSetReader for NoopProvider { ) -> ProviderResult> { Ok(Vec::default()) } + + fn get_account_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + ) -> ProviderResult> { + Ok(None) + } } impl StateRootProvider for NoopProvider { @@ -638,6 +639,19 @@ impl DBProvider for NoopProvider TrieReader for NoopProvider { + fn trie_reverts(&self, _from: BlockNumber) -> ProviderResult { + Ok(TrieUpdatesSorted::default()) + } + + fn get_block_trie_updates( + &self, + _block_number: BlockNumber, + ) -> ProviderResult { + Ok(TrieUpdatesSorted::default()) + } +} + #[cfg(feature = "db-api")] impl DatabaseProviderFactory for NoopProvider diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index 3f39cf3838d..9ff02c106e5 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -1,8 +1,8 @@ use alloc::vec::Vec; -use alloy_primitives::{Address, Bytes, B256}; +use alloy_primitives::{Address, BlockNumber, Bytes, B256}; use reth_storage_errors::provider::ProviderResult; use reth_trie_common::{ - updates::{StorageTrieUpdates, TrieUpdates}, + updates::{StorageTrieUpdatesSorted, TrieUpdates, TrieUpdatesSorted}, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; @@ -89,25 +89,93 @@ pub trait StateProofProvider: Send + Sync { fn witness(&self, input: TrieInput, target: HashedPostState) -> ProviderResult>; } +/// Trie Reader +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait TrieReader: Send + Sync { + /// Returns the [`TrieUpdatesSorted`] for reverting the trie database to its state prior to the + /// given block and onwards having been processed. + fn trie_reverts(&self, from: BlockNumber) -> ProviderResult; + + /// Returns the trie updates that were applied by the specified block. + fn get_block_trie_updates( + &self, + block_number: BlockNumber, + ) -> ProviderResult; +} + /// Trie Writer #[auto_impl::auto_impl(&, Arc, Box)] pub trait TrieWriter: Send + Sync { /// Writes trie updates to the database. /// /// Returns the number of entries modified. - fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult; + fn write_trie_updates(&self, trie_updates: TrieUpdates) -> ProviderResult { + self.write_trie_updates_sorted(&trie_updates.into_sorted()) + } + + /// Writes trie updates to the database with already sorted updates. + /// + /// Returns the number of entries modified. + fn write_trie_updates_sorted(&self, trie_updates: &TrieUpdatesSorted) -> ProviderResult; + + /// Records the current values of all trie nodes which will be updated using the [`TrieUpdates`] + /// into the trie changesets tables. 
+ /// + /// The intended usage of this method is to call it _prior_ to calling `write_trie_updates` with + /// the same [`TrieUpdates`]. + /// + /// The `updates_overlay` parameter allows providing additional in-memory trie updates that + /// should be considered when looking up current node values. When provided, these overlay + /// updates are applied on top of the database state, allowing the method to see a view that + /// includes both committed database values and pending in-memory changes. This is useful + /// when writing changesets for updates that depend on previous uncommitted trie changes. + /// + /// Returns the number of keys written. + fn write_trie_changesets( + &self, + block_number: BlockNumber, + trie_updates: &TrieUpdatesSorted, + updates_overlay: Option<&TrieUpdatesSorted>, + ) -> ProviderResult; + + /// Clears contents of trie changesets completely + fn clear_trie_changesets(&self) -> ProviderResult<()>; + + /// Clears contents of trie changesets starting from the given block number (inclusive) onwards. + fn clear_trie_changesets_from(&self, from: BlockNumber) -> ProviderResult<()>; } /// Storage Trie Writer #[auto_impl::auto_impl(&, Arc, Box)] pub trait StorageTrieWriter: Send + Sync { - /// Writes storage trie updates from the given storage trie map. + /// Writes storage trie updates from the given storage trie map with already sorted updates. /// - /// First sorts the storage trie updates by the hashed address key, writing in sorted order. + /// Expects the storage trie updates to already be sorted by the hashed address key. /// /// Returns the number of entries modified. - fn write_storage_trie_updates<'a>( + fn write_storage_trie_updates_sorted<'a>( + &self, + storage_tries: impl Iterator, + ) -> ProviderResult; + + /// Records the current values of all trie nodes which will be updated using the + /// [`StorageTrieUpdatesSorted`] into the storage trie changesets table. + /// + /// The intended usage of this method is to call it _prior_ to calling + /// `write_storage_trie_updates` with the same set of [`StorageTrieUpdatesSorted`]. + /// + /// The `updates_overlay` parameter allows providing additional in-memory trie updates that + /// should be considered when looking up current node values. When provided, these overlay + /// updates are applied on top of the database state for each storage trie, allowing the + /// method to see a view that includes both committed database values and pending in-memory + /// changes. This is useful when writing changesets for storage updates that depend on + /// previous uncommitted trie changes. + /// + /// Returns the number of keys written. + fn write_storage_trie_changesets<'a>( &self, - storage_tries: impl Iterator, + block_number: BlockNumber, + storage_tries: impl Iterator, + updates_overlay: Option<&TrieUpdatesSorted>, ) -> ProviderResult; } diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index 473a727e10d..de45c41e24d 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -383,15 +383,17 @@ impl TaskExecutor { { let on_shutdown = self.on_shutdown.clone(); - // Clone only the specific counter that we need. 
- let finished_regular_tasks_total_metrics = - self.metrics.finished_regular_tasks_total.clone(); + // Choose the appropriate finished counter based on task kind + let finished_counter = match task_kind { + TaskKind::Default => self.metrics.finished_regular_tasks_total.clone(), + TaskKind::Blocking => self.metrics.finished_regular_blocking_tasks_total.clone(), + }; + // Wrap the original future to increment the finished tasks counter upon completion let task = { async move { // Create an instance of IncCounterOnDrop with the counter to increment - let _inc_counter_on_drop = - IncCounterOnDrop::new(finished_regular_tasks_total_metrics); + let _inc_counter_on_drop = IncCounterOnDrop::new(finished_counter); let fut = pin!(fut); let _ = select(on_shutdown, fut).await; } @@ -642,7 +644,7 @@ impl TaskSpawner for TaskExecutor { } fn spawn_blocking(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> { - self.metrics.inc_regular_tasks(); + self.metrics.inc_regular_blocking_tasks(); self.spawn_blocking(fut) } diff --git a/crates/tasks/src/metrics.rs b/crates/tasks/src/metrics.rs index c486fa681cc..24d3065a529 100644 --- a/crates/tasks/src/metrics.rs +++ b/crates/tasks/src/metrics.rs @@ -16,6 +16,10 @@ pub struct TaskExecutorMetrics { pub(crate) regular_tasks_total: Counter, /// Number of finished spawned regular tasks pub(crate) finished_regular_tasks_total: Counter, + /// Number of spawned regular blocking tasks + pub(crate) regular_blocking_tasks_total: Counter, + /// Number of finished spawned regular blocking tasks + pub(crate) finished_regular_blocking_tasks_total: Counter, } impl TaskExecutorMetrics { @@ -28,6 +32,11 @@ impl TaskExecutorMetrics { pub(crate) fn inc_regular_tasks(&self) { self.regular_tasks_total.increment(1); } + + /// Increments the counter for spawned regular blocking tasks. + pub(crate) fn inc_regular_blocking_tasks(&self) { + self.regular_blocking_tasks_total.increment(1); + } } /// Helper type for increasing counters even if a task fails diff --git a/crates/tracing-otlp/Cargo.toml b/crates/tracing-otlp/Cargo.toml index 7b8b666116c..5b01095d4ff 100644 --- a/crates/tracing-otlp/Cargo.toml +++ b/crates/tracing-otlp/Cargo.toml @@ -9,13 +9,30 @@ repository.workspace = true exclude.workspace = true [dependencies] -opentelemetry_sdk = "0.29.0" -opentelemetry = "0.29.1" -opentelemetry-otlp = "0.29.0" -tracing-opentelemetry = "0.30.0" +# obs +opentelemetry_sdk = { workspace = true, optional = true } +opentelemetry = { workspace = true, optional = true } +opentelemetry-otlp = { workspace = true, optional = true, features = ["grpc-tonic"] } +opentelemetry-semantic-conventions = { workspace = true, optional = true } +tracing-opentelemetry = { workspace = true, optional = true } tracing-subscriber.workspace = true tracing.workspace = true -opentelemetry-semantic-conventions = "0.29.0" + +# misc +clap = { workspace = true, features = ["derive"] } +eyre.workspace = true +url.workspace = true [lints] workspace = true + +[features] +default = ["otlp"] + +otlp = [ + "opentelemetry", + "opentelemetry_sdk", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "tracing-opentelemetry", +] diff --git a/crates/tracing-otlp/src/lib.rs b/crates/tracing-otlp/src/lib.rs index 1de112cdb33..2cfd332a408 100644 --- a/crates/tracing-otlp/src/lib.rs +++ b/crates/tracing-otlp/src/lib.rs @@ -1,12 +1,17 @@ +#![cfg(feature = "otlp")] + //! Provides a tracing layer for `OpenTelemetry` that exports spans to an OTLP endpoint. //! //! 
This module simplifies the integration of `OpenTelemetry` tracing with OTLP export in Rust //! applications. It allows for easily capturing and exporting distributed traces to compatible //! backends like Jaeger, Zipkin, or any other OpenTelemetry-compatible tracing system. -use opentelemetry::{trace::TracerProvider, KeyValue, Value}; -use opentelemetry_otlp::SpanExporter; +use clap::ValueEnum; +use eyre::ensure; +use opentelemetry::{global, trace::TracerProvider, KeyValue, Value}; +use opentelemetry_otlp::{SpanExporter, WithExportConfig}; use opentelemetry_sdk::{ + propagation::TraceContextPropagator, trace::{SdkTracer, SdkTracerProvider}, Resource, }; @@ -14,25 +19,85 @@ use opentelemetry_semantic_conventions::{attribute::SERVICE_VERSION, SCHEMA_URL} use tracing::Subscriber; use tracing_opentelemetry::OpenTelemetryLayer; use tracing_subscriber::registry::LookupSpan; +use url::Url; + +// Otlp http endpoint is expected to end with this path. +// See also . +const HTTP_TRACE_ENDPOINT: &str = "/v1/traces"; /// Creates a tracing [`OpenTelemetryLayer`] that exports spans to an OTLP endpoint. /// /// This layer can be added to a [`tracing_subscriber::Registry`] to enable `OpenTelemetry` tracing -/// with OTLP export. -pub fn layer(service_name: impl Into) -> OpenTelemetryLayer +/// with OTLP export to an url. +pub fn span_layer( + service_name: impl Into, + endpoint: &Url, + protocol: OtlpProtocol, +) -> eyre::Result> where for<'span> S: Subscriber + LookupSpan<'span>, { - let exporter = SpanExporter::builder().with_http().build().unwrap(); + global::set_text_map_propagator(TraceContextPropagator::new()); + + let resource = build_resource(service_name); + + let span_builder = SpanExporter::builder(); + + let span_exporter = match protocol { + OtlpProtocol::Http => span_builder.with_http().with_endpoint(endpoint.as_str()).build()?, + OtlpProtocol::Grpc => span_builder.with_tonic().with_endpoint(endpoint.as_str()).build()?, + }; + + let tracer_provider = SdkTracerProvider::builder() + .with_resource(resource) + .with_batch_exporter(span_exporter) + .build(); - let resource = Resource::builder() + global::set_tracer_provider(tracer_provider.clone()); + + let tracer = tracer_provider.tracer("reth"); + Ok(tracing_opentelemetry::layer().with_tracer(tracer)) +} + +// Builds OTLP resource with service information. +fn build_resource(service_name: impl Into) -> Resource { + Resource::builder() .with_service_name(service_name) .with_schema_url([KeyValue::new(SERVICE_VERSION, env!("CARGO_PKG_VERSION"))], SCHEMA_URL) - .build(); + .build() +} - let provider = - SdkTracerProvider::builder().with_resource(resource).with_batch_exporter(exporter).build(); +/// OTLP transport protocol type +#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)] +pub enum OtlpProtocol { + /// HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + Http, + /// gRPC transport, port 4317 + Grpc, +} - let tracer = provider.tracer("reth-otlp"); - tracing_opentelemetry::layer().with_tracer(tracer) +impl OtlpProtocol { + /// Validate and correct the URL to match protocol requirements. + /// + /// For HTTP: Ensures the path ends with `/v1/traces`, appending it if necessary. + /// For gRPC: Ensures the path does NOT include `/v1/traces`. 
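A usage sketch of the endpoint normalization described above, assuming the `reth-tracing-otlp` crate as introduced in this diff plus the `url` and `eyre` crates; the localhost ports are illustrative only:

```rust
use reth_tracing_otlp::OtlpProtocol;
use url::Url;

fn main() -> eyre::Result<()> {
    // HTTP: the `/v1/traces` path is appended if it is missing.
    let mut http = Url::parse("http://localhost:4318")?;
    OtlpProtocol::Http.validate_endpoint(&mut http)?;
    assert_eq!(http.as_str(), "http://localhost:4318/v1/traces");

    // gRPC: a plain host:port endpoint passes, a `/v1/traces` suffix is rejected.
    let mut grpc = Url::parse("http://localhost:4317")?;
    OtlpProtocol::Grpc.validate_endpoint(&mut grpc)?;
    assert!(OtlpProtocol::Grpc
        .validate_endpoint(&mut Url::parse("http://localhost:4317/v1/traces")?)
        .is_err());

    Ok(())
}
```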
+ pub fn validate_endpoint(&self, url: &mut Url) -> eyre::Result<()> { + match self { + Self::Http => { + if !url.path().ends_with(HTTP_TRACE_ENDPOINT) { + let path = url.path().trim_end_matches('/'); + url.set_path(&format!("{}{}", path, HTTP_TRACE_ENDPOINT)); + } + } + Self::Grpc => { + ensure!( + !url.path().ends_with(HTTP_TRACE_ENDPOINT), + "OTLP gRPC endpoint should not include {} path, got: {}", + HTTP_TRACE_ENDPOINT, + url + ); + } + } + Ok(()) + } } diff --git a/crates/tracing/Cargo.toml b/crates/tracing/Cargo.toml index a5c09c23a35..8cf83e138ca 100644 --- a/crates/tracing/Cargo.toml +++ b/crates/tracing/Cargo.toml @@ -12,11 +12,22 @@ description = "tracing helpers" workspace = true [dependencies] +# reth +reth-tracing-otlp = { workspace = true, optional = true } + +# obs tracing.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "ansi", "json"] } tracing-appender.workspace = true tracing-journald.workspace = true tracing-logfmt.workspace = true -rolling-file.workspace = true -eyre.workspace = true + +# misc clap = { workspace = true, features = ["derive"] } +eyre.workspace = true +rolling-file.workspace = true +url = { workspace = true, optional = true } + +[features] +default = ["otlp"] +otlp = ["reth-tracing-otlp", "dep:url"] diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 5b9c93b5fb6..33f8c90ada5 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -1,13 +1,16 @@ +use crate::formatter::LogFormat; +use rolling_file::{RollingConditionBasic, RollingFileAppender}; use std::{ fmt, path::{Path, PathBuf}, }; - -use rolling_file::{RollingConditionBasic, RollingFileAppender}; use tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{filter::Directive, EnvFilter, Layer, Registry}; - -use crate::formatter::LogFormat; +#[cfg(feature = "otlp")] +use { + reth_tracing_otlp::{span_layer, OtlpProtocol}, + url::Url, +}; /// A worker guard returned by the file layer. /// @@ -18,14 +21,20 @@ pub type FileWorkerGuard = tracing_appender::non_blocking::WorkerGuard; /// A boxed tracing [Layer]. pub(crate) type BoxedLayer = Box + Send + Sync>; -/// Default [directives](Directive) for [`EnvFilter`] which disables high-frequency debug logs from -/// `hyper`, `hickory-resolver`, `jsonrpsee-server`, and `discv5`. -const DEFAULT_ENV_FILTER_DIRECTIVES: [&str; 5] = [ +/// Default [directives](Directive) for [`EnvFilter`] which: +/// 1. Disable high-frequency debug logs from dependencies such as `hyper`, `hickory-resolver`, +/// `hickory_proto`, `discv5`, `jsonrpsee-server`, and `hyper_util::client::legacy::pool`. +/// 2. Set `opentelemetry_*` crates log level to `WARN`, as `DEBUG` is too noisy. +const DEFAULT_ENV_FILTER_DIRECTIVES: [&str; 9] = [ "hyper::proto::h1=off", "hickory_resolver=off", "hickory_proto=off", "discv5=off", "jsonrpsee-server=off", + "opentelemetry-otlp=warn", + "opentelemetry_sdk=warn", + "opentelemetry-http=warn", + "hyper_util::client::legacy::pool=off", ]; /// Manages the collection of layers for a tracing subscriber. 
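For context, the hunk below adds `Layers::with_span_layer`. A hedged wiring sketch using the signature added there; it assumes `Layers` is publicly reachable as `reth_tracing::Layers` (not verified here), and the endpoint and filter values are placeholders:

```rust
use reth_tracing::Layers; // assumed public path to the layer collection
use reth_tracing_otlp::OtlpProtocol;
use tracing_subscriber::EnvFilter;
use url::Url;

/// Attach an OTLP span exporter to an existing layer collection.
fn add_otlp_spans(layers: &mut Layers) -> eyre::Result<()> {
    let endpoint = Url::parse("http://localhost:4318/v1/traces")?;
    // Export only INFO-and-above spans to keep the exporter quiet.
    let filter = EnvFilter::new("info");
    layers.with_span_layer("reth".to_string(), endpoint, filter, OtlpProtocol::Http)
}
```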
@@ -123,6 +132,26 @@ impl Layers { self.add_layer(layer); Ok(guard) } + + /// Add OTLP spans layer to the layer collection + #[cfg(feature = "otlp")] + pub fn with_span_layer( + &mut self, + service_name: String, + endpoint_exporter: Url, + filter: EnvFilter, + otlp_protocol: OtlpProtocol, + ) -> eyre::Result<()> { + // Create the span provider + + let span_layer = span_layer(service_name, &endpoint_exporter, otlp_protocol) + .map_err(|e| eyre::eyre!("Failed to build OTLP span exporter {}", e))? + .with_filter(filter); + + self.add_layer(span_layer); + + Ok(()) + } } /// Holds configuration information for file logging. diff --git a/crates/transaction-pool/src/blobstore/converter.rs b/crates/transaction-pool/src/blobstore/converter.rs new file mode 100644 index 00000000000..3f6abc56bff --- /dev/null +++ b/crates/transaction-pool/src/blobstore/converter.rs @@ -0,0 +1,30 @@ +use alloy_consensus::{BlobTransactionSidecar, EnvKzgSettings}; +use alloy_eips::eip7594::BlobTransactionSidecarEip7594; +use tokio::sync::Semaphore; + +// We allow up to 5 concurrent conversions to avoid excessive memory usage. +static SEMAPHORE: Semaphore = Semaphore::const_new(5); + +/// A simple semaphore-based blob sidecar converter. +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub struct BlobSidecarConverter; + +impl BlobSidecarConverter { + /// Creates a new blob sidecar converter. + pub const fn new() -> Self { + Self + } + + /// Converts the blob sidecar to the EIP-7594 format. + pub async fn convert( + &self, + sidecar: BlobTransactionSidecar, + ) -> Option { + let _permit = SEMAPHORE.acquire().await.ok()?; + tokio::task::spawn_blocking(move || sidecar.try_into_7594(EnvKzgSettings::Default.get())) + .await + .ok()? + .ok() + } +} diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 5ccafe15000..b883345aac6 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -4,6 +4,8 @@ use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStor use alloy_eips::{ eip4844::{BlobAndProofV1, BlobAndProofV2}, eip7594::BlobTransactionSidecarVariant, + eip7840::BlobParams, + merge::EPOCH_SLOTS, }; use alloy_primitives::{TxHash, B256}; use parking_lot::{Mutex, RwLock}; @@ -14,6 +16,13 @@ use tracing::{debug, trace}; /// How many [`BlobTransactionSidecarVariant`] to cache in memory. pub const DEFAULT_MAX_CACHED_BLOBS: u32 = 100; +/// A cache size heuristic based on the highest blob params +/// +/// This uses the max blobs per tx and max blobs per block over 16 epochs: `21 * 6 * 512 = 64512` +/// This should be ~4MB +const VERSIONED_HASH_TO_TX_HASH_CACHE_SIZE: u64 = + BlobParams::bpo2().max_blobs_per_tx * BlobParams::bpo2().max_blob_count * EPOCH_SLOTS * 16; + /// A blob store that stores blob data on disk. 
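A compile-level usage sketch for the semaphore-bounded converter added above; it assumes the `reth-transaction-pool` blobstore re-export from this diff and that `convert` returns the EIP-7594 sidecar on success, as suggested by the imports shown:

```rust
use alloy_consensus::BlobTransactionSidecar;
use alloy_eips::eip7594::BlobTransactionSidecarEip7594;
use reth_transaction_pool::blobstore::BlobSidecarConverter;

/// Upgrade a legacy EIP-4844 sidecar to the EIP-7594 cell-proof format.
async fn upgrade_sidecar(
    sidecar: BlobTransactionSidecar,
) -> Option<BlobTransactionSidecarEip7594> {
    // At most five conversions run concurrently (static semaphore), and the
    // KZG proof recomputation itself happens on a blocking thread, so the
    // async runtime is not stalled by the CPU-heavy work.
    BlobSidecarConverter::new().convert(sidecar).await
}
```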
/// /// The type uses deferred deletion, meaning that blobs are not immediately deleted from disk, but @@ -288,7 +297,9 @@ impl DiskFileBlobStoreInner { size_tracker: Default::default(), file_lock: Default::default(), txs_to_delete: Default::default(), - versioned_hashes_to_txhash: Mutex::new(LruMap::new(ByLength::new(max_length * 6))), + versioned_hashes_to_txhash: Mutex::new(LruMap::new(ByLength::new( + VERSIONED_HASH_TO_TX_HASH_CACHE_SIZE as u32, + ))), } } diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index 29844994bc0..ee7eb45af0f 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -5,6 +5,7 @@ use alloy_eips::{ eip7594::BlobTransactionSidecarVariant, }; use alloy_primitives::B256; +pub use converter::BlobSidecarConverter; pub use disk::{DiskFileBlobStore, DiskFileBlobStoreConfig, OpenDiskFileBlobStore}; pub use mem::InMemoryBlobStore; pub use noop::NoopBlobStore; @@ -17,6 +18,7 @@ use std::{ }; pub use tracker::{BlobStoreCanonTracker, BlobStoreUpdates}; +mod converter; pub mod disk; mod mem; mod noop; diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 74d92fb3e6b..3bcbb4cd0ab 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -157,7 +157,7 @@ pub enum Eip4844PoolTransactionError { /// Thrown if an EIP-4844 transaction without any blobs arrives #[error("blobless blob transaction")] NoEip4844Blobs, - /// Thrown if an EIP-4844 transaction without any blobs arrives + /// Thrown if an EIP-4844 transaction arrives with too many blobs #[error("too many blobs in transaction: have {have}, permitted {permitted}")] TooManyEip4844Blobs { /// Number of blobs the transaction has @@ -237,8 +237,13 @@ pub enum InvalidPoolTransactionError { /// Thrown if the input data of a transaction is greater /// than some meaningful limit a user might use. This is not a consensus error /// making the transaction invalid, rather a DOS protection. - #[error("input data too large")] - OversizedData(usize, usize), + #[error("oversized data: transaction size {size}, limit {limit}")] + OversizedData { + /// Size of the transaction/input data that exceeded the limit. + size: usize, + /// Configured limit that was exceeded. + limit: usize, + }, /// Thrown if the transaction's fee is below the minimum fee #[error("transaction underpriced")] Underpriced, @@ -335,7 +340,7 @@ impl InvalidPoolTransactionError { } Self::ExceedsFeeCap { max_tx_fee_wei: _, tx_fee_cap_wei: _ } => true, Self::ExceedsMaxInitCodeSize(_, _) => true, - Self::OversizedData(_, _) => true, + Self::OversizedData { .. } => true, Self::Underpriced => { // local setting false @@ -393,7 +398,7 @@ impl InvalidPoolTransactionError { /// Returns `true` if an import failed due to an oversized transaction pub const fn is_oversized(&self) -> bool { - matches!(self, Self::OversizedData(_, _)) + matches!(self, Self::OversizedData { .. }) } /// Returns `true` if an import failed due to nonce gap. diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 6d289a48ced..8717c6e3135 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -1,22 +1,23 @@ //! 
Support for maintaining the state of the transaction pool use crate::{ - blobstore::{BlobStoreCanonTracker, BlobStoreUpdates}, + blobstore::{BlobSidecarConverter, BlobStoreCanonTracker, BlobStoreUpdates}, error::PoolError, metrics::MaintainPoolMetrics, traits::{CanonicalStateUpdate, EthPoolTransaction, TransactionPool, TransactionPoolExt}, - BlockInfo, PoolTransaction, PoolUpdateKind, TransactionOrigin, + AllPoolTransactions, BlobTransactionSidecarVariant, BlockInfo, PoolTransaction, PoolUpdateKind, + TransactionOrigin, }; use alloy_consensus::{transaction::TxHashRef, BlockHeader, Typed2718}; use alloy_eips::{BlockNumberOrTag, Decodable2718, Encodable2718}; -use alloy_primitives::{Address, BlockHash, BlockNumber}; -use alloy_rlp::{Bytes, Encodable}; +use alloy_primitives::{Address, BlockHash, BlockNumber, Bytes}; +use alloy_rlp::Encodable; use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, }; use reth_chain_state::CanonStateNotification; -use reth_chainspec::{ChainSpecProvider, EthChainSpec}; +use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_execution_types::ChangedAccount; use reth_fs_util::FsPathError; use reth_primitives_traits::{ @@ -107,13 +108,13 @@ where N: NodePrimitives, Client: StateProviderFactory + BlockReaderIdExt
- + ChainSpecProvider> + + ChainSpecProvider + EthereumHardforks> + Clone + 'static, BaseFee: BaseFeeProvider + Send + 'static, P: TransactionPoolExt> + 'static, St: Stream> + Send + Unpin + 'static, - Tasks: TaskSpawner + 'static, + Tasks: TaskSpawner + Clone + 'static, { async move { maintain_transaction_pool(client, base_fee_provider, pool, events, task_spawner, config) @@ -136,13 +137,13 @@ pub async fn maintain_transaction_pool( N: NodePrimitives, Client: StateProviderFactory + BlockReaderIdExt
- + ChainSpecProvider> + + ChainSpecProvider + EthereumHardforks> + Clone + 'static, BaseFee: BaseFeeProvider + Send + 'static, P: TransactionPoolExt> + 'static, St: Stream> + Send + Unpin + 'static, - Tasks: TaskSpawner + 'static, + Tasks: TaskSpawner + Clone + 'static, { let metrics = MaintainPoolMetrics::default(); let MaintainPoolConfig { max_update_depth, max_reload_accounts, .. } = config; @@ -501,6 +502,89 @@ pub async fn maintain_transaction_pool( // keep track of mined blob transactions blob_store_tracker.add_new_chain_blocks(&blocks); + + // If Osaka activates in 2 slots we need to convert blobs to new format. + if !chain_spec.is_osaka_active_at_timestamp(tip.timestamp()) && + !chain_spec.is_osaka_active_at_timestamp(tip.timestamp().saturating_add(12)) && + chain_spec.is_osaka_active_at_timestamp(tip.timestamp().saturating_add(24)) + { + let pool = pool.clone(); + let spawner = task_spawner.clone(); + let client = client.clone(); + task_spawner.spawn(Box::pin(async move { + // Start converting not eaerlier than 4 seconds into current slot to ensure + // that our pool only contains valid transactions for the next block (as + // it's not Osaka yet). + tokio::time::sleep(Duration::from_secs(4)).await; + + let mut interval = tokio::time::interval(Duration::from_secs(1)); + loop { + // Loop and replace blob transactions until we reach Osaka transition + // block after which no legacy blobs are going to be accepted. + let last_iteration = + client.latest_header().ok().flatten().is_none_or(|header| { + client + .chain_spec() + .is_osaka_active_at_timestamp(header.timestamp()) + }); + + let AllPoolTransactions { pending, queued } = pool.all_transactions(); + for tx in pending + .into_iter() + .chain(queued) + .filter(|tx| tx.transaction.is_eip4844()) + { + let tx_hash = *tx.transaction.hash(); + + // Fetch sidecar from the pool + let Ok(Some(sidecar)) = pool.get_blob(tx_hash) else { + continue; + }; + // Ensure it is a legacy blob + if !sidecar.is_eip4844() { + continue; + } + // Remove transaction and sidecar from the pool, both are in memory + // now + let Some(tx) = pool.remove_transactions(vec![tx_hash]).pop() else { + continue; + }; + pool.delete_blob(tx_hash); + + let BlobTransactionSidecarVariant::Eip4844(sidecar) = + Arc::unwrap_or_clone(sidecar) + else { + continue; + }; + + let converter = BlobSidecarConverter::new(); + let pool = pool.clone(); + spawner.spawn(Box::pin(async move { + // Convert sidecar to EIP-7594 format + let Some(sidecar) = converter.convert(sidecar).await else { + return; + }; + + // Re-insert transaction with the new sidecar + let origin = tx.origin; + let Some(tx) = EthPoolTransaction::try_from_eip4844( + tx.transaction.clone_into_consensus(), + sidecar.into(), + ) else { + return; + }; + let _ = pool.add_transaction(origin, tx).await; + })); + } + + if last_iteration { + break; + } + + interval.tick().await; + } + })); + } } } } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index a5aa664e764..90cd042df69 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -16,6 +16,8 @@ use std::{ use tokio::sync::broadcast::{error::TryRecvError, Receiver}; use tracing::debug; +const MAX_NEW_TRANSACTIONS_PER_BATCH: usize = 16; + /// An iterator that returns transactions that can be executed on the current state (*best* /// transactions). 
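The maintain-loop hunk above only kicks off sidecar conversion when Osaka activates exactly two slots after the current tip. A self-contained sketch of that window check, assuming 12-second slots and timestamp-based fork activation (the real code uses `saturating_add` on the tip timestamp):

```rust
const SLOT_SECS: u64 = 12;

/// Conversion starts only when the tip is not Osaka, the next slot is not
/// Osaka, but the slot after that is, i.e. activation is two slots away.
fn starts_conversion(tip_timestamp: u64, osaka_activation_timestamp: u64) -> bool {
    let is_osaka = |ts: u64| ts >= osaka_activation_timestamp;
    !is_osaka(tip_timestamp)
        && !is_osaka(tip_timestamp + SLOT_SECS)
        && is_osaka(tip_timestamp + 2 * SLOT_SECS)
}

fn main() {
    let osaka = 1_000_000;
    // Tip two slots before activation: start converting pool blobs now.
    assert!(starts_conversion(osaka - 2 * SLOT_SECS, osaka));
    // One slot too early, or already in the last pre-Osaka slot: no trigger.
    assert!(!starts_conversion(osaka - 3 * SLOT_SECS, osaka));
    assert!(!starts_conversion(osaka - SLOT_SECS, osaka));
    println!("window check ok");
}
```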
/// @@ -165,13 +167,17 @@ impl BestTransactions { /// Checks for new transactions that have come into the `PendingPool` after this iterator was /// created and inserts them fn add_new_transactions(&mut self) { - while let Some(pending_tx) = self.try_recv() { - // same logic as PendingPool::add_transaction/PendingPool::best_with_unlocked - let tx_id = *pending_tx.transaction.id(); - if self.ancestor(&tx_id).is_none() { - self.independent.insert(pending_tx.clone()); + for _ in 0..MAX_NEW_TRANSACTIONS_PER_BATCH { + if let Some(pending_tx) = self.try_recv() { + // same logic as PendingPool::add_transaction/PendingPool::best_with_unlocked + let tx_id = *pending_tx.transaction.id(); + if self.ancestor(&tx_id).is_none() { + self.independent.insert(pending_tx.clone()); + } + self.all.insert(tx_id, pending_tx); + } else { + break; } - self.all.insert(tx_id, pending_tx); } } } diff --git a/crates/transaction-pool/src/pool/events.rs b/crates/transaction-pool/src/pool/events.rs index 89cfc95bdfe..f6bdd4a4d04 100644 --- a/crates/transaction-pool/src/pool/events.rs +++ b/crates/transaction-pool/src/pool/events.rs @@ -2,6 +2,7 @@ use crate::{traits::PropagateKind, PoolTransaction, SubPool, ValidPoolTransactio use alloy_primitives::{TxHash, B256}; use std::sync::Arc; +use crate::pool::QueuedReason; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -11,7 +12,9 @@ pub enum FullTransactionEvent { /// Transaction has been added to the pending pool. Pending(TxHash), /// Transaction has been added to the queued pool. - Queued(TxHash), + /// + /// If applicable, attached the specific reason why this was queued. + Queued(TxHash, Option), /// Transaction has been included in the block belonging to this hash. Mined { /// The hash of the mined transaction. @@ -40,7 +43,7 @@ impl Clone for FullTransactionEvent { fn clone(&self) -> Self { match self { Self::Pending(hash) => Self::Pending(*hash), - Self::Queued(hash) => Self::Queued(*hash), + Self::Queued(hash, reason) => Self::Queued(*hash, reason.clone()), Self::Mined { tx_hash, block_hash } => { Self::Mined { tx_hash: *tx_hash, block_hash: *block_hash } } diff --git a/crates/transaction-pool/src/pool/listener.rs b/crates/transaction-pool/src/pool/listener.rs index 280fb4ad10c..123c6cf956a 100644 --- a/crates/transaction-pool/src/pool/listener.rs +++ b/crates/transaction-pool/src/pool/listener.rs @@ -1,7 +1,10 @@ //! Listeners for the transaction-pool use crate::{ - pool::events::{FullTransactionEvent, NewTransactionEvent, TransactionEvent}, + pool::{ + events::{FullTransactionEvent, NewTransactionEvent, TransactionEvent}, + QueuedReason, + }, traits::{NewBlobSidecar, PropagateKind}, PoolTransaction, ValidPoolTransaction, }; @@ -17,6 +20,7 @@ use tokio::sync::mpsc::{ self as mpsc, error::TrySendError, Receiver, Sender, UnboundedReceiver, UnboundedSender, }; use tracing::debug; + /// The size of the event channel used to propagate transaction events. const TX_POOL_EVENT_CHANNEL_SIZE: usize = 1024; @@ -29,6 +33,11 @@ pub struct TransactionEvents { } impl TransactionEvents { + /// Create a new instance of this stream. + pub const fn new(hash: TxHash, events: UnboundedReceiver) -> Self { + Self { hash, events } + } + /// The hash for this transaction pub const fn hash(&self) -> TxHash { self.hash @@ -159,8 +168,12 @@ impl PoolEventBroadcast { } /// Notify listeners about a transaction that was added to the queued pool. 
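The `best.rs` hunk above bounds how many newly pending transactions are drained per call. A self-contained analogue of that pattern using a std channel; the real code uses a tokio broadcast receiver and `MAX_NEW_TRANSACTIONS_PER_BATCH = 16`:

```rust
use std::sync::mpsc;

const MAX_PER_BATCH: usize = 16;

/// Pull at most `MAX_PER_BATCH` items per call so a busy sender cannot stall
/// the consumer indefinitely; leftovers are picked up on the next call.
fn drain_batch(rx: &mpsc::Receiver<u64>, out: &mut Vec<u64>) {
    for _ in 0..MAX_PER_BATCH {
        match rx.try_recv() {
            Ok(tx_id) => out.push(tx_id),
            // Channel empty (or closed): stop early.
            Err(_) => break,
        }
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    for id in 0..40u64 {
        tx.send(id).unwrap();
    }
    let mut seen = Vec::new();
    drain_batch(&rx, &mut seen);
    assert_eq!(seen.len(), MAX_PER_BATCH); // only 16 of the 40 queued items
    drain_batch(&rx, &mut seen);
    assert_eq!(seen.len(), 2 * MAX_PER_BATCH);
    println!("drained {} items over two batches", seen.len());
}
```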
- pub(crate) fn queued(&mut self, tx: &TxHash) { - self.broadcast_event(tx, TransactionEvent::Queued, FullTransactionEvent::Queued(*tx)); + pub(crate) fn queued(&mut self, tx: &TxHash, reason: Option) { + self.broadcast_event( + tx, + TransactionEvent::Queued, + FullTransactionEvent::Queued(*tx, reason), + ); } /// Notify listeners about a transaction that was propagated. diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 7f528cc298c..d185444eadf 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -747,8 +747,8 @@ where listener.discarded(tx.hash()); } } - AddedTransaction::Parked { transaction, replaced, .. } => { - listener.queued(transaction.hash()); + AddedTransaction::Parked { transaction, replaced, queued_reason, .. } => { + listener.queued(transaction.hash(), queued_reason.clone()); if let Some(replaced) = replaced { listener.replaced(replaced.clone(), *transaction.hash()); } diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index 43a652a1476..193442174ca 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -260,35 +260,33 @@ impl ParkedPool> { &self, basefee: u64, ) -> Vec>> { - let ids = self.satisfy_base_fee_ids(basefee as u128); - let mut txs = Vec::with_capacity(ids.len()); - for id in ids { - txs.push(self.get(&id).expect("transaction exists").transaction.clone().into()); - } + let mut txs = Vec::new(); + self.satisfy_base_fee_ids(basefee as u128, |tx| { + txs.push(tx.clone()); + }); txs } /// Returns all transactions that satisfy the given basefee. - fn satisfy_base_fee_ids(&self, basefee: u128) -> Vec { - let mut transactions = Vec::new(); - { - let mut iter = self.by_id.iter().peekable(); - - while let Some((id, tx)) = iter.next() { - if tx.transaction.transaction.max_fee_per_gas() < basefee { - // still parked -> skip descendant transactions - 'this: while let Some((peek, _)) = iter.peek() { - if peek.sender != id.sender { - break 'this - } - iter.next(); + fn satisfy_base_fee_ids(&self, basefee: u128, mut tx_handler: F) + where + F: FnMut(&Arc>), + { + let mut iter = self.by_id.iter().peekable(); + + while let Some((id, tx)) = iter.next() { + if tx.transaction.transaction.max_fee_per_gas() < basefee { + // still parked -> skip descendant transactions + 'this: while let Some((peek, _)) = iter.peek() { + if peek.sender != id.sender { + break 'this } - } else { - transactions.push(*id); + iter.next(); } + } else { + tx_handler(&tx.transaction); } } - transactions } /// Removes all transactions from this subpool that can afford the given basefee, @@ -306,7 +304,10 @@ impl ParkedPool> { where F: FnMut(Arc>), { - let to_remove = self.satisfy_base_fee_ids(basefee as u128); + let mut to_remove = Vec::new(); + self.satisfy_base_fee_ids(basefee as u128, |tx| { + to_remove.push(*tx.id()); + }); for id in to_remove { if let Some(tx) = self.remove_transaction(&id) { diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 9bd1d092b4f..dc675031ea6 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -278,14 +278,6 @@ impl PendingPool { } } - /// Returns the ancestor the given transaction, the transaction with `nonce - 1`. 
- /// - /// Note: for a transaction with nonce higher than the current on chain nonce this will always - /// return an ancestor since all transaction in this pool are gapless. - fn ancestor(&self, id: &TransactionId) -> Option<&PendingTransaction> { - self.get(&id.unchecked_ancestor()?) - } - /// Adds a new transactions to the pending queue. /// /// # Panics @@ -342,14 +334,35 @@ impl PendingPool { let tx = self.by_id.remove(id)?; self.size_of -= tx.transaction.size(); - if let Some(highest) = self.highest_nonces.get(&id.sender) { - if highest.transaction.nonce() == id.nonce { - self.highest_nonces.remove(&id.sender); + match self.highest_nonces.entry(id.sender) { + Entry::Occupied(mut entry) => { + if entry.get().transaction.nonce() == id.nonce { + // we just removed the tx with the highest nonce for this sender, find the + // highest remaining tx from that sender + if let Some((_, new_highest)) = self + .by_id + .range(( + id.sender.start_bound(), + std::ops::Bound::Included(TransactionId::new(id.sender, u64::MAX)), + )) + .last() + { + // insert the new highest nonce for this sender + entry.insert(new_highest.clone()); + } else { + entry.remove(); + } + } } - if let Some(ancestor) = self.ancestor(id) { - self.highest_nonces.insert(id.sender, ancestor.clone()); + Entry::Vacant(_) => { + debug_assert!( + false, + "removed transaction without a tracked highest nonce {:?}", + id + ); } } + Some(tx.transaction) } @@ -921,8 +934,7 @@ mod tests { assert!(removed.is_empty()); // Verify that retrieving transactions from an empty pool yields nothing - let all_txs: Vec<_> = pool.all().collect(); - assert!(all_txs.is_empty()); + assert!(pool.all().next().is_none()); } #[test] @@ -1055,4 +1067,61 @@ mod tests { assert!(pool.get_txs_by_sender(sender_b).is_empty()); assert!(pool.get_txs_by_sender(sender_c).is_empty()); } + + #[test] + fn test_remove_non_highest_keeps_highest() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + let sender = address!("0x00000000000000000000000000000000000000aa"); + let txs = MockTransactionSet::dependent(sender, 0, 3, TxType::Eip1559).into_vec(); + for tx in txs { + pool.add_transaction(f.validated_arc(tx), 0); + } + pool.assert_invariants(); + let sender_id = f.ids.sender_id(&sender).unwrap(); + let mid_id = TransactionId::new(sender_id, 1); + let _ = pool.remove_transaction(&mid_id); + let highest = pool.highest_nonces.get(&sender_id).unwrap(); + assert_eq!(highest.transaction.nonce(), 2); + pool.assert_invariants(); + } + + #[test] + fn test_cascade_removal_recomputes_highest() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + let sender = address!("0x00000000000000000000000000000000000000bb"); + let txs = MockTransactionSet::dependent(sender, 0, 4, TxType::Eip1559).into_vec(); + for tx in txs { + pool.add_transaction(f.validated_arc(tx), 0); + } + pool.assert_invariants(); + let sender_id = f.ids.sender_id(&sender).unwrap(); + let id3 = TransactionId::new(sender_id, 3); + let _ = pool.remove_transaction(&id3); + let highest = pool.highest_nonces.get(&sender_id).unwrap(); + assert_eq!(highest.transaction.nonce(), 2); + let id2 = TransactionId::new(sender_id, 2); + let _ = pool.remove_transaction(&id2); + let highest = pool.highest_nonces.get(&sender_id).unwrap(); + assert_eq!(highest.transaction.nonce(), 1); + pool.assert_invariants(); + } + + #[test] + fn test_remove_only_tx_clears_highest() { + let mut f = MockTransactionFactory::default(); + 
let mut pool = PendingPool::new(MockOrdering::default()); + let sender = address!("0x00000000000000000000000000000000000000cc"); + let txs = MockTransactionSet::dependent(sender, 0, 1, TxType::Eip1559).into_vec(); + for tx in txs { + pool.add_transaction(f.validated_arc(tx), 0); + } + pool.assert_invariants(); + let sender_id = f.ids.sender_id(&sender).unwrap(); + let id0 = TransactionId::new(sender_id, 0); + let _ = pool.remove_transaction(&id0); + assert!(!pool.highest_nonces.contains_key(&sender_id)); + pool.assert_invariants(); + } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 9552646652b..2b9d8bae8ab 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -429,6 +429,20 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { /// Consumer: Utility fn all_transaction_hashes(&self) -> Vec; + /// Removes a single transaction corresponding to the given hash. + /// + /// Note: This removes the transaction as if it got discarded (_not_ mined). + /// + /// Returns the removed transaction if it was found in the pool. + /// + /// Consumer: Utility + fn remove_transaction( + &self, + hash: TxHash, + ) -> Option>> { + self.remove_transactions(vec![hash]).pop() + } + /// Removes all transactions corresponding to the given hashes. /// /// Note: This removes the transactions as if they got discarded (_not_ mined). diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 945f6c1c738..ca80b9a0809 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -39,7 +39,7 @@ use std::{ atomic::{AtomicBool, AtomicU64}, Arc, }, - time::Instant, + time::{Instant, SystemTime}, }; use tokio::sync::Mutex; @@ -326,10 +326,10 @@ where if tx_input_len > self.max_tx_input_bytes { return Err(TransactionValidationOutcome::Invalid( transaction, - InvalidPoolTransactionError::OversizedData( - tx_input_len, - self.max_tx_input_bytes, - ), + InvalidPoolTransactionError::OversizedData { + size: tx_input_len, + limit: self.max_tx_input_bytes, + }, )) } } else { @@ -338,7 +338,10 @@ where if tx_size > self.max_tx_input_bytes { return Err(TransactionValidationOutcome::Invalid( transaction, - InvalidPoolTransactionError::OversizedData(tx_size, self.max_tx_input_bytes), + InvalidPoolTransactionError::OversizedData { + size: tx_size, + limit: self.max_tx_input_bytes, + }, )) } } @@ -393,15 +396,12 @@ where match self.tx_fee_cap { Some(0) | None => {} // Skip if cap is 0 or None Some(tx_fee_cap_wei) => { - // max possible tx fee is (gas_price * gas_limit) - // (if EIP1559) max possible tx fee is (max_fee_per_gas * gas_limit) - let gas_price = transaction.max_fee_per_gas(); - let max_tx_fee_wei = gas_price.saturating_mul(transaction.gas_limit() as u128); + let max_tx_fee_wei = transaction.cost().saturating_sub(transaction.value()); if max_tx_fee_wei > tx_fee_cap_wei { return Err(TransactionValidationOutcome::Invalid( transaction, InvalidPoolTransactionError::ExceedsFeeCap { - max_tx_fee_wei, + max_tx_fee_wei: max_tx_fee_wei.saturating_to(), tx_fee_cap_wei, }, )) @@ -673,7 +673,7 @@ where Eip4844PoolTransactionError::UnexpectedEip4844SidecarAfterOsaka, )) } - } else if sidecar.is_eip7594() { + } else if sidecar.is_eip7594() && !self.allow_7594_sidecars() { return Err(InvalidPoolTransactionError::Eip4844( Eip4844PoolTransactionError::UnexpectedEip7594SidecarBeforeOsaka, )) @@ -745,6 +745,10 @@ where self.fork_tracker.osaka.store(true, 
std::sync::atomic::Ordering::Relaxed); } + self.fork_tracker + .tip_timestamp + .store(new_tip_block.timestamp(), std::sync::atomic::Ordering::Relaxed); + if let Some(blob_params) = self.chain_spec().blob_params_at_timestamp(new_tip_block.timestamp()) { @@ -759,6 +763,24 @@ where fn max_gas_limit(&self) -> u64 { self.block_gas_limit.load(std::sync::atomic::Ordering::Relaxed) } + + /// Returns whether EIP-7594 sidecars are allowed + fn allow_7594_sidecars(&self) -> bool { + let tip_timestamp = self.fork_tracker.tip_timestamp(); + + // If next block is Osaka, allow 7594 sidecars + if self.chain_spec().is_osaka_active_at_timestamp(tip_timestamp.saturating_add(12)) { + true + } else if self.chain_spec().is_osaka_active_at_timestamp(tip_timestamp.saturating_add(24)) { + let current_timestamp = + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); + + // Allow after 4 seconds into last non-Osaka slot + current_timestamp >= tip_timestamp.saturating_add(4) + } else { + false + } + } } impl TransactionValidator for EthTransactionValidator @@ -811,6 +833,8 @@ pub struct EthTransactionValidatorBuilder { prague: bool, /// Fork indicator whether we are in the Osaka hardfork. osaka: bool, + /// Timestamp of the tip block. + tip_timestamp: u64, /// Max blob count at the block's timestamp. max_blob_count: u64, /// Whether using EIP-2718 type transactions is allowed @@ -885,6 +909,8 @@ impl EthTransactionValidatorBuilder { // osaka not yet activated osaka: false, + tip_timestamp: 0, + // max blob count is prague by default max_blob_count: BlobParams::prague().max_blobs_per_tx, @@ -1012,6 +1038,7 @@ impl EthTransactionValidatorBuilder { self.cancun = self.client.chain_spec().is_cancun_active_at_timestamp(timestamp); self.prague = self.client.chain_spec().is_prague_active_at_timestamp(timestamp); self.osaka = self.client.chain_spec().is_osaka_active_at_timestamp(timestamp); + self.tip_timestamp = timestamp; self.max_blob_count = self .client .chain_spec() @@ -1072,6 +1099,7 @@ impl EthTransactionValidatorBuilder { cancun, prague, osaka, + tip_timestamp, eip2718, eip1559, eip4844, @@ -1094,6 +1122,7 @@ impl EthTransactionValidatorBuilder { cancun: AtomicBool::new(cancun), prague: AtomicBool::new(prague), osaka: AtomicBool::new(osaka), + tip_timestamp: AtomicU64::new(tip_timestamp), max_blob_count: AtomicU64::new(max_blob_count), }; @@ -1175,6 +1204,8 @@ pub struct ForkTracker { pub osaka: AtomicBool, /// Tracks max blob count per transaction at the block's timestamp. pub max_blob_count: AtomicU64, + /// Tracks the timestamp of the tip block. + pub tip_timestamp: AtomicU64, } impl ForkTracker { @@ -1198,6 +1229,11 @@ impl ForkTracker { self.osaka.load(std::sync::atomic::Ordering::Relaxed) } + /// Returns the timestamp of the tip block. + pub fn tip_timestamp(&self) -> u64 { + self.tip_timestamp.load(std::sync::atomic::Ordering::Relaxed) + } + /// Returns the max allowed blob count per transaction. 
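The validator hunk above changes the fee-cap check to bound `cost - value`, which, assuming `cost` bundles value, execution fees and (for EIP-4844) blob fees, also caps the blob-fee component. A worked sketch with illustrative numbers, not the reth implementation:

```rust
fn main() {
    let gas_limit: u128 = 21_000;
    let max_fee_per_gas: u128 = 100_000_000_000; // 100 gwei
    let blob_gas: u128 = 131_072; // one blob
    let max_fee_per_blob_gas: u128 = 1_000_000_000; // 1 gwei
    let value: u128 = 1_000_000_000_000_000_000; // 1 ETH transferred

    // `cost` bundles value + execution fees + blob fees; subtracting the value
    // leaves the fee component that the fee cap is meant to bound.
    let cost = value + gas_limit * max_fee_per_gas + blob_gas * max_fee_per_blob_gas;
    let max_tx_fee_wei = cost - value;

    let tx_fee_cap_wei: u128 = 1_000_000_000_000_000_000; // e.g. a 1 ETH cap
    assert!(max_tx_fee_wei <= tx_fee_cap_wei);
    println!("max fee charged to the sender: {max_tx_fee_wei} wei");
}
```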
pub fn max_blob_count(&self) -> u64 { self.max_blob_count.load(std::sync::atomic::Ordering::Relaxed) @@ -1278,6 +1314,7 @@ mod tests { cancun: false.into(), prague: false.into(), osaka: false.into(), + tip_timestamp: 0.into(), max_blob_count: 0.into(), }; diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 725f83c392c..bccd4d7b347 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -515,7 +515,7 @@ impl fmt::Debug for ValidPoolTransaction { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ValidPoolTransaction") .field("id", &self.transaction_id) - .field("pragate", &self.propagate) + .field("propagate", &self.propagate) .field("origin", &self.origin) .field("hash", self.transaction.hash()) .field("tx", &self.transaction) diff --git a/crates/transaction-pool/tests/it/listeners.rs b/crates/transaction-pool/tests/it/listeners.rs index d0a9c9c5aa8..105caae12b4 100644 --- a/crates/transaction-pool/tests/it/listeners.rs +++ b/crates/transaction-pool/tests/it/listeners.rs @@ -82,7 +82,7 @@ async fn txpool_listener_queued_event() { assert_matches!(events.next().await, Some(TransactionEvent::Queued)); // The listener of all should receive queued event as well. - assert_matches!(all_tx_events.next().await, Some(FullTransactionEvent::Queued(hash)) if hash == *transaction.get_hash()); + assert_matches!(all_tx_events.next().await, Some(FullTransactionEvent::Queued(hash,_ )) if hash == *transaction.get_hash()); } #[tokio::test(flavor = "multi_thread")] diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 0aa93adb598..2fcc23ab53b 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -23,6 +23,7 @@ reth-codecs = { workspace = true, optional = true } alloy-rpc-types-eth = { workspace = true, optional = true } alloy-serde = { workspace = true, optional = true } +arrayvec = { workspace = true, optional = true } bytes = { workspace = true, optional = true } derive_more.workspace = true itertools = { workspace = true, features = ["use_alloc"] } @@ -52,6 +53,7 @@ alloy-genesis.workspace = true alloy-primitives = { workspace = true, features = ["getrandom"] } alloy-trie = { workspace = true, features = ["arbitrary", "serde"] } bytes.workspace = true +arrayvec.workspace = true hash-db.workspace = true plain_hasher.workspace = true arbitrary = { workspace = true, features = ["derive"] } @@ -74,6 +76,7 @@ std = [ "alloy-rpc-types-eth?/std", "alloy-serde?/std", "alloy-trie/std", + "arrayvec?/std", "bytes?/std", "derive_more/std", "nybbles/std", @@ -87,6 +90,7 @@ std = [ eip1186 = ["alloy-rpc-types-eth/serde", "dep:alloy-serde"] serde = [ "dep:serde", + "arrayvec?/serde", "bytes?/serde", "nybbles/serde", "alloy-primitives/serde", @@ -98,7 +102,7 @@ serde = [ "revm-database/serde", "revm-state/serde", ] -reth-codec = ["dep:reth-codecs", "dep:bytes"] +reth-codec = ["dep:reth-codecs", "dep:bytes", "dep:arrayvec"] serde-bincode-compat = [ "serde", "reth-primitives-traits/serde-bincode-compat", diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index 50d9f20af0b..22f57d9f34d 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -3,6 +3,7 @@ use core::ops::Not; use crate::{ added_removed_keys::MultiAddedRemovedKeys, prefix_set::{PrefixSetMut, TriePrefixSetsMut}, + utils::extend_sorted_vec, KeyHasher, MultiProofTargets, Nibbles, }; use 
alloc::{borrow::Cow, vec::Vec}; @@ -277,6 +278,15 @@ impl HashedPostState { ChunkedHashedPostState::new(self, size) } + /// Returns the number of items that will be considered during chunking in `[Self::chunks]`. + pub fn chunking_length(&self) -> usize { + self.accounts.len() + + self.storages + .values() + .map(|storage| if storage.wiped { 1 } else { 0 } + storage.storage.len()) + .sum::() + } + /// Extend this hashed post state with contents of another. /// Entries in the second hashed post state take precedence. pub fn extend(&mut self, other: Self) { @@ -484,6 +494,35 @@ impl HashedPostStateSorted { pub const fn account_storages(&self) -> &B256Map { &self.storages } + + /// Returns `true` if there are no account or storage updates. + pub fn is_empty(&self) -> bool { + self.accounts.accounts.is_empty() && + self.accounts.destroyed_accounts.is_empty() && + self.storages.is_empty() + } + + /// Returns the total number of updates including all accounts and storage updates. + pub fn total_len(&self) -> usize { + self.accounts.accounts.len() + + self.accounts.destroyed_accounts.len() + + self.storages.values().map(|storage| storage.len()).sum::() + } + + /// Extends this state with contents of another sorted state. + /// Entries in `other` take precedence for duplicate keys. + pub fn extend_ref(&mut self, other: &Self) { + // Extend accounts + self.accounts.extend_ref(&other.accounts); + + // Extend storages + for (hashed_address, other_storage) in &other.storages { + self.storages + .entry(*hashed_address) + .and_modify(|existing| existing.extend_ref(other_storage)) + .or_insert_with(|| other_storage.clone()); + } + } } impl AsRef for HashedPostStateSorted { @@ -510,6 +549,20 @@ impl HashedAccountsSorted { .chain(self.destroyed_accounts.iter().map(|address| (*address, None))) .sorted_by_key(|entry| *entry.0) } + + /// Extends this collection with contents of another sorted collection. + /// Entries in `other` take precedence for duplicate keys. + pub fn extend_ref(&mut self, other: &Self) { + // Updates take precedence over removals, so we want removals from `other` to only apply to + // the previous accounts. + self.accounts.retain(|(addr, _)| !other.destroyed_accounts.contains(addr)); + + // Extend the sorted accounts vector + extend_sorted_vec(&mut self.accounts, &other.accounts); + + // Merge destroyed accounts sets + self.destroyed_accounts.extend(&other.destroyed_accounts); + } } /// Sorted hashed storage optimized for iterating during state trie calculation. @@ -537,6 +590,81 @@ impl HashedStorageSorted { .chain(self.zero_valued_slots.iter().map(|hashed_slot| (*hashed_slot, U256::ZERO))) .sorted_by_key(|entry| *entry.0) } + + /// Returns the total number of storage slot updates. + pub fn len(&self) -> usize { + self.non_zero_valued_slots.len() + self.zero_valued_slots.len() + } + + /// Returns `true` if there are no storage slot updates. + pub fn is_empty(&self) -> bool { + self.non_zero_valued_slots.is_empty() && self.zero_valued_slots.is_empty() + } + + /// Extends this storage with contents of another sorted storage. + /// Entries in `other` take precedence for duplicate keys. 
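The `extend_ref` helpers above lean on `crate::utils::extend_sorted_vec`, whose body is not part of this diff. A plausible self-contained sketch of such a sorted merge where `other` wins on duplicate keys; this is an assumption about the helper, not its actual implementation:

```rust
/// Merge the sorted `other` slice into the sorted `target` vector, keeping the
/// result sorted and letting entries from `other` replace duplicates.
fn extend_sorted_vec<K: Ord + Copy, V: Clone>(target: &mut Vec<(K, V)>, other: &[(K, V)]) {
    let mut merged = Vec::with_capacity(target.len() + other.len());
    let (mut i, mut j) = (0, 0);
    while i < target.len() && j < other.len() {
        match target[i].0.cmp(&other[j].0) {
            std::cmp::Ordering::Less => {
                merged.push(target[i].clone());
                i += 1;
            }
            std::cmp::Ordering::Greater => {
                merged.push(other[j].clone());
                j += 1;
            }
            std::cmp::Ordering::Equal => {
                // Duplicate key: `other` takes precedence, drop the old entry.
                merged.push(other[j].clone());
                i += 1;
                j += 1;
            }
        }
    }
    merged.extend_from_slice(&target[i..]);
    merged.extend_from_slice(&other[j..]);
    *target = merged;
}

fn main() {
    let mut accounts = vec![(1u8, "old"), (3, "old")];
    extend_sorted_vec(&mut accounts, &[(2, "new"), (3, "new")]);
    assert_eq!(accounts, vec![(1, "old"), (2, "new"), (3, "new")]);
    println!("{accounts:?}");
}
```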
+ pub fn extend_ref(&mut self, other: &Self) { + if other.wiped { + // If other is wiped, clear everything and copy from other + self.wiped = true; + self.non_zero_valued_slots.clear(); + self.zero_valued_slots.clear(); + self.non_zero_valued_slots.extend_from_slice(&other.non_zero_valued_slots); + self.zero_valued_slots.extend(&other.zero_valued_slots); + return; + } + + self.non_zero_valued_slots.retain(|(slot, _)| !other.zero_valued_slots.contains(slot)); + + // Extend the sorted non-zero valued slots + extend_sorted_vec(&mut self.non_zero_valued_slots, &other.non_zero_valued_slots); + + // Merge zero valued slots sets + self.zero_valued_slots.extend(&other.zero_valued_slots); + } +} + +impl From for HashedStorage { + fn from(sorted: HashedStorageSorted) -> Self { + let mut storage = B256Map::default(); + + // Add all non-zero valued slots + for (slot, value) in sorted.non_zero_valued_slots { + storage.insert(slot, value); + } + + // Add all zero valued slots + for slot in sorted.zero_valued_slots { + storage.insert(slot, U256::ZERO); + } + + Self { wiped: sorted.wiped, storage } + } +} + +impl From for HashedPostState { + fn from(sorted: HashedPostStateSorted) -> Self { + let mut accounts = B256Map::default(); + + // Add all updated accounts + for (address, account) in sorted.accounts.accounts { + accounts.insert(address, Some(account)); + } + + // Add all destroyed accounts + for address in sorted.accounts.destroyed_accounts { + accounts.insert(address, None); + } + + // Convert storages + let storages = sorted + .storages + .into_iter() + .map(|(address, storage)| (address, storage.into())) + .collect(); + + Self { accounts, storages } + } } /// An iterator that yields chunks of the state updates of at most `size` account and storage @@ -1072,4 +1200,163 @@ mod tests { ); assert_eq!(chunks.next(), None); } + + #[test] + fn test_hashed_post_state_sorted_extend_ref() { + // Test extending accounts + let mut state1 = HashedPostStateSorted { + accounts: HashedAccountsSorted { + accounts: vec![ + (B256::from([1; 32]), Account::default()), + (B256::from([3; 32]), Account::default()), + ], + destroyed_accounts: B256Set::from_iter([B256::from([5; 32])]), + }, + storages: B256Map::default(), + }; + + let state2 = HashedPostStateSorted { + accounts: HashedAccountsSorted { + accounts: vec![ + (B256::from([2; 32]), Account::default()), + (B256::from([3; 32]), Account { nonce: 1, ..Default::default() }), // Override + (B256::from([4; 32]), Account::default()), + ], + destroyed_accounts: B256Set::from_iter([B256::from([6; 32])]), + }, + storages: B256Map::default(), + }; + + state1.extend_ref(&state2); + + // Check accounts are merged and sorted + assert_eq!(state1.accounts.accounts.len(), 4); + assert_eq!(state1.accounts.accounts[0].0, B256::from([1; 32])); + assert_eq!(state1.accounts.accounts[1].0, B256::from([2; 32])); + assert_eq!(state1.accounts.accounts[2].0, B256::from([3; 32])); + assert_eq!(state1.accounts.accounts[2].1.nonce, 1); // Should have state2's value + assert_eq!(state1.accounts.accounts[3].0, B256::from([4; 32])); + + // Check destroyed accounts are merged + assert!(state1.accounts.destroyed_accounts.contains(&B256::from([5; 32]))); + assert!(state1.accounts.destroyed_accounts.contains(&B256::from([6; 32]))); + } + + #[test] + fn test_hashed_storage_sorted_extend_ref() { + // Test normal extension + let mut storage1 = HashedStorageSorted { + non_zero_valued_slots: vec![ + (B256::from([1; 32]), U256::from(10)), + (B256::from([3; 32]), U256::from(30)), + ], + zero_valued_slots: 
B256Set::from_iter([B256::from([5; 32])]), + wiped: false, + }; + + let storage2 = HashedStorageSorted { + non_zero_valued_slots: vec![ + (B256::from([2; 32]), U256::from(20)), + (B256::from([3; 32]), U256::from(300)), // Override + (B256::from([4; 32]), U256::from(40)), + ], + zero_valued_slots: B256Set::from_iter([B256::from([6; 32])]), + wiped: false, + }; + + storage1.extend_ref(&storage2); + + assert_eq!(storage1.non_zero_valued_slots.len(), 4); + assert_eq!(storage1.non_zero_valued_slots[0].0, B256::from([1; 32])); + assert_eq!(storage1.non_zero_valued_slots[1].0, B256::from([2; 32])); + assert_eq!(storage1.non_zero_valued_slots[2].0, B256::from([3; 32])); + assert_eq!(storage1.non_zero_valued_slots[2].1, U256::from(300)); // Should have storage2's value + assert_eq!(storage1.non_zero_valued_slots[3].0, B256::from([4; 32])); + assert!(storage1.zero_valued_slots.contains(&B256::from([5; 32]))); + assert!(storage1.zero_valued_slots.contains(&B256::from([6; 32]))); + assert!(!storage1.wiped); + + // Test wiped storage + let mut storage3 = HashedStorageSorted { + non_zero_valued_slots: vec![(B256::from([1; 32]), U256::from(10))], + zero_valued_slots: B256Set::from_iter([B256::from([2; 32])]), + wiped: false, + }; + + let storage4 = HashedStorageSorted { + non_zero_valued_slots: vec![(B256::from([3; 32]), U256::from(30))], + zero_valued_slots: B256Set::from_iter([B256::from([4; 32])]), + wiped: true, + }; + + storage3.extend_ref(&storage4); + + assert!(storage3.wiped); + // When wiped, should only have storage4's values + assert_eq!(storage3.non_zero_valued_slots.len(), 1); + assert_eq!(storage3.non_zero_valued_slots[0].0, B256::from([3; 32])); + assert_eq!(storage3.zero_valued_slots.len(), 1); + assert!(storage3.zero_valued_slots.contains(&B256::from([4; 32]))); + } + + #[test] + fn test_hashed_post_state_chunking_length() { + let addr1 = B256::from([1; 32]); + let addr2 = B256::from([2; 32]); + let addr3 = B256::from([3; 32]); + let addr4 = B256::from([4; 32]); + let slot1 = B256::from([1; 32]); + let slot2 = B256::from([2; 32]); + let slot3 = B256::from([3; 32]); + + let state = HashedPostState { + accounts: B256Map::from_iter([(addr1, None), (addr2, None), (addr4, None)]), + storages: B256Map::from_iter([ + ( + addr1, + HashedStorage { + wiped: false, + storage: B256Map::from_iter([ + (slot1, U256::ZERO), + (slot2, U256::ZERO), + (slot3, U256::ZERO), + ]), + }, + ), + ( + addr2, + HashedStorage { + wiped: true, + storage: B256Map::from_iter([ + (slot1, U256::ZERO), + (slot2, U256::ZERO), + (slot3, U256::ZERO), + ]), + }, + ), + ( + addr3, + HashedStorage { + wiped: false, + storage: B256Map::from_iter([ + (slot1, U256::ZERO), + (slot2, U256::ZERO), + (slot3, U256::ZERO), + ]), + }, + ), + ]), + }; + + let chunking_length = state.chunking_length(); + for size in 1..=state.clone().chunks(1).count() { + let chunk_count = state.clone().chunks(size).count(); + let expected_count = chunking_length.div_ceil(size); + assert_eq!( + chunk_count, expected_count, + "chunking_length: {}, size: {}", + chunking_length, size + ); + } + } } diff --git a/crates/trie/common/src/input.rs b/crates/trie/common/src/input.rs index fff50fbb7b0..522cfa9ed41 100644 --- a/crates/trie/common/src/input.rs +++ b/crates/trie/common/src/input.rs @@ -34,7 +34,7 @@ impl TrieInput { /// Create new trie input from the provided blocks, from oldest to newest. See the documentation /// for [`Self::extend_with_blocks`] for details. 
pub fn from_blocks<'a>( - blocks: impl IntoIterator)>, + blocks: impl IntoIterator, ) -> Self { let mut input = Self::default(); input.extend_with_blocks(blocks); @@ -47,14 +47,10 @@ impl TrieInput { /// constructed from the state of this block and the state itself, **without** trie updates. pub fn extend_with_blocks<'a>( &mut self, - blocks: impl IntoIterator)>, + blocks: impl IntoIterator, ) { for (hashed_state, trie_updates) in blocks { - if let Some(nodes) = trie_updates.as_ref() { - self.append_cached_ref(nodes, hashed_state); - } else { - self.append_ref(hashed_state); - } + self.append_cached_ref(trie_updates, hashed_state); } } diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index 70616ba5eb8..e4292a52016 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -36,7 +36,7 @@ mod nibbles; pub use nibbles::{Nibbles, StoredNibbles, StoredNibblesSubKey}; mod storage; -pub use storage::StorageTrieEntry; +pub use storage::{StorageTrieEntry, TrieChangeSetsEntry}; mod subnode; pub use subnode::StoredSubNode; @@ -57,6 +57,9 @@ pub mod updates; pub mod added_removed_keys; +/// Utilities used by other modules in this crate. +mod utils; + /// Bincode-compatible serde implementations for trie types. /// /// `bincode` crate allows for more efficient serialization of trie types, because it allows diff --git a/crates/trie/common/src/nibbles.rs b/crates/trie/common/src/nibbles.rs index 7d9e6670beb..82d710395f9 100644 --- a/crates/trie/common/src/nibbles.rs +++ b/crates/trie/common/src/nibbles.rs @@ -28,10 +28,9 @@ impl reth_codecs::Compact for StoredNibbles { where B: bytes::BufMut + AsMut<[u8]>, { - for i in self.0.iter() { - buf.put_u8(i); - } - self.0.len() + let bytes = self.0.iter().collect::>(); + buf.put_slice(&bytes); + bytes.len() } fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) { @@ -78,14 +77,14 @@ impl reth_codecs::Compact for StoredNibblesSubKey { { assert!(self.0.len() <= 64); - // right-pad with zeros - for i in self.0.iter() { - buf.put_u8(i); - } + let bytes = self.0.iter().collect::>(); + buf.put_slice(&bytes); + + // Right-pad with zeros static ZERO: &[u8; 64] = &[0; 64]; - buf.put_slice(&ZERO[self.0.len()..]); + buf.put_slice(&ZERO[bytes.len()..]); - buf.put_u8(self.0.len() as u8); + buf.put_u8(bytes.len() as u8); 64 + 1 } diff --git a/crates/trie/common/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs index 6714893f16d..74fdb789113 100644 --- a/crates/trie/common/src/prefix_set.rs +++ b/crates/trie/common/src/prefix_set.rs @@ -71,16 +71,18 @@ pub struct TriePrefixSets { /// This data structure stores a set of `Nibbles` and provides methods to insert /// new elements and check whether any existing element has a given prefix. /// -/// Internally, this implementation uses a `Vec` and aims to act like a `BTreeSet` in being both -/// sorted and deduplicated. It does this by keeping a `sorted` flag. The `sorted` flag represents -/// whether or not the `Vec` is definitely sorted. When a new element is added, it is set to -/// `false.`. The `Vec` is sorted and deduplicated when `sorted` is `true` and: -/// * An element is being checked for inclusion (`contains`), or -/// * The set is being converted into an immutable `PrefixSet` (`freeze`) +/// Internally, this implementation stores keys in an unsorted `Vec` together with an +/// `all` flag. The `all` flag indicates that every entry should be considered changed and that +/// individual keys can be ignored. 
/// -/// This means that a `PrefixSet` will always be sorted and deduplicated when constructed from a -/// `PrefixSetMut`. +/// Sorting and deduplication do not happen during insertion or membership checks on this mutable +/// structure. Instead, keys are sorted and deduplicated when converting into the immutable +/// `PrefixSet` via `freeze()`. The immutable `PrefixSet` provides `contains` and relies on the +/// sorted and unique keys produced by `freeze()`; it does not perform additional sorting or +/// deduplication. /// +/// This guarantees that a `PrefixSet` constructed from a `PrefixSetMut` is always sorted and +/// deduplicated. /// # Examples /// /// ``` @@ -165,8 +167,7 @@ impl PrefixSetMut { } else { self.keys.sort_unstable(); self.keys.dedup(); - // We need to shrink in both the sorted and non-sorted cases because deduping may have - // occurred either on `freeze`, or during `contains`. + // Shrink after deduplication to release unused capacity. self.keys.shrink_to_fit(); PrefixSet { index: 0, all: false, keys: Arc::new(self.keys) } } @@ -280,8 +281,8 @@ mod tests { prefix_set_mut.insert(Nibbles::from_nibbles([4, 5, 6])); prefix_set_mut.insert(Nibbles::from_nibbles([1, 2, 3])); // Duplicate - assert_eq!(prefix_set_mut.keys.len(), 4); // Length should be 3 (including duplicate) - assert_eq!(prefix_set_mut.keys.capacity(), 4); // Capacity should be 4 (including duplicate) + assert_eq!(prefix_set_mut.keys.len(), 4); // Length is 4 (before deduplication) + assert_eq!(prefix_set_mut.keys.capacity(), 4); // Capacity is 4 (before deduplication) let mut prefix_set = prefix_set_mut.freeze(); assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([1, 2]))); @@ -300,8 +301,8 @@ mod tests { prefix_set_mut.insert(Nibbles::from_nibbles([4, 5, 6])); prefix_set_mut.insert(Nibbles::from_nibbles([1, 2, 3])); // Duplicate - assert_eq!(prefix_set_mut.keys.len(), 4); // Length should be 3 (including duplicate) - assert_eq!(prefix_set_mut.keys.capacity(), 101); // Capacity should be 101 (including duplicate) + assert_eq!(prefix_set_mut.keys.len(), 4); // Length is 4 (before deduplication) + assert_eq!(prefix_set_mut.keys.capacity(), 101); // Capacity is 101 (before deduplication) let mut prefix_set = prefix_set_mut.freeze(); assert!(prefix_set.contains(&Nibbles::from_nibbles_unchecked([1, 2]))); diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index b7961f047a4..a8e0bb59b93 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -89,6 +89,11 @@ impl MultiProofTargets { pub fn chunks(self, size: usize) -> ChunkedMultiProofTargets { ChunkedMultiProofTargets::new(self, size) } + + /// Returns the number of items that will be considered during chunking in `[Self::chunks]`. 
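The counting rule of the method that follows can be shown with plain numbers: an account with an empty slot set still contributes one item, otherwise it contributes one item per slot. A self-contained sketch using `(account, slot_count)` pairs as stand-ins for the hashed-address-to-slot-set map:

```rust
/// Count chunking items: `1 + (slots - 1)` saturating at zero, i.e.
/// `max(1, slots)` per account.
fn chunking_length(targets: &[(u8, usize)]) -> usize {
    targets
        .iter()
        .map(|(_, slots)| 1 + slots.saturating_sub(1))
        .sum()
}

fn main() {
    // One account without slots, one with two slots, one with three slots.
    let targets = [(1u8, 0usize), (2, 2), (3, 3)];
    assert_eq!(chunking_length(&targets), 1 + 2 + 3);
    println!("{}", chunking_length(&targets));
}
```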
+ pub fn chunking_length(&self) -> usize { + self.values().map(|slots| 1 + slots.len().saturating_sub(1)).sum::() + } } /// An iterator that yields chunks of the proof targets of at most `size` account and storage @@ -1067,4 +1072,33 @@ mod tests { acc.storage_root = EMPTY_ROOT_HASH; assert_eq!(acc, inverse); } + + #[test] + fn test_multiproof_targets_chunking_length() { + let mut targets = MultiProofTargets::default(); + targets.insert(B256::with_last_byte(1), B256Set::default()); + targets.insert( + B256::with_last_byte(2), + B256Set::from_iter([B256::with_last_byte(10), B256::with_last_byte(20)]), + ); + targets.insert( + B256::with_last_byte(3), + B256Set::from_iter([ + B256::with_last_byte(30), + B256::with_last_byte(31), + B256::with_last_byte(32), + ]), + ); + + let chunking_length = targets.chunking_length(); + for size in 1..=targets.clone().chunks(1).count() { + let chunk_count = targets.clone().chunks(size).count(); + let expected_count = chunking_length.div_ceil(size); + assert_eq!( + chunk_count, expected_count, + "chunking_length: {}, size: {}", + chunking_length, size + ); + } + } } diff --git a/crates/trie/common/src/storage.rs b/crates/trie/common/src/storage.rs index 187a097bfd4..1e567393864 100644 --- a/crates/trie/common/src/storage.rs +++ b/crates/trie/common/src/storage.rs @@ -1,6 +1,8 @@ use super::{BranchNodeCompact, StoredNibblesSubKey}; /// Account storage trie node. +/// +/// `nibbles` is the subkey when used as a value in the `StorageTrie` table. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] #[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct StorageTrieEntry { @@ -31,3 +33,173 @@ impl reth_codecs::Compact for StorageTrieEntry { (this, buf) } } + +/// Trie changeset entry representing the state of a trie node before a block. +/// +/// `nibbles` is the subkey when used as a value in the changeset tables. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] +pub struct TrieChangeSetsEntry { + /// The nibbles of the intermediate node + pub nibbles: StoredNibblesSubKey, + /// Node value prior to the block being processed, None indicating it didn't exist. 
+ pub node: Option, +} + +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for TrieChangeSetsEntry { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let nibbles_len = self.nibbles.to_compact(buf); + let node_len = self.node.as_ref().map(|node| node.to_compact(buf)).unwrap_or(0); + nibbles_len + node_len + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + if len == 0 { + // Return an empty entry without trying to parse anything + return ( + Self { nibbles: StoredNibblesSubKey::from(super::Nibbles::default()), node: None }, + buf, + ) + } + + let (nibbles, buf) = StoredNibblesSubKey::from_compact(buf, 65); + + if len <= 65 { + return (Self { nibbles, node: None }, buf) + } + + let (node, buf) = BranchNodeCompact::from_compact(buf, len - 65); + (Self { nibbles, node: Some(node) }, buf) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bytes::BytesMut; + use reth_codecs::Compact; + + #[test] + fn test_trie_changesets_entry_full_empty() { + // Test a fully empty entry (empty nibbles, None node) + let entry = TrieChangeSetsEntry { nibbles: StoredNibblesSubKey::from(vec![]), node: None }; + + let mut buf = BytesMut::new(); + let len = entry.to_compact(&mut buf); + + // Empty nibbles takes 65 bytes (64 for padding + 1 for length) + // None node adds 0 bytes + assert_eq!(len, 65); + assert_eq!(buf.len(), 65); + + // Deserialize and verify + let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len); + assert_eq!(decoded.nibbles.0.to_vec(), Vec::::new()); + assert_eq!(decoded.node, None); + assert_eq!(remaining.len(), 0); + } + + #[test] + fn test_trie_changesets_entry_none_node() { + // Test non-empty nibbles with None node + let nibbles_data = vec![0x01, 0x02, 0x03, 0x04]; + let entry = TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey::from(nibbles_data.clone()), + node: None, + }; + + let mut buf = BytesMut::new(); + let len = entry.to_compact(&mut buf); + + // Nibbles takes 65 bytes regardless of content + assert_eq!(len, 65); + + // Deserialize and verify + let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len); + assert_eq!(decoded.nibbles.0.to_vec(), nibbles_data); + assert_eq!(decoded.node, None); + assert_eq!(remaining.len(), 0); + } + + #[test] + fn test_trie_changesets_entry_empty_path_with_node() { + // Test empty path with Some node + // Using the same signature as in the codebase: (state_mask, hash_mask, tree_mask, hashes, + // value) + let test_node = BranchNodeCompact::new( + 0b1111_1111_1111_1111, // state_mask: all children present + 0b1111_1111_1111_1111, // hash_mask: all have hashes + 0b0000_0000_0000_0000, // tree_mask: no embedded trees + vec![], // hashes + None, // value + ); + + let entry = TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey::from(vec![]), + node: Some(test_node.clone()), + }; + + let mut buf = BytesMut::new(); + let len = entry.to_compact(&mut buf); + + // Calculate expected length + let mut temp_buf = BytesMut::new(); + let node_len = test_node.to_compact(&mut temp_buf); + assert_eq!(len, 65 + node_len); + + // Deserialize and verify + let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len); + assert_eq!(decoded.nibbles.0.to_vec(), Vec::::new()); + assert_eq!(decoded.node, Some(test_node)); + assert_eq!(remaining.len(), 0); + } + + #[test] + fn test_trie_changesets_entry_normal() { + // Test normal case: non-empty path with Some node + let nibbles_data = vec![0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f]; + // Using the same 
signature as in the codebase + let test_node = BranchNodeCompact::new( + 0b0000_0000_1111_0000, // state_mask: some children present + 0b0000_0000_0011_0000, // hash_mask: some have hashes + 0b0000_0000_0000_0000, // tree_mask: no embedded trees + vec![], // hashes (empty for this test) + None, // value + ); + + let entry = TrieChangeSetsEntry { + nibbles: StoredNibblesSubKey::from(nibbles_data.clone()), + node: Some(test_node.clone()), + }; + + let mut buf = BytesMut::new(); + let len = entry.to_compact(&mut buf); + + // Verify serialization length + let mut temp_buf = BytesMut::new(); + let node_len = test_node.to_compact(&mut temp_buf); + assert_eq!(len, 65 + node_len); + + // Deserialize and verify + let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, len); + assert_eq!(decoded.nibbles.0.to_vec(), nibbles_data); + assert_eq!(decoded.node, Some(test_node)); + assert_eq!(remaining.len(), 0); + } + + #[test] + fn test_trie_changesets_entry_from_compact_zero_len() { + // Test from_compact with zero length + let buf = vec![0x01, 0x02, 0x03]; + let (decoded, remaining) = TrieChangeSetsEntry::from_compact(&buf, 0); + + // Should return empty nibbles and None node + assert_eq!(decoded.nibbles.0.to_vec(), Vec::::new()); + assert_eq!(decoded.node, None); + assert_eq!(remaining, &buf[..]); // Buffer should be unchanged + } +} diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index 441e407db16..b0d178cd1d0 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -1,4 +1,4 @@ -use crate::{BranchNodeCompact, HashBuilder, Nibbles}; +use crate::{utils::extend_sorted_vec, BranchNodeCompact, HashBuilder, Nibbles}; use alloc::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, vec::Vec, @@ -432,12 +432,40 @@ pub struct TrieUpdatesSortedRef<'a> { pub struct TrieUpdatesSorted { /// Sorted collection of updated state nodes with corresponding paths. None indicates that a /// node was removed. - pub account_nodes: Vec<(Nibbles, Option)>, + account_nodes: Vec<(Nibbles, Option)>, /// Storage tries stored by hashed address of the account the trie belongs to. - pub storage_tries: B256Map, + storage_tries: B256Map, } impl TrieUpdatesSorted { + /// Creates a new `TrieUpdatesSorted` with the given account nodes and storage tries. + /// + /// # Panics + /// + /// In debug mode, panics if `account_nodes` is not sorted by the `Nibbles` key, + /// or if any storage trie's `storage_nodes` is not sorted by its `Nibbles` key. + pub fn new( + account_nodes: Vec<(Nibbles, Option)>, + storage_tries: B256Map, + ) -> Self { + debug_assert!( + account_nodes.is_sorted_by_key(|item| &item.0), + "account_nodes must be sorted by Nibbles key" + ); + debug_assert!( + storage_tries.values().all(|storage_trie| { + storage_trie.storage_nodes.is_sorted_by_key(|item| &item.0) + }), + "all storage_nodes in storage_tries must be sorted by Nibbles key" + ); + Self { account_nodes, storage_tries } + } + + /// Returns `true` if the updates are empty. + pub fn is_empty(&self) -> bool { + self.account_nodes.is_empty() && self.storage_tries.is_empty() + } + /// Returns reference to updated account nodes. pub fn account_nodes_ref(&self) -> &[(Nibbles, Option)] { &self.account_nodes @@ -447,6 +475,30 @@ impl TrieUpdatesSorted { pub const fn storage_tries_ref(&self) -> &B256Map { &self.storage_tries } + + /// Returns the total number of updates including account nodes and all storage updates. 
+ pub fn total_len(&self) -> usize { + self.account_nodes.len() + + self.storage_tries.values().map(|storage| storage.len()).sum::() + } + + /// Extends the trie updates with another set of sorted updates. + /// + /// This merges the account nodes and storage tries from `other` into `self`. + /// Account nodes are merged and re-sorted, with `other`'s values taking precedence + /// for duplicate keys. + pub fn extend_ref(&mut self, other: &Self) { + // Extend account nodes + extend_sorted_vec(&mut self.account_nodes, &other.account_nodes); + + // Merge storage tries + for (hashed_address, storage_trie) in &other.storage_tries { + self.storage_tries + .entry(*hashed_address) + .and_modify(|existing| existing.extend_ref(storage_trie)) + .or_insert_with(|| storage_trie.clone()); + } + } } impl AsRef for TrieUpdatesSorted { @@ -455,6 +507,29 @@ impl AsRef for TrieUpdatesSorted { } } +impl From for TrieUpdates { + fn from(sorted: TrieUpdatesSorted) -> Self { + let mut account_nodes = HashMap::default(); + let mut removed_nodes = HashSet::default(); + + for (nibbles, node) in sorted.account_nodes { + if let Some(node) = node { + account_nodes.insert(nibbles, node); + } else { + removed_nodes.insert(nibbles); + } + } + + let storage_tries = sorted + .storage_tries + .into_iter() + .map(|(address, storage)| (address, storage.into())) + .collect(); + + Self { account_nodes, removed_nodes, storage_tries } + } +} + /// Sorted storage trie updates reference used for serializing to file. #[derive(PartialEq, Eq, Clone, Default, Debug)] #[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize))] @@ -488,6 +563,33 @@ impl StorageTrieUpdatesSorted { pub fn storage_nodes_ref(&self) -> &[(Nibbles, Option)] { &self.storage_nodes } + + /// Returns the total number of storage node updates. + pub const fn len(&self) -> usize { + self.storage_nodes.len() + } + + /// Returns `true` if there are no storage node updates. + pub const fn is_empty(&self) -> bool { + self.storage_nodes.is_empty() + } + + /// Extends the storage trie updates with another set of sorted updates. + /// + /// If `other` is marked as deleted, this will be marked as deleted and all nodes cleared. + /// Otherwise, nodes are merged with `other`'s values taking precedence for duplicates. + pub fn extend_ref(&mut self, other: &Self) { + if other.is_deleted { + self.is_deleted = true; + self.storage_nodes.clear(); + self.storage_nodes.extend(other.storage_nodes.iter().cloned()); + return; + } + + // Extend storage nodes + extend_sorted_vec(&mut self.storage_nodes, &other.storage_nodes); + self.is_deleted = self.is_deleted || other.is_deleted; + } } /// Excludes empty nibbles from the given iterator. 
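The `extend_ref` methods introduced above merge two already-sorted update sets: entries from the right-hand side win on duplicate paths, and an incoming storage trie marked `is_deleted` clears the nodes accumulated so far before its own are applied. Below is a minimal sketch of the account-node semantics, assuming the `TrieUpdatesSorted::new` constructor and accessors added in this diff and the usual `reth_trie_common` / `alloy_primitives` re-exports; the concrete paths and values are illustrative only.

```rust
use alloy_primitives::map::B256Map;
use reth_trie_common::{updates::TrieUpdatesSorted, BranchNodeCompact, Nibbles};

fn main() {
    // Base updates: node at path 0x1 updated, node at path 0x3 removed (`None`).
    let mut base = TrieUpdatesSorted::new(
        vec![
            (Nibbles::from_nibbles_unchecked([0x1]), Some(BranchNodeCompact::default())),
            (Nibbles::from_nibbles_unchecked([0x3]), None),
        ],
        B256Map::default(),
    );

    // Newer updates: a node appears at 0x2 and the node at 0x3 is re-added.
    let newer = TrieUpdatesSorted::new(
        vec![
            (Nibbles::from_nibbles_unchecked([0x2]), Some(BranchNodeCompact::default())),
            (Nibbles::from_nibbles_unchecked([0x3]), Some(BranchNodeCompact::default())),
        ],
        B256Map::default(),
    );

    // The merge keeps the result sorted and lets `newer` win on the duplicate 0x3 path.
    base.extend_ref(&newer);
    assert_eq!(base.account_nodes_ref().len(), 3);
    assert!(base.account_nodes_ref()[2].1.is_some());
}
```

The storage side follows the same rule via `StorageTrieUpdatesSorted::extend_ref`, except that an incoming trie marked deleted first clears the existing nodes.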
@@ -502,6 +604,153 @@ fn exclude_empty_from_pair( iter.into_iter().filter(|(n, _)| !n.is_empty()) } +impl From for StorageTrieUpdates { + fn from(sorted: StorageTrieUpdatesSorted) -> Self { + let mut storage_nodes = HashMap::default(); + let mut removed_nodes = HashSet::default(); + + for (nibbles, node) in sorted.storage_nodes { + if let Some(node) = node { + storage_nodes.insert(nibbles, node); + } else { + removed_nodes.insert(nibbles); + } + } + + Self { is_deleted: sorted.is_deleted, storage_nodes, removed_nodes } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + + #[test] + fn test_trie_updates_sorted_extend_ref() { + // Test extending with empty updates + let mut updates1 = TrieUpdatesSorted::default(); + let updates2 = TrieUpdatesSorted::default(); + updates1.extend_ref(&updates2); + assert_eq!(updates1.account_nodes.len(), 0); + assert_eq!(updates1.storage_tries.len(), 0); + + // Test extending account nodes + let mut updates1 = TrieUpdatesSorted { + account_nodes: vec![ + (Nibbles::from_nibbles_unchecked([0x01]), Some(BranchNodeCompact::default())), + (Nibbles::from_nibbles_unchecked([0x03]), None), + ], + storage_tries: B256Map::default(), + }; + let updates2 = TrieUpdatesSorted { + account_nodes: vec![ + (Nibbles::from_nibbles_unchecked([0x02]), Some(BranchNodeCompact::default())), + (Nibbles::from_nibbles_unchecked([0x03]), Some(BranchNodeCompact::default())), /* Override */ + ], + storage_tries: B256Map::default(), + }; + updates1.extend_ref(&updates2); + assert_eq!(updates1.account_nodes.len(), 3); + // Should be sorted: 0x01, 0x02, 0x03 + assert_eq!(updates1.account_nodes[0].0, Nibbles::from_nibbles_unchecked([0x01])); + assert_eq!(updates1.account_nodes[1].0, Nibbles::from_nibbles_unchecked([0x02])); + assert_eq!(updates1.account_nodes[2].0, Nibbles::from_nibbles_unchecked([0x03])); + // 0x03 should have Some value from updates2 (override) + assert!(updates1.account_nodes[2].1.is_some()); + + // Test extending storage tries + let storage_trie1 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![( + Nibbles::from_nibbles_unchecked([0x0a]), + Some(BranchNodeCompact::default()), + )], + }; + let storage_trie2 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![(Nibbles::from_nibbles_unchecked([0x0b]), None)], + }; + + let hashed_address1 = B256::from([1; 32]); + let hashed_address2 = B256::from([2; 32]); + + let mut updates1 = TrieUpdatesSorted { + account_nodes: vec![], + storage_tries: B256Map::from_iter([(hashed_address1, storage_trie1.clone())]), + }; + let updates2 = TrieUpdatesSorted { + account_nodes: vec![], + storage_tries: B256Map::from_iter([ + (hashed_address1, storage_trie2), + (hashed_address2, storage_trie1), + ]), + }; + updates1.extend_ref(&updates2); + assert_eq!(updates1.storage_tries.len(), 2); + assert!(updates1.storage_tries.contains_key(&hashed_address1)); + assert!(updates1.storage_tries.contains_key(&hashed_address2)); + // Check that storage trie for hashed_address1 was extended + let merged_storage = &updates1.storage_tries[&hashed_address1]; + assert_eq!(merged_storage.storage_nodes.len(), 2); + } + + #[test] + fn test_storage_trie_updates_sorted_extend_ref_deleted() { + // Test case 1: Extending with a deleted storage trie that has nodes + let mut storage1 = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + (Nibbles::from_nibbles_unchecked([0x01]), Some(BranchNodeCompact::default())), + (Nibbles::from_nibbles_unchecked([0x02]), None), + ], + }; + + let 
storage2 = StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![ + (Nibbles::from_nibbles_unchecked([0x03]), Some(BranchNodeCompact::default())), + (Nibbles::from_nibbles_unchecked([0x04]), None), + ], + }; + + storage1.extend_ref(&storage2); + + // Should be marked as deleted + assert!(storage1.is_deleted); + // Original nodes should be cleared, but other's nodes should be added + assert_eq!(storage1.storage_nodes.len(), 2); + assert_eq!(storage1.storage_nodes[0].0, Nibbles::from_nibbles_unchecked([0x03])); + assert_eq!(storage1.storage_nodes[1].0, Nibbles::from_nibbles_unchecked([0x04])); + + // Test case 2: Extending a deleted storage trie with more nodes + let mut storage3 = StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![( + Nibbles::from_nibbles_unchecked([0x05]), + Some(BranchNodeCompact::default()), + )], + }; + + let storage4 = StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![ + (Nibbles::from_nibbles_unchecked([0x06]), Some(BranchNodeCompact::default())), + (Nibbles::from_nibbles_unchecked([0x07]), None), + ], + }; + + storage3.extend_ref(&storage4); + + // Should remain deleted + assert!(storage3.is_deleted); + // Should have nodes from other (original cleared then extended) + assert_eq!(storage3.storage_nodes.len(), 2); + assert_eq!(storage3.storage_nodes[0].0, Nibbles::from_nibbles_unchecked([0x06])); + assert_eq!(storage3.storage_nodes[1].0, Nibbles::from_nibbles_unchecked([0x07])); + } +} + /// Bincode-compatible trie updates type serde implementations. #[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { @@ -717,7 +966,7 @@ pub mod serde_bincode_compat { } #[cfg(all(test, feature = "serde"))] -mod tests { +mod serde_tests { use super::*; #[test] diff --git a/crates/trie/common/src/utils.rs b/crates/trie/common/src/utils.rs new file mode 100644 index 00000000000..5a2234fe26b --- /dev/null +++ b/crates/trie/common/src/utils.rs @@ -0,0 +1,53 @@ +use alloc::vec::Vec; + +/// Helper function to extend a sorted vector with another sorted vector. +/// Values from `other` take precedence for duplicate keys. +/// +/// This function efficiently merges two sorted vectors by: +/// 1. Iterating through the target vector with mutable references +/// 2. Using a peekable iterator for the other vector +/// 3. For each target item, processing other items that come before or equal to it +/// 4. Collecting items from other that need to be inserted +/// 5. 
Appending and re-sorting only if new items were added +pub(crate) fn extend_sorted_vec(target: &mut Vec<(K, V)>, other: &[(K, V)]) +where + K: Clone + Ord, + V: Clone, +{ + if other.is_empty() { + return; + } + + let mut other_iter = other.iter().peekable(); + let mut to_insert = Vec::new(); + + // Iterate through target and update/collect items from other + for target_item in target.iter_mut() { + while let Some(other_item) = other_iter.peek() { + use core::cmp::Ordering; + match other_item.0.cmp(&target_item.0) { + Ordering::Less => { + // Other item comes before current target item, collect it + to_insert.push(other_iter.next().unwrap().clone()); + } + Ordering::Equal => { + // Same key, update target with other's value + target_item.1 = other_iter.next().unwrap().1.clone(); + break; + } + Ordering::Greater => { + // Other item comes after current target item, keep target unchanged + break; + } + } + } + } + + // Append collected new items, as well as any remaining from `other` which are necessarily also + // new, and sort if needed + if !to_insert.is_empty() || other_iter.peek().is_some() { + target.extend(to_insert); + target.extend(other_iter.cloned()); + target.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + } +} diff --git a/crates/trie/db/src/hashed_cursor.rs b/crates/trie/db/src/hashed_cursor.rs index 06e6914275c..4fe3d57429f 100644 --- a/crates/trie/db/src/hashed_cursor.rs +++ b/crates/trie/db/src/hashed_cursor.rs @@ -20,18 +20,23 @@ impl DatabaseHashedCursorFactory { } impl HashedCursorFactory for DatabaseHashedCursorFactory<&TX> { - type AccountCursor = DatabaseHashedAccountCursor<::Cursor>; - type StorageCursor = - DatabaseHashedStorageCursor<::DupCursor>; - - fn hashed_account_cursor(&self) -> Result { + type AccountCursor<'a> + = DatabaseHashedAccountCursor<::Cursor> + where + Self: 'a; + type StorageCursor<'a> + = DatabaseHashedStorageCursor<::DupCursor> + where + Self: 'a; + + fn hashed_account_cursor(&self) -> Result, DatabaseError> { Ok(DatabaseHashedAccountCursor(self.0.cursor_read::()?)) } fn hashed_storage_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { Ok(DatabaseHashedStorageCursor::new( self.0.cursor_dup_read::()?, hashed_address, diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs index 8b338001fae..8f79c21c156 100644 --- a/crates/trie/db/src/proof.rs +++ b/crates/trie/db/src/proof.rs @@ -11,13 +11,16 @@ use reth_trie::{ }; /// Extends [`Proof`] with operations specific for working with a database transaction. -pub trait DatabaseProof<'a, TX> { - /// Create a new [Proof] from database transaction. - fn from_tx(tx: &'a TX) -> Self; +pub trait DatabaseProof<'a> { + /// Associated type for the database transaction. + type Tx; + + /// Create a new [`Proof`] instance from database transaction. + fn from_tx(tx: &'a Self::Tx) -> Self; /// Generates the state proof for target account based on [`TrieInput`]. fn overlay_account_proof( - tx: &'a TX, + &self, input: TrieInput, address: Address, slots: &[B256], @@ -25,59 +28,49 @@ pub trait DatabaseProof<'a, TX> { /// Generates the state [`MultiProof`] for target hashed account and storage keys. fn overlay_multiproof( - tx: &'a TX, + &self, input: TrieInput, targets: MultiProofTargets, ) -> Result; } -impl<'a, TX: DbTx> DatabaseProof<'a, TX> +impl<'a, TX: DbTx> DatabaseProof<'a> for Proof, DatabaseHashedCursorFactory<&'a TX>> { - /// Create a new [Proof] instance from database transaction. 
- fn from_tx(tx: &'a TX) -> Self { + type Tx = TX; + + fn from_tx(tx: &'a Self::Tx) -> Self { Self::new(DatabaseTrieCursorFactory::new(tx), DatabaseHashedCursorFactory::new(tx)) } - fn overlay_account_proof( - tx: &'a TX, + &self, input: TrieInput, address: Address, slots: &[B256], ) -> Result { let nodes_sorted = input.nodes.into_sorted(); let state_sorted = input.state.into_sorted(); - Self::from_tx(tx) - .with_trie_cursor_factory(InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(tx), - &nodes_sorted, - )) - .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(tx), - &state_sorted, - )) - .with_prefix_sets_mut(input.prefix_sets) - .account_proof(address, slots) + Proof::new( + InMemoryTrieCursorFactory::new(self.trie_cursor_factory().clone(), &nodes_sorted), + HashedPostStateCursorFactory::new(self.hashed_cursor_factory().clone(), &state_sorted), + ) + .with_prefix_sets_mut(input.prefix_sets) + .account_proof(address, slots) } fn overlay_multiproof( - tx: &'a TX, + &self, input: TrieInput, targets: MultiProofTargets, ) -> Result { let nodes_sorted = input.nodes.into_sorted(); let state_sorted = input.state.into_sorted(); - Self::from_tx(tx) - .with_trie_cursor_factory(InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(tx), - &nodes_sorted, - )) - .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(tx), - &state_sorted, - )) - .with_prefix_sets_mut(input.prefix_sets) - .multiproof(targets) + Proof::new( + InMemoryTrieCursorFactory::new(self.trie_cursor_factory().clone(), &nodes_sorted), + HashedPostStateCursorFactory::new(self.hashed_cursor_factory().clone(), &state_sorted), + ) + .with_prefix_sets_mut(input.prefix_sets) + .multiproof(targets) } } diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 256ee20794e..6d37c5f3413 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -20,7 +20,7 @@ use std::{ collections::HashMap, ops::{RangeBounds, RangeInclusive}, }; -use tracing::debug; +use tracing::{debug, instrument}; /// Extends [`StateRoot`] with operations specific for working with a database transaction. 
pub trait DatabaseStateRoot<'a, TX>: Sized { @@ -226,6 +226,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } impl DatabaseHashedPostState for HashedPostState { + #[instrument(target = "trie::db", skip(tx), fields(range))] fn from_reverts( tx: &TX, range: impl RangeBounds, diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs index 62d376d1b54..d05c3fd92da 100644 --- a/crates/trie/db/src/trie_cursor.rs +++ b/crates/trie/db/src/trie_cursor.rs @@ -7,7 +7,7 @@ use reth_db_api::{ }; use reth_trie::{ trie_cursor::{TrieCursor, TrieCursorFactory}, - updates::StorageTrieUpdates, + updates::StorageTrieUpdatesSorted, BranchNodeCompact, Nibbles, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, }; @@ -26,18 +26,24 @@ impl TrieCursorFactory for DatabaseTrieCursorFactory<&TX> where TX: DbTx, { - type AccountTrieCursor = DatabaseAccountTrieCursor<::Cursor>; - type StorageTrieCursor = - DatabaseStorageTrieCursor<::DupCursor>; + type AccountTrieCursor<'a> + = DatabaseAccountTrieCursor<::Cursor> + where + Self: 'a; - fn account_trie_cursor(&self) -> Result { + type StorageTrieCursor<'a> + = DatabaseStorageTrieCursor<::DupCursor> + where + Self: 'a; + + fn account_trie_cursor(&self) -> Result, DatabaseError> { Ok(DatabaseAccountTrieCursor::new(self.0.cursor_read::()?)) } fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { Ok(DatabaseStorageTrieCursor::new( self.0.cursor_dup_read::()?, hashed_address, @@ -110,31 +116,19 @@ where + DbDupCursorRO + DbDupCursorRW, { - /// Writes storage updates - pub fn write_storage_trie_updates( + /// Writes storage updates that are already sorted + pub fn write_storage_trie_updates_sorted( &mut self, - updates: &StorageTrieUpdates, + updates: &StorageTrieUpdatesSorted, ) -> Result { // The storage trie for this account has to be deleted. if updates.is_deleted() && self.cursor.seek_exact(self.hashed_address)?.is_some() { self.cursor.delete_current_duplicates()?; } - // Merge updated and removed nodes. Updated nodes must take precedence. - let mut storage_updates = updates - .removed_nodes_ref() - .iter() - .filter_map(|n| (!updates.storage_nodes_ref().contains_key(n)).then_some((n, None))) - .collect::>(); - storage_updates.extend( - updates.storage_nodes_ref().iter().map(|(nibbles, node)| (nibbles, Some(node))), - ); - - // Sort trie node updates. - storage_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); - let mut num_entries = 0; - for (nibbles, maybe_updated) in storage_updates.into_iter().filter(|(n, _)| !n.is_empty()) { + for (nibbles, maybe_updated) in updates.storage_nodes.iter().filter(|(n, _)| !n.is_empty()) + { num_entries += 1; let nibbles = StoredNibblesSubKey(*nibbles); // Delete the old entry if it exists. 
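The cursor-factory impls above (here and in `hashed_cursor.rs`) switch from plain associated types to lifetime-parameterized ones, so a factory wrapping a `&TX` can hand out cursors that borrow from that transaction. The following is a standalone sketch of the generic-associated-type pattern being used; the trait and type names are illustrative, not the actual reth definitions.

```rust
// Illustrative GAT-based factory: the cursor type is allowed to borrow from the factory.
trait CursorFactory {
    type Cursor<'a>
    where
        Self: 'a;

    fn cursor(&self) -> Self::Cursor<'_>;
}

// Stand-ins for a transaction-backed factory and its borrowing cursor.
struct DbFactory<'tx> {
    tx: &'tx str,
}

struct DbCursor<'a> {
    tx: &'a str,
}

impl<'tx> CursorFactory for DbFactory<'tx> {
    type Cursor<'a>
        = DbCursor<'a>
    where
        Self: 'a;

    fn cursor(&self) -> Self::Cursor<'_> {
        DbCursor { tx: self.tx }
    }
}

fn main() {
    let factory = DbFactory { tx: "transaction handle" };
    let cursor = factory.cursor();
    println!("cursor borrows: {}", cursor.tx);
}
```

Separately, `write_storage_trie_updates_sorted` above now expects updates that were merged and sorted once up front (e.g. via `into_sorted()`), as the test change below demonstrates, rather than re-sorting inside the cursor on every write.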
diff --git a/crates/trie/db/tests/proof.rs b/crates/trie/db/tests/proof.rs index 401ba07b22d..402f0cabff3 100644 --- a/crates/trie/db/tests/proof.rs +++ b/crates/trie/db/tests/proof.rs @@ -86,7 +86,8 @@ fn testspec_proofs() { let provider = factory.provider().unwrap(); for (target, expected_proof) in data { let target = Address::from_str(target).unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!( account_proof.proof, expected_proof, @@ -106,7 +107,8 @@ fn testspec_empty_storage_proof() { let slots = Vec::from([B256::with_last_byte(1), B256::with_last_byte(3)]); let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &slots).unwrap(); assert_eq!(account_proof.storage_root, EMPTY_ROOT_HASH, "expected empty storage root"); assert_eq!(slots.len(), account_proof.storage_proofs.len()); @@ -141,7 +143,8 @@ fn mainnet_genesis_account_proof() { ]); let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); assert_eq!(account_proof.verify(root), Ok(())); } @@ -164,7 +167,8 @@ fn mainnet_genesis_account_proof_nonexistent() { ]); let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); assert_eq!(account_proof.verify(root), Ok(())); } @@ -259,7 +263,8 @@ fn holesky_deposit_contract_proof() { }; let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &slots).unwrap(); similar_asserts::assert_eq!(account_proof, expected); assert_eq!(account_proof.verify(root), Ok(())); } diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index e9fcb5a1c48..8f543a711d8 100644 --- a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -81,7 +81,11 @@ fn incremental_vs_full_root(inputs: &[&str], modified: &str) { let modified_root = loader.root().unwrap(); // Update the intermediate roots table so that we can run the incremental verification - tx.write_storage_trie_updates(core::iter::once((&hashed_address, &trie_updates))).unwrap(); + tx.write_storage_trie_updates_sorted(core::iter::once(( + &hashed_address, + &trie_updates.into_sorted(), + ))) + .unwrap(); // 3. 
Calculate the incremental root let mut storage_changes = PrefixSetMut::default(); @@ -620,7 +624,7 @@ fn account_trie_around_extension_node_with_dbtrie() { let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); assert_eq!(expected, got); - tx.write_trie_updates(&updates).unwrap(); + tx.write_trie_updates(updates).unwrap(); // read the account updates from the db let mut accounts_trie = tx.tx_ref().cursor_read::().unwrap(); @@ -667,7 +671,7 @@ proptest! { state.iter().map(|(&key, &balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty()))) ); assert_eq!(expected_root, state_root); - tx.write_trie_updates(&trie_updates).unwrap(); + tx.write_trie_updates(trie_updates).unwrap(); } } } diff --git a/crates/trie/db/tests/witness.rs b/crates/trie/db/tests/witness.rs index 5dfa1c3e4ae..14457fccc6e 100644 --- a/crates/trie/db/tests/witness.rs +++ b/crates/trie/db/tests/witness.rs @@ -41,7 +41,8 @@ fn includes_empty_node_preimage() { provider.insert_account_for_hashing([(address, Some(Account::default()))]).unwrap(); let state_root = StateRoot::from_tx(provider.tx_ref()).root().unwrap(); - let multiproof = Proof::from_tx(provider.tx_ref()) + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let multiproof = proof .multiproof(MultiProofTargets::from_iter([( hashed_address, HashSet::from_iter([hashed_slot]), @@ -82,7 +83,8 @@ fn includes_nodes_for_destroyed_storage_nodes() { .unwrap(); let state_root = StateRoot::from_tx(provider.tx_ref()).root().unwrap(); - let multiproof = Proof::from_tx(provider.tx_ref()) + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let multiproof = proof .multiproof(MultiProofTargets::from_iter([( hashed_address, HashSet::from_iter([hashed_slot]), @@ -130,7 +132,8 @@ fn correctly_decodes_branch_node_values() { .unwrap(); let state_root = StateRoot::from_tx(provider.tx_ref()).root().unwrap(); - let multiproof = Proof::from_tx(provider.tx_ref()) + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let multiproof = proof .multiproof(MultiProofTargets::from_iter([( hashed_address, HashSet::from_iter([hashed_slot1, hashed_slot2]), diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index c9f625a1500..9fb882b44a5 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -13,12 +13,10 @@ workspace = true [dependencies] # reth -reth-db-api.workspace = true reth-execution-errors.workspace = true reth-provider.workspace = true reth-storage-errors.workspace = true reth-trie-common.workspace = true -reth-trie-db.workspace = true reth-trie-sparse = { workspace = true, features = ["std"] } reth-trie.workspace = true @@ -36,6 +34,7 @@ derive_more.workspace = true rayon.workspace = true itertools.workspace = true tokio = { workspace = true, features = ["rt-multi-thread"] } +crossbeam-channel.workspace = true # `metrics` feature reth-metrics = { workspace = true, optional = true } @@ -45,6 +44,7 @@ metrics = { workspace = true, optional = true } # reth reth-primitives-traits.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } +reth-trie-db.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } # misc @@ -58,7 +58,6 @@ tokio = { workspace = true, features = ["rt", "rt-multi-thread", "macros"] } default = ["metrics"] metrics = ["reth-metrics", "dep:metrics", "reth-trie/metrics", "reth-trie-sparse/metrics"] test-utils = [ - "reth-db-api/test-utils", "reth-primitives-traits/test-utils", 
"reth-provider/test-utils", "reth-trie-common/test-utils", diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index fe1953b9055..53719892748 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -5,7 +5,8 @@ use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use proptest_arbitrary_interop::arb; use reth_primitives_traits::Account; use reth_provider::{ - providers::ConsistentDbView, test_utils::create_test_provider_factory, StateWriter, TrieWriter, + providers::OverlayStateProviderFactory, test_utils::create_test_provider_factory, StateWriter, + TrieWriter, }; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StateRoot, @@ -33,11 +34,11 @@ pub fn calculate_state_root(c: &mut Criterion) { provider_rw.write_hashed_state(&db_state.into_sorted()).unwrap(); let (_, updates) = StateRoot::from_tx(provider_rw.tx_ref()).root_with_updates().unwrap(); - provider_rw.write_trie_updates(&updates).unwrap(); + provider_rw.write_trie_updates(updates).unwrap(); provider_rw.commit().unwrap(); } - let view = ConsistentDbView::new(provider_factory.clone(), None); + let factory = OverlayStateProviderFactory::new(provider_factory.clone()); // state root group.bench_function(BenchmarkId::new("sync root", size), |b| { @@ -65,10 +66,8 @@ pub fn calculate_state_root(c: &mut Criterion) { group.bench_function(BenchmarkId::new("parallel root", size), |b| { b.iter_with_setup( || { - ParallelStateRoot::new( - view.clone(), - TrieInput::from_state(updated_state.clone()), - ) + let trie_input = TrieInput::from_state(updated_state.clone()); + ParallelStateRoot::new(factory.clone(), trie_input.prefix_sets.freeze()) }, |calculator| calculator.incremental_root(), ); diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index d6e1b57ed9b..433c13fb08f 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -1,40 +1,23 @@ use crate::{ metrics::ParallelTrieMetrics, - proof_task::{ProofTaskKind, ProofTaskManagerHandle, StorageProofInput}, + proof_task::{ + AccountMultiproofInput, ProofResultContext, ProofResultMessage, ProofWorkerHandle, + StorageProofInput, + }, root::ParallelStateRootError, - stats::ParallelTrieTracker, StorageRootTargets, }; -use alloy_primitives::{ - map::{B256Map, B256Set, HashMap}, - B256, -}; -use alloy_rlp::{BufMut, Encodable}; +use alloy_primitives::{map::B256Set, B256}; +use crossbeam_channel::{unbounded as crossbeam_unbounded, Receiver as CrossbeamReceiver}; use dashmap::DashMap; -use itertools::Itertools; use reth_execution_errors::StorageRootError; -use reth_provider::{ - providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, FactoryTx, - ProviderError, -}; use reth_storage_errors::db::DatabaseError; use reth_trie::{ - hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, - node_iter::{TrieElement, TrieNodeIter}, - prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSetsMut}, - proof::StorageProof, - trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, - updates::TrieUpdatesSorted, - walker::TrieWalker, - DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostStateSorted, - MultiProofTargets, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, -}; -use reth_trie_common::{ - added_removed_keys::MultiAddedRemovedKeys, - proof::{DecodedProofNodes, ProofRetainer}, + prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets, TriePrefixSetsMut}, + DecodedMultiProof, 
DecodedStorageMultiProof, HashedPostState, MultiProofTargets, Nibbles, }; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; -use std::sync::{mpsc::Receiver, Arc}; +use reth_trie_common::added_removed_keys::MultiAddedRemovedKeys; +use std::{sync::Arc, time::Instant}; use tracing::trace; /// Parallel proof calculator. @@ -42,24 +25,15 @@ use tracing::trace; /// This can collect proof for many targets in parallel, spawning a task for each hashed address /// that has proof targets. #[derive(Debug)] -pub struct ParallelProof { - /// Consistent view of the database. - view: ConsistentDbView, - /// The sorted collection of cached in-memory intermediate trie nodes that - /// can be reused for computation. - pub nodes_sorted: Arc, - /// The sorted in-memory overlay hashed state. - pub state_sorted: Arc, - /// The collection of prefix sets for the computation. Since the prefix sets _always_ - /// invalidate the in-memory nodes, not all keys from `state_sorted` might be present here, - /// if we have cached nodes for them. +pub struct ParallelProof { + /// The collection of prefix sets for the computation. pub prefix_sets: Arc, /// Flag indicating whether to include branch node masks in the proof. collect_branch_node_masks: bool, /// Provided by the user to give the necessary context to retain extra proofs. multi_added_removed_keys: Option>, - /// Handle to the storage proof task. - storage_proof_task_handle: ProofTaskManagerHandle>, + /// Handle to the proof worker pools. + proof_worker_handle: ProofWorkerHandle, /// Cached storage proof roots for missed leaves; this maps /// hashed (missed) addresses to their storage proof roots. missed_leaves_storage_roots: Arc>, @@ -67,25 +41,19 @@ pub struct ParallelProof { metrics: ParallelTrieMetrics, } -impl ParallelProof { +impl ParallelProof { /// Create new state proof generator. pub fn new( - view: ConsistentDbView, - nodes_sorted: Arc, - state_sorted: Arc, prefix_sets: Arc, missed_leaves_storage_roots: Arc>, - storage_proof_task_handle: ProofTaskManagerHandle>, + proof_worker_handle: ProofWorkerHandle, ) -> Self { Self { - view, - nodes_sorted, - state_sorted, prefix_sets, missed_leaves_storage_roots, collect_branch_node_masks: false, multi_added_removed_keys: None, - storage_proof_task_handle, + proof_worker_handle, #[cfg(feature = "metrics")] metrics: ParallelTrieMetrics::new_with_labels(&[("type", "proof")]), } @@ -106,19 +74,16 @@ impl ParallelProof { self.multi_added_removed_keys = multi_added_removed_keys; self } -} - -impl ParallelProof -where - Factory: DatabaseProviderFactory + Clone + 'static, -{ /// Queues a storage proof task and returns a receiver for the result. 
- fn queue_storage_proof( + fn send_storage_proof( &self, hashed_address: B256, prefix_set: PrefixSet, target_slots: B256Set, - ) -> Receiver> { + ) -> Result, ParallelStateRootError> { + let (result_tx, result_rx) = crossbeam_channel::unbounded(); + let start = Instant::now(); + let input = StorageProofInput::new( hashed_address, prefix_set, @@ -127,10 +92,14 @@ where self.multi_added_removed_keys.clone(), ); - let (sender, receiver) = std::sync::mpsc::channel(); - let _ = - self.storage_proof_task_handle.queue_task(ProofTaskKind::StorageProof(input, sender)); - receiver + self.proof_worker_handle + .dispatch_storage_proof( + input, + ProofResultContext::new(result_tx, 0, HashedPostState::default(), start), + ) + .map_err(|e| ParallelStateRootError::Other(e.to_string()))?; + + Ok(result_rx) } /// Generate a storage multiproof according to the specified targets and hashed address. @@ -150,13 +119,28 @@ where "Starting storage proof generation" ); - let receiver = self.queue_storage_proof(hashed_address, prefix_set, target_slots); - let proof_result = receiver.recv().map_err(|_| { + let receiver = self.send_storage_proof(hashed_address, prefix_set, target_slots)?; + let proof_msg = receiver.recv().map_err(|_| { ParallelStateRootError::StorageRoot(StorageRootError::Database(DatabaseError::Other( format!("channel closed for {hashed_address}"), ))) })?; + // Extract storage proof directly from the result + let storage_proof = match proof_msg.result? { + crate::proof_task::ProofResult::StorageProof { hashed_address: addr, proof } => { + debug_assert_eq!( + addr, + hashed_address, + "storage worker must return same address: expected {hashed_address}, got {addr}" + ); + proof + } + crate::proof_task::ProofResult::AccountMultiproof { .. } => { + unreachable!("storage worker only sends StorageProof variant") + } + }; + trace!( target: "trie::parallel_proof", total_targets, @@ -164,19 +148,19 @@ where "Storage proof generation completed" ); - proof_result + Ok(storage_proof) } - /// Generate a state multiproof according to specified targets. - pub fn decoded_multiproof( - self, - targets: MultiProofTargets, - ) -> Result { - let mut tracker = ParallelTrieTracker::default(); - - // Extend prefix sets with targets - let mut prefix_sets = (*self.prefix_sets).clone(); - prefix_sets.extend(TriePrefixSetsMut { + /// Extends prefix sets with the given multiproof targets and returns the frozen result. + /// + /// This is a helper function used to prepare prefix sets before computing multiproofs. + /// Returns frozen (immutable) prefix sets ready for use in proof computation. + pub fn extend_prefix_sets_with_targets( + base_prefix_sets: &TriePrefixSetsMut, + targets: &MultiProofTargets, + ) -> TriePrefixSets { + let mut extended = base_prefix_sets.clone(); + extended.extend(TriePrefixSetsMut { account_prefix_set: PrefixSetMut::from(targets.keys().copied().map(Nibbles::unpack)), storage_prefix_sets: targets .iter() @@ -187,13 +171,21 @@ where .collect(), destroyed_accounts: Default::default(), }); - let prefix_sets = prefix_sets.freeze(); + extended.freeze() + } - let storage_root_targets = StorageRootTargets::new( - prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())), - prefix_sets.storage_prefix_sets.clone(), + /// Generate a state multiproof according to specified targets. 
+ pub fn decoded_multiproof( + self, + targets: MultiProofTargets, + ) -> Result { + // Extend prefix sets with targets + let prefix_sets = Self::extend_prefix_sets_with_targets(&self.prefix_sets, &targets); + + let storage_root_targets_len = StorageRootTargets::count( + &prefix_sets.account_prefix_set, + &prefix_sets.storage_prefix_sets, ); - let storage_root_targets_len = storage_root_targets.len(); trace!( target: "trie::parallel_proof", @@ -201,150 +193,46 @@ where "Starting parallel proof generation" ); - // Pre-calculate storage roots for accounts which were changed. - tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); + // Queue account multiproof request to account worker pool + // Create channel for receiving ProofResultMessage + let (result_tx, result_rx) = crossbeam_unbounded(); + let account_multiproof_start_time = Instant::now(); - // stores the receiver for the storage proof outcome for the hashed addresses - // this way we can lazily await the outcome when we iterate over the map - let mut storage_proof_receivers = - B256Map::with_capacity_and_hasher(storage_root_targets.len(), Default::default()); - - for (hashed_address, prefix_set) in - storage_root_targets.into_iter().sorted_unstable_by_key(|(address, _)| *address) - { - let target_slots = targets.get(&hashed_address).cloned().unwrap_or_default(); - let receiver = self.queue_storage_proof(hashed_address, prefix_set, target_slots); + let input = AccountMultiproofInput { + targets, + prefix_sets, + collect_branch_node_masks: self.collect_branch_node_masks, + multi_added_removed_keys: self.multi_added_removed_keys.clone(), + missed_leaves_storage_roots: self.missed_leaves_storage_roots.clone(), + proof_result_sender: ProofResultContext::new( + result_tx, + 0, + HashedPostState::default(), + account_multiproof_start_time, + ), + }; - // store the receiver for that result with the hashed address so we can await this in - // place when we iterate over the trie - storage_proof_receivers.insert(hashed_address, receiver); - } + self.proof_worker_handle + .dispatch_account_multiproof(input) + .map_err(|e| ParallelStateRootError::Other(e.to_string()))?; - let provider_ro = self.view.provider_ro()?; - let trie_cursor_factory = InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), - &self.nodes_sorted, - ); - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - &self.state_sorted, - ); + // Wait for account multiproof result from worker + let proof_result_msg = result_rx.recv().map_err(|_| { + ParallelStateRootError::Other( + "Account multiproof channel dropped: worker died or pool shutdown".to_string(), + ) + })?; - let accounts_added_removed_keys = - self.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); - - // Create the walker. - let walker = TrieWalker::<_>::state_trie( - trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, - prefix_sets.account_prefix_set, - ) - .with_added_removed_keys(accounts_added_removed_keys) - .with_deletions_retained(true); - - // Create a hash builder to rebuild the root node since it is not available in the database. - let retainer = targets - .keys() - .map(Nibbles::unpack) - .collect::() - .with_added_removed_keys(accounts_added_removed_keys); - let mut hash_builder = HashBuilder::default() - .with_proof_retainer(retainer) - .with_updates(self.collect_branch_node_masks); - - // Initialize all storage multiproofs as empty. 
- // Storage multiproofs for non empty tries will be overwritten if necessary. - let mut collected_decoded_storages: B256Map = - targets.keys().map(|key| (*key, DecodedStorageMultiProof::empty())).collect(); - let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); - let mut account_node_iter = TrieNodeIter::state_trie( - walker, - hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, - ); - while let Some(account_node) = - account_node_iter.try_next().map_err(ProviderError::Database)? - { - match account_node { - TrieElement::Branch(node) => { - hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); - } - TrieElement::Leaf(hashed_address, account) => { - let root = match storage_proof_receivers.remove(&hashed_address) { - Some(rx) => { - let decoded_storage_multiproof = rx.recv().map_err(|e| { - ParallelStateRootError::StorageRoot(StorageRootError::Database( - DatabaseError::Other(format!( - "channel closed for {hashed_address}: {e}" - )), - )) - })??; - let root = decoded_storage_multiproof.root; - collected_decoded_storages - .insert(hashed_address, decoded_storage_multiproof); - root - } - // Since we do not store all intermediate nodes in the database, there might - // be a possibility of re-adding a non-modified leaf to the hash builder. - None => { - tracker.inc_missed_leaves(); - - match self.missed_leaves_storage_roots.entry(hashed_address) { - dashmap::Entry::Occupied(occ) => *occ.get(), - dashmap::Entry::Vacant(vac) => { - let root = StorageProof::new_hashed( - trie_cursor_factory.clone(), - hashed_cursor_factory.clone(), - hashed_address, - ) - .with_prefix_set_mut(Default::default()) - .storage_multiproof( - targets.get(&hashed_address).cloned().unwrap_or_default(), - ) - .map_err(|e| { - ParallelStateRootError::StorageRoot( - StorageRootError::Database(DatabaseError::Other( - e.to_string(), - )), - ) - })? - .root; - vac.insert(root); - root - } - } - } - }; - - // Encode account - account_rlp.clear(); - let account = account.into_trie_account(root); - account.encode(&mut account_rlp as &mut dyn BufMut); - - hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - } + let (multiproof, stats) = match proof_result_msg.result? { + crate::proof_task::ProofResult::AccountMultiproof { proof, stats } => (proof, stats), + crate::proof_task::ProofResult::StorageProof { .. 
} => { + unreachable!("account worker only sends AccountMultiproof variant") } - } - let _ = hash_builder.root(); + }; - let stats = tracker.finish(); #[cfg(feature = "metrics")] self.metrics.record(stats); - let account_subtree_raw_nodes = hash_builder.take_proof_nodes(); - let decoded_account_subtree = DecodedProofNodes::try_from(account_subtree_raw_nodes)?; - - let (branch_node_hash_masks, branch_node_tree_masks) = if self.collect_branch_node_masks { - let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); - ( - updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), - updated_branch_nodes - .into_iter() - .map(|(path, node)| (path, node.tree_mask)) - .collect(), - ) - } else { - (HashMap::default(), HashMap::default()) - }; - trace!( target: "trie::parallel_proof", total_targets = storage_root_targets_len, @@ -356,34 +244,29 @@ where "Calculated decoded proof" ); - Ok(DecodedMultiProof { - account_subtree: decoded_account_subtree, - branch_node_hash_masks, - branch_node_tree_masks, - storages: collected_decoded_storages, - }) + Ok(multiproof) } } #[cfg(test)] mod tests { use super::*; - use crate::proof_task::{ProofTaskCtx, ProofTaskManager}; + use crate::proof_task::{ProofTaskCtx, ProofWorkerHandle}; use alloy_primitives::{ keccak256, - map::{B256Set, DefaultHashBuilder}, + map::{B256Set, DefaultHashBuilder, HashMap}, Address, U256, }; use rand::Rng; use reth_primitives_traits::{Account, StorageEntry}; use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; use reth_trie::proof::Proof; + use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use tokio::runtime::Runtime; #[test] fn random_parallel_proof() { let factory = create_test_provider_factory(); - let consistent_view = ConsistentDbView::new(factory.clone(), None); let mut rng = rand::rng(); let state = (0..100) @@ -445,26 +328,14 @@ mod tests { let rt = Runtime::new().unwrap(); - let task_ctx = - ProofTaskCtx::new(Default::default(), Default::default(), Default::default()); - let proof_task = - ProofTaskManager::new(rt.handle().clone(), consistent_view.clone(), task_ctx, 1); - let proof_task_handle = proof_task.handle(); - - // keep the join handle around to make sure it does not return any errors - // after we compute the state root - let join_handle = rt.spawn_blocking(move || proof_task.run()); - - let parallel_result = ParallelProof::new( - consistent_view, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - proof_task_handle.clone(), - ) - .decoded_multiproof(targets.clone()) - .unwrap(); + let factory = reth_provider::providers::OverlayStateProviderFactory::new(factory); + let task_ctx = ProofTaskCtx::new(factory); + let proof_worker_handle = ProofWorkerHandle::new(rt.handle().clone(), task_ctx, 1, 1); + + let parallel_result = + ParallelProof::new(Default::default(), Default::default(), proof_worker_handle.clone()) + .decoded_multiproof(targets.clone()) + .unwrap(); let sequential_result_raw = Proof::new(trie_cursor_factory, hashed_cursor_factory) .multiproof(targets.clone()) @@ -489,9 +360,7 @@ mod tests { // then compare the entire thing for any mask differences assert_eq!(parallel_result, sequential_result_decoded); - // drop the handle to terminate the task and then block on the proof task handle to make - // sure it does not return any errors - drop(proof_task_handle); - rt.block_on(join_handle).unwrap().expect("The proof task should not return an error"); + // Workers shut down 
automatically when handle is dropped + drop(proof_worker_handle); } } diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 9bb96d4b19e..8da4c28d91a 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -1,435 +1,1392 @@ -//! A Task that manages sending proof requests to a number of tasks that have longer-running -//! database transactions. +//! Parallel proof computation using worker pools with dedicated database transactions. //! -//! The [`ProofTaskManager`] ensures that there are a max number of currently executing proof tasks, -//! and is responsible for managing the fixed number of database transactions created at the start -//! of the task. //! -//! Individual [`ProofTaskTx`] instances manage a dedicated [`InMemoryTrieCursorFactory`] and -//! [`HashedPostStateCursorFactory`], which are each backed by a database transaction. - -use crate::root::ParallelStateRootError; -use alloy_primitives::{map::B256Set, B256}; -use reth_db_api::transaction::DbTx; -use reth_execution_errors::SparseTrieError; -use reth_provider::{ - providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, FactoryTx, - ProviderResult, +//! # Architecture +//! +//! - **Worker Pools**: Pre-spawned workers with dedicated database transactions +//! - Storage pool: Handles storage proofs and blinded storage node requests +//! - Account pool: Handles account multiproofs and blinded account node requests +//! - **Direct Channel Access**: [`ProofWorkerHandle`] provides type-safe queue methods with direct +//! access to worker channels, eliminating routing overhead +//! - **Automatic Shutdown**: Workers terminate gracefully when all handles are dropped +//! +//! # Message Flow +//! +//! 1. `MultiProofTask` prepares a storage or account job and hands it to [`ProofWorkerHandle`]. The +//! job carries a [`ProofResultContext`] so the worker knows how to send the result back. +//! 2. A worker receives the job, runs the proof, and sends a [`ProofResultMessage`] through the +//! provided [`ProofResultSender`]. +//! 3. `MultiProofTask` receives the message, uses `sequence_number` to keep proofs in order, and +//! proceeds with its state-root logic. +//! +//! Each job gets its own direct channel so results go straight back to `MultiProofTask`. That keeps +//! ordering decisions in one place and lets workers run independently. +//! +//! ```text +//! MultiProofTask -> MultiproofManager -> ProofWorkerHandle -> Storage/Account Worker +//! ^ | +//! | v +//! ProofResultMessage <-------- ProofResultSender --- +//! 
``` + +use crate::{ + root::ParallelStateRootError, + stats::{ParallelTrieStats, ParallelTrieTracker}, + StorageRootTargets, +}; +use alloy_primitives::{ + map::{B256Map, B256Set}, + B256, }; +use alloy_rlp::{BufMut, Encodable}; +use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; +use dashmap::DashMap; +use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; +use reth_provider::{DatabaseProviderROFactory, ProviderError, ProviderResult}; +use reth_storage_errors::db::DatabaseError; use reth_trie::{ - hashed_cursor::HashedPostStateCursorFactory, - prefix_set::TriePrefixSetsMut, - proof::{ProofTrieNodeProviderFactory, StorageProof}, - trie_cursor::InMemoryTrieCursorFactory, - updates::TrieUpdatesSorted, - DecodedStorageMultiProof, HashedPostStateSorted, Nibbles, + hashed_cursor::HashedCursorFactory, + node_iter::{TrieElement, TrieNodeIter}, + prefix_set::TriePrefixSets, + proof::{ProofBlindedAccountProvider, ProofBlindedStorageProvider, StorageProof}, + trie_cursor::TrieCursorFactory, + walker::TrieWalker, + DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostState, MultiProofTargets, + Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_common::{ added_removed_keys::MultiAddedRemovedKeys, prefix_set::{PrefixSet, PrefixSetMut}, + proof::{DecodedProofNodes, ProofRetainer}, }; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}; use std::{ - collections::VecDeque, sync::{ atomic::{AtomicUsize, Ordering}, - mpsc::{channel, Receiver, SendError, Sender}, + mpsc::{channel, Receiver, Sender}, Arc, }, - time::Instant, + time::{Duration, Instant}, }; use tokio::runtime::Handle; -use tracing::{debug, trace}; +use tracing::{debug, debug_span, error, trace}; #[cfg(feature = "metrics")] -use crate::proof_task_metrics::ProofTaskMetrics; +use crate::proof_task_metrics::ProofTaskTrieMetrics; type StorageProofResult = Result; type TrieNodeProviderResult = Result, SparseTrieError>; -/// A task that manages sending multiproof requests to a number of tasks that have longer-running -/// database transactions -#[derive(Debug)] -pub struct ProofTaskManager { - /// Max number of database transactions to create - max_concurrency: usize, - /// Number of database transactions created - total_transactions: usize, - /// Consistent view provider used for creating transactions on-demand - view: ConsistentDbView, - /// Proof task context shared across all proof tasks - task_ctx: ProofTaskCtx, - /// Proof tasks pending execution - pending_tasks: VecDeque, - /// The underlying handle from which to spawn proof tasks - executor: Handle, - /// The proof task transactions, containing owned cursor factories that are reused for proof - /// calculation. - proof_task_txs: Vec>>, - /// A receiver for new proof tasks. - proof_task_rx: Receiver>>, - /// A sender for sending back transactions. - tx_sender: Sender>>, - /// The number of active handles. - /// - /// Incremented in [`ProofTaskManagerHandle::new`] and decremented in - /// [`ProofTaskManagerHandle::drop`]. - active_handles: Arc, - /// Metrics tracking blinded node fetches. - #[cfg(feature = "metrics")] - metrics: ProofTaskMetrics, +/// A handle that provides type-safe access to proof worker pools. +/// +/// The handle stores direct senders to both storage and account worker pools, +/// eliminating the need for a routing thread. 
All handles share reference-counted +/// channels, and workers shut down gracefully when all handles are dropped. +#[derive(Debug, Clone)] +pub struct ProofWorkerHandle { + /// Direct sender to storage worker pool + storage_work_tx: CrossbeamSender, + /// Direct sender to account worker pool + account_work_tx: CrossbeamSender, + /// Counter tracking available storage workers. Workers decrement when starting work, + /// increment when finishing. Used to determine whether to chunk multiproofs. + storage_available_workers: Arc, + /// Counter tracking available account workers. Workers decrement when starting work, + /// increment when finishing. Used to determine whether to chunk multiproofs. + account_available_workers: Arc, + /// Total number of storage workers spawned + storage_worker_count: usize, + /// Total number of account workers spawned + account_worker_count: usize, } -impl ProofTaskManager { - /// Creates a new [`ProofTaskManager`] with the given max concurrency, creating that number of - /// cursor factories. +impl ProofWorkerHandle { + /// Spawns storage and account worker pools with dedicated database transactions. + /// + /// Returns a handle for submitting proof tasks to the worker pools. + /// Workers run until the last handle is dropped. /// - /// Returns an error if the consistent view provider fails to create a read-only transaction. - pub fn new( + /// # Parameters + /// - `executor`: Tokio runtime handle for spawning blocking tasks + /// - `task_ctx`: Shared context with database view and prefix sets + /// - `storage_worker_count`: Number of storage workers to spawn + /// - `account_worker_count`: Number of account workers to spawn + pub fn new( executor: Handle, - view: ConsistentDbView, - task_ctx: ProofTaskCtx, - max_concurrency: usize, - ) -> Self { - let (tx_sender, proof_task_rx) = channel(); + task_ctx: ProofTaskCtx, + storage_worker_count: usize, + account_worker_count: usize, + ) -> Self + where + Factory: DatabaseProviderROFactory + + Clone + + Send + + 'static, + { + let (storage_work_tx, storage_work_rx) = unbounded::(); + let (account_work_tx, account_work_rx) = unbounded::(); + + // Initialize availability counters at zero. Each worker will increment when it + // successfully initializes, ensuring only healthy workers are counted. 
+ let storage_available_workers = Arc::new(AtomicUsize::new(0)); + let account_available_workers = Arc::new(AtomicUsize::new(0)); + + debug!( + target: "trie::proof_task", + storage_worker_count, + account_worker_count, + "Spawning proof worker pools" + ); + + let parent_span = + debug_span!(target: "trie::proof_task", "storage proof workers", ?storage_worker_count) + .entered(); + // Spawn storage workers + for worker_id in 0..storage_worker_count { + let span = debug_span!(target: "trie::proof_task", "storage worker", ?worker_id); + let task_ctx_clone = task_ctx.clone(); + let work_rx_clone = storage_work_rx.clone(); + let storage_available_workers_clone = storage_available_workers.clone(); + + executor.spawn_blocking(move || { + #[cfg(feature = "metrics")] + let metrics = ProofTaskTrieMetrics::default(); + + let _guard = span.enter(); + let worker = StorageProofWorker::new( + task_ctx_clone, + work_rx_clone, + worker_id, + storage_available_workers_clone, + #[cfg(feature = "metrics")] + metrics, + ); + if let Err(error) = worker.run() { + error!( + target: "trie::proof_task", + worker_id, + ?error, + "Storage worker failed" + ); + } + }); + } + drop(parent_span); + + let parent_span = + debug_span!(target: "trie::proof_task", "account proof workers", ?storage_worker_count) + .entered(); + // Spawn account workers + for worker_id in 0..account_worker_count { + let span = debug_span!(target: "trie::proof_task", "account worker", ?worker_id); + let task_ctx_clone = task_ctx.clone(); + let work_rx_clone = account_work_rx.clone(); + let storage_work_tx_clone = storage_work_tx.clone(); + let account_available_workers_clone = account_available_workers.clone(); + + executor.spawn_blocking(move || { + #[cfg(feature = "metrics")] + let metrics = ProofTaskTrieMetrics::default(); + + let _guard = span.enter(); + let worker = AccountProofWorker::new( + task_ctx_clone, + work_rx_clone, + worker_id, + storage_work_tx_clone, + account_available_workers_clone, + #[cfg(feature = "metrics")] + metrics, + ); + if let Err(error) = worker.run() { + error!( + target: "trie::proof_task", + worker_id, + ?error, + "Account worker failed" + ); + } + }); + } + drop(parent_span); + Self { - max_concurrency, - total_transactions: 0, - view, - task_ctx, - pending_tasks: VecDeque::new(), - executor, - proof_task_txs: Vec::new(), - proof_task_rx, - tx_sender, - active_handles: Arc::new(AtomicUsize::new(0)), - #[cfg(feature = "metrics")] - metrics: ProofTaskMetrics::default(), + storage_work_tx, + account_work_tx, + storage_available_workers, + account_available_workers, + storage_worker_count, + account_worker_count, } } - /// Returns a handle for sending new proof tasks to the [`ProofTaskManager`]. - pub fn handle(&self) -> ProofTaskManagerHandle> { - ProofTaskManagerHandle::new(self.tx_sender.clone(), self.active_handles.clone()) + /// Returns how many storage workers are currently available/idle. + pub fn available_storage_workers(&self) -> usize { + self.storage_available_workers.load(Ordering::Relaxed) } -} -impl ProofTaskManager -where - Factory: DatabaseProviderFactory + 'static, -{ - /// Inserts the task into the pending tasks queue. - pub fn queue_proof_task(&mut self, task: ProofTaskKind) { - self.pending_tasks.push_back(task); + /// Returns how many account workers are currently available/idle. 
+ pub fn available_account_workers(&self) -> usize { + self.account_available_workers.load(Ordering::Relaxed) } - /// Gets either the next available transaction, or creates a new one if all are in use and the - /// total number of transactions created is less than the max concurrency. - pub fn get_or_create_tx(&mut self) -> ProviderResult>>> { - if let Some(proof_task_tx) = self.proof_task_txs.pop() { - return Ok(Some(proof_task_tx)); - } + /// Returns the number of pending storage tasks in the queue. + pub fn pending_storage_tasks(&self) -> usize { + self.storage_work_tx.len() + } - // if we can create a new tx within our concurrency limits, create one on-demand - if self.total_transactions < self.max_concurrency { - let provider_ro = self.view.provider_ro()?; - let tx = provider_ro.into_tx(); - self.total_transactions += 1; - return Ok(Some(ProofTaskTx::new(tx, self.task_ctx.clone(), self.total_transactions))); - } + /// Returns the number of pending account tasks in the queue. + pub fn pending_account_tasks(&self) -> usize { + self.account_work_tx.len() + } - Ok(None) + /// Returns the total number of storage workers in the pool. + pub const fn total_storage_workers(&self) -> usize { + self.storage_worker_count } - /// Spawns the next queued proof task on the executor with the given input, if there are any - /// transactions available. + /// Returns the total number of account workers in the pool. + pub const fn total_account_workers(&self) -> usize { + self.account_worker_count + } + + /// Returns the number of storage workers currently processing tasks. /// - /// This will return an error if a transaction must be created on-demand and the consistent view - /// provider fails. - pub fn try_spawn_next(&mut self) -> ProviderResult<()> { - let Some(task) = self.pending_tasks.pop_front() else { return Ok(()) }; - - let Some(proof_task_tx) = self.get_or_create_tx()? else { - // if there are no txs available, requeue the proof task - self.pending_tasks.push_front(task); - return Ok(()) - }; + /// This is calculated as total workers minus available workers. + pub fn active_storage_workers(&self) -> usize { + self.storage_worker_count.saturating_sub(self.available_storage_workers()) + } - let tx_sender = self.tx_sender.clone(); - self.executor.spawn_blocking(move || match task { - ProofTaskKind::StorageProof(input, sender) => { - proof_task_tx.storage_proof(input, sender, tx_sender); - } - ProofTaskKind::BlindedAccountNode(path, sender) => { - proof_task_tx.blinded_account_node(path, sender, tx_sender); - } - ProofTaskKind::BlindedStorageNode(account, path, sender) => { - proof_task_tx.blinded_storage_node(account, path, sender, tx_sender); - } - }); + /// Returns the number of account workers currently processing tasks. + /// + /// This is calculated as total workers minus available workers. + pub fn active_account_workers(&self) -> usize { + self.account_worker_count.saturating_sub(self.available_account_workers()) + } - Ok(()) + /// Dispatch a storage proof computation to storage worker pool + /// + /// The result will be sent via the `proof_result_sender` channel. + pub fn dispatch_storage_proof( + &self, + input: StorageProofInput, + proof_result_sender: ProofResultContext, + ) -> Result<(), ProviderError> { + self.storage_work_tx + .send(StorageWorkerJob::StorageProof { input, proof_result_sender }) + .map_err(|err| { + let error = + ProviderError::other(std::io::Error::other("storage workers unavailable")); + + if let StorageWorkerJob::StorageProof { proof_result_sender, .. 
} = err.0 { + let ProofResultContext { + sender: result_tx, + sequence_number: seq, + state, + start_time: start, + } = proof_result_sender; + + let _ = result_tx.send(ProofResultMessage { + sequence_number: seq, + result: Err(ParallelStateRootError::Provider(error.clone())), + elapsed: start.elapsed(), + state, + }); + } + + error + }) } - /// Loops, managing the proof tasks, and sending new tasks to the executor. - pub fn run(mut self) -> ProviderResult<()> { - loop { - match self.proof_task_rx.recv() { - Ok(message) => match message { - ProofTaskMessage::QueueTask(task) => { - // Track metrics for blinded node requests - #[cfg(feature = "metrics")] - match &task { - ProofTaskKind::BlindedAccountNode(_, _) => { - self.metrics.account_nodes += 1; - } - ProofTaskKind::BlindedStorageNode(_, _, _) => { - self.metrics.storage_nodes += 1; - } - _ => {} - } - // queue the task - self.queue_proof_task(task) - } - ProofTaskMessage::Transaction(tx) => { - // return the transaction to the pool - self.proof_task_txs.push(tx); - } - ProofTaskMessage::Terminate => { - // Record metrics before terminating - #[cfg(feature = "metrics")] - self.metrics.record(); - return Ok(()) - } - }, - // All senders are disconnected, so we can terminate - // However this should never happen, as this struct stores a sender - Err(_) => return Ok(()), - }; - - // try spawning the next task - self.try_spawn_next()?; - } + /// Dispatch an account multiproof computation + /// + /// The result will be sent via the `result_sender` channel included in the input. + pub fn dispatch_account_multiproof( + &self, + input: AccountMultiproofInput, + ) -> Result<(), ProviderError> { + self.account_work_tx + .send(AccountWorkerJob::AccountMultiproof { input: Box::new(input) }) + .map_err(|err| { + let error = + ProviderError::other(std::io::Error::other("account workers unavailable")); + + if let AccountWorkerJob::AccountMultiproof { input } = err.0 { + let AccountMultiproofInput { + proof_result_sender: + ProofResultContext { + sender: result_tx, + sequence_number: seq, + state, + start_time: start, + }, + .. + } = *input; + + let _ = result_tx.send(ProofResultMessage { + sequence_number: seq, + result: Err(ParallelStateRootError::Provider(error.clone())), + elapsed: start.elapsed(), + state, + }); + } + + error + }) + } + + /// Dispatch blinded storage node request to storage worker pool + pub(crate) fn dispatch_blinded_storage_node( + &self, + account: B256, + path: Nibbles, + ) -> Result, ProviderError> { + let (tx, rx) = channel(); + self.storage_work_tx + .send(StorageWorkerJob::BlindedStorageNode { account, path, result_sender: tx }) + .map_err(|_| { + ProviderError::other(std::io::Error::other("storage workers unavailable")) + })?; + + Ok(rx) + } + + /// Dispatch blinded account node request to account worker pool + pub(crate) fn dispatch_blinded_account_node( + &self, + path: Nibbles, + ) -> Result, ProviderError> { + let (tx, rx) = channel(); + self.account_work_tx + .send(AccountWorkerJob::BlindedAccountNode { path, result_sender: tx }) + .map_err(|_| { + ProviderError::other(std::io::Error::other("account workers unavailable")) + })?; + + Ok(rx) } } -/// Type alias for the factory tuple returned by `create_factories` -type ProofFactories<'a, Tx> = ( - InMemoryTrieCursorFactory, &'a TrieUpdatesSorted>, - HashedPostStateCursorFactory, &'a HashedPostStateSorted>, -); +/// Data used for initializing cursor factories that is shared across all storage proof instances. 
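To make the dispatch → `ProofResultMessage` flow described in the module docs concrete, here is a minimal sketch (not part of the diff) of submitting one storage proof through a `ProofWorkerHandle` and waiting for its result. The module paths, constructor visibility, and error conversions are assumptions; the types and call signatures follow the code introduced above.

```rust
use std::time::Instant;

use alloy_primitives::{map::B256Set, B256};
use reth_trie::HashedPostState;
use reth_trie_common::prefix_set::PrefixSet;
// Paths assumed; the items are defined in this file (crates/trie/parallel/src/proof_task.rs).
use reth_trie_parallel::{
    proof_task::{ProofResultContext, ProofResultMessage, ProofWorkerHandle, StorageProofInput},
    root::ParallelStateRootError,
};

/// Hypothetical helper: dispatch a single storage proof and block on its result.
fn storage_proof_roundtrip(
    handle: &ProofWorkerHandle,
    hashed_address: B256,
) -> Result<ProofResultMessage, ParallelStateRootError> {
    // Each job gets its own result channel, as described in the module docs.
    let (result_tx, result_rx) = crossbeam_channel::unbounded();

    // An empty prefix set / slot set still yields the account's storage root.
    let input = StorageProofInput::new(
        hashed_address,
        PrefixSet::default(),
        B256Set::default(),
        false, // with_branch_node_masks
        None,  // multi_added_removed_keys
    );

    let ctx = ProofResultContext::new(result_tx, 0, HashedPostState::default(), Instant::now());
    handle.dispatch_storage_proof(input, ctx).map_err(ParallelStateRootError::Provider)?;

    // The storage worker sends the result straight back on the per-job channel.
    result_rx.recv().map_err(|e| ParallelStateRootError::Other(e.to_string()))
}
```

Because each job carries its own sender, the worker never needs to know who is waiting; the caller reconstructs ordering from `sequence_number`, as the module docs describe.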
+#[derive(Clone, Debug)] +pub struct ProofTaskCtx { + /// The factory for creating state providers. + factory: Factory, +} + +impl ProofTaskCtx { + /// Creates a new [`ProofTaskCtx`] with the given factory. + pub const fn new(factory: Factory) -> Self { + Self { factory } + } +} /// This contains all information shared between all storage proof instances. #[derive(Debug)] -pub struct ProofTaskTx { - /// The tx that is reused for proof calculations. - tx: Tx, - - /// Trie updates, prefix sets, and state updates - task_ctx: ProofTaskCtx, +pub struct ProofTaskTx { + /// The provider that implements `TrieCursorFactory` and `HashedCursorFactory`. + provider: Provider, - /// Identifier for the tx within the context of a single [`ProofTaskManager`], used only for - /// tracing. + /// Identifier for the worker within the worker pool, used only for tracing. id: usize, } -impl ProofTaskTx { - /// Initializes a [`ProofTaskTx`] using the given transaction and a [`ProofTaskCtx`]. The id is - /// used only for tracing. - const fn new(tx: Tx, task_ctx: ProofTaskCtx, id: usize) -> Self { - Self { tx, task_ctx, id } +impl ProofTaskTx { + /// Initializes a [`ProofTaskTx`] with the given provider and ID. + const fn new(provider: Provider, id: usize) -> Self { + Self { provider, id } } } -impl ProofTaskTx +impl ProofTaskTx where - Tx: DbTx, + Provider: TrieCursorFactory + HashedCursorFactory, { - fn create_factories(&self) -> ProofFactories<'_, Tx> { - let trie_cursor_factory = InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(&self.tx), - self.task_ctx.nodes_sorted.as_ref(), - ); - - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(&self.tx), - self.task_ctx.state_sorted.as_ref(), - ); - - (trie_cursor_factory, hashed_cursor_factory) - } - - /// Calculates a storage proof for the given hashed address, and desired prefix set. - fn storage_proof( - self, - input: StorageProofInput, - result_sender: Sender, - tx_sender: Sender>, - ) { - trace!( - target: "trie::proof_task", - hashed_address=?input.hashed_address, - "Starting storage proof task calculation" - ); + /// Compute storage proof. + /// + /// Used by storage workers in the worker pool to compute storage proofs. + #[inline] + fn compute_storage_proof(&self, input: StorageProofInput) -> StorageProofResult { + // Consume the input so we can move large collections (e.g. target slots) without cloning. + let StorageProofInput { + hashed_address, + prefix_set, + target_slots, + with_branch_node_masks, + multi_added_removed_keys, + } = input; - let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - let multi_added_removed_keys = input - .multi_added_removed_keys - .unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); - let added_removed_keys = multi_added_removed_keys.get_storage(&input.hashed_address); + // Get or create added/removed keys context + let multi_added_removed_keys = + multi_added_removed_keys.unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); + let added_removed_keys = multi_added_removed_keys.get_storage(&hashed_address); - let span = tracing::trace_span!( + let span = debug_span!( target: "trie::proof_task", "Storage proof calculation", - hashed_address=?input.hashed_address, - // Add a unique id because we often have parallel storage proof calculations for the - // same hashed address, and we want to differentiate them during trace analysis. 
- span_id=self.id, + hashed_address = ?hashed_address, + worker_id = self.id, ); - let span_guard = span.enter(); + let _span_guard = span.enter(); - let target_slots_len = input.target_slots.len(); let proof_start = Instant::now(); - let raw_proof_result = StorageProof::new_hashed( - trie_cursor_factory, - hashed_cursor_factory, - input.hashed_address, - ) - .with_prefix_set_mut(PrefixSetMut::from(input.prefix_set.iter().copied())) - .with_branch_node_masks(input.with_branch_node_masks) - .with_added_removed_keys(added_removed_keys) - .storage_multiproof(input.target_slots) - .map_err(|e| ParallelStateRootError::Other(e.to_string())); - - drop(span_guard); + // Compute raw storage multiproof + let raw_proof_result = + StorageProof::new_hashed(&self.provider, &self.provider, hashed_address) + .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().copied())) + .with_branch_node_masks(with_branch_node_masks) + .with_added_removed_keys(added_removed_keys) + .storage_multiproof(target_slots) + .map_err(|e| ParallelStateRootError::Other(e.to_string())); + // Decode proof into DecodedStorageMultiProof let decoded_result = raw_proof_result.and_then(|raw_proof| { raw_proof.try_into().map_err(|e: alloy_rlp::Error| { ParallelStateRootError::Other(format!( "Failed to decode storage proof for {}: {}", - input.hashed_address, e + hashed_address, e )) }) }); trace!( target: "trie::proof_task", - hashed_address=?input.hashed_address, - prefix_set = ?input.prefix_set.len(), - target_slots = ?target_slots_len, - proof_time = ?proof_start.elapsed(), - "Completed storage proof task calculation" + hashed_address = ?hashed_address, + proof_time_us = proof_start.elapsed().as_micros(), + worker_id = self.id, + "Completed storage proof calculation" ); - // send the result back - if let Err(error) = result_sender.send(decoded_result) { - debug!( - target: "trie::proof_task", - hashed_address = ?input.hashed_address, - ?error, - task_time = ?proof_start.elapsed(), - "Storage proof receiver is dropped, discarding the result" - ); + decoded_result + } + + /// Process a blinded storage node request. + /// + /// Used by storage workers to retrieve blinded storage trie nodes for proof construction. + fn process_blinded_storage_node( + &self, + account: B256, + path: &Nibbles, + ) -> TrieNodeProviderResult { + let storage_node_provider = + ProofBlindedStorageProvider::new(&self.provider, &self.provider, account); + storage_node_provider.trie_node(path) + } + + /// Process a blinded account node request. + /// + /// Used by account workers to retrieve blinded account trie nodes for proof construction. + fn process_blinded_account_node(&self, path: &Nibbles) -> TrieNodeProviderResult { + let account_node_provider = + ProofBlindedAccountProvider::new(&self.provider, &self.provider); + account_node_provider.trie_node(path) + } +} +impl TrieNodeProviderFactory for ProofWorkerHandle { + type AccountNodeProvider = ProofTaskTrieNodeProvider; + type StorageNodeProvider = ProofTaskTrieNodeProvider; + + fn account_node_provider(&self) -> Self::AccountNodeProvider { + ProofTaskTrieNodeProvider::AccountNode { handle: self.clone() } + } + + fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { + ProofTaskTrieNodeProvider::StorageNode { account, handle: self.clone() } + } +} + +/// Trie node provider for retrieving trie nodes by path. +#[derive(Debug)] +pub enum ProofTaskTrieNodeProvider { + /// Blinded account trie node provider. + AccountNode { + /// Handle to the proof worker pools. 
+ handle: ProofWorkerHandle, + }, + /// Blinded storage trie node provider. + StorageNode { + /// Target account. + account: B256, + /// Handle to the proof worker pools. + handle: ProofWorkerHandle, + }, +} + +impl TrieNodeProvider for ProofTaskTrieNodeProvider { + fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { + match self { + Self::AccountNode { handle } => { + let rx = handle + .dispatch_blinded_account_node(*path) + .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; + rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? + } + Self::StorageNode { handle, account } => { + let rx = handle + .dispatch_blinded_storage_node(*account, *path) + .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; + rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? + } } + } +} +/// Result of a proof calculation, which can be either an account multiproof or a storage proof. +#[derive(Debug)] +pub enum ProofResult { + /// Account multiproof with statistics + AccountMultiproof { + /// The account multiproof + proof: DecodedMultiProof, + /// Statistics collected during proof computation + stats: ParallelTrieStats, + }, + /// Storage proof for a specific account + StorageProof { + /// The hashed address this storage proof belongs to + hashed_address: B256, + /// The storage multiproof + proof: DecodedStorageMultiProof, + }, +} - // send the tx back - let _ = tx_sender.send(ProofTaskMessage::Transaction(self)); +impl ProofResult { + /// Convert this proof result into a `DecodedMultiProof`. + /// + /// For account multiproofs, returns the multiproof directly (discarding stats). + /// For storage proofs, wraps the storage proof into a minimal multiproof. + pub fn into_multiproof(self) -> DecodedMultiProof { + match self { + Self::AccountMultiproof { proof, stats: _ } => proof, + Self::StorageProof { hashed_address, proof } => { + DecodedMultiProof::from_storage_proof(hashed_address, proof) + } + } } +} +/// Channel used by worker threads to deliver `ProofResultMessage` items back to +/// `MultiProofTask`. +/// +/// Workers use this sender to deliver proof results directly to `MultiProofTask`. +pub type ProofResultSender = CrossbeamSender; + +/// Message containing a completed proof result with metadata for direct delivery to +/// `MultiProofTask`. +/// +/// This type enables workers to send proof results directly to the `MultiProofTask` event loop. +#[derive(Debug)] +pub struct ProofResultMessage { + /// Sequence number for ordering proofs + pub sequence_number: u64, + /// The proof calculation result (either account multiproof or storage proof) + pub result: Result, + /// Time taken for the entire proof calculation (from dispatch to completion) + pub elapsed: Duration, + /// Original state update that triggered this proof + pub state: HashedPostState, +} + +/// Context for sending proof calculation results back to `MultiProofTask`. +/// +/// This struct contains all context needed to send and track proof calculation results. +/// Workers use this to deliver completed proofs back to the main event loop. +#[derive(Debug, Clone)] +pub struct ProofResultContext { + /// Channel sender for result delivery + pub sender: ProofResultSender, + /// Sequence number for proof ordering + pub sequence_number: u64, + /// Original state update that triggered this proof + pub state: HashedPostState, + /// Calculation start time for measuring elapsed duration + pub start_time: Instant, +} - /// Retrieves blinded account node by path. 
- fn blinded_account_node( - self, +impl ProofResultContext { + /// Creates a new proof result context. + pub const fn new( + sender: ProofResultSender, + sequence_number: u64, + state: HashedPostState, + start_time: Instant, + ) -> Self { + Self { sender, sequence_number, state, start_time } + } +} +/// Internal message for storage workers. +#[derive(Debug)] +enum StorageWorkerJob { + /// Storage proof computation request + StorageProof { + /// Storage proof input parameters + input: StorageProofInput, + /// Context for sending the proof result. + proof_result_sender: ProofResultContext, + }, + /// Blinded storage node retrieval request + BlindedStorageNode { + /// Target account + account: B256, + /// Path to the storage node path: Nibbles, + /// Channel to send result back to original caller result_sender: Sender, - tx_sender: Sender>, - ) { + }, +} + +/// Worker for storage trie operations. +/// +/// Each worker maintains a dedicated database transaction and processes +/// storage proof requests and blinded node lookups. +struct StorageProofWorker { + /// Shared task context with database factory and prefix sets + task_ctx: ProofTaskCtx, + /// Channel for receiving work + work_rx: CrossbeamReceiver, + /// Unique identifier for this worker (used for tracing) + worker_id: usize, + /// Counter tracking worker availability + available_workers: Arc, + /// Metrics collector for this worker + #[cfg(feature = "metrics")] + metrics: ProofTaskTrieMetrics, +} + +impl StorageProofWorker +where + Factory: DatabaseProviderROFactory, +{ + /// Creates a new storage proof worker. + const fn new( + task_ctx: ProofTaskCtx, + work_rx: CrossbeamReceiver, + worker_id: usize, + available_workers: Arc, + #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, + ) -> Self { + Self { + task_ctx, + work_rx, + worker_id, + available_workers, + #[cfg(feature = "metrics")] + metrics, + } + } + + /// Runs the worker loop, processing jobs until the channel closes. + /// + /// # Lifecycle + /// + /// 1. Initializes database provider and transaction + /// 2. Advertises availability + /// 3. Processes jobs in a loop: + /// - Receives job from channel + /// - Marks worker as busy + /// - Processes the job + /// - Marks worker as available + /// 4. Shuts down when channel closes + /// + /// # Panic Safety + /// + /// If this function panics, the worker thread terminates but other workers + /// continue operating and the system degrades gracefully. + fn run(self) -> ProviderResult<()> { + let Self { + task_ctx, + work_rx, + worker_id, + available_workers, + #[cfg(feature = "metrics")] + metrics, + } = self; + + // Create provider from factory + let provider = task_ctx.factory.database_provider_ro()?; + let proof_tx = ProofTaskTx::new(provider, worker_id); + trace!( target: "trie::proof_task", - ?path, - "Starting blinded account node retrieval" + worker_id, + "Storage worker started" ); - let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); + let mut storage_proofs_processed = 0u64; + let mut storage_nodes_processed = 0u64; + + // Initially mark this worker as available. + available_workers.fetch_add(1, Ordering::Relaxed); + + while let Ok(job) = work_rx.recv() { + // Mark worker as busy. 
+ available_workers.fetch_sub(1, Ordering::Relaxed); + + match job { + StorageWorkerJob::StorageProof { input, proof_result_sender } => { + Self::process_storage_proof( + worker_id, + &proof_tx, + input, + proof_result_sender, + &mut storage_proofs_processed, + ); + } + + StorageWorkerJob::BlindedStorageNode { account, path, result_sender } => { + Self::process_blinded_node( + worker_id, + &proof_tx, + account, + path, + result_sender, + &mut storage_nodes_processed, + ); + } + } + + // Mark worker as available again. + available_workers.fetch_add(1, Ordering::Relaxed); + } - let blinded_provider_factory = ProofTrieNodeProviderFactory::new( - trie_cursor_factory, - hashed_cursor_factory, - self.task_ctx.prefix_sets.clone(), + trace!( + target: "trie::proof_task", + worker_id, + storage_proofs_processed, + storage_nodes_processed, + "Storage worker shutting down" ); - let start = Instant::now(); - let result = blinded_provider_factory.account_node_provider().trie_node(&path); + #[cfg(feature = "metrics")] + metrics.record_storage_nodes(storage_nodes_processed as usize); + + Ok(()) + } + + /// Processes a storage proof request. + fn process_storage_proof( + worker_id: usize, + proof_tx: &ProofTaskTx, + input: StorageProofInput, + proof_result_sender: ProofResultContext, + storage_proofs_processed: &mut u64, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { + let hashed_address = input.hashed_address; + let ProofResultContext { sender, sequence_number: seq, state, start_time } = + proof_result_sender; + trace!( target: "trie::proof_task", - ?path, - elapsed = ?start.elapsed(), - "Completed blinded account node retrieval" + worker_id, + hashed_address = ?hashed_address, + prefix_set_len = input.prefix_set.len(), + target_slots_len = input.target_slots.len(), + "Processing storage proof" ); - if let Err(error) = result_sender.send(result) { - tracing::error!( + let proof_start = Instant::now(); + let result = proof_tx.compute_storage_proof(input); + + let proof_elapsed = proof_start.elapsed(); + *storage_proofs_processed += 1; + + let result_msg = result.map(|storage_proof| ProofResult::StorageProof { + hashed_address, + proof: storage_proof, + }); + + if sender + .send(ProofResultMessage { + sequence_number: seq, + result: result_msg, + elapsed: start_time.elapsed(), + state, + }) + .is_err() + { + trace!( target: "trie::proof_task", - ?path, - ?error, - "Failed to send blinded account node result" + worker_id, + hashed_address = ?hashed_address, + storage_proofs_processed, + "Proof result receiver dropped, discarding result" ); } - // send the tx back - let _ = tx_sender.send(ProofTaskMessage::Transaction(self)); + trace!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + proof_time_us = proof_elapsed.as_micros(), + total_processed = storage_proofs_processed, + "Storage proof completed" + ); } - /// Retrieves blinded storage node of the given account by path. - fn blinded_storage_node( - self, + /// Processes a blinded storage node lookup request. 
+ fn process_blinded_node( + worker_id: usize, + proof_tx: &ProofTaskTx, account: B256, path: Nibbles, result_sender: Sender, - tx_sender: Sender>, - ) { + storage_nodes_processed: &mut u64, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { trace!( target: "trie::proof_task", + worker_id, ?account, ?path, - "Starting blinded storage node retrieval" + "Processing blinded storage node" ); - let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); + let start = Instant::now(); + let result = proof_tx.process_blinded_storage_node(account, &path); + let elapsed = start.elapsed(); - let blinded_provider_factory = ProofTrieNodeProviderFactory::new( - trie_cursor_factory, - hashed_cursor_factory, - self.task_ctx.prefix_sets.clone(), - ); + *storage_nodes_processed += 1; + + if result_sender.send(result).is_err() { + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + storage_nodes_processed, + "Blinded storage node receiver dropped, discarding result" + ); + } - let start = Instant::now(); - let result = blinded_provider_factory.storage_node_provider(account).trie_node(&path); trace!( target: "trie::proof_task", + worker_id, ?account, ?path, - elapsed = ?start.elapsed(), - "Completed blinded storage node retrieval" + elapsed_us = elapsed.as_micros(), + total_processed = storage_nodes_processed, + "Blinded storage node completed" + ); + } +} + +/// Worker for account trie operations. +/// +/// Each worker maintains a dedicated database transaction and processes +/// account multiproof requests and blinded node lookups. +struct AccountProofWorker { + /// Shared task context with database factory and prefix sets + task_ctx: ProofTaskCtx, + /// Channel for receiving work + work_rx: CrossbeamReceiver, + /// Unique identifier for this worker (used for tracing) + worker_id: usize, + /// Channel for dispatching storage proof work + storage_work_tx: CrossbeamSender, + /// Counter tracking worker availability + available_workers: Arc, + /// Metrics collector for this worker + #[cfg(feature = "metrics")] + metrics: ProofTaskTrieMetrics, +} + +impl AccountProofWorker +where + Factory: DatabaseProviderROFactory, +{ + /// Creates a new account proof worker. + const fn new( + task_ctx: ProofTaskCtx, + work_rx: CrossbeamReceiver, + worker_id: usize, + storage_work_tx: CrossbeamSender, + available_workers: Arc, + #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, + ) -> Self { + Self { + task_ctx, + work_rx, + worker_id, + storage_work_tx, + available_workers, + #[cfg(feature = "metrics")] + metrics, + } + } + + /// Runs the worker loop, processing jobs until the channel closes. + /// + /// # Lifecycle + /// + /// 1. Initializes database provider and transaction + /// 2. Advertises availability + /// 3. Processes jobs in a loop: + /// - Receives job from channel + /// - Marks worker as busy + /// - Processes the job + /// - Marks worker as available + /// 4. Shuts down when channel closes + /// + /// # Panic Safety + /// + /// If this function panics, the worker thread terminates but other workers + /// continue operating and the system degrades gracefully. 
+ fn run(self) -> ProviderResult<()> { + let Self { + task_ctx, + work_rx, + worker_id, + storage_work_tx, + available_workers, + #[cfg(feature = "metrics")] + metrics, + } = self; + + // Create provider from factory + let provider = task_ctx.factory.database_provider_ro()?; + let proof_tx = ProofTaskTx::new(provider, worker_id); + + trace!( + target: "trie::proof_task", + worker_id, + "Account worker started" + ); + + let mut account_proofs_processed = 0u64; + let mut account_nodes_processed = 0u64; + + // Count this worker as available only after successful initialization. + available_workers.fetch_add(1, Ordering::Relaxed); + + while let Ok(job) = work_rx.recv() { + // Mark worker as busy. + available_workers.fetch_sub(1, Ordering::Relaxed); + + match job { + AccountWorkerJob::AccountMultiproof { input } => { + Self::process_account_multiproof( + worker_id, + &proof_tx, + storage_work_tx.clone(), + *input, + &mut account_proofs_processed, + ); + } + + AccountWorkerJob::BlindedAccountNode { path, result_sender } => { + Self::process_blinded_node( + worker_id, + &proof_tx, + path, + result_sender, + &mut account_nodes_processed, + ); + } + } + + // Mark worker as available again. + available_workers.fetch_add(1, Ordering::Relaxed); + } + + trace!( + target: "trie::proof_task", + worker_id, + account_proofs_processed, + account_nodes_processed, + "Account worker shutting down" + ); + + #[cfg(feature = "metrics")] + metrics.record_account_nodes(account_nodes_processed as usize); + + Ok(()) + } + + /// Processes an account multiproof request. + fn process_account_multiproof( + worker_id: usize, + proof_tx: &ProofTaskTx, + storage_work_tx: CrossbeamSender, + input: AccountMultiproofInput, + account_proofs_processed: &mut u64, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { + let AccountMultiproofInput { + targets, + mut prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys, + missed_leaves_storage_roots, + proof_result_sender: + ProofResultContext { sender: result_tx, sequence_number: seq, state, start_time: start }, + } = input; + + let span = debug_span!( + target: "trie::proof_task", + "Account multiproof calculation", + targets = targets.len(), + worker_id, + ); + let _span_guard = span.enter(); + + trace!( + target: "trie::proof_task", + "Processing account multiproof" + ); + + let proof_start = Instant::now(); + + let mut tracker = ParallelTrieTracker::default(); + + let mut storage_prefix_sets = std::mem::take(&mut prefix_sets.storage_prefix_sets); + + let storage_root_targets_len = + StorageRootTargets::count(&prefix_sets.account_prefix_set, &storage_prefix_sets); + + tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); + + let storage_proof_receivers = match dispatch_storage_proofs( + &storage_work_tx, + &targets, + &mut storage_prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys.as_ref(), + ) { + Ok(receivers) => receivers, + Err(error) => { + // Send error through result channel + error!(target: "trie::proof_task", "Failed to dispatch storage proofs: {error}"); + let _ = result_tx.send(ProofResultMessage { + sequence_number: seq, + result: Err(error), + elapsed: start.elapsed(), + state, + }); + return; + } + }; + + // Use the missed leaves cache passed from the multiproof manager + let account_prefix_set = std::mem::take(&mut prefix_sets.account_prefix_set); + + let ctx = AccountMultiproofParams { + targets: &targets, + prefix_set: account_prefix_set, + collect_branch_node_masks, + multi_added_removed_keys: 
multi_added_removed_keys.as_ref(), + storage_proof_receivers, + missed_leaves_storage_roots: missed_leaves_storage_roots.as_ref(), + }; + + let result = + build_account_multiproof_with_storage_roots(&proof_tx.provider, ctx, &mut tracker); + + let proof_elapsed = proof_start.elapsed(); + let total_elapsed = start.elapsed(); + let stats = tracker.finish(); + let result = result.map(|proof| ProofResult::AccountMultiproof { proof, stats }); + *account_proofs_processed += 1; + + // Send result to MultiProofTask + if result_tx + .send(ProofResultMessage { + sequence_number: seq, + result, + elapsed: total_elapsed, + state, + }) + .is_err() + { + trace!( + target: "trie::proof_task", + worker_id, + account_proofs_processed, + "Account multiproof receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + proof_time_us = proof_elapsed.as_micros(), + total_elapsed_us = total_elapsed.as_micros(), + total_processed = account_proofs_processed, + "Account multiproof completed" + ); + } + + /// Processes a blinded account node lookup request. + fn process_blinded_node( + worker_id: usize, + proof_tx: &ProofTaskTx, + path: Nibbles, + result_sender: Sender, + account_nodes_processed: &mut u64, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { + let span = debug_span!( + target: "trie::proof_task", + "Blinded account node calculation", + ?path, + worker_id, + ); + let _span_guard = span.enter(); + + trace!( + target: "trie::proof_task", + "Processing blinded account node" ); - if let Err(error) = result_sender.send(result) { - tracing::error!( + let start = Instant::now(); + let result = proof_tx.process_blinded_account_node(&path); + let elapsed = start.elapsed(); + + *account_nodes_processed += 1; + + if result_sender.send(result).is_err() { + trace!( target: "trie::proof_task", - ?account, + worker_id, ?path, - ?error, - "Failed to send blinded storage node result" + account_nodes_processed, + "Blinded account node receiver dropped, discarding result" ); } - // send the tx back - let _ = tx_sender.send(ProofTaskMessage::Transaction(self)); + trace!( + target: "trie::proof_task", + node_time_us = elapsed.as_micros(), + total_processed = account_nodes_processed, + "Blinded account node completed" + ); + } +} + +/// Builds an account multiproof by consuming storage proof receivers lazily during trie walk. +/// +/// This is a helper function used by account workers to build the account subtree proof +/// while storage proofs are still being computed. Receivers are consumed only when needed, +/// enabling interleaved parallelism between account trie traversal and storage proof computation. +/// +/// Returns a `DecodedMultiProof` containing the account subtree and storage proofs. +fn build_account_multiproof_with_storage_roots
<P>
( + provider: &P, + ctx: AccountMultiproofParams<'_>, + tracker: &mut ParallelTrieTracker, +) -> Result +where + P: TrieCursorFactory + HashedCursorFactory, +{ + let accounts_added_removed_keys = + ctx.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); + + // Create the walker. + let walker = TrieWalker::<_>::state_trie( + provider.account_trie_cursor().map_err(ProviderError::Database)?, + ctx.prefix_set, + ) + .with_added_removed_keys(accounts_added_removed_keys) + .with_deletions_retained(true); + + // Create a hash builder to rebuild the root node since it is not available in the database. + let retainer = ctx + .targets + .keys() + .map(Nibbles::unpack) + .collect::() + .with_added_removed_keys(accounts_added_removed_keys); + let mut hash_builder = HashBuilder::default() + .with_proof_retainer(retainer) + .with_updates(ctx.collect_branch_node_masks); + + // Initialize storage multiproofs map with pre-allocated capacity. + // Proofs will be inserted as they're consumed from receivers during trie walk. + let mut collected_decoded_storages: B256Map = + B256Map::with_capacity_and_hasher(ctx.targets.len(), Default::default()); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); + let mut account_node_iter = TrieNodeIter::state_trie( + walker, + provider.hashed_account_cursor().map_err(ProviderError::Database)?, + ); + + let mut storage_proof_receivers = ctx.storage_proof_receivers; + + while let Some(account_node) = account_node_iter.try_next().map_err(ProviderError::Database)? { + match account_node { + TrieElement::Branch(node) => { + hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); + } + TrieElement::Leaf(hashed_address, account) => { + let root = match storage_proof_receivers.remove(&hashed_address) { + Some(receiver) => { + // Block on this specific storage proof receiver - enables interleaved + // parallelism + let proof_msg = receiver.recv().map_err(|_| { + ParallelStateRootError::StorageRoot( + reth_execution_errors::StorageRootError::Database( + DatabaseError::Other(format!( + "Storage proof channel closed for {hashed_address}" + )), + ), + ) + })?; + + // Extract storage proof from the result + let proof = match proof_msg.result? { + ProofResult::StorageProof { hashed_address: addr, proof } => { + debug_assert_eq!( + addr, + hashed_address, + "storage worker must return same address: expected {hashed_address}, got {addr}" + ); + proof + } + ProofResult::AccountMultiproof { .. } => { + unreachable!("storage worker only sends StorageProof variant") + } + }; + + let root = proof.root; + collected_decoded_storages.insert(hashed_address, proof); + root + } + // Since we do not store all intermediate nodes in the database, there might + // be a possibility of re-adding a non-modified leaf to the hash builder. + None => { + tracker.inc_missed_leaves(); + + match ctx.missed_leaves_storage_roots.entry(hashed_address) { + dashmap::Entry::Occupied(occ) => *occ.get(), + dashmap::Entry::Vacant(vac) => { + let root = + StorageProof::new_hashed(provider, provider, hashed_address) + .with_prefix_set_mut(Default::default()) + .storage_multiproof( + ctx.targets + .get(&hashed_address) + .cloned() + .unwrap_or_default(), + ) + .map_err(|e| { + ParallelStateRootError::StorageRoot( + reth_execution_errors::StorageRootError::Database( + DatabaseError::Other(e.to_string()), + ), + ) + })? 
+ .root; + + vac.insert(root); + root + } + } + } + }; + + // Encode account + account_rlp.clear(); + let account = account.into_trie_account(root); + account.encode(&mut account_rlp as &mut dyn BufMut); + + hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); + } + } + } + + // Consume remaining storage proof receivers for accounts not encountered during trie walk. + for (hashed_address, receiver) in storage_proof_receivers { + if let Ok(proof_msg) = receiver.recv() { + // Extract storage proof from the result + if let Ok(ProofResult::StorageProof { proof, .. }) = proof_msg.result { + collected_decoded_storages.insert(hashed_address, proof); + } + } } + + let _ = hash_builder.root(); + + let account_subtree_raw_nodes = hash_builder.take_proof_nodes(); + let decoded_account_subtree = DecodedProofNodes::try_from(account_subtree_raw_nodes)?; + + let (branch_node_hash_masks, branch_node_tree_masks) = if ctx.collect_branch_node_masks { + let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); + ( + updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), + updated_branch_nodes.into_iter().map(|(path, node)| (path, node.tree_mask)).collect(), + ) + } else { + (Default::default(), Default::default()) + }; + + Ok(DecodedMultiProof { + account_subtree: decoded_account_subtree, + branch_node_hash_masks, + branch_node_tree_masks, + storages: collected_decoded_storages, + }) } +/// Queues storage proofs for all accounts in the targets and returns receivers. +/// +/// This function queues all storage proof tasks to the worker pool but returns immediately +/// with receivers, allowing the account trie walk to proceed in parallel with storage proof +/// computation. This enables interleaved parallelism for better performance. +/// +/// Propagates errors up if queuing fails. Receivers must be consumed by the caller. +fn dispatch_storage_proofs( + storage_work_tx: &CrossbeamSender, + targets: &MultiProofTargets, + storage_prefix_sets: &mut B256Map, + with_branch_node_masks: bool, + multi_added_removed_keys: Option<&Arc>, +) -> Result>, ParallelStateRootError> { + let mut storage_proof_receivers = + B256Map::with_capacity_and_hasher(targets.len(), Default::default()); + + // Dispatch all storage proofs to worker pool + for (hashed_address, target_slots) in targets.iter() { + let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); + + // Create channel for receiving ProofResultMessage + let (result_tx, result_rx) = crossbeam_channel::unbounded(); + let start = Instant::now(); + + // Create computation input (data only, no communication channel) + let input = StorageProofInput::new( + *hashed_address, + prefix_set, + target_slots.clone(), + with_branch_node_masks, + multi_added_removed_keys.cloned(), + ); + + // Always dispatch a storage proof so we obtain the storage root even when no slots are + // requested. + storage_work_tx + .send(StorageWorkerJob::StorageProof { + input, + proof_result_sender: ProofResultContext::new( + result_tx, + 0, + HashedPostState::default(), + start, + ), + }) + .map_err(|_| { + ParallelStateRootError::Other(format!( + "Failed to queue storage proof for {}: storage worker pool unavailable", + hashed_address + )) + })?; -/// This represents an input for a storage proof. + storage_proof_receivers.insert(*hashed_address, result_rx); + } + + Ok(storage_proof_receivers) +} +/// Input parameters for storage proof computation. 
#[derive(Debug)] pub struct StorageProofInput { /// The hashed address for which the proof is calculated. @@ -463,147 +1420,85 @@ impl StorageProofInput { } } } - -/// Data used for initializing cursor factories that is shared across all storage proof instances. +/// Input parameters for account multiproof computation. #[derive(Debug, Clone)] -pub struct ProofTaskCtx { - /// The sorted collection of cached in-memory intermediate trie nodes that can be reused for - /// computation. - nodes_sorted: Arc, - /// The sorted in-memory overlay hashed state. - state_sorted: Arc, - /// The collection of prefix sets for the computation. Since the prefix sets _always_ - /// invalidate the in-memory nodes, not all keys from `state_sorted` might be present here, - /// if we have cached nodes for them. - prefix_sets: Arc, -} - -impl ProofTaskCtx { - /// Creates a new [`ProofTaskCtx`] with the given sorted nodes and state. - pub const fn new( - nodes_sorted: Arc, - state_sorted: Arc, - prefix_sets: Arc, - ) -> Self { - Self { nodes_sorted, state_sorted, prefix_sets } - } -} - -/// Message used to communicate with [`ProofTaskManager`]. -#[derive(Debug)] -pub enum ProofTaskMessage { - /// A request to queue a proof task. - QueueTask(ProofTaskKind), - /// A returned database transaction. - Transaction(ProofTaskTx), - /// A request to terminate the proof task manager. - Terminate, -} - -/// Proof task kind. -/// -/// When queueing a task using [`ProofTaskMessage::QueueTask`], this enum -/// specifies the type of proof task to be executed. -#[derive(Debug)] -pub enum ProofTaskKind { - /// A storage proof request. - StorageProof(StorageProofInput, Sender), - /// A blinded account node request. - BlindedAccountNode(Nibbles, Sender), - /// A blinded storage node request. - BlindedStorageNode(B256, Nibbles, Sender), -} - -/// A handle that wraps a single proof task sender that sends a terminate message on `Drop` if the -/// number of active handles went to zero. -#[derive(Debug)] -pub struct ProofTaskManagerHandle { - /// The sender for the proof task manager. - sender: Sender>, - /// The number of active handles. - active_handles: Arc, -} - -impl ProofTaskManagerHandle { - /// Creates a new [`ProofTaskManagerHandle`] with the given sender. - pub fn new(sender: Sender>, active_handles: Arc) -> Self { - active_handles.fetch_add(1, Ordering::SeqCst); - Self { sender, active_handles } - } - - /// Queues a task to the proof task manager. - pub fn queue_task(&self, task: ProofTaskKind) -> Result<(), SendError>> { - self.sender.send(ProofTaskMessage::QueueTask(task)) - } - - /// Terminates the proof task manager. - pub fn terminate(&self) { - let _ = self.sender.send(ProofTaskMessage::Terminate); - } -} - -impl Clone for ProofTaskManagerHandle { - fn clone(&self) -> Self { - Self::new(self.sender.clone(), self.active_handles.clone()) - } -} - -impl Drop for ProofTaskManagerHandle { - fn drop(&mut self) { - // Decrement the number of active handles and terminate the manager if it was the last - // handle. - if self.active_handles.fetch_sub(1, Ordering::SeqCst) == 1 { - self.terminate(); - } - } +pub struct AccountMultiproofInput { + /// The targets for which to compute the multiproof. + pub targets: MultiProofTargets, + /// The prefix sets for the proof calculation. + pub prefix_sets: TriePrefixSets, + /// Whether or not to collect branch node masks. + pub collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. 
+ pub multi_added_removed_keys: Option>, + /// Cached storage proof roots for missed leaves encountered during account trie walk. + pub missed_leaves_storage_roots: Arc>, + /// Context for sending the proof result. + pub proof_result_sender: ProofResultContext, } -impl TrieNodeProviderFactory for ProofTaskManagerHandle { - type AccountNodeProvider = ProofTaskTrieNodeProvider; - type StorageNodeProvider = ProofTaskTrieNodeProvider; - - fn account_node_provider(&self) -> Self::AccountNodeProvider { - ProofTaskTrieNodeProvider::AccountNode { sender: self.sender.clone() } - } - - fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { - ProofTaskTrieNodeProvider::StorageNode { account, sender: self.sender.clone() } - } +/// Parameters for building an account multiproof with pre-computed storage roots. +struct AccountMultiproofParams<'a> { + /// The targets for which to compute the multiproof. + targets: &'a MultiProofTargets, + /// The prefix set for the account trie walk. + prefix_set: PrefixSet, + /// Whether or not to collect branch node masks. + collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + multi_added_removed_keys: Option<&'a Arc>, + /// Receivers for storage proofs being computed in parallel. + storage_proof_receivers: B256Map>, + /// Cached storage proof roots for missed leaves encountered during account trie walk. + missed_leaves_storage_roots: &'a DashMap, } -/// Trie node provider for retrieving trie nodes by path. +/// Internal message for account workers. #[derive(Debug)] -pub enum ProofTaskTrieNodeProvider { - /// Blinded account trie node provider. - AccountNode { - /// Sender to the proof task. - sender: Sender>, +enum AccountWorkerJob { + /// Account multiproof computation request + AccountMultiproof { + /// Account multiproof input parameters + input: Box, }, - /// Blinded storage trie node provider. - StorageNode { - /// Target account. - account: B256, - /// Sender to the proof task. - sender: Sender>, + /// Blinded account node retrieval request + BlindedAccountNode { + /// Path to the account node + path: Nibbles, + /// Channel to send result back to original caller + result_sender: Sender, }, } -impl TrieNodeProvider for ProofTaskTrieNodeProvider { - fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { - let (tx, rx) = channel(); - match self { - Self::AccountNode { sender } => { - let _ = sender.send(ProofTaskMessage::QueueTask( - ProofTaskKind::BlindedAccountNode(*path, tx), - )); - } - Self::StorageNode { sender, account } => { - let _ = sender.send(ProofTaskMessage::QueueTask( - ProofTaskKind::BlindedStorageNode(*account, *path, tx), - )); - } - } +#[cfg(test)] +mod tests { + use super::*; + use reth_provider::test_utils::create_test_provider_factory; + use tokio::{runtime::Builder, task}; - rx.recv().unwrap() + fn test_ctx(factory: Factory) -> ProofTaskCtx { + ProofTaskCtx::new(factory) + } + + /// Ensures `ProofWorkerHandle::new` spawns workers correctly. 
+ #[test] + fn spawn_proof_workers_creates_handle() { + let runtime = Builder::new_multi_thread().worker_threads(1).enable_all().build().unwrap(); + runtime.block_on(async { + let handle = tokio::runtime::Handle::current(); + let provider_factory = create_test_provider_factory(); + let factory = + reth_provider::providers::OverlayStateProviderFactory::new(provider_factory); + let ctx = test_ctx(factory); + + let proof_handle = ProofWorkerHandle::new(handle.clone(), ctx, 5, 3); + + // Verify handle can be cloned + let _cloned_handle = proof_handle.clone(); + + // Workers shut down automatically when handle is dropped + drop(proof_handle); + task::yield_now().await; + }); } } diff --git a/crates/trie/parallel/src/proof_task_metrics.rs b/crates/trie/parallel/src/proof_task_metrics.rs index cdb59d078d8..6492e28d12d 100644 --- a/crates/trie/parallel/src/proof_task_metrics.rs +++ b/crates/trie/parallel/src/proof_task_metrics.rs @@ -1,24 +1,5 @@ use reth_metrics::{metrics::Histogram, Metrics}; -/// Metrics for blinded node fetching for the duration of the proof task manager. -#[derive(Clone, Debug, Default)] -pub struct ProofTaskMetrics { - /// The actual metrics for blinded nodes. - pub task_metrics: ProofTaskTrieMetrics, - /// Count of blinded account node requests. - pub account_nodes: usize, - /// Count of blinded storage node requests. - pub storage_nodes: usize, -} - -impl ProofTaskMetrics { - /// Record the blinded node counts into the histograms. - pub fn record(&self) { - self.task_metrics.record_account_nodes(self.account_nodes); - self.task_metrics.record_storage_nodes(self.storage_nodes); - } -} - /// Metrics for the proof task. #[derive(Clone, Metrics)] #[metrics(scope = "trie.proof_task")] diff --git a/crates/trie/parallel/src/root.rs b/crates/trie/parallel/src/root.rs index 61d8f69a1d2..5c9294e8f92 100644 --- a/crates/trie/parallel/src/root.rs +++ b/crates/trie/parallel/src/root.rs @@ -5,22 +5,20 @@ use alloy_primitives::B256; use alloy_rlp::{BufMut, Encodable}; use itertools::Itertools; use reth_execution_errors::StorageRootError; -use reth_provider::{ - providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, -}; +use reth_provider::{DatabaseProviderROFactory, ProviderError}; use reth_storage_errors::db::DatabaseError; use reth_trie::{ - hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, + hashed_cursor::HashedCursorFactory, node_iter::{TrieElement, TrieNodeIter}, - trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, + prefix_set::TriePrefixSets, + trie_cursor::TrieCursorFactory, updates::TrieUpdates, walker::TrieWalker, - HashBuilder, Nibbles, StorageRoot, TrieInput, TRIE_ACCOUNT_RLP_MAX_SIZE, + HashBuilder, Nibbles, StorageRoot, TRIE_ACCOUNT_RLP_MAX_SIZE, }; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use std::{ collections::HashMap, - sync::{mpsc, Arc, OnceLock}, + sync::{mpsc, OnceLock}, time::Duration, }; use thiserror::Error; @@ -34,20 +32,15 @@ use tracing::*; /// nodes in the process. Upon encountering a leaf node, it will poll the storage root /// task for the corresponding hashed address. /// -/// Internally, the calculator uses [`ConsistentDbView`] since -/// it needs to rely on database state saying the same until -/// the last transaction is open. -/// See docs of using [`ConsistentDbView`] for caveats. -/// /// Note: This implementation only serves as a fallback for the sparse trie-based /// state root calculation. 
The sparse trie approach is more efficient as it avoids traversing /// the entire trie, only operating on the modified parts. #[derive(Debug)] pub struct ParallelStateRoot { - /// Consistent view of the database. - view: ConsistentDbView, - /// Trie input. - input: TrieInput, + /// Factory for creating state providers. + factory: Factory, + // Prefix sets indicating which portions of the trie need to be recomputed. + prefix_sets: TriePrefixSets, /// Parallel state root metrics. #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics, @@ -55,10 +48,10 @@ pub struct ParallelStateRoot { impl ParallelStateRoot { /// Create new parallel state root calculator. - pub fn new(view: ConsistentDbView, input: TrieInput) -> Self { + pub fn new(factory: Factory, prefix_sets: TriePrefixSets) -> Self { Self { - view, - input, + factory, + prefix_sets, #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics::default(), } @@ -67,7 +60,10 @@ impl ParallelStateRoot { impl ParallelStateRoot where - Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, + Factory: DatabaseProviderROFactory + + Clone + + Send + + 'static, { /// Calculate incremental state root in parallel. pub fn incremental_root(self) -> Result { @@ -88,12 +84,12 @@ where retain_updates: bool, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { let mut tracker = ParallelTrieTracker::default(); - let trie_nodes_sorted = Arc::new(self.input.nodes.into_sorted()); - let hashed_state_sorted = Arc::new(self.input.state.into_sorted()); - let prefix_sets = self.input.prefix_sets.freeze(); let storage_root_targets = StorageRootTargets::new( - prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())), - prefix_sets.storage_prefix_sets, + self.prefix_sets + .account_prefix_set + .iter() + .map(|nibbles| B256::from_slice(&nibbles.pack())), + self.prefix_sets.storage_prefix_sets, ); // Pre-calculate storage roots in parallel for accounts which were changed. 
@@ -107,9 +103,7 @@ where for (hashed_address, prefix_set) in storage_root_targets.into_iter().sorted_unstable_by_key(|(address, _)| *address) { - let view = self.view.clone(); - let hashed_state_sorted = hashed_state_sorted.clone(); - let trie_nodes_sorted = trie_nodes_sorted.clone(); + let factory = self.factory.clone(); #[cfg(feature = "metrics")] let metrics = self.metrics.storage_trie.clone(); @@ -118,18 +112,10 @@ where // Spawn a blocking task to calculate account's storage root from database I/O drop(handle.spawn_blocking(move || { let result = (|| -> Result<_, ParallelStateRootError> { - let provider_ro = view.provider_ro()?; - let trie_cursor_factory = InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), - &trie_nodes_sorted, - ); - let hashed_state = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - &hashed_state_sorted, - ); + let provider = factory.database_provider_ro()?; Ok(StorageRoot::new_hashed( - trie_cursor_factory, - hashed_state, + &provider, + &provider, hashed_address, prefix_set, #[cfg(feature = "metrics")] @@ -145,24 +131,16 @@ where trace!(target: "trie::parallel_state_root", "calculating state root"); let mut trie_updates = TrieUpdates::default(); - let provider_ro = self.view.provider_ro()?; - let trie_cursor_factory = InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), - &trie_nodes_sorted, - ); - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - &hashed_state_sorted, - ); + let provider = self.factory.database_provider_ro()?; let walker = TrieWalker::<_>::state_trie( - trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, - prefix_sets.account_prefix_set, + provider.account_trie_cursor().map_err(ProviderError::Database)?, + self.prefix_sets.account_prefix_set, ) .with_deletions_retained(retain_updates); let mut account_node_iter = TrieNodeIter::state_trie( walker, - hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, + provider.hashed_account_cursor().map_err(ProviderError::Database)?, ); let mut hash_builder = HashBuilder::default().with_updates(retain_updates); @@ -186,8 +164,8 @@ where None => { tracker.inc_missed_leaves(); StorageRoot::new_hashed( - trie_cursor_factory.clone(), - hashed_cursor_factory.clone(), + &provider, + &provider, hashed_address, Default::default(), #[cfg(feature = "metrics")] @@ -223,7 +201,7 @@ where let root = hash_builder.root(); let removed_keys = account_node_iter.walker.take_removed_keys(); - trie_updates.finalize(hash_builder, removed_keys, prefix_sets.destroyed_accounts); + trie_updates.finalize(hash_builder, removed_keys, self.prefix_sets.destroyed_accounts); let stats = tracker.finish(); @@ -306,11 +284,13 @@ mod tests { use reth_primitives_traits::{Account, StorageEntry}; use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; use reth_trie::{test_utils, HashedPostState, HashedStorage}; + use std::sync::Arc; #[tokio::test] async fn random_parallel_root() { let factory = create_test_provider_factory(); - let consistent_view = ConsistentDbView::new(factory.clone(), None); + let mut overlay_factory = + reth_provider::providers::OverlayStateProviderFactory::new(factory.clone()); let mut rng = rand::rng(); let mut state = (0..100) @@ -353,7 +333,7 @@ mod tests { } assert_eq!( - ParallelStateRoot::new(consistent_view.clone(), Default::default()) + 
ParallelStateRoot::new(overlay_factory.clone(), Default::default()) .incremental_root() .unwrap(), test_utils::state_root(state.clone()) @@ -384,8 +364,12 @@ mod tests { } } + let prefix_sets = hashed_state.construct_prefix_sets(); + overlay_factory = + overlay_factory.with_hashed_state_overlay(Some(Arc::new(hashed_state.into_sorted()))); + assert_eq!( - ParallelStateRoot::new(consistent_view, TrieInput::from_state(hashed_state)) + ParallelStateRoot::new(overlay_factory, prefix_sets.freeze()) .incremental_root() .unwrap(), test_utils::state_root(state) diff --git a/crates/trie/parallel/src/storage_root_targets.rs b/crates/trie/parallel/src/storage_root_targets.rs index f844b70fca5..0c6d9f43498 100644 --- a/crates/trie/parallel/src/storage_root_targets.rs +++ b/crates/trie/parallel/src/storage_root_targets.rs @@ -24,6 +24,23 @@ impl StorageRootTargets { .collect(), ) } + + /// Returns the total number of unique storage root targets without allocating new maps. + pub fn count( + account_prefix_set: &PrefixSet, + storage_prefix_sets: &B256Map, + ) -> usize { + let mut count = storage_prefix_sets.len(); + + for nibbles in account_prefix_set { + let hashed_address = B256::from_slice(&nibbles.pack()); + if !storage_prefix_sets.contains_key(&hashed_address) { + count += 1; + } + } + + count + } } impl IntoIterator for StorageRootTargets { diff --git a/crates/trie/sparse-parallel/src/lower.rs b/crates/trie/sparse-parallel/src/lower.rs index 449c3a7b29b..b7eceb133b8 100644 --- a/crates/trie/sparse-parallel/src/lower.rs +++ b/crates/trie/sparse-parallel/src/lower.rs @@ -106,4 +106,26 @@ impl LowerSparseSubtrie { Self::Revealed(_) | Self::Blind(_) => None, } } + + /// Shrinks the capacity of the subtrie's node storage. + /// Works for both revealed and blind tries with allocated storage. + pub(crate) fn shrink_nodes_to(&mut self, size: usize) { + match self { + Self::Revealed(trie) | Self::Blind(Some(trie)) => { + trie.shrink_nodes_to(size); + } + Self::Blind(None) => {} + } + } + + /// Shrinks the capacity of the subtrie's value storage. + /// Works for both revealed and blind tries with allocated storage. + pub(crate) fn shrink_values_to(&mut self, size: usize) { + match self { + Self::Revealed(trie) | Self::Blind(Some(trie)) => { + trie.shrink_values_to(size); + } + Self::Blind(None) => {} + } + } } diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index d973d705de2..133cdfece4c 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -623,51 +623,19 @@ impl SparseTrieInterface for ParallelSparseTrie { "Branch node has only one child", ); - let remaining_child_subtrie = self.subtrie_for_path_mut(&remaining_child_path); - // If the remaining child node is not yet revealed then we have to reveal it here, // otherwise it's not possible to know how to collapse the branch. - let remaining_child_node = - match remaining_child_subtrie.nodes.get(&remaining_child_path).unwrap() { - SparseNode::Hash(_) => { - debug!( - target: "trie::parallel_sparse", - child_path = ?remaining_child_path, - leaf_full_path = ?full_path, - "Branch node child not revealed in remove_leaf, falling back to db", - ); - if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.trie_node(&remaining_child_path)? 
- { - let decoded = TrieNode::decode(&mut &node[..])?; - trace!( - target: "trie::parallel_sparse", - ?remaining_child_path, - ?decoded, - ?tree_mask, - ?hash_mask, - "Revealing remaining blinded branch child" - ); - remaining_child_subtrie.reveal_node( - remaining_child_path, - &decoded, - TrieMasks { hash_mask, tree_mask }, - )?; - remaining_child_subtrie.nodes.get(&remaining_child_path).unwrap() - } else { - return Err(SparseTrieErrorKind::NodeNotFoundInProvider { - path: remaining_child_path, - } - .into()) - } - } - node => node, - }; + let remaining_child_node = self.reveal_remaining_child_on_leaf_removal( + provider, + full_path, + &remaining_child_path, + true, // recurse_into_extension + )?; let (new_branch_node, remove_child) = Self::branch_changes_on_leaf_removal( branch_path, &remaining_child_path, - remaining_child_node, + &remaining_child_node, ); if remove_child { @@ -720,6 +688,7 @@ impl SparseTrieInterface for ParallelSparseTrie { Ok(()) } + #[instrument(level = "trace", target = "trie::sparse::parallel", skip(self))] fn root(&mut self) -> B256 { trace!(target: "trie::parallel_sparse", "Calculating trie root hash"); @@ -735,6 +704,7 @@ impl SparseTrieInterface for ParallelSparseTrie { root_rlp.as_hash().unwrap_or(EMPTY_ROOT_HASH) } + #[instrument(level = "trace", target = "trie::sparse::parallel", skip(self))] fn update_subtrie_hashes(&mut self) { trace!(target: "trie::parallel_sparse", "Updating subtrie hashes"); @@ -773,13 +743,24 @@ impl SparseTrieInterface for ParallelSparseTrie { // Update subtrie hashes in parallel { use rayon::iter::{IntoParallelIterator, ParallelIterator}; + use tracing::debug_span; + let (tx, rx) = mpsc::channel(); let branch_node_tree_masks = &self.branch_node_tree_masks; let branch_node_hash_masks = &self.branch_node_hash_masks; + let span = tracing::Span::current(); changed_subtries .into_par_iter() .map(|mut changed_subtrie| { + let _enter = debug_span!( + target: "trie::parallel_sparse", + parent: span.clone(), + "subtrie", + index = changed_subtrie.index + ) + .entered(); + #[cfg(feature = "metrics")] let start = std::time::Instant::now(); changed_subtrie.subtrie.update_hashes( @@ -894,6 +875,42 @@ impl SparseTrieInterface for ParallelSparseTrie { } } } + + fn shrink_nodes_to(&mut self, size: usize) { + // Distribute the capacity across upper and lower subtries + // + // Always include upper subtrie, plus any lower subtries + let total_subtries = 1 + NUM_LOWER_SUBTRIES; + let size_per_subtrie = size / total_subtries; + + // Shrink the upper subtrie + self.upper_subtrie.shrink_nodes_to(size_per_subtrie); + + // Shrink lower subtries (works for both revealed and blind with allocation) + for subtrie in &mut self.lower_subtries { + subtrie.shrink_nodes_to(size_per_subtrie); + } + + // shrink masks maps + self.branch_node_hash_masks.shrink_to(size); + self.branch_node_tree_masks.shrink_to(size); + } + + fn shrink_values_to(&mut self, size: usize) { + // Distribute the capacity across upper and lower subtries + // + // Always include upper subtrie, plus any lower subtries + let total_subtries = 1 + NUM_LOWER_SUBTRIES; + let size_per_subtrie = size / total_subtries; + + // Shrink the upper subtrie + self.upper_subtrie.shrink_values_to(size_per_subtrie); + + // Shrink lower subtries (works for both revealed and blind with allocation) + for subtrie in &mut self.lower_subtries { + subtrie.shrink_values_to(size_per_subtrie); + } + } } impl ParallelSparseTrie { @@ -1228,8 +1245,93 @@ impl ParallelSparseTrie { } } + /// Called when a leaf is removed on a 
branch which has only one other remaining child. That + /// child must be revealed in order to properly collapse the branch. + /// + /// If `recurse_into_extension` is true, and the remaining child is an extension node, then its + /// child will be ensured to be revealed as well. + /// + /// ## Returns + /// + /// The node of the remaining child, whether it was already revealed or not. + fn reveal_remaining_child_on_leaf_removal( + &mut self, + provider: P, + full_path: &Nibbles, // only needed for logs + remaining_child_path: &Nibbles, + recurse_into_extension: bool, + ) -> SparseTrieResult { + let remaining_child_subtrie = self.subtrie_for_path_mut(remaining_child_path); + + let remaining_child_node = + match remaining_child_subtrie.nodes.get(remaining_child_path).unwrap() { + SparseNode::Hash(_) => { + debug!( + target: "trie::parallel_sparse", + child_path = ?remaining_child_path, + leaf_full_path = ?full_path, + "Node child not revealed in remove_leaf, falling back to db", + ); + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.trie_node(remaining_child_path)? + { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::parallel_sparse", + ?remaining_child_path, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing remaining blinded branch child" + ); + remaining_child_subtrie.reveal_node( + *remaining_child_path, + &decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + remaining_child_subtrie.nodes.get(remaining_child_path).unwrap().clone() + } else { + return Err(SparseTrieErrorKind::NodeNotFoundInProvider { + path: *remaining_child_path, + } + .into()) + } + } + node => node.clone(), + }; + + // If `recurse_into_extension` is true, and the remaining child is an extension node, then + // its child will be ensured to be revealed as well. This is required for generation of + // trie updates; without revealing the grandchild branch it's not always possible to know + // if the tree mask bit should be set for the child extension on its parent branch. + if let SparseNode::Extension { key, .. } = &remaining_child_node && + recurse_into_extension + { + let mut remaining_grandchild_path = *remaining_child_path; + remaining_grandchild_path.extend(key); + + trace!( + target: "trie::parallel_sparse", + remaining_grandchild_path = ?remaining_grandchild_path, + child_path = ?remaining_child_path, + leaf_full_path = ?full_path, + "Revealing child of extension node, which is the last remaining child of the branch" + ); + + self.reveal_remaining_child_on_leaf_removal( + provider, + full_path, + &remaining_grandchild_path, + false, // recurse_into_extension + )?; + } + + Ok(remaining_child_node) + } + /// Drains any [`SparseTrieUpdatesAction`]s from the given subtrie, and applies each action to /// the given `updates` set. If the given set is None then this is a no-op. + #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all)] fn apply_subtrie_update_actions( &mut self, update_actions: impl Iterator, @@ -1331,6 +1433,7 @@ impl ParallelSparseTrie { /// /// IMPORTANT: The method removes the subtries from `lower_subtries`, and the caller is /// responsible for returning them back into the array. 
+ #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, fields(prefix_set_len = prefix_set.len()))] fn take_changed_lower_subtries( &mut self, prefix_set: &mut PrefixSet, @@ -1487,6 +1590,7 @@ impl ParallelSparseTrie { /// Return updated subtries back to the trie after executing any actions required on the /// top-level `SparseTrieUpdates`. + #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all)] fn insert_changed_subtries( &mut self, changed_subtries: impl IntoIterator, @@ -2025,6 +2129,16 @@ impl SparseSubtrie { self.nodes.clear(); self.inner.clear(); } + + /// Shrinks the capacity of the subtrie's node storage. + pub(crate) fn shrink_nodes_to(&mut self, size: usize) { + self.nodes.shrink_to(size); + } + + /// Shrinks the capacity of the subtrie's value storage. + pub(crate) fn shrink_values_to(&mut self, size: usize) { + self.inner.values.shrink_to(size); + } } /// Helper type for [`SparseSubtrie`] to mutably access only a subset of fields from the original @@ -2480,10 +2594,19 @@ impl SparseSubtrieBuffers { /// Clears all buffers. fn clear(&mut self) { self.path_stack.clear(); + self.path_stack.shrink_to_fit(); + self.rlp_node_stack.clear(); + self.rlp_node_stack.shrink_to_fit(); + self.branch_child_buf.clear(); + self.branch_child_buf.shrink_to_fit(); + self.branch_value_stack_buf.clear(); + self.branch_value_stack_buf.shrink_to_fit(); + self.rlp_buf.clear(); + self.rlp_buf.shrink_to_fit(); } } @@ -4076,6 +4199,185 @@ mod tests { ); } + #[test] + fn test_remove_leaf_remaining_extension_node_child_is_revealed() { + let branch_path = Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7]); + let removed_branch_path = Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2]); + + // Convert the logs into reveal_nodes call on a fresh ParallelSparseTrie + let nodes = vec![ + // Branch at 0x4f8807 + RevealedSparseNode { + path: branch_path, + node: { + TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::from(hex!( + "dede882d52f0e0eddfb5b89293a10c87468b4a73acd0d4ae550054a92353f6d5" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "8746f18e465e2eed16117306b6f2eef30bc9d2978aee4a7838255e39c41a3222" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "35a4ea861548af5f0262a9b6d619b4fc88fce6531cbd004eab1530a73f34bbb1" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "47d5c2bf9eea5c1ee027e4740c2b86159074a27d52fd2f6a8a8c86c77e48006f" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "eb76a359b216e1d86b1f2803692a9fe8c3d3f97a9fe6a82b396e30344febc0c1" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "437656f2697f167b23e33cb94acc8550128cfd647fc1579d61e982cb7616b8bc" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "45a1ac2faf15ea8a4da6f921475974e0379f39c3d08166242255a567fa88ce6c" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "7dbb299d714d3dfa593f53bc1b8c66d5c401c30a0b5587b01254a56330361395" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "ae407eb14a74ed951c9949c1867fb9ee9ba5d5b7e03769eaf3f29c687d080429" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "768d0fe1003f0e85d3bc76e4a1fa0827f63b10ca9bca52d56c2b1cceb8eb8b08" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "e5127935143493d5094f4da6e4f7f5a0f62d524fbb61e7bb9fb63d8a166db0f3" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "7f3698297308664fbc1b9e2c41d097fbd57d8f364c394f6ad7c71b10291fbf42" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "4a2bc7e19cec63cb5ef5754add0208959b50bcc79f13a22a370f77b277dbe6db" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "40764b8c48de59258e62a3371909a107e76e1b5e847cfa94dbc857e9fd205103" 
+ ))), + RlpNode::word_rlp(&B256::from(hex!( + "2985dca29a7616920d95c43ab62eb013a40e6a0c88c284471e4c3bd22f3b9b25" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "1b6511f7a385e79477239f7dd4a49f52082ecac05aa5bd0de18b1d55fe69d10c" + ))), + ], + TrieMask::new(0b1111111111111111), + )) + }, + masks: TrieMasks { + hash_mask: Some(TrieMask::new(0b1111111111111111)), + tree_mask: Some(TrieMask::new(0b0011110100100101)), + }, + }, + // Branch at 0x4f88072 + RevealedSparseNode { + path: removed_branch_path, + node: { + let stack = vec![ + RlpNode::word_rlp(&B256::from(hex!( + "15fd4993a41feff1af3b629b32572ab05acddd97c681d82ec2eb89c8a8e3ab9e" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "a272b0b94ced4e6ec7adb41719850cf4a167ad8711d0dda6a810d129258a0d94" + ))), + ]; + let branch_node = BranchNode::new(stack, TrieMask::new(0b0001000000000100)); + TrieNode::Branch(branch_node) + }, + masks: TrieMasks { + hash_mask: Some(TrieMask::new(0b0000000000000000)), + tree_mask: Some(TrieMask::new(0b0000000000000100)), + }, + }, + // Extension at 0x4f880722 + RevealedSparseNode { + path: Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0x2]), + node: { + let extension_node = ExtensionNode::new( + Nibbles::from_nibbles([0x6]), + RlpNode::word_rlp(&B256::from(hex!( + "56fab2b106a97eae9c7197f86d03bca292da6e0ac725b783082f7d950cc4e0fc" + ))), + ); + TrieNode::Extension(extension_node) + }, + masks: TrieMasks { hash_mask: None, tree_mask: None }, + }, + // Leaf at 0x4f88072c + RevealedSparseNode { + path: Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0xc]), + node: { + let leaf_node = LeafNode::new( + Nibbles::from_nibbles([ + 0x0, 0x7, 0x7, 0xf, 0x8, 0x6, 0x6, 0x1, 0x3, 0x0, 0x8, 0x8, 0xd, 0xf, + 0xc, 0xa, 0xe, 0x6, 0x4, 0x8, 0xa, 0xb, 0xe, 0x8, 0x3, 0x1, 0xf, 0xa, + 0xd, 0xc, 0xa, 0x5, 0x5, 0xa, 0xd, 0x4, 0x3, 0xa, 0xb, 0x1, 0x6, 0x5, + 0xd, 0x1, 0x6, 0x8, 0x0, 0xd, 0xd, 0x5, 0x6, 0x7, 0xb, 0x5, 0xd, 0x6, + ]), + hex::decode("8468d3971d").unwrap(), + ); + TrieNode::Leaf(leaf_node) + }, + masks: TrieMasks { hash_mask: None, tree_mask: None }, + }, + ]; + + // Create a fresh ParallelSparseTrie + let mut trie = ParallelSparseTrie::from_root( + TrieNode::Extension(ExtensionNode::new( + Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7]), + RlpNode::word_rlp(&B256::from(hex!( + "56fab2b106a97eae9c7197f86d03bca292da6e0ac725b783082f7d950cc4e0fc" + ))), + )), + TrieMasks::none(), + true, + ) + .unwrap(); + + // Call reveal_nodes + trie.reveal_nodes(nodes).unwrap(); + + // Remove the leaf at "0x4f88072c077f86613088dfcae648abe831fadca55ad43ab165d1680dd567b5d6" + let leaf_key = Nibbles::from_nibbles([ + 0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0xc, 0x0, 0x7, 0x7, 0xf, 0x8, 0x6, 0x6, 0x1, 0x3, + 0x0, 0x8, 0x8, 0xd, 0xf, 0xc, 0xa, 0xe, 0x6, 0x4, 0x8, 0xa, 0xb, 0xe, 0x8, 0x3, 0x1, + 0xf, 0xa, 0xd, 0xc, 0xa, 0x5, 0x5, 0xa, 0xd, 0x4, 0x3, 0xa, 0xb, 0x1, 0x6, 0x5, 0xd, + 0x1, 0x6, 0x8, 0x0, 0xd, 0xd, 0x5, 0x6, 0x7, 0xb, 0x5, 0xd, 0x6, + ]); + + let mut provider = MockTrieNodeProvider::new(); + let revealed_branch = create_branch_node_with_children(&[], []); + let mut encoded = Vec::new(); + revealed_branch.encode(&mut encoded); + provider.add_revealed_node( + Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0x2, 0x6]), + RevealedNode { + node: encoded.into(), + tree_mask: None, + // Give it a fake hashmask so that it appears like it will be stored in the db + hash_mask: Some(TrieMask::new(0b1111)), + }, + ); + + trie.remove_leaf(&leaf_key, provider).unwrap(); + + // Calculate root so that updates are 
calculated. + trie.root(); + + // Take updates and assert they are correct + let updates = trie.take_updates(); + assert_eq!( + updates.removed_nodes.into_iter().collect::>(), + vec![removed_branch_path] + ); + assert_eq!(updates.updated_nodes.len(), 1); + let updated_node = updates.updated_nodes.get(&branch_path).unwrap(); + + // Second bit must be set, indicating that the extension's child is in the db + assert_eq!(updated_node.tree_mask, TrieMask::new(0b011110100100101),) + } + #[test] fn test_parallel_sparse_trie_root() { // Step 1: Create the trie structure @@ -4764,12 +5066,15 @@ mod tests { state.clone(), trie_cursor.account_trie_cursor().unwrap(), Default::default(), - state.keys().copied().collect::>(), + state.keys().copied(), ); + // Extract account nodes before moving hash_builder_updates + let hash_builder_account_nodes = hash_builder_updates.account_nodes.clone(); + // Write trie updates to the database let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.write_trie_updates(&hash_builder_updates).unwrap(); + provider_rw.write_trie_updates(hash_builder_updates).unwrap(); provider_rw.commit().unwrap(); // Assert that the sparse trie root matches the hash builder root @@ -4777,7 +5082,7 @@ mod tests { // Assert that the sparse trie updates match the hash builder updates pretty_assertions::assert_eq!( BTreeMap::from_iter(sparse_updates.updated_nodes), - BTreeMap::from_iter(hash_builder_updates.account_nodes) + BTreeMap::from_iter(hash_builder_account_nodes) ); // Assert that the sparse trie nodes match the hash builder proof nodes assert_eq_parallel_sparse_trie_proof_nodes( @@ -4809,12 +5114,15 @@ mod tests { .iter() .map(|nibbles| B256::from_slice(&nibbles.pack())) .collect(), - state.keys().copied().collect::>(), + state.keys().copied(), ); + // Extract account nodes before moving hash_builder_updates + let hash_builder_account_nodes = hash_builder_updates.account_nodes.clone(); + // Write trie updates to the database let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.write_trie_updates(&hash_builder_updates).unwrap(); + provider_rw.write_trie_updates(hash_builder_updates).unwrap(); provider_rw.commit().unwrap(); // Assert that the sparse trie root matches the hash builder root @@ -4822,7 +5130,7 @@ mod tests { // Assert that the sparse trie updates match the hash builder updates pretty_assertions::assert_eq!( BTreeMap::from_iter(sparse_updates.updated_nodes), - BTreeMap::from_iter(hash_builder_updates.account_nodes) + BTreeMap::from_iter(hash_builder_account_nodes) ); // Assert that the sparse trie nodes match the hash builder proof nodes assert_eq_parallel_sparse_trie_proof_nodes( diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 6fac7c5faad..b2c7ee0f566 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-primitives-traits.workspace = true reth-execution-errors.workspace = true reth-trie-common.workspace = true -tracing.workspace = true +tracing = { workspace = true, features = ["attributes"] } alloy-trie.workspace = true # alloy diff --git a/crates/trie/sparse/src/metrics.rs b/crates/trie/sparse/src/metrics.rs index 430a831a2f7..8dc64ddc599 100644 --- a/crates/trie/sparse/src/metrics.rs +++ b/crates/trie/sparse/src/metrics.rs @@ -16,7 +16,7 @@ pub(crate) struct SparseStateTrieMetrics { /// Number of total storage nodes, including those that were skipped. 
pub(crate) multiproof_total_storage_nodes: u64, /// The actual metrics we will record into the histogram - pub(crate) histograms: SparseStateTrieHistograms, + pub(crate) histograms: SparseStateTrieInnerMetrics, } impl SparseStateTrieMetrics { @@ -61,7 +61,7 @@ impl SparseStateTrieMetrics { /// Metrics for the sparse state trie #[derive(Metrics)] #[metrics(scope = "sparse_state_trie")] -pub(crate) struct SparseStateTrieHistograms { +pub(crate) struct SparseStateTrieInnerMetrics { /// Histogram of account nodes that were skipped during a multiproof reveal due to being /// redundant (i.e. they were already revealed) pub(crate) multiproof_skipped_account_nodes: Histogram, diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index fde4810da57..f142385c3cd 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -18,7 +18,7 @@ use reth_trie_common::{ DecodedMultiProof, DecodedStorageMultiProof, MultiProof, Nibbles, RlpNode, StorageMultiProof, TrieAccount, TrieMask, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE, }; -use tracing::trace; +use tracing::{instrument, trace}; /// Provides type-safe re-use of cleared [`SparseStateTrie`]s, which helps to save allocations /// across payload runs. @@ -43,6 +43,32 @@ where Self(trie) } + /// Shrink the cleared sparse trie's capacity to the given node and value size. + /// This helps reduce memory usage when the trie has excess capacity. + /// The capacity is distributed equally across the account trie and all storage tries. + pub fn shrink_to(&mut self, node_size: usize, value_size: usize) { + // Count total number of storage tries (active + cleared + default) + let storage_tries_count = self.0.storage.tries.len() + self.0.storage.cleared_tries.len(); + + // Total tries = 1 account trie + all storage tries + let total_tries = 1 + storage_tries_count; + + // Distribute capacity equally among all tries + let node_size_per_trie = node_size / total_tries; + let value_size_per_trie = value_size / total_tries; + + // Shrink the account trie + self.0.state.shrink_nodes_to(node_size_per_trie); + self.0.state.shrink_values_to(value_size_per_trie); + + // Give storage tries the remaining capacity after account trie allocation + let storage_node_size = node_size.saturating_sub(node_size_per_trie); + let storage_value_size = value_size.saturating_sub(value_size_per_trie); + + // Shrink all storage tries (they will redistribute internally) + self.0.storage.shrink_to(storage_node_size, storage_value_size); + } + /// Returns the cleared [`SparseStateTrie`], consuming this instance. pub fn into_inner(self) -> SparseStateTrie { self.0 @@ -208,6 +234,14 @@ where /// Reveal unknown trie paths from decoded multiproof. /// NOTE: This method does not extensively validate the proof. + #[instrument( + target = "trie::sparse", + skip_all, + fields( + account_nodes = multiproof.account_subtree.len(), + storages = multiproof.storages.len() + ) + )] pub fn reveal_decoded_multiproof( &mut self, multiproof: DecodedMultiProof, @@ -532,6 +566,7 @@ where /// Calculates the hashes of subtries. /// /// If the trie has not been revealed, this function does nothing. + #[instrument(target = "trie::sparse", skip_all)] pub fn calculate_subtries(&mut self) { if let SparseTrie::Revealed(trie) = &mut self.state { trie.update_subtrie_hashes(); @@ -584,6 +619,7 @@ where } /// Returns sparse trie root and trie updates if the trie has been revealed. 
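The capacity split in `shrink_to` above is plain integer division: every trie gets an equal share, the account trie takes one share, and the storage tries redistribute the remainder among themselves the same way. A small self-contained illustration of that arithmetic (the counts are made up):

fn shrink_distribution_sketch() {
    let node_size = 12_000usize; // requested total node capacity
    let storage_tries_count = 5; // active + cleared storage tries
    let total_tries = 1 + storage_tries_count; // plus the account trie

    // The account trie takes one equal share...
    let node_size_per_trie = node_size / total_tries;
    assert_eq!(node_size_per_trie, 2_000);

    // ...and the storage tries split whatever is left between themselves.
    let storage_node_size = node_size.saturating_sub(node_size_per_trie);
    assert_eq!(storage_node_size, 10_000);
}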
+ #[instrument(target = "trie::sparse", skip_all)] pub fn root_with_updates( &mut self, provider_factory: impl TrieNodeProviderFactory, @@ -679,6 +715,7 @@ where /// /// Returns false if the new account info and storage trie are empty, indicating the account /// leaf should be removed. + #[instrument(level = "trace", target = "trie::sparse", skip_all)] pub fn update_account( &mut self, address: B256, @@ -721,6 +758,7 @@ where /// /// Returns false if the new storage root is empty, and the account info was already empty, /// indicating the account leaf should be removed. + #[instrument(target = "trie::sparse", skip_all)] pub fn update_account_storage_root( &mut self, address: B256, @@ -768,6 +806,7 @@ where } /// Remove the account leaf node. + #[instrument(target = "trie::sparse", skip_all)] pub fn remove_account_leaf( &mut self, path: &Nibbles, @@ -821,6 +860,31 @@ impl StorageTries { set })); } + + /// Shrinks the capacity of all storage tries (active, cleared, and default) to the given sizes. + /// The capacity is distributed equally among all tries that have allocations. + fn shrink_to(&mut self, node_size: usize, value_size: usize) { + // Count total number of tries with capacity (active + cleared + default) + let active_count = self.tries.len(); + let cleared_count = self.cleared_tries.len(); + let total_tries = 1 + active_count + cleared_count; + + // Distribute capacity equally among all tries + let node_size_per_trie = node_size / total_tries; + let value_size_per_trie = value_size / total_tries; + + // Shrink active storage tries + for trie in self.tries.values_mut() { + trie.shrink_nodes_to(node_size_per_trie); + trie.shrink_values_to(value_size_per_trie); + } + + // Shrink cleared storage tries + for trie in &mut self.cleared_tries { + trie.shrink_nodes_to(node_size_per_trie); + trie.shrink_values_to(value_size_per_trie); + } + } } impl StorageTries { diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 300ac39c1b6..308695ec0fd 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -222,6 +222,14 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { /// /// This is useful for reusing the trie without needing to reallocate memory. fn clear(&mut self); + + /// Shrink the capacity of the sparse trie's node storage to the given size. + /// This will reduce memory usage if the current capacity is higher than the given size. + fn shrink_nodes_to(&mut self, size: usize); + + /// Shrink the capacity of the sparse trie's value storage to the given size. + /// This will reduce memory usage if the current capacity is higher than the given size. + fn shrink_values_to(&mut self, size: usize); } /// Struct for passing around branch node mask information. diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 76dadc8fc9c..500b642cd1e 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -24,7 +24,7 @@ use reth_trie_common::{ TrieNode, CHILD_INDEX_RANGE, EMPTY_ROOT_HASH, }; use smallvec::SmallVec; -use tracing::{debug, trace}; +use tracing::{debug, instrument, trace}; /// The level below which the sparse trie hashes are calculated in /// [`SerialSparseTrie::update_subtrie_hashes`]. @@ -230,6 +230,7 @@ impl SparseTrie { /// # Errors /// /// Returns an error if the trie is still blind, or if the update fails. 
+ #[instrument(level = "trace", target = "trie::sparse", skip_all)] pub fn update_leaf( &mut self, path: Nibbles, @@ -246,6 +247,7 @@ impl SparseTrie { /// # Errors /// /// Returns an error if the trie is still blind, or if the leaf cannot be removed + #[instrument(level = "trace", target = "trie::sparse", skip_all)] pub fn remove_leaf( &mut self, path: &Nibbles, @@ -255,6 +257,28 @@ impl SparseTrie { revealed.remove_leaf(path, provider)?; Ok(()) } + + /// Shrinks the capacity of the sparse trie's node storage. + /// Works for both revealed and blind tries with allocated storage. + pub fn shrink_nodes_to(&mut self, size: usize) { + match self { + Self::Blind(Some(trie)) | Self::Revealed(trie) => { + trie.shrink_nodes_to(size); + } + _ => {} + } + } + + /// Shrinks the capacity of the sparse trie's value storage. + /// Works for both revealed and blind tries with allocated storage. + pub fn shrink_values_to(&mut self, size: usize) { + match self { + Self::Blind(Some(trie)) | Self::Revealed(trie) => { + trie.shrink_values_to(size); + } + _ => {} + } + } } /// The representation of revealed sparse trie. @@ -573,14 +597,13 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } + #[instrument(level = "trace", target = "trie::sparse::serial", skip(self, provider))] fn update_leaf( &mut self, full_path: Nibbles, value: Vec, provider: P, ) -> SparseTrieResult<()> { - trace!(target: "trie::sparse", ?full_path, ?value, "update_leaf called"); - self.prefix_set.insert(full_path); let existing = self.values.insert(full_path, value); if existing.is_some() { @@ -712,6 +735,7 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } + #[instrument(level = "trace", target = "trie::sparse::serial", skip(self, provider))] fn remove_leaf( &mut self, full_path: &Nibbles, @@ -821,38 +845,17 @@ impl SparseTrieInterface for SerialSparseTrie { trace!(target: "trie::sparse", ?removed_path, ?child_path, "Branch node has only one child"); - if self.nodes.get(&child_path).unwrap().is_hash() { - debug!( - target: "trie::sparse", - ?child_path, - leaf_full_path = ?full_path, - "Branch node child not revealed in remove_leaf, falling back to db", - ); - if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.trie_node(&child_path)? - { - let decoded = TrieNode::decode(&mut &node[..])?; - trace!( - target: "trie::sparse", - ?child_path, - ?decoded, - ?tree_mask, - ?hash_mask, - "Revealing remaining blinded branch child" - ); - self.reveal_node( - child_path, - decoded, - TrieMasks { hash_mask, tree_mask }, - )?; - } - } - - // Get the only child node. - let child = self.nodes.get(&child_path).unwrap(); + // If the remaining child node is not yet revealed then we have to reveal + // it here, otherwise it's not possible to know how to collapse the branch. 
+ let child = self.reveal_remaining_child_on_leaf_removal( + &provider, + full_path, + &child_path, + true, // recurse_into_extension + )?; let mut delete_child = false; - let new_node = match child { + let new_node = match &child { SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), &SparseNode::Hash(hash) => { return Err(SparseTrieErrorKind::BlindedNode { @@ -918,6 +921,7 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } + #[instrument(target = "trie::sparse::serial", skip(self))] fn root(&mut self) -> B256 { // Take the current prefix set let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); @@ -970,6 +974,7 @@ impl SparseTrieInterface for SerialSparseTrie { expected_value: Option<&Vec>, ) -> Result { // Helper function to check if a value matches the expected value + #[inline] fn check_value_match( actual_value: &Vec, expected_value: Option<&Vec>, @@ -1080,6 +1085,16 @@ impl SparseTrieInterface for SerialSparseTrie { // If we get here, there's no leaf at the target path Ok(LeafLookup::NonExistent) } + + fn shrink_nodes_to(&mut self, size: usize) { + self.nodes.shrink_to(size); + self.branch_node_tree_masks.shrink_to(size); + self.branch_node_hash_masks.shrink_to(size); + } + + fn shrink_values_to(&mut self, size: usize) { + self.values.shrink_to(size); + } } impl SerialSparseTrie { @@ -1256,6 +1271,87 @@ impl SerialSparseTrie { Ok(nodes) } + /// Called when a leaf is removed on a branch which has only one other remaining child. That + /// child must be revealed in order to properly collapse the branch. + /// + /// If `recurse_into_extension` is true, and the remaining child is an extension node, then its + /// child will be ensured to be revealed as well. + /// + /// ## Returns + /// + /// The node of the remaining child, whether it was already revealed or not. + fn reveal_remaining_child_on_leaf_removal( + &mut self, + provider: P, + full_path: &Nibbles, // only needed for logs + remaining_child_path: &Nibbles, + recurse_into_extension: bool, + ) -> SparseTrieResult { + let remaining_child_node = match self.nodes.get(remaining_child_path).unwrap() { + SparseNode::Hash(_) => { + debug!( + target: "trie::parallel_sparse", + child_path = ?remaining_child_path, + leaf_full_path = ?full_path, + "Node child not revealed in remove_leaf, falling back to db", + ); + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.trie_node(remaining_child_path)? + { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::parallel_sparse", + ?remaining_child_path, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing remaining blinded branch child" + ); + self.reveal_node( + *remaining_child_path, + decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + self.nodes.get(remaining_child_path).unwrap().clone() + } else { + return Err(SparseTrieErrorKind::NodeNotFoundInProvider { + path: *remaining_child_path, + } + .into()) + } + } + node => node.clone(), + }; + + // If `recurse_into_extension` is true, and the remaining child is an extension node, then + // its child will be ensured to be revealed as well. This is required for generation of + // trie updates; without revealing the grandchild branch it's not always possible to know + // if the tree mask bit should be set for the child extension on its parent branch. + if let SparseNode::Extension { key, .. 
} = &remaining_child_node && + recurse_into_extension + { + let mut remaining_grandchild_path = *remaining_child_path; + remaining_grandchild_path.extend(key); + + trace!( + target: "trie::parallel_sparse", + remaining_grandchild_path = ?remaining_grandchild_path, + child_path = ?remaining_child_path, + leaf_full_path = ?full_path, + "Revealing child of extension node, which is the last remaining child of the branch" + ); + + self.reveal_remaining_child_on_leaf_removal( + provider, + full_path, + &remaining_grandchild_path, + false, // recurse_into_extension + )?; + } + + Ok(remaining_child_node) + } + /// Recalculates and updates the RLP hashes of nodes deeper than or equal to the specified /// `depth`. /// @@ -1264,6 +1360,7 @@ impl SerialSparseTrie { /// /// This function identifies all nodes that have changed (based on the prefix set) at the given /// depth and recalculates their RLP representation. + #[instrument(level = "trace", target = "trie::sparse::serial", skip(self))] pub fn update_rlp_node_level(&mut self, depth: usize) { // Take the current prefix set let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); @@ -1309,6 +1406,7 @@ impl SerialSparseTrie { /// specified depth. /// - A `PrefixSetMut` containing paths shallower than the specified depth that still need to be /// tracked for future updates. + #[instrument(level = "trace", target = "trie::sparse::serial", skip(self))] fn get_changed_nodes_at_depth( &self, prefix_set: &mut PrefixSet, @@ -1395,6 +1493,7 @@ impl SerialSparseTrie { /// # Panics /// /// If the node at provided path does not exist. + #[instrument(level = "trace", target = "trie::sparse::serial", skip_all, ret(level = "trace"))] pub fn rlp_node( &mut self, prefix_set: &mut PrefixSet, @@ -2971,12 +3070,15 @@ mod tests { state.clone(), trie_cursor.account_trie_cursor().unwrap(), Default::default(), - state.keys().copied().collect::>(), + state.keys().copied(), ); + // Extract account nodes before moving hash_builder_updates + let hash_builder_account_nodes = hash_builder_updates.account_nodes.clone(); + // Write trie updates to the database let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.write_trie_updates(&hash_builder_updates).unwrap(); + provider_rw.write_trie_updates(hash_builder_updates).unwrap(); provider_rw.commit().unwrap(); // Assert that the sparse trie root matches the hash builder root @@ -2984,7 +3086,7 @@ mod tests { // Assert that the sparse trie updates match the hash builder updates pretty_assertions::assert_eq!( BTreeMap::from_iter(sparse_updates.updated_nodes), - BTreeMap::from_iter(hash_builder_updates.account_nodes) + BTreeMap::from_iter(hash_builder_account_nodes) ); // Assert that the sparse trie nodes match the hash builder proof nodes assert_eq_sparse_trie_proof_nodes(&updated_sparse, hash_builder_proof_nodes); @@ -3013,12 +3115,15 @@ mod tests { .iter() .map(|nibbles| B256::from_slice(&nibbles.pack())) .collect(), - state.keys().copied().collect::>(), + state.keys().copied(), ); + // Extract account nodes before moving hash_builder_updates + let hash_builder_account_nodes = hash_builder_updates.account_nodes.clone(); + // Write trie updates to the database let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.write_trie_updates(&hash_builder_updates).unwrap(); + provider_rw.write_trie_updates(hash_builder_updates).unwrap(); provider_rw.commit().unwrap(); // Assert that the sparse trie root matches the hash builder root @@ -3026,7 +3131,7 @@ mod tests { // Assert that the sparse 
trie updates match the hash builder updates pretty_assertions::assert_eq!( BTreeMap::from_iter(sparse_updates.updated_nodes), - BTreeMap::from_iter(hash_builder_updates.account_nodes) + BTreeMap::from_iter(hash_builder_account_nodes) ); // Assert that the sparse trie nodes match the hash builder proof nodes assert_eq_sparse_trie_proof_nodes(&updated_sparse, hash_builder_proof_nodes); @@ -3073,7 +3178,7 @@ mod tests { } /// We have three leaves that share the same prefix: 0x00, 0x01 and 0x02. Hash builder trie has - /// only nodes 0x00 and 0x01, and we have proofs for them. Node B is new and inserted in the + /// only nodes 0x00 and 0x02, and we have proofs for them. Node 0x01 is new and inserted in the /// sparse trie first. /// /// 1. Reveal the hash builder proof to leaf 0x00 in the sparse trie. diff --git a/crates/trie/trie/src/forward_cursor.rs b/crates/trie/trie/src/forward_cursor.rs index b1b6c041289..c99b0d049ee 100644 --- a/crates/trie/trie/src/forward_cursor.rs +++ b/crates/trie/trie/src/forward_cursor.rs @@ -23,8 +23,9 @@ impl<'a, K, V> ForwardInMemoryCursor<'a, K, V> { self.is_empty } + /// Returns the current entry pointed to be the cursor, or `None` if no entries are left. #[inline] - fn peek(&self) -> Option<&(K, V)> { + pub fn current(&self) -> Option<&(K, V)> { self.entries.clone().next() } @@ -59,7 +60,7 @@ where fn advance_while(&mut self, predicate: impl Fn(&K) -> bool) -> Option<(K, V)> { let mut entry; loop { - entry = self.peek(); + entry = self.current(); if entry.is_some_and(|(k, _)| predicate(k)) { self.next(); } else { @@ -77,20 +78,21 @@ mod tests { #[test] fn test_cursor() { let mut cursor = ForwardInMemoryCursor::new(&[(1, ()), (2, ()), (3, ()), (4, ()), (5, ())]); + assert_eq!(cursor.current(), Some(&(1, ()))); assert_eq!(cursor.seek(&0), Some((1, ()))); - assert_eq!(cursor.peek(), Some(&(1, ()))); + assert_eq!(cursor.current(), Some(&(1, ()))); assert_eq!(cursor.seek(&3), Some((3, ()))); - assert_eq!(cursor.peek(), Some(&(3, ()))); + assert_eq!(cursor.current(), Some(&(3, ()))); assert_eq!(cursor.seek(&3), Some((3, ()))); - assert_eq!(cursor.peek(), Some(&(3, ()))); + assert_eq!(cursor.current(), Some(&(3, ()))); assert_eq!(cursor.seek(&4), Some((4, ()))); - assert_eq!(cursor.peek(), Some(&(4, ()))); + assert_eq!(cursor.current(), Some(&(4, ()))); assert_eq!(cursor.seek(&6), None); - assert_eq!(cursor.peek(), None); + assert_eq!(cursor.current(), None); } } diff --git a/crates/trie/trie/src/hashed_cursor/mock.rs b/crates/trie/trie/src/hashed_cursor/mock.rs index 895bf852a22..f091ae6ffe5 100644 --- a/crates/trie/trie/src/hashed_cursor/mock.rs +++ b/crates/trie/trie/src/hashed_cursor/mock.rs @@ -55,17 +55,23 @@ impl MockHashedCursorFactory { } impl HashedCursorFactory for MockHashedCursorFactory { - type AccountCursor = MockHashedCursor; - type StorageCursor = MockHashedCursor; - - fn hashed_account_cursor(&self) -> Result { + type AccountCursor<'a> + = MockHashedCursor + where + Self: 'a; + type StorageCursor<'a> + = MockHashedCursor + where + Self: 'a; + + fn hashed_account_cursor(&self) -> Result, DatabaseError> { Ok(MockHashedCursor::new(self.hashed_accounts.clone(), self.visited_account_keys.clone())) } fn hashed_storage_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { Ok(MockHashedCursor::new( self.hashed_storage_tries .get(&hashed_address) @@ -101,7 +107,7 @@ impl MockHashedCursor { impl HashedCursor for MockHashedCursor { type Value = T; - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), 
ret(level = "trace"))] fn seek(&mut self, key: B256) -> Result, DatabaseError> { // Find the first key that is greater than or equal to the given key. let entry = self.values.iter().find_map(|(k, v)| (k >= &key).then(|| (*k, v.clone()))); @@ -115,7 +121,7 @@ impl HashedCursor for MockHashedCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn next(&mut self) -> Result, DatabaseError> { let mut iter = self.values.iter(); // Jump to the first key that has a prefix of the current key if it's set, or to the first diff --git a/crates/trie/trie/src/hashed_cursor/mod.rs b/crates/trie/trie/src/hashed_cursor/mod.rs index 7917f675452..6c4788a3360 100644 --- a/crates/trie/trie/src/hashed_cursor/mod.rs +++ b/crates/trie/trie/src/hashed_cursor/mod.rs @@ -14,23 +14,29 @@ pub mod noop; pub mod mock; /// The factory trait for creating cursors over the hashed state. +#[auto_impl::auto_impl(&)] pub trait HashedCursorFactory { /// The hashed account cursor type. - type AccountCursor: HashedCursor; + type AccountCursor<'a>: HashedCursor + where + Self: 'a; /// The hashed storage cursor type. - type StorageCursor: HashedStorageCursor; + type StorageCursor<'a>: HashedStorageCursor + where + Self: 'a; /// Returns a cursor for iterating over all hashed accounts in the state. - fn hashed_account_cursor(&self) -> Result; + fn hashed_account_cursor(&self) -> Result, DatabaseError>; /// Returns a cursor for iterating over all hashed storage entries in the state. fn hashed_storage_cursor( &self, hashed_address: B256, - ) -> Result; + ) -> Result, DatabaseError>; } /// The cursor for iterating over hashed entries. +#[auto_impl::auto_impl(&mut)] pub trait HashedCursor { /// Value returned by the cursor. type Value: std::fmt::Debug; @@ -44,6 +50,7 @@ pub trait HashedCursor { } /// The cursor for iterating over hashed storage entries. +#[auto_impl::auto_impl(&mut)] pub trait HashedStorageCursor: HashedCursor { /// Returns `true` if there are no entries for a given key. 
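Because the cursor factory now has lifetime-parameterized cursor types and a `#[auto_impl(&)]` impl, a shared reference to a provider is itself a factory, which is what lets `root.rs` above pass `&provider` straight into `StorageRoot::new_hashed`. A hedged sketch of a generic consumer relying on that; only the trait items shown in this patch are assumed:

use alloy_primitives::B256;
use reth_storage_errors::db::DatabaseError;

// Counts hashed accounts through whichever factory it is handed. Passing `&provider`
// works thanks to the `&` auto-impl, so the caller keeps ownership of its provider.
fn count_hashed_accounts<F: HashedCursorFactory>(factory: F) -> Result<usize, DatabaseError> {
    let mut cursor = factory.hashed_account_cursor()?;
    let mut count = 0;
    let mut entry = cursor.seek(B256::ZERO)?;
    while entry.is_some() {
        count += 1;
        entry = cursor.next()?;
    }
    Ok(count)
}

// Usage (illustrative): let n = count_hashed_accounts(&provider)?;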
fn is_storage_empty(&mut self) -> Result; diff --git a/crates/trie/trie/src/hashed_cursor/noop.rs b/crates/trie/trie/src/hashed_cursor/noop.rs index 58b78dc245f..e5bc44f0f5c 100644 --- a/crates/trie/trie/src/hashed_cursor/noop.rs +++ b/crates/trie/trie/src/hashed_cursor/noop.rs @@ -9,17 +9,23 @@ use reth_storage_errors::db::DatabaseError; pub struct NoopHashedCursorFactory; impl HashedCursorFactory for NoopHashedCursorFactory { - type AccountCursor = NoopHashedAccountCursor; - type StorageCursor = NoopHashedStorageCursor; + type AccountCursor<'a> + = NoopHashedAccountCursor + where + Self: 'a; + type StorageCursor<'a> + = NoopHashedStorageCursor + where + Self: 'a; - fn hashed_account_cursor(&self) -> Result { + fn hashed_account_cursor(&self) -> Result, DatabaseError> { Ok(NoopHashedAccountCursor::default()) } fn hashed_storage_cursor( &self, _hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { Ok(NoopHashedStorageCursor::default()) } } diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index e81aa4af22a..896251f3634 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -19,15 +19,21 @@ impl HashedPostStateCursorFactory { } } -impl<'a, CF, T> HashedCursorFactory for HashedPostStateCursorFactory +impl<'overlay, CF, T> HashedCursorFactory for HashedPostStateCursorFactory where CF: HashedCursorFactory, T: AsRef, { - type AccountCursor = HashedPostStateAccountCursor<'a, CF::AccountCursor>; - type StorageCursor = HashedPostStateStorageCursor<'a, CF::StorageCursor>; - - fn hashed_account_cursor(&self) -> Result { + type AccountCursor<'cursor> + = HashedPostStateAccountCursor<'overlay, CF::AccountCursor<'cursor>> + where + Self: 'cursor; + type StorageCursor<'cursor> + = HashedPostStateStorageCursor<'overlay, CF::StorageCursor<'cursor>> + where + Self: 'cursor; + + fn hashed_account_cursor(&self) -> Result, DatabaseError> { let cursor = self.cursor_factory.hashed_account_cursor()?; Ok(HashedPostStateAccountCursor::new(cursor, &self.post_state.as_ref().accounts)) } @@ -35,7 +41,7 @@ where fn hashed_storage_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { let cursor = self.cursor_factory.hashed_storage_cursor(hashed_address)?; Ok(HashedPostStateStorageCursor::new( cursor, diff --git a/crates/trie/trie/src/proof/mod.rs b/crates/trie/trie/src/proof/mod.rs index 348cdb430a2..efd958e5743 100644 --- a/crates/trie/trie/src/proof/mod.rs +++ b/crates/trie/trie/src/proof/mod.rs @@ -80,6 +80,16 @@ impl Proof { self.collect_branch_node_masks = branch_node_masks; self } + + /// Get a reference to the trie cursor factory. + pub const fn trie_cursor_factory(&self) -> &T { + &self.trie_cursor_factory + } + + /// Get a reference to the hashed cursor factory. 
+ pub const fn hashed_cursor_factory(&self) -> &H { + &self.hashed_cursor_factory + } } impl<T, H> Proof<T, H> diff --git a/crates/trie/trie/src/proof/trie_node.rs index 3d964cf5e8b..8625412f3ae 100644 --- a/crates/trie/trie/src/proof/trie_node.rs +++ b/crates/trie/trie/src/proof/trie_node.rs @@ -2,11 +2,11 @@ use super::{Proof, StorageProof}; use crate::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; use alloy_primitives::{map::HashSet, B256}; use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; -use reth_trie_common::{prefix_set::TriePrefixSetsMut, MultiProofTargets, Nibbles}; +use reth_trie_common::{MultiProofTargets, Nibbles}; use reth_trie_sparse::provider::{ pad_path_to_key, RevealedNode, TrieNodeProvider, TrieNodeProviderFactory, }; -use std::{sync::Arc, time::Instant}; +use std::time::Instant; use tracing::{enabled, trace, Level}; /// Factory for instantiating providers capable of retrieving blinded trie nodes via proofs. @@ -16,18 +16,12 @@ pub struct ProofTrieNodeProviderFactory<T, H> { trie_cursor_factory: T, /// The factory for hashed cursors. hashed_cursor_factory: H, - /// A set of prefix sets that have changes. - prefix_sets: Arc<TriePrefixSetsMut>, } impl<T, H> ProofTrieNodeProviderFactory<T, H> { /// Create new proof-based blinded provider factory. - pub const fn new( - trie_cursor_factory: T, - hashed_cursor_factory: H, - prefix_sets: Arc<TriePrefixSetsMut>, - ) -> Self { - Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets } + pub const fn new(trie_cursor_factory: T, hashed_cursor_factory: H) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory } } } @@ -43,7 +37,6 @@ where ProofBlindedAccountProvider { trie_cursor_factory: self.trie_cursor_factory.clone(), hashed_cursor_factory: self.hashed_cursor_factory.clone(), - prefix_sets: self.prefix_sets.clone(), } } @@ -51,7 +44,6 @@ where ProofBlindedStorageProvider { trie_cursor_factory: self.trie_cursor_factory.clone(), hashed_cursor_factory: self.hashed_cursor_factory.clone(), - prefix_sets: self.prefix_sets.clone(), account, } } @@ -64,36 +56,28 @@ pub struct ProofBlindedAccountProvider<T, H> { trie_cursor_factory: T, /// The factory for hashed cursors. hashed_cursor_factory: H, - /// A set of prefix sets that have changes. - prefix_sets: Arc<TriePrefixSetsMut>, } impl<T, H> ProofBlindedAccountProvider<T, H> { /// Create new proof-based blinded account node provider.
- pub const fn new( - trie_cursor_factory: T, - hashed_cursor_factory: H, - prefix_sets: Arc, - ) -> Self { - Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets } + pub const fn new(trie_cursor_factory: T, hashed_cursor_factory: H) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory } } } impl TrieNodeProvider for ProofBlindedAccountProvider where - T: TrieCursorFactory + Clone + Send + Sync, - H: HashedCursorFactory + Clone + Send + Sync, + T: TrieCursorFactory, + H: HashedCursorFactory, { fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { let start = enabled!(target: "trie::proof::blinded", Level::TRACE).then(Instant::now); let targets = MultiProofTargets::from_iter([(pad_path_to_key(path), HashSet::default())]); - let mut proof = - Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) - .with_prefix_sets_mut(self.prefix_sets.as_ref().clone()) - .with_branch_node_masks(true) - .multiproof(targets) - .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; + let mut proof = Proof::new(&self.trie_cursor_factory, &self.hashed_cursor_factory) + .with_branch_node_masks(true) + .multiproof(targets) + .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; let node = proof.account_subtree.into_inner().remove(path); let tree_mask = proof.branch_node_tree_masks.remove(path); let hash_mask = proof.branch_node_hash_masks.remove(path); @@ -118,41 +102,31 @@ pub struct ProofBlindedStorageProvider { trie_cursor_factory: T, /// The factory for hashed cursors. hashed_cursor_factory: H, - /// A set of prefix sets that have changes. - prefix_sets: Arc, /// Target account. account: B256, } impl ProofBlindedStorageProvider { /// Create new proof-based blinded storage node provider. - pub const fn new( - trie_cursor_factory: T, - hashed_cursor_factory: H, - prefix_sets: Arc, - account: B256, - ) -> Self { - Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets, account } + pub const fn new(trie_cursor_factory: T, hashed_cursor_factory: H, account: B256) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory, account } } } impl TrieNodeProvider for ProofBlindedStorageProvider where - T: TrieCursorFactory + Clone + Send + Sync, - H: HashedCursorFactory + Clone + Send + Sync, + T: TrieCursorFactory, + H: HashedCursorFactory, { fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { let start = enabled!(target: "trie::proof::blinded", Level::TRACE).then(Instant::now); let targets = HashSet::from_iter([pad_path_to_key(path)]); - let storage_prefix_set = - self.prefix_sets.storage_prefix_sets.get(&self.account).cloned().unwrap_or_default(); let mut proof = StorageProof::new_hashed( - self.trie_cursor_factory.clone(), - self.hashed_cursor_factory.clone(), + &self.trie_cursor_factory, + &self.hashed_cursor_factory, self.account, ) - .with_prefix_set_mut(storage_prefix_set) .with_branch_node_masks(true) .storage_multiproof(targets) .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index 7f1b933e206..2311dce82b3 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -20,15 +20,22 @@ impl InMemoryTrieCursorFactory { } } -impl<'a, CF, T> TrieCursorFactory for InMemoryTrieCursorFactory +impl<'overlay, CF, T> TrieCursorFactory for InMemoryTrieCursorFactory where - CF: TrieCursorFactory, + CF: TrieCursorFactory + 'overlay, T: AsRef, { - type 
AccountTrieCursor = InMemoryTrieCursor<'a, CF::AccountTrieCursor>; - type StorageTrieCursor = InMemoryTrieCursor<'a, CF::StorageTrieCursor>; + type AccountTrieCursor<'cursor> + = InMemoryTrieCursor<'overlay, CF::AccountTrieCursor<'cursor>> + where + Self: 'cursor; - fn account_trie_cursor(&self) -> Result { + type StorageTrieCursor<'cursor> + = InMemoryTrieCursor<'overlay, CF::StorageTrieCursor<'cursor>> + where + Self: 'cursor; + + fn account_trie_cursor(&self) -> Result, DatabaseError> { let cursor = self.cursor_factory.account_trie_cursor()?; Ok(InMemoryTrieCursor::new(Some(cursor), self.trie_updates.as_ref().account_nodes_ref())) } @@ -36,11 +43,12 @@ where fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { // if the storage trie has no updates then we use this as the in-memory overlay. static EMPTY_UPDATES: Vec<(Nibbles, Option)> = Vec::new(); - let storage_trie_updates = self.trie_updates.as_ref().storage_tries.get(&hashed_address); + let storage_trie_updates = + self.trie_updates.as_ref().storage_tries_ref().get(&hashed_address); let (storage_nodes, cleared) = storage_trie_updates .map(|u| (u.storage_nodes_ref(), u.is_deleted())) .unwrap_or((&EMPTY_UPDATES, false)); @@ -61,10 +69,14 @@ where pub struct InMemoryTrieCursor<'a, C> { /// The underlying cursor. If None then it is assumed there is no DB data. cursor: Option, + /// Entry that `cursor` is currently pointing to. + cursor_entry: Option<(Nibbles, BranchNodeCompact)>, /// Forward-only in-memory cursor over storage trie nodes. in_memory_cursor: ForwardInMemoryCursor<'a, Nibbles, Option>, - /// Last key returned by the cursor. + /// The key most recently returned from the Cursor. last_key: Option, + /// Whether an initial seek was called. + seeked: bool, } impl<'a, C: TrieCursor> InMemoryTrieCursor<'a, C> { @@ -75,47 +87,76 @@ impl<'a, C: TrieCursor> InMemoryTrieCursor<'a, C> { trie_updates: &'a [(Nibbles, Option)], ) -> Self { let in_memory_cursor = ForwardInMemoryCursor::new(trie_updates); - Self { cursor, in_memory_cursor, last_key: None } + Self { cursor, cursor_entry: None, in_memory_cursor, last_key: None, seeked: false } } - fn seek_inner( - &mut self, - key: Nibbles, - exact: bool, - ) -> Result, DatabaseError> { - let mut mem_entry = self.in_memory_cursor.seek(&key); - let mut db_entry = self.cursor.as_mut().map(|c| c.seek(key)).transpose()?.flatten(); - - // exact matching is easy, if overlay has a value then return that (updated or removed), or - // if db has a value then return that. - if exact { - return Ok(match (mem_entry, db_entry) { - (Some((mem_key, entry_inner)), _) if mem_key == key => { - entry_inner.map(|node| (key, node)) - } - (_, Some((db_key, node))) if db_key == key => Some((key, node)), - _ => None, - }) + /// Asserts that the next entry to be returned from the cursor is not previous to the last entry + /// returned. + fn set_last_key(&mut self, next_entry: &Option<(Nibbles, BranchNodeCompact)>) { + let next_key = next_entry.as_ref().map(|e| e.0); + debug_assert!( + self.last_key.is_none_or(|last| next_key.is_none_or(|next| next >= last)), + "Cannot return entry {:?} previous to the last returned entry at {:?}", + next_key, + self.last_key, + ); + self.last_key = next_key; + } + + /// Seeks the `cursor_entry` field of the struct using the cursor. + fn cursor_seek(&mut self, key: Nibbles) -> Result<(), DatabaseError> { + // Only seek if: + // 1. We have a cursor entry and need to seek forward (entry.0 < key), OR + // 2. 
We have no cursor entry and haven't seeked yet (!self.seeked) + let should_seek = match self.cursor_entry.as_ref() { + Some(entry) => entry.0 < key, + None => !self.seeked, + }; + + if should_seek { + self.cursor_entry = self.cursor.as_mut().map(|c| c.seek(key)).transpose()?.flatten(); } + Ok(()) + } + + /// Seeks the `cursor_entry` field of the struct to the subsequent entry using the cursor. + fn cursor_next(&mut self) -> Result<(), DatabaseError> { + debug_assert!(self.seeked); + + // If the previous entry is `None`, and we've done a seek previously, then the cursor is + // exhausted and we shouldn't call `next` again. + if self.cursor_entry.is_some() { + self.cursor_entry = self.cursor.as_mut().map(|c| c.next()).transpose()?.flatten(); + } + + Ok(()) + } + + /// Compares the current in-memory entry with the current entry of the cursor, and applies the + /// in-memory entry to the cursor entry as an overlay. + // + /// This may consume and move forward the current entries when the overlay indicates a removed + /// node. + fn choose_next_entry(&mut self) -> Result, DatabaseError> { loop { - match (mem_entry, &db_entry) { + match (self.in_memory_cursor.current().cloned(), &self.cursor_entry) { (Some((mem_key, None)), _) - if db_entry.as_ref().is_none_or(|(db_key, _)| &mem_key < db_key) => + if self.cursor_entry.as_ref().is_none_or(|(db_key, _)| &mem_key < db_key) => { // If overlay has a removed node but DB cursor is exhausted or ahead of the // in-memory cursor then move ahead in-memory, as there might be further // non-removed overlay nodes. - mem_entry = self.in_memory_cursor.first_after(&mem_key); + self.in_memory_cursor.first_after(&mem_key); } (Some((mem_key, None)), Some((db_key, _))) if &mem_key == db_key => { // If overlay has a removed node which is returned from DB then move both // cursors ahead to the next key. - mem_entry = self.in_memory_cursor.first_after(&mem_key); - db_entry = self.cursor.as_mut().map(|c| c.next()).transpose()?.flatten(); + self.in_memory_cursor.first_after(&mem_key); + self.cursor_next()?; } (Some((mem_key, Some(node))), _) - if db_entry.as_ref().is_none_or(|(db_key, _)| &mem_key <= db_key) => + if self.cursor_entry.as_ref().is_none_or(|(db_key, _)| &mem_key <= db_key) => { // If overlay returns a node prior to the DB's node, or the DB is exhausted, // then we return the overlay's node. @@ -125,18 +166,10 @@ impl<'a, C: TrieCursor> InMemoryTrieCursor<'a, C> { // - mem_key > db_key // - overlay is exhausted // Return the db_entry. If DB is also exhausted then this returns None. 
- _ => return Ok(db_entry), + _ => return Ok(self.cursor_entry.clone()), } } } - - fn next_inner( - &mut self, - last: Nibbles, - ) -> Result, DatabaseError> { - let Some(key) = last.increment() else { return Ok(None) }; - self.seek_inner(key, false) - } } impl TrieCursor for InMemoryTrieCursor<'_, C> { @@ -144,8 +177,20 @@ impl TrieCursor for InMemoryTrieCursor<'_, C> { &mut self, key: Nibbles, ) -> Result, DatabaseError> { - let entry = self.seek_inner(key, true)?; - self.last_key = entry.as_ref().map(|(nibbles, _)| *nibbles); + self.cursor_seek(key)?; + let mem_entry = self.in_memory_cursor.seek(&key); + + self.seeked = true; + + let entry = match (mem_entry, &self.cursor_entry) { + (Some((mem_key, entry_inner)), _) if mem_key == key => { + entry_inner.map(|node| (key, node)) + } + (_, Some((db_key, node))) if db_key == &key => Some((key, node.clone())), + _ => None, + }; + + self.set_last_key(&entry); Ok(entry) } @@ -153,22 +198,41 @@ impl TrieCursor for InMemoryTrieCursor<'_, C> { &mut self, key: Nibbles, ) -> Result, DatabaseError> { - let entry = self.seek_inner(key, false)?; - self.last_key = entry.as_ref().map(|(nibbles, _)| *nibbles); + self.cursor_seek(key)?; + self.in_memory_cursor.seek(&key); + + self.seeked = true; + + let entry = self.choose_next_entry()?; + self.set_last_key(&entry); Ok(entry) } fn next(&mut self) -> Result, DatabaseError> { - let next = match &self.last_key { - Some(last) => { - let entry = self.next_inner(*last)?; - self.last_key = entry.as_ref().map(|entry| entry.0); - entry - } - // no previous entry was found - None => None, + debug_assert!(self.seeked, "Cursor must be seek'd before next is called"); + + // A `last_key` of `None` indicates that the cursor is exhausted. + let Some(last_key) = self.last_key else { + return Ok(None); }; - Ok(next) + + // If either cursor is currently pointing to the last entry which was returned then consume + // that entry so that `choose_next_entry` is looking at the subsequent one. 
+ if let Some((key, _)) = self.in_memory_cursor.current() && + key == &last_key + { + self.in_memory_cursor.first_after(&last_key); + } + + if let Some((key, _)) = &self.cursor_entry && + key == &last_key + { + self.cursor_next()?; + } + + let entry = self.choose_next_entry()?; + self.set_last_key(&entry); + Ok(entry) } fn current(&mut self) -> Result, DatabaseError> { @@ -210,8 +274,10 @@ mod tests { results.push(entry); } - while let Ok(Some(entry)) = cursor.next() { - results.push(entry); + if !test_case.expected_results.is_empty() { + while let Ok(Some(entry)) = cursor.next() { + results.push(entry); + } } assert_eq!( @@ -493,4 +559,317 @@ mod tests { cursor.next().unwrap(); assert_eq!(cursor.current().unwrap(), Some(Nibbles::from_nibbles([0x3]))); } + + #[test] + fn test_all_storage_slots_deleted_not_wiped_exact_keys() { + use tracing::debug; + reth_tracing::init_test_tracing(); + + // This test reproduces an edge case where: + // - cursor is not None (not wiped) + // - All in-memory entries are deletions (None values) + // - Database has corresponding entries + // - Expected: NO leaves should be returned (all deleted) + + // Generate 42 trie node entries with keys distributed across the keyspace + let mut db_nodes: Vec<(Nibbles, BranchNodeCompact)> = (0..10) + .map(|i| { + let key_bytes = vec![(i * 6) as u8, i as u8]; // Spread keys across keyspace + let nibbles = Nibbles::unpack(key_bytes); + (nibbles, BranchNodeCompact::new(i as u16, i as u16, 0, vec![], None)) + }) + .collect(); + + db_nodes.sort_by_key(|(key, _)| *key); + db_nodes.dedup_by_key(|(key, _)| *key); + + for (key, _) in &db_nodes { + debug!("node at {key:?}"); + } + + // Create in-memory entries with same keys but all None values (deletions) + let in_memory_nodes: Vec<(Nibbles, Option)> = + db_nodes.iter().map(|(key, _)| (*key, None)).collect(); + + let db_nodes_map: BTreeMap = db_nodes.into_iter().collect(); + let db_nodes_arc = Arc::new(db_nodes_map); + let visited_keys = Arc::new(Mutex::new(Vec::new())); + let mock_cursor = MockTrieCursor::new(db_nodes_arc, visited_keys); + + let mut cursor = InMemoryTrieCursor::new(Some(mock_cursor), &in_memory_nodes); + + // Seek to beginning should return None (all nodes are deleted) + tracing::debug!("seeking to 0x"); + let result = cursor.seek(Nibbles::default()).unwrap(); + assert_eq!( + result, None, + "Expected no entries when all nodes are deleted, but got {:?}", + result + ); + + // Test seek operations at various positions - all should return None + let seek_keys = vec![ + Nibbles::unpack([0x00]), + Nibbles::unpack([0x5d]), + Nibbles::unpack([0x5e]), + Nibbles::unpack([0x5f]), + Nibbles::unpack([0xc2]), + Nibbles::unpack([0xc5]), + Nibbles::unpack([0xc9]), + Nibbles::unpack([0xf0]), + ]; + + for seek_key in seek_keys { + tracing::debug!("seeking to {seek_key:?}"); + let result = cursor.seek(seek_key).unwrap(); + assert_eq!( + result, None, + "Expected None when seeking to {:?} but got {:?}", + seek_key, result + ); + } + + // next() should also always return None + let result = cursor.next().unwrap(); + assert_eq!(result, None, "Expected None from next() but got {:?}", result); + } + + mod proptest_tests { + use super::*; + use itertools::Itertools; + use proptest::prelude::*; + + /// Merge `db_nodes` with `in_memory_nodes`, applying the in-memory overlay. + /// This properly handles deletions (None values in `in_memory_nodes`). 
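For orientation, here is a hedged, self-contained sketch (toy keys and values, not the actual reth cursor traits) of the overlay-merge semantics that both `InMemoryTrieCursor` and the test helper defined next implement: entries from the database and the in-memory overlay are merged in key order, and an overlay `None` acts as a tombstone that removes the corresponding database entry.

```rust
// Hedged sketch of overlay-merge with tombstones; simplified types only.
fn merge_overlay<V: Clone>(db: &[(u64, V)], overlay: &[(u64, Option<V>)]) -> Vec<(u64, V)> {
    let (mut i, mut j) = (0usize, 0usize);
    let mut out = Vec::new();
    loop {
        match (db.get(i), overlay.get(j)) {
            (None, None) => break,
            // Only the database side has entries left: emit them unchanged.
            (Some((k, v)), None) => {
                out.push((*k, v.clone()));
                i += 1;
            }
            // Only the overlay side has entries left: emit unless it is a tombstone.
            (None, Some((k, v))) => {
                if let Some(v) = v {
                    out.push((*k, v.clone()));
                }
                j += 1;
            }
            (Some((dk, dv)), Some((mk, mv))) => {
                if dk < mk {
                    // Database key comes first and is untouched by the overlay.
                    out.push((*dk, dv.clone()));
                    i += 1;
                } else {
                    // Overlay key is <= database key: overlay wins; `None` removes the entry.
                    if let Some(v) = mv {
                        out.push((*mk, v.clone()));
                    }
                    if dk == mk {
                        i += 1;
                    }
                    j += 1;
                }
            }
        }
    }
    out
}

fn main() {
    let db = [(1, "a"), (2, "b"), (3, "c")];
    let overlay = [(2, None), (4, Some("d"))]; // delete key 2, insert key 4
    assert_eq!(merge_overlay(&db, &overlay), vec![(1, "a"), (3, "c"), (4, "d")]);
}
```

The cursor variant above applies the same rule lazily, one entry at a time, which is why `choose_next_entry` may have to advance past a run of tombstones before it can return anything.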
+ fn merge_with_overlay( + db_nodes: Vec<(Nibbles, BranchNodeCompact)>, + in_memory_nodes: Vec<(Nibbles, Option)>, + ) -> Vec<(Nibbles, BranchNodeCompact)> { + db_nodes + .into_iter() + .merge_join_by(in_memory_nodes, |db_entry, mem_entry| db_entry.0.cmp(&mem_entry.0)) + .filter_map(|entry| match entry { + // Only in db: keep it + itertools::EitherOrBoth::Left((key, node)) => Some((key, node)), + // Only in memory: keep if not a deletion + itertools::EitherOrBoth::Right((key, node_opt)) => { + node_opt.map(|node| (key, node)) + } + // In both: memory takes precedence (keep if not a deletion) + itertools::EitherOrBoth::Both(_, (key, node_opt)) => { + node_opt.map(|node| (key, node)) + } + }) + .collect() + } + + /// Generate a strategy for a `BranchNodeCompact` with simplified parameters. + /// The constraints are: + /// - `tree_mask` must be a subset of `state_mask` + /// - `hash_mask` must be a subset of `state_mask` + /// - `hash_mask.count_ones()` must equal `hashes.len()` + /// + /// To keep it simple, we use an empty hashes vec and `hash_mask` of 0. + fn branch_node_strategy() -> impl Strategy { + any::() + .prop_flat_map(|state_mask| { + let tree_mask_strategy = any::().prop_map(move |tree| tree & state_mask); + (Just(state_mask), tree_mask_strategy) + }) + .prop_map(|(state_mask, tree_mask)| { + BranchNodeCompact::new(state_mask, tree_mask, 0, vec![], None) + }) + } + + /// Generate a sorted vector of (Nibbles, `BranchNodeCompact`) entries + fn sorted_db_nodes_strategy() -> impl Strategy> { + prop::collection::vec( + (prop::collection::vec(any::(), 0..2), branch_node_strategy()), + 0..20, + ) + .prop_map(|entries| { + // Convert Vec to Nibbles and sort + let mut result: Vec<(Nibbles, BranchNodeCompact)> = entries + .into_iter() + .map(|(bytes, node)| (Nibbles::from_nibbles_unchecked(bytes), node)) + .collect(); + result.sort_by(|a, b| a.0.cmp(&b.0)); + result.dedup_by(|a, b| a.0 == b.0); + result + }) + } + + /// Generate a sorted vector of (Nibbles, Option) entries + fn sorted_in_memory_nodes_strategy( + ) -> impl Strategy)>> { + prop::collection::vec( + ( + prop::collection::vec(any::(), 0..2), + prop::option::of(branch_node_strategy()), + ), + 0..20, + ) + .prop_map(|entries| { + // Convert Vec to Nibbles and sort + let mut result: Vec<(Nibbles, Option)> = entries + .into_iter() + .map(|(bytes, node)| (Nibbles::from_nibbles_unchecked(bytes), node)) + .collect(); + result.sort_by(|a, b| a.0.cmp(&b.0)); + result.dedup_by(|a, b| a.0 == b.0); + result + }) + } + + proptest! 
{ + #![proptest_config(ProptestConfig::with_cases(10000))] + + #[test] + fn proptest_in_memory_trie_cursor( + db_nodes in sorted_db_nodes_strategy(), + in_memory_nodes in sorted_in_memory_nodes_strategy(), + op_choices in prop::collection::vec(any::(), 10..500), + ) { + reth_tracing::init_test_tracing(); + use tracing::debug; + + debug!( + db_paths=?db_nodes.iter().map(|(k, _)| k).collect::>(), + in_mem_nodes=?in_memory_nodes.iter().map(|(k, v)| (k, v.is_some())).collect::>(), + num_op_choices=?op_choices.len(), + "Starting proptest!", + ); + + // Create the expected results by merging the two sorted vectors, + // properly handling deletions (None values in in_memory_nodes) + let expected_combined = merge_with_overlay(db_nodes.clone(), in_memory_nodes.clone()); + + // Collect all keys for operation generation + let all_keys: Vec = expected_combined.iter().map(|(k, _)| *k).collect(); + + // Create a control cursor using the combined result with a mock cursor + let control_db_map: BTreeMap = + expected_combined.into_iter().collect(); + let control_db_arc = Arc::new(control_db_map); + let control_visited_keys = Arc::new(Mutex::new(Vec::new())); + let mut control_cursor = MockTrieCursor::new(control_db_arc, control_visited_keys); + + // Create the InMemoryTrieCursor being tested + let db_nodes_map: BTreeMap = + db_nodes.into_iter().collect(); + let db_nodes_arc = Arc::new(db_nodes_map); + let visited_keys = Arc::new(Mutex::new(Vec::new())); + let mock_cursor = MockTrieCursor::new(db_nodes_arc, visited_keys); + let mut test_cursor = InMemoryTrieCursor::new(Some(mock_cursor), &in_memory_nodes); + + // Test: seek to the beginning first + let control_first = control_cursor.seek(Nibbles::default()).unwrap(); + let test_first = test_cursor.seek(Nibbles::default()).unwrap(); + debug!( + control=?control_first.as_ref().map(|(k, _)| k), + test=?test_first.as_ref().map(|(k, _)| k), + "Initial seek returned", + ); + assert_eq!(control_first, test_first, "Initial seek mismatch"); + + // If both cursors returned None, nothing to test + if control_first.is_none() && test_first.is_none() { + return Ok(()); + } + + // Track the last key returned from the cursor + let mut last_returned_key = control_first.as_ref().map(|(k, _)| *k); + + // Execute a sequence of random operations + for choice in op_choices { + let op_type = choice % 3; + + match op_type { + 0 => { + // Next operation + let control_result = control_cursor.next().unwrap(); + let test_result = test_cursor.next().unwrap(); + debug!( + control=?control_result.as_ref().map(|(k, _)| k), + test=?test_result.as_ref().map(|(k, _)| k), + "Next returned", + ); + assert_eq!(control_result, test_result, "Next operation mismatch"); + + last_returned_key = control_result.as_ref().map(|(k, _)| *k); + + // Stop if both cursors are exhausted + if control_result.is_none() && test_result.is_none() { + break; + } + } + 1 => { + // Seek operation - choose a key >= last_returned_key + if all_keys.is_empty() { + continue; + } + + let valid_keys: Vec<_> = all_keys + .iter() + .filter(|k| last_returned_key.is_none_or(|last| **k >= last)) + .collect(); + + if valid_keys.is_empty() { + continue; + } + + let key = *valid_keys[choice as usize % valid_keys.len()]; + + let control_result = control_cursor.seek(key).unwrap(); + let test_result = test_cursor.seek(key).unwrap(); + debug!( + control=?control_result.as_ref().map(|(k, _)| k), + test=?test_result.as_ref().map(|(k, _)| k), + ?key, + "Seek returned", + ); + assert_eq!(control_result, test_result, "Seek operation mismatch 
for key {:?}", key); + + last_returned_key = control_result.as_ref().map(|(k, _)| *k); + + // Stop if both cursors are exhausted + if control_result.is_none() && test_result.is_none() { + break; + } + } + _ => { + // SeekExact operation - choose a key >= last_returned_key + if all_keys.is_empty() { + continue; + } + + let valid_keys: Vec<_> = all_keys + .iter() + .filter(|k| last_returned_key.is_none_or(|last| **k >= last)) + .collect(); + + if valid_keys.is_empty() { + continue; + } + + let key = *valid_keys[choice as usize % valid_keys.len()]; + + let control_result = control_cursor.seek_exact(key).unwrap(); + let test_result = test_cursor.seek_exact(key).unwrap(); + debug!( + control=?control_result.as_ref().map(|(k, _)| k), + test=?test_result.as_ref().map(|(k, _)| k), + ?key, + "SeekExact returned", + ); + assert_eq!(control_result, test_result, "SeekExact operation mismatch for key {:?}", key); + + // seek_exact updates the last_key internally but only if it found something + last_returned_key = control_result.as_ref().map(|(k, _)| *k); + } + } + } + } + } + } } diff --git a/crates/trie/trie/src/trie_cursor/mock.rs b/crates/trie/trie/src/trie_cursor/mock.rs index 4b0b7f699dc..313df0443e3 100644 --- a/crates/trie/trie/src/trie_cursor/mock.rs +++ b/crates/trie/trie/src/trie_cursor/mock.rs @@ -52,11 +52,17 @@ impl MockTrieCursorFactory { } impl TrieCursorFactory for MockTrieCursorFactory { - type AccountTrieCursor = MockTrieCursor; - type StorageTrieCursor = MockTrieCursor; + type AccountTrieCursor<'a> + = MockTrieCursor + where + Self: 'a; + type StorageTrieCursor<'a> + = MockTrieCursor + where + Self: 'a; /// Generates a mock account trie cursor. - fn account_trie_cursor(&self) -> Result { + fn account_trie_cursor(&self) -> Result, DatabaseError> { Ok(MockTrieCursor::new(self.account_trie_nodes.clone(), self.visited_account_keys.clone())) } @@ -64,7 +70,7 @@ impl TrieCursorFactory for MockTrieCursorFactory { fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { Ok(MockTrieCursor::new( self.storage_tries .get(&hashed_address) @@ -103,7 +109,7 @@ impl MockTrieCursor { } impl TrieCursor for MockTrieCursor { - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn seek_exact( &mut self, key: Nibbles, @@ -119,7 +125,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn seek( &mut self, key: Nibbles, @@ -136,7 +142,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn next(&mut self) -> Result, DatabaseError> { let mut iter = self.trie_nodes.iter(); // Jump to the first key that has a prefix of the current key if it's set, or to the first @@ -155,7 +161,7 @@ impl TrieCursor for MockTrieCursor { Ok(entry) } - #[instrument(level = "trace", skip(self), ret)] + #[instrument(skip(self), ret(level = "trace"))] fn current(&mut self) -> Result, DatabaseError> { Ok(self.current_key) } diff --git a/crates/trie/trie/src/trie_cursor/mod.rs b/crates/trie/trie/src/trie_cursor/mod.rs index 01eea4c40e6..05a6c09e948 100644 --- a/crates/trie/trie/src/trie_cursor/mod.rs +++ b/crates/trie/trie/src/trie_cursor/mod.rs @@ -24,24 +24,29 @@ pub use self::{depth_first::DepthFirstTrieIterator, in_memory::*, subnode::Curso #[auto_impl::auto_impl(&)] pub trait TrieCursorFactory { /// The account trie cursor type. 
- type AccountTrieCursor: TrieCursor; + type AccountTrieCursor<'a>: TrieCursor + where + Self: 'a; + /// The storage trie cursor type. - type StorageTrieCursor: TrieCursor; + type StorageTrieCursor<'a>: TrieCursor + where + Self: 'a; /// Create an account trie cursor. - fn account_trie_cursor(&self) -> Result; + fn account_trie_cursor(&self) -> Result, DatabaseError>; /// Create a storage tries cursor. fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result; + ) -> Result, DatabaseError>; } /// A cursor for traversing stored trie nodes. The cursor must iterate over keys in /// lexicographical order. -#[auto_impl::auto_impl(&mut, Box)] -pub trait TrieCursor: Send + Sync { +#[auto_impl::auto_impl(&mut)] +pub trait TrieCursor { /// Move the cursor to the key and return if it is an exact match. fn seek_exact( &mut self, @@ -58,3 +63,48 @@ pub trait TrieCursor: Send + Sync { /// Get the current entry. fn current(&mut self) -> Result, DatabaseError>; } + +/// Iterator wrapper for `TrieCursor` types +#[derive(Debug)] +pub struct TrieCursorIter<'a, C> { + cursor: &'a mut C, + /// The initial value from seek, if any + initial: Option>, +} + +impl<'a, C> TrieCursorIter<'a, C> { + /// Create a new iterator from a mutable reference to a cursor. The Iterator will start from the + /// empty path. + pub fn new(cursor: &'a mut C) -> Self + where + C: TrieCursor, + { + let initial = cursor.seek(Nibbles::default()).transpose(); + Self { cursor, initial } + } +} + +impl<'a, C> From<&'a mut C> for TrieCursorIter<'a, C> +where + C: TrieCursor, +{ + fn from(cursor: &'a mut C) -> Self { + Self::new(cursor) + } +} + +impl<'a, C> Iterator for TrieCursorIter<'a, C> +where + C: TrieCursor, +{ + type Item = Result<(Nibbles, BranchNodeCompact), DatabaseError>; + + fn next(&mut self) -> Option { + // If we have an initial value from seek, return it first + if let Some(initial) = self.initial.take() { + return Some(initial); + } + + self.cursor.next().transpose() + } +} diff --git a/crates/trie/trie/src/trie_cursor/noop.rs b/crates/trie/trie/src/trie_cursor/noop.rs index de409c59fe1..a00a18e4f00 100644 --- a/crates/trie/trie/src/trie_cursor/noop.rs +++ b/crates/trie/trie/src/trie_cursor/noop.rs @@ -9,11 +9,18 @@ use reth_storage_errors::db::DatabaseError; pub struct NoopTrieCursorFactory; impl TrieCursorFactory for NoopTrieCursorFactory { - type AccountTrieCursor = NoopAccountTrieCursor; - type StorageTrieCursor = NoopStorageTrieCursor; + type AccountTrieCursor<'a> + = NoopAccountTrieCursor + where + Self: 'a; + + type StorageTrieCursor<'a> + = NoopStorageTrieCursor + where + Self: 'a; /// Generates a noop account trie cursor. - fn account_trie_cursor(&self) -> Result { + fn account_trie_cursor(&self) -> Result, DatabaseError> { Ok(NoopAccountTrieCursor::default()) } @@ -21,7 +28,7 @@ impl TrieCursorFactory for NoopTrieCursorFactory { fn storage_trie_cursor( &self, _hashed_address: B256, - ) -> Result { + ) -> Result, DatabaseError> { Ok(NoopStorageTrieCursor::default()) } } diff --git a/crates/trie/trie/src/verify.rs b/crates/trie/trie/src/verify.rs index 5f2260bc7dc..4299a669165 100644 --- a/crates/trie/trie/src/verify.rs +++ b/crates/trie/trie/src/verify.rs @@ -301,21 +301,24 @@ impl SingleVerifier> { /// database tables as the source of truth. This will iteratively recompute the entire trie based /// on the hashed state, and produce any discovered [`Output`]s via the `next` method. 
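Stepping back to the `TrieCursorFactory` change above before the `Verifier` definition continues: the move from plain associated types to lifetime-parameterized associated types (GATs) is what allows cursors to borrow from their factory, so the factory no longer needs `Clone + Send + Sync` bounds. A hedged, self-contained sketch of the pattern with toy types (none of these names exist in reth):

```rust
use std::collections::BTreeMap;

trait NodeCursorFactory {
    // The cursor type may borrow from the factory for the lifetime `'a`.
    type Cursor<'a>: NodeCursor
    where
        Self: 'a;

    fn cursor(&self) -> Self::Cursor<'_>;
}

trait NodeCursor {
    fn seek(&mut self, key: u64) -> Option<(u64, String)>;
}

struct MapFactory {
    nodes: BTreeMap<u64, String>,
}

// A cursor that borrows the factory's data instead of cloning it.
struct MapCursor<'a> {
    nodes: &'a BTreeMap<u64, String>,
}

impl NodeCursorFactory for MapFactory {
    type Cursor<'a>
        = MapCursor<'a>
    where
        Self: 'a;

    fn cursor(&self) -> Self::Cursor<'_> {
        MapCursor { nodes: &self.nodes }
    }
}

impl NodeCursor for MapCursor<'_> {
    fn seek(&mut self, key: u64) -> Option<(u64, String)> {
        self.nodes.range(key..).next().map(|(k, v)| (*k, v.clone()))
    }
}

fn main() {
    let factory = MapFactory { nodes: BTreeMap::from([(1, "a".into()), (5, "b".into())]) };
    let mut cursor = factory.cursor();
    assert_eq!(cursor.seek(2), Some((5, "b".to_string())));
}
```

Because the returned cursor only borrows `&self`, callers such as `Proof::new(&self.trie_cursor_factory, &self.hashed_cursor_factory)` can pass references instead of cloning the factories, which is the pattern the rest of this patch follows.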
#[derive(Debug)] -pub struct Verifier { - trie_cursor_factory: T, +pub struct Verifier<'a, T: TrieCursorFactory, H> { + trie_cursor_factory: &'a T, hashed_cursor_factory: H, branch_node_iter: StateRootBranchNodesIter, outputs: Vec, - account: SingleVerifier>, - storage: Option<(B256, SingleVerifier>)>, + account: SingleVerifier>>, + storage: Option<(B256, SingleVerifier>>)>, complete: bool, } -impl Verifier { +impl<'a, T: TrieCursorFactory, H: HashedCursorFactory + Clone> Verifier<'a, T, H> { /// Creates a new verifier instance. - pub fn new(trie_cursor_factory: T, hashed_cursor_factory: H) -> Result { + pub fn new( + trie_cursor_factory: &'a T, + hashed_cursor_factory: H, + ) -> Result { Ok(Self { - trie_cursor_factory: trie_cursor_factory.clone(), + trie_cursor_factory, hashed_cursor_factory: hashed_cursor_factory.clone(), branch_node_iter: StateRootBranchNodesIter::new(hashed_cursor_factory), outputs: Default::default(), @@ -326,7 +329,7 @@ impl Verifier Verifier { +impl<'a, T: TrieCursorFactory, H: HashedCursorFactory + Clone> Verifier<'a, T, H> { fn new_storage( &mut self, account: B256, @@ -400,9 +403,8 @@ impl Verifier { // need to validate that all accounts coming after it have empty storages. let prev_account = *prev_account; - // Calculate the max possible account address. - let mut max_account = B256::ZERO; - max_account.reverse(); + // Calculate the max possible account address (all bits set). + let max_account = B256::from([0xFFu8; 32]); self.verify_empty_storages(prev_account, max_account, false, true)?; } @@ -445,7 +447,7 @@ impl Verifier { } } -impl Iterator for Verifier { +impl<'a, T: TrieCursorFactory, H: HashedCursorFactory + Clone> Iterator for Verifier<'a, T, H> { type Item = Result; fn next(&mut self) -> Option { diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 871d599c76b..763908c242d 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -24,7 +24,7 @@ use reth_trie_sparse::{ provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}, SerialSparseTrie, SparseStateTrie, }; -use std::sync::{mpsc, Arc}; +use std::sync::mpsc; /// State transition witness for the trie. #[derive(Debug)] @@ -147,11 +147,7 @@ where let (tx, rx) = mpsc::channel(); let blinded_provider_factory = WitnessTrieNodeProviderFactory::new( - ProofTrieNodeProviderFactory::new( - self.trie_cursor_factory, - self.hashed_cursor_factory, - Arc::new(self.prefix_sets), - ), + ProofTrieNodeProviderFactory::new(self.trie_cursor_factory, self.hashed_cursor_factory), tx, ); let mut sparse_trie = SparseStateTrie::::new(); diff --git a/docs/cli/help.rs b/docs/cli/help.rs index 05e61eef740..0474d00e723 100755 --- a/docs/cli/help.rs +++ b/docs/cli/help.rs @@ -269,11 +269,6 @@ fn preprocess_help(s: &str) -> Cow<'_, str> { r"(rpc.max-tracing-requests \n.*\n.*\n.*\n.*\n.*)\[default: \d+\]", r"$1[default: ]", ), - // Handle engine.max-proof-task-concurrency dynamic default - ( - r"(engine\.max-proof-task-concurrency.*)\[default: \d+\]", - r"$1[default: ]", - ), // Handle engine.reserved-cpu-cores dynamic default ( r"(engine\.reserved-cpu-cores.*)\[default: \d+\]", diff --git a/docs/crates/eth-wire.md b/docs/crates/eth-wire.md index cf0c2cc5377..cf62ab143e8 100644 --- a/docs/crates/eth-wire.md +++ b/docs/crates/eth-wire.md @@ -9,48 +9,70 @@ This crate can be thought of as having 2 components: 2. Abstractions over Tokio Streams that operate on these types. (Note that ECIES is implemented in a separate `reth-ecies` crate.) 
+Additionally, this crate focuses on stream implementations (P2P and Eth), handshakes, and multiplexing. The protocol +message types and RLP encoding/decoding live in the separate `eth-wire-types` crate and are re-exported by `eth-wire` +for convenience. ## Types The most basic Eth-wire type is a `ProtocolMessage`. It describes all messages that reth can send/receive. -[File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/message.rs) +[File: crates/net/eth-wire-types/src/message.rs](../../crates/net/eth-wire-types/src/message.rs) ```rust, ignore /// An `eth` protocol message, containing a message ID and payload. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct ProtocolMessage { +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ProtocolMessage { pub message_type: EthMessageID, - pub message: EthMessage, + pub message: EthMessage, } -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub enum EthMessage { - Status(Status), +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum EthMessage { + Status(StatusMessage), NewBlockHashes(NewBlockHashes), - Transactions(Transactions), - NewPooledTransactionHashes(NewPooledTransactionHashes), + NewBlock(Box), + Transactions(Transactions), + NewPooledTransactionHashes66(NewPooledTransactionHashes66), + NewPooledTransactionHashes68(NewPooledTransactionHashes68), GetBlockHeaders(RequestPair), - // ... + BlockHeaders(RequestPair>), + GetBlockBodies(RequestPair), + BlockBodies(RequestPair>), + GetPooledTransactions(RequestPair), + PooledTransactions(RequestPair>), + GetNodeData(RequestPair), + NodeData(RequestPair), GetReceipts(RequestPair), - Receipts(RequestPair), + Receipts(RequestPair>), + Receipts69(RequestPair>), + BlockRangeUpdate(BlockRangeUpdate), } /// Represents message IDs for eth protocol messages. #[repr(u8)] -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum EthMessageID { Status = 0x00, NewBlockHashes = 0x01, Transactions = 0x02, - // ... + GetBlockHeaders = 0x03, + BlockHeaders = 0x04, + GetBlockBodies = 0x05, + BlockBodies = 0x06, + NewBlock = 0x07, + NewPooledTransactionHashes = 0x08, + GetPooledTransactions = 0x09, + PooledTransactions = 0x0a, + GetNodeData = 0x0d, NodeData = 0x0e, GetReceipts = 0x0f, Receipts = 0x10, + BlockRangeUpdate = 0x11, } ``` Messages can either be broadcast to the network, or can be a request/response message to a single peer. This 2nd type of message is described using a `RequestPair` struct, which is simply a concatenation of the underlying message with a request id. -[File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/message.rs) +[File: crates/net/eth-wire-types/src/message.rs](../../crates/net/eth-wire-types/src/message.rs) ```rust, ignore #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct RequestPair { @@ -59,10 +81,8 @@ pub struct RequestPair { } ``` -Every `Ethmessage` has a corresponding rust struct that implements the `Encodable` and `Decodable` traits. 
-These traits are defined as follows: - -[Crate: crates/rlp](https://github.com/paradigmxyz/reth/tree/1563506aea09049a85e5cc72c2894f3f7a371581/crates/rlp) +Every `EthMessage` has a corresponding Rust struct that implements `alloy_rlp::Encodable` and `alloy_rlp::Decodable` +(often via derive macros like `RlpEncodable`/`RlpDecodable`). These traits are defined in `alloy_rlp`: ```rust, ignore pub trait Decodable: Sized { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result; @@ -72,10 +92,11 @@ pub trait Encodable { fn length(&self) -> usize; } ``` -These traits describe how the `Ethmessage` should be serialized/deserialized into raw bytes using the RLP format. -In reth all [RLP](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp/) encode/decode operations are handled by the `common/rlp` and `common/rlp-derive` crates. +These traits describe how the `EthMessage` should be serialized/deserialized into raw bytes using the RLP format. +In reth all [RLP](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp/) encode/decode operations are handled by `alloy_rlp` and the derive macros used in `eth-wire-types`. -Note that the `ProtocolMessage` itself implements these traits, so any stream of bytes can be converted into it by calling `ProtocolMessage::decode()` and vice versa with `ProtocolMessage::encode()`. The message type is determined by the first byte of the byte stream. +Note: `ProtocolMessage` implements `Encodable`, while decoding is performed via +`ProtocolMessage::decode_message(version, &mut bytes)` because decoding must respect the negotiated `EthVersion`. ### Example: The Transactions message Let's understand how an `EthMessage` is implemented by taking a look at the `Transactions` Message. The eth specification describes a Transaction message as a list of RLP-encoded transactions: @@ -93,17 +114,17 @@ The items in the list are transactions in the format described in the main Ether In reth, this is represented as: -[File: crates/net/eth-wire/src/types/broadcast.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/broadcast.rs) +[File: crates/net/eth-wire-types/src/broadcast.rs](../../crates/net/eth-wire-types/src/broadcast.rs) ```rust,ignore -pub struct Transactions( +pub struct Transactions( /// New transactions for the peer to include in its mempool. - pub Vec, + pub Vec, ); ``` -And the corresponding trait implementations are present in the primitives crate. +And the corresponding transaction type is defined here: -[File: crates/primitives/src/transaction/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/primitives/src/transaction/mod.rs) +[File: crates/ethereum/primitives/src/transaction.rs](../../crates/ethereum/primitives/src/transaction.rs) ```rust, ignore #[reth_codec] #[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Default, Serialize, Deserialize)] @@ -146,7 +167,7 @@ The lowest level stream to communicate with other peers is the P2P stream. It ta Decompression/Compression of bytes is done with snappy algorithm ([EIP 706](https://eips.ethereum.org/EIPS/eip-706)) using the external `snap` crate. 
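Before looking at the `P2PStream` definition, here is a hedged round-trip sketch of that framing with the external `snap` crate (assumed as a dependency): byte 0 carries the uncompressed message ID and the rest of the frame is snappy-compressed. Buffer handling and errors are simplified compared to the real stream, which also applies the message-ID offset discussed below.

```rust
// Minimal sketch of the P2P payload framing: byte 0 is the (uncompressed)
// message ID, the remainder is snappy-compressed. The real `P2PStream` also
// enforces size limits and offsets the ID byte into the shared-capability range.
fn compress_frame(msg: &[u8]) -> Result<Vec<u8>, snap::Error> {
    let mut out = vec![0u8; 1 + snap::raw::max_compress_len(msg.len() - 1)];
    let written = snap::raw::Encoder::new().compress(&msg[1..], &mut out[1..])?;
    out.truncate(written + 1);
    out[0] = msg[0]; // ID byte is passed through uncompressed
    Ok(out)
}

fn decompress_frame(frame: &[u8]) -> Result<Vec<u8>, snap::Error> {
    let len = snap::raw::decompress_len(&frame[1..])?;
    let mut out = vec![0u8; 1 + len];
    snap::raw::Decoder::new().decompress(&frame[1..], &mut out[1..])?;
    out[0] = frame[0];
    Ok(out)
}

fn main() -> Result<(), snap::Error> {
    let msg = [0x02u8, b'h', b'e', b'l', b'l', b'o'];
    let frame = compress_frame(&msg)?;
    assert_eq!(decompress_frame(&frame)?, msg.to_vec());
    Ok(())
}
```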
-[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](../../crates/net/eth-wire/src/p2pstream.rs) ```rust,ignore #[pin_project] pub struct P2PStream { @@ -155,23 +176,29 @@ pub struct P2PStream { encoder: snap::raw::Encoder, decoder: snap::raw::Decoder, pinger: Pinger, - shared_capability: SharedCapability, + /// Negotiated shared capabilities + shared_capabilities: SharedCapabilities, + /// Outgoing messages buffered for sending to the underlying stream. outgoing_messages: VecDeque, + /// Maximum number of messages that can be buffered before yielding backpressure. + outgoing_message_buffer_capacity: usize, + /// Whether this stream is currently in the process of gracefully disconnecting. disconnecting: bool, } ``` ### Pinger -To manage pinging, an instance of the `Pinger` struct is used. This is a state machine that keeps track of how many pings -we have sent/received and the timeouts associated with them. +To manage pinging, an instance of the `Pinger` struct is used. This is a state machine that keeps track of pings +we have sent/received and the timeout associated with them. -[File: crates/net/eth-wire/src/pinger.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/pinger.rs) +[File: crates/net/eth-wire/src/pinger.rs](../../crates/net/eth-wire/src/pinger.rs) ```rust,ignore #[derive(Debug)] pub(crate) struct Pinger { /// The timer used for the next ping. ping_interval: Interval, - /// The timer used for the next ping. + /// The timer used to detect a ping timeout. timeout_timer: Pin>, + /// The timeout duration for each ping. timeout: Duration, state: PingState, } @@ -205,7 +232,7 @@ pub(crate) fn poll_ping( } } PingState::WaitingForPong => { - if self.timeout_timer.is_elapsed() { + if self.timeout_timer.as_mut().poll(cx).is_ready() { self.state = PingState::TimedOut; return Poll::Ready(Ok(PingerEvent::Timeout)) } @@ -223,7 +250,7 @@ To send and receive data, the P2PStream itself is a future that implements the ` For the `Stream` trait, the `inner` stream is polled, decompressed and returned. Most of the code is just error handling and is omitted here for clarity. -[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](../../crates/net/eth-wire/src/p2pstream.rs) ```rust,ignore impl Stream for P2PStream { @@ -240,7 +267,8 @@ impl Stream for P2PStream { let mut decompress_buf = BytesMut::zeroed(decompressed_len + 1); this.decoder.decompress(&bytes[1..], &mut decompress_buf[1..])?; // ... Omitted Error handling - decompress_buf[0] = bytes[0] - this.shared_capability.offset(); + // Normalize IDs: reserved p2p range is 0x00..=0x0f; subprotocols start at 0x10 + decompress_buf[0] = bytes[0] - MAX_RESERVED_MESSAGE_ID - 1; return Poll::Ready(Some(Ok(decompress_buf))) } } @@ -250,7 +278,7 @@ impl Stream for P2PStream { Similarly, for the `Sink` trait, we do the reverse, compressing and sending data out to the `inner` stream. The important functions in this trait are shown below. 
-[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](../../crates/net/eth-wire/src/p2pstream.rs) ```rust, ignore impl Sink for P2PStream { fn start_send(self: Pin<&mut Self>, item: Bytes) -> Result<(), Self::Error> { @@ -258,7 +286,8 @@ impl Sink for P2PStream { let mut compressed = BytesMut::zeroed(1 + snap::raw::max_compress_len(item.len() - 1)); let compressed_size = this.encoder.compress(&item[1..], &mut compressed[1..])?; compressed.truncate(compressed_size + 1); - compressed[0] = item[0] + this.shared_capability.offset(); + // Mask subprotocol IDs into global space above reserved p2p IDs + compressed[0] = item[0] + MAX_RESERVED_MESSAGE_ID + 1; this.outgoing_messages.push_back(compressed.freeze()); Ok(()) } @@ -285,9 +314,9 @@ impl Sink for P2PStream { ## EthStream -The EthStream is very simple, it does not keep track of any state, it simply wraps the P2Pstream. +The EthStream wraps a stream and handles eth message (RLP) encoding/decoding with respect to the negotiated `EthVersion`. -[File: crates/net/eth-wire/src/ethstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs) +[File: crates/net/eth-wire/src/ethstream.rs](../../crates/net/eth-wire/src/ethstream.rs) ```rust,ignore #[pin_project] pub struct EthStream { @@ -295,10 +324,10 @@ pub struct EthStream { inner: S, } ``` -EthStream's only job is to perform the RLP decoding/encoding, using the `ProtocolMessage::decode()` and `ProtocolMessage::encode()` -functions we looked at earlier. +EthStream performs RLP decoding/encoding using `ProtocolMessage::decode_message(version, &mut bytes)` +and `ProtocolMessage::encode()`, and enforces protocol rules (e.g., prohibiting `Status` after handshake). -[File: crates/net/eth-wire/src/ethstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs) +[File: crates/net/eth-wire/src/ethstream.rs](../../crates/net/eth-wire/src/ethstream.rs) ```rust,ignore impl Stream for EthStream { // ... @@ -306,7 +335,7 @@ impl Stream for EthStream { let this = self.project(); let bytes = ready!(this.inner.poll_next(cx)).unwrap(); // ... - let msg = match ProtocolMessage::decode(&mut bytes.as_ref()) { + let msg = match ProtocolMessage::decode_message(self.version(), &mut bytes.as_ref()) { Ok(m) => m, Err(err) => { return Poll::Ready(Some(Err(err.into()))) @@ -319,10 +348,12 @@ impl Stream for EthStream { impl Sink for EthStream { // ... fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { - // ... + if matches!(item, EthMessage::Status(_)) { + let _ = self.project().inner.disconnect(DisconnectReason::ProtocolBreach); + return Err(EthStreamError::EthHandshakeError(EthHandshakeError::StatusNotInHandshake)) + } let mut bytes = BytesMut::new(); ProtocolMessage::from(item).encode(&mut bytes); - let bytes = bytes.freeze(); self.project().inner.start_send(bytes)?; Ok(()) @@ -339,9 +370,9 @@ For a session to be established, peers in the Ethereum network must first exchan To perform these, reth has special `Unauthed` versions of streams described above. -The `UnauthedP2Pstream` does the `Hello` handshake and returns a `P2PStream`. +The `UnauthedP2PStream` does the `Hello` handshake and returns a `P2PStream`. 
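Before the handshake code, a quick hedged recap of the message-ID masking used in the `Stream`/`Sink` impls above: reserved p2p messages occupy IDs `0x00..=0x0f`, so the first shared-capability message lands at `0x10` on the wire. The constant's value is inferred from that reserved range; the helper names below are hypothetical.

```rust
// IDs 0x00..=0x0f are reserved for the p2p protocol itself (Hello, Ping, ...);
// shared capability messages are shifted above them on the wire.
const MAX_RESERVED_MESSAGE_ID: u8 = 0x0f;

/// Map a capability-relative message ID (e.g. eth `Status` = 0x00) to its wire ID.
fn mask_id(relative_id: u8) -> u8 {
    relative_id + MAX_RESERVED_MESSAGE_ID + 1
}

/// Map a wire ID back to the capability-relative message ID.
fn unmask_id(wire_id: u8) -> u8 {
    wire_id - MAX_RESERVED_MESSAGE_ID - 1
}

fn main() {
    // eth Status (0x00) travels as 0x10; GetBlockHeaders (0x03) as 0x13.
    assert_eq!(mask_id(0x00), 0x10);
    assert_eq!(unmask_id(0x13), 0x03);
}
```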
-[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](../../crates/net/eth-wire/src/p2pstream.rs) ```rust, ignore #[pin_project] pub struct UnauthedP2PStream { @@ -351,8 +382,8 @@ pub struct UnauthedP2PStream { impl UnauthedP2PStream { // ... - pub async fn handshake(mut self, hello: HelloMessage) -> Result<(P2PStream, HelloMessage), Error> { - self.inner.send(alloy_rlp::encode(P2PMessage::Hello(hello.clone())).into()).await?; + pub async fn handshake(mut self, hello: HelloMessageWithProtocols) -> Result<(P2PStream, HelloMessage), Error> { + self.inner.send(alloy_rlp::encode(P2PMessage::Hello(hello.message())).into()).await?; let first_message_bytes = tokio::time::timeout(HANDSHAKE_TIMEOUT, self.inner.next()).await; let their_hello = match P2PMessage::decode(&mut &first_message_bytes[..]) { @@ -360,11 +391,25 @@ impl UnauthedP2PStream { // ... } }?; - let stream = P2PStream::new(self.inner, capability); + let stream = P2PStream::new(self.inner, shared_capabilities); Ok((stream, their_hello)) } } ``` -Similarly, UnauthedEthStream does the `Status` handshake and returns an `EthStream`. The code is [here](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs) +Similarly, `UnauthedEthStream` does the `Status` handshake and returns an `EthStream`. It accepts a `UnifiedStatus` +and a `ForkFilter`, and provides a timeout wrapper. The code is [here](../../crates/net/eth-wire/src/ethstream.rs) + +### Multiplexing and satellites + +`eth-wire` also provides `RlpxProtocolMultiplexer`/`RlpxSatelliteStream` to run the primary `eth` protocol alongside +additional "satellite" protocols (e.g. `snap`) using negotiated `SharedCapabilities`. + +## Message variants and versions + +- `NewPooledTransactionHashes` differs between ETH66 (`NewPooledTransactionHashes66`) and ETH68 (`NewPooledTransactionHashes68`). +- Starting with ETH67, `GetNodeData` and `NodeData` are removed (decoding them for >=67 yields an error). +- Starting with ETH69: + - `BlockRangeUpdate (0x11)` announces the historical block range served. + - Receipts omit bloom: encoded as `Receipts69` instead of `Receipts`. diff --git a/docs/design/review.md b/docs/design/review.md index 22a32ef904f..304d3582f5e 100644 --- a/docs/design/review.md +++ b/docs/design/review.md @@ -4,7 +4,7 @@ This document contains some of our research on how other codebases designed vari ## P2P -* [`Sentry`](https://erigon.gitbook.io/erigon/advanced-usage/sentry), a pluggable p2p node following the [Erigon gRPC architecture](https://erigon.substack.com/p/current-status-of-silkworm-and-silkrpc): +* [`Sentry`](https://erigon.gitbook.io/docs/summary/fundamentals/modules/sentry), a pluggable p2p node following the [Erigon gRPC architecture](https://erigon.substack.com/p/current-status-of-silkworm-and-silkrpc): * [`vorot93`](https://github.com/vorot93/) first started by implementing a rust devp2p stack in [`devp2p`](https://github.com/vorot93/devp2p) * vorot93 then started work on sentry, using devp2p, to satisfy the erigon architecture of modular components connected with gRPC. 
* The code from rust-ethereum/devp2p was merged into sentry, and rust-ethereum/devp2p was archived diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 22aae4c3512..22d13ffd01a 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -40,7 +40,7 @@ All binaries are stored in [`bin`](../../bin). These crates are related to the database. - [`storage/codecs`](../../crates/storage/codecs): Different storage codecs. -- [`storage/libmdbx-rs`](../../crates/storage/libmdbx-rs): Rust bindings for [libmdbx](https://libmdbx.dqdkfa.ru). A fork of an earlier Apache-licensed version of [libmdbx-rs][libmdbx-rs]. +- [`storage/libmdbx-rs`](../../crates/storage/libmdbx-rs): Rust bindings for [libmdbx](https://github.com/erthink/libmdbx). A fork of an earlier Apache-licensed version of [libmdbx-rs][libmdbx-rs]. - [`storage/db`](../../crates/storage/db): Strongly typed Database abstractions (transactions, cursors, tables) over lower level database backends. - Implemented backends: mdbx - [`storage/provider`](../../crates/storage/provider): Traits which provide a higher level api over the database to access the Ethereum state and historical data (transactions, blocks etc.) diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 9a32d647876..c35216d6b5c 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -96,7 +96,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -113,4 +113,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index b449f118168..6b3c9e4b657 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -82,7 +82,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -99,4 +99,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 2553a1480f9..6b98c08112b 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -72,7 +72,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -83,6 +87,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + Logging: --log.stdout.format The format to use for logs written to stdout @@ -147,7 +154,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -164,4 +171,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index ba12fd1b2f5..4b8b8ca2cce 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -99,7 +99,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -116,4 +116,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 79e324021bf..1548558fe39 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -91,7 +91,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -108,4 +108,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index 843f5253c9a..b48ba180982 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -90,7 +90,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -107,4 +107,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index 3af272ff362..9f22178ec4c 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -93,7 +93,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -110,4 +110,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index f440545f129..56258531188 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -35,7 +35,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -46,6 +50,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --table The table name to diff. If not specified, all tables are diffed. @@ -126,7 +133,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -143,4 +150,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index 64552318a21..c778320f2d8 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -89,7 +89,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -106,4 +106,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index c7fc831b764..dfcfcac1886 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -91,7 +91,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -108,4 +108,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 48fd6c889c6..981d0c9f9a5 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -99,7 +99,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -116,4 +116,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index af21819a452..8e045a4cdf1 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -99,7 +99,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -116,4 +116,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index cff6c7eed5e..3be1cd183b2 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -132,7 +132,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -149,4 +149,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index 1dd3279a797..a954093dd5d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -86,7 +86,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -103,4 +103,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. 
+ + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index f5058265196..6436afc2133 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -89,7 +89,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -106,4 +106,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index 1f2c50908dc..5bd316847c0 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -99,7 +99,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -116,4 +116,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. 
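The tracing flags added across these pages share one shape: an endpoint, a transport protocol, and a `RUST_LOG`-style filter. A minimal sketch of wiring them together for `reth db stats`, assuming a local OTLP collector is already listening on the HTTP default quoted above (`http://localhost:4318/v1/traces`); the flag names and values come from the help text, the collector itself is an assumption.

```bash
# Export spans from `reth db stats` to a local OTLP collector over HTTP.
# Endpoint, protocol and filter values mirror the help text above; the
# collector (localhost:4318) is assumed to be running separately.
reth db stats \
  --tracing-otlp=http://localhost:4318/v1/traces \
  --tracing-otlp-protocol http \
  --tracing-otlp.filter=info,reth=debug,hyper_util=off
```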
+ + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index a683749fcdf..c87496d910d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -86,7 +86,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -103,4 +103,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index 973dce74a22..0ba6c7407e8 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -70,6 +74,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + -u, --url Specify a snapshot URL or let the command propose a default one. @@ -78,7 +85,7 @@ Database: - https://publicnode.com/snapshots (full nodes & testnets) If no URL is provided, the latest mainnet archive snapshot - will be proposed for download from merkle.io + will be proposed for download from https://downloads.merkle.io Logging: --log.stdout.format @@ -144,7 +151,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -161,4 +168,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index 6bc27381a24..7aeaa8db49a 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -85,7 +85,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -102,4 +102,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 896f7f34d08..051c81fcce9 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -70,6 +74,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --first-block-number Optional first block number to export from the db. It is by default 0. @@ -150,7 +157,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -167,4 +174,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index a783067d193..14aa47e0ef1 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -70,6 +74,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --path The path to a directory for import. @@ -145,7 +152,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -162,4 +169,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. 
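Since `--db.max-size` now documents the MDBX map-size behaviour, a hedged example of pinning it explicitly for an ERA import; the `8TB` value matches the stated default, while the ERA directory path is purely illustrative.

```bash
# Import ERA1 files while keeping the MDBX map size at the documented 8TB default.
# ./era1 is a placeholder path; point it at a real directory of ERA1 files.
reth import-era \
  --path ./era1 \
  --db.max-size 8TB
```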
+ + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 0914444e108..b8051d9d2f8 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -70,6 +74,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --no-state Disables stages that require state. @@ -146,7 +153,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -163,4 +170,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 8c0cfa6e4d3..e43c87f806f 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -70,6 +74,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --without-evm Specifies whether to initialize the state without relying on EVM historical data. @@ -80,9 +87,6 @@ Database: --header Header file containing the header in an RLP encoded format. - --total-difficulty - Total difficulty of the header. - --header-hash Hash of the header. 
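The hunk above drops `--total-difficulty` from `reth init-state`, leaving the header file, its hash, and `--without-evm` as the relevant knobs. A sketch under the assumptions that the state dump is supplied as a positional file argument and that the header and hash values are placeholders, not values from the docs:

```bash
# Initialize state without relying on EVM historical data.
# header.rlp, <header-hash> and <state-file> are placeholders; the positional
# state-file argument is an assumption, not shown in the hunk above.
reth init-state \
  --without-evm \
  --header ./header.rlp \
  --header-hash "<header-hash>" \
  "<state-file>"
```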
@@ -169,7 +173,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -186,4 +190,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index b1ac27e8ba7..6ad439c6a03 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -70,6 +74,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + Logging: --log.stdout.format The format to use for logs written to stdout @@ -134,7 +141,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -151,4 +158,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. 
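Because the same tracing options also read the `OTEL_EXPORTER_OTLP_*` environment variables listed above, a gRPC setup can be expressed once in the environment instead of repeating flags per command. The endpoint follows the documented gRPC default (port 4317, no `/v1/traces` path); whether the bare `--tracing-otlp` flag is still needed to enable export when only the environment variables are set is an assumption worth verifying, so it is passed explicitly here.

```bash
# Configure OTLP export via the documented environment variables.
# gRPC expects the endpoint without a /v1/traces path (default port 4317).
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://localhost:4317
export OTEL_EXPORTER_OTLP_PROTOCOL=grpc
# The bare flag enables export; endpoint/protocol come from the env vars above.
reth init --tracing-otlp
```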
+ + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index fc9d6b317a6..dc59bc6fbde 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -39,11 +39,23 @@ Options: Print help (see a summary with '-h') Metrics: - --metrics + --metrics Enable Prometheus metrics. The metrics will be served at the given interface and port. + --metrics.prometheus.push.url + URL for pushing Prometheus metrics to a push gateway. + + If set, the node will periodically push metrics to the specified push gateway URL. + + --metrics.prometheus.push.interval + Interval in seconds for pushing metrics to push gateway. + + Default: 5 seconds + + [default: 5] + Datadir: --datadir The path to the data dir for all reth files and subdirectories. @@ -167,10 +179,10 @@ Networking: [default: 30303] --max-outbound-peers - Maximum number of outbound requests. default: 100 + Maximum number of outbound peers. default: 100 --max-inbound-peers - Maximum number of inbound requests. default: 30 + Maximum number of inbound peers. default: 30 --max-tx-reqs Max concurrent `GetPooledTransactions` requests. @@ -248,6 +260,9 @@ Networking: --required-block-hashes Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + RPC: --http Enable the HTTP-RPC server @@ -390,6 +405,11 @@ RPC: [default: 50000000] + --rpc.evm-memory-limit + Maximum memory the EVM can allocate per RPC request + + [default: 4294967295] + --rpc.txfeecap Maximum eth transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap) @@ -681,6 +701,11 @@ Debug: --ethstats The URL of the ethstats server to connect to. Example: `nodename:secret@host:port` + --debug.startup-sync-state-idle + Set the node to idle state when the backfill is not running. + + This makes the `eth_syncing` RPC return "Idle" when the node has just started or finished the backfill, but did not yet receive any new blocks. + Database: --db.log-level Database logging level. Levels higher than "notice" require a debug build @@ -701,7 +726,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -712,6 +741,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + Dev testnet: --dev Start the node in dev mode @@ -731,29 +763,34 @@ Dev testnet: Parses strings using [`humantime::parse_duration`] --dev.block-time 12s + --dev.mnemonic + Derive dev accounts from a fixed mnemonic instead of random ones. + + [default: "test test test test test test test test test test test junk"] + Pruning: --full Run full node. 
Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored - --block-interval + --prune.block-interval Minimum pruning interval measured in blocks - --prune.senderrecovery.full + --prune.sender-recovery.full Prunes all sender recovery data - --prune.senderrecovery.distance + --prune.sender-recovery.distance Prune sender recovery data before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.senderrecovery.before + --prune.sender-recovery.before Prune sender recovery data before the specified block number. The specified block number is not pruned - --prune.transactionlookup.full + --prune.transaction-lookup.full Prunes all transaction lookup data - --prune.transactionlookup.distance + --prune.transaction-lookup.distance Prune transaction lookup data before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.transactionlookup.before + --prune.transaction-lookup.before Prune transaction lookup data before the specified block number. The specified block number is not pruned --prune.receipts.full @@ -768,25 +805,22 @@ Pruning: --prune.receipts.before Prune receipts before the specified block number. The specified block number is not pruned - --prune.receiptslogfilter - Configure receipts log filter. Format: <`address`>:<`prune_mode`>... where <`prune_mode`> can be 'full', 'distance:<`blocks`>', or 'before:<`block_number`>' - - --prune.accounthistory.full + --prune.account-history.full Prunes all account history - --prune.accounthistory.distance + --prune.account-history.distance Prune account before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.accounthistory.before + --prune.account-history.before Prune account history before the specified block number. The specified block number is not pruned - --prune.storagehistory.full + --prune.storage-history.full Prunes all storage history data - --prune.storagehistory.distance + --prune.storage-history.distance Prune storage history before the `head-N` block number. In other words, keep last N + 1 blocks - --prune.storagehistory.before + --prune.storage-history.before Prune storage history before the specified block number. The specified block number is not pruned --prune.bodies.pre-merge @@ -812,8 +846,8 @@ Engine: --engine.legacy-state-root Enable legacy state root - --engine.disable-caching-and-prewarming - Disable cross-block caching and parallel prewarming + --engine.disable-prewarming + Disable parallel prewarming --engine.disable-parallel-sparse-trie Disable the parallel sparse trie in the engine @@ -832,11 +866,6 @@ Engine: --engine.accept-execution-requests-hash Enables accepting requests hash instead of an array of requests in `engine_newPayloadV4` - --engine.max-proof-task-concurrency - Configure the maximum number of concurrent proof tasks - - [default: 256] - --engine.multiproof-chunking Whether multiproof task should chunk proof targets @@ -864,6 +893,12 @@ Engine: --engine.allow-unwind-canonical-header Allow unwinding canonical header to ancestor during forkchoice updates. See `TreeConfig::unwind_canonical_header` for more details + --engine.storage-worker-count + Configure the number of storage proof workers in the Tokio blocking pool. If not specified, defaults to 2x available parallelism, clamped between 2 and 64 + + --engine.account-worker-count + Configure the number of account proof workers in the Tokio blocking pool. 
If not specified, defaults to the same count as storage workers + ERA: --era.enable Enable import from ERA1 files @@ -967,7 +1002,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -984,4 +1019,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 6b24d9d326b..7b37fdfdaa3 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -83,7 +83,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -100,4 +100,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index ecd6ccf8141..a7670bacce9 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -125,10 +125,10 @@ Networking: [default: 30303] --max-outbound-peers - Maximum number of outbound requests. default: 100 + Maximum number of outbound peers. default: 100 --max-inbound-peers - Maximum number of inbound requests. default: 30 + Maximum number of inbound peers. 
default: 30 --max-tx-reqs Max concurrent `GetPooledTransactions` requests. @@ -206,6 +206,9 @@ Networking: --required-block-hashes Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + Datadir: --datadir The path to the data dir for all reth files and subdirectories. @@ -300,7 +303,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -317,4 +320,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index 2a0a5b6a808..324b01daac5 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -94,7 +94,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -111,4 +111,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. 
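The node.mdx hunk further up adds push-gateway support alongside the existing `--metrics` listener. A sketch of combining the two, assuming a Prometheus pushgateway reachable at `pushgateway:9091` and a local listener on port 9001 (both addresses are illustrative; the 5-second interval is the documented default):

```bash
# Serve metrics on a local listener and push them to a gateway every 5 seconds.
# Both addresses are placeholders; the interval matches the documented default.
reth node \
  --metrics 127.0.0.1:9001 \
  --metrics.prometheus.push.url http://pushgateway:9091 \
  --metrics.prometheus.push.interval 5
```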
+ + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index fee957e3385..76afd9a4cf5 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -125,10 +125,10 @@ Networking: [default: 30303] --max-outbound-peers - Maximum number of outbound requests. default: 100 + Maximum number of outbound peers. default: 100 --max-inbound-peers - Maximum number of inbound requests. default: 30 + Maximum number of inbound peers. default: 30 --max-tx-reqs Max concurrent `GetPooledTransactions` requests. @@ -206,6 +206,9 @@ Networking: --required-block-hashes Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + Datadir: --datadir The path to the data dir for all reth files and subdirectories. @@ -300,7 +303,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -317,4 +320,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index dbd7ca91b34..a8ac7fbd0df 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -80,7 +80,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -97,4 +97,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index ac123d47285..2d136630298 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -80,7 +80,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -97,4 +97,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index ce6bc399d8e..1febf6cdd5b 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -70,6 +74,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + Logging: --log.stdout.format The format to use for logs written to stdout @@ -134,7 +141,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -151,4 +158,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index ec5e048b5cd..742cbe54822 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -70,6 +74,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --from The height to start at @@ -147,7 +154,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -164,4 +171,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. 
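`reth re-execute` picks up the same database and tracing knobs; a brief sketch that replays from an assumed starting height. The block number and reader count are illustrative only, `--from` is the one re-execute-specific flag shown in this hunk.

```bash
# Re-execute blocks starting from an assumed height of 1,000,000.
# --db.max-readers is optional; the value here is only illustrative.
reth re-execute \
  --from 1000000 \
  --db.max-readers 64
```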
+ + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index bc693f7e463..006c6c74340 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -83,7 +83,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -100,4 +100,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index a36545638ce..05153f3fc2a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. 
--db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -70,19 +74,23 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + Possible values: - - headers: The headers stage within the pipeline - - bodies: The bodies stage within the pipeline - - senders: The senders stage within the pipeline - - execution: The execution stage within the pipeline - - account-hashing: The account hashing stage within the pipeline - - storage-hashing: The storage hashing stage within the pipeline - - hashing: The account and storage hashing stages within the pipeline - - merkle: The merkle stage within the pipeline - - tx-lookup: The transaction lookup stage within the pipeline - - account-history: The account history stage within the pipeline - - storage-history: The storage history stage within the pipeline + - headers: The headers stage within the pipeline + - bodies: The bodies stage within the pipeline + - senders: The senders stage within the pipeline + - execution: The execution stage within the pipeline + - account-hashing: The account hashing stage within the pipeline + - storage-hashing: The storage hashing stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline + - merkle-changesets: The merkle changesets stage within the pipeline + - tx-lookup: The transaction lookup stage within the pipeline + - account-history: The account history stage within the pipeline + - storage-history: The storage history stage within the pipeline Logging: --log.stdout.format @@ -148,7 +156,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -165,4 +173,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 97211934295..b74ee2280bc 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -66,7 +66,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. 
If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -77,6 +81,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + Logging: --log.stdout.format The format to use for logs written to stdout @@ -141,7 +148,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -158,4 +165,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index c1459ee5498..70fad94ea3a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -98,7 +98,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -115,4 +115,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. 
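The stage list for `reth stage drop` now includes `merkle-changesets`. Assuming the stage is passed as a positional argument (the hunk shows only the value list, not the argument name), dropping it would look like:

```bash
# Drop the newly listed merkle-changesets stage.
# Passing the stage positionally is an assumption based on the value list above.
reth stage drop merkle-changesets
```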
+ + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index 4f39dccac12..bed5d33329a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -98,7 +98,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -115,4 +115,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index f5d6a07b09a..3bada103c87 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -98,7 +98,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -115,4 +115,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. 
+ + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index fce03ffa753..723a54e9272 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -98,7 +98,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -115,4 +115,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 76ce30a2f79..b7a5a41aaf9 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -59,7 +59,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -70,6 +74,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --metrics Enable Prometheus metrics. 
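The Database flags documented in the pages above now include the new `--db.sync-mode` option alongside the expanded `--db.max-size` help. As a rough usage sketch (assuming these Database flags are accepted by `reth node` in the same way as by the `reth stage` subcommands shown here), a node could be started with an explicit map size, Prometheus metrics, and OTLP span export:

```bash
# Hypothetical invocation combining flags documented in these pages.
# --db.max-size caps the MDBX map size (the documented default is 8TB);
# growing past it stops the node with an "environment map size limit reached" error.
reth node \
  --db.max-size 8TB \
  --metrics 127.0.0.1:9001 \
  --tracing-otlp=http://localhost:4318/v1/traces
```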
@@ -101,17 +108,18 @@ Database: The name of the stage to run Possible values: - - headers: The headers stage within the pipeline - - bodies: The bodies stage within the pipeline - - senders: The senders stage within the pipeline - - execution: The execution stage within the pipeline - - account-hashing: The account hashing stage within the pipeline - - storage-hashing: The storage hashing stage within the pipeline - - hashing: The account and storage hashing stages within the pipeline - - merkle: The merkle stage within the pipeline - - tx-lookup: The transaction lookup stage within the pipeline - - account-history: The account history stage within the pipeline - - storage-history: The storage history stage within the pipeline + - headers: The headers stage within the pipeline + - bodies: The bodies stage within the pipeline + - senders: The senders stage within the pipeline + - execution: The execution stage within the pipeline + - account-hashing: The account hashing stage within the pipeline + - storage-hashing: The storage hashing stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline + - merkle-changesets: The merkle changesets stage within the pipeline + - tx-lookup: The transaction lookup stage within the pipeline + - account-history: The account history stage within the pipeline + - storage-history: The storage history stage within the pipeline Networking: -d, --disable-discovery @@ -221,10 +229,10 @@ Networking: [default: 30303] --max-outbound-peers - Maximum number of outbound requests. default: 100 + Maximum number of outbound peers. default: 100 --max-inbound-peers - Maximum number of inbound requests. default: 30 + Maximum number of inbound peers. default: 30 --max-tx-reqs Max concurrent `GetPooledTransactions` requests. @@ -302,6 +310,9 @@ Networking: --required-block-hashes Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + Logging: --log.stdout.format The format to use for logs written to stdout @@ -366,7 +377,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -383,4 +394,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. 
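The new Tracing flags can be set either on the command line or through the OpenTelemetry environment variables listed in the help text. A minimal sketch, assuming `--tracing-otlp` still has to be passed to enable the exporter when the endpoint and protocol come from the environment:

```bash
# Export spans over gRPC; with no explicit endpoint the documented default
# http://localhost:4317 is used.
reth node --tracing-otlp --tracing-otlp-protocol grpc

# Equivalent configuration via the documented environment variables, with a
# RUST_LOG-style filter restricting what is exported.
OTEL_EXPORTER_OTLP_PROTOCOL=grpc \
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://localhost:4317 \
reth node --tracing-otlp --tracing-otlp.filter=info,reth=debug
```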
+ + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index 1a3fd02cae8..5c3a7d54f4d 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -64,7 +64,11 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -75,6 +79,9 @@ Database: --db.max-readers Maximum number of readers allowed to access the database concurrently + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + --offline If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound @@ -142,7 +149,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -159,4 +166,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index bed98899e19..b04e1920b75 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -90,7 +90,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -107,4 +107,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index bcfc87cf3e5..2c22f8127c1 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -90,7 +90,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -107,4 +107,37 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/run/monitoring.mdx b/docs/vocs/docs/pages/run/monitoring.mdx index 30ce967bb10..1b463efdb7d 100644 --- a/docs/vocs/docs/pages/run/monitoring.mdx +++ b/docs/vocs/docs/pages/run/monitoring.mdx @@ -10,6 +10,12 @@ Reth exposes a number of metrics which can be enabled by adding the `--metrics` reth node --metrics 127.0.0.1:9001 ``` +Additionally, you can export spans to an OpenTelemetry collector using `--tracing-otlp`: + +```bash +reth node --tracing-otlp=http://localhost:4318/v1/traces +``` + Now, as the node is running, you can `curl` the endpoint you provided to the `--metrics` flag to get a text dump of the metrics at that time: ```bash diff --git a/docs/vocs/docs/pages/sdk/custom-node/transactions.mdx b/docs/vocs/docs/pages/sdk/custom-node/transactions.mdx index 52881a368fb..407ebcfdec3 100644 --- a/docs/vocs/docs/pages/sdk/custom-node/transactions.mdx +++ b/docs/vocs/docs/pages/sdk/custom-node/transactions.mdx @@ -67,7 +67,7 @@ pub enum CustomTransaction { /// A regular Optimism transaction as defined by [`OpTxEnvelope`]. #[envelope(flatten)] Op(OpTxEnvelope), - /// A [`TxPayment`] tagged with type 0x7E. + /// A [`TxPayment`] tagged with type 0x2A (decimal 42). 
#[envelope(ty = 42)] Payment(Signed), } @@ -178,7 +178,7 @@ pub enum CustomPooledTransaction { /// A regular Optimism transaction as defined by [`OpPooledTransaction`]. #[envelope(flatten)] Op(OpPooledTransaction), - /// A [`TxPayment`] tagged with type 0x7E. + /// A [`TxPayment`] tagged with type 0x2A (decimal 42). #[envelope(ty = 42)] Payment(Signed), } diff --git a/docs/vocs/docs/pages/sdk/examples/modify-node.mdx b/docs/vocs/docs/pages/sdk/examples/modify-node.mdx index b8f21a06bbf..b5297504f3a 100644 --- a/docs/vocs/docs/pages/sdk/examples/modify-node.mdx +++ b/docs/vocs/docs/pages/sdk/examples/modify-node.mdx @@ -4,13 +4,82 @@ This guide demonstrates how to extend a Reth node with custom functionality, inc ## Adding Custom RPC Endpoints -One of the most common modifications is adding custom RPC methods to expose additional functionality. +One of the most common modifications is adding custom RPC methods to expose additional functionality. This allows you to extend the standard Ethereum RPC API with your own methods while maintaining compatibility with existing tools and clients. ### Basic Custom RPC Module +The following example shows how to add a custom RPC namespace called `txpoolExt` that provides additional transaction pool functionality. This example is based on the `node-custom-rpc` example in the Reth repository. + +#### Project Structure + +First, create a new binary crate with the following dependencies in your `Cargo.toml`: + +```toml +[package] +name = "node-custom-rpc" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "node-custom-rpc" +path = "src/main.rs" + +[dependencies] +clap = { version = "4.0", features = ["derive"] } +jsonrpsee = { version = "0.22", features = ["macros", "server", "http-server", "ws-server"] } +reth-ethereum = { path = "../../crates/ethereum" } +tokio = { version = "1.0", features = ["full"] } +``` + +#### Implementation + +The complete implementation can be found in the [node-custom-rpc example](https://github.com/paradigmxyz/reth/tree/main/examples/node-custom-rpc). Here's a summary of the key components: + +1. **RPC Interface**: Define your custom RPC methods using `jsonrpsee` proc macros with a custom namespace +2. **RPC Handler**: Implement the trait with access to node components like the transaction pool +3. **CLI Extension**: Add custom CLI arguments to control your extensions +4. **Node Integration**: Use `extend_rpc_modules` to integrate your custom functionality + +#### Running the Custom Node + +Build and run your custom node with the extension enabled: + +```bash +cargo run -p node-custom-rpc -- node --http --ws --enable-ext +``` + +This will start a Reth node with your custom RPC methods available on both HTTP and WebSocket transports. + +#### Testing the Custom RPC Methods + +You can test your custom RPC methods using tools like `cast` from the Foundry suite: + +```bash +# Get transaction count +cast rpc txpoolExt_transactionCount + +# Clear the transaction pool +cast rpc txpoolExt_clearTxpool + +# Subscribe to transaction count updates (WebSocket only) +cast rpc txpoolExt_subscribeTransactionCount +``` + +### Key Concepts + +1. **RPC Namespaces**: Use the `namespace` parameter in the `rpc` macro to create a custom namespace for your methods. + +2. **Node Context**: Access node components like the transaction pool through the `ctx` parameter in `extend_rpc_modules`. + +3. **Transport Integration**: Your custom RPC methods are automatically available on all configured transports (HTTP, WebSocket, IPC). + +4. 
**CLI Integration**: Extend the default Reth CLI with your own arguments to control custom functionality. + +5. **Error Handling**: Use `RpcResult` for methods that can fail and handle errors appropriately. ## Next Steps - Explore [Standalone Components](/sdk/examples/standalone-components) for direct blockchain interaction - Learn about [Custom Node Building](/sdk/custom-node/prerequisites) for production deployments - Review [Type System](/sdk/typesystem/block) for working with blockchain data +- Check out the [node-custom-rpc example](https://github.com/paradigmxyz/reth/tree/main/examples/node-custom-rpc) for the complete implementation diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts index 92aee418311..4deb6c6df0b 100644 --- a/docs/vocs/vocs.config.ts +++ b/docs/vocs/vocs.config.ts @@ -21,7 +21,7 @@ export default defineConfig({ }, { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' }, { - text: 'v1.8.2', + text: 'v1.9.0', items: [ { text: 'Releases', diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 5b271d7ea8e..ef52e1c8cd9 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -2,7 +2,7 @@ "__inputs": [ { "name": "DS_PROMETHEUS", - "label": "prometheus", + "label": "Prometheus", "description": "", "type": "datasource", "pluginId": "prometheus", @@ -46,7 +46,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "12.1.0-pre" + "version": "12.2.1" }, { "type": "panel", @@ -110,7 +110,6 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": null, "links": [], "panels": [ { @@ -164,9 +163,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -177,12 +174,12 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -234,9 +231,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -247,12 +242,12 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -304,9 +299,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -317,12 +310,12 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -374,9 +367,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -387,12 +378,12 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ 
-444,9 +435,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -457,12 +446,12 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -514,9 +503,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -527,12 +514,12 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -596,9 +583,7 @@ "minVizWidth": 75, "orientation": "auto", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -606,12 +591,12 @@ "showThresholdMarkers": true, "sizing": "auto" }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -672,9 +657,7 @@ "namePlacement": "auto", "orientation": "horizontal", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -682,16 +665,16 @@ "sizing": "auto", "valueMode": "color" }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, - "expr": "reth_sync_checkpoint{$instance_label=\"$instance\"}", + "expr": "max by (stage) (reth_sync_checkpoint{$instance_label=\"$instance\"})", "instant": true, "legendFormat": "{{stage}}", "range": false, @@ -774,9 +757,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -784,12 +765,12 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_db_table_size{$instance_label=\"$instance\"})", @@ -813,7 +794,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_static_files_segment_size{$instance_label=\"$instance\"})", @@ -844,7 +825,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -875,6 +856,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": true, "stacking": { "group": "A", @@ -920,7 +902,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -928,7 +910,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_sync_entities_processed{$instance_label=\"$instance\"} / reth_sync_entities_total{$instance_label=\"$instance\"}", + "expr": "avg by (stage) (reth_sync_entities_processed{$instance_label=\"$instance\"} / 
reth_sync_entities_total{$instance_label=\"$instance\"})", "legendFormat": "{{stage}}", "range": true, "refId": "A" @@ -940,7 +922,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -971,6 +953,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -1017,7 +1000,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -1025,7 +1008,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_sync_checkpoint{$instance_label=\"$instance\"}", + "expr": "max by (stage) (reth_sync_checkpoint{$instance_label=\"$instance\"})", "legendFormat": "{{stage}}", "range": true, "refId": "A" @@ -1037,7 +1020,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histogram for the engine_forkchoiceUpdated RPC API", "fieldConfig": { @@ -1069,6 +1052,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -1137,7 +1121,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -1145,171 +1129,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 min", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p50", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p90", - "range": true, - "refId": "C", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p95", - "range": true, - "refId": "D", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p99", - "range": true, - "refId": "E", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": 
"reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 min", - "range": true, - "refId": "F", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p50", - "range": true, - "refId": "G", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p90", - "range": true, - "refId": "H", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p95", - "range": true, - "refId": "I", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p99", - "range": true, - "refId": "J", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 min", + "legendFormat": "min", "range": true, "refId": "K", "useBackend": false @@ -1317,15 +1142,15 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.5\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p50", + "legendFormat": "p50", "range": true, "refId": "L", "useBackend": false @@ -1336,12 +1161,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.9\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - 
"legendFormat": "engine_forkchoiceUpdatedV3 p90", + "legendFormat": "p90", "range": true, "refId": "M", "useBackend": false @@ -1349,15 +1174,15 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.95\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p95", + "legendFormat": "p95", "range": true, "refId": "N", "useBackend": false @@ -1368,12 +1193,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.99\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p99", + "legendFormat": "p99", "range": true, "refId": "O", "useBackend": false @@ -1385,7 +1210,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histogram for the engine_newPayload RPC API", "fieldConfig": { @@ -1417,6 +1242,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -1451,293 +1277,54 @@ "reducer": "allIsZero", "value": 0 } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": true, - "tooltip": true, - "viz": true - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 21 - }, - "id": 210, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.1.0-pre", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 min", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p50", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p90", - "range": true, - "refId": "C", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", 
quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p95", - "range": true, - "refId": "D", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p99", - "range": true, - "refId": "E", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 min", - "range": true, - "refId": "F", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p50", - "range": true, - "refId": "G", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p90", - "range": true, - "refId": "H", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p95", - "range": true, - "refId": "I", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p99", - "range": true, - "refId": "J", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 min", - "range": true, - "refId": "K", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p50", - "range": true, - "refId": "L", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - 
"editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p90", - "range": true, - "refId": "M", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p95", - "range": true, - "refId": "N", - "useBackend": false + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 21 + }, + "id": 210, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.1", + "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p99", - "range": true, - "refId": "O", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 min", + "legendFormat": "min", "range": true, "refId": "P", "useBackend": false @@ -1748,12 +1335,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.5\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p50", + "legendFormat": "p50", "range": true, "refId": "Q", "useBackend": false @@ -1761,15 +1348,15 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.9\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p90", + "legendFormat": "p90", "range": true, "refId": "R", "useBackend": false @@ -1780,12 +1367,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.95\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0.95\"}", 
"fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p95", + "legendFormat": "p95", "range": true, "refId": "S", "useBackend": false @@ -1793,15 +1380,15 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.99\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p99", + "legendFormat": "p99", "range": true, "refId": "T", "useBackend": false @@ -1845,6 +1432,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -1888,15 +1476,15 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.5\"}", + "expr": "reth_consensus_engine_beacon_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.5\"}", "legendFormat": "p50", "range": true, "refId": "A" @@ -1904,10 +1492,10 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.9\"}", + "expr": "reth_consensus_engine_beacon_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.9\"}", "hide": false, "legendFormat": "p90", "range": true, @@ -1916,10 +1504,10 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.95\"}", + "expr": "reth_consensus_engine_beacon_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.95\"}", "hide": false, "legendFormat": "p95", "range": true, @@ -1928,10 +1516,10 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.99\"}", + "expr": "reth_consensus_engine_beacon_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.99\"}", "hide": false, "legendFormat": "p99", "range": true, @@ -1976,6 +1564,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2019,15 +1608,15 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.5\"}", + "expr": "reth_consensus_engine_beacon_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.5\"}", "legendFormat": "p50", "range": true, "refId": "A" @@ -2035,10 +1624,10 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.9\"}", + "expr": 
"reth_consensus_engine_beacon_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.9\"}", "hide": false, "legendFormat": "p90", "range": true, @@ -2047,10 +1636,10 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.95\"}", + "expr": "reth_consensus_engine_beacon_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.95\"}", "hide": false, "legendFormat": "p95", "range": true, @@ -2059,10 +1648,10 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.99\"}", + "expr": "reth_consensus_engine_beacon_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.99\"}", "hide": false, "legendFormat": "p99", "range": true, @@ -2107,6 +1696,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2150,12 +1740,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sync_execution_gas_per_second{$instance_label=\"$instance\"}", @@ -2182,7 +1772,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2214,7 +1804,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2246,7 +1836,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2297,6 +1887,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2344,12 +1935,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2400,7 +1991,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Engine API messages received by the CL, either engine_newPayload or engine_forkchoiceUpdated", "fieldConfig": { @@ -2432,6 +2023,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2478,7 +2070,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -2494,7 +2086,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "rate(reth_consensus_engine_beacon_new_payload_messages{$instance_label=\"$instance\"}[$__rate_interval])", @@ -2542,6 +2134,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2589,12 +2182,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": 
"rate(reth_consensus_engine_beacon_failed_new_payload_response_deliveries{$instance_label=\"$instance\"}[$__rate_interval])", @@ -2620,7 +2213,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histogram for the engine_newPayload to engine_forkchoiceUpdated", "fieldConfig": { @@ -2652,6 +2245,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2699,7 +2293,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -2719,7 +2313,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histograms for the engine_getPayloadBodiesByHashV1 and engine_getPayloadBodiesByRangeV1 RPC APIs", "fieldConfig": { @@ -2751,6 +2345,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2798,7 +2393,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -2819,7 +2414,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2851,7 +2446,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2883,7 +2478,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2915,7 +2510,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2947,7 +2542,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2998,6 +2593,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3041,12 +2637,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "rate(reth_engine_rpc_blobs_blob_count{$instance_label=\"$instance\"}[$__rate_interval])", @@ -3073,7 +2669,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -3104,6 +2700,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3151,7 +2748,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3172,7 +2769,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3204,7 +2801,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3240,7 +2837,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total pipeline runs triggered by the sync controller", "fieldConfig": { @@ -3272,6 +2869,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { 
"group": "A", @@ -3318,7 +2916,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3338,7 +2936,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -3370,6 +2968,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3416,7 +3015,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3450,7 +3049,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -3481,6 +3080,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3528,7 +3128,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3549,7 +3149,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3601,6 +3201,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3648,12 +3249,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -3687,7 +3288,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -3740,6 +3341,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3787,12 +3389,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -3844,6 +3446,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3877,9 +3480,7 @@ "id": "byNames", "options": { "mode": "exclude", - "names": [ - "Precompile cache hits" - ], + "names": ["Precompile cache hits"], "prefix": "All except:", "readOnly": true } @@ -3889,7 +3490,7 @@ "id": "custom.hideFrom", "value": { "legend": false, - "tooltip": false, + "tooltip": true, "viz": true } } @@ -3917,12 +3518,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -3987,6 +3588,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4033,12 +3635,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_tree_root_proofs_processed_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4085,6 +3687,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4132,12 +3735,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + 
"pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_tree_root_proof_calculation_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4184,6 +3787,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4231,19 +3835,110 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_tree_root_pending_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.5\"}", "instant": false, - "legendFormat": "{{quantile}} percentile", + "legendFormat": "accounts p50", "range": true, "refId": "Branch Nodes" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.9\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p90", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.95\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p95", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.99\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p99", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.5\"}", + "hide": false, + "instant": false, + "legendFormat": "storage p50", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.9\"}", + "hide": false, + "instant": false, + "legendFormat": "storage p90", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.95\"}", + "hide": false, + "instant": false, + "legendFormat": "storage p95", + "range": true, + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.99\"}", + "hide": false, + "instant": false, + "legendFormat": "storage p99", + "range": true, + "refId": "G" } ], "title": "Pending MultiProof requests", @@ -4283,6 +3978,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4324,28 +4020,118 @@ "placement": "bottom", "showLegend": true }, - "tooltip": 
{ - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.1.0-pre", - "targets": [ + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=\"0.5\"}", + "instant": false, + "legendFormat": "accounts p50", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=\"0.9\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p90", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=\"0.95\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p95", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=\"0.99\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p99", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=\"0.5\"}", + "instant": false, + "legendFormat": "storages p50", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=\"0.9\"}", + "hide": false, + "instant": false, + "legendFormat": "storages p90", + "range": true, + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=\"0.95\"}", + "hide": false, + "instant": false, + "legendFormat": "storages p95", + "range": true, + "refId": "G" + }, { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_tree_root_inflight_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=\"0.99\"}", + "hide": false, "instant": false, - "legendFormat": "{{quantile}} percentile", + "legendFormat": "storages p99", "range": true, - "refId": "Branch Nodes" + "refId": "H" } ], - "title": "In-flight MultiProof requests", + "title": "Active multiproof workers", "type": "timeseries" }, { @@ -4382,6 +4168,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4429,12 +4216,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": 
"reth_sparse_state_trie_multiproof_total_account_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4481,6 +4268,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4528,12 +4316,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sparse_state_trie_multiproof_total_storage_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4580,6 +4368,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4627,12 +4416,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sparse_state_trie_multiproof_skipped_account_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4680,6 +4469,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4727,12 +4517,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sparse_state_trie_multiproof_skipped_storage_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4780,6 +4570,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4827,12 +4618,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_tree_root_multiproof_task_total_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4881,6 +4672,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4928,12 +4720,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5000,6 +4792,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5043,12 +4836,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5133,12 +4926,12 @@ "unit": "percentunit" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5189,6 +4982,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5232,12 +5026,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": 
"code", "exemplar": false, @@ -5287,6 +5081,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5330,12 +5125,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5388,6 +5183,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5427,10 +5223,7 @@ { "id": "custom.lineStyle", "value": { - "dash": [ - 0, - 10 - ], + "dash": [0, 10], "fill": "dot" } }, @@ -5462,12 +5255,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -5548,6 +5341,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5587,10 +5381,7 @@ { "id": "custom.lineStyle", "value": { - "dash": [ - 0, - 10 - ], + "dash": [0, 10], "fill": "dot" } }, @@ -5622,12 +5413,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5705,37 +5496,32 @@ }, "id": 48, "options": { - "displayLabels": [ - "name" - ], + "displayLabels": ["name"], "legend": { "displayMode": "table", "placement": "right", "showLegend": true, - "values": [ - "value" - ] + "values": ["value"] }, "pieType": "pie", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, + "sort": "desc", "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_db_table_size{$instance_label=\"$instance\"}", @@ -5783,6 +5569,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5830,12 +5617,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5885,30 +5672,27 @@ "displayMode": "table", "placement": "right", "showLegend": true, - "values": [ - "value" - ] + "values": ["value"] }, "pieType": "pie", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, + "sort": "desc", "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "sum by (type) ( reth_db_table_pages{$instance_label=\"$instance\"} )", @@ -5955,6 +5739,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6003,12 +5788,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + 
"uid": "${datasource}" }, "editorMode": "code", "expr": "sum by (job) ( reth_db_table_size{$instance_label=\"$instance\"} )", @@ -6055,6 +5840,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6102,12 +5888,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_db_freelist{$instance_label=\"$instance\"}) by (job)", @@ -6134,6 +5920,9 @@ "cellOptions": { "type": "auto" }, + "footer": { + "reducers": [] + }, "inspect": false }, "mappings": [], @@ -6159,7 +5948,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6171,7 +5960,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6183,7 +5972,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6195,7 +5984,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6207,7 +5996,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6251,22 +6040,14 @@ "id": 58, "options": { "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, "showHeader": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -6325,37 +6106,32 @@ }, "id": 202, "options": { - "displayLabels": [ - "name" - ], + "displayLabels": ["name"], "legend": { "displayMode": "table", "placement": "right", "showLegend": true, - "values": [ - "value" - ] + "values": ["value"] }, "pieType": "pie", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, + "sort": "desc", "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_static_files_segment_size{$instance_label=\"$instance\"}", @@ -6383,6 +6159,9 @@ "cellOptions": { "type": "auto" }, + "footer": { + "reducers": [] + }, "inspect": false }, "mappings": [], @@ -6436,7 +6215,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6448,7 +6227,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6460,7 +6239,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6472,7 +6251,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6488,22 +6267,14 @@ "id": 204, "options": { "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, "showHeader": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -6533,6 +6304,9 @@ "cellOptions": { "type": "auto" }, + "footer": { + "reducers": [] + }, "inspect": false }, "mappings": [], @@ 
-6586,7 +6360,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6598,7 +6372,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6610,7 +6384,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6622,7 +6396,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6638,22 +6412,14 @@ "id": 205, "options": { "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, "showHeader": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -6703,6 +6469,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6750,12 +6517,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by (job) ( reth_static_files_segment_size{$instance_label=\"$instance\"} )", @@ -6802,6 +6569,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6849,12 +6617,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "max(max_over_time(reth_static_files_jar_provider_write_duration_seconds{$instance_label=\"$instance\", operation=\"commit-writer\", quantile=\"1\"}[$__interval]) > 0) by (segment)", @@ -6915,6 +6683,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6929,7 +6698,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -6960,12 +6730,12 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_blockchain_tree_canonical_chain_height{$instance_label=\"$instance\"}", @@ -7013,6 +6783,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7027,7 +6798,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -7058,12 +6830,12 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_blockchain_tree_block_buffer_blocks{$instance_label=\"$instance\"}", @@ -7110,6 +6882,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7124,7 +6897,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -7156,12 +6930,12 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": 
"code", "expr": "increase(reth_blockchain_tree_reorgs{$instance_label=\"$instance\"}[$__rate_interval])", @@ -7208,6 +6982,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7222,7 +6997,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -7254,12 +7030,12 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_blockchain_tree_latest_reorg_depth{$instance_label=\"$instance\"}", @@ -7320,6 +7096,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7392,12 +7169,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -7484,12 +7261,12 @@ "unit": "percentunit" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -7538,6 +7315,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7585,12 +7363,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "max(max_over_time(reth_rpc_server_calls_time_seconds{$instance_label=\"$instance\"}[$__rate_interval])) by (method) > 0", @@ -7673,12 +7451,12 @@ "unit": "percentunit" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -7727,6 +7505,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7810,12 +7589,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -7848,7 +7627,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -7882,7 +7661,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -7916,7 +7695,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -7954,7 +7733,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -7985,6 +7764,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8032,7 +7812,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8067,7 +7847,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" 
}, "fieldConfig": { "defaults": { @@ -8098,6 +7878,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8112,7 +7893,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -8168,7 +7950,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8184,7 +7966,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_headers_total_flushed{$instance_label=\"$instance\"}", @@ -8209,7 +7991,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_total_flushed{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8257,6 +8039,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8271,7 +8054,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -8303,12 +8087,12 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_timeout_errors{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8331,7 +8115,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_validation_errors{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8379,6 +8163,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8393,7 +8178,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -8424,12 +8210,12 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_headers_in_flight_requests{$instance_label=\"$instance\"}", @@ -8470,7 +8256,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The internal state of the headers downloader: the number of downloaded headers, and the number of headers sent to the header stage.", "fieldConfig": { @@ -8502,6 +8288,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8582,7 +8369,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8598,7 +8385,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_total_flushed{$instance_label=\"$instance\"}", @@ -8622,7 +8409,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_bodies_total_downloaded{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8646,7 +8433,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": 
"reth_downloaders_bodies_buffered_blocks{$instance_label=\"$instance\"}", @@ -8674,7 +8461,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Internal errors in the bodies downloader. These are expected to happen from time to time.", "fieldConfig": { @@ -8706,6 +8493,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8750,7 +8538,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8766,7 +8554,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_bodies_unexpected_errors{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8794,7 +8582,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The number of connected peers and in-progress requests for bodies.", "fieldConfig": { @@ -8826,6 +8614,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8872,7 +8661,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8888,7 +8677,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_network_connected_peers{$instance_label=\"$instance\"}", @@ -8936,6 +8725,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9000,12 +8790,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_buffered_blocks_size_bytes{$instance_label=\"$instance\"}", @@ -9033,7 +8823,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The number of blocks in a request and size in bytes of those block responses", "fieldConfig": { @@ -9065,6 +8855,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9129,7 +8920,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9146,7 +8937,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_response_response_length{$instance_label=\"$instance\"}", @@ -9188,7 +8979,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9220,6 +9011,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9293,7 +9085,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9318,7 +9110,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9350,6 +9142,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9423,7 +9216,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": 
"12.2.1", "targets": [ { "datasource": { @@ -9448,7 +9241,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9480,6 +9273,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9553,7 +9347,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9578,7 +9372,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9610,6 +9404,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9683,7 +9478,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9722,7 +9517,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Number of active jobs", "fieldConfig": { @@ -9754,6 +9549,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9800,7 +9596,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9820,7 +9616,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total number of initiated jobs", "fieldConfig": { @@ -9852,6 +9648,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9898,7 +9695,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9918,7 +9715,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total number of failed jobs", "fieldConfig": { @@ -9950,6 +9747,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9996,7 +9794,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10029,7 +9827,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -10060,6 +9858,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": true, "stacking": { "group": "A", @@ -10108,7 +9907,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10129,7 +9928,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -10160,6 +9959,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": true, "stacking": { "group": "A", @@ -10208,7 +10008,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10229,7 +10029,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -10260,6 +10060,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": true, "stacking": { "group": "A", @@ -10307,7 +10108,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10342,7 +10143,7 @@ { "datasource": { 
"type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -10373,6 +10174,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10433,7 +10235,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10450,7 +10252,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_jemalloc_allocated{$instance_label=\"$instance\"}", @@ -10476,7 +10278,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_jemalloc_metadata{$instance_label=\"$instance\"}", @@ -10502,7 +10304,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_jemalloc_retained{$instance_label=\"$instance\"}", @@ -10551,6 +10353,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10598,12 +10401,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_process_resident_memory_bytes{$instance_label=\"$instance\"}", @@ -10651,6 +10454,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10698,12 +10502,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "avg(rate(reth_process_cpu_seconds_total{$instance_label=\"$instance\"}[1m]))", @@ -10751,6 +10555,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10798,12 +10603,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_process_open_fds{$instance_label=\"$instance\"}", @@ -10851,6 +10656,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10898,12 +10704,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_executor_spawn_critical_tasks_total{$instance_label=\"$instance\"}- reth_executor_spawn_finished_critical_tasks_total{$instance_label=\"$instance\"}", @@ -10952,6 +10758,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -11012,12 +10819,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -11049,20 +10856,153 @@ "title": "Task Executor regular tasks", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Tracks the number of regular blocking tasks 
currently ran by the executor.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "semi-dark-red", + "value": 80 + } + ] + }, + "unit": "tasks/s" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "unit", + "value": "tasks" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 362 + }, + "id": 1007, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "rate(reth_executor_spawn_regular_blocking_tasks_total{$instance_label=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Tasks started", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_executor_spawn_regular_blocking_tasks_total{$instance_label=\"$instance\"} - reth_executor_spawn_finished_regular_blocking_tasks_total{$instance_label=\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "Tasks running", + "range": true, + "refId": "C" + } + ], + "title": "Task Executor regular blocking tasks", + "type": "timeseries" + }, { "collapsed": true, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 362 + "y": 370 }, "id": 236, "panels": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The total number of canonical state notifications sent to ExExes.", "fieldConfig": { @@ -11158,7 +11098,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The total number of events ExExes have sent to the manager.", "fieldConfig": { @@ -11254,7 +11194,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Current and Maximum capacity of the internal state notifications buffer.", "fieldConfig": { @@ -11346,7 +11286,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "max_over_time(reth_exex_manager_max_capacity{$instance_label=\"$instance\"}[1h])", @@ -11442,7 +11382,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": 
"reth_exex_manager_buffer_size{$instance_label=\"$instance\"}", @@ -11497,9 +11437,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -11512,7 +11450,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_exex_manager_num_exexs{$instance_label=\"$instance\"}", @@ -11535,7 +11473,7 @@ "h": 1, "w": 24, "x": 0, - "y": 363 + "y": 371 }, "id": 241, "panels": [ @@ -11623,7 +11561,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_exex_wal_lowest_committed_block_height{$instance_label=\"$instance\"}", @@ -11653,7 +11591,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -11747,7 +11685,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_exex_wal_notifications_count{$instance_label=\"$instance\"}", @@ -11846,7 +11784,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_exex_wal_size_bytes{$instance_label=\"$instance\"}", @@ -11866,7 +11804,7 @@ } ], "refresh": "5s", - "schemaVersion": 41, + "schemaVersion": 42, "tags": [], "templating": { "list": [ @@ -11932,6 +11870,6 @@ "timezone": "", "title": "Reth", "uid": "2k8BXz24x", - "version": 3, + "version": 4, "weekStart": "" } diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index 56755b1e730..cc3ba9abf88 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -1,5 +1,5 @@ use crate::BeaconSidecarConfig; -use alloy_consensus::{BlockHeader, Signed, Transaction as _, TxEip4844WithSidecar, Typed2718}; +use alloy_consensus::{Signed, Transaction as _, TxEip4844WithSidecar, Typed2718}; use alloy_eips::eip7594::BlobTransactionSidecarVariant; use alloy_primitives::B256; use alloy_rpc_types_beacon::sidecar::{BeaconBlobBundle, SidecarIterator}; @@ -202,9 +202,9 @@ where .map(|tx| { let transaction_hash = *tx.tx_hash(); let block_metadata = BlockMetadata { - block_hash: new.tip().hash(), - block_number: new.tip().number(), - gas_used: new.tip().gas_used(), + block_hash: block.hash(), + block_number: block.number, + gas_used: block.gas_used, }; BlobTransactionEvent::Reorged(ReorgedBlob { transaction_hash, diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index a72b2c44487..1d93226dd6a 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -8,7 +8,7 @@ use alloy_evm::{ block::{BlockExecutorFactory, BlockExecutorFor, ExecutableTx}, eth::{EthBlockExecutionCtx, EthBlockExecutor}, precompiles::PrecompilesMap, - revm::context::result::ResultAndState, + revm::context::{result::ResultAndState, Block as _}, EthEvm, EthEvmFactory, }; use alloy_sol_macro::sol; @@ -271,7 +271,7 @@ pub fn apply_withdrawals_contract_call( // Clean-up post system tx context state.remove(&SYSTEM_ADDRESS); - state.remove(&evm.block().beneficiary); + state.remove(&evm.block().beneficiary()); evm.db_mut().commit(state); diff --git a/examples/custom-dev-node/src/main.rs 
b/examples/custom-dev-node/src/main.rs index f700cf9e89a..c5441a2b388 100644 --- a/examples/custom-dev-node/src/main.rs +++ b/examples/custom-dev-node/src/main.rs @@ -33,7 +33,7 @@ async fn main() -> eyre::Result<()> { let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) .testing_node(tasks.executor()) .node(EthereumNode::default()) - .launch() + .launch_with_debug_capabilities() .await?; let mut notifications = node.provider.canonical_state_stream(); diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index b5e69670ec7..e32f0be6bd5 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -18,7 +18,7 @@ use reth_ethereum::{ evm::{ primitives::{Database, EvmEnv}, revm::{ - context::{Context, TxEnv}, + context::{BlockEnv, Context, TxEnv}, context_interface::result::{EVMError, HaltReason}, inspector::{Inspector, NoOpInspector}, interpreter::interpreter::EthInterpreter, @@ -54,6 +54,7 @@ impl EvmFactory for MyEvmFactory { type HaltReason = HaltReason; type Context = EthEvmContext; type Spec = SpecId; + type BlockEnv = BlockEnv; type Precompiles = PrecompilesMap; fn create_evm(&self, db: DB, input: EvmEnv) -> Self::Evm { diff --git a/examples/custom-hardforks/Cargo.toml b/examples/custom-hardforks/Cargo.toml new file mode 100644 index 00000000000..78060f6af62 --- /dev/null +++ b/examples/custom-hardforks/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "custom-hardforks" +license.workspace = true +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +# Core Reth dependencies for chain specs and hardforks +reth-chainspec.workspace = true +reth-network-peers.workspace = true +alloy-genesis.workspace = true +alloy-consensus.workspace = true +alloy-primitives.workspace = true +alloy-eips.workspace = true +serde = { version = "1.0", features = ["derive"] } diff --git a/examples/custom-hardforks/src/chainspec.rs b/examples/custom-hardforks/src/chainspec.rs new file mode 100644 index 00000000000..d51db59fddb --- /dev/null +++ b/examples/custom-hardforks/src/chainspec.rs @@ -0,0 +1,149 @@ +//! Custom chain specification integrating hardforks. +//! +//! This demonstrates how to build a `ChainSpec` with custom hardforks, +//! implementing required traits for integration with Reth's chain management. + +use alloy_eips::eip7840::BlobParams; +use alloy_genesis::Genesis; +use alloy_primitives::{B256, U256}; +use reth_chainspec::{ + hardfork, BaseFeeParams, Chain, ChainSpec, DepositContract, EthChainSpec, EthereumHardfork, + EthereumHardforks, ForkCondition, Hardfork, Hardforks, +}; +use reth_network_peers::NodeRecord; +use serde::{Deserialize, Serialize}; + +// Define custom hardfork variants using Reth's `hardfork!` macro. +// Each variant represents a protocol upgrade (e.g., enabling new features). +hardfork!( + /// Custom hardforks for the example chain. + /// + /// These are inspired by Ethereum's upgrades but customized for demonstration. + /// Add new variants here to extend the chain's hardfork set. + CustomHardfork { + /// Enables basic custom features (e.g., a new precompile). + BasicUpgrade, + /// Enables advanced features (e.g., state modifications). + AdvancedUpgrade, + } +); + +// Implement the `Hardfork` trait for each variant. +// This defines the name and any custom logic (e.g., feature toggles). +// Note: The hardfork! macro already implements Hardfork, so no manual impl needed. + +// Configuration for hardfork activation. 
+// This struct holds settings like activation blocks and is serializable for config files.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct CustomHardforkConfig {
+    /// Block number to activate BasicUpgrade.
+    pub basic_upgrade_block: u64,
+    /// Block number to activate AdvancedUpgrade.
+    pub advanced_upgrade_block: u64,
+}
+
+// Custom chain spec wrapping Reth's `ChainSpec` with our hardforks.
+#[derive(Debug, Clone)]
+pub struct CustomChainSpec {
+    pub inner: ChainSpec,
+}
+
+impl CustomChainSpec {
+    /// Creates a custom chain spec from a genesis file.
+    ///
+    /// This parses the [`ChainSpec`] and adds the custom hardforks.
+    pub fn from_genesis(genesis: Genesis) -> Self {
+        let extra = genesis.config.extra_fields.deserialize_as::<CustomHardforkConfig>().unwrap();
+
+        let mut inner = ChainSpec::from_genesis(genesis);
+        inner.hardforks.insert(
+            CustomHardfork::BasicUpgrade,
+            ForkCondition::Timestamp(extra.basic_upgrade_block),
+        );
+        inner.hardforks.insert(
+            CustomHardfork::AdvancedUpgrade,
+            ForkCondition::Timestamp(extra.advanced_upgrade_block),
+        );
+        Self { inner }
+    }
+}
+
+// Implement `Hardforks` to integrate custom hardforks with Reth's system.
+impl Hardforks for CustomChainSpec {
+    fn fork<H: Hardfork>(&self, fork: H) -> ForkCondition {
+        self.inner.fork(fork)
+    }
+
+    fn forks_iter(&self) -> impl Iterator<Item = (&dyn Hardfork, ForkCondition)> {
+        self.inner.forks_iter()
+    }
+
+    fn fork_id(&self, head: &reth_chainspec::Head) -> reth_chainspec::ForkId {
+        self.inner.fork_id(head)
+    }
+
+    fn latest_fork_id(&self) -> reth_chainspec::ForkId {
+        self.inner.latest_fork_id()
+    }
+
+    fn fork_filter(&self, head: reth_chainspec::Head) -> reth_chainspec::ForkFilter {
+        self.inner.fork_filter(head)
+    }
+}
+
+// Implement `EthChainSpec` for compatibility with Ethereum-based nodes.
+impl EthChainSpec for CustomChainSpec {
+    type Header = alloy_consensus::Header;
+
+    fn chain(&self) -> Chain {
+        self.inner.chain()
+    }
+
+    fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams {
+        self.inner.base_fee_params_at_timestamp(timestamp)
+    }
+
+    fn blob_params_at_timestamp(&self, timestamp: u64) -> Option<BlobParams> {
+        self.inner.blob_params_at_timestamp(timestamp)
+    }
+
+    fn deposit_contract(&self) -> Option<&DepositContract> {
+        self.inner.deposit_contract()
+    }
+
+    fn genesis_hash(&self) -> B256 {
+        self.inner.genesis_hash()
+    }
+
+    fn prune_delete_limit(&self) -> usize {
+        self.inner.prune_delete_limit()
+    }
+
+    fn display_hardforks(&self) -> Box<dyn core::fmt::Display> {
+        Box::new(self.inner.display_hardforks())
+    }
+
+    fn genesis_header(&self) -> &Self::Header {
+        self.inner.genesis_header()
+    }
+
+    fn genesis(&self) -> &Genesis {
+        self.inner.genesis()
+    }
+
+    fn bootnodes(&self) -> Option<Vec<NodeRecord>> {
+        self.inner.bootnodes()
+    }
+
+    fn final_paris_total_difficulty(&self) -> Option<U256> {
+        self.inner.final_paris_total_difficulty()
+    }
+}
+
+// Implement `EthereumHardforks` to support Ethereum hardfork queries.
+impl EthereumHardforks for CustomChainSpec {
+    fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition {
+        self.inner.ethereum_fork_activation(fork)
+    }
+}
diff --git a/examples/custom-hardforks/src/main.rs b/examples/custom-hardforks/src/main.rs
new file mode 100644
index 00000000000..588f260c616
--- /dev/null
+++ b/examples/custom-hardforks/src/main.rs
@@ -0,0 +1,5 @@
+//! Example that showcases how to inject custom hardforks.
+ +pub mod chainspec; + +fn main() {} diff --git a/examples/custom-node/Cargo.toml b/examples/custom-node/Cargo.toml index 9ac414b7178..34ba5c413cc 100644 --- a/examples/custom-node/Cargo.toml +++ b/examples/custom-node/Cargo.toml @@ -7,7 +7,6 @@ license.workspace = true [dependencies] # reth -reth-chain-state.workspace = true reth-codecs.workspace = true reth-network-peers.workspace = true reth-node-builder.workspace = true diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index 357290e14d7..d7eabdc19f7 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -6,14 +6,13 @@ use crate::{ }; use alloy_eips::eip2718::WithEncoded; use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload}; -use reth_chain_state::ExecutedBlockWithTrieUpdates; use reth_engine_primitives::EngineApiValidator; use reth_ethereum::{ node::api::{ - validate_version_specific_fields, AddOnsContext, BuiltPayload, EngineApiMessageVersion, - EngineObjectValidationError, ExecutionPayload, FullNodeComponents, NewPayloadError, - NodePrimitives, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, - PayloadTypes, PayloadValidator, + validate_version_specific_fields, AddOnsContext, BuiltPayload, BuiltPayloadExecutedBlock, + EngineApiMessageVersion, EngineObjectValidationError, ExecutionPayload, FullNodeComponents, + NewPayloadError, NodePrimitives, PayloadAttributes, PayloadBuilderAttributes, + PayloadOrAttributes, PayloadTypes, PayloadValidator, }, primitives::{RecoveredBlock, SealedBlock}, storage::StateProviderFactory, @@ -167,7 +166,7 @@ impl BuiltPayload for CustomBuiltPayload { self.0.fees() } - fn executed_block(&self) -> Option> { + fn executed_block(&self) -> Option> { self.0.executed_block() } diff --git a/examples/custom-node/src/evm/alloy.rs b/examples/custom-node/src/evm/alloy.rs index 6071a2c6dd8..d8df842cfc5 100644 --- a/examples/custom-node/src/evm/alloy.rs +++ b/examples/custom-node/src/evm/alloy.rs @@ -40,6 +40,7 @@ where type Error = EVMError; type HaltReason = OpHaltReason; type Spec = OpSpecId; + type BlockEnv = BlockEnv; type Precompiles = P; type Inspector = I; @@ -103,6 +104,7 @@ impl EvmFactory for CustomEvmFactory { type Error = EVMError; type HaltReason = OpHaltReason; type Spec = OpSpecId; + type BlockEnv = BlockEnv; type Precompiles = PrecompilesMap; fn create_evm( diff --git a/examples/custom-node/src/evm/env.rs b/examples/custom-node/src/evm/env.rs index 5508ec4e6d0..53a2b4e3f15 100644 --- a/examples/custom-node/src/evm/env.rs +++ b/examples/custom-node/src/evm/env.rs @@ -1,6 +1,7 @@ use crate::primitives::{CustomTransaction, TxPayment}; use alloy_eips::{eip2930::AccessList, Typed2718}; use alloy_evm::{FromRecoveredTx, FromTxWithEncoded, IntoTxEnv}; +use alloy_op_evm::block::OpTxEnv; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use op_alloy_consensus::OpTxEnvelope; use op_revm::OpTransaction; @@ -328,3 +329,12 @@ impl IntoTxEnv for CustomTxEnv { self } } + +impl OpTxEnv for CustomTxEnv { + fn encoded_bytes(&self) -> Option<&Bytes> { + match self { + Self::Op(tx) => tx.encoded_bytes(), + Self::Payment(_) => None, + } + } +} diff --git a/examples/custom-node/src/pool.rs b/examples/custom-node/src/pool.rs index 0959b3bcae0..8828803a0f3 100644 --- a/examples/custom-node/src/pool.rs +++ b/examples/custom-node/src/pool.rs @@ -17,7 +17,7 @@ pub enum CustomPooledTransaction { /// A regular Optimism transaction as defined by [`OpPooledTransaction`]. 
     #[envelope(flatten)]
     Op(OpPooledTransaction),
-    /// A [`TxPayment`] tagged with type 0x7E.
+    /// A [`TxPayment`] tagged with type 0x2A (decimal 42).
     #[envelope(ty = 42)]
     Payment(Signed<TxPayment>),
 }
diff --git a/examples/custom-node/src/primitives/tx.rs b/examples/custom-node/src/primitives/tx.rs
index f04bcc8862f..fe763e079e5 100644
--- a/examples/custom-node/src/primitives/tx.rs
+++ b/examples/custom-node/src/primitives/tx.rs
@@ -23,7 +23,7 @@ pub enum CustomTransaction {
     /// A regular Optimism transaction as defined by [`OpTxEnvelope`].
     #[envelope(flatten)]
     Op(OpTxEnvelope),
-    /// A [`TxPayment`] tagged with type 0x7E.
+    /// A [`TxPayment`] tagged with type 0x2A (decimal 42).
     #[envelope(ty = 42)]
     Payment(Signed<TxPayment>),
 }
@@ -33,7 +33,7 @@ impl RlpBincode for CustomTransaction {}
 impl reth_codecs::alloy::transaction::Envelope for CustomTransaction {
     fn signature(&self) -> &Signature {
         match self {
-            CustomTransaction::Op(tx) => tx.signature(),
+            CustomTransaction::Op(tx) => reth_codecs::alloy::transaction::Envelope::signature(tx),
             CustomTransaction::Payment(tx) => tx.signature(),
         }
     }
diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs
index 93896accbbc..339aa1ae3d1 100644
--- a/examples/db-access/src/main.rs
+++ b/examples/db-access/src/main.rs
@@ -66,11 +66,6 @@ fn header_provider_example<T: HeaderProvider>(provider: T, number: u64) -> eyre:
         provider.header(sealed_header.hash())?.ok_or(eyre::eyre!("header by hash not found"))?;
     assert_eq!(sealed_header.header(), &header_by_hash);
 
-    // The header's total difficulty is stored in a separate table, so we have a separate call for
-    // it. This is not needed for post PoS transition chains.
-    let td = provider.header_td_by_number(number)?.ok_or(eyre::eyre!("header td not found"))?;
-    assert!(!td.is_zero());
-
     // Can query headers by range as well, already sealed!
     let headers = provider.sealed_headers_range(100..200)?;
     assert_eq!(headers.len(), 100);
diff --git a/examples/exex-subscription/src/main.rs b/examples/exex-subscription/src/main.rs
index eb7ffaaf754..e39408a3dc0 100644
--- a/examples/exex-subscription/src/main.rs
+++ b/examples/exex-subscription/src/main.rs
@@ -4,11 +4,9 @@
 //! requested address.
 #[allow(dead_code)]
 use alloy_primitives::{Address, U256};
-use clap::Parser;
 use futures::TryStreamExt;
 use jsonrpsee::{
-    core::SubscriptionResult, proc_macros::rpc, tracing, PendingSubscriptionSink,
-    SubscriptionMessage,
+    core::SubscriptionResult, proc_macros::rpc, PendingSubscriptionSink, SubscriptionMessage,
 };
 use reth_ethereum::{
     exex::{ExExContext, ExExEvent, ExExNotification},
@@ -166,14 +164,8 @@ async fn my_exex(
     Ok(())
 }
 
-#[derive(Parser, Debug)]
-struct Args {
-    #[arg(long)]
-    enable_ext: bool,
-}
-
 fn main() -> eyre::Result<()> {
-    reth_ethereum::cli::Cli::parse_args().run(|builder, _args| async move {
+    reth_ethereum::cli::Cli::parse_args().run(|builder, _| async move {
         let (subscriptions_tx, subscriptions_rx) = mpsc::unbounded_channel::();
 
         let rpc = StorageWatcherRpc::new(subscriptions_tx.clone());
diff --git a/examples/node-custom-rpc/src/main.rs b/examples/node-custom-rpc/src/main.rs
index 3c7c9269f58..7ab271b4cc5 100644
--- a/examples/node-custom-rpc/src/main.rs
+++ b/examples/node-custom-rpc/src/main.rs
@@ -53,7 +53,7 @@ fn main() {
             Ok(())
         })
         // launch the node with custom rpc
-        .launch()
+        .launch_with_debug_capabilities()
         .await?;
 
     handle.wait_for_node_exit().await
@@ -91,7 +91,7 @@ pub trait TxpoolExtApi {
     ) -> SubscriptionResult;
 }
 
-/// The type that implements the `txpool` rpc namespace trait
+/// The type that implements the `txpoolExt` rpc namespace trait
 pub struct TxpoolExt {
     pool: Pool,
 }
diff --git a/examples/precompile-cache/src/main.rs b/examples/precompile-cache/src/main.rs
index dcaa886d736..fe748db4636 100644
--- a/examples/precompile-cache/src/main.rs
+++ b/examples/precompile-cache/src/main.rs
@@ -16,7 +16,7 @@ use reth_ethereum::{
     evm::{
         primitives::{Database, EvmEnv},
         revm::{
-            context::{Context, TxEnv},
+            context::{BlockEnv, Context, TxEnv},
             context_interface::result::{EVMError, HaltReason},
             inspector::{Inspector, NoOpInspector},
             interpreter::interpreter::EthInterpreter,
@@ -69,6 +69,7 @@ impl EvmFactory for MyEvmFactory {
     type HaltReason = HaltReason;
     type Context = EthEvmContext;
     type Spec = SpecId;
+    type BlockEnv = BlockEnv;
     type Precompiles = PrecompilesMap;
 
     fn create_evm(&self, db: DB, input: EvmEnv) -> Self::Evm {
@@ -176,7 +177,7 @@ where
     async fn build_evm(self, ctx: &BuilderContext) -> eyre::Result {
         let evm_config = EthEvmConfig::new_with_evm_factory(
             ctx.chain_spec(),
-            MyEvmFactory { precompile_cache: self.precompile_cache.clone() },
+            MyEvmFactory { precompile_cache: self.precompile_cache },
         );
         Ok(evm_config)
     }
diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs
index 97bd1debdcc..b19d99776ab 100644
--- a/examples/rpc-db/src/main.rs
+++ b/examples/rpc-db/src/main.rs
@@ -53,7 +53,7 @@ async fn main() -> eyre::Result<()> {
         db.clone(),
         spec.clone(),
         StaticFileProvider::read_only(db_path.join("static_files"), true)?,
-    );
+    )?;
 
     // 2. Set up the blockchain provider using only the database provider and a noop for the tree to
     //    satisfy trait bounds. Tree is not used in this example since we are only operating on the
diff --git a/flake.lock b/flake.lock
index 704d14161e0..4efd90828f9 100644
--- a/flake.lock
+++ b/flake.lock
@@ -2,11 +2,11 @@
   "nodes": {
     "crane": {
       "locked": {
-        "lastModified": 1754269165,
-        "narHash": "sha256-0tcS8FHd4QjbCVoxN9jI+PjHgA4vc/IjkUSp+N3zy0U=",
+        "lastModified": 1760924934,
+        "narHash": "sha256-tuuqY5aU7cUkR71sO2TraVKK2boYrdW3gCSXUkF4i44=",
         "owner": "ipetkov",
         "repo": "crane",
-        "rev": "444e81206df3f7d92780680e45858e31d2f07a08",
+        "rev": "c6b4d5308293d0d04fcfeee92705017537cad02f",
         "type": "github"
       },
       "original": {
@@ -23,11 +23,11 @@
         "rust-analyzer-src": "rust-analyzer-src"
       },
       "locked": {
-        "lastModified": 1755499663,
-        "narHash": "sha256-OxHGov+A4qR4kpO3e1I3LFR78IAKvDFnWoWsDWvFhKU=",
+        "lastModified": 1761720242,
+        "narHash": "sha256-Zi9nWw68oUDMVOhf/+Z97wVbNV2K7eEAGZugQKqU7xw=",
         "owner": "nix-community",
         "repo": "fenix",
-        "rev": "d1ff4457857ad551e8d6c7c79324b44fac518b8b",
+        "rev": "8e4d32f4cc12b3f106af6e4515b36ac046a1ec91",
         "type": "github"
       },
       "original": {
@@ -63,11 +63,11 @@
     "rust-analyzer-src": {
       "flake": false,
       "locked": {
-        "lastModified": 1755004716,
-        "narHash": "sha256-TbhPR5Fqw5LjAeI3/FOPhNNFQCF3cieKCJWWupeZmiA=",
+        "lastModified": 1761686505,
+        "narHash": "sha256-jX6UrGS/hABDaM4jdx3+xgH3KCHP2zKHeTa8CD5myEo=",
         "owner": "rust-lang",
         "repo": "rust-analyzer",
-        "rev": "b2a58b8c6eff3c3a2c8b5c70dbf69ead78284194",
+        "rev": "d08d54f3c10dfa41033eb780c3bddb50e09d30fc",
         "type": "github"
       },
       "original": {
diff --git a/flake.nix b/flake.nix
index 7550edc31e3..512b69e3660 100644
--- a/flake.nix
+++ b/flake.nix
@@ -120,6 +120,11 @@
             rustNightly.rustfmt
             pkgs.cargo-nextest
           ];
+
+          # Remove the hardening added by nix to fix the jemalloc compilation error.
+          # More info: https://github.com/tikv/jemallocator/issues/108
+          hardeningDisable = [ "fortify" ];
+
         } overrides);
       }
     );
diff --git a/fork.yaml b/fork.yaml
index 9f82ca15ac9..78a31490db7 100644
--- a/fork.yaml
+++ b/fork.yaml
@@ -4,7 +4,7 @@ footer: |
 base:
   name: reth
   url: https://github.com/paradigmxyz/reth
-  hash: e9598ba5ac4e32600e48b93d197a25603b1c644b
+  hash: b48c72fad20c0964306392066f5c077a376e5fa9
 fork:
   name: scroll-reth
   url: https://github.com/scroll-tech/reth
diff --git a/pkg/reth/debian/reth.service b/pkg/reth/debian/reth.service
deleted file mode 100644
index edd78d455c0..00000000000
--- a/pkg/reth/debian/reth.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=Modular, contributor-friendly and blazing-fast implementation of the Ethereum protocol
-Wants=network-online.target
-After=network.target network-online.target
-
-[Service]
-Type=exec
-DynamicUser=yes
-StateDirectory=reth
-ExecStart=/usr/bin/reth node --datadir %S/reth --log.file.max-files 0
-
-[Install]
-WantedBy=multi-user.target
diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml
index 6b11e29c707..e9cf465a98d 100644
--- a/testing/ef-tests/Cargo.toml
+++ b/testing/ef-tests/Cargo.toml
@@ -28,11 +28,11 @@ reth-evm.workspace = true
 reth-evm-ethereum.workspace = true
 reth-ethereum-consensus.workspace = true
 reth-revm = { workspace = true, features = ["std", "witness"] }
-reth-stateless = { workspace = true }
+reth-stateless = { workspace = true, features = ["secp256k1"] }
 reth-tracing.workspace = true
 reth-trie.workspace = true
 reth-trie-db.workspace = true
-revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] }
+revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg", "memory_limit"] }
 
 alloy-rlp.workspace = true
 alloy-primitives.workspace = true
diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs
index c06ac05a6d5..06d3069d4a8 100644
--- a/testing/ef-tests/src/cases/blockchain_test.rs
+++ b/testing/ef-tests/src/cases/blockchain_test.rs
@@ -10,17 +10,20 @@ use reth_chainspec::ChainSpec;
 use reth_consensus::{Consensus, HeaderValidator};
 use reth_db_common::init::{insert_genesis_hashes, insert_genesis_history, insert_genesis_state};
 use reth_ethereum_consensus::{validate_block_post_execution, EthBeaconConsensus};
-use reth_ethereum_primitives::Block;
+use reth_ethereum_primitives::{Block, TransactionSigned};
 use reth_evm::{execute::Executor, ConfigureEvm};
 use reth_evm_ethereum::EthEvmConfig;
-use reth_primitives_traits::{RecoveredBlock, SealedBlock};
+use reth_primitives_traits::{Block as BlockTrait, RecoveredBlock, SealedBlock};
 use reth_provider::{
     test_utils::create_test_provider_factory_with_chain_spec, BlockWriter, DatabaseProviderFactory,
     ExecutionOutcome, HeaderProvider, HistoryWriter, OriginalValuesKnown, StateProofProvider,
     StateWriter, StaticFileProviderFactory, StaticFileSegment, StaticFileWriter,
 };
 use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord, State};
-use reth_stateless::{validation::stateless_validation, ExecutionWitness};
+use reth_stateless::{
+    trie::StatelessSparseTrie, validation::stateless_validation_with_trie, ExecutionWitness,
+    UncompressedPublicKey,
+};
 use reth_trie::{HashedPostState, KeccakKeyHasher, StateRoot};
 use reth_trie_db::DatabaseStateRoot;
 use std::{
@@ -356,9 +359,16 @@ fn run_case(
     }
 
     // Now validate using the stateless client if everything else passes
-    for (block, execution_witness) in &program_inputs {
-        stateless_validation(
-            block.clone(),
+    for (recovered_block, execution_witness) in &program_inputs {
+        let block = recovered_block.clone().into_block();
+
+        // Recover the actual public keys from the transaction signatures
+        let public_keys = recover_signers(block.body().transactions())
+            .expect("Failed to recover public keys from transaction signatures");
+
+        stateless_validation_with_trie::(
+            block,
+            public_keys,
             execution_witness.clone(),
             chain_spec.clone(),
             EthEvmConfig::new(chain_spec.clone()),
@@ -413,6 +423,26 @@ fn pre_execution_checks(
     Ok(())
 }
 
+/// Recover public keys from transaction signatures.
+fn recover_signers<'a, I>(txs: I) -> Result, Box>
+where
+    I: IntoIterator,
+{
+    txs.into_iter()
+        .enumerate()
+        .map(|(i, tx)| {
+            tx.signature()
+                .recover_from_prehash(&tx.signature_hash())
+                .map(|keys| {
+                    UncompressedPublicKey(
+                        keys.to_encoded_point(false).as_bytes().try_into().unwrap(),
+                    )
+                })
+                .map_err(|e| format!("failed to recover signature for tx #{i}: {e}").into())
+        })
+        .collect::, _>>()
+}
+
 /// Returns whether the test at the given path should be skipped.
 ///
 /// Some tests are edge cases that cannot happen on mainnet, while others are skipped for
diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs
index 49c49bf1936..c6f4e44cffa 100644
--- a/testing/ef-tests/src/models.rs
+++ b/testing/ef-tests/src/models.rs
@@ -265,7 +265,7 @@ pub enum ForkSpec {
     FrontierToHomesteadAt5,
     /// Homestead
     Homestead,
-    /// Homestead to Tangerine
+    /// Homestead to DAO
     HomesteadToDaoAt5,
     /// Homestead to Tangerine
     HomesteadToEIP150At5,
diff --git a/testing/ef-tests/tests/tests.rs b/testing/ef-tests/tests/tests.rs
index 0961817e901..2728246901a 100644
--- a/testing/ef-tests/tests/tests.rs
+++ b/testing/ef-tests/tests/tests.rs
blockchain_test { .join("ethereum-tests") .join("BlockchainTests"); - BlockchainTests::new(suite_path).run_only(&format!("{}", stringify!($dir))); + BlockchainTests::new(suite_path).run_only(stringify!($dir)); } }; }