From f1913ae824b124525bec7896c9271e1a4bdefa41 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 7 Feb 2024 11:51:26 +0200 Subject: [PATCH] feat(en): Make state keeper work with pruned data (#900) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Modifies the state keeper so that it works with pruned node data during snapshot recovery. ## Why ❔ Part of preparing the EN code to support snapshot recovery. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. --- core/bin/external_node/src/main.rs | 109 ++-- ...51dea32e37cb8c51f1dd5d82c15eddc48e6b.json} | 4 +- ...ea7a878613cf62f7fb9b94c5145dc8d1da674.json | 62 +++ ...37d8d542b4f14cf560972c005ab3cc13d1f63.json | 23 - ...528f012c2ecaebf6622ca1ae481045604e58d.json | 21 + ...dd913ed9fbd69b8354b7d18b01d3fb62f6be8.json | 44 -- ...253eb340a21afd7d65ce6d2f523aeded8dfc0.json | 29 + ...940bd2aee55b66f6780ceae06c3e1ff92eb8b.json | 18 - ...85ec6ec1e522bc058710560ef78e75f94ddac.json | 22 + ...extra_fields_to_snapshot_recovery.down.sql | 5 + ..._extra_fields_to_snapshot_recovery.up.sql | 10 + core/lib/dal/src/blocks_web3_dal.rs | 14 +- core/lib/dal/src/snapshot_recovery_dal.rs | 45 +- core/lib/dal/src/storage_web3_dal.rs | 17 +- core/lib/dal/src/sync_dal.rs | 50 +- core/lib/dal/src/tests/mod.rs | 18 +- core/lib/dal/src/transactions_dal.rs | 104 ++-- core/lib/snapshots_applier/src/lib.rs | 11 +- core/lib/snapshots_applier/src/tests/mod.rs | 7 +- core/lib/state/src/test_utils.rs | 9 +- core/lib/types/src/snapshots.rs | 9 +- core/lib/vm_utils/src/lib.rs | 46 +- core/lib/vm_utils/src/storage.rs | 351 ++++++++---- .../src/api_server/execution_sandbox/tests.rs | 5 +- .../src/api_server/tx_sender/tests.rs | 16 +- .../src/api_server/web3/tests/debug.rs | 5 +- .../src/api_server/web3/tests/filters.rs | 8 +- .../src/api_server/web3/tests/mod.rs | 85 +-- .../src/api_server/web3/tests/vm.rs | 6 +- .../src/api_server/web3/tests/ws.rs | 19 +- .../zksync_core/src/consensus/storage/mod.rs | 12 +- .../lib/zksync_core/src/consensus/testonly.rs | 68 ++- .../src/metadata_calculator/recovery/tests.rs | 15 +- .../state_keeper/batch_executor/tests/mod.rs | 60 +- .../batch_executor/tests/tester.rs | 186 ++++++- .../zksync_core/src/state_keeper/io/common.rs | 113 ---- .../src/state_keeper/io/common/mod.rs | 116 ++++ .../src/state_keeper/io/common/tests.rs | 527 ++++++++++++++++++ .../src/state_keeper/io/mempool.rs | 193 ++++--- .../zksync_core/src/state_keeper/io/mod.rs | 1 - .../src/state_keeper/io/seal_logic.rs | 22 +- .../src/state_keeper/io/tests/mod.rs | 196 +++++-- .../src/state_keeper/io/tests/tester.rs | 52 +- .../zksync_core/src/state_keeper/keeper.rs | 27 +- core/lib/zksync_core/src/state_keeper/mod.rs | 3 +- .../zksync_core/src/state_keeper/tests/mod.rs | 8 +- .../src/state_keeper/tests/tester.rs | 1 - .../src/state_keeper/updates/mod.rs | 13 +- .../sync_layer/batch_status_updater/tests.rs | 8 +- .../zksync_core/src/sync_layer/external_io.rs | 218 ++++---- .../lib/zksync_core/src/sync_layer/fetcher.rs | 98 ++-- .../src/sync_layer/gossip/conversions.rs | 38 -- core/lib/zksync_core/src/sync_layer/tests.rs | 312 ++++++++--- core/lib/zksync_core/src/utils/testonly.rs | 102 ++-- core/tests/test_account/src/lib.rs | 6 +- 55 files changed, 2499 insertions(+),
1068 deletions(-) rename core/lib/dal/.sqlx/{query-eae299cb634a5b5b0409648436c4550d90b643424e3cac238d97cb79c9c140a4.json => query-3596a70433e4e27fcda18f37073b51dea32e37cb8c51f1dd5d82c15eddc48e6b.json} (60%) create mode 100644 core/lib/dal/.sqlx/query-3743b41751a141c21f1674fa581ea7a878613cf62f7fb9b94c5145dc8d1da674.json delete mode 100644 core/lib/dal/.sqlx/query-525123d4ec2b427f1c171f30d0937d8d542b4f14cf560972c005ab3cc13d1f63.json create mode 100644 core/lib/dal/.sqlx/query-66510caa7683ed90729cb545ac8528f012c2ecaebf6622ca1ae481045604e58d.json delete mode 100644 core/lib/dal/.sqlx/query-73f0401ac19c4e1efd73d02b8dcdd913ed9fbd69b8354b7d18b01d3fb62f6be8.json create mode 100644 core/lib/dal/.sqlx/query-d3f9202d665ef4fcb028dae6484253eb340a21afd7d65ce6d2f523aeded8dfc0.json delete mode 100644 core/lib/dal/.sqlx/query-eb83e9175b4f8c0351ac2d4b4d2940bd2aee55b66f6780ceae06c3e1ff92eb8b.json create mode 100644 core/lib/dal/.sqlx/query-f1541a8d970d57ed118ee603e7285ec6ec1e522bc058710560ef78e75f94ddac.json create mode 100644 core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.down.sql create mode 100644 core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.up.sql delete mode 100644 core/lib/zksync_core/src/state_keeper/io/common.rs create mode 100644 core/lib/zksync_core/src/state_keeper/io/common/mod.rs create mode 100644 core/lib/zksync_core/src/state_keeper/io/common/tests.rs delete mode 100644 core/lib/zksync_core/src/sync_layer/gossip/conversions.rs diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 8765d656ca5..8ee0b9fc26a 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -28,8 +28,9 @@ use zksync_core::{ MiniblockSealer, MiniblockSealerHandle, ZkSyncStateKeeper, }, sync_layer::{ - batch_status_updater::BatchStatusUpdater, external_io::ExternalIO, fetcher::FetcherCursor, - genesis::perform_genesis_if_needed, ActionQueue, MainNodeClient, SyncState, + batch_status_updater::BatchStatusUpdater, external_io::ExternalIO, + fetcher::MainNodeFetcher, genesis::perform_genesis_if_needed, ActionQueue, MainNodeClient, + SyncState, }, }; use zksync_dal::{healthcheck::ConnectionPoolHealthCheck, ConnectionPool}; @@ -58,7 +59,7 @@ async fn build_state_keeper( miniblock_sealer_handle: MiniblockSealerHandle, stop_receiver: watch::Receiver<bool>, chain_id: L2ChainId, -) -> ZkSyncStateKeeper { +) -> anyhow::Result<ZkSyncStateKeeper> { // These config values are used on the main node, and depending on these values certain transactions can // be *rejected* (that is, not included into the block).
However, external node only mirrors what the main // node has already executed, so we can safely set these values to the maximum possible values - if the main @@ -79,9 +80,9 @@ async fn build_state_keeper( true, )); - let main_node_url = config.required.main_node_url().unwrap(); + let main_node_url = config.required.main_node_url()?; let main_node_client = <dyn MainNodeClient>::json_rpc(&main_node_url) - .expect("Failed creating JSON-RPC client for main node"); + .context("Failed creating JSON-RPC client for main node")?; let io = ExternalIO::new( miniblock_sealer_handle, connection_pool, @@ -92,14 +93,15 @@ async fn build_state_keeper( validation_computational_gas_limit, chain_id, ) - .await; + .await + .context("Failed initializing I/O for external node state keeper")?; - ZkSyncStateKeeper::new( + Ok(ZkSyncStateKeeper::new( stop_receiver, Box::new(io), batch_executor_base, Box::new(NoopSealer), - ) + )) } async fn init_tasks( @@ -166,61 +168,56 @@ async fn init_tasks( stop_receiver.clone(), config.remote.l2_chain_id, ) - .await; + .await?; let main_node_client = <dyn MainNodeClient>::json_rpc(&main_node_url) .context("Failed creating JSON-RPC client for main node")?; let singleton_pool_builder = ConnectionPool::singleton(&config.postgres.database_url); - let fetcher_handle = match config.consensus.clone() { - None => { - let fetcher_cursor = { - let pool = singleton_pool_builder - .build() - .await - .context("failed to build a connection pool for `MainNodeFetcher`")?; - let mut storage = pool.access_storage_tagged("sync_layer").await?; - FetcherCursor::new(&mut storage) - .await - .context("failed to load `MainNodeFetcher` cursor from Postgres")? - }; - let fetcher = fetcher_cursor.into_fetcher( - Box::new(main_node_client), - action_queue_sender, - sync_state.clone(), - stop_receiver.clone(), - ); - tokio::spawn(fetcher.run()) - } - Some(cfg) => { - let pool = connection_pool.clone(); - let mut stop_receiver = stop_receiver.clone(); - let sync_state = sync_state.clone(); - #[allow(clippy::redundant_locals)] - tokio::spawn(async move { - let sync_state = sync_state; - let main_node_client = main_node_client; - scope::run!(&ctx::root(), |ctx, s| async { - s.spawn_bg(async { - let res = cfg.run(ctx, pool, action_queue_sender).await; - tracing::info!("Consensus actor stopped"); - res - }); - // TODO: information about the head block of the validators - // (currently just the main node) - // should also be provided over the gossip network. - s.spawn_bg(async { - consensus::run_main_node_state_fetcher(ctx, &main_node_client, &sync_state) - .await?; - Ok(()) - }); - ctx.wait(stop_receiver.wait_for(|stop| *stop)).await??; + let fetcher_handle = if let Some(cfg) = config.consensus.clone() { + let pool = connection_pool.clone(); + let mut stop_receiver = stop_receiver.clone(); + let sync_state = sync_state.clone(); + + #[allow(clippy::redundant_locals)] + tokio::spawn(async move { + let sync_state = sync_state; + let main_node_client = main_node_client; + scope::run!(&ctx::root(), |ctx, s| async { + s.spawn_bg(async { + let res = cfg.run(ctx, pool, action_queue_sender).await; + tracing::info!("Consensus actor stopped"); + res + }); + // TODO: information about the head block of the validators (currently just the main node) // should also be provided over the gossip network.
+ s.spawn_bg(async { + consensus::run_main_node_state_fetcher(ctx, &main_node_client, &sync_state) + .await?; Ok(()) - }) - .await - .context("consensus actor") + }); + ctx.wait(stop_receiver.wait_for(|stop| *stop)).await??; + Ok(()) }) - } + .await + .context("consensus actor") + }) + } else { + let pool = singleton_pool_builder + .build() + .await + .context("failed to build a connection pool for `MainNodeFetcher`")?; + let mut storage = pool.access_storage_tagged("sync_layer").await?; + let fetcher = MainNodeFetcher::new( + &mut storage, + Box::new(main_node_client), + action_queue_sender, + sync_state.clone(), + stop_receiver.clone(), + ) + .await + .context("failed initializing main node fetcher")?; + tokio::spawn(fetcher.run()) }; let metadata_calculator_config = MetadataCalculatorConfig { diff --git a/core/lib/dal/.sqlx/query-eae299cb634a5b5b0409648436c4550d90b643424e3cac238d97cb79c9c140a4.json b/core/lib/dal/.sqlx/query-3596a70433e4e27fcda18f37073b51dea32e37cb8c51f1dd5d82c15eddc48e6b.json similarity index 60% rename from core/lib/dal/.sqlx/query-eae299cb634a5b5b0409648436c4550d90b643424e3cac238d97cb79c9c140a4.json rename to core/lib/dal/.sqlx/query-3596a70433e4e27fcda18f37073b51dea32e37cb8c51f1dd5d82c15eddc48e6b.json index aa4751e12b7..7fc673c7c22 100644 --- a/core/lib/dal/.sqlx/query-eae299cb634a5b5b0409648436c4550d90b643424e3cac238d97cb79c9c140a4.json +++ b/core/lib/dal/.sqlx/query-3596a70433e4e27fcda18f37073b51dea32e37cb8c51f1dd5d82c15eddc48e6b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n (\n SELECT\n MAX(m2.number)\n FROM\n miniblocks m2\n WHERE\n miniblocks.l1_batch_number = m2.l1_batch_number\n ) AS \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (\n SELECT\n MAX(m2.number)\n FROM\n miniblocks m2\n WHERE\n miniblocks.l1_batch_number = m2.l1_batch_number\n ) AS \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -90,5 +90,5 @@ false ] }, - "hash": "eae299cb634a5b5b0409648436c4550d90b643424e3cac238d97cb79c9c140a4" + "hash": "3596a70433e4e27fcda18f37073b51dea32e37cb8c51f1dd5d82c15eddc48e6b" } diff --git a/core/lib/dal/.sqlx/query-3743b41751a141c21f1674fa581ea7a878613cf62f7fb9b94c5145dc8d1da674.json b/core/lib/dal/.sqlx/query-3743b41751a141c21f1674fa581ea7a878613cf62f7fb9b94c5145dc8d1da674.json new file mode 100644 index 00000000000..7da86fec74b --- /dev/null +++ 
b/core/lib/dal/.sqlx/query-3743b41751a141c21f1674fa581ea7a878613cf62f7fb9b94c5145dc8d1da674.json @@ -0,0 +1,62 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l1_batch_number,\n l1_batch_timestamp,\n l1_batch_root_hash,\n miniblock_number,\n miniblock_timestamp,\n miniblock_hash,\n protocol_version,\n storage_logs_chunks_processed\n FROM\n snapshot_recovery\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "l1_batch_timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "l1_batch_root_hash", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "miniblock_number", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "miniblock_timestamp", + "type_info": "Int8" + }, + { + "ordinal": 5, + "name": "miniblock_hash", + "type_info": "Bytea" + }, + { + "ordinal": 6, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "storage_logs_chunks_processed", + "type_info": "BoolArray" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false + ] + }, + "hash": "3743b41751a141c21f1674fa581ea7a878613cf62f7fb9b94c5145dc8d1da674" +} diff --git a/core/lib/dal/.sqlx/query-525123d4ec2b427f1c171f30d0937d8d542b4f14cf560972c005ab3cc13d1f63.json b/core/lib/dal/.sqlx/query-525123d4ec2b427f1c171f30d0937d8d542b4f14cf560972c005ab3cc13d1f63.json deleted file mode 100644 index 7764425aa21..00000000000 --- a/core/lib/dal/.sqlx/query-525123d4ec2b427f1c171f30d0937d8d542b4f14cf560972c005ab3cc13d1f63.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n hash\n FROM\n miniblocks\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "525123d4ec2b427f1c171f30d0937d8d542b4f14cf560972c005ab3cc13d1f63" -} diff --git a/core/lib/dal/.sqlx/query-66510caa7683ed90729cb545ac8528f012c2ecaebf6622ca1ae481045604e58d.json b/core/lib/dal/.sqlx/query-66510caa7683ed90729cb545ac8528f012c2ecaebf6622ca1ae481045604e58d.json new file mode 100644 index 00000000000..76900667a9c --- /dev/null +++ b/core/lib/dal/.sqlx/query-66510caa7683ed90729cb545ac8528f012c2ecaebf6622ca1ae481045604e58d.json @@ -0,0 +1,21 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n snapshot_recovery (\n l1_batch_number,\n l1_batch_timestamp,\n l1_batch_root_hash,\n miniblock_number,\n miniblock_timestamp,\n miniblock_hash,\n protocol_version,\n storage_logs_chunks_processed,\n updated_at,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, $7, $8, NOW(), NOW())\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bytea", + "Int8", + "Int8", + "Bytea", + "Int4", + "BoolArray" + ] + }, + "nullable": [] + }, + "hash": "66510caa7683ed90729cb545ac8528f012c2ecaebf6622ca1ae481045604e58d" +} diff --git a/core/lib/dal/.sqlx/query-73f0401ac19c4e1efd73d02b8dcdd913ed9fbd69b8354b7d18b01d3fb62f6be8.json b/core/lib/dal/.sqlx/query-73f0401ac19c4e1efd73d02b8dcdd913ed9fbd69b8354b7d18b01d3fb62f6be8.json deleted file mode 100644 index 7c366776a5a..00000000000 --- a/core/lib/dal/.sqlx/query-73f0401ac19c4e1efd73d02b8dcdd913ed9fbd69b8354b7d18b01d3fb62f6be8.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batch_number,\n l1_batch_root_hash,\n 
miniblock_number,\n miniblock_root_hash,\n storage_logs_chunks_processed\n FROM\n snapshot_recovery\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "l1_batch_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 2, - "name": "miniblock_number", - "type_info": "Int8" - }, - { - "ordinal": 3, - "name": "miniblock_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 4, - "name": "storage_logs_chunks_processed", - "type_info": "BoolArray" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false, - false, - false, - false - ] - }, - "hash": "73f0401ac19c4e1efd73d02b8dcdd913ed9fbd69b8354b7d18b01d3fb62f6be8" -} diff --git a/core/lib/dal/.sqlx/query-d3f9202d665ef4fcb028dae6484253eb340a21afd7d65ce6d2f523aeded8dfc0.json b/core/lib/dal/.sqlx/query-d3f9202d665ef4fcb028dae6484253eb340a21afd7d65ce6d2f523aeded8dfc0.json new file mode 100644 index 00000000000..469c338969d --- /dev/null +++ b/core/lib/dal/.sqlx/query-d3f9202d665ef4fcb028dae6484253eb340a21afd7d65ce6d2f523aeded8dfc0.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n hash\n FROM\n miniblocks\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "d3f9202d665ef4fcb028dae6484253eb340a21afd7d65ce6d2f523aeded8dfc0" +} diff --git a/core/lib/dal/.sqlx/query-eb83e9175b4f8c0351ac2d4b4d2940bd2aee55b66f6780ceae06c3e1ff92eb8b.json b/core/lib/dal/.sqlx/query-eb83e9175b4f8c0351ac2d4b4d2940bd2aee55b66f6780ceae06c3e1ff92eb8b.json deleted file mode 100644 index 250e5beb89a..00000000000 --- a/core/lib/dal/.sqlx/query-eb83e9175b4f8c0351ac2d4b4d2940bd2aee55b66f6780ceae06c3e1ff92eb8b.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n snapshot_recovery (\n l1_batch_number,\n l1_batch_root_hash,\n miniblock_number,\n miniblock_root_hash,\n storage_logs_chunks_processed,\n updated_at,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW(), NOW())\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Bytea", - "Int8", - "Bytea", - "BoolArray" - ] - }, - "nullable": [] - }, - "hash": "eb83e9175b4f8c0351ac2d4b4d2940bd2aee55b66f6780ceae06c3e1ff92eb8b" -} diff --git a/core/lib/dal/.sqlx/query-f1541a8d970d57ed118ee603e7285ec6ec1e522bc058710560ef78e75f94ddac.json b/core/lib/dal/.sqlx/query-f1541a8d970d57ed118ee603e7285ec6ec1e522bc058710560ef78e75f94ddac.json new file mode 100644 index 00000000000..0613eb77a30 --- /dev/null +++ b/core/lib/dal/.sqlx/query-f1541a8d970d57ed118ee603e7285ec6ec1e522bc058710560ef78e75f94ddac.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n miniblock_hash\n FROM\n snapshot_recovery\n WHERE\n miniblock_number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "miniblock_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "f1541a8d970d57ed118ee603e7285ec6ec1e522bc058710560ef78e75f94ddac" +} diff --git a/core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.down.sql b/core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.down.sql new file mode 100644 index 00000000000..fbe98139ec8 
--- /dev/null +++ b/core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.down.sql @@ -0,0 +1,5 @@ +ALTER TABLE snapshot_recovery + DROP COLUMN miniblock_hash, + DROP COLUMN l1_batch_timestamp, + DROP COLUMN miniblock_timestamp, + DROP COLUMN protocol_version; diff --git a/core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.up.sql b/core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.up.sql new file mode 100644 index 00000000000..b34e199a2ae --- /dev/null +++ b/core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.up.sql @@ -0,0 +1,10 @@ +ALTER TABLE snapshot_recovery + ADD COLUMN miniblock_hash BYTEA NOT NULL, + ADD COLUMN l1_batch_timestamp BIGINT NOT NULL, + ADD COLUMN miniblock_timestamp BIGINT NOT NULL, + ADD COLUMN protocol_version INT NOT NULL; +-- `miniblock_root_hash` should be renamed to `miniblock_hash`, but we cannot do it straightforwardly +-- because of backward compatibility. Instead, we create a new column and set a dummy default value +-- for the old one, so that INSERTs not referencing `miniblock_root_hash` don't fail. +ALTER TABLE snapshot_recovery + ALTER COLUMN miniblock_root_hash SET DEFAULT '\x0000000000000000000000000000000000000000000000000000000000000000'::bytea; diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index c03352937d2..c265ce45e4e 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -650,13 +650,15 @@ mod tests { use zksync_types::{ block::{MiniblockHasher, MiniblockHeader}, fee::TransactionExecutionMetrics, - snapshots::SnapshotRecoveryStatus, Address, MiniblockNumber, ProtocolVersion, ProtocolVersionId, }; use super::*; use crate::{ - tests::{create_miniblock_header, mock_execution_result, mock_l2_transaction}, + tests::{ + create_miniblock_header, create_snapshot_recovery, mock_execution_result, + mock_l2_transaction, + }, ConnectionPool, }; @@ -815,13 +817,7 @@ mod tests { async fn resolving_pending_block_id_for_snapshot_recovery() { let connection_pool = ConnectionPool::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); - let snapshot_recovery = SnapshotRecoveryStatus { - l1_batch_number: L1BatchNumber(23), - l1_batch_root_hash: H256::zero(), - miniblock_number: MiniblockNumber(42), - miniblock_root_hash: H256::zero(), - storage_logs_chunks_processed: vec![true; 100], - }; + let snapshot_recovery = create_snapshot_recovery(); conn.snapshot_recovery_dal() .insert_initial_recovery_status(&snapshot_recovery) .await diff --git a/core/lib/dal/src/snapshot_recovery_dal.rs b/core/lib/dal/src/snapshot_recovery_dal.rs index af6f6a25439..5d5e727b38b 100644 --- a/core/lib/dal/src/snapshot_recovery_dal.rs +++ b/core/lib/dal/src/snapshot_recovery_dal.rs @@ -1,4 +1,6 @@ -use zksync_types::{snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, H256}; +use zksync_types::{ + snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, +}; use crate::StorageProcessor; @@ -17,20 +19,26 @@ impl SnapshotRecoveryDal<'_, '_> { INSERT INTO snapshot_recovery ( l1_batch_number, + l1_batch_timestamp, l1_batch_root_hash, miniblock_number, - miniblock_root_hash, + miniblock_timestamp, + miniblock_hash, + protocol_version, storage_logs_chunks_processed, updated_at, created_at ) VALUES - ($1, $2, $3, $4, $5, NOW(), NOW()) + ($1, $2, $3, $4, $5, $6, $7, $8, NOW(), NOW()) "#, status.l1_batch_number.0 as i64, + 
status.l1_batch_timestamp as i64, status.l1_batch_root_hash.0.as_slice(), status.miniblock_number.0 as i64, - status.miniblock_root_hash.0.as_slice(), + status.miniblock_timestamp as i64, + status.miniblock_hash.0.as_slice(), + status.protocol_version as i32, &status.storage_logs_chunks_processed, ) .execute(self.storage.conn()) @@ -64,9 +72,12 @@ impl SnapshotRecoveryDal<'_, '_> { r#" SELECT l1_batch_number, + l1_batch_timestamp, l1_batch_root_hash, miniblock_number, - miniblock_root_hash, + miniblock_timestamp, + miniblock_hash, + protocol_version, storage_logs_chunks_processed FROM snapshot_recovery @@ -75,19 +86,24 @@ impl SnapshotRecoveryDal<'_, '_> { .fetch_optional(self.storage.conn()) .await?; - Ok(record.map(|r| SnapshotRecoveryStatus { - l1_batch_number: L1BatchNumber(r.l1_batch_number as u32), - l1_batch_root_hash: H256::from_slice(&r.l1_batch_root_hash), - miniblock_number: MiniblockNumber(r.miniblock_number as u32), - miniblock_root_hash: H256::from_slice(&r.miniblock_root_hash), - storage_logs_chunks_processed: r.storage_logs_chunks_processed.into_iter().collect(), + Ok(record.map(|row| SnapshotRecoveryStatus { + l1_batch_number: L1BatchNumber(row.l1_batch_number as u32), + l1_batch_timestamp: row.l1_batch_timestamp as u64, + l1_batch_root_hash: H256::from_slice(&row.l1_batch_root_hash), + miniblock_number: MiniblockNumber(row.miniblock_number as u32), + miniblock_timestamp: row.miniblock_timestamp as u64, + miniblock_hash: H256::from_slice(&row.miniblock_hash), + protocol_version: ProtocolVersionId::try_from(row.protocol_version as u16).unwrap(), + storage_logs_chunks_processed: row.storage_logs_chunks_processed, })) } } #[cfg(test)] mod tests { - use zksync_types::{snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, H256}; + use zksync_types::{ + snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, + }; use crate::ConnectionPool; @@ -103,9 +119,12 @@ mod tests { assert_eq!(None, empty_status); let mut status = SnapshotRecoveryStatus { l1_batch_number: L1BatchNumber(123), + l1_batch_timestamp: 123, l1_batch_root_hash: H256::random(), miniblock_number: MiniblockNumber(234), - miniblock_root_hash: H256::random(), + miniblock_timestamp: 234, + miniblock_hash: H256::random(), + protocol_version: ProtocolVersionId::latest(), storage_logs_chunks_processed: vec![false, false, true, false], }; applied_status_dal diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index dab9b622871..6f795ec8b7e 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -275,12 +275,13 @@ impl StorageWeb3Dal<'_, '_> { #[cfg(test)] mod tests { - use zksync_types::{ - block::L1BatchHeader, snapshots::SnapshotRecoveryStatus, ProtocolVersion, ProtocolVersionId, - }; + use zksync_types::{block::L1BatchHeader, ProtocolVersion, ProtocolVersionId}; use super::*; - use crate::{tests::create_miniblock_header, ConnectionPool}; + use crate::{ + tests::{create_miniblock_header, create_snapshot_recovery}, + ConnectionPool, + }; #[tokio::test] async fn resolving_l1_batch_number_of_miniblock() { @@ -356,13 +357,7 @@ mod tests { conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) .await; - let snapshot_recovery = SnapshotRecoveryStatus { - l1_batch_number: L1BatchNumber(23), - l1_batch_root_hash: H256::zero(), - miniblock_number: MiniblockNumber(42), - miniblock_root_hash: H256::zero(), - storage_logs_chunks_processed: vec![true; 100], - }; + let 
snapshot_recovery = create_snapshot_recovery(); conn.snapshot_recovery_dal() .insert_initial_recovery_status(&snapshot_recovery) .await diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 284ce317555..90c7ad3665f 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -30,6 +30,12 @@ impl SyncDal<'_, '_> { (MAX(number) + 1) FROM l1_batches + ), + ( + SELECT + MAX(l1_batch_number) + 1 + FROM + snapshot_recovery ) ) AS "l1_batch_number!", ( @@ -108,7 +114,10 @@ mod tests { use super::*; use crate::{ - tests::{create_miniblock_header, mock_execution_result, mock_l2_transaction}, + tests::{ + create_miniblock_header, create_snapshot_recovery, mock_execution_result, + mock_l2_transaction, + }, ConnectionPool, }; @@ -227,4 +236,43 @@ mod tests { assert!(block.last_in_batch); assert_eq!(block.operator_address, miniblock_header.fee_account_address); } + + #[tokio::test] + async fn sync_block_after_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.access_storage().await.unwrap(); + + // Simulate snapshot recovery. + conn.protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion::default()) + .await; + let snapshot_recovery = create_snapshot_recovery(); + conn.snapshot_recovery_dal() + .insert_initial_recovery_status(&snapshot_recovery) + .await + .unwrap(); + + assert!(conn + .sync_dal() + .sync_block(snapshot_recovery.miniblock_number, false) + .await + .unwrap() + .is_none()); + + let miniblock_header = create_miniblock_header(snapshot_recovery.miniblock_number.0 + 1); + conn.blocks_dal() + .insert_miniblock(&miniblock_header) + .await + .unwrap(); + + let block = conn + .sync_dal() + .sync_block(miniblock_header.number, false) + .await + .unwrap() + .expect("No new miniblock"); + assert_eq!(block.number, miniblock_header.number); + assert_eq!(block.timestamp, miniblock_header.timestamp); + assert_eq!(block.l1_batch_number, snapshot_recovery.l1_batch_number + 1); + } } diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index da9c66d8a71..a193679d37e 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -8,9 +8,10 @@ use zksync_types::{ helpers::unix_timestamp_ms, l1::{L1Tx, OpProcessingType, PriorityQueueType}, l2::L2Tx, + snapshots::SnapshotRecoveryStatus, tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, - Address, Execute, L1BlockNumber, L1TxCommonData, L2ChainId, MiniblockNumber, PriorityOpId, - ProtocolVersionId, H160, H256, U256, + Address, Execute, L1BatchNumber, L1BlockNumber, L1TxCommonData, L2ChainId, MiniblockNumber, + PriorityOpId, ProtocolVersionId, H160, H256, U256, }; use crate::{ @@ -118,6 +119,19 @@ pub(crate) fn mock_execution_result(transaction: L2Tx) -> TransactionExecutionRe } } +pub(crate) fn create_snapshot_recovery() -> SnapshotRecoveryStatus { + SnapshotRecoveryStatus { + l1_batch_number: L1BatchNumber(23), + l1_batch_timestamp: 23, + l1_batch_root_hash: H256::zero(), + miniblock_number: MiniblockNumber(42), + miniblock_timestamp: 42, + miniblock_hash: H256::zero(), + protocol_version: ProtocolVersionId::latest(), + storage_logs_chunks_processed: vec![true; 100], + } +} + #[tokio::test] async fn workflow_with_submit_tx_equal_hashes() { let connection_pool = ConnectionPool::test_pool().await; diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index dc76fca6732..f86af45a36a 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ 
b/core/lib/dal/src/transactions_dal.rs @@ -1,6 +1,6 @@ -use std::{fmt, time::Duration}; +use std::{collections::HashMap, fmt, time::Duration}; -use anyhow::Context; +use anyhow::Context as _; use bigdecimal::BigDecimal; use itertools::Itertools; use sqlx::{error, types::chrono::NaiveDateTime}; @@ -1119,7 +1119,7 @@ impl TransactionsDal<'_, '_> { .fetch_all(self.storage.conn()) .await?; - self.get_miniblocks_to_execute(transactions).await + self.map_transactions_to_execution_data(transactions).await } /// Returns miniblocks with their transactions to be used in VM execution. @@ -1147,10 +1147,10 @@ impl TransactionsDal<'_, '_> { .fetch_all(self.storage.conn()) .await?; - self.get_miniblocks_to_execute(transactions).await + self.map_transactions_to_execution_data(transactions).await } - async fn get_miniblocks_to_execute( + async fn map_transactions_to_execution_data( &mut self, transactions: Vec<StorageTransaction>, ) -> anyhow::Result<Vec<MiniblockExecutionData>> { @@ -1168,14 +1168,10 @@ impl TransactionsDal<'_, '_> { if transactions_by_miniblock.is_empty() { return Ok(Vec::new()); } - let from_miniblock = transactions_by_miniblock - .first() - .context("No first transaction found for miniblock")? - .0; - let to_miniblock = transactions_by_miniblock - .last() - .context("No last transaction found for miniblock")? - .0; + let from_miniblock = transactions_by_miniblock.first().unwrap().0; + let to_miniblock = transactions_by_miniblock.last().unwrap().0; + // `unwrap()`s are safe; `transactions_by_miniblock` is not empty as checked above + + let miniblock_data = sqlx::query!( r#" SELECT timestamp, virtual_blocks FROM miniblocks WHERE number BETWEEN $1 AND $2 ORDER BY number "#, from_miniblock.0 as i64, to_miniblock.0 as i64 ) .fetch_all(self.storage.conn()) .await?; - let prev_hashes = sqlx::query!( + anyhow::ensure!( + miniblock_data.len() == transactions_by_miniblock.len(), + "Not enough miniblock data retrieved" + ); + + let prev_miniblock_hashes = sqlx::query!( r#" SELECT + number, hash FROM miniblocks WHERE number BETWEEN $1 AND $2 ORDER BY number "#, from_miniblock.0 as i64 - 1, to_miniblock.0 as i64 - 1 ) .fetch_all(self.storage.conn()) .await?; - assert_eq!( - miniblock_data.len(), - transactions_by_miniblock.len(), - "Not enough miniblock data retrieved" - ); - assert_eq!( - prev_hashes.len(), - transactions_by_miniblock.len(), - "Not enough previous hashes retrieved" - ); - - Ok(transactions_by_miniblock + let prev_miniblock_hashes: HashMap<_, _> = prev_miniblock_hashes .into_iter() - .zip(miniblock_data) - .zip(prev_hashes) - .map( - |(((number, txs), miniblock_data_row), prev_hash_row)| MiniblockExecutionData { - number, - timestamp: miniblock_data_row.timestamp as u64, - prev_block_hash: H256::from_slice(&prev_hash_row.hash), - virtual_blocks: miniblock_data_row.virtual_blocks as u32, - txs, - }, - ) - .collect()) + .map(|row| { + ( + MiniblockNumber(row.number as u32), + H256::from_slice(&row.hash), + ) + }) + .collect(); + + let mut data = Vec::with_capacity(transactions_by_miniblock.len()); + let it = transactions_by_miniblock.into_iter().zip(miniblock_data); + for ((number, txs), miniblock_row) in it { + let prev_miniblock_number = number - 1; + let prev_block_hash = match prev_miniblock_hashes.get(&prev_miniblock_number) { + Some(hash) => *hash, + None => { + // Can occur after snapshot recovery; the miniblock preceding the first locally + // stored miniblock may be missing from storage. + let row = sqlx::query!( + r#" + SELECT + miniblock_hash + FROM + snapshot_recovery + WHERE + miniblock_number = $1 + "#, + prev_miniblock_number.0 as i64 + ) + .fetch_optional(self.storage.conn()) + .await?
+ .with_context(|| { + format!( + "miniblock #{prev_miniblock_number} is not in storage, and its hash is not \ + in snapshot recovery data" + ) + })?; + H256::from_slice(&row.miniblock_hash) + } + }; + + data.push(MiniblockExecutionData { + number, + timestamp: miniblock_row.timestamp as u64, + prev_block_hash, + virtual_blocks: miniblock_row.virtual_blocks as u32, + txs, + }); + } + Ok(data) } pub async fn get_tx_locations(&mut self, l1_batch_number: L1BatchNumber) -> TxLocations { diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs index 31b202a7c1e..15ba95a1a96 100644 --- a/core/lib/snapshots_applier/src/lib.rs +++ b/core/lib/snapshots_applier/src/lib.rs @@ -203,15 +203,22 @@ impl<'a> SnapshotsApplier<'a> { .fetch_l2_block(miniblock_number) .await? .with_context(|| format!("miniblock #{miniblock_number} is missing on main node"))?; - let miniblock_root_hash = miniblock + let miniblock_hash = miniblock .hash .context("snapshot miniblock fetched from main node doesn't have hash set")?; Ok(SnapshotRecoveryStatus { l1_batch_number, + l1_batch_timestamp: snapshot.last_l1_batch_with_metadata.header.timestamp, l1_batch_root_hash: snapshot.last_l1_batch_with_metadata.metadata.root_hash, miniblock_number: snapshot.miniblock_number, - miniblock_root_hash, + miniblock_timestamp: miniblock.timestamp, + miniblock_hash, + protocol_version: snapshot + .last_l1_batch_with_metadata + .header + .protocol_version + .unwrap(), storage_logs_chunks_processed: vec![false; snapshot.storage_logs_chunks.len()], }) } diff --git a/core/lib/snapshots_applier/src/tests/mod.rs b/core/lib/snapshots_applier/src/tests/mod.rs index de750ef12f9..4ecc2f17712 100644 --- a/core/lib/snapshots_applier/src/tests/mod.rs +++ b/core/lib/snapshots_applier/src/tests/mod.rs @@ -10,7 +10,7 @@ use zksync_types::{ SnapshotRecoveryStatus, SnapshotStorageLog, SnapshotStorageLogsChunk, SnapshotStorageLogsChunkMetadata, SnapshotStorageLogsStorageKey, }, - Bytes, L1BatchNumber, MiniblockNumber, H256, + Bytes, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, }; use self::utils::{l1_block_metadata, miniblock_metadata, random_storage_logs, MockMainNodeClient}; @@ -93,8 +93,11 @@ async fn snapshots_creator_can_successfully_recover_db() { let expected_status = SnapshotRecoveryStatus { l1_batch_number, l1_batch_root_hash, + l1_batch_timestamp: 0, miniblock_number, - miniblock_root_hash: miniblock_hash, + miniblock_hash, + miniblock_timestamp: 0, + protocol_version: ProtocolVersionId::default(), storage_logs_chunks_processed: vec![true, true], }; diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs index 4c05faec50e..b21eeb196c5 100644 --- a/core/lib/state/src/test_utils.rs +++ b/core/lib/state/src/test_utils.rs @@ -6,8 +6,8 @@ use zksync_dal::StorageProcessor; use zksync_types::{ block::{L1BatchHeader, MiniblockHeader}, snapshots::SnapshotRecoveryStatus, - AccountTreeId, Address, L1BatchNumber, MiniblockNumber, ProtocolVersion, StorageKey, - StorageLog, H256, + AccountTreeId, Address, L1BatchNumber, MiniblockNumber, ProtocolVersion, ProtocolVersionId, + StorageKey, StorageLog, H256, }; pub(crate) async fn prepare_postgres(conn: &mut StorageProcessor<'_>) { @@ -129,9 +129,12 @@ pub(crate) async fn prepare_postgres_for_snapshot_recovery( let snapshot_recovery = SnapshotRecoveryStatus { l1_batch_number: L1BatchNumber(23), + l1_batch_timestamp: 23, l1_batch_root_hash: H256::zero(), // not used miniblock_number: MiniblockNumber(42), - miniblock_root_hash: H256::zero(), // not 
used + miniblock_timestamp: 42, + miniblock_hash: H256::zero(), // not used + protocol_version: ProtocolVersionId::latest(), storage_logs_chunks_processed: vec![true; 100], }; conn.snapshot_recovery_dal() diff --git a/core/lib/types/src/snapshots.rs b/core/lib/types/src/snapshots.rs index 2007c825902..009df6dcb83 100644 --- a/core/lib/types/src/snapshots.rs +++ b/core/lib/types/src/snapshots.rs @@ -6,7 +6,9 @@ use zksync_basic_types::{AccountTreeId, L1BatchNumber, MiniblockNumber, H256}; use zksync_protobuf::{required, ProtoFmt}; use zksync_utils::u256_to_h256; -use crate::{commitment::L1BatchWithMetadata, Bytes, StorageKey, StorageValue, U256}; +use crate::{ + commitment::L1BatchWithMetadata, Bytes, ProtocolVersionId, StorageKey, StorageValue, U256, +}; /// Information about all snapshots persisted by the node. #[derive(Debug, Clone, Serialize, Deserialize)] @@ -193,8 +195,11 @@ impl ProtoFmt for SnapshotStorageLogsChunk { pub struct SnapshotRecoveryStatus { pub l1_batch_number: L1BatchNumber, pub l1_batch_root_hash: H256, + pub l1_batch_timestamp: u64, pub miniblock_number: MiniblockNumber, - pub miniblock_root_hash: H256, + pub miniblock_hash: H256, + pub miniblock_timestamp: u64, + pub protocol_version: ProtocolVersionId, pub storage_logs_chunks_processed: Vec<bool>, } diff --git a/core/lib/vm_utils/src/lib.rs b/core/lib/vm_utils/src/lib.rs index a04ad45f748..5a661e433fb 100644 --- a/core/lib/vm_utils/src/lib.rs +++ b/core/lib/vm_utils/src/lib.rs @@ -11,7 +11,7 @@ use zksync_dal::StorageProcessor; use zksync_state::{PostgresStorage, StoragePtr, StorageView, WriteStorage}; use zksync_types::{L1BatchNumber, L2ChainId, Transaction}; -use crate::storage::load_l1_batch_params; +use crate::storage::L1BatchParamsProvider; pub type VmAndStorage<'a> = ( VmInstance<StorageView<PostgresStorage<'a>>, HistoryEnabled>, StoragePtr<StorageView<PostgresStorage<'a>>>, ); pub fn create_vm( mut connection: StorageProcessor<'_>, l2_chain_id: L2ChainId, ) -> anyhow::Result<VmAndStorage> { - let prev_l1_batch_number = l1_batch_number - 1; - let (_, miniblock_number) = rt_handle + let l1_batch_params_provider = rt_handle + .block_on(L1BatchParamsProvider::new(&mut connection)) + .context("failed initializing L1 batch params provider")?; + let first_miniblock_in_batch = rt_handle .block_on( - connection - .blocks_dal() - .get_miniblock_range_of_l1_batch(prev_l1_batch_number), - )? - .with_context(|| { - format!( - "l1_batch_number {l1_batch_number:?} must have a previous miniblock to start from" - ) - })?; - - let fee_account_addr = rt_handle - .block_on( - connection - .blocks_dal() - .get_fee_address_for_miniblock(miniblock_number + 1), - )? - .with_context(|| { - format!("l1_batch_number {l1_batch_number:?} must have fee_account_address") - })?; + l1_batch_params_provider + .load_first_miniblock_in_batch(&mut connection, l1_batch_number), + ) + .with_context(|| format!("failed loading first miniblock in L1 batch #{l1_batch_number}"))? + .with_context(|| format!("no miniblocks persisted for L1 batch #{l1_batch_number}"))?; // In the state keeper, this value is used to reject execution. // All batches run by BasicWitnessInputProducer have already been executed by State Keeper. // This means we don't want to reject any execution, therefore we're using MAX as an allow all.
let validation_computational_gas_limit = u32::MAX; + let (system_env, l1_batch_env) = rt_handle - .block_on(load_l1_batch_params( + .block_on(l1_batch_params_provider.load_l1_batch_params( &mut connection, - l1_batch_number, - fee_account_addr, + &first_miniblock_in_batch, validation_computational_gas_limit, l2_chain_id, )) .context("expected miniblock to be executed and sealed")?; - let pg_storage = PostgresStorage::new(rt_handle.clone(), connection, miniblock_number, true); + let storage_miniblock_number = first_miniblock_in_batch.number() - 1; + let pg_storage = PostgresStorage::new( + rt_handle.clone(), + connection, + storage_miniblock_number, + true, + ); let storage_view = StorageView::new(pg_storage).to_rc_ptr(); let vm = VmInstance::new(l1_batch_env, system_env, storage_view.clone()); diff --git a/core/lib/vm_utils/src/storage.rs b/core/lib/vm_utils/src/storage.rs index a05c84b911d..1794b4546b1 100644 --- a/core/lib/vm_utils/src/storage.rs +++ b/core/lib/vm_utils/src/storage.rs @@ -1,121 +1,40 @@ use std::time::{Duration, Instant}; +use anyhow::Context; use multivm::{ interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, vm_latest::constants::BLOCK_GAS_LIMIT, + zk_evm_latest::ethereum_types::H256, }; use zksync_contracts::BaseSystemContracts; use zksync_dal::StorageProcessor; use zksync_types::{ - fee_model::BatchFeeInput, Address, L1BatchNumber, L2ChainId, MiniblockNumber, - ProtocolVersionId, H256, U256, ZKPORTER_IS_AVAILABLE, + block::MiniblockHeader, fee_model::BatchFeeInput, snapshots::SnapshotRecoveryStatus, Address, + L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, ZKPORTER_IS_AVAILABLE, }; -use zksync_utils::{h256_to_u256, u256_to_h256}; -pub async fn load_l1_batch_params( - storage: &mut StorageProcessor<'_>, - current_l1_batch_number: L1BatchNumber, - fee_account: Address, - validation_computational_gas_limit: u32, - chain_id: L2ChainId, -) -> Option<(SystemEnv, L1BatchEnv)> { - // If miniblock doesn't exist (for instance if it's pending), it means that there is no unsynced state (i.e. no transactions - // were executed after the last sealed batch). 
- let pending_miniblock_number = { - let (_, last_miniblock_number_included_in_l1_batch) = storage - .blocks_dal() - .get_miniblock_range_of_l1_batch(current_l1_batch_number - 1) - .await - .unwrap() - .unwrap(); - last_miniblock_number_included_in_l1_batch + 1 - }; - let pending_miniblock_header = storage - .blocks_dal() - .get_miniblock_header(pending_miniblock_number) - .await - .unwrap()?; - - tracing::info!("Getting previous batch hash"); - let (previous_l1_batch_hash, _) = - wait_for_prev_l1_batch_params(storage, current_l1_batch_number).await; - - tracing::info!("Getting previous miniblock hash"); - let prev_miniblock_hash = storage - .blocks_dal() - .get_miniblock_header(pending_miniblock_number - 1) - .await - .unwrap() - .unwrap() - .hash; - - let base_system_contracts = storage - .factory_deps_dal() - .get_base_system_contracts( - pending_miniblock_header - .base_system_contracts_hashes - .bootloader, - pending_miniblock_header - .base_system_contracts_hashes - .default_aa, - ) - .await; - - tracing::info!("Previous l1_batch_hash: {}", previous_l1_batch_hash); - Some(l1_batch_params( - current_l1_batch_number, - fee_account, - pending_miniblock_header.timestamp, - previous_l1_batch_hash, - pending_miniblock_header.batch_fee_input, - pending_miniblock_number, - prev_miniblock_hash, - base_system_contracts, - validation_computational_gas_limit, - pending_miniblock_header - .protocol_version - .expect("`protocol_version` must be set for pending miniblock"), - pending_miniblock_header.virtual_blocks, - chain_id, - )) +/// Typesafe wrapper around [`MiniblockHeader`] returned by [`L1BatchParamsProvider`]. +#[derive(Debug)] +pub struct FirstMiniblockInBatch { + header: MiniblockHeader, + l1_batch_number: L1BatchNumber, } -pub async fn wait_for_prev_l1_batch_params( - storage: &mut StorageProcessor<'_>, - number: L1BatchNumber, -) -> (U256, u64) { - if number == L1BatchNumber(0) { - return (U256::default(), 0); +impl FirstMiniblockInBatch { + pub fn number(&self) -> MiniblockNumber { + self.header.number } - wait_for_l1_batch_params_unchecked(storage, number - 1).await -} -/// # Warning -/// -/// If invoked for a `L1BatchNumber` of a non-existent l1 batch, will block current thread indefinitely. 
-async fn wait_for_l1_batch_params_unchecked( - storage: &mut StorageProcessor<'_>, - number: L1BatchNumber, -) -> (U256, u64) { - // If the state root is not known yet, this duration will be used to back off in the while loops - const SAFE_STATE_ROOT_INTERVAL: Duration = Duration::from_millis(100); - - let stage_started_at: Instant = Instant::now(); - loop { - let data = storage - .blocks_dal() - .get_l1_batch_state_root_and_timestamp(number) - .await - .unwrap(); - if let Some((root_hash, timestamp)) = data { - tracing::trace!( - "Waiting for hash of L1 batch #{number} took {:?}", - stage_started_at.elapsed() - ); - return (h256_to_u256(root_hash), timestamp); - } + pub fn has_protocol_version(&self) -> bool { + self.header.protocol_version.is_some() + } - tokio::time::sleep(SAFE_STATE_ROOT_INTERVAL).await; + pub fn set_protocol_version(&mut self, version: ProtocolVersionId) { + assert!( + self.header.protocol_version.is_none(), + "Cannot redefine protocol version" + ); + self.header.protocol_version = Some(version); } } @@ -125,7 +44,7 @@ pub fn l1_batch_params( current_l1_batch_number: L1BatchNumber, fee_account: Address, l1_batch_timestamp: u64, - previous_batch_hash: U256, + previous_batch_hash: H256, fee_input: BatchFeeInput, first_miniblock_number: MiniblockNumber, prev_miniblock_hash: H256, @@ -146,7 +65,7 @@ pub fn l1_batch_params( chain_id, }, L1BatchEnv { - previous_batch_hash: Some(u256_to_h256(previous_batch_hash)), + previous_batch_hash: Some(previous_batch_hash), number: current_l1_batch_number, timestamp: l1_batch_timestamp, fee_input, @@ -161,3 +80,227 @@ }, ) } + +/// Provider of L1 batch parameters for state keeper I/O implementations. The provider is stateless; i.e., it doesn't +/// enforce a particular order of method calls. +#[derive(Debug)] +pub struct L1BatchParamsProvider { + snapshot: Option<SnapshotRecoveryStatus>, +} + +impl L1BatchParamsProvider { + pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result<Self> { + let snapshot = storage + .snapshot_recovery_dal() + .get_applied_snapshot_status() + .await?; + Ok(Self { snapshot }) + } + + /// Returns state root hash and timestamp of an L1 batch with the specified number, waiting for the hash to be computed + /// if necessary. + pub async fn wait_for_l1_batch_params( + &self, + storage: &mut StorageProcessor<'_>, + number: L1BatchNumber, + ) -> anyhow::Result<(H256, u64)> { + let first_l1_batch = if let Some(snapshot) = &self.snapshot { + // Special case: if we've recovered from a snapshot, we allow waiting for the snapshot L1 batch.
+ if number == snapshot.l1_batch_number { + return Ok((snapshot.l1_batch_root_hash, snapshot.l1_batch_timestamp)); + } + snapshot.l1_batch_number + 1 + } else { + L1BatchNumber(0) + }; + + anyhow::ensure!( + number >= first_l1_batch, + "Cannot wait for a hash of a pruned L1 batch #{number} (first retained batch: {first_l1_batch})" + ); + Self::wait_for_l1_batch_params_unchecked(storage, number).await + } + + async fn wait_for_l1_batch_params_unchecked( + storage: &mut StorageProcessor<'_>, + number: L1BatchNumber, + ) -> anyhow::Result<(H256, u64)> { + // If the state root is not known yet, this duration will be used to back off in the while loops + const SAFE_STATE_ROOT_INTERVAL: Duration = Duration::from_millis(100); + + let stage_started_at: Instant = Instant::now(); + loop { + let data = storage + .blocks_dal() + .get_l1_batch_state_root_and_timestamp(number) + .await?; + if let Some((root_hash, timestamp)) = data { + tracing::trace!( + "Waiting for hash of L1 batch #{number} took {:?}", + stage_started_at.elapsed() + ); + return Ok((root_hash, timestamp)); + } + + tokio::time::sleep(SAFE_STATE_ROOT_INTERVAL).await; + } + } + + pub async fn load_l1_batch_protocol_version( + &self, + storage: &mut StorageProcessor<'_>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<Option<ProtocolVersionId>> { + if let Some(snapshot) = &self.snapshot { + if l1_batch_number == snapshot.l1_batch_number { + return Ok(Some(snapshot.protocol_version)); + } + anyhow::ensure!( + l1_batch_number > snapshot.l1_batch_number, + "Requested protocol version for pruned L1 batch #{l1_batch_number}; first retained batch is #{}", + snapshot.l1_batch_number + 1 + ); + } + + storage + .blocks_dal() + .get_batch_protocol_version_id(l1_batch_number) + .await + .map_err(Into::into) + } + + /// Returns a header of the first miniblock in the specified L1 batch regardless of whether the batch is sealed or not. + pub async fn load_first_miniblock_in_batch( + &self, + storage: &mut StorageProcessor<'_>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<Option<FirstMiniblockInBatch>> { + let miniblock_number = self + .load_number_of_first_miniblock_in_batch(storage, l1_batch_number) + .await + .context("failed getting first miniblock number")?; + Ok(match miniblock_number { + Some(number) => storage + .blocks_dal() + .get_miniblock_header(number) + .await + .context("failed getting miniblock header")? + .map(|header| FirstMiniblockInBatch { + header, + l1_batch_number, + }), + None => None, + }) + } + + #[doc(hidden)] // public for testing purposes + pub async fn load_number_of_first_miniblock_in_batch( + &self, + storage: &mut StorageProcessor<'_>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<Option<MiniblockNumber>> { + if l1_batch_number == L1BatchNumber(0) { + return Ok(Some(MiniblockNumber(0))); + } + + if let Some(snapshot) = &self.snapshot { + anyhow::ensure!( + l1_batch_number > snapshot.l1_batch_number, + "Cannot load miniblocks for pruned L1 batch #{l1_batch_number} (first retained batch: {})", + snapshot.l1_batch_number + 1 + ); + if l1_batch_number == snapshot.l1_batch_number + 1 { + return Ok(Some(snapshot.miniblock_number + 1)); + } + } + + let prev_l1_batch = l1_batch_number - 1; + // At this point, we have ensured that `prev_l1_batch` is not pruned. + let Some((_, last_miniblock_in_prev_l1_batch)) = storage + .blocks_dal() + .get_miniblock_range_of_l1_batch(prev_l1_batch) + .await + .with_context(|| { + format!("failed getting miniblock range for L1 batch #{prev_l1_batch}") + })?
+ else { + return Ok(None); + }; + Ok(Some(last_miniblock_in_prev_l1_batch + 1)) + } + + /// Loads VM-related L1 batch parameters for the specified batch. + pub async fn load_l1_batch_params( + &self, + storage: &mut StorageProcessor<'_>, + first_miniblock_in_batch: &FirstMiniblockInBatch, + validation_computational_gas_limit: u32, + chain_id: L2ChainId, + ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> { + anyhow::ensure!( + first_miniblock_in_batch.l1_batch_number > L1BatchNumber(0), + "Loading params for genesis L1 batch not supported" + ); + // L1 batch timestamp is set to the timestamp of its first miniblock. + let l1_batch_timestamp = first_miniblock_in_batch.header.timestamp; + + let prev_l1_batch_number = first_miniblock_in_batch.l1_batch_number - 1; + tracing::info!("Getting previous L1 batch hash for batch #{prev_l1_batch_number}"); + let (prev_l1_batch_hash, prev_l1_batch_timestamp) = self + .wait_for_l1_batch_params(storage, prev_l1_batch_number) + .await + .context("failed getting hash for previous L1 batch")?; + tracing::info!("Got state root hash for previous L1 batch #{prev_l1_batch_number}: {prev_l1_batch_hash:?}"); + + anyhow::ensure!( + prev_l1_batch_timestamp < l1_batch_timestamp, + "Invalid params for L1 batch #{}: Timestamp of previous L1 batch ({prev_l1_batch_timestamp}) >= \ + provisional L1 batch timestamp ({l1_batch_timestamp}), \ + meaning that L1 batch will be rejected by the bootloader", + first_miniblock_in_batch.l1_batch_number + ); + + let prev_miniblock_number = first_miniblock_in_batch.header.number - 1; + tracing::info!("Getting previous miniblock hash for miniblock #{prev_miniblock_number}"); + + let prev_miniblock_hash = self.snapshot.as_ref().and_then(|snapshot| { + (snapshot.miniblock_number == prev_miniblock_number).then_some(snapshot.miniblock_hash) + }); + let prev_miniblock_hash = match prev_miniblock_hash { + Some(hash) => hash, + None => storage + .blocks_web3_dal() + .get_miniblock_hash(prev_miniblock_number) + .await + .context("failed getting hash for previous miniblock")? 
+ .context("previous miniblock disappeared from storage")?, + }; + tracing::info!( + "Got hash for previous miniblock #{prev_miniblock_number}: {prev_miniblock_hash:?}" + ); + + let contract_hashes = first_miniblock_in_batch.header.base_system_contracts_hashes; + let base_system_contracts = storage + .factory_deps_dal() + .get_base_system_contracts(contract_hashes.bootloader, contract_hashes.default_aa) + .await; + + Ok(l1_batch_params( + first_miniblock_in_batch.l1_batch_number, + first_miniblock_in_batch.header.fee_account_address, + l1_batch_timestamp, + prev_l1_batch_hash, + first_miniblock_in_batch.header.batch_fee_input, + first_miniblock_in_batch.header.number, + prev_miniblock_hash, + base_system_contracts, + validation_computational_gas_limit, + first_miniblock_in_batch + .header + .protocol_version + .context("`protocol_version` must be set for miniblock")?, + first_miniblock_in_batch.header.virtual_blocks, + chain_id, + )) + } +} diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs index d18bf5dafd0..525b2a26b43 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs @@ -5,7 +5,7 @@ use assert_matches::assert_matches; use super::*; use crate::{ genesis::{ensure_genesis_state, GenesisParams}, - utils::testonly::{create_miniblock, prepare_empty_recovery_snapshot}, + utils::testonly::{create_miniblock, prepare_recovery_snapshot}, }; #[tokio::test] @@ -67,7 +67,8 @@ async fn creating_block_args() { async fn creating_block_args_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + let snapshot_recovery = + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; let pending_block_args = BlockArgs::pending(&mut storage).await.unwrap(); assert_eq!( diff --git a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs index 55c6852cd4a..8cb37e42633 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs @@ -1,6 +1,6 @@ //! Tests for the transaction sender. 
-use zksync_types::{get_nonce_key, StorageLog}; +use zksync_types::{get_nonce_key, L1BatchNumber, StorageLog}; use super::*; use crate::{ @@ -89,7 +89,7 @@ async fn getting_nonce_for_account() { #[tokio::test] async fn getting_nonce_for_account_after_snapshot_recovery() { - const SNAPSHOT_MINIBLOCK_NUMBER: u32 = 42; + const SNAPSHOT_MINIBLOCK_NUMBER: MiniblockNumber = MiniblockNumber(42); let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); @@ -99,7 +99,13 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { StorageLog::new_write_log(get_nonce_key(&test_address), H256::from_low_u64_be(123)), StorageLog::new_write_log(get_nonce_key(&other_address), H256::from_low_u64_be(25)), ]; - prepare_recovery_snapshot(&mut storage, SNAPSHOT_MINIBLOCK_NUMBER, &nonce_logs).await; + prepare_recovery_snapshot( + &mut storage, + L1BatchNumber(23), + SNAPSHOT_MINIBLOCK_NUMBER, + &nonce_logs, + ) + .await; let l2_chain_id = L2ChainId::default(); let tx_executor = MockTransactionExecutor::default().into(); @@ -115,7 +121,7 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { storage .blocks_dal() - .insert_miniblock(&create_miniblock(SNAPSHOT_MINIBLOCK_NUMBER + 1)) + .insert_miniblock(&create_miniblock(SNAPSHOT_MINIBLOCK_NUMBER.0 + 1)) .await .unwrap(); let new_nonce_logs = vec![StorageLog::new_write_log( @@ -125,7 +131,7 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { storage .storage_logs_dal() .insert_storage_logs( - MiniblockNumber(SNAPSHOT_MINIBLOCK_NUMBER + 1), + SNAPSHOT_MINIBLOCK_NUMBER + 1, &[(H256::default(), new_nonce_logs)], ) .await; diff --git a/core/lib/zksync_core/src/api_server/web3/tests/debug.rs b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs index bf929469b44..e8821d03e69 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs @@ -137,8 +137,7 @@ impl HttpTest for TraceBlockTestWithSnapshotRecovery { } async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { - let snapshot_miniblock_number = - MiniblockNumber(StorageInitialization::SNAPSHOT_RECOVERY_BLOCK); + let snapshot_miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK; let missing_miniblock_numbers = [ MiniblockNumber(0), snapshot_miniblock_number - 1, @@ -150,7 +149,7 @@ impl HttpTest for TraceBlockTestWithSnapshotRecovery { .trace_block_by_number(number.0.into(), None) .await .unwrap_err(); - assert_pruned_block_error(&error, 24); + assert_pruned_block_error(&error, snapshot_miniblock_number + 1); } TraceBlockTest(snapshot_miniblock_number + 1) diff --git a/core/lib/zksync_core/src/api_server/web3/tests/filters.rs b/core/lib/zksync_core/src/api_server/web3/tests/filters.rs index 913437b5e19..2b202be8c02 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/filters.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/filters.rs @@ -30,11 +30,11 @@ impl HttpTest for BasicFilterChangesTest { let new_tx_hash = tx_result.hash; let new_miniblock = store_miniblock( &mut pool.access_storage().await?, - MiniblockNumber(if self.snapshot_recovery { + if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1 } else { - 1 - }), + MiniblockNumber(1) + }, &[tx_result], ) .await?; @@ -116,7 +116,7 @@ impl HttpTest for LogFilterChangesTest { let mut storage = pool.access_storage().await?; let first_local_miniblock = if self.snapshot_recovery { - StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1 + 
StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0 + 1 } else { 1 }; diff --git a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs index 70732d69cfe..4292ab8c0c2 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs @@ -43,7 +43,7 @@ use crate::{ genesis::{ensure_genesis_state, GenesisParams}, utils::testonly::{ create_l1_batch, create_l1_batch_metadata, create_l2_transaction, create_miniblock, - prepare_empty_recovery_snapshot, prepare_recovery_snapshot, + prepare_recovery_snapshot, }, }; @@ -195,7 +195,8 @@ enum StorageInitialization { } impl StorageInitialization { - const SNAPSHOT_RECOVERY_BLOCK: u32 = 23; + const SNAPSHOT_RECOVERY_BATCH: L1BatchNumber = L1BatchNumber(23); + const SNAPSHOT_RECOVERY_BLOCK: MiniblockNumber = MiniblockNumber(23); fn empty_recovery() -> Self { Self::Recovery { @@ -220,17 +221,17 @@ impl StorageInitialization { .await?; } } - Self::Recovery { logs, factory_deps } if logs.is_empty() && factory_deps.is_empty() => { - prepare_empty_recovery_snapshot(storage, Self::SNAPSHOT_RECOVERY_BLOCK).await; - } Self::Recovery { logs, factory_deps } => { - prepare_recovery_snapshot(storage, Self::SNAPSHOT_RECOVERY_BLOCK, logs).await; + prepare_recovery_snapshot( + storage, + Self::SNAPSHOT_RECOVERY_BATCH, + Self::SNAPSHOT_RECOVERY_BLOCK, + logs, + ) + .await; storage .factory_deps_dal() - .insert_factory_deps( - MiniblockNumber(Self::SNAPSHOT_RECOVERY_BLOCK), - factory_deps, - ) + .insert_factory_deps(Self::SNAPSHOT_RECOVERY_BLOCK, factory_deps) .await?; } } @@ -460,17 +461,17 @@ impl HttpTest for BlockMethodsWithSnapshotRecovery { let block_number = client.get_block_number().await?; let expected_block_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - assert_eq!(block_number, expected_block_number.into()); + assert_eq!(block_number, expected_block_number.0.into()); - for block_number in [api::BlockNumber::Latest, expected_block_number.into()] { + for block_number in [api::BlockNumber::Latest, expected_block_number.0.into()] { let block = client .get_block_by_number(block_number, false) .await? 
.context("no latest block")?; - assert_eq!(block.number, expected_block_number.into()); + assert_eq!(block.number, expected_block_number.0.into()); } - for number in [0, 1, expected_block_number - 1] { + for number in [0, 1, expected_block_number.0 - 1] { let error = client .get_block_details(MiniblockNumber(number)) .await @@ -498,7 +499,7 @@ impl HttpTest for BlockMethodsWithSnapshotRecovery { } } -fn assert_pruned_block_error(error: &ClientError, first_retained_block: u32) { +fn assert_pruned_block_error(error: &ClientError, first_retained_block: MiniblockNumber) { if let ClientError::Call(error) = error { assert_eq!(error.code(), ErrorCode::InvalidParams.code()); assert!( @@ -537,58 +538,58 @@ impl HttpTest for L1BatchMethodsWithSnapshotRecovery { let mut storage = pool.access_storage().await?; let miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - store_miniblock(&mut storage, MiniblockNumber(miniblock_number), &[]).await?; - seal_l1_batch(&mut storage, L1BatchNumber(miniblock_number)).await?; + let l1_batch_number = StorageInitialization::SNAPSHOT_RECOVERY_BATCH + 1; + store_miniblock(&mut storage, miniblock_number, &[]).await?; + seal_l1_batch(&mut storage, l1_batch_number).await?; drop(storage); - let l1_batch_number = client.get_l1_batch_number().await?; - assert_eq!(l1_batch_number, miniblock_number.into()); + assert_eq!( + client.get_l1_batch_number().await?, + l1_batch_number.0.into() + ); // `get_miniblock_range` method let miniblock_range = client - .get_miniblock_range(L1BatchNumber(miniblock_number)) + .get_miniblock_range(l1_batch_number) .await? .context("no range for sealed L1 batch")?; - assert_eq!(miniblock_range.0, miniblock_number.into()); - assert_eq!(miniblock_range.1, miniblock_number.into()); + assert_eq!(miniblock_range.0, miniblock_number.0.into()); + assert_eq!(miniblock_range.1, miniblock_number.0.into()); - let miniblock_range_for_future_batch = client - .get_miniblock_range(L1BatchNumber(miniblock_number) + 1) - .await?; + let miniblock_range_for_future_batch = + client.get_miniblock_range(l1_batch_number + 1).await?; assert_eq!(miniblock_range_for_future_batch, None); let error = client - .get_miniblock_range(L1BatchNumber(miniblock_number) - 1) + .get_miniblock_range(l1_batch_number - 1) .await .unwrap_err(); - assert_pruned_l1_batch_error(&error, miniblock_number); + assert_pruned_l1_batch_error(&error, l1_batch_number); // `get_l1_batch_details` method let details = client - .get_l1_batch_details(L1BatchNumber(miniblock_number)) + .get_l1_batch_details(l1_batch_number) .await? 
.context("no details for sealed L1 batch")?; - assert_eq!(details.number, L1BatchNumber(miniblock_number)); + assert_eq!(details.number, l1_batch_number); - let details_for_future_batch = client - .get_l1_batch_details(L1BatchNumber(miniblock_number) + 1) - .await?; + let details_for_future_batch = client.get_l1_batch_details(l1_batch_number + 1).await?; assert!( details_for_future_batch.is_none(), "{details_for_future_batch:?}" ); let error = client - .get_l1_batch_details(L1BatchNumber(miniblock_number) - 1) + .get_l1_batch_details(l1_batch_number - 1) .await .unwrap_err(); - assert_pruned_l1_batch_error(&error, miniblock_number); + assert_pruned_l1_batch_error(&error, l1_batch_number); Ok(()) } } -fn assert_pruned_l1_batch_error(error: &ClientError, first_retained_l1_batch: u32) { +fn assert_pruned_l1_batch_error(error: &ClientError, first_retained_l1_batch: L1BatchNumber) { if let ClientError::Call(error) = error { assert_eq!(error.code(), ErrorCode::InvalidParams.code()); assert!( @@ -635,7 +636,7 @@ impl HttpTest for StorageAccessWithSnapshotRecovery { let address = Address::repeat_byte(1); let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - for number in [0, 1, first_local_miniblock - 1] { + for number in [0, 1, first_local_miniblock.0 - 1] { let number = api::BlockIdVariant::BlockNumber(number.into()); let error = client.get_code(address, Some(number)).await.unwrap_err(); assert_pruned_block_error(&error, first_local_miniblock); @@ -645,13 +646,13 @@ impl HttpTest for StorageAccessWithSnapshotRecovery { .get_storage_at(address, 0.into(), Some(number)) .await .unwrap_err(); - assert_pruned_block_error(&error, 24); + assert_pruned_block_error(&error, first_local_miniblock); } - store_miniblock(&mut storage, MiniblockNumber(first_local_miniblock), &[]).await?; + store_miniblock(&mut storage, first_local_miniblock, &[]).await?; drop(storage); - for number in [api::BlockNumber::Latest, first_local_miniblock.into()] { + for number in [api::BlockNumber::Latest, first_local_miniblock.0.into()] { let number = api::BlockIdVariant::BlockNumber(number); let code = client.get_code(address, Some(number)).await?; assert_eq!(code.0, b"code"); @@ -793,7 +794,7 @@ impl HttpTest for TransactionCountAfterSnapshotRecoveryTest { let pruned_block_numbers = [ api::BlockNumber::Earliest, 0.into(), - StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.into(), + StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0.into(), ]; for number in pruned_block_numbers { let number = api::BlockIdVariant::BlockNumber(number); @@ -805,9 +806,9 @@ impl HttpTest for TransactionCountAfterSnapshotRecoveryTest { } let latest_miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - store_miniblock(&mut storage, MiniblockNumber(latest_miniblock_number), &[]).await?; + store_miniblock(&mut storage, latest_miniblock_number, &[]).await?; - let latest_block_numbers = [api::BlockNumber::Latest, latest_miniblock_number.into()]; + let latest_block_numbers = [api::BlockNumber::Latest, latest_miniblock_number.0.into()]; for number in latest_block_numbers { let number = api::BlockIdVariant::BlockNumber(number); let latest_count = client diff --git a/core/lib/zksync_core/src/api_server/web3/tests/vm.rs b/core/lib/zksync_core/src/api_server/web3/tests/vm.rs index b685e9fc016..060c5cafeee 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/vm.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/vm.rs @@ -96,7 +96,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { assert_eq!(call_result.0, 
b"output"); let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - let first_miniblock_numbers = [api::BlockNumber::Latest, first_local_miniblock.into()]; + let first_miniblock_numbers = [api::BlockNumber::Latest, first_local_miniblock.0.into()]; for number in first_miniblock_numbers { let number = api::BlockIdVariant::BlockNumber(number); let error = client @@ -110,7 +110,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { } } - let pruned_block_numbers = [0, 1, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK]; + let pruned_block_numbers = [0, 1, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0]; for number in pruned_block_numbers { let number = api::BlockIdVariant::BlockNumber(number.into()); let error = client @@ -121,7 +121,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { } let mut storage = pool.access_storage().await?; - store_miniblock(&mut storage, MiniblockNumber(first_local_miniblock), &[]).await?; + store_miniblock(&mut storage, first_local_miniblock, &[]).await?; drop(storage); for number in first_miniblock_numbers { diff --git a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs index a368854f9e7..818a3d34564 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs @@ -98,8 +98,13 @@ async fn wait_for_notifier_miniblock( async fn notifiers_start_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - prepare_empty_recovery_snapshot(&mut storage, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK) - .await; + prepare_recovery_snapshot( + &mut storage, + StorageInitialization::SNAPSHOT_RECOVERY_BATCH, + StorageInitialization::SNAPSHOT_RECOVERY_BLOCK, + &[], + ) + .await; let (stop_sender, stop_receiver) = watch::channel(false); let (events_sender, mut events_receiver) = mpsc::unbounded_channel(); @@ -116,7 +121,7 @@ async fn notifiers_start_after_snapshot_recovery() { } // Emulate creating the first miniblock; check that notifiers react to it. 
- let first_local_miniblock = MiniblockNumber(StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1); + let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; store_miniblock(&mut storage, first_local_miniblock, &[]) .await .unwrap(); @@ -261,11 +266,11 @@ impl WsTest for BasicSubscriptionsTest { let mut storage = pool.access_storage().await?; let tx_result = execute_l2_transaction(create_l2_transaction(1, 2)); let new_tx_hash = tx_result.hash; - let miniblock_number = MiniblockNumber(if self.snapshot_recovery { + let miniblock_number = if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1 } else { - 1 - }); + MiniblockNumber(1) + }; let new_miniblock = store_miniblock(&mut storage, miniblock_number, &[tx_result]).await?; drop(storage); @@ -385,7 +390,7 @@ impl WsTest for LogSubscriptionsTest { let mut storage = pool.access_storage().await?; let miniblock_number = if self.snapshot_recovery { - StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1 + StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0 + 1 } else { 1 }; diff --git a/core/lib/zksync_core/src/consensus/storage/mod.rs b/core/lib/zksync_core/src/consensus/storage/mod.rs index bac1c54f3e6..113eff97672 100644 --- a/core/lib/zksync_core/src/consensus/storage/mod.rs +++ b/core/lib/zksync_core/src/consensus/storage/mod.rs @@ -11,9 +11,9 @@ use zksync_types::MiniblockNumber; #[cfg(test)] mod testonly; -use crate::sync_layer::{ - fetcher::{FetchedBlock, FetcherCursor}, - sync_action::ActionQueueSender, +use crate::{ + state_keeper::io::common::IoCursor, + sync_layer::{fetcher::FetchedBlock, sync_action::ActionQueueSender}, }; /// Context-aware `zksync_dal::StorageProcessor` wrapper. @@ -128,14 +128,14 @@ impl<'a> CtxStorage<'a> { } /// Wrapper for `FetcherCursor::new()`. - pub async fn new_fetcher_cursor(&mut self, ctx: &ctx::Ctx) -> ctx::Result { - Ok(ctx.wait(FetcherCursor::new(&mut self.0)).await??) + pub async fn new_fetcher_cursor(&mut self, ctx: &ctx::Ctx) -> ctx::Result { + Ok(ctx.wait(IoCursor::for_fetcher(&mut self.0)).await??) } } #[derive(Debug)] struct Cursor { - inner: FetcherCursor, + inner: IoCursor, actions: ActionQueueSender, } diff --git a/core/lib/zksync_core/src/consensus/testonly.rs b/core/lib/zksync_core/src/consensus/testonly.rs index 4dd9202297b..0168a58194b 100644 --- a/core/lib/zksync_core/src/consensus/testonly.rs +++ b/core/lib/zksync_core/src/consensus/testonly.rs @@ -1,4 +1,7 @@ //! Utilities for testing the consensus module. 
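+//!
+//! `MockMainNodeClient` can be seeded from a `SnapshotRecoveryStatus` (see
+//! `for_snapshot_recovery()`), which lets tests exercise node initialization without any
+//! pre-snapshot blocks in storage.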
+ +use std::collections::HashMap; + use anyhow::Context as _; use rand::{ distributions::{Distribution, Standard}, @@ -9,8 +12,8 @@ use zksync_consensus_roles::{node, validator}; use zksync_contracts::{BaseSystemContractsHashes, SystemContractCode}; use zksync_dal::ConnectionPool; use zksync_types::{ - api, block::MiniblockHasher, Address, L1BatchNumber, L2ChainId, MiniblockNumber, - ProtocolVersionId, H256, + api, block::MiniblockHasher, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, + L2ChainId, MiniblockNumber, ProtocolVersionId, H256, U256, }; use crate::{ @@ -59,9 +62,38 @@ impl Distribution for Standard { pub(crate) struct MockMainNodeClient { prev_miniblock_hash: H256, l2_blocks: Vec, + block_number_offset: u32, + protocol_versions: HashMap, + system_contracts: HashMap>, } impl MockMainNodeClient { + pub fn for_snapshot_recovery(snapshot: &SnapshotRecoveryStatus) -> Self { + // This block may be requested during node initialization + let last_miniblock_in_snapshot_batch = api::en::SyncBlock { + number: snapshot.miniblock_number, + l1_batch_number: snapshot.l1_batch_number, + last_in_batch: true, + timestamp: snapshot.miniblock_timestamp, + l1_gas_price: 2, + l2_fair_gas_price: 3, + fair_pubdata_price: Some(24), + base_system_contracts_hashes: BaseSystemContractsHashes::default(), + operator_address: Address::repeat_byte(2), + transactions: Some(vec![]), + virtual_blocks: Some(0), + hash: Some(snapshot.miniblock_hash), + protocol_version: ProtocolVersionId::latest(), + }; + + Self { + prev_miniblock_hash: snapshot.miniblock_hash, + l2_blocks: vec![last_miniblock_in_snapshot_batch], + block_number_offset: snapshot.miniblock_number.0, + ..Self::default() + } + } + /// `miniblock_count` doesn't include a fictive miniblock. Returns hashes of generated transactions. 
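+    ///
+    /// A minimal usage sketch (the returned hashes are those of the generated transactions):
+    ///
+    /// ```ignore
+    /// let tx_hashes = client.push_l1_batch(2); // 2 miniblocks + 1 fictive miniblock
+    /// ```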
pub fn push_l1_batch(&mut self, miniblock_count: u32) -> Vec { let l1_batch_number = self @@ -115,15 +147,28 @@ impl MockMainNodeClient { self.l2_blocks.extend(l2_blocks); tx_hashes } + + pub fn insert_protocol_version(&mut self, version: api::ProtocolVersion) { + self.system_contracts + .insert(version.base_system_contracts.bootloader, vec![]); + self.system_contracts + .insert(version.base_system_contracts.default_aa, vec![]); + self.protocol_versions.insert(version.version_id, version); + } } #[async_trait::async_trait] impl MainNodeClient for MockMainNodeClient { async fn fetch_system_contract_by_hash( &self, - _hash: H256, + hash: H256, ) -> anyhow::Result { - anyhow::bail!("Not implemented"); + let code = self + .system_contracts + .get(&hash) + .cloned() + .with_context(|| format!("requested unexpected system contract {hash:?}"))?; + Ok(SystemContractCode { hash, code }) } async fn fetch_genesis_contract_bytecode( @@ -135,9 +180,13 @@ impl MainNodeClient for MockMainNodeClient { async fn fetch_protocol_version( &self, - _protocol_version: ProtocolVersionId, + protocol_version: ProtocolVersionId, ) -> anyhow::Result { - anyhow::bail!("Not implemented"); + let protocol_version = protocol_version as u16; + self.protocol_versions + .get(&protocol_version) + .cloned() + .with_context(|| format!("requested unexpected protocol version {protocol_version}")) } async fn fetch_genesis_l1_batch_hash(&self) -> anyhow::Result { @@ -157,7 +206,10 @@ impl MainNodeClient for MockMainNodeClient { number: MiniblockNumber, with_transactions: bool, ) -> anyhow::Result> { - let Some(mut block) = self.l2_blocks.get(number.0 as usize).cloned() else { + let Some(block_index) = number.0.checked_sub(self.block_number_offset) else { + return Ok(None); + }; + let Some(mut block) = self.l2_blocks.get(block_index as usize).cloned() else { return Ok(None); }; if !with_transactions { @@ -385,7 +437,7 @@ impl StateKeeperRunner { u32::MAX, L2ChainId::default(), ) - .await; + .await?; s.spawn_bg(miniblock_sealer.run()); s.spawn_bg(run_mock_metadata_calculator(ctx, &self.pool)); s.spawn_bg( diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs index 5d1d37deeab..78d8bb65f4e 100644 --- a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs @@ -12,7 +12,7 @@ use zksync_config::configs::{ }; use zksync_health_check::{CheckHealth, HealthStatus, ReactiveHealthCheck}; use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction}; -use zksync_types::{L1BatchNumber, L2ChainId, StorageLog}; +use zksync_types::{L1BatchNumber, L2ChainId, ProtocolVersionId, StorageLog}; use super::*; use crate::{ @@ -121,9 +121,12 @@ async fn prepare_recovery_snapshot_with_genesis( SnapshotRecoveryStatus { l1_batch_number: L1BatchNumber(1), + l1_batch_timestamp: 1, l1_batch_root_hash, miniblock_number: MiniblockNumber(1), - miniblock_root_hash: H256::zero(), // not used + miniblock_timestamp: 1, + miniblock_hash: H256::zero(), // not used + protocol_version: ProtocolVersionId::latest(), storage_logs_chunks_processed: vec![], } } @@ -239,7 +242,13 @@ async fn entire_recovery_workflow(case: RecoveryWorkflowCase) { // Emulate the recovered view of Postgres. Unlike with previous tests, we don't perform genesis. 
let snapshot_logs = gen_storage_logs(100..300, 1).pop().unwrap(); let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &snapshot_logs).await; + let snapshot_recovery = prepare_recovery_snapshot( + &mut storage, + L1BatchNumber(23), + MiniblockNumber(42), + &snapshot_logs, + ) + .await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let merkle_tree_config = MerkleTreeConfig { diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs index 362afe20437..9e7caa6f575 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs @@ -1,11 +1,11 @@ use assert_matches::assert_matches; +use test_casing::test_casing; use zksync_dal::ConnectionPool; use zksync_test_account::Account; -use zksync_types::PriorityOpId; +use zksync_types::{get_nonce_key, utils::storage_key_for_eth_balance, PriorityOpId}; -use self::tester::Tester; +use self::tester::{AccountLoadNextExecutable, StorageSnapshot, TestConfig, Tester}; use super::TxExecutionResult; -use crate::state_keeper::batch_executor::tests::tester::{AccountLoadNextExecutable, TestConfig}; mod tester; @@ -34,9 +34,7 @@ fn assert_reverted(execution_result: &TxExecutionResult) { async fn execute_l2_tx() { let connection_pool = ConnectionPool::test_pool().await; let mut alice = Account::random(); - let tester = Tester::new(connection_pool); - tester.genesis().await; tester.fund(&[alice.address()]).await; let executor = tester.create_batch_executor().await; @@ -46,6 +44,58 @@ async fn execute_l2_tx() { executor.finish_batch().await; } +#[derive(Debug, Clone, Copy)] +enum SnapshotRecoveryMutation { + RemoveNonce, + RemoveBalance, +} + +impl SnapshotRecoveryMutation { + const ALL: [Option; 3] = [None, Some(Self::RemoveNonce), Some(Self::RemoveBalance)]; + + fn mutate_snapshot(self, storage_snapshot: &mut StorageSnapshot, alice: &Account) { + match self { + Self::RemoveNonce => { + let nonce_key = get_nonce_key(&alice.address()); + let nonce_value = storage_snapshot.storage_logs.remove(&nonce_key); + assert!(nonce_value.is_some()); + } + Self::RemoveBalance => { + let balance_key = storage_key_for_eth_balance(&alice.address()); + let balance_value = storage_snapshot.storage_logs.remove(&balance_key); + assert!(balance_value.is_some()); + } + } + } +} + +/// Tests that we can continue executing account transactions after emulating snapshot recovery. +/// Test cases with a set `mutation` ensure that the VM executor correctly detects missing data (e.g., dropped account nonce). 
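+/// Without a mutation, the transaction is expected to execute successfully and the batch to be
+/// finished; with a mutation, the transaction must be rejected by the executor.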
+#[test_casing(3, SnapshotRecoveryMutation::ALL)] +#[tokio::test] +async fn execute_l2_tx_after_snapshot_recovery(mutation: Option) { + let mut alice = Account::random(); + let connection_pool = ConnectionPool::test_pool().await; + + let mut storage_snapshot = StorageSnapshot::new(&connection_pool, &mut alice, 10).await; + assert!(storage_snapshot.storage_logs.len() > 10); // sanity check + assert!(!storage_snapshot.factory_deps.is_empty()); + if let Some(mutation) = mutation { + mutation.mutate_snapshot(&mut storage_snapshot, &alice); + } + let snapshot = storage_snapshot.recover(&connection_pool).await; + + let tester = Tester::new(connection_pool); + let executor = tester.recover_batch_executor(&snapshot).await; + let res = executor.execute_tx(alice.execute()).await; + if mutation.is_none() { + assert_executed(&res); + executor.finish_batch().await; + } else { + assert_rejected(&res); + } +} + /// Checks that we can successfully execute a single L1 tx in batch executor. #[tokio::test] async fn execute_l1_tx() { diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index c1ad81e1127..6a7e7c4bd14 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -1,8 +1,10 @@ //! Testing harness for the batch executor. //! Contains helper functionality to initialize test context and perform tests without too much boilerplate. +use std::collections::HashMap; + use multivm::{ - interface::{L1BatchEnv, SystemEnv}, + interface::{L1BatchEnv, L2BlockEnv, SystemEnv}, vm_latest::constants::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use tempfile::TempDir; @@ -12,20 +14,23 @@ use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractEx use zksync_dal::ConnectionPool; use zksync_test_account::{Account, DeployContractsTx, TxType}; use zksync_types::{ - ethabi::Token, fee::Fee, system_contracts::get_system_smart_contracts, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, L1BatchNumber, - L2ChainId, MiniblockNumber, PriorityOpId, ProtocolVersionId, StorageLog, Transaction, H256, - L2_ETH_TOKEN_ADDRESS, SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, + block::MiniblockHasher, ethabi::Token, fee::Fee, snapshots::SnapshotRecoveryStatus, + storage_writes_deduplicator::StorageWritesDeduplicator, + system_contracts::get_system_smart_contracts, utils::storage_key_for_standard_token_balance, + AccountTreeId, Address, Execute, L1BatchNumber, L2ChainId, MiniblockNumber, PriorityOpId, + ProtocolVersionId, StorageKey, StorageLog, Transaction, H256, L2_ETH_TOKEN_ADDRESS, + SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, }; use zksync_utils::u256_to_h256; use crate::{ genesis::create_genesis_l1_batch, state_keeper::{ - batch_executor::BatchExecutorHandle, + batch_executor::{BatchExecutorHandle, TxExecutionResult}, tests::{default_l1_batch_env, default_system_env, BASE_SYSTEM_CONTRACTS}, L1BatchExecutorBuilder, MainBatchExecutorBuilder, }, + utils::testonly::prepare_recovery_snapshot, }; const DEFAULT_GAS_PER_PUBDATA: u32 = 10000; @@ -87,6 +92,17 @@ impl Tester { /// Creates a batch executor instance. /// This function intentionally uses sensible defaults to not introduce boilerplate. pub(super) async fn create_batch_executor(&self) -> BatchExecutorHandle { + // Not really important for the batch executor - it operates over a single batch. 
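+        // (Hence the fixed batch number / timestamp below; snapshot recovery tests instead
+        // derive these values from the snapshot via `recover_batch_executor()`.)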
+ let (l1_batch_env, system_env) = self.batch_params(L1BatchNumber(1), 100); + self.create_batch_executor_inner(l1_batch_env, system_env) + .await + } + + async fn create_batch_executor_inner( + &self, + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + ) -> BatchExecutorHandle { let mut builder = MainBatchExecutorBuilder::new( self.db_dir.path().to_str().unwrap().to_owned(), self.pool.clone(), @@ -96,13 +112,6 @@ impl Tester { 100, false, ); - - // Not really important for the batch executor - it operates over a single batch. - let (l1_batch_env, system_env) = self.batch_params( - L1BatchNumber(1), - 100, - self.config.validation_computational_gas_limit, - ); let (_stop_sender, stop_receiver) = watch::channel(false); builder .init_batch(l1_batch_env, system_env, &stop_receiver) @@ -110,19 +119,37 @@ impl Tester { .expect("Batch executor was interrupted") } + pub(super) async fn recover_batch_executor( + &self, + snapshot: &SnapshotRecoveryStatus, + ) -> BatchExecutorHandle { + let current_timestamp = snapshot.miniblock_timestamp + 1; + let (mut l1_batch_env, system_env) = + self.batch_params(snapshot.l1_batch_number + 1, current_timestamp); + l1_batch_env.previous_batch_hash = Some(snapshot.l1_batch_root_hash); + l1_batch_env.first_l2_block = L2BlockEnv { + number: snapshot.miniblock_number.0 + 1, + timestamp: current_timestamp, + prev_block_hash: snapshot.miniblock_hash, + max_virtual_blocks_to_create: 1, + }; + + self.create_batch_executor_inner(l1_batch_env, system_env) + .await + } + /// Creates test batch params that can be fed into the VM. fn batch_params( &self, l1_batch_number: L1BatchNumber, timestamp: u64, - validation_computational_gas_limit: u32, ) -> (L1BatchEnv, SystemEnv) { let mut system_params = default_system_env(); if let Some(vm_gas_limit) = self.config.vm_gas_limit { system_params.gas_limit = vm_gas_limit; } system_params.default_validation_computational_gas_limit = - validation_computational_gas_limit; + self.config.validation_computational_gas_limit; let mut batch_params = default_l1_batch_env(l1_batch_number.0, timestamp, self.fee_account); batch_params.previous_batch_hash = Some(H256::zero()); // Not important in this context. (batch_params, system_params) @@ -326,11 +353,136 @@ fn fee(gas_limit: u32) -> Fee { pub fn mock_loadnext_gas_burn_calldata(gas: u32) -> Vec { let loadnext_contract = get_loadnext_contract(); - let contract_function = loadnext_contract.contract.function("burnGas").unwrap(); - let params = vec![Token::Uint(U256::from(gas))]; contract_function .encode_input(¶ms) .expect("failed to encode parameters") } + +/// Concise representation of a storage snapshot for testing recovery. +#[derive(Debug)] +pub(super) struct StorageSnapshot { + pub miniblock_number: MiniblockNumber, + pub miniblock_hash: H256, + pub miniblock_timestamp: u64, + pub storage_logs: HashMap, + pub factory_deps: HashMap>, +} + +impl StorageSnapshot { + /// Generates a new snapshot by executing the specified number of transactions, each in a separate miniblock. 
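+    ///
+    /// A minimal usage sketch, mirroring `execute_l2_tx_after_snapshot_recovery`:
+    ///
+    /// ```ignore
+    /// let pool = ConnectionPool::test_pool().await;
+    /// let mut alice = Account::random();
+    /// let snapshot = StorageSnapshot::new(&pool, &mut alice, 10).await;
+    /// let status = snapshot.recover(&pool).await;
+    /// // Postgres now looks like a node recovered from a snapshot at `status.miniblock_number`.
+    /// ```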
+ pub async fn new( + connection_pool: &ConnectionPool, + alice: &mut Account, + transaction_count: u32, + ) -> Self { + let tester = Tester::new(connection_pool.clone()); + tester.genesis().await; + tester.fund(&[alice.address()]).await; + + let mut storage = connection_pool.access_storage().await.unwrap(); + let all_logs = storage + .snapshots_creator_dal() + .get_storage_logs_chunk(MiniblockNumber(0), H256::zero()..=H256::repeat_byte(0xff)) + .await + .unwrap(); + let factory_deps = storage + .snapshots_creator_dal() + .get_all_factory_deps(MiniblockNumber(0)) + .await + .unwrap(); + let mut all_logs: HashMap<_, _> = all_logs + .into_iter() + .map(|log| (log.key, log.value)) + .collect(); + + let executor = tester.create_batch_executor().await; + let mut l2_block_env = L2BlockEnv { + number: 1, + prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), + timestamp: 100, + max_virtual_blocks_to_create: 1, + }; + let mut storage_writes_deduplicator = StorageWritesDeduplicator::new(); + + for _ in 0..transaction_count { + let tx = alice.execute(); + let tx_hash = tx.hash(); // probably incorrect + let res = executor.execute_tx(tx).await; + if let TxExecutionResult::Success { tx_result, .. } = res { + let storage_logs = &tx_result.logs.storage_logs; + storage_writes_deduplicator + .apply(storage_logs.iter().filter(|log| log.log_query.rw_flag)); + } else { + panic!("Unexpected tx execution result: {res:?}"); + }; + + let mut hasher = MiniblockHasher::new( + MiniblockNumber(l2_block_env.number), + l2_block_env.timestamp, + l2_block_env.prev_block_hash, + ); + hasher.push_tx_hash(tx_hash); + + l2_block_env.number += 1; + l2_block_env.timestamp += 1; + l2_block_env.prev_block_hash = hasher.finalize(ProtocolVersionId::latest()); + executor.start_next_miniblock(l2_block_env).await; + } + + let (finished_batch, _) = executor.finish_batch().await; + let storage_logs = &finished_batch.block_tip_execution_result.logs.storage_logs; + storage_writes_deduplicator.apply(storage_logs.iter().filter(|log| log.log_query.rw_flag)); + let modified_entries = storage_writes_deduplicator.into_modified_key_values(); + all_logs.extend( + modified_entries + .into_iter() + .map(|(key, slot)| (key, u256_to_h256(slot.value))), + ); + + // Compute the hash of the last (fictive) miniblock in the batch. + let miniblock_hash = MiniblockHasher::new( + MiniblockNumber(l2_block_env.number), + l2_block_env.timestamp, + l2_block_env.prev_block_hash, + ) + .finalize(ProtocolVersionId::latest()); + + storage.blocks_dal().delete_genesis().await.unwrap(); + Self { + miniblock_number: MiniblockNumber(l2_block_env.number), + miniblock_timestamp: l2_block_env.timestamp, + miniblock_hash, + storage_logs: all_logs, + factory_deps: factory_deps.into_iter().collect(), + } + } + + /// Recovers storage from this snapshot. 
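+    /// Persists the logs and factory deps via `prepare_recovery_snapshot` and returns a
+    /// `SnapshotRecoveryStatus` whose miniblock hash and timestamp are patched to match the
+    /// generated chain.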
+ pub async fn recover(self, connection_pool: &ConnectionPool) -> SnapshotRecoveryStatus { + let snapshot_logs: Vec<_> = self + .storage_logs + .into_iter() + .map(|(key, value)| StorageLog::new_write_log(key, value)) + .collect(); + let mut storage = connection_pool.access_storage().await.unwrap(); + let mut snapshot = prepare_recovery_snapshot( + &mut storage, + L1BatchNumber(1), + self.miniblock_number, + &snapshot_logs, + ) + .await; + + snapshot.miniblock_hash = self.miniblock_hash; + snapshot.miniblock_timestamp = self.miniblock_timestamp; + + storage + .factory_deps_dal() + .insert_factory_deps(snapshot.miniblock_number, &self.factory_deps) + .await + .unwrap(); + snapshot + } +} diff --git a/core/lib/zksync_core/src/state_keeper/io/common.rs b/core/lib/zksync_core/src/state_keeper/io/common.rs deleted file mode 100644 index 54632c468ab..00000000000 --- a/core/lib/zksync_core/src/state_keeper/io/common.rs +++ /dev/null @@ -1,113 +0,0 @@ -use std::time::Duration; - -use multivm::{ - interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, - vm_latest::constants::BLOCK_GAS_LIMIT, -}; -use vm_utils::storage::load_l1_batch_params; -use zksync_contracts::BaseSystemContracts; -use zksync_dal::StorageProcessor; -use zksync_types::{ - fee_model::BatchFeeInput, Address, L1BatchNumber, L2ChainId, MiniblockNumber, - ProtocolVersionId, H256, U256, ZKPORTER_IS_AVAILABLE, -}; -use zksync_utils::u256_to_h256; - -use super::PendingBatchData; - -/// Returns the parameters required to initialize the VM for the next L1 batch. -#[allow(clippy::too_many_arguments)] -pub(crate) fn l1_batch_params( - current_l1_batch_number: L1BatchNumber, - fee_account: Address, - l1_batch_timestamp: u64, - previous_batch_hash: U256, - fee_input: BatchFeeInput, - first_miniblock_number: MiniblockNumber, - prev_miniblock_hash: H256, - base_system_contracts: BaseSystemContracts, - validation_computational_gas_limit: u32, - protocol_version: ProtocolVersionId, - virtual_blocks: u32, - chain_id: L2ChainId, -) -> (SystemEnv, L1BatchEnv) { - ( - SystemEnv { - zk_porter_available: ZKPORTER_IS_AVAILABLE, - version: protocol_version, - base_system_smart_contracts: base_system_contracts, - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: validation_computational_gas_limit, - chain_id, - }, - L1BatchEnv { - previous_batch_hash: Some(u256_to_h256(previous_batch_hash)), - number: current_l1_batch_number, - timestamp: l1_batch_timestamp, - fee_input, - fee_account, - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: first_miniblock_number.0, - timestamp: l1_batch_timestamp, - prev_block_hash: prev_miniblock_hash, - max_virtual_blocks_to_create: virtual_blocks, - }, - }, - ) -} - -/// Returns the amount of iterations `delay_interval` fits into `max_wait`, rounding up. -pub(crate) fn poll_iters(delay_interval: Duration, max_wait: Duration) -> usize { - let max_wait_millis = max_wait.as_millis() as u64; - let delay_interval_millis = delay_interval.as_millis() as u64; - assert!(delay_interval_millis > 0, "delay interval must be positive"); - - ((max_wait_millis + delay_interval_millis - 1) / delay_interval_millis).max(1) as usize -} - -/// Loads the pending L1 block data from the database. 
-pub(crate) async fn load_pending_batch( - storage: &mut StorageProcessor<'_>, - current_l1_batch_number: L1BatchNumber, - fee_account: Address, - validation_computational_gas_limit: u32, - chain_id: L2ChainId, -) -> Option { - let (system_env, l1_batch_env) = load_l1_batch_params( - storage, - current_l1_batch_number, - fee_account, - validation_computational_gas_limit, - chain_id, - ) - .await?; - - let pending_miniblocks = storage - .transactions_dal() - .get_miniblocks_to_reexecute() - .await - .unwrap(); - - Some(PendingBatchData { - l1_batch_env, - system_env, - pending_miniblocks, - }) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - #[rustfmt::skip] // One-line formatting looks better here. - fn test_poll_iters() { - assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(0)), 1); - assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(100)), 1); - assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(101)), 2); - assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(200)), 2); - assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(201)), 3); - } -} diff --git a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs new file mode 100644 index 00000000000..64001d7d502 --- /dev/null +++ b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs @@ -0,0 +1,116 @@ +use std::time::Duration; + +use anyhow::Context; +use multivm::interface::{L1BatchEnv, SystemEnv}; +use zksync_dal::StorageProcessor; +use zksync_types::{L1BatchNumber, MiniblockNumber, H256}; + +use super::PendingBatchData; + +#[cfg(test)] +mod tests; + +/// Returns the amount of iterations `delay_interval` fits into `max_wait`, rounding up. +pub(crate) fn poll_iters(delay_interval: Duration, max_wait: Duration) -> usize { + let max_wait_millis = max_wait.as_millis() as u64; + let delay_interval_millis = delay_interval.as_millis() as u64; + assert!(delay_interval_millis > 0, "delay interval must be positive"); + + ((max_wait_millis + delay_interval_millis - 1) / delay_interval_millis).max(1) as usize +} + +/// Cursor of the miniblock / L1 batch progress used by [`StateKeeperIO`](super::StateKeeperIO) implementations. +#[derive(Debug)] +pub(crate) struct IoCursor { + pub next_miniblock: MiniblockNumber, + pub prev_miniblock_hash: H256, + pub prev_miniblock_timestamp: u64, + pub l1_batch: L1BatchNumber, +} + +impl IoCursor { + /// Loads the cursor from Postgres. + pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { + let last_sealed_l1_batch_number = storage + .blocks_dal() + .get_sealed_l1_batch_number() + .await + .context("Failed getting sealed L1 batch number")?; + let last_miniblock_header = storage + .blocks_dal() + .get_last_sealed_miniblock_header() + .await + .context("Failed getting sealed miniblock header")?; + + if let (Some(l1_batch_number), Some(miniblock_header)) = + (last_sealed_l1_batch_number, &last_miniblock_header) + { + Ok(Self { + next_miniblock: miniblock_header.number + 1, + prev_miniblock_hash: miniblock_header.hash, + prev_miniblock_timestamp: miniblock_header.timestamp, + l1_batch: l1_batch_number + 1, + }) + } else { + let snapshot_recovery = storage + .snapshot_recovery_dal() + .get_applied_snapshot_status() + .await + .context("Failed getting snapshot recovery info")? 
+ .context("Postgres contains neither blocks nor snapshot recovery info")?; + let l1_batch = + last_sealed_l1_batch_number.unwrap_or(snapshot_recovery.l1_batch_number) + 1; + + let (next_miniblock, prev_miniblock_hash, prev_miniblock_timestamp); + if let Some(miniblock_header) = &last_miniblock_header { + next_miniblock = miniblock_header.number + 1; + prev_miniblock_hash = miniblock_header.hash; + prev_miniblock_timestamp = miniblock_header.timestamp; + } else { + next_miniblock = snapshot_recovery.miniblock_number + 1; + prev_miniblock_hash = snapshot_recovery.miniblock_hash; + prev_miniblock_timestamp = snapshot_recovery.miniblock_timestamp; + } + + Ok(Self { + next_miniblock, + prev_miniblock_hash, + prev_miniblock_timestamp, + l1_batch, + }) + } + } +} + +/// Loads the pending L1 batch data from the database. +/// +/// # Errors +/// +/// Propagates DB errors. Also returns an error if environment doesn't correspond to a pending L1 batch. +pub(crate) async fn load_pending_batch( + storage: &mut StorageProcessor<'_>, + system_env: SystemEnv, + l1_batch_env: L1BatchEnv, +) -> anyhow::Result { + let pending_miniblocks = storage + .transactions_dal() + .get_miniblocks_to_reexecute() + .await + .context("failed loading miniblocks for re-execution")?; + let first_pending_miniblock = pending_miniblocks + .first() + .context("no pending miniblocks; was environment loaded for a correct L1 batch number?")?; + let expected_pending_miniblock_number = MiniblockNumber(l1_batch_env.first_l2_block.number); + anyhow::ensure!( + first_pending_miniblock.number == expected_pending_miniblock_number, + "Invalid `first_miniblock_in_batch` supplied: its L1 batch #{} is not pending; \ + first pending miniblock: {first_pending_miniblock:?}, first miniblock in batch: {:?}", + l1_batch_env.number, + l1_batch_env.first_l2_block + ); + Ok(PendingBatchData { + l1_batch_env, + system_env, + pending_miniblocks, + }) +} diff --git a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs new file mode 100644 index 00000000000..a59f711a576 --- /dev/null +++ b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs @@ -0,0 +1,527 @@ +//! Tests for the common I/O utils. +//! +//! `L1BatchParamsProvider` tests are (temporarily?) here because of `testonly` utils in this crate to create L1 batches, +//! miniblocks, transactions etc. + +use std::{collections::HashMap, ops}; + +use futures::FutureExt; +use vm_utils::storage::L1BatchParamsProvider; +use zksync_contracts::BaseSystemContractsHashes; +use zksync_dal::ConnectionPool; +use zksync_types::{ + block::MiniblockHasher, fee::TransactionExecutionMetrics, L2ChainId, ProtocolVersion, + ProtocolVersionId, +}; + +use super::*; +use crate::{ + genesis::{ensure_genesis_state, GenesisParams}, + utils::testonly::{ + create_l1_batch, create_l2_transaction, create_miniblock, execute_l2_transaction, + prepare_recovery_snapshot, + }, +}; + +#[test] +#[rustfmt::skip] // One-line formatting looks better here. 
+fn test_poll_iters() { + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(0)), 1); + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(100)), 1); + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(101)), 2); + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(200)), 2); + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(201)), 3); +} + +#[tokio::test] +async fn creating_io_cursor_with_genesis() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + + let cursor = IoCursor::new(&mut storage).await.unwrap(); + assert_eq!(cursor.l1_batch, L1BatchNumber(1)); + assert_eq!(cursor.next_miniblock, MiniblockNumber(1)); + assert_eq!(cursor.prev_miniblock_timestamp, 0); + assert_eq!( + cursor.prev_miniblock_hash, + MiniblockHasher::legacy_hash(MiniblockNumber(0)) + ); + + let miniblock = create_miniblock(1); + storage + .blocks_dal() + .insert_miniblock(&miniblock) + .await + .unwrap(); + + let cursor = IoCursor::new(&mut storage).await.unwrap(); + assert_eq!(cursor.l1_batch, L1BatchNumber(1)); + assert_eq!(cursor.next_miniblock, MiniblockNumber(2)); + assert_eq!(cursor.prev_miniblock_timestamp, miniblock.timestamp); + assert_eq!(cursor.prev_miniblock_hash, miniblock.hash); +} + +#[tokio::test] +async fn creating_io_cursor_with_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let snapshot_recovery = + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; + + let cursor = IoCursor::new(&mut storage).await.unwrap(); + assert_eq!(cursor.l1_batch, L1BatchNumber(24)); + assert_eq!( + cursor.next_miniblock, + snapshot_recovery.miniblock_number + 1 + ); + assert_eq!( + cursor.prev_miniblock_timestamp, + snapshot_recovery.miniblock_timestamp + ); + assert_eq!(cursor.prev_miniblock_hash, snapshot_recovery.miniblock_hash); + + // Add a miniblock so that we have miniblocks (but not an L1 batch) in the storage. 
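+    // The cursor should advance past the new miniblock while `l1_batch` still points to the
+    // first batch after the snapshot.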
+ let miniblock = create_miniblock(snapshot_recovery.miniblock_number.0 + 1); + storage + .blocks_dal() + .insert_miniblock(&miniblock) + .await + .unwrap(); + + let cursor = IoCursor::new(&mut storage).await.unwrap(); + assert_eq!(cursor.l1_batch, L1BatchNumber(24)); + assert_eq!(cursor.next_miniblock, miniblock.number + 1); + assert_eq!(cursor.prev_miniblock_timestamp, miniblock.timestamp); + assert_eq!(cursor.prev_miniblock_hash, miniblock.hash); +} + +#[tokio::test] +async fn waiting_for_l1_batch_params_with_genesis() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let genesis_root_hash = + ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let (hash, timestamp) = provider + .wait_for_l1_batch_params(&mut storage, L1BatchNumber(0)) + .await + .unwrap(); + assert_eq!(hash, genesis_root_hash); + assert_eq!(timestamp, 0); + + let new_l1_batch = create_l1_batch(1); + storage + .blocks_dal() + .insert_mock_l1_batch(&new_l1_batch) + .await + .unwrap(); + + let wait_future = provider.wait_for_l1_batch_params(&mut storage, L1BatchNumber(1)); + futures::pin_mut!(wait_future); + tokio::task::yield_now().await; + assert!((&mut wait_future).now_or_never().is_none()); + + let expected_hash = H256::repeat_byte(1); + let mut storage = pool.access_storage().await.unwrap(); + storage + .blocks_dal() + .set_l1_batch_hash(L1BatchNumber(1), expected_hash) + .await + .unwrap(); + let (hash, timestamp) = wait_future.await.unwrap(); + assert_eq!(hash, expected_hash); + assert_eq!(timestamp, new_l1_batch.timestamp); +} + +#[tokio::test] +async fn waiting_for_l1_batch_params_after_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let snapshot_recovery = + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let (hash, timestamp) = provider + .wait_for_l1_batch_params(&mut storage, snapshot_recovery.l1_batch_number) + .await + .unwrap(); + assert_eq!(hash, snapshot_recovery.l1_batch_root_hash); + assert_eq!(timestamp, snapshot_recovery.l1_batch_timestamp); + + for pruned_l1_batch in [0, 1, snapshot_recovery.l1_batch_number.0 - 1] { + assert!(provider + .wait_for_l1_batch_params(&mut storage, L1BatchNumber(pruned_l1_batch)) + .await + .is_err()); + } + + let new_l1_batch = create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1); + storage + .blocks_dal() + .insert_mock_l1_batch(&new_l1_batch) + .await + .unwrap(); + + let wait_future = + provider.wait_for_l1_batch_params(&mut storage, snapshot_recovery.l1_batch_number + 1); + futures::pin_mut!(wait_future); + tokio::task::yield_now().await; + assert!((&mut wait_future).now_or_never().is_none()); + + let expected_hash = H256::repeat_byte(1); + let mut storage = pool.access_storage().await.unwrap(); + storage + .blocks_dal() + .set_l1_batch_hash(new_l1_batch.number, expected_hash) + .await + .unwrap(); + let (hash, timestamp) = wait_future.await.unwrap(); + assert_eq!(hash, expected_hash); + assert_eq!(timestamp, new_l1_batch.timestamp); +} + +#[tokio::test] +async fn getting_first_miniblock_in_batch_with_genesis() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + ensure_genesis_state(&mut storage, L2ChainId::default(), 
&GenesisParams::mock()) + .await + .unwrap(); + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let mut batches_and_miniblocks = HashMap::from([ + (L1BatchNumber(0), Ok(Some(MiniblockNumber(0)))), + (L1BatchNumber(1), Ok(Some(MiniblockNumber(1)))), + (L1BatchNumber(2), Ok(None)), + (L1BatchNumber(100), Ok(None)), + ]); + assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; + + let new_miniblock = create_miniblock(1); + storage + .blocks_dal() + .insert_miniblock(&new_miniblock) + .await + .unwrap(); + let new_miniblock = create_miniblock(2); + storage + .blocks_dal() + .insert_miniblock(&new_miniblock) + .await + .unwrap(); + + assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; + + let new_l1_batch = create_l1_batch(1); + storage + .blocks_dal() + .insert_mock_l1_batch(&new_l1_batch) + .await + .unwrap(); + storage + .blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(new_l1_batch.number) + .await + .unwrap(); + + batches_and_miniblocks.insert(L1BatchNumber(2), Ok(Some(MiniblockNumber(3)))); + assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; +} + +async fn assert_first_miniblock_numbers( + provider: &L1BatchParamsProvider, + storage: &mut StorageProcessor<'_>, + batches_and_miniblocks: &HashMap, ()>>, +) { + for (&batch, &expected_miniblock) in batches_and_miniblocks { + let number = provider + .load_number_of_first_miniblock_in_batch(storage, batch) + .await; + match expected_miniblock { + Ok(expected) => { + assert_eq!( + number.unwrap(), + expected, + "load_number_of_first_miniblock_in_batch({batch})" + ); + } + Err(()) => { + number.unwrap_err(); + } + } + } +} + +#[tokio::test] +async fn getting_first_miniblock_in_batch_after_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let snapshot_recovery = + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let mut batches_and_miniblocks = HashMap::from([ + (L1BatchNumber(1), Err(())), + (snapshot_recovery.l1_batch_number, Err(())), + ( + snapshot_recovery.l1_batch_number + 1, + Ok(Some(snapshot_recovery.miniblock_number + 1)), + ), + (snapshot_recovery.l1_batch_number + 2, Ok(None)), + (L1BatchNumber(100), Ok(None)), + ]); + assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; + + let new_miniblock = create_miniblock(snapshot_recovery.miniblock_number.0 + 1); + storage + .blocks_dal() + .insert_miniblock(&new_miniblock) + .await + .unwrap(); + + assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; + + let new_l1_batch = create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1); + storage + .blocks_dal() + .insert_mock_l1_batch(&new_l1_batch) + .await + .unwrap(); + storage + .blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(new_l1_batch.number) + .await + .unwrap(); + + batches_and_miniblocks.insert( + snapshot_recovery.l1_batch_number + 2, + Ok(Some(new_miniblock.number + 1)), + ); + assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; +} + +#[tokio::test] +async fn loading_pending_batch_with_genesis() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let genesis_params = GenesisParams::mock(); + ensure_genesis_state(&mut storage, 
L2ChainId::default(), &genesis_params) + .await + .unwrap(); + store_pending_miniblocks( + &mut storage, + 1..=2, + genesis_params.base_system_contracts.hashes(), + ) + .await; + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let first_miniblock_in_batch = provider + .load_first_miniblock_in_batch(&mut storage, L1BatchNumber(1)) + .await + .unwrap() + .expect("no first miniblock"); + assert_eq!(first_miniblock_in_batch.number(), MiniblockNumber(1)); + + let (system_env, l1_batch_env) = provider + .load_l1_batch_params( + &mut storage, + &first_miniblock_in_batch, + u32::MAX, + L2ChainId::default(), + ) + .await + .unwrap(); + let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) + .await + .unwrap(); + + assert_eq!(pending_batch.pending_miniblocks.len(), 2); + assert_eq!(pending_batch.l1_batch_env.number, L1BatchNumber(1)); + assert_eq!(pending_batch.l1_batch_env.timestamp, 1); + assert_eq!(pending_batch.l1_batch_env.first_l2_block.number, 1); + assert_eq!(pending_batch.l1_batch_env.first_l2_block.timestamp, 1); + assert_eq!( + pending_batch.l1_batch_env.first_l2_block.prev_block_hash, + MiniblockHasher::legacy_hash(MiniblockNumber(0)) + ); +} + +async fn store_pending_miniblocks( + storage: &mut StorageProcessor<'_>, + numbers: ops::RangeInclusive, + contract_hashes: BaseSystemContractsHashes, +) { + for miniblock_number in numbers { + let tx = create_l2_transaction(10, 100); + storage + .transactions_dal() + .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) + .await; + let mut new_miniblock = create_miniblock(miniblock_number); + new_miniblock.base_system_contracts_hashes = contract_hashes; + storage + .blocks_dal() + .insert_miniblock(&new_miniblock) + .await + .unwrap(); + let tx_result = execute_l2_transaction(tx); + storage + .transactions_dal() + .mark_txs_as_executed_in_miniblock(new_miniblock.number, &[tx_result], 1.into()) + .await; + } +} + +#[tokio::test] +async fn loading_pending_batch_after_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let snapshot_recovery = + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; + + let starting_miniblock_number = snapshot_recovery.miniblock_number.0 + 1; + store_pending_miniblocks( + &mut storage, + starting_miniblock_number..=starting_miniblock_number + 1, + GenesisParams::mock().base_system_contracts.hashes(), + ) + .await; + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let first_miniblock_in_batch = provider + .load_first_miniblock_in_batch(&mut storage, snapshot_recovery.l1_batch_number + 1) + .await + .unwrap() + .expect("no first miniblock"); + assert_eq!( + first_miniblock_in_batch.number(), + snapshot_recovery.miniblock_number + 1 + ); + + let (system_env, l1_batch_env) = provider + .load_l1_batch_params( + &mut storage, + &first_miniblock_in_batch, + u32::MAX, + L2ChainId::default(), + ) + .await + .unwrap(); + let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) + .await + .unwrap(); + + let expected_timestamp = u64::from(snapshot_recovery.miniblock_number.0) + 1; + assert_eq!(pending_batch.pending_miniblocks.len(), 2); + assert_eq!( + pending_batch.l1_batch_env.number, + snapshot_recovery.l1_batch_number + 1 + ); + assert_eq!(pending_batch.l1_batch_env.timestamp, expected_timestamp); + assert_eq!( + pending_batch.l1_batch_env.first_l2_block.number, + 
snapshot_recovery.miniblock_number.0 + 1 + ); + assert_eq!( + pending_batch.l1_batch_env.first_l2_block.timestamp, + expected_timestamp + ); + assert_eq!( + pending_batch.l1_batch_env.first_l2_block.prev_block_hash, + snapshot_recovery.miniblock_hash + ); +} + +#[tokio::test] +async fn getting_batch_version_with_genesis() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let mut genesis_params = GenesisParams::mock(); + genesis_params.protocol_version = ProtocolVersionId::Version5; + ensure_genesis_state(&mut storage, L2ChainId::default(), &genesis_params) + .await + .unwrap(); + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let version = provider + .load_l1_batch_protocol_version(&mut storage, L1BatchNumber(0)) + .await + .unwrap(); + assert_eq!(version, Some(genesis_params.protocol_version)); + + assert!(provider + .load_l1_batch_protocol_version(&mut storage, L1BatchNumber(1)) + .await + .unwrap() + .is_none()); + + storage + .protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion::default()) + .await; + let new_l1_batch = create_l1_batch(1); + storage + .blocks_dal() + .insert_mock_l1_batch(&new_l1_batch) + .await + .unwrap(); + + let version = provider + .load_l1_batch_protocol_version(&mut storage, L1BatchNumber(1)) + .await + .unwrap(); + assert_eq!(version, new_l1_batch.protocol_version); +} + +#[tokio::test] +async fn getting_batch_version_after_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let snapshot_recovery = + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let version = provider + .load_l1_batch_protocol_version(&mut storage, snapshot_recovery.l1_batch_number) + .await + .unwrap(); + assert_eq!(version, Some(snapshot_recovery.protocol_version)); + + assert!(provider + .load_l1_batch_protocol_version(&mut storage, L1BatchNumber(1)) + .await + .is_err()); + assert!(provider + .load_l1_batch_protocol_version(&mut storage, snapshot_recovery.l1_batch_number + 1) + .await + .unwrap() + .is_none()); + + storage + .protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion { + id: ProtocolVersionId::next(), + ..ProtocolVersion::default() + }) + .await; + let mut new_l1_batch = create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1); + new_l1_batch.protocol_version = Some(ProtocolVersionId::next()); + storage + .blocks_dal() + .insert_mock_l1_batch(&new_l1_batch) + .await + .unwrap(); + + let version = provider + .load_l1_batch_protocol_version(&mut storage, snapshot_recovery.l1_batch_number + 1) + .await + .unwrap(); + assert_eq!(version, new_l1_batch.protocol_version); +} diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index f2686011003..00d39502d64 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -5,20 +5,20 @@ use std::{ time::{Duration, Instant}, }; +use anyhow::Context as _; use async_trait::async_trait; use multivm::{ interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}, utils::derive_base_fee_and_gas_per_pubdata, }; -use vm_utils::storage::wait_for_prev_l1_batch_params; +use vm_utils::storage::{l1_batch_params, L1BatchParamsProvider}; use zksync_config::configs::chain::StateKeeperConfig; use 
zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; use zksync_object_store::ObjectStore; use zksync_types::{ - block::MiniblockHeader, protocol_version::ProtocolUpgradeTx, - witness_block_state::WitnessBlockState, Address, L1BatchNumber, L2ChainId, MiniblockNumber, - ProtocolVersionId, Transaction, U256, + protocol_version::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, Address, + L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, }; // TODO (SMA-1206): use seconds instead of milliseconds. use zksync_utils::time::millis_since_epoch; @@ -28,14 +28,14 @@ use crate::{ state_keeper::{ extractors, io::{ - common::{l1_batch_params, load_pending_batch, poll_iters}, + common::{load_pending_batch, poll_iters, IoCursor}, fee_address_migration, MiniblockParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO, }, mempool_actor::l2_tx_filter, metrics::KEEPER_METRICS, seal_criteria::{IoSealCriteria, TimeoutSealer}, - updates::UpdatesManager, + updates::{MiniblockUpdates, UpdatesManager}, MempoolGuard, }, }; @@ -52,6 +52,9 @@ pub(crate) struct MempoolIO { timeout_sealer: TimeoutSealer, filter: L2TxFilter, current_miniblock_number: MiniblockNumber, + prev_miniblock_hash: H256, + prev_miniblock_timestamp: u64, + l1_batch_params_provider: L1BatchParamsProvider, miniblock_sealer_handle: MiniblockSealerHandle, current_l1_batch_number: L1BatchNumber, fee_account: Address, @@ -94,18 +97,48 @@ impl StateKeeperIO for MempoolIO { .await .unwrap(); + let pending_miniblock_header = self + .l1_batch_params_provider + .load_first_miniblock_in_batch(&mut storage, self.current_l1_batch_number) + .await + .with_context(|| { + format!( + "failed loading first miniblock for L1 batch #{}", + self.current_l1_batch_number + ) + }) + .unwrap()?; + let (system_env, l1_batch_env) = self + .l1_batch_params_provider + .load_l1_batch_params( + &mut storage, + &pending_miniblock_header, + self.validation_computational_gas_limit, + self.chain_id, + ) + .await + .with_context(|| { + format!( + "failed loading params for L1 batch #{}", + self.current_l1_batch_number + ) + }) + .unwrap(); + let pending_batch_data = load_pending_batch(&mut storage, system_env, l1_batch_env) + .await + .with_context(|| { + format!( + "failed loading data for re-execution for pending L1 batch #{}", + self.current_l1_batch_number + ) + }) + .unwrap(); + let PendingBatchData { l1_batch_env, system_env, pending_miniblocks, - } = load_pending_batch( - &mut storage, - self.current_l1_batch_number, - self.fee_account, - self.validation_computational_gas_limit, - self.chain_id, - ) - .await?; + } = pending_batch_data; // Initialize the filter for the transactions that come after the pending batch. // We use values from the pending block to match the filter with one used before the restart. let (base_fee, gas_per_pubdata) = @@ -129,14 +162,6 @@ impl StateKeeperIO for MempoolIO { ) -> Option<(SystemEnv, L1BatchEnv)> { let deadline = Instant::now() + max_wait; - let prev_l1_batch_hash = self.load_previous_l1_batch_hash().await; - - let MiniblockHeader { - timestamp: prev_miniblock_timestamp, - hash: prev_miniblock_hash, - .. - } = self.load_previous_miniblock_header().await; - // Block until at least one transaction in the mempool can match the filter (or timeout happens). // This is needed to ensure that block timestamp is not too old. 
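+        // `poll_iters` rounds up: e.g., a 100 ms delay interval with a `max_wait` of 250 ms
+        // yields 3 polling iterations.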
for _ in 0..poll_iters(self.delay_interval, max_wait) { @@ -145,7 +170,7 @@ impl StateKeeperIO for MempoolIO { // We can use `timeout_at` since `sleep_past` is cancel-safe; it only uses `sleep()` async calls. let current_timestamp = tokio::time::timeout_at( deadline.into(), - sleep_past(prev_miniblock_timestamp, self.current_miniblock_number), + sleep_past(self.prev_miniblock_timestamp, self.current_miniblock_number), ); let current_timestamp = current_timestamp.await.ok()?; @@ -154,11 +179,16 @@ impl StateKeeperIO for MempoolIO { self.current_l1_batch_number.0, self.filter.fee_input ); - let mut storage = self.pool.access_storage().await.unwrap(); + let mut storage = self + .pool + .access_storage_tagged("state_keeper") + .await + .unwrap(); let (base_system_contracts, protocol_version) = storage .protocol_versions_dal() .base_system_contracts_by_timestamp(current_timestamp) .await; + drop(storage); // We create a new filter each time, since parameters may change and a previously // ignored transaction in the mempool may be scheduled for the execution. @@ -167,12 +197,13 @@ impl StateKeeperIO for MempoolIO { protocol_version.into(), ) .await; - // We only need to get the root hash when we're certain that we have a new transaction. if !self.mempool.has_next(&self.filter) { tokio::time::sleep(self.delay_interval).await; continue; } + // We only need to get the root hash when we're certain that we have a new transaction. + let prev_l1_batch_hash = self.wait_for_previous_l1_batch_hash().await; return Some(l1_batch_params( self.current_l1_batch_number, self.fee_account, @@ -180,7 +211,7 @@ impl StateKeeperIO for MempoolIO { prev_l1_batch_hash, self.filter.fee_input, self.current_miniblock_number, - prev_miniblock_hash, + self.prev_miniblock_hash, base_system_contracts, self.validation_computational_gas_limit, protocol_version, @@ -195,13 +226,12 @@ impl StateKeeperIO for MempoolIO { async fn wait_for_new_miniblock_params( &mut self, max_wait: Duration, - prev_miniblock_timestamp: u64, ) -> Option { // We must provide different timestamps for each miniblock. // If miniblock sealing interval is greater than 1 second then `sleep_past` won't actually sleep. let timestamp = tokio::time::timeout( max_wait, - sleep_past(prev_miniblock_timestamp, self.current_miniblock_number), + sleep_past(self.prev_miniblock_timestamp, self.current_miniblock_number), ) .await .ok()?; @@ -272,7 +302,7 @@ impl StateKeeperIO for MempoolIO { false, ); self.miniblock_sealer_handle.submit(command).await; - self.current_miniblock_number += 1; + self.update_miniblock_fields(&updates_manager.miniblock); } async fn seal_l1_batch( @@ -312,7 +342,7 @@ impl StateKeeperIO for MempoolIO { let pool = self.pool.clone(); let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap(); - updates_manager + let fictive_miniblock = updates_manager .seal_l1_batch( &mut storage, self.current_miniblock_number, @@ -321,17 +351,24 @@ impl StateKeeperIO for MempoolIO { self.l2_erc20_bridge_addr, ) .await; - self.current_miniblock_number += 1; // Due to fictive miniblock being sealed. 
+ self.update_miniblock_fields(&fictive_miniblock); self.current_l1_batch_number += 1; Ok(()) } async fn load_previous_batch_version_id(&mut self) -> Option { - let mut storage = self.pool.access_storage().await.unwrap(); - storage - .blocks_dal() - .get_batch_protocol_version_id(self.current_l1_batch_number - 1) + let mut storage = self + .pool + .access_storage_tagged("state_keeper") + .await + .unwrap(); + let prev_l1_batch_number = self.current_l1_batch_number - 1; + self.l1_batch_params_provider + .load_l1_batch_protocol_version(&mut storage, prev_l1_batch_number) .await + .with_context(|| { + format!("failed loading protocol version for L1 batch #{prev_l1_batch_number}") + }) .unwrap() } @@ -339,7 +376,11 @@ impl StateKeeperIO for MempoolIO { &mut self, version_id: ProtocolVersionId, ) -> Option { - let mut storage = self.pool.access_storage().await.unwrap(); + let mut storage = self + .pool + .access_storage_tagged("state_keeper") + .await + .unwrap(); storage .protocol_versions_dal() .get_protocol_upgrade_tx(version_id) @@ -407,43 +448,39 @@ impl MempoolIO { l2_erc20_bridge_addr: Address, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> Self { - assert!( + ) -> anyhow::Result { + anyhow::ensure!( config.virtual_blocks_interval > 0, "Virtual blocks interval must be positive" ); - assert!( + anyhow::ensure!( config.virtual_blocks_per_miniblock > 0, "Virtual blocks per miniblock must be positive" ); - let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap(); - // TODO (PLA-703): Support no L1 batches / miniblocks in the storage - let last_sealed_l1_batch_number = storage - .blocks_dal() - .get_sealed_l1_batch_number() + let mut storage = pool.access_storage_tagged("state_keeper").await?; + let cursor = IoCursor::new(&mut storage) .await - .unwrap() - .expect("No L1 batches sealed"); - let last_miniblock_number = storage - .blocks_dal() - .get_sealed_miniblock_number() + .context("failed initializing I/O cursor")?; + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut storage) .await - .unwrap() - .expect("empty storage not supported"); // FIXME (PLA-703): handle empty storage + .context("failed initializing L1 batch params provider")?; fee_address_migration::migrate_pending_miniblocks(&mut storage).await; drop(storage); - Self { + Ok(Self { mempool, object_store, pool, timeout_sealer: TimeoutSealer::new(config), filter: L2TxFilter::default(), // ^ Will be initialized properly on the first newly opened batch - current_l1_batch_number: last_sealed_l1_batch_number + 1, + current_l1_batch_number: cursor.l1_batch, miniblock_sealer_handle, - current_miniblock_number: last_miniblock_number + 1, + current_miniblock_number: cursor.next_miniblock, + prev_miniblock_hash: cursor.prev_miniblock_hash, + prev_miniblock_timestamp: cursor.prev_miniblock_timestamp, + l1_batch_params_provider, fee_account: config.fee_account_addr, validation_computational_gas_limit, delay_interval, @@ -452,10 +489,20 @@ impl MempoolIO { chain_id, virtual_blocks_interval: config.virtual_blocks_interval, virtual_blocks_per_miniblock: config.virtual_blocks_per_miniblock, - } + }) + } + + fn update_miniblock_fields(&mut self, miniblock: &MiniblockUpdates) { + assert_eq!( + miniblock.number, self.current_miniblock_number.0, + "Attempted to seal a miniblock with unexpected number" + ); + self.current_miniblock_number += 1; + self.prev_miniblock_hash = miniblock.get_miniblock_hash(); + self.prev_miniblock_timestamp = miniblock.timestamp; } - async fn 
load_previous_l1_batch_hash(&self) -> U256 { + async fn wait_for_previous_l1_batch_hash(&self) -> H256 { tracing::info!( "Getting previous L1 batch hash for L1 batch #{}", self.current_l1_batch_number @@ -467,34 +514,24 @@ impl MempoolIO { .access_storage_tagged("state_keeper") .await .unwrap(); - let (batch_hash, _) = - wait_for_prev_l1_batch_params(&mut storage, self.current_l1_batch_number).await; + let prev_l1_batch_number = self.current_l1_batch_number - 1; + let (batch_hash, _) = self + .l1_batch_params_provider + .wait_for_l1_batch_params(&mut storage, prev_l1_batch_number) + .await + .with_context(|| { + format!("error waiting for params for L1 batch #{prev_l1_batch_number}") + }) + .unwrap(); wait_latency.observe(); tracing::info!( - "Got previous L1 batch hash: {batch_hash:0>64x} for L1 batch #{}", + "Got previous L1 batch hash: {batch_hash:?} for L1 batch #{}", self.current_l1_batch_number ); batch_hash } - async fn load_previous_miniblock_header(&self) -> MiniblockHeader { - let load_latency = KEEPER_METRICS.load_previous_miniblock_header.start(); - let mut storage = self - .pool - .access_storage_tagged("state_keeper") - .await - .unwrap(); - let miniblock_header = storage - .blocks_dal() - .get_miniblock_header(self.current_miniblock_number - 1) - .await - .unwrap() - .expect("Previous miniblock must be sealed and header saved to DB"); - load_latency.observe(); - miniblock_header - } - /// "virtual_blocks_per_miniblock" will be created either if the miniblock_number % virtual_blocks_interval == 0 or /// the miniblock is the first one in the batch. /// For instance: @@ -515,6 +552,10 @@ impl MempoolIO { pub(super) fn filter(&self) -> &L2TxFilter { &self.filter } + + pub(super) fn set_prev_miniblock_timestamp(&mut self, timestamp: u64) { + self.prev_miniblock_timestamp = timestamp; + } } #[cfg(test)] diff --git a/core/lib/zksync_core/src/state_keeper/io/mod.rs b/core/lib/zksync_core/src/state_keeper/io/mod.rs index 16cc15e03b0..a159e3644c7 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mod.rs @@ -82,7 +82,6 @@ pub trait StateKeeperIO: 'static + Send + IoSealCriteria { async fn wait_for_new_miniblock_params( &mut self, max_wait: Duration, - prev_miniblock_timestamp: u64, ) -> Option; /// Blocks for up to `max_wait` until the next transaction is available for execution. /// Returns `None` if no transaction became available until the timeout. diff --git a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs index 091583a0b87..430e56cdfeb 100644 --- a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs @@ -9,7 +9,6 @@ use multivm::{ interface::{FinishedL1Batch, L1BatchEnv}, utils::get_max_gas_per_pubdata_byte, }; -use vm_utils::storage::wait_for_prev_l1_batch_params; use zksync_dal::StorageProcessor; use zksync_types::{ block::{unpack_block_info, L1BatchHeader, MiniblockHeader}, @@ -34,20 +33,20 @@ use zksync_utils::{h256_to_u256, time::millis_since_epoch, u256_to_h256}; use crate::{ metrics::{BlockStage, MiniblockStage, APP_METRICS}, state_keeper::{ - extractors, metrics::{ L1BatchSealStage, MiniblockSealStage, KEEPER_METRICS, L1_BATCH_METRICS, MINIBLOCK_METRICS, }, types::ExecutionMetricsForCriteria, - updates::{MiniblockSealCommand, UpdatesManager}, + updates::{MiniblockSealCommand, MiniblockUpdates, UpdatesManager}, }, }; impl UpdatesManager { /// Persists an L1 batch in the storage. 
/// This action includes a creation of an empty "fictive" miniblock that contains - /// the events generated during the bootloader "tip phase". + /// the events generated during the bootloader "tip phase". Returns updates for this fictive miniblock. + #[must_use = "fictive miniblock must be used to update I/O params"] pub(crate) async fn seal_l1_batch( mut self, storage: &mut StorageProcessor<'_>, @@ -55,7 +54,7 @@ impl UpdatesManager { l1_batch_env: &L1BatchEnv, finished_batch: FinishedL1Batch, l2_erc20_bridge_addr: Address, - ) { + ) -> MiniblockUpdates { let started_at = Instant::now(); let progress = L1_BATCH_METRICS.start(L1BatchSealStage::VmFinalization); let mut transaction = storage.start_transaction().await.unwrap(); @@ -116,20 +115,8 @@ impl UpdatesManager { ); let progress = L1_BATCH_METRICS.start(L1BatchSealStage::InsertL1BatchHeader); - let (_prev_hash, prev_timestamp) = - wait_for_prev_l1_batch_params(&mut transaction, l1_batch_env.number).await; - assert!( - prev_timestamp < l1_batch_env.timestamp, - "Cannot seal L1 batch #{}: Timestamp of previous L1 batch ({}) >= provisional L1 batch timestamp ({}), \ - meaning that L1 batch will be rejected by the bootloader", - l1_batch_env.number, - extractors::display_timestamp(prev_timestamp), - extractors::display_timestamp(l1_batch_env.timestamp) - ); - let l2_to_l1_messages = extract_long_l2_to_l1_messages(&finished_batch.final_execution_state.events); - let l1_batch = L1BatchHeader { number: l1_batch_env.number, timestamp: l1_batch_env.timestamp, @@ -247,6 +234,7 @@ impl UpdatesManager { l1_batch_env.timestamp, &writes_metrics, ); + miniblock_command.miniblock } fn report_l1_batch_metrics( diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index 0dcd0408493..fa695f4774d 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -6,7 +6,8 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; use zksync_types::{ - block::BlockGasCount, + block::{BlockGasCount, MiniblockHasher}, + fee::TransactionExecutionMetrics, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, tx::ExecutionMetrics, AccountTreeId, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, StorageKey, VmEvent, @@ -21,11 +22,11 @@ use crate::{ mempool_actor::l2_tx_filter, tests::{ create_execution_result, create_transaction, create_updates_manager, - default_l1_batch_env, default_vm_block_result, Query, + default_l1_batch_env, default_system_env, default_vm_block_result, Query, }, updates::{MiniblockSealCommand, MiniblockUpdates, UpdatesManager}, }, - utils::testonly::create_l1_batch_metadata, + utils::testonly::prepare_recovery_snapshot, }; mod tester; @@ -48,17 +49,18 @@ async fn test_filter_initialization() { #[tokio::test] async fn test_filter_with_pending_batch() { let connection_pool = ConnectionPool::test_pool().await; - let tester = Tester::new(); - + let mut tester = Tester::new(); tester.genesis(&connection_pool).await; // Insert a sealed batch so there will be a `prev_l1_batch_state_root`. // These gas values are random and don't matter for filter calculation as there will be a // pending batch the filter will be based off of. 
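For context on the test below: when a pending batch exists on restart, the I/O re-derives its transaction filter from the pending batch's fee input, so transactions admitted before the restart keep matching afterwards. A rough sketch of that derivation, with simplified arithmetic standing in for `derive_base_fee_and_gas_per_pubdata`:

```rust
#[derive(Debug, Default, PartialEq)]
struct L2TxFilter {
    fee_per_gas: u64,
    gas_per_pubdata: u32,
}

/// Simplified stand-in for the real fee derivation: the base fee must cover
/// the fair L2 gas price, and pubdata gas is priced relative to the base fee.
fn filter_from_pending_batch(fair_l2_gas_price: u64, fair_pubdata_price: u64) -> L2TxFilter {
    let base_fee = fair_l2_gas_price;
    let gas_per_pubdata = fair_pubdata_price.div_ceil(base_fee.max(1)) as u32;
    L2TxFilter {
        fee_per_gas: base_fee,
        gas_per_pubdata,
    }
}
```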
- tester + let tx_result = tester .insert_miniblock(&connection_pool, 1, 5, BatchFeeInput::l1_pegged(55, 555)) .await; - tester.insert_sealed_batch(&connection_pool, 1).await; + tester + .insert_sealed_batch(&connection_pool, 1, &[tx_result]) + .await; // Inserting a pending miniblock that isn't included in a sealed batch means there is a pending batch. // The gas values are randomly chosen but do affect filter values calculation. @@ -68,7 +70,7 @@ async fn test_filter_with_pending_batch() { fair_l2_gas_price: 1000, fair_pubdata_price: 500, }); - + tester.set_timestamp(2); tester .insert_miniblock(&connection_pool, 2, 10, fee_input) .await; @@ -97,10 +99,12 @@ async fn test_filter_with_no_pending_batch() { // Insert a sealed batch so there will be a `prev_l1_batch_state_root`. // These gas values are random and don't matter for filter calculation. - tester + let tx_result = tester .insert_miniblock(&connection_pool, 1, 5, BatchFeeInput::l1_pegged(55, 555)) .await; - tester.insert_sealed_batch(&connection_pool, 1).await; + tester + .insert_sealed_batch(&connection_pool, 1, &[tx_result]) + .await; // Create a copy of the tx filter that the mempool will use. let want_filter = l2_tx_filter( @@ -138,13 +142,15 @@ async fn test_timestamps_are_distinct( tester.genesis(&connection_pool).await; tester.set_timestamp(prev_miniblock_timestamp); - tester + let tx_result = tester .insert_miniblock(&connection_pool, 1, 5, BatchFeeInput::l1_pegged(55, 555)) .await; if delay_prev_miniblock_compared_to_batch { tester.set_timestamp(prev_miniblock_timestamp - 1); } - tester.insert_sealed_batch(&connection_pool, 1).await; + tester + .insert_sealed_batch(&connection_pool, 1, &[tx_result]) + .await; let (mut mempool, mut guard) = tester.create_test_mempool_io(connection_pool, 1).await; // Insert a transaction to trigger L1 batch creation. @@ -362,25 +368,21 @@ async fn test_miniblock_and_l1_batch_processing( // Genesis is needed for proper mempool initialization. tester.genesis(&pool).await; - let mut conn = pool.access_storage_tagged("state_keeper").await.unwrap(); + let mut storage = pool.access_storage().await.unwrap(); // Save metadata for the genesis L1 batch so that we don't hang in `seal_l1_batch`.
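The batch hash is pinned below because sealing a batch waits until the previous batch's root hash is available. A sketch of that waiting pattern; `load_batch_root_hash` is a hypothetical stand-in for the DAL query backing `L1BatchParamsProvider::wait_for_l1_batch_params`:

```rust
use std::time::Duration;

/// Hypothetical stand-in for the DAL query that returns the root hash of an
/// L1 batch once the Merkle tree has computed it, and `None` before that.
async fn load_batch_root_hash(_number: u32) -> Option<[u8; 32]> {
    Some([0; 32]) // stubbed so that the sketch terminates
}

/// Poll storage until the previous batch's root hash shows up. Without the
/// hash pinned in the test setup, a loop like this would never terminate.
async fn wait_for_prev_batch_hash(prev_batch_number: u32) -> [u8; 32] {
    loop {
        if let Some(hash) = load_batch_root_hash(prev_batch_number).await {
            return hash;
        }
        tokio::time::sleep(Duration::from_millis(100)).await;
    }
}
```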
- let metadata = create_l1_batch_metadata(0); - conn.blocks_dal() - .save_l1_batch_metadata(L1BatchNumber(0), &metadata, H256::zero(), false) + storage + .blocks_dal() + .set_l1_batch_hash(L1BatchNumber(0), H256::zero()) .await .unwrap(); - drop(conn); + drop(storage); let (mut mempool, _) = tester .create_test_mempool_io(pool.clone(), miniblock_sealer_capacity) .await; - let l1_batch_env = default_l1_batch_env(0, 1, Address::random()); - let mut updates = UpdatesManager::new( - l1_batch_env, - BaseSystemContractsHashes::default(), - ProtocolVersionId::latest(), - ); + let l1_batch_env = default_l1_batch_env(1, 1, Address::random()); + let mut updates = UpdatesManager::new(&l1_batch_env, &default_system_env()); let tx = create_transaction(10, 100); updates.extend_from_executed_transaction( @@ -398,8 +400,6 @@ async fn test_miniblock_and_l1_batch_processing( }); let finished_batch = default_vm_block_result(); - - let l1_batch_env = default_l1_batch_env(1, 1, Address::random()); mempool .seal_l1_batch(None, updates, &l1_batch_env, finished_batch) .await @@ -435,6 +435,141 @@ async fn miniblock_and_l1_batch_processing_with_sync_sealer() { test_miniblock_and_l1_batch_processing(pool, 0).await; } +#[tokio::test] +async fn miniblock_processing_after_snapshot_recovery() { + let connection_pool = ConnectionPool::test_pool().await; + let mut storage = connection_pool.access_storage().await.unwrap(); + let snapshot_recovery = + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; + let tester = Tester::new(); + + let (mut mempool, mut mempool_guard) = tester + .create_test_mempool_io(connection_pool.clone(), 0) + .await; + assert_eq!( + mempool.current_miniblock_number(), + snapshot_recovery.miniblock_number + 1 + ); + assert_eq!( + mempool.current_l1_batch_number(), + snapshot_recovery.l1_batch_number + 1 + ); + assert!(mempool.load_pending_batch().await.is_none()); + + // Insert a transaction into the mempool in order to open a new batch. + let tx_filter = l2_tx_filter( + &tester.create_batch_fee_input_provider().await, + ProtocolVersionId::latest().into(), + ) + .await; + let tx = tester.insert_tx( + &mut mempool_guard, + tx_filter.fee_per_gas, + tx_filter.gas_per_pubdata, + ); + storage + .transactions_dal() + .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) + .await; + + let (system_env, l1_batch_env) = mempool + .wait_for_new_batch_params(Duration::from_secs(10)) + .await + .unwrap(); + assert_eq!(l1_batch_env.number, snapshot_recovery.l1_batch_number + 1); + assert_eq!( + l1_batch_env.previous_batch_hash, + Some(snapshot_recovery.l1_batch_root_hash) + ); + assert_eq!( + l1_batch_env.first_l2_block.prev_block_hash, + snapshot_recovery.miniblock_hash + ); + + let mut updates = UpdatesManager::new(&l1_batch_env, &system_env); + + let tx_hash = tx.hash(); + updates.extend_from_executed_transaction( + tx.into(), + create_execution_result(0, []), + vec![], + BlockGasCount::default(), + ExecutionMetrics::default(), + vec![], + ); + mempool.seal_miniblock(&updates).await; + + // Check that the miniblock is persisted and has correct data. 
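The assertions below recompute the expected hash by chaining from the snapshot's miniblock hash. A toy illustration of why the parent hash matters, with `DefaultHasher` standing in for the protocol's `MiniblockHasher`:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Stand-in for the protocol hash: a miniblock hash commits to the block
/// number, timestamp, parent hash, and the hashes of included transactions.
fn miniblock_hash(number: u32, timestamp: u64, prev_hash: u64, tx_hashes: &[u64]) -> u64 {
    let mut hasher = DefaultHasher::new();
    (number, timestamp, prev_hash, tx_hashes).hash(&mut hasher);
    hasher.finish()
}

fn main() {
    // After snapshot recovery, the first locally produced miniblock must chain
    // to the miniblock hash recorded in the recovery status; chaining to any
    // other parent yields a different (wrong) hash.
    let snapshot_hash = miniblock_hash(42, 100, 0, &[]);
    let first_local = miniblock_hash(43, 101, snapshot_hash, &[0xa1]);
    assert_ne!(first_local, miniblock_hash(43, 101, 0, &[0xa1]));
}
```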
+ let persisted_miniblock = storage + .blocks_dal() + .get_miniblock_header(snapshot_recovery.miniblock_number + 1) + .await + .unwrap() + .expect("no miniblock persisted"); + assert_eq!( + persisted_miniblock.number, + snapshot_recovery.miniblock_number + 1 + ); + assert_eq!(persisted_miniblock.l2_tx_count, 1); + + let mut miniblock_hasher = MiniblockHasher::new( + persisted_miniblock.number, + persisted_miniblock.timestamp, + snapshot_recovery.miniblock_hash, + ); + miniblock_hasher.push_tx_hash(tx_hash); + assert_eq!( + persisted_miniblock.hash, + miniblock_hasher.finalize(ProtocolVersionId::latest()) + ); + + let miniblock_transactions = storage + .transactions_web3_dal() + .get_raw_miniblock_transactions(persisted_miniblock.number) + .await + .unwrap(); + assert_eq!(miniblock_transactions.len(), 1); + assert_eq!(miniblock_transactions[0].hash(), tx_hash); + + // Emulate node restart. + let (mut mempool, _) = tester + .create_test_mempool_io(connection_pool.clone(), 0) + .await; + assert_eq!( + mempool.current_miniblock_number(), + snapshot_recovery.miniblock_number + 2 + ); + assert_eq!( + mempool.current_l1_batch_number(), + snapshot_recovery.l1_batch_number + 1 + ); + + let pending_batch = mempool.load_pending_batch().await.unwrap(); + assert_eq!( + pending_batch.l1_batch_env.number, + snapshot_recovery.l1_batch_number + 1 + ); + assert_eq!( + pending_batch.l1_batch_env.previous_batch_hash, + Some(snapshot_recovery.l1_batch_root_hash) + ); + assert_eq!( + pending_batch.l1_batch_env.first_l2_block.prev_block_hash, + snapshot_recovery.miniblock_hash + ); + assert_eq!(pending_batch.pending_miniblocks.len(), 1); + assert_eq!( + pending_batch.pending_miniblocks[0].number, + snapshot_recovery.miniblock_number + 1 + ); + assert_eq!( + pending_batch.pending_miniblocks[0].prev_block_hash, + snapshot_recovery.miniblock_hash + ); + assert_eq!(pending_batch.pending_miniblocks[0].txs.len(), 1); + assert_eq!(pending_batch.pending_miniblocks[0].txs[0].hash(), tx_hash); +} + #[tokio::test] async fn miniblock_sealer_handle_blocking() { let pool = ConnectionPool::test_pool().await; @@ -528,12 +663,11 @@ async fn different_timestamp_for_miniblocks_in_same_batch() { tester.genesis(&connection_pool).await; let (mut mempool, _) = tester.create_test_mempool_io(connection_pool, 1).await; let current_timestamp = seconds_since_epoch(); - let MiniblockParams { - timestamp: next_timestamp, - .. - } = mempool - .wait_for_new_miniblock_params(Duration::from_secs(10), current_timestamp) + mempool.set_prev_miniblock_timestamp(current_timestamp); + + let miniblock_params = mempool + .wait_for_new_miniblock_params(Duration::from_secs(10)) .await .unwrap(); - assert!(next_timestamp > current_timestamp); + assert!(miniblock_params.timestamp > current_timestamp); } diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs index fe79bfc584b..126b288ddee 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs @@ -1,6 +1,6 @@ //! Testing harness for the IO. 
-use std::{sync::Arc, time::Duration}; +use std::{slice, sync::Arc, time::Duration}; use multivm::vm_latest::constants::BLOCK_GAS_LIMIT; use zksync_config::{configs::chain::StateKeeperConfig, GasAdjusterConfig}; @@ -10,18 +10,23 @@ use zksync_eth_client::clients::MockEthereum; use zksync_object_store::ObjectStoreFactory; use zksync_types::{ block::MiniblockHeader, + fee::TransactionExecutionMetrics, fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV1}, + l2::L2Tx, protocol_version::L1VerifierConfig, system_contracts::get_system_smart_contracts, - Address, L2ChainId, PriorityOpId, ProtocolVersionId, H256, + tx::TransactionExecutionResult, + Address, L2ChainId, MiniblockNumber, PriorityOpId, ProtocolVersionId, H256, }; use crate::{ fee_model::MainNodeFeeInputProvider, genesis::create_genesis_l1_batch, l1_gas_price::GasAdjuster, - state_keeper::{io::MiniblockSealer, tests::create_transaction, MempoolGuard, MempoolIO}, - utils::testonly::{create_l1_batch, create_miniblock}, + state_keeper::{io::MiniblockSealer, MempoolGuard, MempoolIO}, + utils::testonly::{ + create_l1_batch, create_l2_transaction, create_miniblock, execute_l2_transaction, + }, }; #[derive(Debug)] @@ -96,6 +101,7 @@ impl Tester { minimal_l2_gas_price: self.minimal_l2_gas_price(), virtual_blocks_interval: 1, virtual_blocks_per_miniblock: 1, + fee_account_addr: Address::repeat_byte(0x11), // Maintain implicit invariant: fee address is never `Address::zero()` ..StateKeeperConfig::default() }; let object_store = ObjectStoreFactory::mock().create_store().await; @@ -112,7 +118,8 @@ impl Tester { BLOCK_GAS_LIMIT, L2ChainId::from(270), ) - .await; + .await + .unwrap(); (io, mempool) } @@ -144,8 +151,13 @@ impl Tester { number: u32, base_fee_per_gas: u64, fee_input: BatchFeeInput, - ) { + ) -> TransactionExecutionResult { let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap(); + let tx = create_l2_transaction(10, 100); + storage + .transactions_dal() + .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) + .await; storage .blocks_dal() .insert_miniblock(&MiniblockHeader { @@ -157,9 +169,24 @@ impl Tester { }) .await .unwrap(); + let tx_result = execute_l2_transaction(tx.clone()); + storage + .transactions_dal() + .mark_txs_as_executed_in_miniblock( + MiniblockNumber(number), + slice::from_ref(&tx_result), + 1.into(), + ) + .await; + tx_result } - pub(super) async fn insert_sealed_batch(&self, pool: &ConnectionPool, number: u32) { + pub(super) async fn insert_sealed_batch( + &self, + pool: &ConnectionPool, + number: u32, + tx_results: &[TransactionExecutionResult], + ) { let batch_header = create_l1_batch(number); let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap(); storage @@ -172,6 +199,10 @@ impl Tester { .mark_miniblocks_as_executed_in_l1_batch(batch_header.number) .await .unwrap(); + storage + .transactions_dal() + .mark_txs_as_executed_in_l1_batch(batch_header.number, tx_results) + .await; storage .blocks_dal() .set_l1_batch_hash(batch_header.number, H256::default()) @@ -184,8 +215,9 @@ impl Tester { guard: &mut MempoolGuard, fee_per_gas: u64, gas_per_pubdata: u32, - ) { - let tx = create_transaction(fee_per_gas, gas_per_pubdata.into()); - guard.insert(vec![tx], Default::default()); + ) -> L2Tx { + let tx = create_l2_transaction(fee_per_gas, gas_per_pubdata.into()); + guard.insert(vec![tx.clone().into()], Default::default()); + tx } } diff --git a/core/lib/zksync_core/src/state_keeper/keeper.rs b/core/lib/zksync_core/src/state_keeper/keeper.rs index 
14829c5cf61..8e77efef663 100644 --- a/core/lib/zksync_core/src/state_keeper/keeper.rs +++ b/core/lib/zksync_core/src/state_keeper/keeper.rs @@ -145,11 +145,7 @@ impl ZkSyncStateKeeper { }; let protocol_version = system_env.version; - let mut updates_manager = UpdatesManager::new( - l1_batch_env.clone(), - system_env.base_system_smart_contracts.hashes(), - protocol_version, - ); + let mut updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); let previous_batch_protocol_version = self.io.load_previous_batch_version_id().await.unwrap(); @@ -197,9 +193,7 @@ impl ZkSyncStateKeeper { self.io.seal_miniblock(&updates_manager).await; // We've sealed the miniblock that we had, but we still need to setup the timestamp // for the fictive miniblock. - let new_miniblock_params = self - .wait_for_new_miniblock_params(updates_manager.miniblock.timestamp) - .await?; + let new_miniblock_params = self.wait_for_new_miniblock_params().await?; Self::start_next_miniblock( new_miniblock_params, &mut updates_manager, @@ -225,11 +219,7 @@ impl ZkSyncStateKeeper { // Start the new batch. (system_env, l1_batch_env) = self.wait_for_new_batch_params().await?; - updates_manager = UpdatesManager::new( - l1_batch_env.clone(), - system_env.base_system_smart_contracts.hashes(), - system_env.version, - ); + updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); batch_executor = self .batch_executor_base .init_batch( @@ -264,14 +254,11 @@ impl ZkSyncStateKeeper { Err(Error::Canceled) } - async fn wait_for_new_miniblock_params( - &mut self, - prev_miniblock_timestamp: u64, - ) -> Result { + async fn wait_for_new_miniblock_params(&mut self) -> Result { while !self.is_canceled() { if let Some(params) = self .io - .wait_for_new_miniblock_params(POLL_WAIT_DURATION, prev_miniblock_timestamp) + .wait_for_new_miniblock_params(POLL_WAIT_DURATION) .await { return Ok(params); @@ -386,7 +373,7 @@ impl ZkSyncStateKeeper { // We've processed all the miniblocks, and right now we're initializing the next *actual* miniblock. 
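A condensed view of the interface change applied throughout this file: the keeper no longer threads `prev_miniblock_timestamp` into the IO, because each IO implementation now tracks the previous miniblock's hash and timestamp itself, refreshing them whenever a miniblock (including the fictive one) is sealed. Simplified sketch, with unrelated trait items omitted:

```rust
use std::time::Duration;

use async_trait::async_trait;

struct MiniblockParams {
    timestamp: u64,
    virtual_blocks: u32,
}

/// Condensed sketch of the changed `StateKeeperIO` surface; the previous
/// miniblock timestamp argument is gone because it is now internal IO state.
#[async_trait]
trait StateKeeperIo {
    async fn wait_for_new_miniblock_params(&mut self, max_wait: Duration) -> Option<MiniblockParams>;
}
```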
let new_miniblock_params = self - .wait_for_new_miniblock_params(updates_manager.miniblock.timestamp) + .wait_for_new_miniblock_params() .await .map_err(|e| e.context("wait_for_new_miniblock_params"))?; Self::start_next_miniblock(new_miniblock_params, updates_manager, batch_executor).await; @@ -426,7 +413,7 @@ impl ZkSyncStateKeeper { self.io.seal_miniblock(updates_manager).await; let new_miniblock_params = self - .wait_for_new_miniblock_params(updates_manager.miniblock.timestamp) + .wait_for_new_miniblock_params() .await .map_err(|e| e.context("wait_for_new_miniblock_params"))?; tracing::debug!( diff --git a/core/lib/zksync_core/src/state_keeper/mod.rs b/core/lib/zksync_core/src/state_keeper/mod.rs index b1534d9612f..ee4493cf613 100644 --- a/core/lib/zksync_core/src/state_keeper/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/mod.rs @@ -67,7 +67,8 @@ pub(crate) async fn create_state_keeper( state_keeper_config.validation_computational_gas_limit, network_config.zksync_network_id, ) - .await; + .await + .expect("Failed initializing main node I/O for state keeper"); let sealer = SequencerSealer::new(state_keeper_config); ZkSyncStateKeeper::new( diff --git a/core/lib/zksync_core/src/state_keeper/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/tests/mod.rs index 70511b45b73..1b107c6ac72 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/mod.rs @@ -15,7 +15,7 @@ use multivm::{ }; use once_cell::sync::Lazy; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; +use zksync_contracts::BaseSystemContracts; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{ aggregated_operations::AggregatedActionType, @@ -115,11 +115,7 @@ pub(super) fn default_vm_block_result() -> FinishedL1Batch { pub(super) fn create_updates_manager() -> UpdatesManager { let l1_batch_env = default_l1_batch_env(1, 1, Address::default()); - UpdatesManager::new( - l1_batch_env, - BaseSystemContractsHashes::default(), - ProtocolVersionId::latest(), - ) + UpdatesManager::new(&l1_batch_env, &default_system_env()) } pub(super) fn create_transaction(fee_per_gas: u64, gas_per_pubdata: u64) -> Transaction { diff --git a/core/lib/zksync_core/src/state_keeper/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/tests/tester.rs index ca65b165326..fc5d595b042 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/tester.rs @@ -661,7 +661,6 @@ impl StateKeeperIO for TestIO { async fn wait_for_new_miniblock_params( &mut self, _max_wait: Duration, - _prev_miniblock_timestamp: u64, ) -> Option { Some(MiniblockParams { timestamp: self.timestamp, diff --git a/core/lib/zksync_core/src/state_keeper/updates/mod.rs b/core/lib/zksync_core/src/state_keeper/updates/mod.rs index faee5a5fbff..7c9a7c89013 100644 --- a/core/lib/zksync_core/src/state_keeper/updates/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/updates/mod.rs @@ -1,5 +1,5 @@ use multivm::{ - interface::{L1BatchEnv, VmExecutionResultAndLogs}, + interface::{L1BatchEnv, SystemEnv, VmExecutionResultAndLogs}, utils::get_batch_base_fee, }; use zksync_contracts::BaseSystemContractsHashes; @@ -37,18 +37,15 @@ pub struct UpdatesManager { } impl UpdatesManager { - pub(crate) fn new( - l1_batch_env: L1BatchEnv, - base_system_contract_hashes: BaseSystemContractsHashes, - protocol_version: ProtocolVersionId, - ) -> Self { + pub(crate) fn new(l1_batch_env: &L1BatchEnv, 
system_env: &SystemEnv) -> Self { + let protocol_version = system_env.version; Self { batch_timestamp: l1_batch_env.timestamp, fee_account_address: l1_batch_env.fee_account, batch_fee_input: l1_batch_env.fee_input, - base_fee_per_gas: get_batch_base_fee(&l1_batch_env, protocol_version.into()), + base_fee_per_gas: get_batch_base_fee(l1_batch_env, protocol_version.into()), protocol_version, - base_system_contract_hashes, + base_system_contract_hashes: system_env.base_system_smart_contracts.hashes(), l1_batch: L1BatchUpdates::new(), miniblock: MiniblockUpdates::new( l1_batch_env.first_l2_block.timestamp, diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs index f18f316a968..69bdb6dcc36 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs @@ -12,7 +12,7 @@ use super::*; use crate::{ genesis::{ensure_genesis_state, GenesisParams}, sync_layer::metrics::L1BatchStage, - utils::testonly::{create_l1_batch, create_miniblock, prepare_empty_recovery_snapshot}, + utils::testonly::{create_l1_batch, create_miniblock, prepare_recovery_snapshot}, }; async fn seal_l1_batch(storage: &mut StorageProcessor<'_>, number: L1BatchNumber) { @@ -261,7 +261,7 @@ async fn updater_cursor_for_storage_with_genesis_block() { async fn updater_cursor_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - prepare_empty_recovery_snapshot(&mut storage, 23).await; + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; let cursor = UpdaterCursor::new(&mut storage).await.unwrap(); assert_eq!(cursor.last_committed_l1_batch, L1BatchNumber(23)); @@ -275,7 +275,7 @@ async fn normal_updater_operation(snapshot_recovery: bool, async_batches: bool) let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let first_batch_number = if snapshot_recovery { - prepare_empty_recovery_snapshot(&mut storage, 23).await; + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; L1BatchNumber(24) } else { ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) @@ -347,7 +347,7 @@ async fn updater_with_gradual_main_node_updates(snapshot_recovery: bool) { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let first_batch_number = if snapshot_recovery { - prepare_empty_recovery_snapshot(&mut storage, 23).await; + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; L1BatchNumber(24) } else { ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index 701e16188ad..ac9cac8139f 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -1,15 +1,15 @@ -use std::{collections::HashMap, convert::TryInto, iter::FromIterator, time::Duration}; +use std::{collections::HashMap, time::Duration}; +use anyhow::Context as _; use async_trait::async_trait; -use futures::future; use multivm::interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}; -use vm_utils::storage::wait_for_prev_l1_batch_params; +use vm_utils::storage::{l1_batch_params, L1BatchParamsProvider}; use 
zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_dal::ConnectionPool; use zksync_types::{ ethabi::Address, fee_model::BatchFeeInput, protocol_version::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, L1BatchNumber, L2ChainId, MiniblockNumber, - ProtocolVersionId, Transaction, H256, U256, + ProtocolVersionId, Transaction, H256, }; use zksync_utils::{be_words_to_bytes, bytes_to_be_words}; @@ -22,13 +22,13 @@ use crate::{ metrics::{BlockStage, APP_METRICS}, state_keeper::{ io::{ - common::{l1_batch_params, load_pending_batch, poll_iters}, + common::{load_pending_batch, poll_iters, IoCursor}, fee_address_migration, MiniblockParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO, }, metrics::KEEPER_METRICS, seal_criteria::IoSealCriteria, - updates::UpdatesManager, + updates::{MiniblockUpdates, UpdatesManager}, }, }; @@ -48,6 +48,8 @@ pub struct ExternalIO { current_l1_batch_number: L1BatchNumber, current_miniblock_number: MiniblockNumber, + prev_miniblock_hash: H256, + l1_batch_params_provider: L1BatchParamsProvider, actions: ActionQueue, sync_state: SyncState, main_node_client: Box, @@ -70,67 +72,74 @@ impl ExternalIO { l2_erc20_bridge_addr: Address, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> Self { - let mut storage = pool.access_storage_tagged("sync_layer").await.unwrap(); - // TODO (PLA-703): Support no L1 batches / miniblocks in the storage - let last_sealed_l1_batch_number = storage - .blocks_dal() - .get_sealed_l1_batch_number() + ) -> anyhow::Result { + let mut storage = pool.access_storage_tagged("sync_layer").await?; + let cursor = IoCursor::new(&mut storage) .await - .unwrap() - .expect("No L1 batches sealed"); - let last_miniblock_number = storage - .blocks_dal() - .get_sealed_miniblock_number() + .context("failed initializing I/O cursor")?; + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut storage) .await - .unwrap() - .expect("empty storage not supported"); // FIXME (PLA-703): handle empty storage - // We must run the migration for pending miniblocks synchronously, since we use `fee_account_address` - // from a pending miniblock in `load_pending_batch()` implementation. + .context("failed initializing L1 batch params provider")?; + // We must run the migration for pending miniblocks synchronously, since we use `fee_account_address` + // from a pending miniblock in `load_pending_batch()` implementation. 
fee_address_migration::migrate_pending_miniblocks(&mut storage).await; drop(storage); tracing::info!( "Initialized the ExternalIO: current L1 batch number {}, current miniblock number {}", - last_sealed_l1_batch_number + 1, - last_miniblock_number + 1, + cursor.l1_batch, + cursor.next_miniblock, ); - sync_state.set_local_block(last_miniblock_number); + sync_state.set_local_block(MiniblockNumber(cursor.next_miniblock.saturating_sub(1))); - Self { + Ok(Self { miniblock_sealer_handle, pool, - current_l1_batch_number: last_sealed_l1_batch_number + 1, - current_miniblock_number: last_miniblock_number + 1, + current_l1_batch_number: cursor.l1_batch, + current_miniblock_number: cursor.next_miniblock, + prev_miniblock_hash: cursor.prev_miniblock_hash, + l1_batch_params_provider, actions, sync_state, main_node_client, l2_erc20_bridge_addr, validation_computational_gas_limit, chain_id, - } + }) } - async fn load_previous_l1_batch_hash(&self) -> U256 { - let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); - let wait_latency = KEEPER_METRICS.wait_for_prev_hash_time.start(); - let (hash, _) = - wait_for_prev_l1_batch_params(&mut storage, self.current_l1_batch_number).await; - wait_latency.observe(); - hash + fn update_miniblock_fields(&mut self, miniblock: &MiniblockUpdates) { + assert_eq!( + miniblock.number, self.current_miniblock_number.0, + "Attempted to seal a miniblock with unexpected number" + ); + // Mimic the metric emitted by the main node to reuse existing Grafana charts. + APP_METRICS.block_number[&BlockStage::Sealed].set(self.current_l1_batch_number.0.into()); + self.sync_state + .set_local_block(self.current_miniblock_number); + self.current_miniblock_number += 1; + self.prev_miniblock_hash = miniblock.get_miniblock_hash(); } - async fn load_previous_miniblock_hash(&self) -> H256 { - let prev_miniblock_number = self.current_miniblock_number - 1; + async fn wait_for_previous_l1_batch_hash(&self) -> H256 { + tracing::info!( + "Getting previous L1 batch hash for L1 batch #{}", + self.current_l1_batch_number + ); let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); - let header = storage - .blocks_dal() - .get_miniblock_header(prev_miniblock_number) + let wait_latency = KEEPER_METRICS.wait_for_prev_hash_time.start(); + let prev_l1_batch_number = self.current_l1_batch_number - 1; + let (hash, _) = self + .l1_batch_params_provider + .wait_for_l1_batch_params(&mut storage, prev_l1_batch_number) .await - .unwrap() - .unwrap_or_else(|| panic!("Miniblock #{prev_miniblock_number} is missing")); - header.hash + .with_context(|| { + format!("error waiting for params for L1 batch #{prev_l1_batch_number}") + }) + .unwrap(); + wait_latency.observe(); + hash } async fn load_base_system_contracts_by_version_id( @@ -149,6 +158,8 @@ impl ExternalIO { match base_system_contracts { Some(version) => version, None => { + tracing::info!("Fetching protocol version {id:?} from the main node"); + let protocol_version = self .main_node_client .fetch_protocol_version(id) @@ -200,7 +211,10 @@ impl ExternalIO { hash, }, None => { - tracing::info!("Fetching base system contract bytecode from the main node"); + tracing::info!( + "Fetching base system contract bytecode with hash {hash:?} from the main node" + ); + let contract = self .main_node_client .fetch_system_contract_by_hash(hash) @@ -213,7 +227,7 @@ impl ExternalIO { .factory_deps_dal() .insert_factory_deps( self.current_miniblock_number, - &HashMap::from_iter([(contract.hash, 
be_words_to_bytes(&contract.code))]), + &HashMap::from([(contract.hash, be_words_to_bytes(&contract.code))]), ) .await .unwrap(); @@ -249,49 +263,62 @@ impl StateKeeperIO for ExternalIO { async fn load_pending_batch(&mut self) -> Option { let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); - let pending_miniblock_number = { - let (_, last_miniblock_number_included_in_l1_batch) = storage - .blocks_dal() - .get_miniblock_range_of_l1_batch(self.current_l1_batch_number - 1) - .await - .unwrap() - .unwrap(); - last_miniblock_number_included_in_l1_batch + 1 - }; - let pending_miniblock_header = storage - .blocks_dal() - .get_miniblock_header(pending_miniblock_number) + let mut pending_miniblock_header = self + .l1_batch_params_provider + .load_first_miniblock_in_batch(&mut storage, self.current_l1_batch_number) .await + .with_context(|| { + format!( + "failed loading first miniblock for L1 batch #{}", + self.current_l1_batch_number + ) + }) .unwrap()?; - let fee_account = pending_miniblock_header.fee_account_address; - - if pending_miniblock_header.protocol_version.is_none() { + if !pending_miniblock_header.has_protocol_version() { // Fetch protocol version ID for pending miniblocks to know which VM to use to re-execute them. let sync_block = self .main_node_client - .fetch_l2_block(pending_miniblock_header.number, false) + .fetch_l2_block(pending_miniblock_header.number(), false) .await .expect("Failed to fetch block from the main node") .expect("Block must exist"); // Loading base system contracts will insert protocol version in the database if it's not present there. - let _ = self - .load_base_system_contracts_by_version_id(sync_block.protocol_version) + self.load_base_system_contracts_by_version_id(sync_block.protocol_version) .await; storage .blocks_dal() .set_protocol_version_for_pending_miniblocks(sync_block.protocol_version) .await .unwrap(); + pending_miniblock_header.set_protocol_version(sync_block.protocol_version); } - load_pending_batch( - &mut storage, - self.current_l1_batch_number, - fee_account, - self.validation_computational_gas_limit, - self.chain_id, - ) - .await + let (system_env, l1_batch_env) = self + .l1_batch_params_provider + .load_l1_batch_params( + &mut storage, + &pending_miniblock_header, + self.validation_computational_gas_limit, + self.chain_id, + ) + .await + .with_context(|| { + format!( + "failed loading parameters for pending L1 batch #{}", + self.current_l1_batch_number + ) + }) + .unwrap(); + let data = load_pending_batch(&mut storage, system_env, l1_batch_env) + .await + .with_context(|| { + format!( + "failed loading data for re-execution for pending L1 batch #{}", + self.current_l1_batch_number + ) + }) + .unwrap(); + Some(data) } async fn wait_for_new_batch_params( @@ -315,14 +342,10 @@ impl StateKeeperIO for ExternalIO { number, self.current_l1_batch_number, "Batch number mismatch" ); - tracing::info!("Getting previous L1 batch hash and miniblock hash"); - let (previous_l1_batch_hash, previous_miniblock_hash) = future::join( - self.load_previous_l1_batch_hash(), - self.load_previous_miniblock_hash(), - ) - .await; + let previous_l1_batch_hash = self.wait_for_previous_l1_batch_hash().await; tracing::info!( - "Previous L1 batch hash: {previous_l1_batch_hash}, previous miniblock hash: {previous_miniblock_hash}" + "Previous L1 batch hash: {previous_l1_batch_hash:?}, previous miniblock hash: {:?}", + self.prev_miniblock_hash ); let base_system_contracts = self @@ -340,7 +363,7 @@ impl StateKeeperIO for ExternalIO { l1_gas_price, 
), miniblock_number, - previous_miniblock_hash, + self.prev_miniblock_hash, base_system_contracts, self.validation_computational_gas_limit, protocol_version, @@ -349,7 +372,7 @@ impl StateKeeperIO for ExternalIO { )); } Some(other) => { - panic!("Unexpected action in the action queue: {:?}", other); + panic!("Unexpected action in the action queue: {other:?}"); } None => { tokio::time::sleep(POLL_INTERVAL).await; @@ -362,7 +385,6 @@ impl StateKeeperIO for ExternalIO { async fn wait_for_new_miniblock_params( &mut self, max_wait: Duration, - _prev_miniblock_timestamp: u64, ) -> Option { // Wait for the next miniblock to appear in the queue. let actions = &mut self.actions; @@ -395,13 +417,11 @@ impl StateKeeperIO for ExternalIO { } Some(other) => { panic!( - "Unexpected action in the queue while waiting for the next miniblock {:?}", - other + "Unexpected action in the queue while waiting for the next miniblock: {other:?}" ); } - _ => { + None => { tokio::time::sleep(POLL_INTERVAL).await; - continue; } } } @@ -466,7 +486,7 @@ impl StateKeeperIO for ExternalIO { self.sync_state .set_local_block(self.current_miniblock_number); tracing::info!("Miniblock {} is sealed", self.current_miniblock_number); - self.current_miniblock_number += 1; + self.update_miniblock_fields(&updates_manager.miniblock); } async fn seal_l1_batch( @@ -488,36 +508,32 @@ impl StateKeeperIO for ExternalIO { self.miniblock_sealer_handle.wait_for_all_commands().await; let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); - let mut transaction = storage.start_transaction().await.unwrap(); - updates_manager + let fictive_miniblock = updates_manager .seal_l1_batch( - &mut transaction, + &mut storage, self.current_miniblock_number, l1_batch_env, finished_batch, self.l2_erc20_bridge_addr, ) .await; - transaction.commit().await.unwrap(); + drop(storage); + self.update_miniblock_fields(&fictive_miniblock); tracing::info!("Batch {} is sealed", self.current_l1_batch_number); - - // Mimic the metric emitted by the main node to reuse existing Grafana charts. - APP_METRICS.block_number[&BlockStage::Sealed].set(self.current_l1_batch_number.0.into()); - - self.sync_state - .set_local_block(self.current_miniblock_number); - self.current_miniblock_number += 1; // Due to fictive miniblock being sealed. 
self.current_l1_batch_number += 1; Ok(()) } async fn load_previous_batch_version_id(&mut self) -> Option { - let mut storage = self.pool.access_storage().await.unwrap(); - storage - .blocks_dal() - .get_batch_protocol_version_id(self.current_l1_batch_number - 1) + let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); + let prev_l1_batch_number = self.current_l1_batch_number - 1; + self.l1_batch_params_provider + .load_l1_batch_protocol_version(&mut storage, prev_l1_batch_number) .await + .with_context(|| { + format!("failed loading protocol version for L1 batch #{prev_l1_batch_number}") + }) .unwrap() } diff --git a/core/lib/zksync_core/src/sync_layer/fetcher.rs b/core/lib/zksync_core/src/sync_layer/fetcher.rs index 98e0a025ea8..5161166b212 100644 --- a/core/lib/zksync_core/src/sync_layer/fetcher.rs +++ b/core/lib/zksync_core/src/sync_layer/fetcher.rs @@ -15,7 +15,10 @@ use super::{ sync_action::{ActionQueueSender, SyncAction}, SyncState, }; -use crate::metrics::{TxStage, APP_METRICS}; +use crate::{ + metrics::{TxStage, APP_METRICS}, + state_keeper::io::common::IoCursor, +}; const DELAY_INTERVAL: Duration = Duration::from_millis(500); const RETRY_DELAY_INTERVAL: Duration = Duration::from_secs(5); @@ -70,32 +73,10 @@ impl TryFrom for FetchedBlock { } } -/// Cursor of [`MainNodeFetcher`]. -#[derive(Debug)] -pub struct FetcherCursor { - // Fields are public for testing purposes. - pub(crate) next_miniblock: MiniblockNumber, - pub(super) prev_miniblock_hash: H256, - pub(super) l1_batch: L1BatchNumber, -} - -impl FetcherCursor { - /// Loads the cursor from Postgres. - pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { - // TODO (PLA-703): Support no L1 batches / miniblocks in the storage - let last_sealed_l1_batch_number = storage - .blocks_dal() - .get_sealed_l1_batch_number() - .await - .context("Failed getting sealed L1 batch number")? - .context("No L1 batches sealed")?; - let last_miniblock_header = storage - .blocks_dal() - .get_last_sealed_miniblock_header() - .await - .context("Failed getting sealed miniblock header")? - .context("No miniblocks sealed")?; - +impl IoCursor { + /// Loads this cursor from storage and modifies it to account for the pending L1 batch if necessary. + pub(crate) async fn for_fetcher(storage: &mut StorageProcessor<'_>) -> anyhow::Result { + let mut this = Self::new(storage).await?; // It's important to know whether we have opened a new batch already or just sealed the previous one. // Depending on it, we must either insert `OpenBatch` item into the queue, or not. let was_new_batch_open = storage @@ -103,24 +84,10 @@ impl FetcherCursor { .pending_batch_exists() .await .context("Failed checking whether pending L1 batch exists")?; - - // Miniblocks are always fully processed. - let next_miniblock = last_miniblock_header.number + 1; - let prev_miniblock_hash = last_miniblock_header.hash; - // Decide whether the next batch should be explicitly opened or not. - let l1_batch = if was_new_batch_open { - // No `OpenBatch` action needed. - last_sealed_l1_batch_number + 1 - } else { - // We need to open the next batch. - last_sealed_l1_batch_number - }; - - Ok(Self { - next_miniblock, - prev_miniblock_hash, - l1_batch, - }) + if !was_new_batch_open { + this.l1_batch -= 1; // Should continue from the last L1 batch present in the storage + } + Ok(this) } pub(crate) fn advance(&mut self, block: FetchedBlock) -> Vec { @@ -196,36 +163,39 @@ impl FetcherCursor { new_actions } +} - /// Builds a fetcher from this cursor. 
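`IoCursor` (defined in `io/common/mod.rs`, which is not shown in this hunk) replaces the cursor logic deleted here. A sketch of how such a cursor can be restored from possibly pruned storage, with hypothetical stand-ins for the DAL queries:

```rust
use anyhow::Context as _;

/// Hypothetical stand-ins for DAL queries: `(number, hash, timestamp)` of the
/// last sealed miniblock, number of the last sealed L1 batch, and the snapshot
/// recovery record `(miniblock number, hash, timestamp, L1 batch number)`.
async fn last_sealed_miniblock() -> Option<(u32, [u8; 32], u64)> { None }
async fn last_sealed_l1_batch() -> Option<u32> { None }
async fn snapshot_recovery_status() -> Option<(u32, [u8; 32], u64, u32)> { None }

#[derive(Debug)]
struct IoCursor {
    next_miniblock: u32,
    prev_miniblock_hash: [u8; 32],
    prev_miniblock_timestamp: u64,
    l1_batch: u32,
}

/// Prefer the last locally stored miniblock; if the node recovered from a
/// snapshot and has not sealed anything yet, continue from the recovery record.
async fn restore_cursor() -> anyhow::Result<IoCursor> {
    if let Some((number, hash, timestamp)) = last_sealed_miniblock().await {
        let l1_batch = last_sealed_l1_batch()
            .await
            .context("no L1 batch despite a sealed miniblock")?;
        Ok(IoCursor {
            next_miniblock: number + 1,
            prev_miniblock_hash: hash,
            prev_miniblock_timestamp: timestamp,
            l1_batch: l1_batch + 1,
        })
    } else {
        let (number, hash, timestamp, l1_batch) = snapshot_recovery_status()
            .await
            .context("neither miniblocks nor a snapshot recovery record in storage")?;
        Ok(IoCursor {
            next_miniblock: number + 1,
            prev_miniblock_hash: hash,
            prev_miniblock_timestamp: timestamp,
            l1_batch: l1_batch + 1,
        })
    }
}
```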
- pub fn into_fetcher( - self, +/// Structure responsible for fetching batches and miniblock data from the main node. +#[derive(Debug)] +pub struct MainNodeFetcher { + // Fields are public for testing purposes. + pub(super) client: CachingMainNodeClient, + pub(super) cursor: IoCursor, + pub(super) actions: ActionQueueSender, + pub(super) sync_state: SyncState, + pub(super) stop_receiver: watch::Receiver, +} + +impl MainNodeFetcher { + pub async fn new( + storage: &mut StorageProcessor<'_>, client: Box, actions: ActionQueueSender, sync_state: SyncState, stop_receiver: watch::Receiver, - ) -> MainNodeFetcher { - MainNodeFetcher { + ) -> anyhow::Result { + let cursor = IoCursor::for_fetcher(storage) + .await + .context("failed getting I/O cursor from Postgres")?; + Ok(Self { client: CachingMainNodeClient::new(client), - cursor: self, + cursor, actions, sync_state, stop_receiver, - } + }) } -} -/// Structure responsible for fetching batches and miniblock data from the main node. -#[derive(Debug)] -pub struct MainNodeFetcher { - client: CachingMainNodeClient, - cursor: FetcherCursor, - actions: ActionQueueSender, - sync_state: SyncState, - stop_receiver: watch::Receiver, -} - -impl MainNodeFetcher { pub async fn run(mut self) -> anyhow::Result<()> { tracing::info!( "Starting the fetcher routine. Initial miniblock: {}, initial l1 batch: {}", diff --git a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs b/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs deleted file mode 100644 index de9f00093fa..00000000000 --- a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs +++ /dev/null @@ -1,38 +0,0 @@ -//! Conversion logic between server and consensus types. -use anyhow::Context as _; -use zksync_consensus_roles::validator::FinalBlock; -use zksync_dal::blocks_dal::ConsensusBlockFields; -use zksync_types::MiniblockNumber; - -use crate::{consensus, sync_layer::fetcher::FetchedBlock}; - -impl FetchedBlock { - pub(super) fn from_gossip_block( - block: &FinalBlock, - last_in_batch: bool, - ) -> anyhow::Result { - let number = u32::try_from(block.header.number.0) - .context("Integer overflow converting block number")?; - let payload = consensus::Payload::decode(&block.payload) - .context("Failed deserializing block payload")?; - - Ok(Self { - number: MiniblockNumber(number), - l1_batch_number: payload.l1_batch_number, - last_in_batch, - protocol_version: payload.protocol_version, - timestamp: payload.timestamp, - reference_hash: Some(payload.hash), - l1_gas_price: payload.l1_gas_price, - l2_fair_gas_price: payload.l2_fair_gas_price, - fair_pubdata_price: payload.fair_pubdata_price, - virtual_blocks: payload.virtual_blocks, - operator_address: payload.operator_address, - transactions: payload.transactions, - consensus: Some(ConsensusBlockFields { - parent: block.header.parent, - justification: block.justification.clone(), - }), - }) - } -} diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 2fc010f4a78..fc78b7d9678 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -6,24 +6,30 @@ use std::{ time::{Duration, Instant}, }; +use test_casing::test_casing; use tokio::{sync::watch, task::JoinHandle}; use zksync_config::configs::chain::NetworkConfig; +use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::{ + api, + block::MiniblockHasher, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, + 
snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, }; -use super::{fetcher::FetcherCursor, sync_action::SyncAction, *}; +use super::{sync_action::SyncAction, *}; use crate::{ api_server::web3::tests::spawn_http_server, consensus::testonly::MockMainNodeClient, genesis::{ensure_genesis_state, GenesisParams}, state_keeper::{ - seal_criteria::NoopSealer, tests::TestBatchExecutorBuilder, MiniblockSealer, - ZkSyncStateKeeper, + io::common::IoCursor, seal_criteria::NoopSealer, tests::TestBatchExecutorBuilder, + MiniblockSealer, ZkSyncStateKeeper, }, - utils::testonly::{create_l1_batch_metadata, create_l2_transaction}, + sync_layer::{client::CachingMainNodeClient, fetcher::MainNodeFetcher}, + utils::testonly::{create_l1_batch_metadata, create_l2_transaction, prepare_recovery_snapshot}, }; const TEST_TIMEOUT: Duration = Duration::from_secs(10); @@ -52,12 +58,15 @@ pub(super) struct StateKeeperHandles { impl StateKeeperHandles { /// `tx_hashes` are grouped by the L1 batch. - pub async fn new(pool: ConnectionPool, actions: ActionQueue, tx_hashes: &[&[H256]]) -> Self { + pub async fn new( + pool: ConnectionPool, + main_node_client: MockMainNodeClient, + actions: ActionQueue, + tx_hashes: &[&[H256]], + ) -> Self { assert!(!tx_hashes.is_empty()); assert!(tx_hashes.iter().all(|tx_hashes| !tx_hashes.is_empty())); - ensure_genesis(&mut pool.access_storage().await.unwrap()).await; - let sync_state = SyncState::new(); let (miniblock_sealer, miniblock_sealer_handle) = MiniblockSealer::new(pool.clone(), 5); tokio::spawn(miniblock_sealer.run()); @@ -67,12 +76,13 @@ impl StateKeeperHandles { pool, actions, sync_state.clone(), - Box::::default(), + Box::new(main_node_client), Address::repeat_byte(1), u32::MAX, L2ChainId::default(), ) - .await; + .await + .unwrap(); let (stop_sender, stop_receiver) = watch::channel(false); let mut batch_executor_base = TestBatchExecutorBuilder::default(); @@ -140,33 +150,65 @@ fn extract_tx_hashes<'a>(actions: impl IntoIterator) -> V .collect() } +/// Returns a mock snapshot recovery status equivalent to "recovering" from the genesis block. 
+fn genesis_snapshot_recovery_status() -> SnapshotRecoveryStatus { + SnapshotRecoveryStatus { + l1_batch_number: L1BatchNumber(0), + l1_batch_root_hash: H256::zero(), // unused + l1_batch_timestamp: 0, + miniblock_number: MiniblockNumber(0), + miniblock_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), + miniblock_timestamp: 0, + protocol_version: ProtocolVersionId::default(), + storage_logs_chunks_processed: vec![], + } +} + +#[test_casing(2, [false, true])] #[tokio::test] -async fn external_io_basics() { +async fn external_io_basics(snapshot_recovery: bool) { let pool = ConnectionPool::test_pool().await; - let open_l1_batch = open_l1_batch(1, 1, 1); + let mut storage = pool.access_storage().await.unwrap(); + let snapshot = if snapshot_recovery { + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await + } else { + ensure_genesis(&mut storage).await; + genesis_snapshot_recovery_status() + }; + + let open_l1_batch = open_l1_batch( + snapshot.l1_batch_number.0 + 1, + snapshot.miniblock_timestamp + 1, + snapshot.miniblock_number.0 + 1, + ); let tx = create_l2_transaction(10, 100); let tx_hash = tx.hash(); let tx = SyncAction::Tx(Box::new(tx.into())); let actions = vec![open_l1_batch, tx, SyncAction::SealMiniblock]; let (actions_sender, action_queue) = ActionQueue::new(); - let state_keeper = - StateKeeperHandles::new(pool.clone(), action_queue, &[&extract_tx_hashes(&actions)]).await; + let client = MockMainNodeClient::default(); + let state_keeper = StateKeeperHandles::new( + pool.clone(), + client, + action_queue, + &[&extract_tx_hashes(&actions)], + ) + .await; actions_sender.push_actions(actions).await; // Wait until the miniblock is sealed. state_keeper - .wait(|state| state.get_local_block() == MiniblockNumber(1)) + .wait(|state| state.get_local_block() == snapshot.miniblock_number + 1) .await; // Check that the miniblock is persisted. - let mut storage = pool.access_storage().await.unwrap(); let miniblock = storage .blocks_dal() - .get_miniblock_header(MiniblockNumber(1)) + .get_miniblock_header(snapshot.miniblock_number + 1) .await .unwrap() - .expect("Miniblock #1 is not persisted"); - assert_eq!(miniblock.timestamp, 1); + .expect("New miniblock is not persisted"); + assert_eq!(miniblock.timestamp, snapshot.miniblock_timestamp + 1); let expected_fee_input = BatchFeeInput::PubdataIndependent(PubdataIndependentBatchFeeModelInput { @@ -187,12 +229,112 @@ async fn external_io_basics() { .get(0) .cloned() .expect("Transaction not persisted"); - assert_eq!(tx_receipt.block_number, 1.into()); + assert_eq!( + tx_receipt.block_number, + (snapshot.miniblock_number.0 + 1).into() + ); assert_eq!(tx_receipt.transaction_index, 0.into()); } -pub(super) async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPool) -> Vec { - let open_l1_batch = open_l1_batch(1, 1, 1); +#[test_casing(2, [false, true])] +#[tokio::test] +async fn external_io_works_without_local_protocol_version(snapshot_recovery: bool) { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let snapshot = if snapshot_recovery { + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await + } else { + ensure_genesis(&mut storage).await; + genesis_snapshot_recovery_status() + }; + + let mut open_l1_batch = open_l1_batch( + snapshot.l1_batch_number.0 + 1, + snapshot.miniblock_timestamp + 1, + snapshot.miniblock_number.0 + 1, + ); + if let SyncAction::OpenBatch { + protocol_version, .. 
+    } = &mut open_l1_batch
+    {
+        *protocol_version = ProtocolVersionId::next();
+    } else {
+        unreachable!();
+    };
+
+    let tx = create_l2_transaction(10, 100);
+    let tx = SyncAction::Tx(Box::new(tx.into()));
+    let actions = vec![open_l1_batch, tx, SyncAction::SealMiniblock];
+
+    let (actions_sender, action_queue) = ActionQueue::new();
+    let mut client = MockMainNodeClient::default();
+    let next_protocol_version = api::ProtocolVersion {
+        version_id: ProtocolVersionId::next() as u16,
+        timestamp: snapshot.miniblock_timestamp + 1,
+        base_system_contracts: BaseSystemContractsHashes {
+            bootloader: H256::repeat_byte(1),
+            default_aa: H256::repeat_byte(2),
+        },
+        ..api::ProtocolVersion::default()
+    };
+    client.insert_protocol_version(next_protocol_version.clone());
+
+    let state_keeper = StateKeeperHandles::new(
+        pool.clone(),
+        client,
+        action_queue,
+        &[&extract_tx_hashes(&actions)],
+    )
+    .await;
+    actions_sender.push_actions(actions).await;
+    // Wait until the miniblock is sealed.
+    state_keeper
+        .wait(|state| state.get_local_block() == snapshot.miniblock_number + 1)
+        .await;
+
+    // Check that the miniblock and the protocol version for it are persisted.
+    let persisted_protocol_version = storage
+        .protocol_versions_dal()
+        .get_protocol_version(ProtocolVersionId::next())
+        .await
+        .expect("next protocol version not persisted");
+    assert_eq!(
+        persisted_protocol_version.timestamp,
+        next_protocol_version.timestamp
+    );
+    assert_eq!(
+        persisted_protocol_version.base_system_contracts_hashes,
+        next_protocol_version.base_system_contracts
+    );
+
+    let miniblock = storage
+        .blocks_dal()
+        .get_miniblock_header(snapshot.miniblock_number + 1)
+        .await
+        .unwrap()
+        .expect("New miniblock is not persisted");
+    assert_eq!(miniblock.timestamp, snapshot.miniblock_timestamp + 1);
+    assert_eq!(miniblock.protocol_version, Some(ProtocolVersionId::next()));
+}
+
+pub(super) async fn run_state_keeper_with_multiple_miniblocks(
+    pool: ConnectionPool,
+    snapshot_recovery: bool,
+) -> (SnapshotRecoveryStatus, Vec<H256>) {
+    let mut storage = pool.access_storage().await.unwrap();
+    let snapshot = if snapshot_recovery {
+        prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await
+    } else {
+        ensure_genesis(&mut storage).await;
+        genesis_snapshot_recovery_status()
+    };
+    drop(storage);
+
+    let open_l1_batch = open_l1_batch(
+        snapshot.l1_batch_number.0 + 1,
+        snapshot.miniblock_timestamp + 1,
+        snapshot.miniblock_number.0 + 1,
+    );
     let txs = (0..5).map(|_| {
         let tx = create_l2_transaction(10, 100);
         SyncAction::Tx(Box::new(tx.into()))
     });
@@ -203,8 +345,8 @@ pub(super) async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPo
         .collect();
 
     let open_miniblock = SyncAction::Miniblock {
-        number: MiniblockNumber(2),
-        timestamp: 2,
+        number: snapshot.miniblock_number + 2,
+        timestamp: snapshot.miniblock_timestamp + 2,
         virtual_blocks: 1,
     };
     let more_txs = (0..3).map(|_| {
@@ -222,41 +364,47 @@ pub(super) async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPo
             .chain(&second_miniblock_actions),
     );
     let (actions_sender, action_queue) = ActionQueue::new();
-    let state_keeper = StateKeeperHandles::new(pool, action_queue, &[&tx_hashes]).await;
+    let client = MockMainNodeClient::default();
+    let state_keeper = StateKeeperHandles::new(pool, client, action_queue, &[&tx_hashes]).await;
     actions_sender.push_actions(first_miniblock_actions).await;
     actions_sender.push_actions(second_miniblock_actions).await;
     // Wait until both miniblocks are sealed.
     state_keeper
-        .wait(|state| state.get_local_block() == MiniblockNumber(2))
+        .wait(|state| state.get_local_block() == snapshot.miniblock_number + 2)
         .await;
-    tx_hashes
+    (snapshot, tx_hashes)
 }
 
+#[test_casing(2, [false, true])]
 #[tokio::test]
-async fn external_io_with_multiple_miniblocks() {
+async fn external_io_with_multiple_miniblocks(snapshot_recovery: bool) {
     let pool = ConnectionPool::test_pool().await;
-    let tx_hashes = run_state_keeper_with_multiple_miniblocks(pool.clone()).await;
+    let (snapshot, tx_hashes) =
+        run_state_keeper_with_multiple_miniblocks(pool.clone(), snapshot_recovery).await;
     assert_eq!(tx_hashes.len(), 8);
 
     // Check that both miniblocks are persisted.
-    let tx_hashes_by_miniblock = [(1, &tx_hashes[..5]), (2, &tx_hashes[5..])];
+    let tx_hashes_by_miniblock = [
+        (snapshot.miniblock_number + 1, &tx_hashes[..5]),
+        (snapshot.miniblock_number + 2, &tx_hashes[5..]),
+    ];
     let mut storage = pool.access_storage().await.unwrap();
     for (number, expected_tx_hashes) in tx_hashes_by_miniblock {
         let miniblock = storage
             .blocks_dal()
-            .get_miniblock_header(MiniblockNumber(number))
+            .get_miniblock_header(number)
             .await
             .unwrap()
-            .unwrap_or_else(|| panic!("Miniblock #{} is not persisted", number));
+            .unwrap_or_else(|| panic!("Miniblock #{number} is not persisted"));
         assert_eq!(miniblock.l2_tx_count, expected_tx_hashes.len() as u16);
-        assert_eq!(miniblock.timestamp, u64::from(number));
+        assert_eq!(miniblock.timestamp, u64::from(number.0));
 
         let sync_block = storage
             .sync_dal()
-            .sync_block(MiniblockNumber(number), true)
+            .sync_block(number, true)
             .await
             .unwrap()
-            .unwrap_or_else(|| panic!("Sync block #{} is not persisted", number));
+            .unwrap_or_else(|| panic!("Sync block #{number} is not persisted"));
 
         let transactions = sync_block.transactions.unwrap();
         assert_eq!(transactions.len(), expected_tx_hashes.len());
@@ -265,43 +413,54 @@ async fn external_io_with_multiple_miniblocks() {
     }
     drop(storage);
 
-    test_external_io_recovery(pool, tx_hashes).await;
+    test_external_io_recovery(pool, &snapshot, tx_hashes).await;
 }
 
-async fn test_external_io_recovery(pool: ConnectionPool, mut tx_hashes: Vec<H256>) {
+async fn test_external_io_recovery(
+    pool: ConnectionPool,
+    snapshot: &SnapshotRecoveryStatus,
+    mut tx_hashes: Vec<H256>,
+) {
     let new_tx = create_l2_transaction(10, 100);
     tx_hashes.push(new_tx.hash());
     let new_tx = SyncAction::Tx(Box::new(new_tx.into()));
 
     let (actions_sender, action_queue) = ActionQueue::new();
-    let state_keeper = StateKeeperHandles::new(pool.clone(), action_queue, &[&tx_hashes]).await;
+    let client = if snapshot.l1_batch_number > L1BatchNumber(0) {
+        MockMainNodeClient::for_snapshot_recovery(snapshot)
+    } else {
+        MockMainNodeClient::default()
+    };
+
+    let state_keeper =
+        StateKeeperHandles::new(pool.clone(), client, action_queue, &[&tx_hashes]).await;
     // Check that the state keeper state is restored.
     assert_eq!(
         state_keeper.sync_state.get_local_block(),
-        MiniblockNumber(2)
+        snapshot.miniblock_number + 2
     );
 
     // Send new actions and wait until the new miniblock is sealed.
     let open_miniblock = SyncAction::Miniblock {
-        number: MiniblockNumber(3),
-        timestamp: 3,
+        number: snapshot.miniblock_number + 3,
+        timestamp: snapshot.miniblock_timestamp + 3,
         virtual_blocks: 1,
     };
     let actions = vec![open_miniblock, new_tx, SyncAction::SealMiniblock];
     actions_sender.push_actions(actions).await;
     state_keeper
-        .wait(|state| state.get_local_block() == MiniblockNumber(3))
+        .wait(|state| state.get_local_block() == snapshot.miniblock_number + 3)
        .await;
 
     let mut storage = pool.access_storage().await.unwrap();
     let miniblock = storage
         .blocks_dal()
-        .get_miniblock_header(MiniblockNumber(3))
+        .get_miniblock_header(snapshot.miniblock_number + 3)
         .await
         .unwrap()
-        .expect("Miniblock #3 is not persisted");
+        .expect("New miniblock is not persisted");
     assert_eq!(miniblock.l2_tx_count, 1);
-    assert_eq!(miniblock.timestamp, 3);
+    assert_eq!(miniblock.timestamp, snapshot.miniblock_timestamp + 3);
 }
 
 pub(super) async fn mock_l1_batch_hash_computation(pool: ConnectionPool, number: u32) {
@@ -331,6 +490,8 @@ pub(super) async fn mock_l1_batch_hash_computation(pool: ConnectionPool, number:
 pub(super) async fn run_state_keeper_with_multiple_l1_batches(
     pool: ConnectionPool,
 ) -> Vec<Vec<H256>> {
+    ensure_genesis(&mut pool.access_storage().await.unwrap()).await;
+
     let l1_batch = open_l1_batch(1, 1, 1);
     let first_tx = create_l2_transaction(10, 100);
     let first_tx_hash = first_tx.hash();
@@ -354,6 +515,7 @@ pub(super) async fn run_state_keeper_with_multiple_l1_batches(
     let (actions_sender, action_queue) = ActionQueue::new();
     let state_keeper = StateKeeperHandles::new(
         pool.clone(),
+        MockMainNodeClient::default(),
         action_queue,
         &[&[first_tx_hash], &[second_tx_hash]],
     )
@@ -411,10 +573,6 @@ async fn fetcher_basics() {
     let pool = ConnectionPool::test_pool().await;
     let mut storage = pool.access_storage().await.unwrap();
     ensure_genesis(&mut storage).await;
-    let fetcher_cursor = FetcherCursor::new(&mut storage).await.unwrap();
-    assert_eq!(fetcher_cursor.l1_batch, L1BatchNumber(0));
-    assert_eq!(fetcher_cursor.next_miniblock, MiniblockNumber(1));
-    drop(storage);
 
     let mut mock_client = MockMainNodeClient::default();
     mock_client.push_l1_batch(0);
@@ -425,12 +583,19 @@ async fn fetcher_basics() {
     let (actions_sender, mut actions) = ActionQueue::new();
     let (stop_sender, stop_receiver) = watch::channel(false);
     let sync_state = SyncState::default();
-    let fetcher = fetcher_cursor.into_fetcher(
+    let fetcher = MainNodeFetcher::new(
+        &mut storage,
         Box::new(mock_client),
         actions_sender,
         sync_state.clone(),
         stop_receiver,
-    );
+    )
+    .await
+    .unwrap();
+    drop(storage);
+
+    assert_eq!(fetcher.cursor.l1_batch, L1BatchNumber(0));
+    assert_eq!(fetcher.cursor.next_miniblock, MiniblockNumber(1));
 
     let fetcher_task = tokio::spawn(fetcher.run());
 
     // Check that `sync_state` is updated.
@@ -481,21 +646,14 @@ async fn fetcher_basics() {
     fetcher_task.await.unwrap().unwrap();
 }
 
+#[test_casing(2, [false, true])]
 #[tokio::test]
-async fn fetcher_with_real_server() {
+async fn fetcher_with_real_server(snapshot_recovery: bool) {
     let pool = ConnectionPool::test_pool().await;
     // Fill in transactions grouped in multiple miniblocks in the storage.
-    let tx_hashes = run_state_keeper_with_multiple_miniblocks(pool.clone()).await;
+    let (snapshot, tx_hashes) =
+        run_state_keeper_with_multiple_miniblocks(pool.clone(), snapshot_recovery).await;
     let mut tx_hashes = VecDeque::from(tx_hashes);
-    let mut connection = pool.access_storage().await.unwrap();
-    let genesis_miniblock_hash = connection
-        .blocks_dal()
-        .get_miniblock_header(MiniblockNumber(0))
-        .await
-        .unwrap()
-        .expect("No genesis miniblock")
-        .hash;
-    drop(connection);
 
     // Start the API server.
     let network_config = NetworkConfig::for_tests();
@@ -514,23 +672,27 @@
     let sync_state = SyncState::default();
     let (actions_sender, mut actions) = ActionQueue::new();
     let client = <dyn MainNodeClient>::json_rpc(&format!("http://{server_addr}/")).unwrap();
-    let fetcher_cursor = FetcherCursor {
-        next_miniblock: MiniblockNumber(1),
-        prev_miniblock_hash: genesis_miniblock_hash,
-        l1_batch: L1BatchNumber(0),
-    };
-    let fetcher = fetcher_cursor.into_fetcher(
-        Box::new(client),
-        actions_sender,
-        sync_state.clone(),
+    let fetcher = MainNodeFetcher {
+        client: CachingMainNodeClient::new(Box::new(client)),
+        cursor: IoCursor {
+            next_miniblock: snapshot.miniblock_number + 1,
+            prev_miniblock_hash: snapshot.miniblock_hash,
+            prev_miniblock_timestamp: snapshot.miniblock_timestamp,
+            l1_batch: snapshot.l1_batch_number,
+        },
+        actions: actions_sender,
+        sync_state: sync_state.clone(),
         stop_receiver,
-    );
+    };
 
     let fetcher_task = tokio::spawn(fetcher.run());
 
     // Check generated actions.
-    let mut current_miniblock_number = MiniblockNumber(0);
+    let mut current_miniblock_number = snapshot.miniblock_number;
     let mut tx_count_in_miniblock = 0;
-    let miniblock_number_to_tx_count = HashMap::from([(1, 5), (2, 3)]);
+    let miniblock_number_to_tx_count = HashMap::from([
+        (snapshot.miniblock_number + 1, 5),
+        (snapshot.miniblock_number + 2, 3),
+    ]);
     let started_at = Instant::now();
     let deadline = started_at + TEST_TIMEOUT;
     loop {
@@ -543,7 +705,7 @@
             first_miniblock_info,
             ..
         } => {
-            assert_eq!(number, L1BatchNumber(1));
+            assert_eq!(number, snapshot.l1_batch_number + 1);
             current_miniblock_number += 1; // First miniblock is implicitly opened
             tx_count_in_miniblock = 0;
             assert_eq!(first_miniblock_info.0, current_miniblock_number);
@@ -563,7 +725,7 @@
                 tx_count_in_miniblock,
                 miniblock_number_to_tx_count[&current_miniblock_number]
             );
-            if current_miniblock_number == MiniblockNumber(2) {
+            if current_miniblock_number == snapshot.miniblock_number + 2 {
                 break;
             }
         }
diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs
index 625ee9c693b..a1a4f97494c 100644
--- a/core/lib/zksync_core/src/utils/testonly.rs
+++ b/core/lib/zksync_core/src/utils/testonly.rs
@@ -1,5 +1,7 @@
 //! Test utils.
+use std::collections::HashMap; + use multivm::utils::get_max_gas_per_pubdata_byte; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::StorageProcessor; @@ -13,11 +15,14 @@ use zksync_types::{ l2::L2Tx, snapshots::SnapshotRecoveryStatus, transaction_request::PaymasterParams, + tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersion, ProtocolVersionId, StorageLog, H256, U256, }; -use crate::{fee_model::BatchFeeModelInputProvider, l1_gas_price::L1GasPriceProvider}; +use crate::{ + fee_model::BatchFeeModelInputProvider, genesis::GenesisParams, l1_gas_price::L1GasPriceProvider, +}; /// Creates a miniblock header with the specified number and deterministic contents. pub(crate) fn create_miniblock(number: u32) -> MiniblockHeader { @@ -99,10 +104,25 @@ pub(crate) fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u64) -> L tx } +pub(crate) fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { + TransactionExecutionResult { + hash: transaction.hash(), + transaction: transaction.into(), + execution_info: ExecutionMetrics::default(), + execution_status: TxExecutionStatus::Success, + refunded_gas: 0, + operator_suggested_refund: 0, + compressed_bytecodes: vec![], + call_traces: vec![], + revert_reason: None, + } +} + /// Prepares a recovery snapshot without performing genesis. pub(crate) async fn prepare_recovery_snapshot( storage: &mut StorageProcessor<'_>, - l1_batch_number: u32, + l1_batch_number: L1BatchNumber, + miniblock_number: MiniblockNumber, snapshot_logs: &[StorageLog], ) -> SnapshotRecoveryStatus { let mut storage = storage.start_transaction().await.unwrap(); @@ -115,21 +135,45 @@ pub(crate) async fn prepare_recovery_snapshot( .collect(); let l1_batch_root_hash = ZkSyncTree::process_genesis_batch(&tree_instructions).root_hash; - storage + let miniblock = create_miniblock(miniblock_number.0); + let l1_batch = create_l1_batch(l1_batch_number.0); + // Miniblock and L1 batch are intentionally **not** inserted into the storage. + + // Store factory deps for the base system contracts. 
+ let contracts = GenesisParams::mock().base_system_contracts; + + let protocol_version = storage .protocol_versions_dal() - .save_protocol_version_with_tx(ProtocolVersion::default()) + .get_protocol_version(ProtocolVersionId::latest()) .await; - // TODO (PLA-596): Don't insert L1 batches / miniblocks once the relevant foreign keys are removed - let miniblock = create_miniblock(l1_batch_number); - storage - .blocks_dal() - .insert_miniblock(&miniblock) - .await - .unwrap(); - let l1_batch = create_l1_batch(l1_batch_number); + if let Some(protocol_version) = protocol_version { + assert_eq!( + protocol_version.base_system_contracts_hashes, + contracts.hashes(), + "Protocol version set up with incorrect base system contracts" + ); + } else { + storage + .protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion { + base_system_contracts_hashes: contracts.hashes(), + ..ProtocolVersion::default() + }) + .await; + } + let factory_deps = HashMap::from([ + ( + contracts.bootloader.hash, + zksync_utils::be_words_to_bytes(&contracts.bootloader.code), + ), + ( + contracts.default_aa.hash, + zksync_utils::be_words_to_bytes(&contracts.default_aa.code), + ), + ]); storage - .blocks_dal() - .insert_mock_l1_batch(&l1_batch) + .factory_deps_dal() + .insert_factory_deps(miniblock.number, &factory_deps) .await .unwrap(); @@ -144,9 +188,12 @@ pub(crate) async fn prepare_recovery_snapshot( let snapshot_recovery = SnapshotRecoveryStatus { l1_batch_number: l1_batch.number, + l1_batch_timestamp: l1_batch.timestamp, l1_batch_root_hash, miniblock_number: miniblock.number, - miniblock_root_hash: H256::zero(), // not used + miniblock_timestamp: miniblock.timestamp, + miniblock_hash: H256::zero(), // not used + protocol_version: ProtocolVersionId::latest(), storage_logs_chunks_processed: vec![true; 100], }; storage @@ -158,31 +205,6 @@ pub(crate) async fn prepare_recovery_snapshot( snapshot_recovery } -// TODO (PLA-596): Replace with `prepare_recovery_snapshot(.., &[])` -pub(crate) async fn prepare_empty_recovery_snapshot( - storage: &mut StorageProcessor<'_>, - l1_batch_number: u32, -) -> SnapshotRecoveryStatus { - storage - .protocol_versions_dal() - .save_protocol_version_with_tx(ProtocolVersion::default()) - .await; - - let snapshot_recovery = SnapshotRecoveryStatus { - l1_batch_number: l1_batch_number.into(), - l1_batch_root_hash: H256::zero(), - miniblock_number: l1_batch_number.into(), - miniblock_root_hash: H256::zero(), // not used - storage_logs_chunks_processed: vec![true; 100], - }; - storage - .snapshot_recovery_dal() - .insert_initial_recovery_status(&snapshot_recovery) - .await - .unwrap(); - snapshot_recovery -} - /// Mock [`L1GasPriceProvider`] that returns a constant value. #[derive(Debug)] pub(crate) struct MockL1GasPriceProvider(pub u64); diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index ec3c1b7a7b0..0f063fca48d 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -8,6 +8,7 @@ use zksync_system_constants::{ REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, }; use zksync_types::{ + api, fee::Fee, l1::{OpProcessingType, PriorityQueueType}, l2::L2Tx, @@ -86,7 +87,10 @@ impl Account { ) .expect("should create a signed execute transaction"); - tx.set_input(H256::random().0.to_vec(), H256::random()); + // Set the real transaction hash, which is necessary for transaction execution in VM to function properly. 
+ let tx_request = api::TransactionRequest::from(tx.clone()); + let tx_hash = tx_request.get_tx_hash(L2ChainId::default()).unwrap(); + tx.set_input(H256::random().0.to_vec(), tx_hash); tx.into() }
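
Reviewer note: every reworked test in `sync_layer/tests.rs` above follows the same
snapshot-parameterized pattern. Below is a minimal sketch of that pattern for
orientation only. It reuses `prepare_recovery_snapshot`, `ensure_genesis`,
`genesis_snapshot_recovery_status`, and `open_l1_batch` from this diff; the batch and
miniblock numbers (23 / 42) mirror the values used in the tests; the test name and
body are illustrative, not part of the patch.

    #[test_casing(2, [false, true])]
    #[tokio::test]
    async fn example_test(snapshot_recovery: bool) {
        let pool = ConnectionPool::test_pool().await;
        let mut storage = pool.access_storage().await.unwrap();
        // Baseline: either a genuine snapshot (all earlier data pruned), or
        // genesis expressed as "recovery from L1 batch #0 / miniblock #0".
        let snapshot = if snapshot_recovery {
            prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[])
                .await
        } else {
            ensure_genesis(&mut storage).await;
            genesis_snapshot_recovery_status()
        };
        drop(storage);

        // Block numbers and timestamps are offsets from the baseline rather
        // than absolute values, so the same assertions hold in both modes.
        let open_l1_batch = open_l1_batch(
            snapshot.l1_batch_number.0 + 1,
            snapshot.miniblock_timestamp + 1,
            snapshot.miniblock_number.0 + 1,
        );
        // ...push `open_l1_batch` and transactions into the `ActionQueue`, run the
        // state keeper, then assert against `snapshot.miniblock_number + 1`, etc.
    }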