From 015ed9306d70ec60d512455342cacfd43327cdfc Mon Sep 17 00:00:00 2001 From: tomg10 Date: Thu, 21 Mar 2024 00:11:27 +0100 Subject: [PATCH 1/5] consistency checker persistent cursor Signed-off-by: tomg10 --- ...28321969b8c5bce89a83b74ee08f9e2dfcf9b.json | 14 +++ ...c5237c7d77d4cfcfb57ae76d7b7c67016b2b1.json | 20 ++++ ...173835_consistency-checker-cursor.down.sql | 1 + ...20173835_consistency-checker-cursor.up.sql | 9 ++ core/lib/dal/src/blocks_dal.rs | 91 ++++++++++++++----- .../src/api_server/web3/tests/mod.rs | 2 +- .../src/consistency_checker/mod.rs | 17 +++- .../src/consistency_checker/tests/mod.rs | 6 ++ 8 files changed, 135 insertions(+), 25 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-68e5240ab8c75e68c13efad97a628321969b8c5bce89a83b74ee08f9e2dfcf9b.json create mode 100644 core/lib/dal/.sqlx/query-eb2737effced24c9bc1138544d3c5237c7d77d4cfcfb57ae76d7b7c67016b2b1.json create mode 100644 core/lib/dal/migrations/20240320173835_consistency-checker-cursor.down.sql create mode 100644 core/lib/dal/migrations/20240320173835_consistency-checker-cursor.up.sql diff --git a/core/lib/dal/.sqlx/query-68e5240ab8c75e68c13efad97a628321969b8c5bce89a83b74ee08f9e2dfcf9b.json b/core/lib/dal/.sqlx/query-68e5240ab8c75e68c13efad97a628321969b8c5bce89a83b74ee08f9e2dfcf9b.json new file mode 100644 index 00000000000..833de0661fd --- /dev/null +++ b/core/lib/dal/.sqlx/query-68e5240ab8c75e68c13efad97a628321969b8c5bce89a83b74ee08f9e2dfcf9b.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n consistency_checker_info\n SET\n last_processed_l1_batch = $1,\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "68e5240ab8c75e68c13efad97a628321969b8c5bce89a83b74ee08f9e2dfcf9b" +} diff --git a/core/lib/dal/.sqlx/query-eb2737effced24c9bc1138544d3c5237c7d77d4cfcfb57ae76d7b7c67016b2b1.json 
b/core/lib/dal/.sqlx/query-eb2737effced24c9bc1138544d3c5237c7d77d4cfcfb57ae76d7b7c67016b2b1.json new file mode 100644 index 00000000000..71e6860b4e7 --- /dev/null +++ b/core/lib/dal/.sqlx/query-eb2737effced24c9bc1138544d3c5237c7d77d4cfcfb57ae76d7b7c67016b2b1.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n last_processed_l1_batch as \"last_processed_l1_batch!\"\n FROM\n consistency_checker_info\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_processed_l1_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "eb2737effced24c9bc1138544d3c5237c7d77d4cfcfb57ae76d7b7c67016b2b1" +} diff --git a/core/lib/dal/migrations/20240320173835_consistency-checker-cursor.down.sql b/core/lib/dal/migrations/20240320173835_consistency-checker-cursor.down.sql new file mode 100644 index 00000000000..9d0b59df98c --- /dev/null +++ b/core/lib/dal/migrations/20240320173835_consistency-checker-cursor.down.sql @@ -0,0 +1 @@ +DROP TABLE consistency_checker_info; diff --git a/core/lib/dal/migrations/20240320173835_consistency-checker-cursor.up.sql b/core/lib/dal/migrations/20240320173835_consistency-checker-cursor.up.sql new file mode 100644 index 00000000000..359ccacbe59 --- /dev/null +++ b/core/lib/dal/migrations/20240320173835_consistency-checker-cursor.up.sql @@ -0,0 +1,9 @@ +CREATE TABLE consistency_checker_info +( + last_processed_l1_batch BIGINT NOT NULL, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL +); + +INSERT INTO consistency_checker_info(last_processed_l1_batch, created_at, updated_at) +VALUES (1, NOW(), NOW()); diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index fbe6a259222..e4b0e92ea75 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -30,6 +30,45 @@ pub struct BlocksDal<'a, 'c> { } impl BlocksDal<'_, '_> { + pub async fn get_consistency_checker_last_processed_l1_batch( + &mut self, + 
) -> sqlx::Result<L1BatchNumber> { + let row = sqlx::query!( + r#" + SELECT + last_processed_l1_batch AS "last_processed_l1_batch!" + FROM + consistency_checker_info + "# + ) + .instrument("get_consistency_checker_last_processed_l1_batch") + .report_latency() + .fetch_one(self.storage) + .await?; + Ok(L1BatchNumber(row.last_processed_l1_batch as u32)) + } + + pub async fn set_consistency_checker_last_processed_l1_batch( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> sqlx::Result<()> { + sqlx::query!( + r#" + UPDATE consistency_checker_info + SET + last_processed_l1_batch = $1, + updated_at = NOW() + "#, + i64::from(l1_batch_number.0), + ) + .instrument("set_consistency_checker_last_processed_l1_batch") + .report_latency() + .with_arg("eth_tx_id", &l1_batch_number) + .execute(self.storage) + .await?; + Ok(()) + } + pub async fn is_genesis_needed(&mut self) -> sqlx::Result<bool> { let count = sqlx::query!( r#" @@ -580,8 +619,8 @@ impl BlocksDal<'_, '_> { i64::from(header.number.0), &events_queue ) - .execute(transaction.conn()) - .await?; + .execute(transaction.conn()) + .await?; transaction.commit().await?; Ok(()) @@ -912,11 +951,11 @@ impl BlocksDal<'_, '_> { commitment_artifacts.aux_commitments .map(|a| a.bootloader_initial_content_commitment.0.to_vec()), ) - .instrument("save_batch_aux_commitments") - .with_arg("number", &number) - .report_latency() - .execute(&mut transaction) - .await?; + .instrument("save_batch_aux_commitments") + .with_arg("number", &number) + .report_latency() + .execute(&mut transaction) + .await?; transaction.commit().await?; Ok(()) @@ -1006,9 +1045,9 @@ impl BlocksDal<'_, '_> { 1 "# ) - .fetch_optional(self.storage.conn()) - .await? - .map(|row| L1BatchNumber(row.number as u32))) + .fetch_optional(self.storage.conn()) + .await? + .map(|row| L1BatchNumber(row.number as u32))) } /// Returns the number of the last L1 batch for which an Ethereum prove tx exists in the database.
@@ -1069,9 +1108,9 @@ impl BlocksDal<'_, '_> { 1 "# ) - .fetch_optional(self.storage.conn()) - .await? - .map(|record| L1BatchNumber(record.number as u32))) + .fetch_optional(self.storage.conn()) + .await? + .map(|record| L1BatchNumber(record.number as u32))) } /// Returns the number of the last L1 batch for which an Ethereum execute tx was sent and confirmed. @@ -1093,9 +1132,9 @@ impl BlocksDal<'_, '_> { 1 "# ) - .fetch_optional(self.storage.conn()) - .await? - .map(|row| L1BatchNumber(row.number as u32))) + .fetch_optional(self.storage.conn()) + .await? + .map(|row| L1BatchNumber(row.number as u32))) } /// This method returns batches that are confirmed on L1. That is, it doesn't wait for the proofs to be generated. @@ -1383,7 +1422,9 @@ impl BlocksDal<'_, '_> { .fetch_optional(self.storage.conn()) .await?; - let Some(row) = row else { return Ok(vec![]) }; + let Some(row) = row else { + return Ok(vec![]); + }; let expected_started_point = row.number; // After Postgres 12->14 upgrade this field is now f64 @@ -1412,8 +1453,8 @@ impl BlocksDal<'_, '_> { "#, max_l1_batch_timestamp_seconds_bd, ) - .fetch_one(self.storage.conn()) - .await?; + .fetch_one(self.storage.conn()) + .await?; Ok(if let Some(max_ready_to_send_block) = row.max { // If we found at least one ready to execute batch then we can simply return all blocks between @@ -1911,8 +1952,12 @@ impl BlocksDal<'_, '_> { ) .fetch_one(self.storage.conn()) .await?; - let Some(min) = row.min else { return Ok(None) }; - let Some(max) = row.max else { return Ok(None) }; + let Some(min) = row.min else { + return Ok(None); + }; + let Some(max) = row.max else { + return Ok(None); + }; Ok(Some(( MiniblockNumber(min as u32), MiniblockNumber(max as u32), @@ -2327,8 +2372,8 @@ impl BlocksDal<'_, '_> { i64::from(numbers.start().0), i64::from(numbers.end().0) ) - .execute(self.storage.conn()) - .await?; + .execute(self.storage.conn()) + .await?; Ok(execution_result.rows_affected()) } diff --git 
a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs index c837b1e4e1b..4acdc326407 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs @@ -57,7 +57,7 @@ mod snapshots; mod vm; mod ws; -const TEST_TIMEOUT: Duration = Duration::from_secs(20); +const TEST_TIMEOUT: Duration = Duration::from_secs(50); const POLL_INTERVAL: Duration = Duration::from_millis(50); impl ApiServerHandles { diff --git a/core/lib/zksync_core/src/consistency_checker/mod.rs b/core/lib/zksync_core/src/consistency_checker/mod.rs index 5a2ac3d14d9..f1a7ea61b7e 100644 --- a/core/lib/zksync_core/src/consistency_checker/mod.rs +++ b/core/lib/zksync_core/src/consistency_checker/mod.rs @@ -504,11 +504,21 @@ impl ConsistencyChecker { .0 .saturating_sub(self.max_batches_to_recheck) .into(); + + let last_processed_batch = self + .pool + .connection() + .await? + .blocks_dal() + .get_consistency_checker_last_processed_l1_batch() + .await?; + // We shouldn't check batches not present in the storage, and skip the genesis batch since // it's not committed on L1. 
let first_batch_to_check = first_batch_to_check .max(earliest_l1_batch_number) - .max(L1BatchNumber(1)); + .max(L1BatchNumber(1)) + .max(last_processed_batch); tracing::info!( "Last committed L1 batch is #{last_committed_batch}; starting checks from L1 batch #{first_batch_to_check}" ); @@ -534,6 +544,11 @@ impl ConsistencyChecker { match self.check_commitments(batch_number, &local).await { Ok(()) => { + let mut storage = self.pool.connection().await?; + storage + .blocks_dal() + .set_consistency_checker_last_processed_l1_batch(batch_number) + .await?; self.event_handler.update_checked_batch(batch_number); batch_number += 1; } diff --git a/core/lib/zksync_core/src/consistency_checker/tests/mod.rs b/core/lib/zksync_core/src/consistency_checker/tests/mod.rs index b34255b9fe7..18bbf62a5c9 100644 --- a/core/lib/zksync_core/src/consistency_checker/tests/mod.rs +++ b/core/lib/zksync_core/src/consistency_checker/tests/mod.rs @@ -565,6 +565,12 @@ async fn checker_functions_after_snapshot_recovery(delay_batch_insertion: bool) // Wait until the batch is checked. 
let checked_batch = l1_batch_updates_receiver.recv().await.unwrap(); assert_eq!(checked_batch, l1_batch.header.number); + let last_reported_batch = storage + .blocks_dal() + .get_consistency_checker_last_processed_l1_batch() + .await + .unwrap(); + assert_eq!(last_reported_batch, l1_batch.header.number); stop_sender.send_replace(true); checker_task.await.unwrap().unwrap(); From 8bfeeeb6f3943e6be7e80a9e4c9075e193bed413 Mon Sep 17 00:00:00 2001 From: tomg10 Date: Thu, 21 Mar 2024 00:16:58 +0100 Subject: [PATCH 2/5] revert bad formatting Signed-off-by: tomg10 --- core/lib/dal/src/blocks_dal.rs | 52 ++++++++----------- .../src/api_server/web3/tests/mod.rs | 2 +- 2 files changed, 24 insertions(+), 30 deletions(-) diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index e4b0e92ea75..f3ab8697e59 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -619,8 +619,8 @@ impl BlocksDal<'_, '_> { i64::from(header.number.0), &events_queue ) - .execute(transaction.conn()) - .await?; + .execute(transaction.conn()) + .await?; transaction.commit().await?; Ok(()) @@ -951,11 +951,11 @@ impl BlocksDal<'_, '_> { commitment_artifacts.aux_commitments .map(|a| a.bootloader_initial_content_commitment.0.to_vec()), ) - .instrument("save_batch_aux_commitments") - .with_arg("number", &number) - .report_latency() - .execute(&mut transaction) - .await?; + .instrument("save_batch_aux_commitments") + .with_arg("number", &number) + .report_latency() + .execute(&mut transaction) + .await?; transaction.commit().await?; Ok(()) @@ -1045,9 +1045,9 @@ impl BlocksDal<'_, '_> { 1 "# ) - .fetch_optional(self.storage.conn()) - .await? - .map(|row| L1BatchNumber(row.number as u32))) + .fetch_optional(self.storage.conn()) + .await? + .map(|row| L1BatchNumber(row.number as u32))) } /// Returns the number of the last L1 batch for which an Ethereum prove tx exists in the database. 
@@ -1108,9 +1108,9 @@ impl BlocksDal<'_, '_> { 1 "# ) - .fetch_optional(self.storage.conn()) - .await? - .map(|record| L1BatchNumber(record.number as u32))) + .fetch_optional(self.storage.conn()) + .await? + .map(|record| L1BatchNumber(record.number as u32))) } /// Returns the number of the last L1 batch for which an Ethereum execute tx was sent and confirmed. @@ -1132,9 +1132,9 @@ impl BlocksDal<'_, '_> { 1 "# ) - .fetch_optional(self.storage.conn()) - .await? - .map(|row| L1BatchNumber(row.number as u32))) + .fetch_optional(self.storage.conn()) + .await? + .map(|row| L1BatchNumber(row.number as u32))) } /// This method returns batches that are confirmed on L1. That is, it doesn't wait for the proofs to be generated. @@ -1422,9 +1422,7 @@ impl BlocksDal<'_, '_> { .fetch_optional(self.storage.conn()) .await?; - let Some(row) = row else { - return Ok(vec![]); - }; + let Some(row) = row else { return Ok(vec![]) }; let expected_started_point = row.number; // After Postgres 12->14 upgrade this field is now f64 @@ -1453,8 +1451,8 @@ impl BlocksDal<'_, '_> { "#, max_l1_batch_timestamp_seconds_bd, ) - .fetch_one(self.storage.conn()) - .await?; + .fetch_one(self.storage.conn()) + .await?; Ok(if let Some(max_ready_to_send_block) = row.max { // If we found at least one ready to execute batch then we can simply return all blocks between @@ -1952,12 +1950,8 @@ impl BlocksDal<'_, '_> { ) .fetch_one(self.storage.conn()) .await?; - let Some(min) = row.min else { - return Ok(None); - }; - let Some(max) = row.max else { - return Ok(None); - }; + let Some(min) = row.min else { return Ok(None) }; + let Some(max) = row.max else { return Ok(None) }; Ok(Some(( MiniblockNumber(min as u32), MiniblockNumber(max as u32), @@ -2372,8 +2366,8 @@ impl BlocksDal<'_, '_> { i64::from(numbers.start().0), i64::from(numbers.end().0) ) - .execute(self.storage.conn()) - .await?; + .execute(self.storage.conn()) + .await?; Ok(execution_result.rows_affected()) } diff --git 
a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs index 4acdc326407..c837b1e4e1b 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs @@ -57,7 +57,7 @@ mod snapshots; mod vm; mod ws; -const TEST_TIMEOUT: Duration = Duration::from_secs(50); +const TEST_TIMEOUT: Duration = Duration::from_secs(20); const POLL_INTERVAL: Duration = Duration::from_millis(50); impl ApiServerHandles { From 5778dcd9501aa78c9e61046df722c9355b83a21a Mon Sep 17 00:00:00 2001 From: tomg10 Date: Thu, 21 Mar 2024 00:21:04 +0100 Subject: [PATCH 3/5] fix Signed-off-by: tomg10 --- core/lib/dal/src/blocks_dal.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index f3ab8697e59..7be67ab88d8 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -63,7 +63,7 @@ impl BlocksDal<'_, '_> { ) .instrument("set_consistency_checker_last_processed_l1_batch") .report_latency() - .with_arg("eth_tx_id", &l1_batch_number) + .with_arg("l1_batch_number", &l1_batch_number) .execute(self.storage) .await?; Ok(()) From de859077710637b69a1148b56d8c3a08b10efd72 Mon Sep 17 00:00:00 2001 From: tomg10 Date: Thu, 21 Mar 2024 09:36:54 +0100 Subject: [PATCH 4/5] sqlx queries Signed-off-by: tomg10 --- ...d97a628321969b8c5bce89a83b74ee08f9e2dfcf9b.json | 14 -------------- ...f4c1ae6ecbe96475e83dfed603bd305e72460f52c2.json | 14 ++++++++++++++ ...fb985618fd95722a77cc44a2e0519f3740191dc75.json} | 4 ++-- 3 files changed, 16 insertions(+), 16 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-68e5240ab8c75e68c13efad97a628321969b8c5bce89a83b74ee08f9e2dfcf9b.json create mode 100644 core/lib/dal/.sqlx/query-ea682d41219feda3514336f4c1ae6ecbe96475e83dfed603bd305e72460f52c2.json rename core/lib/dal/.sqlx/{query-eb2737effced24c9bc1138544d3c5237c7d77d4cfcfb57ae76d7b7c67016b2b1.json => 
query-f5854ce2c37bc66d38b05c9fb985618fd95722a77cc44a2e0519f3740191dc75.json} (73%) diff --git a/core/lib/dal/.sqlx/query-68e5240ab8c75e68c13efad97a628321969b8c5bce89a83b74ee08f9e2dfcf9b.json b/core/lib/dal/.sqlx/query-68e5240ab8c75e68c13efad97a628321969b8c5bce89a83b74ee08f9e2dfcf9b.json deleted file mode 100644 index 833de0661fd..00000000000 --- a/core/lib/dal/.sqlx/query-68e5240ab8c75e68c13efad97a628321969b8c5bce89a83b74ee08f9e2dfcf9b.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE\n consistency_checker_info\n SET\n last_processed_l1_batch = $1,\n updated_at = NOW()\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "68e5240ab8c75e68c13efad97a628321969b8c5bce89a83b74ee08f9e2dfcf9b" -} diff --git a/core/lib/dal/.sqlx/query-ea682d41219feda3514336f4c1ae6ecbe96475e83dfed603bd305e72460f52c2.json b/core/lib/dal/.sqlx/query-ea682d41219feda3514336f4c1ae6ecbe96475e83dfed603bd305e72460f52c2.json new file mode 100644 index 00000000000..96a71d8f234 --- /dev/null +++ b/core/lib/dal/.sqlx/query-ea682d41219feda3514336f4c1ae6ecbe96475e83dfed603bd305e72460f52c2.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE consistency_checker_info\n SET\n last_processed_l1_batch = $1,\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "ea682d41219feda3514336f4c1ae6ecbe96475e83dfed603bd305e72460f52c2" +} diff --git a/core/lib/dal/.sqlx/query-eb2737effced24c9bc1138544d3c5237c7d77d4cfcfb57ae76d7b7c67016b2b1.json b/core/lib/dal/.sqlx/query-f5854ce2c37bc66d38b05c9fb985618fd95722a77cc44a2e0519f3740191dc75.json similarity index 73% rename from core/lib/dal/.sqlx/query-eb2737effced24c9bc1138544d3c5237c7d77d4cfcfb57ae76d7b7c67016b2b1.json rename to core/lib/dal/.sqlx/query-f5854ce2c37bc66d38b05c9fb985618fd95722a77cc44a2e0519f3740191dc75.json index 71e6860b4e7..b6d90da9f58 100644 --- 
a/core/lib/dal/.sqlx/query-eb2737effced24c9bc1138544d3c5237c7d77d4cfcfb57ae76d7b7c67016b2b1.json +++ b/core/lib/dal/.sqlx/query-f5854ce2c37bc66d38b05c9fb985618fd95722a77cc44a2e0519f3740191dc75.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n last_processed_l1_batch as \"last_processed_l1_batch!\"\n FROM\n consistency_checker_info\n ", + "query": "\n SELECT\n last_processed_l1_batch AS \"last_processed_l1_batch!\"\n FROM\n consistency_checker_info\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "eb2737effced24c9bc1138544d3c5237c7d77d4cfcfb57ae76d7b7c67016b2b1" + "hash": "f5854ce2c37bc66d38b05c9fb985618fd95722a77cc44a2e0519f3740191dc75" } From 4e5805eedeaa33baea0e62c1d92a87b4c4d6c357 Mon Sep 17 00:00:00 2001 From: tomg10 Date: Tue, 26 Mar 2024 13:27:05 +0100 Subject: [PATCH 5/5] PR feedback Signed-off-by: tomg10 --- .../20240320173835_consistency-checker-cursor.up.sql | 2 +- core/lib/zksync_core/src/consistency_checker/mod.rs | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/core/lib/dal/migrations/20240320173835_consistency-checker-cursor.up.sql b/core/lib/dal/migrations/20240320173835_consistency-checker-cursor.up.sql index 359ccacbe59..f19cd95fe8e 100644 --- a/core/lib/dal/migrations/20240320173835_consistency-checker-cursor.up.sql +++ b/core/lib/dal/migrations/20240320173835_consistency-checker-cursor.up.sql @@ -6,4 +6,4 @@ CREATE TABLE consistency_checker_info ); INSERT INTO consistency_checker_info(last_processed_l1_batch, created_at, updated_at) -VALUES (1, NOW(), NOW()); +VALUES (0, NOW(), NOW()); diff --git a/core/lib/zksync_core/src/consistency_checker/mod.rs b/core/lib/zksync_core/src/consistency_checker/mod.rs index f1a7ea61b7e..717a5e1737f 100644 --- a/core/lib/zksync_core/src/consistency_checker/mod.rs +++ b/core/lib/zksync_core/src/consistency_checker/mod.rs @@ -517,8 +517,7 @@ impl ConsistencyChecker { // it's not committed on L1. 
let first_batch_to_check = first_batch_to_check .max(earliest_l1_batch_number) - .max(L1BatchNumber(1)) - .max(last_processed_batch); + .max(L1BatchNumber(last_processed_batch.0 + 1)); tracing::info!( "Last committed L1 batch is #{last_committed_batch}; starting checks from L1 batch #{first_batch_to_check}" );