From 6e9ed8c0499830ba71a22b5e112d94aa7e91d517 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 4 Apr 2024 10:12:40 +0300 Subject: [PATCH 1/9] feat(db): Wrap sqlx errors in DAL (#1522) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Wraps `sqlx::Error` in the core DAL crate using the existing instrumentation tools. ## Why ❔ This allows naturally providing additional context for errors w/o the need to manually specify it in all DAL call sites. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. - [x] Linkcheck has been run via `zk linkcheck`. --- Cargo.lock | 1 + core/bin/external_node/src/init.rs | 6 +- core/bin/external_node/src/main.rs | 3 +- .../external_node/src/version_sync_task.rs | 9 +- core/bin/snapshots_creator/src/creator.rs | 4 +- core/lib/circuit_breaker/src/lib.rs | 6 + .../src/basic_witness_input_producer_dal.rs | 13 +- core/lib/dal/src/blocks_dal.rs | 236 ++++++++---------- core/lib/dal/src/blocks_web3_dal.rs | 61 +++-- core/lib/dal/src/consensus_dal.rs | 133 +++++----- core/lib/dal/src/events_dal.rs | 66 ++--- core/lib/dal/src/events_web3_dal.rs | 174 ++++++------- core/lib/dal/src/factory_deps_dal.rs | 15 +- core/lib/dal/src/lib.rs | 6 +- core/lib/dal/src/models/mod.rs | 18 +- core/lib/dal/src/models/storage_sync.rs | 52 ++-- core/lib/dal/src/snapshot_recovery_dal.rs | 23 +- core/lib/dal/src/snapshots_creator_dal.rs | 8 +- core/lib/dal/src/snapshots_dal.rs | 42 ++-- core/lib/dal/src/storage_logs_dal.rs | 136 +++++----- core/lib/dal/src/storage_logs_dedup_dal.rs | 87 ++++--- core/lib/dal/src/storage_web3_dal.rs | 24 +- core/lib/dal/src/sync_dal.rs | 10 +- core/lib/dal/src/system_dal.rs | 5 +- core/lib/dal/src/tokens_dal.rs | 52 
++-- core/lib/dal/src/tokens_web3_dal.rs | 14 +- core/lib/dal/src/transactions_dal.rs | 13 +- core/lib/dal/src/transactions_web3_dal.rs | 39 ++- core/lib/db_connection/Cargo.toml | 1 + core/lib/db_connection/src/connection.rs | 23 +- core/lib/db_connection/src/connection_pool.rs | 33 +-- core/lib/db_connection/src/error.rs | 186 ++++++++++++++ core/lib/db_connection/src/instrument.rs | 161 ++++++++++-- core/lib/db_connection/src/lib.rs | 3 + core/lib/snapshots_applier/src/lib.rs | 112 ++------- core/lib/state/src/postgres/mod.rs | 5 +- core/lib/state/src/rocksdb/mod.rs | 17 +- core/lib/state/src/rocksdb/recovery.rs | 15 +- core/lib/vm_utils/src/storage.rs | 8 +- .../src/api_server/execution_sandbox/apply.rs | 12 +- .../src/api_server/execution_sandbox/mod.rs | 3 +- .../api_server/execution_sandbox/validate.rs | 6 +- .../api_server/tx_sender/master_pool_sink.rs | 2 +- .../src/api_server/tx_sender/mod.rs | 6 +- .../src/api_server/web3/namespaces/debug.rs | 12 +- .../src/api_server/web3/namespaces/en.rs | 26 +- .../src/api_server/web3/namespaces/eth.rs | 71 +++--- .../api_server/web3/namespaces/snapshots.rs | 12 +- .../src/api_server/web3/namespaces/zks.rs | 62 +++-- .../zksync_core/src/api_server/web3/pubsub.rs | 19 +- .../zksync_core/src/api_server/web3/state.rs | 28 ++- .../src/basic_witness_input_producer/mod.rs | 6 +- .../zksync_core/src/consensus/storage/mod.rs | 33 ++- .../lib/zksync_core/src/consensus/testonly.rs | 8 +- core/lib/zksync_core/src/genesis.rs | 5 +- .../src/metadata_calculator/recovery/mod.rs | 8 +- .../lib/zksync_core/src/reorg_detector/mod.rs | 14 +- .../src/state_keeper/io/common/mod.rs | 12 +- .../state_keeper/io/fee_address_migration.rs | 3 +- .../src/state_keeper/state_keeper_storage.rs | 12 +- .../sync_layer/batch_status_updater/mod.rs | 4 +- .../zksync_core/src/sync_layer/external_io.rs | 3 +- core/lib/zksync_core/src/utils/mod.rs | 9 +- prover/Cargo.lock | 1 + 64 files changed, 1255 insertions(+), 942 deletions(-) create mode 100644 
core/lib/db_connection/src/error.rs diff --git a/Cargo.lock b/Cargo.lock index 9236dbb01bc..3b9473153f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8328,6 +8328,7 @@ dependencies = [ "serde", "serde_json", "sqlx", + "thiserror", "tokio", "tracing", "url", diff --git a/core/bin/external_node/src/init.rs b/core/bin/external_node/src/init.rs index b5e5d232bee..a4087d18646 100644 --- a/core/bin/external_node/src/init.rs +++ b/core/bin/external_node/src/init.rs @@ -30,13 +30,11 @@ pub(crate) async fn ensure_storage_initialized( let genesis_l1_batch = storage .blocks_dal() .get_l1_batch_header(L1BatchNumber(0)) - .await - .context("failed getting genesis batch info")?; + .await?; let snapshot_recovery = storage .snapshot_recovery_dal() .get_applied_snapshot_status() - .await - .context("failed getting snapshot recovery info")?; + .await?; drop(storage); let decision = match (genesis_l1_batch, snapshot_recovery) { diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 88f5534ab2b..3eb7f77c428 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -874,8 +874,7 @@ async fn main() -> anyhow::Result<()> { let sealed_l1_batch_number = connection .blocks_dal() .get_sealed_l1_batch_number() - .await - .context("Failed getting sealed L1 batch number")? + .await? .context( "Cannot roll back pending L1 batch since there are no L1 batches in Postgres", )?; diff --git a/core/bin/external_node/src/version_sync_task.rs b/core/bin/external_node/src/version_sync_task.rs index d4fa2854480..c8e0e566980 100644 --- a/core/bin/external_node/src/version_sync_task.rs +++ b/core/bin/external_node/src/version_sync_task.rs @@ -60,8 +60,7 @@ pub async fn sync_versions( let (mid_miniblock, _) = connection .blocks_dal() .get_miniblock_range_of_l1_batch(mid_batch) - .await - .with_context(|| format!("Failed to get miniblock range for L1 batch #{mid_batch}"))? + .await? 
.with_context(|| { format!("Postgres is inconsistent: missing miniblocks for L1 batch #{mid_batch}") })?; @@ -88,8 +87,7 @@ pub async fn sync_versions( let (remote_first_v22_miniblock, _) = connection .blocks_dal() .get_miniblock_range_of_l1_batch(remote_first_v22_l1_batch) - .await - .with_context(|| format!("Failed to get miniblock range for L1 batch #{remote_first_v22_l1_batch}"))? + .await? .with_context(|| { format!("Postgres is inconsistent: missing miniblocks for L1 batch #{remote_first_v22_l1_batch}") })?; @@ -110,8 +108,7 @@ pub async fn sync_versions( let (local_first_v22_miniblock, _) = transaction .blocks_dal() .get_miniblock_range_of_l1_batch(local_first_v22_l1_batch) - .await - .with_context(|| format!("Failed to get miniblock range for L1 batch #{local_first_v22_l1_batch}"))? + .await? .with_context(|| { format!("Postgres is inconsistent: missing miniblocks for L1 batch #{local_first_v22_l1_batch}") })?; diff --git a/core/bin/snapshots_creator/src/creator.rs b/core/bin/snapshots_creator/src/creator.rs index b630e2a2048..25945ec2866 100644 --- a/core/bin/snapshots_creator/src/creator.rs +++ b/core/bin/snapshots_creator/src/creator.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use anyhow::Context as _; use tokio::sync::Semaphore; use zksync_config::SnapshotsCreatorConfig; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalResult}; use zksync_object_store::ObjectStore; use zksync_types::{ snapshots::{ @@ -67,7 +67,7 @@ pub(crate) struct SnapshotCreator { } impl SnapshotCreator { - async fn connect_to_replica(&self) -> anyhow::Result> { + async fn connect_to_replica(&self) -> DalResult> { self.replica_pool .connection_tagged("snapshots_creator") .await diff --git a/core/lib/circuit_breaker/src/lib.rs b/core/lib/circuit_breaker/src/lib.rs index adbd148b3f1..ad2f3e20a79 100644 --- a/core/lib/circuit_breaker/src/lib.rs +++ b/core/lib/circuit_breaker/src/lib.rs @@ -39,6 +39,12 @@ pub enum 
CircuitBreakerError { Internal(#[from] anyhow::Error), } +impl From for CircuitBreakerError { + fn from(err: zksync_dal::DalError) -> Self { + Self::Internal(err.generalize()) + } +} + /// Checks circuit breakers #[derive(Debug)] pub struct CircuitBreakerChecker { diff --git a/core/lib/dal/src/basic_witness_input_producer_dal.rs b/core/lib/dal/src/basic_witness_input_producer_dal.rs index 5d6cc060f00..f207dfa77c4 100644 --- a/core/lib/dal/src/basic_witness_input_producer_dal.rs +++ b/core/lib/dal/src/basic_witness_input_producer_dal.rs @@ -1,9 +1,11 @@ #![doc = include_str!("../doc/BasicWitnessInputProducerDal.md")] + use std::time::{Duration, Instant}; use sqlx::postgres::types::PgInterval; use zksync_db_connection::{ connection::Connection, + error::DalResult, instrument::InstrumentExt, utils::{duration_to_naive_time, pg_interval_from_duration}, }; @@ -51,7 +53,7 @@ impl BasicWitnessInputProducerDal<'_, '_> { pub async fn create_basic_witness_input_producer_job( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { sqlx::query!( r#" INSERT INTO @@ -64,6 +66,7 @@ impl BasicWitnessInputProducerDal<'_, '_> { BasicWitnessInputProducerJobStatus::Queued as BasicWitnessInputProducerJobStatus, ) .instrument("create_basic_witness_input_producer_job") + .with_arg("l1_batch_number", &l1_batch_number) .report_latency() .execute(self.storage) .await?; @@ -73,7 +76,7 @@ impl BasicWitnessInputProducerDal<'_, '_> { pub async fn get_next_basic_witness_input_producer_job( &mut self, - ) -> sqlx::Result> { + ) -> DalResult> { let l1_batch_number = sqlx::query!( r#" UPDATE basic_witness_input_producer_jobs @@ -150,7 +153,7 @@ impl BasicWitnessInputProducerDal<'_, '_> { l1_batch_number: L1BatchNumber, started_at: Instant, object_path: &str, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { sqlx::query!( r#" UPDATE basic_witness_input_producer_jobs @@ -168,6 +171,7 @@ impl BasicWitnessInputProducerDal<'_, '_> { object_path, ) 
.instrument("mark_job_as_successful") + .with_arg("l1_batch_number", &l1_batch_number) .report_latency() .execute(self.storage) .await?; @@ -180,7 +184,7 @@ impl BasicWitnessInputProducerDal<'_, '_> { l1_batch_number: L1BatchNumber, started_at: Instant, error: String, - ) -> sqlx::Result> { + ) -> DalResult> { let attempts = sqlx::query!( r#" UPDATE basic_witness_input_producer_jobs @@ -202,6 +206,7 @@ impl BasicWitnessInputProducerDal<'_, '_> { BasicWitnessInputProducerJobStatus::Successful as BasicWitnessInputProducerJobStatus, ) .instrument("mark_job_as_failed") + .with_arg("l1_batch_number", &l1_batch_number) .report_latency() .fetch_optional(self.storage) .await? diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index e76226d42cc..b0fc0ef0299 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -2,13 +2,13 @@ use std::{ collections::HashMap, convert::{Into, TryInto}, ops, - ops::RangeInclusive, }; use anyhow::Context as _; use bigdecimal::{BigDecimal, FromPrimitive, ToPrimitive}; use zksync_db_connection::{ - connection::Connection, instrument::InstrumentExt, interpolate_query, match_query_as, + connection::Connection, error::DalResult, instrument::InstrumentExt, interpolate_query, + match_query_as, }; use zksync_types::{ aggregated_operations::AggregatedActionType, @@ -19,7 +19,10 @@ use zksync_types::{ }; use crate::{ - models::storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageMiniblockHeader}, + models::{ + parse_protocol_version, + storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageMiniblockHeader}, + }, Core, CoreDal, }; @@ -31,7 +34,7 @@ pub struct BlocksDal<'a, 'c> { impl BlocksDal<'_, '_> { pub async fn get_consistency_checker_last_processed_l1_batch( &mut self, - ) -> sqlx::Result { + ) -> DalResult { let row = sqlx::query!( r#" SELECT @@ -50,7 +53,7 @@ impl BlocksDal<'_, '_> { pub async fn set_consistency_checker_last_processed_l1_batch( &mut self, l1_batch_number: 
L1BatchNumber, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { sqlx::query!( r#" UPDATE consistency_checker_info @@ -68,7 +71,7 @@ impl BlocksDal<'_, '_> { Ok(()) } - pub async fn is_genesis_needed(&mut self) -> sqlx::Result { + pub async fn is_genesis_needed(&mut self) -> DalResult { let count = sqlx::query!( r#" SELECT @@ -77,13 +80,14 @@ impl BlocksDal<'_, '_> { l1_batches "# ) - .fetch_one(self.storage.conn()) + .instrument("is_genesis_needed") + .fetch_one(self.storage) .await? .count; Ok(count == 0) } - pub async fn get_sealed_l1_batch_number(&mut self) -> sqlx::Result> { + pub async fn get_sealed_l1_batch_number(&mut self) -> DalResult> { let row = sqlx::query!( r#" SELECT @@ -100,7 +104,7 @@ impl BlocksDal<'_, '_> { Ok(row.number.map(|num| L1BatchNumber(num as u32))) } - pub async fn get_sealed_miniblock_number(&mut self) -> sqlx::Result> { + pub async fn get_sealed_miniblock_number(&mut self) -> DalResult> { let row = sqlx::query!( r#" SELECT @@ -118,7 +122,7 @@ impl BlocksDal<'_, '_> { } /// Returns the number of the earliest L1 batch present in the DB, or `None` if there are no L1 batches. - pub async fn get_earliest_l1_batch_number(&mut self) -> sqlx::Result> { + pub async fn get_earliest_l1_batch_number(&mut self) -> DalResult> { let row = sqlx::query!( r#" SELECT @@ -137,7 +141,7 @@ impl BlocksDal<'_, '_> { pub async fn get_last_l1_batch_number_with_metadata( &mut self, - ) -> sqlx::Result> { + ) -> DalResult> { let row = sqlx::query!( r#" SELECT @@ -158,7 +162,7 @@ impl BlocksDal<'_, '_> { pub async fn get_next_l1_batch_ready_for_commitment_generation( &mut self, - ) -> sqlx::Result> { + ) -> DalResult> { let row = sqlx::query!( r#" SELECT @@ -186,7 +190,7 @@ impl BlocksDal<'_, '_> { /// or `None` if there are no such L1 batches. 
pub async fn get_earliest_l1_batch_number_with_metadata( &mut self, - ) -> sqlx::Result> { + ) -> DalResult> { let row = sqlx::query!( r#" SELECT @@ -208,7 +212,7 @@ impl BlocksDal<'_, '_> { pub async fn get_l1_batches_for_eth_tx_id( &mut self, eth_tx_id: u32, - ) -> sqlx::Result> { + ) -> DalResult> { let l1_batches = sqlx::query_as!( StorageL1BatchHeader, r#" @@ -248,7 +252,7 @@ impl BlocksDal<'_, '_> { pub async fn get_storage_l1_batch( &mut self, number: L1BatchNumber, - ) -> sqlx::Result> { + ) -> DalResult> { sqlx::query_as!( StorageL1Batch, r#" @@ -301,7 +305,7 @@ impl BlocksDal<'_, '_> { pub async fn get_l1_batch_header( &mut self, number: L1BatchNumber, - ) -> sqlx::Result> { + ) -> DalResult> { Ok(sqlx::query_as!( StorageL1BatchHeader, r#" @@ -654,9 +658,7 @@ impl BlocksDal<'_, '_> { Ok(()) } - pub async fn get_last_sealed_miniblock_header( - &mut self, - ) -> sqlx::Result> { + pub async fn get_last_sealed_miniblock_header(&mut self) -> DalResult> { let header = sqlx::query_as!( StorageMiniblockHeader, r#" @@ -685,7 +687,8 @@ impl BlocksDal<'_, '_> { 1 "#, ) - .fetch_optional(self.storage.conn()) + .instrument("get_last_sealed_miniblock_header") + .fetch_optional(self.storage) .await?; let Some(header) = header else { @@ -703,7 +706,7 @@ impl BlocksDal<'_, '_> { pub async fn get_miniblock_header( &mut self, miniblock_number: MiniblockNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let header = sqlx::query_as!( StorageMiniblockHeader, r#" @@ -731,7 +734,9 @@ impl BlocksDal<'_, '_> { "#, i64::from(miniblock_number.0), ) - .fetch_optional(self.storage.conn()) + .instrument("get_miniblock_header") + .with_arg("miniblock_number", &miniblock_number) + .fetch_optional(self.storage) .await?; let Some(header) = header else { @@ -749,7 +754,7 @@ impl BlocksDal<'_, '_> { pub async fn mark_miniblocks_as_executed_in_l1_batch( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { sqlx::query!( r#" UPDATE miniblocks @@ -760,7 
+765,9 @@ impl BlocksDal<'_, '_> { "#, l1_batch_number.0 as i32, ) - .execute(self.storage.conn()) + .instrument("mark_miniblocks_as_executed_in_l1_batch") + .with_arg("l1_batch_number", &l1_batch_number) + .execute(self.storage) .await?; Ok(()) } @@ -934,7 +941,7 @@ impl BlocksDal<'_, '_> { pub async fn get_last_committed_to_eth_l1_batch( &mut self, - ) -> anyhow::Result> { + ) -> DalResult> { // We can get 0 block for the first transaction let block = sqlx::query_as!( StorageL1Batch, @@ -992,9 +999,7 @@ impl BlocksDal<'_, '_> { return Ok(None); } - self.get_l1_batch_with_metadata(block) - .await - .context("get_l1_batch_with_metadata()") + self.get_l1_batch_with_metadata(block).await } /// Returns the number of the last L1 batch for which an Ethereum commit tx was sent and confirmed. @@ -1654,7 +1659,7 @@ impl BlocksDal<'_, '_> { pub async fn get_l1_batch_state_root( &mut self, number: L1BatchNumber, - ) -> sqlx::Result> { + ) -> DalResult> { Ok(sqlx::query!( r#" SELECT @@ -1666,7 +1671,9 @@ impl BlocksDal<'_, '_> { "#, i64::from(number.0) ) - .fetch_optional(self.storage.conn()) + .instrument("get_l1_batch_state_root") + .with_arg("number", &number) + .fetch_optional(self.storage) .await? .and_then(|row| row.hash) .map(|hash| H256::from_slice(&hash))) @@ -1702,23 +1709,17 @@ impl BlocksDal<'_, '_> { pub async fn get_l1_batch_metadata( &mut self, number: L1BatchNumber, - ) -> anyhow::Result> { - let Some(l1_batch) = self - .get_storage_l1_batch(number) - .await - .context("get_storage_l1_batch()")? - else { + ) -> DalResult> { + let Some(l1_batch) = self.get_storage_l1_batch(number).await? 
else { return Ok(None); }; - self.get_l1_batch_with_metadata(l1_batch) - .await - .context("get_l1_batch_with_metadata") + self.get_l1_batch_with_metadata(l1_batch).await } pub async fn get_l1_batch_tree_data( &mut self, number: L1BatchNumber, - ) -> anyhow::Result> { + ) -> DalResult> { let row = sqlx::query!( r#" SELECT @@ -1731,8 +1732,11 @@ impl BlocksDal<'_, '_> { "#, i64::from(number.0) ) - .fetch_optional(self.storage.conn()) + .instrument("get_l1_batch_tree_data") + .with_arg("number", &number) + .fetch_optional(self.storage) .await?; + Ok(row.and_then(|row| { Some(L1BatchTreeData { hash: H256::from_slice(&row.hash?), @@ -1744,11 +1748,10 @@ impl BlocksDal<'_, '_> { pub async fn get_l1_batch_with_metadata( &mut self, storage_batch: StorageL1Batch, - ) -> anyhow::Result> { + ) -> DalResult> { let unsorted_factory_deps = self .get_l1_batch_factory_deps(L1BatchNumber(storage_batch.number as u32)) - .await - .context("get_l1_batch_factory_deps()")?; + .await?; let header: L1BatchHeader = storage_batch.clone().into(); let Ok(metadata) = storage_batch.try_into() else { return Ok(None); @@ -1770,7 +1773,7 @@ impl BlocksDal<'_, '_> { pub async fn get_l1_batch_factory_deps( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result>> { + ) -> DalResult>> { Ok(sqlx::query!( r#" SELECT @@ -1784,7 +1787,9 @@ impl BlocksDal<'_, '_> { "#, i64::from(l1_batch_number.0) ) - .fetch_all(self.storage.conn()) + .instrument("get_l1_batch_factory_deps") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_all(self.storage) .await? 
.into_iter() .map(|row| (H256::from_slice(&row.bytecode_hash), row.bytecode)) @@ -1794,50 +1799,52 @@ impl BlocksDal<'_, '_> { pub async fn delete_initial_writes( &mut self, last_batch_to_keep: L1BatchNumber, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { self.delete_initial_writes_inner(Some(last_batch_to_keep)) .await } - pub async fn delete_initial_writes_inner( + async fn delete_initial_writes_inner( &mut self, last_batch_to_keep: Option, - ) -> sqlx::Result<()> { - let block_number = last_batch_to_keep.map_or(-1, |number| i64::from(number.0)); + ) -> DalResult<()> { + let l1_batch_number = last_batch_to_keep.map_or(-1, |number| i64::from(number.0)); sqlx::query!( r#" DELETE FROM initial_writes WHERE l1_batch_number > $1 "#, - block_number + l1_batch_number ) - .execute(self.storage.conn()) + .instrument("delete_initial_writes") + .with_arg("l1_batch_number", &l1_batch_number) + .execute(self.storage) .await?; Ok(()) } + /// Deletes all L1 batches from the storage so that the specified batch number is the last one left. 
- pub async fn delete_l1_batches( - &mut self, - last_batch_to_keep: L1BatchNumber, - ) -> sqlx::Result<()> { + pub async fn delete_l1_batches(&mut self, last_batch_to_keep: L1BatchNumber) -> DalResult<()> { self.delete_l1_batches_inner(Some(last_batch_to_keep)).await } async fn delete_l1_batches_inner( &mut self, last_batch_to_keep: Option, - ) -> sqlx::Result<()> { - let block_number = last_batch_to_keep.map_or(-1, |number| i64::from(number.0)); + ) -> DalResult<()> { + let l1_batch_number = last_batch_to_keep.map_or(-1, |number| i64::from(number.0)); sqlx::query!( r#" DELETE FROM l1_batches WHERE number > $1 "#, - block_number + l1_batch_number ) - .execute(self.storage.conn()) + .instrument("delete_l1_batches") + .with_arg("l1_batch_number", &l1_batch_number) + .execute(self.storage) .await?; Ok(()) } @@ -1846,7 +1853,7 @@ impl BlocksDal<'_, '_> { pub async fn delete_miniblocks( &mut self, last_miniblock_to_keep: MiniblockNumber, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { self.delete_miniblocks_inner(Some(last_miniblock_to_keep)) .await } @@ -1854,7 +1861,7 @@ impl BlocksDal<'_, '_> { async fn delete_miniblocks_inner( &mut self, last_miniblock_to_keep: Option, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { let block_number = last_miniblock_to_keep.map_or(-1, |number| i64::from(number.0)); sqlx::query!( r#" @@ -1864,18 +1871,21 @@ impl BlocksDal<'_, '_> { "#, block_number ) - .execute(self.storage.conn()) + .instrument("delete_miniblocks") + .with_arg("block_number", &block_number) + .execute(self.storage) .await?; Ok(()) } - async fn delete_logs_inner(&mut self) -> sqlx::Result<()> { + async fn delete_logs_inner(&mut self) -> DalResult<()> { sqlx::query!( r#" DELETE FROM storage_logs "#, ) - .execute(self.storage.conn()) + .instrument("delete_logs") + .execute(self.storage) .await?; Ok(()) } @@ -1917,7 +1927,7 @@ impl BlocksDal<'_, '_> { pub async fn get_miniblock_range_of_l1_batch( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result> { + ) 
-> DalResult> { let row = sqlx::query!( r#" SELECT @@ -1930,8 +1940,11 @@ impl BlocksDal<'_, '_> { "#, i64::from(l1_batch_number.0) ) - .fetch_one(self.storage.conn()) + .instrument("get_miniblock_range_of_l1_batch") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_one(self.storage) .await?; + let Some(min) = row.min else { return Ok(None) }; let Some(max) = row.max else { return Ok(None) }; Ok(Some(( @@ -2021,8 +2034,8 @@ impl BlocksDal<'_, '_> { pub async fn get_batch_protocol_version_id( &mut self, l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { - let Some(row) = sqlx::query!( + ) -> DalResult> { + Ok(sqlx::query!( r#" SELECT protocol_version @@ -2033,67 +2046,18 @@ impl BlocksDal<'_, '_> { "#, i64::from(l1_batch_number.0) ) - .fetch_optional(self.storage.conn()) - .await? - else { - return Ok(None); - }; - let Some(v) = row.protocol_version else { - return Ok(None); - }; - Ok(Some((v as u16).try_into()?)) - } - - pub async fn get_miniblock_protocol_version_id( - &mut self, - miniblock_number: MiniblockNumber, - ) -> anyhow::Result> { - let Some(row) = sqlx::query!( - r#" - SELECT - protocol_version - FROM - miniblocks - WHERE - number = $1 - "#, - i64::from(miniblock_number.0) - ) - .fetch_optional(self.storage.conn()) - .await? - else { - return Ok(None); - }; - let Some(v) = row.protocol_version else { - return Ok(None); - }; - Ok(Some((v as u16).try_into()?)) - } - - pub async fn get_miniblock_timestamp( - &mut self, - miniblock_number: MiniblockNumber, - ) -> sqlx::Result> { - Ok(sqlx::query!( - r#" - SELECT - timestamp - FROM - miniblocks - WHERE - number = $1 - "#, - i64::from(miniblock_number.0) - ) - .fetch_optional(self.storage.conn()) + .try_map(|row| row.protocol_version.map(parse_protocol_version).transpose()) + .instrument("get_batch_protocol_version_id") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_optional(self.storage) .await? 
- .map(|row| row.timestamp as u64)) + .flatten()) } pub async fn set_protocol_version_for_pending_miniblocks( &mut self, id: ProtocolVersionId, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { sqlx::query!( r#" UPDATE miniblocks @@ -2104,7 +2068,9 @@ impl BlocksDal<'_, '_> { "#, id as i32, ) - .execute(self.storage.conn()) + .instrument("set_protocol_version_for_pending_miniblocks") + .with_arg("id", &id) + .execute(self.storage) .await?; Ok(()) } @@ -2112,7 +2078,7 @@ impl BlocksDal<'_, '_> { pub async fn get_fee_address_for_miniblock( &mut self, number: MiniblockNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let Some(mut fee_account_address) = self.raw_fee_address_for_miniblock(number).await? else { return Ok(None); @@ -2128,7 +2094,7 @@ impl BlocksDal<'_, '_> { async fn raw_fee_address_for_miniblock( &mut self, number: MiniblockNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let Some(row) = sqlx::query!( r#" SELECT @@ -2140,7 +2106,9 @@ impl BlocksDal<'_, '_> { "#, number.0 as i32 ) - .fetch_optional(self.storage.conn()) + .instrument("raw_fee_address_for_miniblock") + .with_arg("number", &number) + .fetch_optional(self.storage) .await? 
else { return Ok(None); @@ -2192,7 +2160,7 @@ impl BlocksDal<'_, '_> { pub async fn reset_protocol_version_for_l1_batches( &mut self, - l1_batch_range: RangeInclusive, + l1_batch_range: ops::RangeInclusive, protocol_version: ProtocolVersionId, ) -> sqlx::Result<()> { sqlx::query!( @@ -2214,7 +2182,7 @@ impl BlocksDal<'_, '_> { pub async fn reset_protocol_version_for_miniblocks( &mut self, - miniblock_range: RangeInclusive, + miniblock_range: ops::RangeInclusive, protocol_version: ProtocolVersionId, ) -> sqlx::Result<()> { sqlx::query!( @@ -2242,7 +2210,7 @@ impl BlocksDal<'_, '_> { &mut self, fee_address: &mut Address, miniblock_number: MiniblockNumber, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { if *fee_address != Address::default() { return Ok(()); } @@ -2262,7 +2230,9 @@ impl BlocksDal<'_, '_> { "#, miniblock_number.0 as i32 ) - .fetch_optional(self.storage.conn()) + .instrument("maybe_load_fee_address") + .with_arg("miniblock_number", &miniblock_number) + .fetch_optional(self.storage) .await? else { return Ok(()); @@ -2277,7 +2247,7 @@ impl BlocksDal<'_, '_> { pub async fn is_fee_address_migrated( &mut self, number: MiniblockNumber, - ) -> sqlx::Result> { + ) -> DalResult> { Ok(self .raw_fee_address_for_miniblock(number) .await? 
diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 9dcc4cda420..5d756fdbc0e 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -1,5 +1,6 @@ use zksync_db_connection::{ - connection::Connection, instrument::InstrumentExt, interpolate_query, match_query_as, + connection::Connection, error::DalResult, instrument::InstrumentExt, interpolate_query, + match_query_as, }; use zksync_system_constants::EMPTY_UNCLES_HASH; use zksync_types::{ @@ -125,7 +126,7 @@ impl BlocksWeb3Dal<'_, '_> { &mut self, from_block: MiniblockNumber, limit: usize, - ) -> sqlx::Result<(Vec, Option)> { + ) -> DalResult<(Vec, Option)> { let rows = sqlx::query!( r#" SELECT @@ -143,7 +144,10 @@ impl BlocksWeb3Dal<'_, '_> { i64::from(from_block.0), limit as i32 ) - .fetch_all(self.storage.conn()) + .instrument("get_block_hashes_since") + .with_arg("from_block", &from_block) + .with_arg("limit", &limit) + .fetch_all(self.storage) .await?; let last_block_number = rows.last().map(|row| MiniblockNumber(row.number as u32)); @@ -155,7 +159,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_block_headers_after( &mut self, from_block: MiniblockNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let rows = sqlx::query!( r#" SELECT @@ -171,7 +175,9 @@ impl BlocksWeb3Dal<'_, '_> { "#, i64::from(from_block.0), ) - .fetch_all(self.storage.conn()) + .instrument("get_block_headers_after") + .with_arg("from_block", &from_block) + .fetch_all(self.storage) .await?; let blocks = rows.into_iter().map(|row| BlockHeader { @@ -351,8 +357,8 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_l2_to_l1_logs( &mut self, - block_number: L1BatchNumber, - ) -> sqlx::Result> { + l1_batch_number: L1BatchNumber, + ) -> DalResult> { let raw_logs = sqlx::query!( r#" SELECT @@ -362,9 +368,11 @@ impl BlocksWeb3Dal<'_, '_> { WHERE number = $1 "#, - i64::from(block_number.0) + i64::from(l1_batch_number.0) ) - .fetch_optional(self.storage.conn()) + 
.instrument("get_l2_to_l1_logs") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_optional(self.storage) .await? .map(|row| row.l2_to_l1_logs) .unwrap_or_default(); @@ -378,7 +386,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_l1_batch_number_of_miniblock( &mut self, miniblock_number: MiniblockNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let number: Option = sqlx::query!( r#" SELECT @@ -390,7 +398,9 @@ impl BlocksWeb3Dal<'_, '_> { "#, i64::from(miniblock_number.0) ) - .fetch_optional(self.storage.conn()) + .instrument("get_l1_batch_number_of_miniblock") + .with_arg("miniblock_number", &miniblock_number) + .fetch_optional(self.storage) .await? .and_then(|row| row.l1_batch_number); @@ -400,7 +410,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_miniblock_range_of_l1_batch( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let row = sqlx::query!( r#" SELECT @@ -413,7 +423,9 @@ impl BlocksWeb3Dal<'_, '_> { "#, i64::from(l1_batch_number.0) ) - .fetch_one(self.storage.conn()) + .instrument("get_miniblock_range_of_l1_batch") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_one(self.storage) .await?; Ok(match (row.min, row.max) { @@ -428,7 +440,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_l1_batch_info_for_tx( &mut self, tx_hash: H256, - ) -> sqlx::Result> { + ) -> DalResult> { let row = sqlx::query!( r#" SELECT @@ -441,7 +453,9 @@ impl BlocksWeb3Dal<'_, '_> { "#, tx_hash.as_bytes() ) - .fetch_optional(self.storage.conn()) + .instrument("get_l1_batch_info_for_tx") + .with_arg("tx_hash", &tx_hash) + .fetch_optional(self.storage) .await?; let result = row.and_then(|row| match (row.l1_batch_number, row.l1_batch_tx_index) { @@ -458,7 +472,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_traces_for_miniblock( &mut self, block_number: MiniblockNumber, - ) -> sqlx::Result> { + ) -> DalResult> { Ok(sqlx::query_as!( CallTrace, r#" @@ -474,7 +488,9 @@ impl BlocksWeb3Dal<'_, '_> { "#, i64::from(block_number.0) 
) - .fetch_all(self.storage.conn()) + .instrument("get_traces_for_miniblock") + .with_arg("block_number", &block_number) + .fetch_all(self.storage) .await? .into_iter() .map(Call::from) @@ -487,7 +503,7 @@ impl BlocksWeb3Dal<'_, '_> { &mut self, newest_block: MiniblockNumber, block_count: u64, - ) -> sqlx::Result> { + ) -> DalResult> { let result: Vec<_> = sqlx::query!( r#" SELECT @@ -504,7 +520,10 @@ impl BlocksWeb3Dal<'_, '_> { i64::from(newest_block.0), block_count as i64 ) - .fetch_all(self.storage.conn()) + .instrument("get_fee_history") + .with_arg("newest_block", &newest_block) + .with_arg("block_count", &block_count) + .fetch_all(self.storage) .await? .into_iter() .map(|row| bigdecimal_to_u256(row.base_fee_per_gas)) @@ -516,7 +535,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_block_details( &mut self, block_number: MiniblockNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let storage_block_details = sqlx::query_as!( StorageBlockDetails, r#" @@ -590,7 +609,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_l1_batch_details( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let l1_batch_details: Option = sqlx::query_as!( StorageL1BatchDetails, r#" diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 3e051ce5f5f..8ff07041e21 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -1,7 +1,13 @@ +use std::ops; + use anyhow::Context as _; use zksync_consensus_roles::validator; use zksync_consensus_storage::ReplicaState; -use zksync_db_connection::connection::Connection; +use zksync_db_connection::{ + connection::Connection, + error::{DalResult, SqlxContext}, + instrument::{InstrumentExt, Instrumented}, +}; use zksync_types::MiniblockNumber; pub use crate::models::consensus::Payload; @@ -15,8 +21,8 @@ pub struct ConsensusDal<'a, 'c> { impl ConsensusDal<'_, '_> { /// Fetches genesis. 
- pub async fn genesis(&mut self) -> anyhow::Result> { - let Some(row) = sqlx::query!( + pub async fn genesis(&mut self) -> DalResult> { + Ok(sqlx::query!( r#" SELECT genesis @@ -26,15 +32,17 @@ impl ConsensusDal<'_, '_> { fake_key "# ) - .fetch_optional(self.storage.conn()) + .try_map(|row| { + row.genesis + .map(|genesis| { + zksync_protobuf::serde::deserialize(genesis).decode_column("genesis") + }) + .transpose() + }) + .instrument("genesis") + .fetch_optional(self.storage) .await? - else { - return Ok(None); - }; - let Some(genesis) = row.genesis else { - return Ok(None); - }; - Ok(Some(zksync_protobuf::serde::deserialize(genesis)?)) + .flatten()) } /// Attempts to update the genesis. @@ -94,26 +102,20 @@ impl ConsensusDal<'_, '_> { /// Fetches the range of miniblocks present in storage. /// If storage was recovered from snapshot, the range doesn't need to start at 0. - pub async fn block_range(&mut self) -> anyhow::Result> { - let mut txn = self - .storage - .start_transaction() - .await - .context("start_transaction")?; + pub async fn block_range(&mut self) -> DalResult> { + let mut txn = self.storage.start_transaction().await?; let snapshot = txn .snapshot_recovery_dal() .get_applied_snapshot_status() - .await - .context("get_applied_snapshot_status()")?; + .await?; // `snapshot.miniblock_number` indicates the last block processed. - // This block is NOT present in storage. Therefore the first block + // This block is NOT present in storage. Therefore, the first block // that will appear in storage is `snapshot.miniblock_number+1`. let start = validator::BlockNumber(snapshot.map_or(0, |s| s.miniblock_number.0 + 1).into()); let end = txn .blocks_dal() .get_sealed_miniblock_number() - .await - .context("get_sealed_miniblock_number")? + .await? .map_or(start, |last| validator::BlockNumber(last.0.into()).next()); Ok(start..end) } @@ -149,8 +151,8 @@ impl ConsensusDal<'_, '_> { } /// Fetches the current BFT replica state. 
- pub async fn replica_state(&mut self) -> anyhow::Result { - let row = sqlx::query!( + pub async fn replica_state(&mut self) -> DalResult { + sqlx::query!( r#" SELECT state AS "state!" @@ -160,14 +162,15 @@ impl ConsensusDal<'_, '_> { fake_key "# ) - .fetch_one(self.storage.conn()) - .await?; - Ok(zksync_protobuf::serde::deserialize(row.state)?) + .try_map(|row| zksync_protobuf::serde::deserialize(row.state).decode_column("state")) + .instrument("replica_state") + .fetch_one(self.storage) + .await } /// Sets the current BFT replica state. - pub async fn set_replica_state(&mut self, state: &ReplicaState) -> sqlx::Result<()> { - let state = + pub async fn set_replica_state(&mut self, state: &ReplicaState) -> DalResult<()> { + let state_json = zksync_protobuf::serde::serialize(state, serde_json::value::Serializer).unwrap(); sqlx::query!( r#" @@ -177,9 +180,11 @@ impl ConsensusDal<'_, '_> { WHERE fake_key "#, - state + state_json ) - .execute(self.storage.conn()) + .instrument("set_replica_state") + .with_arg("state.view", &state.view) + .execute(self.storage) .await?; Ok(()) } @@ -187,8 +192,8 @@ impl ConsensusDal<'_, '_> { /// Fetches the first consensus certificate. /// It might NOT be the certificate for the first miniblock: /// see `validator::Genesis.first_block`. - pub async fn first_certificate(&mut self) -> anyhow::Result> { - let Some(row) = sqlx::query!( + pub async fn first_certificate(&mut self) -> DalResult> { + sqlx::query!( r#" SELECT certificate @@ -200,19 +205,19 @@ impl ConsensusDal<'_, '_> { 1 "# ) - .fetch_optional(self.storage.conn()) - .await? - else { - return Ok(None); - }; - Ok(Some(zksync_protobuf::serde::deserialize(row.certificate)?)) + .try_map(|row| { + zksync_protobuf::serde::deserialize(row.certificate).decode_column("certificate") + }) + .instrument("first_certificate") + .fetch_optional(self.storage) + .await } /// Fetches the last consensus certificate. 
- /// Currently certificates are NOT generated synchronously with miniblocks, + /// Currently, certificates are NOT generated synchronously with miniblocks, /// so it might NOT be the certificate for the last miniblock. - pub async fn last_certificate(&mut self) -> anyhow::Result> { - let Some(row) = sqlx::query!( + pub async fn last_certificate(&mut self) -> DalResult> { + sqlx::query!( r#" SELECT certificate @@ -224,20 +229,22 @@ impl ConsensusDal<'_, '_> { 1 "# ) - .fetch_optional(self.storage.conn()) - .await? - else { - return Ok(None); - }; - Ok(Some(zksync_protobuf::serde::deserialize(row.certificate)?)) + .try_map(|row| { + zksync_protobuf::serde::deserialize(row.certificate).decode_column("certificate") + }) + .instrument("last_certificate") + .fetch_optional(self.storage) + .await } /// Fetches the consensus certificate for the miniblock with the given `block_number`. pub async fn certificate( &mut self, block_number: validator::BlockNumber, - ) -> anyhow::Result> { - let Some(row) = sqlx::query!( + ) -> DalResult> { + let instrumentation = + Instrumented::new("certificate").with_arg("block_number", &block_number); + let query = sqlx::query!( r#" SELECT certificate @@ -246,14 +253,17 @@ impl ConsensusDal<'_, '_> { WHERE number = $1 "#, - i64::try_from(block_number.0)? + i64::try_from(block_number.0) + .map_err(|err| { instrumentation.arg_error("block_number", err) })? ) - .fetch_optional(self.storage.conn()) - .await? - else { - return Ok(None); - }; - Ok(Some(zksync_protobuf::serde::deserialize(row.certificate)?)) + .try_map(|row| { + zksync_protobuf::serde::deserialize(row.certificate).decode_column("certificate") + }); + + instrumentation + .with(query) + .fetch_optional(self.storage) + .await } /// Converts the miniblock `block_number` into consensus payload. 
`Payload` is an @@ -262,8 +272,13 @@ impl ConsensusDal<'_, '_> { pub async fn block_payload( &mut self, block_number: validator::BlockNumber, - ) -> anyhow::Result> { - let block_number = MiniblockNumber(block_number.0.try_into()?); + ) -> DalResult> { + let instrumentation = + Instrumented::new("block_payload").with_arg("block_number", &block_number); + let block_number = u32::try_from(block_number.0) + .map_err(|err| instrumentation.arg_error("block_number", err))?; + let block_number = MiniblockNumber(block_number); + let Some(block) = self .storage .sync_dal() diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs index 909ba9e815e..af4f5185669 100644 --- a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -2,7 +2,7 @@ use std::{collections::HashMap, fmt}; use sqlx::types::chrono::Utc; use zksync_db_connection::{ - connection::Connection, instrument::InstrumentExt, write_str, writeln_str, + connection::Connection, error::DalResult, instrument::InstrumentExt, write_str, writeln_str, }; use zksync_system_constants::L1_MESSENGER_ADDRESS; use zksync_types::{ @@ -243,7 +243,7 @@ impl EventsDal<'_, '_> { pub(crate) async fn get_l1_batch_raw_published_bytecode_hashes( &mut self, l1_batch_number: L1BatchNumber, - ) -> Result, SqlxError> { + ) -> DalResult> { let Some((from_miniblock, to_miniblock)) = self .storage .blocks_dal() @@ -252,6 +252,7 @@ impl EventsDal<'_, '_> { else { return Ok(Vec::new()); }; + let result: Vec<_> = sqlx::query!( r#" SELECT @@ -271,7 +272,10 @@ impl EventsDal<'_, '_> { L1_MESSENGER_ADDRESS.as_bytes(), L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE.as_bytes() ) - .fetch_all(self.storage.conn()) + .instrument("get_l1_batch_raw_published_bytecode_hashes") + .with_arg("from_miniblock", &from_miniblock) + .with_arg("to_miniblock", &to_miniblock) + .fetch_all(self.storage) .await? 
.into_iter() .map(|row| H256::from_slice(&row.value)) @@ -334,7 +338,7 @@ impl EventsDal<'_, '_> { pub async fn get_vm_events_for_l1_batch( &mut self, l1_batch_number: L1BatchNumber, - ) -> Result>, SqlxError> { + ) -> DalResult>> { let Some((from_miniblock, to_miniblock)) = self .storage .blocks_dal() @@ -343,8 +347,9 @@ impl EventsDal<'_, '_> { else { return Ok(None); }; + let mut tx_index_in_l1_batch = -1; - let events = sqlx::query!( + let rows = sqlx::query!( r#" SELECT address, @@ -366,32 +371,35 @@ impl EventsDal<'_, '_> { i64::from(to_miniblock.0), ) .instrument("get_vm_events_for_l1_batch") + .with_arg("l1_batch_number", &l1_batch_number) .report_latency() .fetch_all(self.storage) - .await? - .into_iter() - .map(|row| { - let indexed_topics = vec![row.topic1, row.topic2, row.topic3, row.topic4] - .into_iter() - .filter_map(|topic| { - if !topic.is_empty() { - Some(H256::from_slice(&topic)) - } else { - None - } - }) - .collect(); - if row.event_index_in_tx == 0 { - tx_index_in_l1_batch += 1; - } - VmEvent { - location: (l1_batch_number, tx_index_in_l1_batch as u32), - address: Address::from_slice(&row.address), - indexed_topics, - value: row.value, - } - }) - .collect(); + .await?; + + let events = rows + .into_iter() + .map(|row| { + let indexed_topics = vec![row.topic1, row.topic2, row.topic3, row.topic4] + .into_iter() + .filter_map(|topic| { + if !topic.is_empty() { + Some(H256::from_slice(&topic)) + } else { + None + } + }) + .collect(); + if row.event_index_in_tx == 0 { + tx_index_in_l1_batch += 1; + } + VmEvent { + location: (l1_batch_number, tx_index_in_l1_batch as u32), + address: Address::from_slice(&row.address), + indexed_topics, + value: row.value, + } + }) + .collect(); Ok(Some(events)) } } diff --git a/core/lib/dal/src/events_web3_dal.rs b/core/lib/dal/src/events_web3_dal.rs index 2264fc539eb..bcf8525b412 100644 --- a/core/lib/dal/src/events_web3_dal.rs +++ b/core/lib/dal/src/events_web3_dal.rs @@ -3,13 +3,13 @@ use sqlx::{ query::{Query, 
QueryAs}, Postgres, Row, }; -use zksync_db_connection::{connection::Connection, instrument::InstrumentExt}; +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; use zksync_types::{ api::{GetLogsFilter, Log}, Address, MiniblockNumber, H256, }; -use crate::{models::storage_event::StorageWeb3Log, Core, SqlxError}; +use crate::{models::storage_event::StorageWeb3Log, Core}; #[derive(Debug)] pub struct EventsWeb3Dal<'a, 'c> { @@ -23,104 +23,95 @@ impl EventsWeb3Dal<'_, '_> { &mut self, filter: &GetLogsFilter, offset: usize, - ) -> Result, SqlxError> { - { - let (where_sql, arg_index) = self.build_get_logs_where_clause(filter); - - let query = format!( - r#" - SELECT miniblock_number - FROM events - WHERE {} - ORDER BY miniblock_number ASC, event_index_in_block ASC - LIMIT 1 OFFSET ${} - "#, - where_sql, arg_index - ); - - let mut query = sqlx::query(&query); - - // Bind address params - noop if there are no addresses + ) -> DalResult> { + let (where_sql, arg_index) = self.build_get_logs_where_clause(filter); + + let query = format!( + r#" + SELECT miniblock_number + FROM events + WHERE {} + ORDER BY miniblock_number ASC, event_index_in_block ASC + LIMIT 1 OFFSET ${} + "#, + where_sql, arg_index + ); + + let mut query = sqlx::query(&query); + + // Bind address params - noop if there are no addresses + query = Self::bind_params_for_optional_filter_query( + query, + filter.addresses.iter().map(Address::as_bytes).collect(), + ); + for (_, topics) in &filter.topics { + // Bind topic params - noop if there are no topics query = Self::bind_params_for_optional_filter_query( query, - filter.addresses.iter().map(Address::as_bytes).collect(), + topics.iter().map(H256::as_bytes).collect(), ); - for (_, topics) in &filter.topics { - // Bind topic params - noop if there are no topics - query = Self::bind_params_for_optional_filter_query( - query, - topics.iter().map(H256::as_bytes).collect(), - ); - } - query = query.bind(offset as i32); - 
let log = query - .instrument("get_log_block_number") - .report_latency() - .with_arg("filter", filter) - .with_arg("offset", &offset) - .fetch_optional(self.storage) - .await?; - - Ok(log.map(|row| MiniblockNumber(row.get::("miniblock_number") as u32))) } + query = query.bind(offset as i32); + let log = query + .instrument("get_log_block_number") + .report_latency() + .with_arg("filter", filter) + .with_arg("offset", &offset) + .fetch_optional(self.storage) + .await?; + + Ok(log.map(|row| MiniblockNumber(row.get::("miniblock_number") as u32))) } /// Returns logs for given filter. #[allow(clippy::type_complexity)] - pub async fn get_logs( - &mut self, - filter: GetLogsFilter, - limit: usize, - ) -> Result, SqlxError> { - { - let (where_sql, arg_index) = self.build_get_logs_where_clause(&filter); - - let query = format!( - r#" - WITH events_select AS ( - SELECT - address, topic1, topic2, topic3, topic4, value, - miniblock_number, tx_hash, tx_index_in_block, - event_index_in_block, event_index_in_tx - FROM events - WHERE {} - ORDER BY miniblock_number ASC, event_index_in_block ASC - LIMIT ${} - ) - SELECT miniblocks.hash as "block_hash", miniblocks.l1_batch_number as "l1_batch_number", events_select.* - FROM events_select - LEFT JOIN miniblocks ON events_select.miniblock_number = miniblocks.number + pub async fn get_logs(&mut self, filter: GetLogsFilter, limit: usize) -> DalResult> { + let (where_sql, arg_index) = self.build_get_logs_where_clause(&filter); + let query = format!( + r#" + WITH events_select AS ( + SELECT + address, topic1, topic2, topic3, topic4, value, + miniblock_number, tx_hash, tx_index_in_block, + event_index_in_block, event_index_in_tx + FROM events + WHERE {} ORDER BY miniblock_number ASC, event_index_in_block ASC - "#, - where_sql, arg_index - ); - - let mut query = sqlx::query_as(&query); - - // Bind address params - noop if there are no addresses + LIMIT ${} + ) + SELECT miniblocks.hash as "block_hash", miniblocks.l1_batch_number as 
"l1_batch_number", events_select.* + FROM events_select + LEFT JOIN miniblocks ON events_select.miniblock_number = miniblocks.number + ORDER BY miniblock_number ASC, event_index_in_block ASC + "#, + where_sql, arg_index + ); + + let mut query = sqlx::query_as(&query); + + // Bind address params - noop if there are no addresses + query = Self::bind_params_for_optional_filter_query_as( + query, + filter.addresses.iter().map(Address::as_bytes).collect(), + ); + for (_, topics) in &filter.topics { + // Bind topic params - noop if there are no topics query = Self::bind_params_for_optional_filter_query_as( query, - filter.addresses.iter().map(Address::as_bytes).collect(), + topics.iter().map(H256::as_bytes).collect(), ); - for (_, topics) in &filter.topics { - // Bind topic params - noop if there are no topics - query = Self::bind_params_for_optional_filter_query_as( - query, - topics.iter().map(H256::as_bytes).collect(), - ); - } - query = query.bind(limit as i32); - - let db_logs: Vec = query - .instrument("get_logs") - .report_latency() - .with_arg("filter", &filter) - .with_arg("limit", &limit) - .fetch_all(self.storage) - .await?; - let logs = db_logs.into_iter().map(Into::into).collect(); - Ok(logs) } + query = query.bind(limit as i32); + + let db_logs: Vec = query + .instrument("get_logs") + .report_latency() + .with_arg("filter", &filter) + .with_arg("limit", &limit) + .fetch_all(self.storage) + .await?; + let logs = db_logs.into_iter().map(Into::into).collect(); + Ok(logs) } fn build_get_logs_where_clause(&self, filter: &GetLogsFilter) -> (String, u8) { @@ -192,10 +183,7 @@ impl EventsWeb3Dal<'_, '_> { } } - pub async fn get_all_logs( - &mut self, - from_block: MiniblockNumber, - ) -> Result, SqlxError> { + pub async fn get_all_logs(&mut self, from_block: MiniblockNumber) -> DalResult> { { let db_logs: Vec = sqlx::query_as!( StorageWeb3Log, @@ -245,7 +233,9 @@ impl EventsWeb3Dal<'_, '_> { "#, i64::from(from_block.0) ) - .fetch_all(self.storage.conn()) + 
.instrument("get_all_logs") + .with_arg("from_block", &from_block) + .fetch_all(self.storage) .await?; let logs = db_logs.into_iter().map(Into::into).collect(); Ok(logs) diff --git a/core/lib/dal/src/factory_deps_dal.rs b/core/lib/dal/src/factory_deps_dal.rs index 920e0ce0329..7b963b49350 100644 --- a/core/lib/dal/src/factory_deps_dal.rs +++ b/core/lib/dal/src/factory_deps_dal.rs @@ -2,7 +2,7 @@ use std::collections::{HashMap, HashSet}; use anyhow::Context as _; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; -use zksync_db_connection::connection::Connection; +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; use zksync_types::{MiniblockNumber, H256, U256}; use zksync_utils::{bytes_to_be_words, bytes_to_chunks}; @@ -21,7 +21,7 @@ impl FactoryDepsDal<'_, '_> { &mut self, block_number: MiniblockNumber, factory_deps: &HashMap>, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { let (bytecode_hashes, bytecodes): (Vec<_>, Vec<_>) = factory_deps .iter() .map(|(hash, bytecode)| (hash.as_bytes(), bytecode.as_slice())) @@ -46,14 +46,17 @@ impl FactoryDepsDal<'_, '_> { &bytecodes as &[&[u8]], i64::from(block_number.0) ) - .execute(self.storage.conn()) + .instrument("insert_factory_deps") + .with_arg("block_number", &block_number) + .with_arg("factory_deps.len", &factory_deps.len()) + .execute(self.storage) .await?; Ok(()) } /// Returns bytecode for a factory dependency with the specified bytecode `hash`. - pub async fn get_factory_dep(&mut self, hash: H256) -> sqlx::Result>> { + pub async fn get_factory_dep(&mut self, hash: H256) -> DalResult>> { Ok(sqlx::query!( r#" SELECT @@ -65,7 +68,9 @@ impl FactoryDepsDal<'_, '_> { "#, hash.as_bytes(), ) - .fetch_optional(self.storage.conn()) + .instrument("get_factory_dep") + .with_arg("hash", &hash) + .fetch_optional(self.storage) .await? 
.map(|row| row.bytecode)) } diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index 37fc8d49aec..58a5c301a49 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -5,7 +5,11 @@ pub use sqlx::{types::BigDecimal, Error as SqlxError}; use zksync_db_connection::connection::DbMarker; -pub use zksync_db_connection::{connection::Connection, connection_pool::ConnectionPool}; +pub use zksync_db_connection::{ + connection::Connection, + connection_pool::ConnectionPool, + error::{DalError, DalResult}, +}; use crate::{ basic_witness_input_producer_dal::BasicWitnessInputProducerDal, blocks_dal::BlocksDal, diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index 7e32c87f0e6..773852ba712 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -1,3 +1,7 @@ +use anyhow::Context as _; +use zksync_db_connection::error::SqlxContext; +use zksync_types::{ProtocolVersionId, H160, H256}; + pub mod consensus; mod proto; pub mod storage_block; @@ -13,13 +17,21 @@ pub mod storage_witness_job_info; #[cfg(test)] mod tests; -use anyhow::Context; -use zksync_types::{H160, H256}; - fn parse_h256(bytes: &[u8]) -> anyhow::Result { Ok(<[u8; 32]>::try_from(bytes).context("invalid size")?.into()) } +fn parse_h256_opt(bytes: Option<&[u8]>) -> anyhow::Result { + parse_h256(bytes.context("missing data")?) +} + fn parse_h160(bytes: &[u8]) -> anyhow::Result { Ok(<[u8; 20]>::try_from(bytes).context("invalid size")?.into()) } + +pub(crate) fn parse_protocol_version(raw: i32) -> sqlx::Result { + u16::try_from(raw) + .decode_column("protocol_version")? 
+ .try_into() + .decode_column("protocol_version") +} diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index b8415960a45..a204f607e7e 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -1,12 +1,12 @@ -use anyhow::Context as _; use zksync_contracts::BaseSystemContractsHashes; +use zksync_db_connection::error::SqlxContext; use zksync_types::{ api::en, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction, H256, }; use crate::{ consensus_dal::Payload, - models::{parse_h160, parse_h256}, + models::{parse_h160, parse_h256, parse_h256_opt, parse_protocol_version}, }; #[derive(Debug, Clone, sqlx::FromRow)] @@ -44,50 +44,46 @@ pub(crate) struct SyncBlock { } impl TryFrom for SyncBlock { - type Error = anyhow::Error; - fn try_from(block: StorageSyncBlock) -> anyhow::Result { + type Error = sqlx::Error; + + fn try_from(block: StorageSyncBlock) -> Result { Ok(Self { - number: MiniblockNumber(block.number.try_into().context("number")?), + number: MiniblockNumber(block.number.try_into().decode_column("number")?), l1_batch_number: L1BatchNumber( block .l1_batch_number .try_into() - .context("l1_batch_number")?, + .decode_column("l1_batch_number")?, ), last_in_batch: block.last_batch_miniblock == Some(block.number), - timestamp: block.timestamp.try_into().context("timestamp")?, - l1_gas_price: block.l1_gas_price.try_into().context("l1_gas_price")?, + timestamp: block.timestamp.try_into().decode_column("timestamp")?, + l1_gas_price: block + .l1_gas_price + .try_into() + .decode_column("l1_gas_price")?, l2_fair_gas_price: block .l2_fair_gas_price .try_into() - .context("l2_fair_gas_price")?, + .decode_column("l2_fair_gas_price")?, fair_pubdata_price: block .fair_pubdata_price - .map(|v| v.try_into().context("fair_pubdata_price")) + .map(|v| v.try_into().decode_column("fair_pubdata_price")) .transpose()?, // TODO (SMA-1635): Make these fields non optional in database 
base_system_contracts_hashes: BaseSystemContractsHashes { - bootloader: parse_h256( - &block - .bootloader_code_hash - .context("bootloader_code_hash should not be none")?, - ) - .context("bootloader_code_hash")?, - default_aa: parse_h256( - &block - .default_aa_code_hash - .context("default_aa_code_hash should not be none")?, - ) - .context("default_aa_code_hash")?, + bootloader: parse_h256_opt(block.bootloader_code_hash.as_deref()) + .decode_column("bootloader_code_hash")?, + default_aa: parse_h256_opt(block.default_aa_code_hash.as_deref()) + .decode_column("default_aa_code_hash")?, }, fee_account_address: parse_h160(&block.fee_account_address) - .context("fee_account_address")?, - virtual_blocks: block.virtual_blocks.try_into().context("virtual_blocks")?, - hash: parse_h256(&block.hash).context("hash")?, - protocol_version: u16::try_from(block.protocol_version) - .context("protocol_version")? + .decode_column("fee_account_address")?, + virtual_blocks: block + .virtual_blocks .try_into() - .context("protocol_version")?, + .decode_column("virtual_blocks")?, + hash: parse_h256(&block.hash).decode_column("hash")?, + protocol_version: parse_protocol_version(block.protocol_version)?, }) } } diff --git a/core/lib/dal/src/snapshot_recovery_dal.rs b/core/lib/dal/src/snapshot_recovery_dal.rs index e4d082c8eb9..bf3315a768b 100644 --- a/core/lib/dal/src/snapshot_recovery_dal.rs +++ b/core/lib/dal/src/snapshot_recovery_dal.rs @@ -1,4 +1,4 @@ -use zksync_db_connection::connection::Connection; +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; use zksync_types::{ snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, }; @@ -14,7 +14,7 @@ impl SnapshotRecoveryDal<'_, '_> { pub async fn insert_initial_recovery_status( &mut self, status: &SnapshotRecoveryStatus, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { sqlx::query!( r#" INSERT INTO @@ -42,15 +42,15 @@ impl SnapshotRecoveryDal<'_, '_> { 
status.protocol_version as i32, &status.storage_logs_chunks_processed, ) - .execute(self.storage.conn()) + .instrument("insert_initial_recovery_status") + .with_arg("status.l1_batch_number", &status.l1_batch_number) + .with_arg("status.miniblock_number", &status.miniblock_number) + .execute(self.storage) .await?; Ok(()) } - pub async fn mark_storage_logs_chunk_as_processed( - &mut self, - chunk_id: u64, - ) -> sqlx::Result<()> { + pub async fn mark_storage_logs_chunk_as_processed(&mut self, chunk_id: u64) -> DalResult<()> { sqlx::query!( r#" UPDATE snapshot_recovery @@ -60,7 +60,9 @@ impl SnapshotRecoveryDal<'_, '_> { "#, chunk_id as i32 + 1 ) - .execute(self.storage.conn()) + .instrument("mark_storage_logs_chunk_as_processed") + .with_arg("chunk_id", &chunk_id) + .execute(self.storage) .await?; Ok(()) @@ -68,7 +70,7 @@ impl SnapshotRecoveryDal<'_, '_> { pub async fn get_applied_snapshot_status( &mut self, - ) -> sqlx::Result> { + ) -> DalResult> { let record = sqlx::query!( r#" SELECT @@ -84,7 +86,8 @@ impl SnapshotRecoveryDal<'_, '_> { snapshot_recovery "#, ) - .fetch_optional(self.storage.conn()) + .instrument("get_applied_snapshot_status") + .fetch_optional(self.storage) .await?; Ok(record.map(|row| SnapshotRecoveryStatus { diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs index 32e388770e7..9c00bfab037 100644 --- a/core/lib/dal/src/snapshots_creator_dal.rs +++ b/core/lib/dal/src/snapshots_creator_dal.rs @@ -1,4 +1,4 @@ -use zksync_db_connection::{connection::Connection, instrument::InstrumentExt}; +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; use zksync_types::{ snapshots::SnapshotStorageLog, AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey, H256, @@ -15,7 +15,7 @@ impl SnapshotsCreatorDal<'_, '_> { pub async fn get_distinct_storage_logs_keys_count( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result { + ) -> DalResult { let count = 
sqlx::query!( r#" SELECT @@ -48,7 +48,7 @@ impl SnapshotsCreatorDal<'_, '_> { miniblock_number: MiniblockNumber, l1_batch_number: L1BatchNumber, hashed_keys_range: std::ops::RangeInclusive, - ) -> sqlx::Result> { + ) -> DalResult> { // We need to filter the returned logs by `l1_batch_number` in order to not return "phantom writes", i.e., // logs that have deduplicated writes (e.g., a write to a non-zero value and back to zero in the same L1 batch) // which are actually written to in future L1 batches. @@ -115,7 +115,7 @@ impl SnapshotsCreatorDal<'_, '_> { pub async fn get_all_factory_deps( &mut self, miniblock_number: MiniblockNumber, - ) -> sqlx::Result)>> { + ) -> DalResult)>> { let rows = sqlx::query!( r#" SELECT diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs index 4a44bf0ce1e..36010b9a7de 100644 --- a/core/lib/dal/src/snapshots_dal.rs +++ b/core/lib/dal/src/snapshots_dal.rs @@ -1,4 +1,8 @@ -use zksync_db_connection::{connection::Connection, instrument::InstrumentExt}; +use zksync_db_connection::{ + connection::Connection, + error::{DalResult, SqlxContext}, + instrument::InstrumentExt, +}; use zksync_types::{ snapshots::{AllSnapshots, SnapshotMetadata, SnapshotVersion}, L1BatchNumber, @@ -18,15 +22,8 @@ impl TryFrom for SnapshotMetadata { type Error = sqlx::Error; fn try_from(row: StorageSnapshotMetadata) -> Result { - let int_version = u16::try_from(row.version).map_err(|err| sqlx::Error::ColumnDecode { - index: "version".to_owned(), - source: err.into(), - })?; - let version = - SnapshotVersion::try_from(int_version).map_err(|err| sqlx::Error::ColumnDecode { - index: "version".to_owned(), - source: err.into(), - })?; + let int_version = u16::try_from(row.version).decode_column("version")?; + let version = SnapshotVersion::try_from(int_version).decode_column("version")?; Ok(Self { version, @@ -53,7 +50,7 @@ impl SnapshotsDal<'_, '_> { l1_batch_number: L1BatchNumber, storage_logs_chunk_count: u64, factory_deps_filepaths: &str, 
- ) -> sqlx::Result<()> { + ) -> DalResult<()> { sqlx::query!( r#" INSERT INTO @@ -74,6 +71,8 @@ impl SnapshotsDal<'_, '_> { factory_deps_filepaths, ) .instrument("add_snapshot") + .with_arg("version", &version) + .with_arg("l1_batch_number", &l1_batch_number) .report_latency() .execute(self.storage) .await?; @@ -105,7 +104,7 @@ impl SnapshotsDal<'_, '_> { Ok(()) } - pub async fn get_all_complete_snapshots(&mut self) -> sqlx::Result { + pub async fn get_all_complete_snapshots(&mut self) -> DalResult { let rows = sqlx::query!( r#" SELECT @@ -133,8 +132,8 @@ impl SnapshotsDal<'_, '_> { }) } - pub async fn get_newest_snapshot_metadata(&mut self) -> sqlx::Result> { - let row = sqlx::query_as!( + pub async fn get_newest_snapshot_metadata(&mut self) -> DalResult> { + sqlx::query_as!( StorageSnapshotMetadata, r#" SELECT @@ -150,19 +149,18 @@ impl SnapshotsDal<'_, '_> { 1 "# ) + .try_map(SnapshotMetadata::try_from) .instrument("get_newest_snapshot_metadata") .report_latency() .fetch_optional(self.storage) - .await?; - - row.map(TryFrom::try_from).transpose() + .await } pub async fn get_snapshot_metadata( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result> { - let row = sqlx::query_as!( + ) -> DalResult> { + sqlx::query_as!( StorageSnapshotMetadata, r#" SELECT @@ -177,12 +175,12 @@ impl SnapshotsDal<'_, '_> { "#, l1_batch_number.0 as i32 ) + .try_map(SnapshotMetadata::try_from) .instrument("get_snapshot_metadata") + .with_arg("l1_batch_number", &l1_batch_number) .report_latency() .fetch_optional(self.storage) - .await?; - - row.map(TryFrom::try_from).transpose() + .await } } diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index 5a3daba0dcb..cdb3fc47f44 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -2,7 +2,10 @@ use std::{collections::HashMap, ops, time::Instant}; use sqlx::types::chrono::Utc; use zksync_db_connection::{ - connection::Connection, instrument::InstrumentExt, 
write_str, writeln_str, + connection::Connection, + error::DalResult, + instrument::{CopyStatement, InstrumentExt}, + write_str, writeln_str, }; use zksync_types::{ get_code_key, snapshots::SnapshotStorageLog, AccountTreeId, Address, L1BatchNumber, @@ -24,7 +27,7 @@ impl StorageLogsDal<'_, '_> { &mut self, block_number: MiniblockNumber, logs: &[(H256, Vec)], - ) -> sqlx::Result<()> { + ) -> DalResult<()> { self.insert_storage_logs_inner(block_number, logs, 0).await } @@ -33,18 +36,20 @@ impl StorageLogsDal<'_, '_> { block_number: MiniblockNumber, logs: &[(H256, Vec)], mut operation_number: u32, - ) -> sqlx::Result<()> { - let mut copy = self - .storage - .conn() - .copy_in_raw( - "COPY storage_logs( - hashed_key, address, key, value, operation_number, tx_hash, miniblock_number, - created_at, updated_at - ) - FROM STDIN WITH (DELIMITER '|')", + ) -> DalResult<()> { + let logs_len = logs.len(); + let copy = CopyStatement::new( + "COPY storage_logs( + hashed_key, address, key, value, operation_number, tx_hash, miniblock_number, + created_at, updated_at ) - .await?; + FROM STDIN WITH (DELIMITER '|')", + ) + .instrument("insert_storage_logs") + .with_arg("block_number", &block_number) + .with_arg("logs.len", &logs_len) + .start(self.storage) + .await?; let mut buffer = String::new(); let now = Utc::now().naive_utc().to_string(); @@ -66,27 +71,27 @@ impl StorageLogsDal<'_, '_> { operation_number += 1; } } - copy.send(buffer.as_bytes()).await?; - copy.finish().await?; - Ok(()) + copy.send(buffer.as_bytes()).await } pub async fn insert_storage_logs_from_snapshot( &mut self, miniblock_number: MiniblockNumber, snapshot_storage_logs: &[SnapshotStorageLog], - ) -> sqlx::Result<()> { - let mut copy = self - .storage - .conn() - .copy_in_raw( - "COPY storage_logs( - hashed_key, address, key, value, operation_number, tx_hash, miniblock_number, - created_at, updated_at - ) - FROM STDIN WITH (DELIMITER '|')", + ) -> DalResult<()> { + let storage_logs_len = 
snapshot_storage_logs.len(); + let copy = CopyStatement::new( + "COPY storage_logs( + hashed_key, address, key, value, operation_number, tx_hash, miniblock_number, + created_at, updated_at ) - .await?; + FROM STDIN WITH (DELIMITER '|')", + ) + .instrument("insert_storage_logs_from_snapshot") + .with_arg("miniblock_number", &miniblock_number) + .with_arg("storage_logs.len", &storage_logs_len) + .start(self.storage) + .await?; let mut buffer = String::new(); let now = Utc::now().naive_utc().to_string(); @@ -106,16 +111,14 @@ impl StorageLogsDal<'_, '_> { H256::zero() ); } - copy.send(buffer.as_bytes()).await?; - copy.finish().await?; - Ok(()) + copy.send(buffer.as_bytes()).await } pub async fn append_storage_logs( &mut self, block_number: MiniblockNumber, logs: &[(H256, Vec)], - ) -> sqlx::Result<()> { + ) -> DalResult<()> { let operation_number = sqlx::query!( r#" SELECT @@ -127,7 +130,9 @@ impl StorageLogsDal<'_, '_> { "#, i64::from(block_number.0) ) - .fetch_one(self.storage.conn()) + .instrument("append_storage_logs#get_operation_number") + .with_arg("block_number", &block_number) + .fetch_one(self.storage) .await? 
.max .map(|max| max as u32 + 1) @@ -142,7 +147,7 @@ impl StorageLogsDal<'_, '_> { pub async fn rollback_storage( &mut self, last_miniblock_to_keep: MiniblockNumber, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { let stage_start = Instant::now(); let modified_keys = self .modified_keys_in_miniblocks(last_miniblock_to_keep.next()..=MiniblockNumber(u32::MAX)) @@ -190,7 +195,8 @@ impl StorageLogsDal<'_, '_> { "#, &keys_to_delete as &[&[u8]], ) - .execute(self.storage.conn()) + .instrument("rollback_storage#delete_storage") + .execute(self.storage) .await?; tracing::info!( @@ -213,7 +219,8 @@ impl StorageLogsDal<'_, '_> { &keys_to_update as &[&[u8]], &values_to_update as &[&[u8]], ) - .execute(self.storage.conn()) + .instrument("rollback_storage#update_storage") + .execute(self.storage) .await?; tracing::info!( @@ -228,7 +235,7 @@ impl StorageLogsDal<'_, '_> { pub async fn modified_keys_in_miniblocks( &mut self, miniblock_numbers: ops::RangeInclusive, - ) -> sqlx::Result> { + ) -> DalResult> { let rows = sqlx::query!( r#" SELECT DISTINCT @@ -241,7 +248,9 @@ impl StorageLogsDal<'_, '_> { i64::from(miniblock_numbers.start().0), i64::from(miniblock_numbers.end().0) ) - .fetch_all(self.storage.conn()) + .instrument("modified_keys_in_miniblocks") + .with_arg("miniblock_numbers", &miniblock_numbers) + .fetch_all(self.storage) .await?; Ok(rows @@ -251,10 +260,7 @@ impl StorageLogsDal<'_, '_> { } /// Removes all storage logs with a miniblock number strictly greater than the specified `block_number`. 
- pub async fn rollback_storage_logs( - &mut self, - block_number: MiniblockNumber, - ) -> sqlx::Result<()> { + pub async fn rollback_storage_logs(&mut self, block_number: MiniblockNumber) -> DalResult<()> { sqlx::query!( r#" DELETE FROM storage_logs @@ -263,7 +269,9 @@ impl StorageLogsDal<'_, '_> { "#, i64::from(block_number.0) ) - .execute(self.storage.conn()) + .instrument("rollback_storage_logs") + .with_arg("block_number", &block_number) + .execute(self.storage) .await?; Ok(()) } @@ -307,7 +315,7 @@ impl StorageLogsDal<'_, '_> { &mut self, addresses: impl Iterator, at_miniblock: Option, - ) -> sqlx::Result> { + ) -> DalResult> { let (bytecode_hashed_keys, address_by_hashed_key): (Vec<_>, HashMap<_, _>) = addresses .map(|address| { let hashed_key = get_code_key(&address).hashed_key().0; @@ -336,7 +344,11 @@ impl StorageLogsDal<'_, '_> { &bytecode_hashed_keys as &[_], i64::from(max_miniblock_number) ) - .fetch_all(self.storage.conn()) + .instrument("filter_deployed_contracts") + .with_arg("addresses.len", &bytecode_hashed_keys.len()) + .with_arg("at_miniblock", &at_miniblock) + .report_latency() + .fetch_all(self.storage) .await?; let deployment_data = rows.into_iter().filter_map(|row| { @@ -355,7 +367,7 @@ impl StorageLogsDal<'_, '_> { pub async fn get_touched_slots_for_l1_batch( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let rows = sqlx::query!( r#" SELECT @@ -386,7 +398,9 @@ impl StorageLogsDal<'_, '_> { "#, i64::from(l1_batch_number.0) ) - .fetch_all(self.storage.conn()) + .instrument("get_touched_slots_for_l1_batch") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_all(self.storage) .await?; let touched_slots = rows.into_iter().map(|row| { @@ -404,7 +418,7 @@ impl StorageLogsDal<'_, '_> { pub async fn get_storage_logs_for_revert( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result>> { + ) -> DalResult>> { let miniblock_range = self .storage .blocks_dal() @@ -488,7 +502,7 @@ impl 
StorageLogsDal<'_, '_> { pub async fn get_l1_batches_and_indices_for_initial_writes( &mut self, hashed_keys: &[H256], - ) -> sqlx::Result> { + ) -> DalResult> { if hashed_keys.is_empty() { return Ok(HashMap::new()); // Shortcut to save time on communication with DB in the common case } @@ -508,6 +522,7 @@ impl StorageLogsDal<'_, '_> { &hashed_keys as &[&[u8]], ) .instrument("get_l1_batches_and_indices_for_initial_writes") + .with_arg("hashed_keys.len", &hashed_keys.len()) .report_latency() .fetch_all(self.storage) .await?; @@ -537,7 +552,7 @@ impl StorageLogsDal<'_, '_> { &mut self, hashed_keys: &[H256], next_l1_batch: L1BatchNumber, - ) -> sqlx::Result>> { + ) -> DalResult>> { let (miniblock_number, _) = self .storage .blocks_dal() @@ -558,7 +573,7 @@ impl StorageLogsDal<'_, '_> { &mut self, hashed_keys: &[H256], miniblock_number: MiniblockNumber, - ) -> sqlx::Result>> { + ) -> DalResult>> { let hashed_keys: Vec<_> = hashed_keys.iter().map(H256::as_bytes).collect(); let rows = sqlx::query!( @@ -585,7 +600,10 @@ impl StorageLogsDal<'_, '_> { &hashed_keys as &[&[u8]], i64::from(miniblock_number.0) ) - .fetch_all(self.storage.conn()) + .instrument("get_storage_values") + .with_arg("miniblock_number", &miniblock_number) + .with_arg("hashed_keys.len", &hashed_keys.len()) + .fetch_all(self.storage) .await?; Ok(rows @@ -637,7 +655,7 @@ impl StorageLogsDal<'_, '_> { pub async fn get_storage_logs_row_count( &mut self, at_miniblock: MiniblockNumber, - ) -> sqlx::Result { + ) -> DalResult { let row = sqlx::query!( r#" SELECT @@ -664,7 +682,7 @@ impl StorageLogsDal<'_, '_> { &mut self, miniblock_number: MiniblockNumber, key_ranges: &[ops::RangeInclusive], - ) -> sqlx::Result>> { + ) -> DalResult>> { let (start_keys, end_keys): (Vec<_>, Vec<_>) = key_ranges .iter() .map(|range| (range.start().as_bytes(), range.end().as_bytes())) @@ -703,7 +721,10 @@ impl StorageLogsDal<'_, '_> { &start_keys as &[&[u8]], &end_keys as &[&[u8]], ) - .fetch_all(self.storage.conn()) + 
.instrument("get_chunk_starts_for_miniblock") + .with_arg("miniblock_number", &miniblock_number) + .with_arg("key_ranges.len", &key_ranges.len()) + .fetch_all(self.storage) .await?; let rows = rows.into_iter().map(|row| { @@ -722,7 +743,7 @@ impl StorageLogsDal<'_, '_> { &mut self, miniblock_number: MiniblockNumber, key_range: ops::RangeInclusive, - ) -> sqlx::Result> { + ) -> DalResult> { let rows = sqlx::query!( r#" SELECT @@ -743,7 +764,10 @@ impl StorageLogsDal<'_, '_> { key_range.start().as_bytes(), key_range.end().as_bytes() ) - .fetch_all(self.storage.conn()) + .instrument("get_tree_entries_for_miniblock") + .with_arg("miniblock_number", &miniblock_number) + .with_arg("key_range", &key_range) + .fetch_all(self.storage) .await?; let rows = rows.into_iter().map(|row| StorageRecoveryLogEntry { diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs index efd4568662c..86dc3417c19 100644 --- a/core/lib/dal/src/storage_logs_dedup_dal.rs +++ b/core/lib/dal/src/storage_logs_dedup_dal.rs @@ -1,7 +1,11 @@ use std::collections::HashSet; use sqlx::types::chrono::Utc; -use zksync_db_connection::connection::Connection; +use zksync_db_connection::{ + connection::Connection, + error::DalResult, + instrument::{CopyStatement, InstrumentExt}, +}; use zksync_types::{ snapshots::SnapshotStorageLog, zk_evm_types::LogQuery, AccountTreeId, Address, L1BatchNumber, StorageKey, H256, @@ -21,15 +25,17 @@ impl StorageLogsDedupDal<'_, '_> { &mut self, l1_batch_number: L1BatchNumber, read_logs: &[LogQuery], - ) -> sqlx::Result<()> { - let mut copy = self - .storage - .conn() - .copy_in_raw( - "COPY protective_reads (l1_batch_number, address, key, created_at, updated_at) \ - FROM STDIN WITH (DELIMITER '|')", - ) - .await?; + ) -> DalResult<()> { + let read_logs_len = read_logs.len(); + let copy = CopyStatement::new( + "COPY protective_reads (l1_batch_number, address, key, created_at, updated_at) \ + FROM STDIN WITH (DELIMITER '|')", + ) + 
.instrument("insert_protective_reads") + .with_arg("l1_batch_number", &l1_batch_number) + .with_arg("read_logs.len", &read_logs_len) + .start(self.storage) + .await?; let mut bytes: Vec = Vec::new(); let now = Utc::now().naive_utc().to_string(); @@ -42,9 +48,7 @@ impl StorageLogsDedupDal<'_, '_> { ); bytes.extend_from_slice(row.as_bytes()); } - copy.send(bytes).await?; - copy.finish().await?; - Ok(()) + copy.send(&bytes).await } /// Insert initial writes and assigns indices to them. @@ -52,15 +56,16 @@ impl StorageLogsDedupDal<'_, '_> { pub async fn insert_initial_writes_from_snapshot( &mut self, snapshot_storage_logs: &[SnapshotStorageLog], - ) -> sqlx::Result<()> { - let mut copy = self - .storage - .conn() - .copy_in_raw( - "COPY initial_writes (hashed_key, index, l1_batch_number, created_at, updated_at) \ - FROM STDIN WITH (DELIMITER '|')", - ) - .await?; + ) -> DalResult<()> { + let storage_logs_len = snapshot_storage_logs.len(); + let copy = CopyStatement::new( + "COPY initial_writes (hashed_key, index, l1_batch_number, created_at, updated_at) \ + FROM STDIN WITH (DELIMITER '|')", + ) + .instrument("insert_initial_writes_from_snapshot") + .with_arg("storage_logs.len", &storage_logs_len) + .start(self.storage) + .await?; let mut bytes: Vec = Vec::new(); let now = Utc::now().naive_utc().to_string(); @@ -75,17 +80,14 @@ impl StorageLogsDedupDal<'_, '_> { ); bytes.extend_from_slice(row.as_bytes()); } - copy.send(bytes).await?; - copy.finish().await?; - - Ok(()) + copy.send(&bytes).await } pub async fn insert_initial_writes( &mut self, l1_batch_number: L1BatchNumber, written_storage_keys: &[StorageKey], - ) -> sqlx::Result<()> { + ) -> DalResult<()> { let hashed_keys: Vec<_> = written_storage_keys .iter() .map(|key| StorageKey::raw_hashed_key(key.address(), key.key()).to_vec()) @@ -113,7 +115,10 @@ impl StorageLogsDedupDal<'_, '_> { &indices, i64::from(l1_batch_number.0) ) - .execute(self.storage.conn()) + .instrument("insert_initial_writes") + 
.with_arg("l1_batch_number", &l1_batch_number) + .with_arg("hashed_keys.len", &hashed_keys.len()) + .execute(self.storage) .await?; Ok(()) @@ -149,7 +154,7 @@ impl StorageLogsDedupDal<'_, '_> { .collect()) } - async fn max_enumeration_index(&mut self) -> sqlx::Result> { + async fn max_enumeration_index(&mut self) -> DalResult> { Ok(sqlx::query!( r#" SELECT @@ -158,7 +163,8 @@ impl StorageLogsDedupDal<'_, '_> { initial_writes "#, ) - .fetch_one(self.storage.conn()) + .instrument("max_enumeration_index") + .fetch_one(self.storage) .await? .max .map(|max| max as u64)) @@ -167,7 +173,7 @@ impl StorageLogsDedupDal<'_, '_> { pub async fn initial_writes_for_batch( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result> { + ) -> DalResult> { Ok(sqlx::query!( r#" SELECT @@ -182,7 +188,9 @@ impl StorageLogsDedupDal<'_, '_> { "#, i64::from(l1_batch_number.0) ) - .fetch_all(self.storage.conn()) + .instrument("initial_writes_for_batch") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_all(self.storage) .await? .into_iter() .map(|row| (H256::from_slice(&row.hashed_key), row.index as u64)) @@ -192,7 +200,7 @@ impl StorageLogsDedupDal<'_, '_> { pub async fn get_enumeration_index_for_key( &mut self, hashed_key: H256, - ) -> sqlx::Result> { + ) -> DalResult> { Ok(sqlx::query!( r#" SELECT @@ -204,16 +212,15 @@ impl StorageLogsDedupDal<'_, '_> { "#, hashed_key.as_bytes() ) - .fetch_optional(self.storage.conn()) + .instrument("get_enumeration_index_for_key") + .with_arg("hashed_key", &hashed_key) + .fetch_optional(self.storage) .await? .map(|row| row.index as u64)) } /// Returns `hashed_keys` that are both present in the input and in `initial_writes` table. 
- pub async fn filter_written_slots( - &mut self, - hashed_keys: &[H256], - ) -> sqlx::Result> { + pub async fn filter_written_slots(&mut self, hashed_keys: &[H256]) -> DalResult> { let hashed_keys: Vec<_> = hashed_keys.iter().map(H256::as_bytes).collect(); Ok(sqlx::query!( r#" @@ -226,7 +233,9 @@ impl StorageLogsDedupDal<'_, '_> { "#, &hashed_keys as &[&[u8]], ) - .fetch_all(self.storage.conn()) + .instrument("filter_written_slots") + .with_arg("hashed_keys.len", &hashed_keys.len()) + .fetch_all(self.storage) .await? .into_iter() .map(|row| H256::from_slice(&row.hashed_key)) diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index 75cbc936604..6583bc1410b 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use zksync_db_connection::{connection::Connection, instrument::InstrumentExt}; +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; use zksync_types::{ get_code_key, get_nonce_key, utils::{decompose_full_nonce, storage_key_for_standard_token_balance}, @@ -21,7 +21,7 @@ impl StorageWeb3Dal<'_, '_> { &mut self, address: Address, block_number: MiniblockNumber, - ) -> sqlx::Result { + ) -> DalResult { let nonce_key = get_nonce_key(&address); let nonce_value = self .get_historical_value_unchecked(&nonce_key, block_number) @@ -34,7 +34,7 @@ impl StorageWeb3Dal<'_, '_> { pub async fn get_nonces_for_addresses( &mut self, addresses: &[Address], - ) -> sqlx::Result> { + ) -> DalResult> { let nonce_keys: HashMap<_, _> = addresses .iter() .map(|address| (get_nonce_key(address).hashed_key(), *address)) @@ -59,7 +59,7 @@ impl StorageWeb3Dal<'_, '_> { token_id: AccountTreeId, account_id: AccountTreeId, block_number: MiniblockNumber, - ) -> sqlx::Result { + ) -> DalResult { let key = storage_key_for_standard_token_balance(token_id, account_id.address()); let balance = self 
.get_historical_value_unchecked(&key, block_number) @@ -68,14 +68,14 @@ impl StorageWeb3Dal<'_, '_> { } /// Gets the current value for the specified `key`. - pub async fn get_value(&mut self, key: &StorageKey) -> sqlx::Result { + pub async fn get_value(&mut self, key: &StorageKey) -> DalResult { self.get_historical_value_unchecked(key, MiniblockNumber(u32::MAX)) .await } /// Gets the current values for the specified `hashed_keys`. The returned map has requested hashed keys as keys /// and current storage values as values. - pub async fn get_values(&mut self, hashed_keys: &[H256]) -> sqlx::Result> { + pub async fn get_values(&mut self, hashed_keys: &[H256]) -> DalResult> { let storage_map = self .storage .storage_logs_dal() @@ -93,7 +93,7 @@ impl StorageWeb3Dal<'_, '_> { &mut self, key: &StorageKey, block_number: MiniblockNumber, - ) -> sqlx::Result { + ) -> DalResult { let hashed_key = key.hashed_key(); sqlx::query!( @@ -117,6 +117,7 @@ impl StorageWeb3Dal<'_, '_> { .instrument("get_historical_value_unchecked") .report_latency() .with_arg("key", &hashed_key) + .with_arg("block_number", &block_number) .fetch_optional(self.storage) .await .map(|option_row| { @@ -174,7 +175,7 @@ impl StorageWeb3Dal<'_, '_> { pub async fn get_l1_batch_number_for_initial_write( &mut self, key: &StorageKey, - ) -> Result, SqlxError> { + ) -> DalResult> { let hashed_key = key.hashed_key(); let row = sqlx::query!( r#" @@ -203,7 +204,7 @@ impl StorageWeb3Dal<'_, '_> { &mut self, address: Address, block_number: MiniblockNumber, - ) -> sqlx::Result>> { + ) -> DalResult>> { let hashed_key = get_code_key(&address).hashed_key(); let row = sqlx::query!( r#" @@ -232,7 +233,10 @@ impl StorageWeb3Dal<'_, '_> { i64::from(block_number.0), FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes(), ) - .fetch_optional(self.storage.conn()) + .instrument("get_contract_code_unchecked") + .with_arg("address", &address) + .with_arg("block_number", &block_number) + .fetch_optional(self.storage) .await?; 
Ok(row.map(|row| row.bytecode)) } diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 01e65e9abfc..546ebbed51a 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -1,5 +1,5 @@ use zksync_db_connection::{ - connection::Connection, instrument::InstrumentExt, metrics::MethodLatency, + connection::Connection, error::DalResult, instrument::InstrumentExt, metrics::MethodLatency, }; use zksync_types::{api::en, MiniblockNumber}; @@ -18,8 +18,8 @@ impl SyncDal<'_, '_> { pub(super) async fn sync_block_inner( &mut self, block_number: MiniblockNumber, - ) -> anyhow::Result> { - let Some(block) = sqlx::query_as!( + ) -> DalResult> { + let Some(mut block) = sqlx::query_as!( StorageSyncBlock, r#" SELECT @@ -64,6 +64,7 @@ impl SyncDal<'_, '_> { "#, i64::from(block_number.0) ) + .try_map(SyncBlock::try_from) .instrument("sync_dal_sync_block.block") .with_arg("block_number", &block_number) .fetch_optional(self.storage) @@ -72,7 +73,6 @@ impl SyncDal<'_, '_> { return Ok(None); }; - let mut block = SyncBlock::try_from(block)?; // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration #[allow(deprecated)] self.storage @@ -86,7 +86,7 @@ impl SyncDal<'_, '_> { &mut self, block_number: MiniblockNumber, include_transactions: bool, - ) -> anyhow::Result> { + ) -> DalResult> { let _latency = MethodLatency::new("sync_dal_sync_block"); let Some(block) = self.sync_block_inner(block_number).await? 
else { return Ok(None); diff --git a/core/lib/dal/src/system_dal.rs b/core/lib/dal/src/system_dal.rs index 0a2c02f6402..a1cf48219dd 100644 --- a/core/lib/dal/src/system_dal.rs +++ b/core/lib/dal/src/system_dal.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use zksync_db_connection::{connection::Connection, instrument::InstrumentExt}; +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; use crate::Core; @@ -42,7 +42,7 @@ impl SystemDal<'_, '_> { }) } - pub(crate) async fn get_table_sizes(&mut self) -> sqlx::Result> { + pub(crate) async fn get_table_sizes(&mut self) -> DalResult> { let rows = sqlx::query!( r#" SELECT @@ -59,6 +59,7 @@ impl SystemDal<'_, '_> { ) .instrument("get_table_sizes") .report_latency() + .expect_slow_query() .fetch_all(self.storage) .await?; diff --git a/core/lib/dal/src/tokens_dal.rs b/core/lib/dal/src/tokens_dal.rs index 389df3cb4be..777e057e091 100644 --- a/core/lib/dal/src/tokens_dal.rs +++ b/core/lib/dal/src/tokens_dal.rs @@ -1,5 +1,10 @@ use sqlx::types::chrono::Utc; -use zksync_db_connection::{connection::Connection, write_str, writeln_str}; +use zksync_db_connection::{ + connection::Connection, + error::DalResult, + instrument::{CopyStatement, InstrumentExt}, + write_str, writeln_str, +}; use zksync_types::{tokens::TokenInfo, Address, MiniblockNumber}; use crate::{Core, CoreDal}; @@ -10,15 +15,16 @@ pub struct TokensDal<'a, 'c> { } impl TokensDal<'_, '_> { - pub async fn add_tokens(&mut self, tokens: &[TokenInfo]) -> sqlx::Result<()> { - let mut copy = self - .storage - .conn() - .copy_in_raw( - "COPY tokens (l1_address, l2_address, name, symbol, decimals, well_known, created_at, updated_at) - FROM STDIN WITH (DELIMITER '|')", - ) - .await?; + pub async fn add_tokens(&mut self, tokens: &[TokenInfo]) -> DalResult<()> { + let tokens_len = tokens.len(); + let copy = CopyStatement::new( + "COPY tokens (l1_address, l2_address, name, symbol, decimals, well_known, created_at, updated_at) + FROM 
STDIN WITH (DELIMITER '|')", + ) + .instrument("add_tokens") + .with_arg("tokens.len", &tokens_len) + .start(self.storage) + .await?; let mut buffer = String::new(); let now = Utc::now().naive_utc().to_string(); @@ -37,12 +43,10 @@ impl TokensDal<'_, '_> { token_info.metadata.decimals ); } - copy.send(buffer.as_bytes()).await?; - copy.finish().await?; - Ok(()) + copy.send(buffer.as_bytes()).await } - pub async fn mark_token_as_well_known(&mut self, l1_address: Address) -> sqlx::Result<()> { + pub async fn mark_token_as_well_known(&mut self, l1_address: Address) -> DalResult<()> { sqlx::query!( r#" UPDATE tokens @@ -54,12 +58,14 @@ impl TokensDal<'_, '_> { "#, l1_address.as_bytes() ) - .execute(self.storage.conn()) + .instrument("mark_token_as_well_known") + .with_arg("l1_address", &l1_address) + .execute(self.storage) .await?; Ok(()) } - pub async fn get_all_l2_token_addresses(&mut self) -> sqlx::Result> { + pub async fn get_all_l2_token_addresses(&mut self) -> DalResult> { let rows = sqlx::query!( r#" SELECT @@ -68,7 +74,9 @@ impl TokensDal<'_, '_> { tokens "# ) - .fetch_all(self.storage.conn()) + .instrument("get_all_l2_token_addresses") + .report_latency() + .fetch_all(self.storage) .await?; Ok(rows @@ -78,7 +86,7 @@ impl TokensDal<'_, '_> { } /// Removes token records that were deployed after `block_number`. 
- pub async fn rollback_tokens(&mut self, block_number: MiniblockNumber) -> sqlx::Result<()> { + pub async fn rollback_tokens(&mut self, block_number: MiniblockNumber) -> DalResult<()> { let all_token_addresses = self.get_all_l2_token_addresses().await?; let token_deployment_data = self .storage @@ -97,7 +105,13 @@ impl TokensDal<'_, '_> { "#, &token_addresses_to_be_removed as &[_] ) - .execute(self.storage.conn()) + .instrument("rollback_tokens") + .with_arg("block_number", &block_number) + .with_arg( + "token_addresses_to_be_removed.len", + &token_addresses_to_be_removed.len(), + ) + .execute(self.storage) .await?; Ok(()) diff --git a/core/lib/dal/src/tokens_web3_dal.rs b/core/lib/dal/src/tokens_web3_dal.rs index f7c51ead6d3..370b2ab7eb2 100644 --- a/core/lib/dal/src/tokens_web3_dal.rs +++ b/core/lib/dal/src/tokens_web3_dal.rs @@ -1,4 +1,4 @@ -use zksync_db_connection::connection::Connection; +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; use zksync_types::{ tokens::{TokenInfo, TokenMetadata}, Address, MiniblockNumber, @@ -36,7 +36,7 @@ pub struct TokensWeb3Dal<'a, 'c> { impl TokensWeb3Dal<'_, '_> { /// Returns information about well-known tokens. 
- pub async fn get_well_known_tokens(&mut self) -> sqlx::Result> { + pub async fn get_well_known_tokens(&mut self) -> DalResult> { let records = sqlx::query_as!( StorageTokenInfo, r#" @@ -54,7 +54,8 @@ impl TokensWeb3Dal<'_, '_> { symbol "# ) - .fetch_all(self.storage.conn()) + .instrument("get_well_known_tokens") + .fetch_all(self.storage) .await?; Ok(records.into_iter().map(Into::into).collect()) @@ -64,7 +65,7 @@ impl TokensWeb3Dal<'_, '_> { pub async fn get_all_tokens( &mut self, at_miniblock: Option, - ) -> sqlx::Result> { + ) -> DalResult> { let records = sqlx::query_as!( StorageTokenInfo, r#" @@ -80,7 +81,10 @@ impl TokensWeb3Dal<'_, '_> { symbol "# ) - .fetch_all(self.storage.conn()) + .instrument("get_all_tokens") + .with_arg("at_miniblock", &at_miniblock) + .report_latency() + .fetch_all(self.storage) .await?; let mut all_tokens: Vec<_> = records.into_iter().map(TokenInfo::from).collect(); diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 6dcc2bf4af9..3d42f33ea04 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -3,9 +3,10 @@ use std::{collections::HashMap, fmt, time::Duration}; use anyhow::Context as _; use bigdecimal::BigDecimal; use itertools::Itertools; -use sqlx::{error, types::chrono::NaiveDateTime}; +use sqlx::types::chrono::NaiveDateTime; use zksync_db_connection::{ - connection::Connection, instrument::InstrumentExt, utils::pg_interval_from_duration, + connection::Connection, error::DalResult, instrument::InstrumentExt, + utils::pg_interval_from_duration, }; use zksync_types::{ block::MiniblockExecutionData, @@ -433,7 +434,7 @@ impl TransactionsDal<'_, '_> { // In this case we identify it as Duplicate // Note, this error can happen because of the race condition (tx can be taken by several // API servers, that simultaneously start execute it and try to inserted to DB) - if let error::Error::Database(ref error) = err { + if let sqlx::Error::Database(error) = 
&err { if let Some(constraint) = error.constraint() { if constraint == "transactions_pkey" { tracing::debug!( @@ -1293,7 +1294,7 @@ impl TransactionsDal<'_, '_> { } } - pub async fn get_call_trace(&mut self, tx_hash: H256) -> sqlx::Result> { + pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult> { Ok(sqlx::query_as!( CallTrace, r#" @@ -1306,7 +1307,9 @@ impl TransactionsDal<'_, '_> { "#, tx_hash.as_bytes() ) - .fetch_optional(self.storage.conn()) + .instrument("get_call_trace") + .with_arg("tx_hash", &tx_hash) + .fetch_optional(self.storage) .await? .map(Into::into)) } diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index bf8e8f38656..2a0e5b15824 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -1,6 +1,7 @@ use sqlx::types::chrono::NaiveDateTime; use zksync_db_connection::{ - connection::Connection, instrument::InstrumentExt, interpolate_query, match_query_as, + connection::Connection, error::DalResult, instrument::InstrumentExt, interpolate_query, + match_query_as, }; use zksync_types::{ api, api::TransactionReceipt, Address, L2ChainId, MiniblockNumber, Transaction, @@ -133,7 +134,7 @@ impl TransactionsWeb3Dal<'_, '_> { &mut self, hashes: &[H256], chain_id: L2ChainId, - ) -> sqlx::Result> { + ) -> DalResult> { self.get_transactions_inner(TransactionSelector::Hashes(hashes), chain_id) .await } @@ -142,7 +143,7 @@ impl TransactionsWeb3Dal<'_, '_> { &mut self, selector: TransactionSelector<'_>, chain_id: L2ChainId, - ) -> sqlx::Result> { + ) -> DalResult> { if let TransactionSelector::Position(_, idx) = selector { // Since index is not trusted, we check it to prevent potential overflow below. 
if idx > i32::MAX as u32 { @@ -191,7 +192,11 @@ impl TransactionsWeb3Dal<'_, '_> { } ); - let rows = query.fetch_all(self.storage.conn()).await?; + let rows = query + .instrument("get_transactions") + .with_arg("selector", &selector) + .fetch_all(self.storage) + .await?; Ok(rows.into_iter().map(|row| row.into_api(chain_id)).collect()) } @@ -199,7 +204,7 @@ impl TransactionsWeb3Dal<'_, '_> { &mut self, hash: H256, chain_id: L2ChainId, - ) -> sqlx::Result> { + ) -> DalResult> { Ok(self .get_transactions_inner(TransactionSelector::Hashes(&[hash]), chain_id) .await? @@ -212,7 +217,7 @@ impl TransactionsWeb3Dal<'_, '_> { block_number: MiniblockNumber, index_in_block: u32, chain_id: L2ChainId, - ) -> sqlx::Result> { + ) -> DalResult> { Ok(self .get_transactions_inner( TransactionSelector::Position(block_number, index_in_block), @@ -226,7 +231,7 @@ impl TransactionsWeb3Dal<'_, '_> { pub async fn get_transaction_details( &mut self, hash: H256, - ) -> sqlx::Result> { + ) -> DalResult> { { let storage_tx_details: Option = sqlx::query_as!( StorageTransactionDetails, @@ -281,7 +286,7 @@ impl TransactionsWeb3Dal<'_, '_> { &mut self, from_timestamp: NaiveDateTime, limit: Option, - ) -> Result, SqlxError> { + ) -> DalResult> { let records = sqlx::query!( r#" SELECT @@ -300,7 +305,10 @@ impl TransactionsWeb3Dal<'_, '_> { from_timestamp, limit.map(|limit| limit as i64) ) - .fetch_all(self.storage.conn()) + .instrument("get_pending_txs_hashes_after") + .with_arg("from_timestamp", &from_timestamp) + .with_arg("limit", &limit) + .fetch_all(self.storage) .await?; let hashes = records @@ -315,7 +323,7 @@ impl TransactionsWeb3Dal<'_, '_> { &mut self, initiator_address: Address, committed_next_nonce: u64, - ) -> Result { + ) -> DalResult { // Get nonces of non-rejected transactions, starting from the 'latest' nonce. // `latest` nonce is used, because it is guaranteed that there are no gaps before it. 
// `(miniblock_number IS NOT NULL OR error IS NULL)` is the condition that filters non-rejected transactions. @@ -341,7 +349,10 @@ impl TransactionsWeb3Dal<'_, '_> { initiator_address.as_bytes(), committed_next_nonce as i64 ) - .fetch_all(self.storage.conn()) + .instrument("next_nonce_by_initiator_account#non_rejected_nonces") + .with_arg("initiator_address", &initiator_address) + .with_arg("committed_next_nonce", &committed_next_nonce) + .fetch_all(self.storage) .await? .into_iter() .map(|row| row.nonce as u64) @@ -365,7 +376,7 @@ impl TransactionsWeb3Dal<'_, '_> { pub async fn get_raw_miniblock_transactions( &mut self, miniblock: MiniblockNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let rows = sqlx::query_as!( StorageTransaction, r#" @@ -380,7 +391,9 @@ impl TransactionsWeb3Dal<'_, '_> { "#, i64::from(miniblock.0) ) - .fetch_all(self.storage.conn()) + .instrument("get_raw_miniblock_transactions") + .with_arg("miniblock", &miniblock) + .fetch_all(self.storage) .await?; Ok(rows.into_iter().map(Into::into).collect()) diff --git a/core/lib/db_connection/Cargo.toml b/core/lib/db_connection/Cargo.toml index 8abf111ede7..75a23922647 100644 --- a/core/lib/db_connection/Cargo.toml +++ b/core/lib/db_connection/Cargo.toml @@ -31,6 +31,7 @@ tokio = { workspace = true, features = ["full"] } anyhow.workspace = true url.workspace = true rand.workspace = true +thiserror.workspace = true tracing.workspace = true [dev-dependencies] diff --git a/core/lib/db_connection/src/connection.rs b/core/lib/db_connection/src/connection.rs index 972dfb219de..584fdcdd2fa 100644 --- a/core/lib/db_connection/src/connection.rs +++ b/core/lib/db_connection/src/connection.rs @@ -13,7 +13,12 @@ use sqlx::{ pool::PoolConnection, types::chrono, Connection as _, PgConnection, Postgres, Transaction, }; -use crate::{connection_pool::ConnectionPool, metrics::CONNECTION_METRICS, utils::InternalMarker}; +use crate::{ + connection_pool::ConnectionPool, + error::{DalConnectionError, DalResult}, + 
metrics::CONNECTION_METRICS, + utils::InternalMarker, +}; /// Tags that can be associated with a connection. #[derive(Debug, Clone, Copy, PartialEq)] @@ -178,10 +183,13 @@ impl<'a, DB: DbMarker> Connection<'a, DB> { } } - pub async fn start_transaction(&mut self) -> sqlx::Result> { + pub async fn start_transaction(&mut self) -> DalResult> { let (conn, tags) = self.conn_and_tags(); let inner = ConnectionInner::Transaction { - transaction: conn.begin().await?, + transaction: conn + .begin() + .await + .map_err(|err| DalConnectionError::start_transaction(err, tags.cloned()))?, tags, }; Ok(Connection { @@ -195,13 +203,16 @@ impl<'a, DB: DbMarker> Connection<'a, DB> { matches!(self.inner, ConnectionInner::Transaction { .. }) } - pub async fn commit(self) -> sqlx::Result<()> { + pub async fn commit(self) -> DalResult<()> { if let ConnectionInner::Transaction { transaction: postgres, - .. + tags, } = self.inner { - postgres.commit().await + postgres + .commit() + .await + .map_err(|err| DalConnectionError::commit_transaction(err, tags.cloned()).into()) } else { panic!("Connection::commit can only be invoked after calling Connection::begin_transaction"); } diff --git a/core/lib/db_connection/src/connection_pool.rs b/core/lib/db_connection/src/connection_pool.rs index 682d6176eeb..0aeeba6cad5 100644 --- a/core/lib/db_connection/src/connection_pool.rs +++ b/core/lib/db_connection/src/connection_pool.rs @@ -19,6 +19,7 @@ use sqlx::{ use crate::{ connection::{Connection, ConnectionTags, DbMarker, TracedConnections}, + error::{DalConnectionError, DalResult}, metrics::CONNECTION_METRICS, }; @@ -333,7 +334,7 @@ impl ConnectionPool { /// /// This method is intended to be used in crucial contexts, where the /// database access is must-have (e.g. block committer). 
- pub async fn connection(&self) -> anyhow::Result> { + pub async fn connection(&self) -> DalResult> { self.connection_inner(None).await } @@ -347,7 +348,7 @@ impl ConnectionPool { pub fn connection_tagged( &self, requester: &'static str, - ) -> impl Future>> + '_ { + ) -> impl Future>> + '_ { let location = Location::caller(); async move { let tags = ConnectionTags { @@ -361,12 +362,9 @@ impl ConnectionPool { async fn connection_inner( &self, tags: Option, - ) -> anyhow::Result> { + ) -> DalResult> { let acquire_latency = CONNECTION_METRICS.acquire.start(); - let conn = self - .acquire_connection_retried(tags.as_ref()) - .await - .context("acquire_connection_retried()")?; + let conn = self.acquire_connection_retried(tags.as_ref()).await?; let elapsed = acquire_latency.observe(); if let Some(tags) = &tags { CONNECTION_METRICS.acquire_tagged[&tags.requester].observe(elapsed); @@ -382,7 +380,7 @@ impl ConnectionPool { async fn acquire_connection_retried( &self, tags: Option<&ConnectionTags>, - ) -> anyhow::Result> { + ) -> DalResult> { const DB_CONNECTION_RETRIES: usize = 3; const AVG_BACKOFF_INTERVAL: Duration = Duration::from_secs(1); @@ -410,21 +408,10 @@ impl ConnectionPool { } // Attempting to get the pooled connection for the last time - match self.inner.acquire().await { - Ok(conn) => Ok(conn), - Err(err) => { - Self::report_connection_error(&err); - let tags_display = ConnectionTags::display(tags); - if let Some(traced_connections) = &self.traced_connections { - anyhow::bail!( - "Run out of retries getting a DB connection ({tags_display}), last error: {err}\n\ - Active connections: {traced_connections:#?}" - ); - } else { - anyhow::bail!("Run out of retries getting a DB connection ({tags_display}), last error: {err}"); - } - } - } + self.inner.acquire().await.map_err(|err| { + Self::report_connection_error(&err); + DalConnectionError::acquire_connection(err, tags.cloned()).into() + }) } fn report_connection_error(err: &sqlx::Error) { diff --git 
a/core/lib/db_connection/src/error.rs b/core/lib/db_connection/src/error.rs new file mode 100644 index 00000000000..ce6966679e0 --- /dev/null +++ b/core/lib/db_connection/src/error.rs @@ -0,0 +1,186 @@ +use std::{fmt, panic::Location}; + +use sqlx::error::BoxDynError; + +use crate::connection::ConnectionTags; + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum DalError { + #[error(transparent)] + Request(#[from] DalRequestError), + #[error(transparent)] + Connection(#[from] DalConnectionError), +} + +impl DalError { + /// Returns a reference to the underlying `sqlx` error. + pub fn inner(&self) -> &sqlx::Error { + match self { + Self::Request(err) => &err.inner, + Self::Connection(err) => &err.inner, + } + } + + /// Wraps this error into an `anyhow` wrapper. + pub fn generalize(self) -> anyhow::Error { + anyhow::Error::from(self).context("Postgres error") + } +} + +#[derive(Debug, thiserror::Error)] +pub struct DalRequestError { + #[source] + inner: sqlx::Error, + method: &'static str, + location: &'static Location<'static>, + args: Vec<(&'static str, String)>, + connection_tags: Option, +} + +pub type DalResult = Result; + +impl DalRequestError { + pub(crate) fn new( + inner: sqlx::Error, + method: &'static str, + location: &'static Location<'static>, + ) -> Self { + Self { + inner, + method, + location, + args: vec![], + connection_tags: None, + } + } + + pub(crate) fn with_args(mut self, args: Vec<(&'static str, String)>) -> Self { + self.args = args; + self + } + + pub(crate) fn with_connection_tags(mut self, tags: Option) -> Self { + self.connection_tags = tags; + self + } +} + +impl fmt::Display for DalRequestError { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + struct ArgsFormatter<'a>(&'a [(&'static str, String)]); + + impl fmt::Display for ArgsFormatter<'_> { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + for (i, (name, value)) in self.0.iter().enumerate() { + write!(formatter, 
"{name}={value}")?; + if i + 1 < self.0.len() { + formatter.write_str(", ")?; + } + } + Ok(()) + } + } + + write!( + formatter, + "Query {name}({args}) called at {file}:{line} [{connection_tags}] failed: {err}", + name = self.method, + args = ArgsFormatter(&self.args), + file = self.location.file(), + line = self.location.line(), + connection_tags = ConnectionTags::display(self.connection_tags.as_ref()), + err = self.inner + ) + } +} + +#[derive(Debug, Clone, Copy)] +enum ConnectionAction { + AcquireConnection, + StartTransaction, + CommitTransaction, +} + +impl ConnectionAction { + fn as_str(self) -> &'static str { + match self { + Self::AcquireConnection => "acquiring DB connection", + Self::StartTransaction => "starting DB transaction", + Self::CommitTransaction => "committing DB transaction", + } + } +} + +#[derive(Debug, thiserror::Error)] +pub struct DalConnectionError { + #[source] + inner: sqlx::Error, + action: ConnectionAction, + connection_tags: Option, +} + +impl fmt::Display for DalConnectionError { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + formatter, + "Failed {action} [{connection_tags}]: {err}", + action = self.action.as_str(), + connection_tags = ConnectionTags::display(self.connection_tags.as_ref()), + err = self.inner + ) + } +} + +impl DalConnectionError { + pub(crate) fn acquire_connection( + inner: sqlx::Error, + connection_tags: Option, + ) -> Self { + Self { + inner, + action: ConnectionAction::AcquireConnection, + connection_tags, + } + } + + pub(crate) fn start_transaction( + inner: sqlx::Error, + connection_tags: Option, + ) -> Self { + Self { + inner, + action: ConnectionAction::StartTransaction, + connection_tags, + } + } + + pub(crate) fn commit_transaction( + inner: sqlx::Error, + connection_tags: Option, + ) -> Self { + Self { + inner, + action: ConnectionAction::CommitTransaction, + connection_tags, + } + } +} + +/// Extension trait to create `sqlx::Result`s, similar to `anyhow::Context`. 
+pub trait SqlxContext { + /// Wraps the error variant of a result into a column conversion error. + fn decode_column(self, column_name: &'static str) -> sqlx::Result; +} + +impl SqlxContext for Result +where + E: Into, +{ + fn decode_column(self, column_name: &'static str) -> sqlx::Result { + self.map_err(|err| sqlx::Error::ColumnDecode { + index: column_name.to_string(), + source: err.into(), + }) + } +} diff --git a/core/lib/db_connection/src/instrument.rs b/core/lib/db_connection/src/instrument.rs index a441c69d927..2ce82e50b5d 100644 --- a/core/lib/db_connection/src/instrument.rs +++ b/core/lib/db_connection/src/instrument.rs @@ -14,15 +14,16 @@ use std::{fmt, future::Future, panic::Location}; use sqlx::{ - postgres::{PgQueryResult, PgRow}, + postgres::{PgCopyIn, PgQueryResult, PgRow}, query::{Map, Query, QueryAs}, - FromRow, IntoArguments, Postgres, + FromRow, IntoArguments, PgConnection, Postgres, }; use tokio::time::Instant; use crate::{ connection::{Connection, ConnectionTags, DbMarker}, connection_pool::ConnectionPool, + error::{DalError, DalRequestError, DalResult}, metrics::REQUEST_METRICS, utils::InternalMarker, }; @@ -35,6 +36,15 @@ struct QueryArgs<'a> { inner: Vec<(&'static str, &'a ThreadSafeDebug<'a>)>, } +impl QueryArgs<'_> { + fn to_owned(&self) -> Vec<(&'static str, String)> { + self.inner + .iter() + .map(|(name, value)| (*name, format!("{value:?}"))) + .collect() + } +} + impl fmt::Display for QueryArgs<'_> { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { if self.inner.is_empty() { @@ -99,6 +109,64 @@ where } } +/// Wrapper for a `COPY` SQL statement. To actually do something on a statement, it should be instrumented. +#[derive(Debug)] +pub struct CopyStatement { + statement: &'static str, +} + +impl CopyStatement { + /// Creates a new statement wrapping the specified SQL. 
+ pub fn new(statement: &'static str) -> Self { + Self { statement } + } +} + +impl InstrumentExt for CopyStatement { + #[track_caller] + fn instrument(self, name: &'static str) -> Instrumented<'static, Self> { + Instrumented { + query: self, + data: InstrumentedData::new(name, Location::caller()), + } + } +} + +/// Result of `start()`ing copying on a [`CopyStatement`]. +#[must_use = "Data should be sent to database using `send()`"] +pub struct ActiveCopy<'a> { + raw: PgCopyIn<&'a mut PgConnection>, + data: InstrumentedData<'a>, + tags: Option<&'a ConnectionTags>, +} + +impl fmt::Debug for ActiveCopy<'_> { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter + .debug_struct("ActiveCopy") + .field("data", &self.data) + .field("tags", &self.tags) + .finish_non_exhaustive() + } +} + +impl ActiveCopy<'_> { + /// Sends the specified bytes to the database and finishes the copy statement. + // FIXME: measure latency? + pub async fn send(mut self, data: &[u8]) -> DalResult<()> { + let inner_send = async { + self.raw.send(data).await?; + self.raw.finish().await.map(drop) + }; + inner_send.await.map_err(|err| { + DalRequestError::new(err, self.data.name, self.data.location) + .with_args(self.data.args.to_owned()) + .with_connection_tags(self.tags.cloned()) + .into() + }) + } +} + #[derive(Debug)] struct InstrumentedData<'a> { name: &'static str, @@ -123,7 +191,7 @@ impl<'a> InstrumentedData<'a> { self, connection_tags: Option<&ConnectionTags>, query_future: impl Future>, - ) -> Result { + ) -> DalResult { let Self { name, location, @@ -161,22 +229,28 @@ impl<'a> InstrumentedData<'a> { REQUEST_METRICS.request[&name].observe(elapsed); } - let connection_tags = ConnectionTags::display(connection_tags); + let connection_tags_display = ConnectionTags::display(connection_tags); if let Err(err) = &output { tracing::warn!( - "Query {name}{args} called at {file}:{line} [{connection_tags}] has resulted in error: {err}", + "Query {name}{args} called at 
{file}:{line} [{connection_tags_display}] has resulted in error: {err}", file = location.file(), line = location.line() ); REQUEST_METRICS.request_error[&name].inc(); } else if is_slow { tracing::info!( - "Slow query {name}{args} called at {file}:{line} [{connection_tags}] has finished after {elapsed:?}", + "Slow query {name}{args} called at {file}:{line} [{connection_tags_display}] has finished after {elapsed:?}", file = location.file(), line = location.line() ); } - output + + output.map_err(|err| { + DalRequestError::new(err, name, location) + .with_args(args.to_owned()) + .with_connection_tags(connection_tags.cloned()) + .into() + }) } } @@ -197,6 +271,41 @@ pub struct Instrumented<'a, Q> { data: InstrumentedData<'a>, } +impl<'a> Instrumented<'a, ()> { + /// Creates an empty instrumentation information. This is useful if you need to validate query arguments + /// before invoking a query. + #[track_caller] + pub fn new(name: &'static str) -> Self { + Self { + query: (), + data: InstrumentedData::new(name, Location::caller()), + } + } + + /// Wraps a provided argument validation error. + pub fn arg_error(&self, arg_name: &'static str, err: E) -> DalError + where + E: Into, + { + let err: anyhow::Error = err.into(); + let err = err.context(format!("failed validating query argument `{arg_name}`")); + DalRequestError::new( + sqlx::Error::Decode(err.into()), + self.data.name, + self.data.location, + ) + .with_args(self.data.args.to_owned()) + .into() + } + + pub fn with(self, query: Q) -> Instrumented<'a, Q> { + Instrumented { + query, + data: self.data, + } + } +} + impl<'a, Q> Instrumented<'a, Q> { /// Indicates that latency should be reported for all calls. 
pub fn report_latency(mut self) -> Self { @@ -225,7 +334,7 @@ where pub async fn execute( self, storage: &mut Connection<'_, DB>, - ) -> sqlx::Result { + ) -> DalResult { let (conn, tags) = storage.conn_and_tags(); self.data.fetch(tags, self.query.execute(conn)).await } @@ -234,7 +343,7 @@ where pub async fn fetch_optional( self, storage: &mut Connection<'_, DB>, - ) -> Result, sqlx::Error> { + ) -> DalResult> { let (conn, tags) = storage.conn_and_tags(); self.data.fetch(tags, self.query.fetch_optional(conn)).await } @@ -249,7 +358,7 @@ where pub async fn fetch_all( self, storage: &mut Connection<'_, DB>, - ) -> sqlx::Result> { + ) -> DalResult> { let (conn, tags) = storage.conn_and_tags(); self.data.fetch(tags, self.query.fetch_all(conn)).await } @@ -265,16 +374,13 @@ where pub async fn fetch_optional( self, storage: &mut Connection<'_, DB>, - ) -> sqlx::Result> { + ) -> DalResult> { let (conn, tags) = storage.conn_and_tags(); self.data.fetch(tags, self.query.fetch_optional(conn)).await } /// Fetches a single row using this query. - pub async fn fetch_one( - self, - storage: &mut Connection<'_, DB>, - ) -> sqlx::Result { + pub async fn fetch_one(self, storage: &mut Connection<'_, DB>) -> DalResult { let (conn, tags) = storage.conn_and_tags(); self.data.fetch(tags, self.query.fetch_one(conn)).await } @@ -283,12 +389,35 @@ where pub async fn fetch_all( self, storage: &mut Connection<'_, DB>, - ) -> sqlx::Result> { + ) -> DalResult> { let (conn, tags) = storage.conn_and_tags(); self.data.fetch(tags, self.query.fetch_all(conn)).await } } +impl<'a> Instrumented<'a, CopyStatement> { + /// Starts `COPY`ing data using this statement. 
+ pub async fn start( + self, + storage: &'a mut Connection<'_, DB>, + ) -> DalResult> { + let (conn, tags) = storage.conn_and_tags(); + match conn.copy_in_raw(self.query.statement).await { + Ok(raw) => Ok(ActiveCopy { + raw, + data: self.data, + tags, + }), + Err(err) => Err( + DalRequestError::new(err, self.data.name, self.data.location) + .with_args(self.data.args.to_owned()) + .with_connection_tags(tags.cloned()) + .into(), + ), + } + } +} + #[cfg(test)] mod tests { use zksync_basic_types::{MiniblockNumber, H256}; diff --git a/core/lib/db_connection/src/lib.rs b/core/lib/db_connection/src/lib.rs index 04a6cf7ac10..649af477e63 100644 --- a/core/lib/db_connection/src/lib.rs +++ b/core/lib/db_connection/src/lib.rs @@ -1,5 +1,8 @@ +//! Common utils for data access layer (DAL) implementations. + pub mod connection; pub mod connection_pool; +pub mod error; pub mod healthcheck; pub mod instrument; pub mod metrics; diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs index 285da0f345e..b619016da25 100644 --- a/core/lib/snapshots_applier/src/lib.rs +++ b/core/lib/snapshots_applier/src/lib.rs @@ -6,7 +6,7 @@ use anyhow::Context as _; use async_trait::async_trait; use serde::Serialize; use tokio::sync::Semaphore; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, SqlxError}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError, SqlxError}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_object_store::{ObjectStore, ObjectStoreError}; use zksync_types::{ @@ -62,18 +62,17 @@ impl SnapshotsApplierError { } } } +} - fn db(err: SqlxError, context: impl Into) -> Self { - let context = context.into(); - match err { +impl From for SnapshotsApplierError { + fn from(err: DalError) -> Self { + match err.inner() { SqlxError::Database(_) | SqlxError::RowNotFound | SqlxError::ColumnNotFound(_) | SqlxError::Configuration(_) - | SqlxError::TypeNotFound { .. 
} => { - Self::Fatal(anyhow::Error::from(err).context(context)) - } - _ => Self::Retryable(anyhow::Error::from(err).context(context)), + | SqlxError::TypeNotFound { .. } => Self::Fatal(anyhow::Error::from(err)), + _ => Self::Retryable(anyhow::Error::from(err)), } } } @@ -271,10 +270,7 @@ impl<'a> SnapshotsApplier<'a> { let applied_snapshot_status = storage .snapshot_recovery_dal() .get_applied_snapshot_status() - .await - .map_err(|err| { - SnapshotsApplierError::db(err, "failed fetching applied snapshot status from DB") - })?; + .await?; if let Some(applied_snapshot_status) = applied_snapshot_status { let latency = latency.observe(); @@ -282,14 +278,7 @@ impl<'a> SnapshotsApplier<'a> { Ok((applied_snapshot_status, false)) } else { - let is_genesis_needed = - storage - .blocks_dal() - .is_genesis_needed() - .await - .map_err(|err| { - SnapshotsApplierError::db(err, "failed checking genesis L1 batch in DB") - })?; + let is_genesis_needed = storage.blocks_dal().is_genesis_needed().await?; if !is_genesis_needed { let err = anyhow::anyhow!( "node contains a non-genesis L1 batch; snapshot recovery is unsafe" @@ -303,10 +292,7 @@ impl<'a> SnapshotsApplier<'a> { let storage_logs_count = storage .storage_logs_dal() .get_storage_logs_row_count(recovery_status.miniblock_number) - .await - .map_err(|err| { - SnapshotsApplierError::db(err, "cannot get storage_logs row count") - })?; + .await?; if storage_logs_count > 0 { let err = anyhow::anyhow!( "storage_logs table has {storage_logs_count} rows at or before the snapshot miniblock #{}; \ @@ -333,9 +319,7 @@ impl<'a> SnapshotsApplier<'a> { let mut storage = connection_pool .connection_tagged("snapshots_applier") .await?; - let mut storage_transaction = storage.start_transaction().await.map_err(|err| { - SnapshotsApplierError::db(err, "failed starting initial DB transaction") - })?; + let mut storage_transaction = storage.start_transaction().await?; let (applied_snapshot_status, created_from_scratch) = 
Self::prepare_applied_snapshot_status(&mut storage_transaction, main_node_client) @@ -367,14 +351,9 @@ impl<'a> SnapshotsApplier<'a> { storage_transaction .snapshot_recovery_dal() .insert_initial_recovery_status(&this.applied_snapshot_status) - .await - .map_err(|err| { - SnapshotsApplierError::db(err, "failed persisting initial recovery status") - })?; + .await?; } - storage_transaction.commit().await.map_err(|err| { - SnapshotsApplierError::db(err, "failed committing initial DB transaction") - })?; + storage_transaction.commit().await?; drop(storage); this.factory_deps_recovered = true; this.update_health(); @@ -503,10 +482,7 @@ impl<'a> SnapshotsApplier<'a> { self.applied_snapshot_status.miniblock_number, &all_deps_hashmap, ) - .await - .map_err(|err| { - SnapshotsApplierError::db(err, "failed persisting factory deps to DB") - })?; + .await?; let latency = latency.observe(); tracing::info!("Applied factory dependencies in {latency:?}"); @@ -516,25 +492,18 @@ impl<'a> SnapshotsApplier<'a> { async fn insert_initial_writes_chunk( &self, - chunk_id: u64, storage_logs: &[SnapshotStorageLog], storage: &mut Connection<'_, Core>, ) -> Result<(), SnapshotsApplierError> { storage .storage_logs_dedup_dal() .insert_initial_writes_from_snapshot(storage_logs) - .await - .map_err(|err| { - let context = - format!("failed persisting initial writes from storage logs chunk {chunk_id}"); - SnapshotsApplierError::db(err, context) - })?; + .await?; Ok(()) } async fn insert_storage_logs_chunk( &self, - chunk_id: u64, storage_logs: &[SnapshotStorageLog], storage: &mut Connection<'_, Core>, ) -> Result<(), SnapshotsApplierError> { @@ -544,11 +513,7 @@ impl<'a> SnapshotsApplier<'a> { self.applied_snapshot_status.miniblock_number, storage_logs, ) - .await - .map_err(|err| { - let context = format!("failed persisting storage logs from chunk {chunk_id}"); - SnapshotsApplierError::db(err, context) - })?; + .await?; Ok(()) } @@ -590,29 +555,19 @@ impl<'a> SnapshotsApplier<'a> { 
.connection_pool .connection_tagged("snapshots_applier") .await?; - let mut storage_transaction = storage.start_transaction().await.map_err(|err| { - let context = format!("cannot start DB transaction for storage logs chunk {chunk_id}"); - SnapshotsApplierError::db(err, context) - })?; + let mut storage_transaction = storage.start_transaction().await?; tracing::info!("Loading {} storage logs into Postgres", storage_logs.len()); - self.insert_storage_logs_chunk(chunk_id, storage_logs, &mut storage_transaction) + self.insert_storage_logs_chunk(storage_logs, &mut storage_transaction) .await?; - self.insert_initial_writes_chunk(chunk_id, storage_logs, &mut storage_transaction) + self.insert_initial_writes_chunk(storage_logs, &mut storage_transaction) .await?; storage_transaction .snapshot_recovery_dal() .mark_storage_logs_chunk_as_processed(chunk_id) - .await - .map_err(|err| { - let context = format!("failed marking storage logs chunk {chunk_id} as processed"); - SnapshotsApplierError::db(err, context) - })?; - storage_transaction.commit().await.map_err(|err| { - let context = format!("cannot commit DB transaction for storage logs chunk {chunk_id}"); - SnapshotsApplierError::db(err, context) - })?; + .await?; + storage_transaction.commit().await?; let chunks_left = METRICS.storage_logs_chunks_left_to_process.dec_by(1) - 1; let latency = latency.observe(); @@ -660,8 +615,7 @@ impl<'a> SnapshotsApplier<'a> { let total_log_count = storage .storage_logs_dal() .get_storage_logs_row_count(self.applied_snapshot_status.miniblock_number) - .await - .map_err(|err| SnapshotsApplierError::db(err, "cannot get storage_logs row count"))?; + .await?; tracing::info!( "Recovered {total_log_count} storage logs in total; checking overall consistency..." 
); @@ -669,10 +623,7 @@ impl<'a> SnapshotsApplier<'a> { let number_of_logs_by_enum_indices = storage .snapshots_creator_dal() .get_distinct_storage_logs_keys_count(self.applied_snapshot_status.l1_batch_number) - .await - .map_err(|err| { - SnapshotsApplierError::db(err, "cannot get storage log count by initial writes") - })?; + .await?; if number_of_logs_by_enum_indices != total_log_count { let err = anyhow::anyhow!( "mismatch between the expected number of storage logs by enumeration indices ({number_of_logs_by_enum_indices}) \ @@ -691,11 +642,7 @@ impl<'a> SnapshotsApplier<'a> { .connection_pool .connection_tagged("snapshots_applier") .await?; - let all_token_addresses = storage - .tokens_dal() - .get_all_l2_token_addresses() - .await - .map_err(|err| SnapshotsApplierError::db(err, "failed fetching L2 token addresses"))?; + let all_token_addresses = storage.tokens_dal().get_all_l2_token_addresses().await?; if !all_token_addresses.is_empty() { tracing::info!( "{} tokens are already present in DB; skipping token recovery", @@ -721,10 +668,7 @@ impl<'a> SnapshotsApplier<'a> { let filtered_addresses = storage .storage_logs_dal() .filter_deployed_contracts(l2_addresses, Some(snapshot_miniblock_number)) - .await - .map_err(|err| { - SnapshotsApplierError::db(err, "failed querying L2 contracts for tokens") - })?; + .await?; let bogus_tokens = tokens.iter().filter(|token| { // We need special handling for L2 ether; its `l2_address` doesn't have a deployed contract @@ -742,11 +686,7 @@ impl<'a> SnapshotsApplier<'a> { "Checked {} tokens deployment on L2; persisting tokens into DB", tokens.len() ); - storage - .tokens_dal() - .add_tokens(&tokens) - .await - .map_err(|err| SnapshotsApplierError::db(err, "failed persisting tokens"))?; + storage.tokens_dal().add_tokens(&tokens).await?; Ok(()) } } diff --git a/core/lib/state/src/postgres/mod.rs b/core/lib/state/src/postgres/mod.rs index 049a8b8575c..51e6014afac 100644 --- a/core/lib/state/src/postgres/mod.rs +++ 
b/core/lib/state/src/postgres/mod.rs @@ -185,10 +185,7 @@ impl ValuesCache { let modified_keys = connection .storage_logs_dal() .modified_keys_in_miniblocks(miniblocks.clone()) - .await - .with_context(|| { - format!("failed loading modified keys for miniblocks {miniblocks:?}") - })?; + .await?; let elapsed = update_latency.observe(); CACHE_METRICS diff --git a/core/lib/state/src/rocksdb/mod.rs b/core/lib/state/src/rocksdb/mod.rs index 7f77cc64680..36607ae3593 100644 --- a/core/lib/state/src/rocksdb/mod.rs +++ b/core/lib/state/src/rocksdb/mod.rs @@ -30,7 +30,7 @@ use std::{ use anyhow::Context as _; use itertools::{Either, Itertools}; use tokio::sync::watch; -use zksync_dal::{Connection, Core, CoreDal}; +use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_storage::{db::NamedColumnFamily, RocksDB}; use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256, U256}; use zksync_utils::{h256_to_u256, u256_to_h256}; @@ -257,7 +257,7 @@ impl RocksdbStorage { .blocks_dal() .get_sealed_l1_batch_number() .await - .context("failed fetching sealed L1 batch number")? + .map_err(DalError::generalize)? else { // No L1 batches are persisted in Postgres; update is not necessary. 
return Ok(()); @@ -284,9 +284,7 @@ impl RocksdbStorage { .storage_logs_dal() .get_touched_slots_for_l1_batch(current_l1_batch_number) .await - .with_context(|| { - format!("failed loading touched slots for L1 batch {current_l1_batch_number}") - })?; + .map_err(DalError::generalize)?; self.apply_storage_logs(storage_logs, storage).await?; tracing::debug!("Loading factory deps for L1 batch {current_l1_batch_number}"); @@ -294,9 +292,7 @@ impl RocksdbStorage { .blocks_dal() .get_l1_batch_factory_deps(current_l1_batch_number) .await - .with_context(|| { - format!("failed loading factory deps for L1 batch {current_l1_batch_number}") - })?; + .map_err(DalError::generalize)?; for (hash, bytecode) in factory_deps { self.store_factory_dep(hash, bytecode); } @@ -515,10 +511,7 @@ impl RocksdbStorage { let (_, last_miniblock_to_keep) = connection .blocks_dal() .get_miniblock_range_of_l1_batch(last_l1_batch_to_keep) - .await - .with_context(|| { - format!("failed fetching miniblock range for L1 batch #{last_l1_batch_to_keep}") - })? + .await? 
.context("L1 batch should contain at least one miniblock")?; tracing::info!( "Got miniblock number {last_miniblock_to_keep}, took {:?}", diff --git a/core/lib/state/src/rocksdb/recovery.rs b/core/lib/state/src/rocksdb/recovery.rs index c3e286b107c..5909ce84cfe 100644 --- a/core/lib/state/src/rocksdb/recovery.rs +++ b/core/lib/state/src/rocksdb/recovery.rs @@ -4,7 +4,7 @@ use std::ops; use anyhow::Context as _; use tokio::sync::watch; -use zksync_dal::{storage_logs_dal::StorageRecoveryLogEntry, Connection, Core, CoreDal}; +use zksync_dal::{storage_logs_dal::StorageRecoveryLogEntry, Connection, Core, CoreDal, DalError}; use zksync_types::{ snapshots::{uniform_hashed_keys_chunk, SnapshotRecoveryStatus}, L1BatchNumber, MiniblockNumber, H256, @@ -43,7 +43,7 @@ impl RocksdbStorage { .snapshot_recovery_dal() .get_applied_snapshot_status() .await - .context("failed getting snapshot recovery info")?; + .map_err(DalError::generalize)?; Ok(if let Some(snapshot_recovery) = snapshot_recovery { self.recover_from_snapshot( storage, @@ -148,8 +148,7 @@ impl RocksdbStorage { let factory_deps = storage .snapshots_creator_dal() .get_all_factory_deps(snapshot_recovery.miniblock_number) - .await - .context("Failed getting factory dependencies")?; + .await?; let latency = latency.observe(); tracing::info!( "Loaded {} factory dependencies from the snapshot in {latency:?}", @@ -193,8 +192,7 @@ impl RocksdbStorage { let chunk_starts = storage .storage_logs_dal() .get_chunk_starts_for_miniblock(snapshot_miniblock, &key_chunks) - .await - .context("Failed getting chunk starts")?; + .await?; let latency = latency.observe(); tracing::info!("Loaded {chunk_count} chunk starts in {latency:?}"); @@ -227,10 +225,7 @@ impl RocksdbStorage { let all_entries = storage .storage_logs_dal() .get_tree_entries_for_miniblock(snapshot_miniblock, key_chunk.clone()) - .await - .with_context(|| { - format!("Failed getting entries for chunk {key_chunk:?} in snapshot for miniblock #{snapshot_miniblock}") - })?; + 
.await?; let latency = latency.observe(); tracing::debug!( "Loaded {} log entries for chunk {key_chunk:?} in {latency:?}", diff --git a/core/lib/vm_utils/src/storage.rs b/core/lib/vm_utils/src/storage.rs index 15be01a1aac..9c90452ba3c 100644 --- a/core/lib/vm_utils/src/storage.rs +++ b/core/lib/vm_utils/src/storage.rs @@ -183,8 +183,7 @@ impl L1BatchParamsProvider { Some(number) => storage .blocks_dal() .get_miniblock_header(number) - .await - .context("failed getting miniblock header")? + .await? .map(|header| FirstMiniblockInBatch { header, l1_batch_number, @@ -219,10 +218,7 @@ impl L1BatchParamsProvider { let Some((_, last_miniblock_in_prev_l1_batch)) = storage .blocks_dal() .get_miniblock_range_of_l1_batch(prev_l1_batch) - .await - .with_context(|| { - format!("failed getting miniblock range for L1 batch #{prev_l1_batch}") - })? + .await? else { return Ok(None); }; diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs index e5d967f853f..8b895c17d65 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs @@ -436,14 +436,12 @@ impl BlockArgs { vm_l1_batch_number = connection .blocks_dal() .get_sealed_l1_batch_number() - .await - .context("failed getting sealed L1 batch number")? + .await? .context("no L1 batches in storage")?; let sealed_miniblock_header = connection .blocks_dal() .get_last_sealed_miniblock_header() - .await - .context("failed getting sealed miniblock header")? + .await? .context("no miniblocks in storage")?; state_l2_block_number = sealed_miniblock_header.number; @@ -465,8 +463,7 @@ impl BlockArgs { connection .blocks_dal() .get_miniblock_header(self.resolved_block_number) - .await - .context("failed getting header of resolved miniblock")? + .await? .context("resolved miniblock disappeared from storage")? 
}; @@ -474,8 +471,7 @@ impl BlockArgs { let miniblock_header = connection .blocks_dal() .get_miniblock_header(self.resolved_block_number) - .await - .context("failed getting resolved miniblock header")? + .await? .context("resolved miniblock is not in storage")?; Some(miniblock_header.batch_fee_input) } else { diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs index c540ca56dd2..ba464ba3cc8 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs @@ -247,8 +247,7 @@ impl BlockStartInfo { let snapshot_recovery = storage .snapshot_recovery_dal() .get_applied_snapshot_status() - .await - .context("failed getting snapshot recovery status")?; + .await?; let snapshot_recovery = snapshot_recovery.as_ref(); Ok(Self { first_miniblock: snapshot_recovery diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs index f403a5c17ec..5f7d94b078d 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs @@ -127,11 +127,7 @@ async fn get_validation_params( // This method assumes that the number of tokens is relatively low. When it grows // we may need to introduce some kind of caching. 
- let all_tokens = connection - .tokens_dal() - .get_all_l2_token_addresses() - .await - .context("failed getting addresses of L2 tokens")?; + let all_tokens = connection.tokens_dal().get_all_l2_token_addresses().await?; EXECUTION_METRICS.tokens_amount.set(all_tokens.len()); let span = tracing::debug_span!("compute_trusted_slots_for_validation").entered(); diff --git a/core/lib/zksync_core/src/api_server/tx_sender/master_pool_sink.rs b/core/lib/zksync_core/src/api_server/tx_sender/master_pool_sink.rs index 7bd26966f71..48501e4aec3 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/master_pool_sink.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/master_pool_sink.rs @@ -63,7 +63,7 @@ impl TxSink for MasterPoolSink { submission_res_handle }) .map_err(|err| anyhow::format_err!(err).into()), - Err(err) => Err(err.into()), + Err(err) => Err(err.generalize().into()), }; self.inflight_requests diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs index c7965199300..ed6546f1b10 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs @@ -505,11 +505,7 @@ impl TxSender { async fn get_expected_nonce(&self, initiator_account: Address) -> anyhow::Result { let mut storage = self.acquire_replica_connection().await?; - let latest_block_number = storage - .blocks_dal() - .get_sealed_miniblock_number() - .await - .context("failed getting sealed miniblock number")?; + let latest_block_number = storage.blocks_dal().get_sealed_miniblock_number().await?; let latest_block_number = match latest_block_number { Some(number) => number, None => { diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs index e424432d80c..2cb8d490a73 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs +++ 
b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use anyhow::Context as _; use multivm::{interface::ExecutionResult, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT}; use once_cell::sync::OnceCell; -use zksync_dal::CoreDal; +use zksync_dal::{CoreDal, DalError}; use zksync_system_constants::MAX_ENCODED_TX_SIZE; use zksync_types::{ api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, @@ -67,7 +67,7 @@ impl DebugNamespace { let only_top_call = options .map(|options| options.tracer_config.only_top_call) .unwrap_or(false); - let mut connection = self.state.connection_pool.connection_tagged("api").await?; + let mut connection = self.state.acquire_connection().await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; self.current_method() .set_block_diff(self.state.last_sealed_miniblock.diff(block_number)); @@ -76,7 +76,7 @@ impl DebugNamespace { .blocks_web3_dal() .get_traces_for_miniblock(block_number) .await - .context("get_traces_for_miniblock")?; + .map_err(DalError::generalize)?; let call_trace = call_traces .into_iter() .map(|call_trace| { @@ -110,12 +110,12 @@ impl DebugNamespace { let only_top_call = options .map(|options| options.tracer_config.only_top_call) .unwrap_or(false); - let mut connection = self.state.connection_pool.connection_tagged("api").await?; + let mut connection = self.state.acquire_connection().await?; let call_trace = connection .transactions_dal() .get_call_trace(tx_hash) .await - .context("get_call_trace")?; + .map_err(DalError::generalize)?; Ok(call_trace.map(|call_trace| { let mut result: DebugCall = call_trace.into(); if only_top_call { @@ -139,7 +139,7 @@ impl DebugNamespace { .map(|options| options.tracer_config.only_top_call) .unwrap_or(false); - let mut connection = self.state.connection_pool.connection_tagged("api").await?; + let mut connection = self.state.acquire_connection().await?; let block_args = self .state 
.resolve_block_args(&mut connection, block_id) diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs index 2f91998b155..9911d15ce86 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs @@ -1,6 +1,6 @@ use anyhow::Context as _; use zksync_config::{configs::genesis::SharedBridge, GenesisConfig}; -use zksync_dal::CoreDal; +use zksync_dal::{CoreDal, DalError}; use zksync_types::{api::en, tokens::TokenInfo, L1BatchNumber, MiniblockNumber, H256}; use zksync_web3_decl::error::Web3Error; @@ -19,14 +19,12 @@ impl EnNamespace { } pub async fn consensus_genesis_impl(&self) -> Result, Web3Error> { - let Some(genesis) = self - .state - .connection_pool - .connection_tagged("api") - .await? + let mut storage = self.state.acquire_connection().await?; + let Some(genesis) = storage .consensus_dal() .genesis() - .await? + .await + .map_err(DalError::generalize)? else { return Ok(None); }; @@ -45,12 +43,12 @@ impl EnNamespace { block_number: MiniblockNumber, include_transactions: bool, ) -> Result, Web3Error> { - let mut storage = self.state.connection_pool.connection_tagged("api").await?; + let mut storage = self.state.acquire_connection().await?; Ok(storage .sync_dal() .sync_block(block_number, include_transactions) .await - .context("sync_block")?) + .map_err(DalError::generalize)?) } #[tracing::instrument(skip(self))] @@ -58,23 +56,23 @@ impl EnNamespace { &self, block_number: Option, ) -> Result, Web3Error> { - let mut storage = self.state.connection_pool.connection_tagged("api").await?; + let mut storage = self.state.acquire_connection().await?; Ok(storage .tokens_web3_dal() .get_all_tokens(block_number) .await - .context("get_all_tokens")?) + .map_err(DalError::generalize)?) 
} #[tracing::instrument(skip(self))] pub async fn genesis_config_impl(&self) -> Result { // If this method will cause some load, we can cache everything in memory - let mut storage = self.state.connection_pool.connection_tagged("api").await?; + let mut storage = self.state.acquire_connection().await?; let genesis_batch = storage .blocks_dal() .get_storage_l1_batch(L1BatchNumber(0)) .await - .context("genesis_config")? + .map_err(DalError::generalize)? .context("Genesis batch doesn't exist")?; let protocol_version = genesis_batch @@ -89,7 +87,7 @@ impl EnNamespace { .blocks_dal() .get_fee_address_for_miniblock(MiniblockNumber(0)) .await - .context("genesis_config")? + .map_err(DalError::generalize)? .context("Genesis not finished")?; let shared_bridge = if self.state.api_config.state_transition_proxy_addr.is_some() { diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs index ad9e2215d95..15265d80ba2 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -1,5 +1,5 @@ use anyhow::Context as _; -use zksync_dal::CoreDal; +use zksync_dal::{CoreDal, DalError}; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ api::{ @@ -44,12 +44,12 @@ impl EthNamespace { #[tracing::instrument(skip(self))] pub async fn get_block_number_impl(&self) -> Result { - let mut storage = self.state.connection_pool.connection_tagged("api").await?; + let mut storage = self.state.acquire_connection().await?; let block_number = storage .blocks_dal() .get_sealed_miniblock_number() .await - .context("get_sealed_miniblock_number")? + .map_err(DalError::generalize)? 
.ok_or(Web3Error::NoBlock)?; Ok(block_number.0.into()) } @@ -63,7 +63,7 @@ impl EthNamespace { let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); self.current_method().set_block_id(block_id); - let mut connection = self.state.connection_pool.connection_tagged("api").await?; + let mut connection = self.state.acquire_connection().await?; let block_args = self .state .resolve_block_args(&mut connection, block_id) @@ -146,7 +146,7 @@ impl EthNamespace { let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); self.current_method().set_block_id(block_id); - let mut connection = self.state.connection_pool.connection_tagged("api").await?; + let mut connection = self.state.acquire_connection().await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; let balance = connection @@ -157,7 +157,7 @@ impl EthNamespace { block_number, ) .await - .context("standard_token_historical_balance")?; + .map_err(DalError::generalize)?; self.set_block_diff(block_number); Ok(balance) @@ -218,7 +218,7 @@ impl EthNamespace { self.current_method().set_block_id(block_id); self.state.start_info.ensure_not_pruned(block_id)?; - let mut storage = self.state.connection_pool.connection_tagged("api").await?; + let mut storage = self.state.acquire_connection().await?; let Some(block_number) = self .state .resolve_block_unchecked(&mut storage, block_id) @@ -241,7 +241,7 @@ impl EthNamespace { .transactions_web3_dal() .get_transactions(&block.transactions, self.state.api_config.l2_chain_id) .await - .context("get_transactions")?; + .map_err(DalError::generalize)?; if transactions.len() != block.transactions.len() { let err = anyhow::anyhow!( "storage inconsistency: get_api_block({block_number}) returned {} tx hashes, but get_transactions({:?}) \ @@ -280,7 +280,7 @@ impl EthNamespace { self.current_method().set_block_id(block_id); self.state.start_info.ensure_not_pruned(block_id)?; - let mut storage = 
self.state.connection_pool.connection_tagged("api").await?; + let mut storage = self.state.acquire_connection().await?; let Some(block_number) = self .state .resolve_block_unchecked(&mut storage, block_id) @@ -308,7 +308,7 @@ impl EthNamespace { self.current_method().set_block_id(block_id); self.state.start_info.ensure_not_pruned(block_id)?; - let mut storage = self.state.connection_pool.connection_tagged("api").await?; + let mut storage = self.state.acquire_connection().await?; let Some(block_number) = self .state .resolve_block_unchecked(&mut storage, block_id) @@ -344,7 +344,7 @@ impl EthNamespace { let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); self.current_method().set_block_id(block_id); - let mut connection = self.state.connection_pool.connection_tagged("api").await?; + let mut connection = self.state.acquire_connection().await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; self.set_block_diff(block_number); @@ -352,7 +352,7 @@ impl EthNamespace { .storage_web3_dal() .get_contract_code_unchecked(address, block_number) .await - .context("get_contract_code_unchecked")?; + .map_err(DalError::generalize)?; Ok(contract_code.unwrap_or_default().into()) } @@ -372,14 +372,14 @@ impl EthNamespace { self.current_method().set_block_id(block_id); let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(idx)); - let mut connection = self.state.connection_pool.connection_tagged("api").await?; + let mut connection = self.state.acquire_connection().await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; self.set_block_diff(block_number); let value = connection .storage_web3_dal() .get_historical_value_unchecked(&storage_key, block_number) .await - .context("get_historical_value_unchecked")?; + .map_err(DalError::generalize)?; Ok(value) } @@ -393,7 +393,7 @@ impl EthNamespace { let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); 
self.current_method().set_block_id(block_id); - let mut connection = self.state.connection_pool.connection_tagged("api").await?; + let mut connection = self.state.acquire_connection().await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; self.set_block_diff(block_number); @@ -401,7 +401,7 @@ impl EthNamespace { .storage_web3_dal() .get_address_historical_nonce(address, block_number) .await - .context("get_address_historical_nonce")?; + .map_err(DalError::generalize)?; // TODO (SMA-1612): currently account nonce is returning always, but later we will // return account nonce for account abstraction and deployment nonce for non account abstraction. @@ -424,7 +424,7 @@ impl EthNamespace { .transactions_web3_dal() .next_nonce_by_initiator_account(address, account_nonce_u64) .await - .context("next_nonce_by_initiator_account")? + .map_err(DalError::generalize)? }; } Ok(account_nonce) @@ -435,14 +435,14 @@ impl EthNamespace { &self, id: TransactionId, ) -> Result, Web3Error> { - let mut storage = self.state.connection_pool.connection_tagged("api").await?; + let mut storage = self.state.acquire_connection().await?; let chain_id = self.state.api_config.l2_chain_id; let mut transaction = match id { TransactionId::Hash(hash) => storage .transactions_web3_dal() .get_transaction_by_hash(hash, chain_id) .await - .with_context(|| format!("get_transaction_by_hash({hash:?})"))?, + .map_err(DalError::generalize)?, TransactionId::Block(block_id, idx) => { let Ok(idx) = u32::try_from(idx) else { @@ -460,9 +460,7 @@ impl EthNamespace { .transactions_web3_dal() .get_transaction_by_position(block_number, idx, chain_id) .await - .with_context(|| { - format!("get_transaction_by_position({block_number}, {idx})") - })? + .map_err(DalError::generalize)? } }; @@ -477,11 +475,8 @@ impl EthNamespace { &self, hash: H256, ) -> Result, Web3Error> { - let receipts = self - .state - .connection_pool - .connection_tagged("api") - .await? 
+ let mut storage = self.state.acquire_connection().await?; + let receipts = storage .transactions_web3_dal() .get_transaction_receipts(&[hash]) .await @@ -496,12 +491,12 @@ impl EthNamespace { .installed_filters .as_ref() .ok_or(Web3Error::NotImplemented)?; - let mut storage = self.state.connection_pool.connection_tagged("api").await?; + let mut storage = self.state.acquire_connection().await?; let last_block_number = storage .blocks_dal() .get_sealed_miniblock_number() .await - .context("get_sealed_miniblock_number")? + .map_err(DalError::generalize)? .context("no miniblocks in storage")?; let next_block_number = last_block_number + 1; drop(storage); @@ -644,7 +639,7 @@ impl EthNamespace { .min(self.state.api_config.fee_history_limit) .max(1); - let mut connection = self.state.connection_pool.connection_tagged("api").await?; + let mut connection = self.state.acquire_connection().await?; let newest_miniblock = self .state .resolve_block(&mut connection, BlockId::Number(newest_block)) @@ -655,7 +650,7 @@ impl EthNamespace { .blocks_web3_dal() .get_fee_history(newest_miniblock, block_count) .await - .context("get_fee_history")?; + .map_err(DalError::generalize)?; // DAL method returns fees in DESC order while we need ASC. base_fee_per_gas.reverse(); @@ -685,12 +680,12 @@ impl EthNamespace { ) -> Result { Ok(match typed_filter { TypedFilter::Blocks(from_block) => { - let mut conn = self.state.connection_pool.connection_tagged("api").await?; + let mut conn = self.state.acquire_connection().await?; let (block_hashes, last_block_number) = conn .blocks_web3_dal() .get_block_hashes_since(*from_block, self.state.api_config.req_entities_limit) .await - .context("get_block_hashes_since")?; + .map_err(DalError::generalize)?; *from_block = match last_block_number { Some(last_block_number) => last_block_number + 1, @@ -715,14 +710,14 @@ impl EthNamespace { } None => { // On cache miss, query the database. 
- let mut conn = self.state.connection_pool.connection_tagged("api").await?; + let mut conn = self.state.acquire_connection().await?; conn.transactions_web3_dal() .get_pending_txs_hashes_after( *from_timestamp_excluded, Some(self.state.api_config.req_entities_limit), ) .await - .context("get_pending_txs_hashes_after")? + .map_err(DalError::generalize)? } }; @@ -774,7 +769,7 @@ impl EthNamespace { topics, }; - let mut storage = self.state.connection_pool.connection_tagged("api").await?; + let mut storage = self.state.acquire_connection().await?; // Check if there is more than one block in range and there are more than `req_entities_limit` logs that satisfies filter. // In this case we should return error and suggest requesting logs with smaller block range. @@ -786,7 +781,7 @@ impl EthNamespace { self.state.api_config.req_entities_limit, ) .await - .context("get_log_block_number")? + .map_err(DalError::generalize)? { return Err(Web3Error::LogsLimitExceeded( self.state.api_config.req_entities_limit, @@ -800,7 +795,7 @@ impl EthNamespace { .events_web3_dal() .get_logs(get_logs_filter, i32::MAX as usize) .await - .context("get_logs")?; + .map_err(DalError::generalize)?; *from_block = to_block + 1; FilterChanges::Logs(logs) } diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs index 7599a49cd5f..d6c9acadfe7 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs @@ -1,5 +1,5 @@ use anyhow::Context as _; -use zksync_dal::CoreDal; +use zksync_dal::{CoreDal, DalError}; use zksync_types::{ snapshots::{AllSnapshots, SnapshotHeader, SnapshotStorageLogsChunkMetadata}, L1BatchNumber, @@ -23,24 +23,24 @@ impl SnapshotsNamespace { } pub async fn get_all_snapshots_impl(&self) -> Result { - let mut storage_processor = self.state.connection_pool.connection_tagged("api").await?; + let mut 
storage_processor = self.state.acquire_connection().await?; let mut snapshots_dal = storage_processor.snapshots_dal(); Ok(snapshots_dal .get_all_complete_snapshots() .await - .context("get_all_complete_snapshots")?) + .map_err(DalError::generalize)?) } pub async fn get_snapshot_by_l1_batch_number_impl( &self, l1_batch_number: L1BatchNumber, ) -> Result, Web3Error> { - let mut storage_processor = self.state.connection_pool.connection_tagged("api").await?; + let mut storage_processor = self.state.acquire_connection().await?; let snapshot_metadata = storage_processor .snapshots_dal() .get_snapshot_metadata(l1_batch_number) .await - .context("get_snapshot_metadata")?; + .map_err(DalError::generalize)?; let Some(snapshot_metadata) = snapshot_metadata else { return Ok(None); @@ -67,7 +67,7 @@ impl SnapshotsNamespace { .blocks_dal() .get_miniblock_range_of_l1_batch(l1_batch_number) .await - .context("get_miniblock_range_of_l1_batch")? + .map_err(DalError::generalize)? .with_context(|| format!("missing miniblocks for L1 batch #{l1_batch_number}"))?; Ok(Some(SnapshotHeader { diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index b0f1a57762d..7ccca7f2bee 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -1,7 +1,7 @@ use std::{collections::HashMap, convert::TryInto}; use anyhow::Context as _; -use zksync_dal::{Connection, Core, CoreDal}; +use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ @@ -45,10 +45,6 @@ impl ZksNamespace { &self.state.current_method } - async fn connection(&self) -> Result, Web3Error> { - Ok(self.state.connection_pool.connection_tagged("api").await?) 
- } - #[tracing::instrument(skip(self, request))] pub async fn estimate_fee_impl(&self, request: CallRequest) -> Result { let mut request_with_gas_per_pubdata_overridden = request; @@ -137,12 +133,12 @@ impl ZksNamespace { from: u32, limit: u8, ) -> Result, Web3Error> { - let mut storage = self.connection().await?; + let mut storage = self.state.acquire_connection().await?; let tokens = storage .tokens_web3_dal() .get_well_known_tokens() .await - .context("get_well_known_tokens")?; + .map_err(DalError::generalize)?; let tokens = tokens .into_iter() @@ -164,12 +160,12 @@ impl ZksNamespace { &self, address: Address, ) -> Result, Web3Error> { - let mut storage = self.connection().await?; + let mut storage = self.state.acquire_connection().await?; let tokens = storage .tokens_dal() .get_all_l2_token_addresses() .await - .context("get_all_l2_token_addresses")?; + .map_err(DalError::generalize)?; let hashed_balance_keys = tokens.iter().map(|&token_address| { let token_account = AccountTreeId::new(if token_address == ETHEREUM_ADDRESS { L2_ETH_TOKEN_ADDRESS @@ -187,7 +183,7 @@ impl ZksNamespace { .storage_web3_dal() .get_values(&hashed_balance_keys) .await - .context("get_values")?; + .map_err(DalError::generalize)?; let balances = balance_values .into_iter() @@ -211,12 +207,12 @@ impl ZksNamespace { l2_log_position: Option, ) -> Result, Web3Error> { self.state.start_info.ensure_not_pruned(block_number)?; - let mut storage = self.connection().await?; + let mut storage = self.state.acquire_connection().await?; let Some(l1_batch_number) = storage .blocks_web3_dal() .get_l1_batch_number_of_miniblock(block_number) .await - .context("get_l1_batch_number_of_miniblock")? + .map_err(DalError::generalize)? else { return Ok(None); }; @@ -224,7 +220,7 @@ impl ZksNamespace { .blocks_web3_dal() .get_miniblock_range_of_l1_batch(l1_batch_number) .await - .context("get_miniblock_range_of_l1_batch")? + .map_err(DalError::generalize)? 
.context("L1 batch should contain at least one miniblock")?; // Position of l1 log in L1 batch relative to logs with identical data @@ -241,7 +237,7 @@ impl ZksNamespace { self.state.api_config.req_entities_limit, ) .await - .context("get_logs")?; + .map_err(DalError::generalize)?; let maybe_pos = logs.iter().position(|event| { event.block_number == Some(block_number.0.into()) && event.log_index == Some(l2_log_position.into()) @@ -280,7 +276,7 @@ impl ZksNamespace { .blocks_web3_dal() .get_l2_to_l1_logs(l1_batch_number) .await - .context("get_l2_to_l1_logs")?; + .map_err(DalError::generalize)?; let Some((l1_log_index, _)) = all_l1_logs_in_batch .iter() @@ -295,7 +291,7 @@ impl ZksNamespace { .blocks_dal() .get_l1_batch_header(l1_batch_number) .await - .context("get_l1_batch_header")? + .map_err(DalError::generalize)? else { return Ok(None); }; @@ -322,12 +318,12 @@ impl ZksNamespace { tx_hash: H256, index: Option, ) -> Result, Web3Error> { - let mut storage = self.connection().await?; + let mut storage = self.state.acquire_connection().await?; let Some((l1_batch_number, l1_batch_tx_index)) = storage .blocks_web3_dal() .get_l1_batch_info_for_tx(tx_hash) .await - .context("get_l1_batch_info_for_tx")? + .map_err(DalError::generalize)? else { return Ok(None); }; @@ -345,12 +341,12 @@ impl ZksNamespace { #[tracing::instrument(skip(self))] pub async fn get_l1_batch_number_impl(&self) -> Result { - let mut storage = self.connection().await?; + let mut storage = self.state.acquire_connection().await?; let l1_batch_number = storage .blocks_dal() .get_sealed_l1_batch_number() .await - .context("get_sealed_l1_batch_number")? + .map_err(DalError::generalize)? 
.ok_or(Web3Error::NoBlock)?; Ok(l1_batch_number.0.into()) } @@ -361,12 +357,12 @@ impl ZksNamespace { batch: L1BatchNumber, ) -> Result, Web3Error> { self.state.start_info.ensure_not_pruned(batch)?; - let mut storage = self.connection().await?; + let mut storage = self.state.acquire_connection().await?; let range = storage .blocks_web3_dal() .get_miniblock_range_of_l1_batch(batch) .await - .context("get_miniblock_range_of_l1_batch")?; + .map_err(DalError::generalize)?; Ok(range.map(|(min, max)| (U64::from(min.0), U64::from(max.0)))) } @@ -376,12 +372,12 @@ impl ZksNamespace { block_number: MiniblockNumber, ) -> Result, Web3Error> { self.state.start_info.ensure_not_pruned(block_number)?; - let mut storage = self.connection().await?; + let mut storage = self.state.acquire_connection().await?; Ok(storage .blocks_web3_dal() .get_block_details(block_number) .await - .context("get_block_details")?) + .map_err(DalError::generalize)?) } #[tracing::instrument(skip(self))] @@ -390,12 +386,12 @@ impl ZksNamespace { block_number: MiniblockNumber, ) -> Result, Web3Error> { self.state.start_info.ensure_not_pruned(block_number)?; - let mut storage = self.connection().await?; + let mut storage = self.state.acquire_connection().await?; Ok(storage .transactions_web3_dal() .get_raw_miniblock_transactions(block_number) .await - .context("get_raw_miniblock_transactions")?) + .map_err(DalError::generalize)?) 
} #[tracing::instrument(skip(self))] @@ -403,12 +399,12 @@ impl ZksNamespace { &self, hash: H256, ) -> Result, Web3Error> { - let mut storage = self.connection().await?; + let mut storage = self.state.acquire_connection().await?; let mut tx_details = storage .transactions_web3_dal() .get_transaction_details(hash) .await - .context("get_transaction_details")?; + .map_err(DalError::generalize)?; drop(storage); if tx_details.is_none() { @@ -423,12 +419,12 @@ impl ZksNamespace { batch_number: L1BatchNumber, ) -> Result, Web3Error> { self.state.start_info.ensure_not_pruned(batch_number)?; - let mut storage = self.connection().await?; + let mut storage = self.state.acquire_connection().await?; Ok(storage .blocks_web3_dal() .get_l1_batch_details(batch_number) .await - .context("get_l1_batch_details")?) + .map_err(DalError::generalize)?) } #[tracing::instrument(skip(self))] @@ -436,12 +432,12 @@ impl ZksNamespace { &self, hash: H256, ) -> Result>, Web3Error> { - let mut storage = self.connection().await?; + let mut storage = self.state.acquire_connection().await?; Ok(storage .factory_deps_dal() .get_factory_dep(hash) .await - .context("get_factory_dep")?) + .map_err(DalError::generalize)?) } #[tracing::instrument(skip(self))] @@ -471,7 +467,7 @@ impl ZksNamespace { &self, version_id: Option, ) -> Result, Web3Error> { - let mut storage = self.connection().await?; + let mut storage = self.state.acquire_connection().await?; let protocol_version = match version_id { Some(id) => { storage diff --git a/core/lib/zksync_core/src/api_server/web3/pubsub.rs b/core/lib/zksync_core/src/api_server/web3/pubsub.rs index 7a48c3e0a46..5530a6cbe26 100644 --- a/core/lib/zksync_core/src/api_server/web3/pubsub.rs +++ b/core/lib/zksync_core/src/api_server/web3/pubsub.rs @@ -1,6 +1,6 @@ //! (Largely) backend-agnostic logic for dealing with Web3 subscriptions. 
-use anyhow::{Context as _, Error}; +use anyhow::Context as _; use chrono::NaiveDateTime; use futures::FutureExt; use tokio::{ @@ -64,11 +64,7 @@ impl PubSubNotifier { .connection_tagged("api") .await .context("connection_tagged")?; - let sealed_miniblock_number = storage - .blocks_dal() - .get_sealed_miniblock_number() - .await - .context("get_sealed_miniblock_number()")?; + let sealed_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number().await?; Ok(match sealed_miniblock_number { Some(number) => number, None => { @@ -134,7 +130,7 @@ impl PubSubNotifier { .blocks_web3_dal() .get_block_headers_after(last_block_number) .await - .with_context(|| format!("get_block_headers_after({last_block_number})")) + .map_err(Into::into) } async fn notify_txs(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { @@ -164,7 +160,10 @@ impl PubSubNotifier { Ok(()) } - async fn new_txs(&self, last_time: NaiveDateTime) -> Result, Error> { + async fn new_txs( + &self, + last_time: NaiveDateTime, + ) -> anyhow::Result> { self.connection_pool .connection_tagged("api") .await @@ -172,7 +171,7 @@ impl PubSubNotifier { .transactions_web3_dal() .get_pending_txs_hashes_after(last_time, None) .await - .context("get_pending_txs_hashes_after()") + .map_err(Into::into) } async fn notify_logs(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { @@ -212,7 +211,7 @@ impl PubSubNotifier { .events_web3_dal() .get_all_logs(last_block_number) .await - .context("events_web3_dal().get_all_logs()") + .map_err(Into::into) } } diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs index 5eb1fbfc1a5..238bceb585f 100644 --- a/core/lib/zksync_core/src/api_server/web3/state.rs +++ b/core/lib/zksync_core/src/api_server/web3/state.rs @@ -8,6 +8,7 @@ use std::{ }; use anyhow::Context as _; +use futures::TryFutureExt; use lru::LruCache; use tokio::sync::{watch, Mutex}; use vise::GaugeGuard; @@ -15,7 +16,7 @@ use 
zksync_config::{ configs::{api::Web3JsonRpcConfig, chain::L1BatchCommitDataGeneratorMode, ContractsConfig}, GenesisConfig, }; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; use zksync_types::{ api, l2::L2Tx, transaction_request::CallRequest, Address, L1BatchNumber, L1ChainId, L2ChainId, MiniblockNumber, H256, U256, U64, @@ -262,6 +263,18 @@ impl RpcState { self.tx_sender.0.tx_sink.as_ref() } + /// Acquires a DB connection mapping possible errors. + // `track_caller` is necessary to correctly record call location. `async fn`s don't support it yet, + // thus manual de-sugaring. + #[track_caller] + pub(crate) fn acquire_connection( + &self, + ) -> impl Future, Web3Error>> + '_ { + self.connection_pool + .connection_tagged("api") + .map_err(|err| err.generalize().into()) + } + /// Resolves the specified block ID to a block number, which is guaranteed to be present in the node storage. pub(crate) async fn resolve_block( &self, @@ -327,7 +340,7 @@ impl RpcState { let block_number = block_number.unwrap_or(api::BlockNumber::Latest); let block_id = api::BlockId::Number(block_number); - let mut conn = self.connection_pool.connection_tagged("api").await?; + let mut conn = self.acquire_connection().await?; Ok(self.resolve_block(&mut conn, block_id).await.unwrap()) // ^ `unwrap()` is safe: `resolve_block_id(api::BlockId::Number(_))` can only return `None` // if called with an explicit number, and we've handled this case earlier. @@ -347,8 +360,7 @@ impl RpcState { match (filter.block_hash, filter.from_block, filter.to_block) { (Some(block_hash), None, None) => { let block_number = self - .connection_pool - .connection_tagged("api") + .acquire_connection() .await? 
.blocks_web3_dal() .resolve_block_id(api::BlockId::Hash(block_hash)) @@ -372,8 +384,7 @@ impl RpcState { filter: &Filter, ) -> Result { let pending_block = self - .connection_pool - .connection_tagged("api") + .acquire_connection() .await? .blocks_web3_dal() .resolve_block_id(api::BlockId::Number(api::BlockNumber::Pending)) @@ -397,8 +408,7 @@ impl RpcState { if call_request.nonce.is_some() { return Ok(()); } - let mut connection = self.connection_pool.connection_tagged("api").await?; - + let mut connection = self.acquire_connection().await?; let latest_block_id = api::BlockId::Number(api::BlockNumber::Latest); let latest_block_number = self.resolve_block(&mut connection, latest_block_id).await?; @@ -407,7 +417,7 @@ impl RpcState { .storage_web3_dal() .get_address_historical_nonce(from, latest_block_number) .await - .context("get_address_historical_nonce")?; + .map_err(DalError::generalize)?; call_request.nonce = Some(address_historical_nonce); Ok(()) } diff --git a/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs b/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs index 549c6941b64..30ef53c0163 100644 --- a/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs +++ b/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs @@ -118,8 +118,7 @@ impl JobProcessor for BasicWitnessInputProducer { let l1_batch_to_process = connection .basic_witness_input_producer_dal() .get_next_basic_witness_input_producer_job() - .await - .context("failed to get next basic witness input producer job")?; + .await?; Ok(l1_batch_to_process.map(|number| (number, number))) } @@ -187,8 +186,7 @@ impl JobProcessor for BasicWitnessInputProducer { transaction .basic_witness_input_producer_dal() .mark_job_as_successful(job_id, started_at, &object_path) - .await - .context("failed to mark job as successful for BasicWitnessInputProducer")?; + .await?; transaction .commit() .await diff --git a/core/lib/zksync_core/src/consensus/storage/mod.rs 
b/core/lib/zksync_core/src/consensus/storage/mod.rs index 7335f797eff..bdd1ab34b64 100644 --- a/core/lib/zksync_core/src/consensus/storage/mod.rs +++ b/core/lib/zksync_core/src/consensus/storage/mod.rs @@ -5,7 +5,7 @@ use zksync_concurrency::{ctx, error::Wrap as _, sync, time}; use zksync_consensus_bft::PayloadManager; use zksync_consensus_roles::validator; use zksync_consensus_storage::{BlockStoreState, PersistentBlockStore, ReplicaState, ReplicaStore}; -use zksync_dal::{consensus_dal::Payload, ConnectionPool, Core, CoreDal}; +use zksync_dal::{consensus_dal::Payload, ConnectionPool, Core, CoreDal, DalError}; use zksync_types::MiniblockNumber; #[cfg(test)] @@ -45,7 +45,10 @@ impl<'a> Connection<'a> { &mut self, ctx: &ctx::Ctx, ) -> ctx::Result> { - Ok(ctx.wait(self.0.consensus_dal().block_range()).await??) + Ok(ctx + .wait(self.0.consensus_dal().block_range()) + .await? + .context("sqlx")?) } /// Wrapper for `consensus_dal().block_payload()`. @@ -56,7 +59,8 @@ impl<'a> Connection<'a> { ) -> ctx::Result> { Ok(ctx .wait(self.0.consensus_dal().block_payload(number)) - .await??) + .await? + .map_err(DalError::generalize)?) } /// Wrapper for `consensus_dal().first_certificate()`. @@ -66,7 +70,8 @@ impl<'a> Connection<'a> { ) -> ctx::Result> { Ok(ctx .wait(self.0.consensus_dal().first_certificate()) - .await??) + .await? + .map_err(DalError::generalize)?) } /// Wrapper for `consensus_dal().last_certificate()`. @@ -76,7 +81,8 @@ impl<'a> Connection<'a> { ) -> ctx::Result> { Ok(ctx .wait(self.0.consensus_dal().last_certificate()) - .await??) + .await? + .map_err(DalError::generalize)?) } /// Wrapper for `consensus_dal().certificate()`. @@ -87,7 +93,8 @@ impl<'a> Connection<'a> { ) -> ctx::Result> { Ok(ctx .wait(self.0.consensus_dal().certificate(number)) - .await??) + .await? + .map_err(DalError::generalize)?) } /// Wrapper for `consensus_dal().insert_certificate()`. @@ -103,7 +110,10 @@ impl<'a> Connection<'a> { /// Wrapper for `consensus_dal().replica_state()`. 
pub async fn replica_state(&mut self, ctx: &ctx::Ctx) -> ctx::Result { - Ok(ctx.wait(self.0.consensus_dal().replica_state()).await??) + Ok(ctx + .wait(self.0.consensus_dal().replica_state()) + .await? + .map_err(DalError::generalize)?) } /// Wrapper for `consensus_dal().set_replica_state()`. @@ -131,7 +141,10 @@ impl<'a> Connection<'a> { } pub async fn genesis(&mut self, ctx: &ctx::Ctx) -> ctx::Result> { - Ok(ctx.wait(self.0.consensus_dal().genesis()).await??) + Ok(ctx + .wait(self.0.consensus_dal().genesis()) + .await? + .map_err(DalError::generalize)?) } pub async fn try_update_genesis( @@ -199,7 +212,9 @@ impl Store { /// Wrapper for `connection_tagged()`. pub(super) async fn access<'a>(&'a self, ctx: &ctx::Ctx) -> ctx::Result> { Ok(Connection( - ctx.wait(self.0.connection_tagged("consensus")).await??, + ctx.wait(self.0.connection_tagged("consensus")) + .await? + .map_err(DalError::generalize)?, )) } diff --git a/core/lib/zksync_core/src/consensus/testonly.rs b/core/lib/zksync_core/src/consensus/testonly.rs index ab0ff50cdf6..a56cb30c5bb 100644 --- a/core/lib/zksync_core/src/consensus/testonly.rs +++ b/core/lib/zksync_core/src/consensus/testonly.rs @@ -8,7 +8,7 @@ use zksync_concurrency::{ctx, error::Wrap as _, scope, sync}; use zksync_config::{configs, GenesisConfig}; use zksync_consensus_roles::validator; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::CoreDal; +use zksync_dal::{CoreDal, DalError}; use zksync_types::{ api, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, H256, @@ -324,20 +324,20 @@ async fn calculate_mock_metadata(ctx: &ctx::Ctx, store: &Store) -> ctx::Result<( let Some(last) = ctx .wait(conn.0.blocks_dal().get_sealed_l1_batch_number()) .await? - .context("get_sealed_l1_batch_number()")? + .map_err(DalError::generalize)? else { return Ok(()); }; let prev = ctx .wait(conn.0.blocks_dal().get_last_l1_batch_number_with_metadata()) .await? 
- .context("get_last_l1_batch_number_with_metadata()")?; + .map_err(DalError::generalize)?; let mut first = match prev { Some(prev) => prev + 1, None => ctx .wait(conn.0.blocks_dal().get_earliest_l1_batch_number()) .await? - .context("get_earliest_l1_batch_number()")? + .map_err(DalError::generalize)? .context("batches disappeared")?, }; while first <= last { diff --git a/core/lib/zksync_core/src/genesis.rs b/core/lib/zksync_core/src/genesis.rs index 1e7fcb5c848..5ec8c794063 100644 --- a/core/lib/zksync_core/src/genesis.rs +++ b/core/lib/zksync_core/src/genesis.rs @@ -13,8 +13,7 @@ use multivm::{ }; use zksync_config::{configs::database::MerkleTreeMode, GenesisConfig, PostgresConfig}; use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SET_CHAIN_ID_EVENT}; -use zksync_dal::{ConnectionPool, Core, CoreDal, SqlxError}; -use zksync_db_connection::connection::Connection; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; use zksync_eth_client::{clients::QueryClient, EthInterface}; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_system_constants::PRIORITY_EXPIRATION; @@ -68,7 +67,7 @@ pub enum GenesisError { #[error("Wrong protocol version")] ProtocolVersion(u16), #[error("DB Error: {0}")] - DBError(#[from] SqlxError), + DBError(#[from] DalError), #[error("Error: {0}")] Other(#[from] anyhow::Error), #[error("Field: {0} required for genesis")] diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs index a918d1a67f3..d1211ba1b86 100644 --- a/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs @@ -280,8 +280,7 @@ impl AsyncTreeRecovery { let chunk_starts = storage .storage_logs_dal() .get_chunk_starts_for_miniblock(snapshot_miniblock, key_chunks) - .await - .context("Failed getting chunk starts")?; + .await?; let chunk_starts_latency = chunk_starts_latency.observe(); 
tracing::debug!( "Loaded start entries for {} chunks in {chunk_starts_latency:?}", @@ -335,10 +334,7 @@ impl AsyncTreeRecovery { let all_entries = storage .storage_logs_dal() .get_tree_entries_for_miniblock(snapshot_miniblock, key_chunk.clone()) - .await - .with_context(|| { - format!("Failed getting entries for chunk {key_chunk:?} in snapshot for miniblock #{snapshot_miniblock}") - })?; + .await?; drop(storage); let entries_latency = entries_latency.observe(); tracing::debug!( diff --git a/core/lib/zksync_core/src/reorg_detector/mod.rs b/core/lib/zksync_core/src/reorg_detector/mod.rs index 6d6acd6c0df..15d06a6b10b 100644 --- a/core/lib/zksync_core/src/reorg_detector/mod.rs +++ b/core/lib/zksync_core/src/reorg_detector/mod.rs @@ -3,7 +3,7 @@ use std::{fmt, time::Duration}; use anyhow::Context as _; use async_trait::async_trait; use tokio::sync::watch; -use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_dal::{ConnectionPool, Core, CoreDal, DalError}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_types::{L1BatchNumber, MiniblockNumber, H256}; use zksync_web3_decl::{ @@ -246,7 +246,7 @@ impl ReorgDetector { .blocks_dal() .get_last_l1_batch_number_with_metadata() .await - .context("get_last_l1_batch_number_with_metadata()")? + .map_err(DalError::generalize)? else { return Ok(()); }; @@ -254,7 +254,7 @@ impl ReorgDetector { .blocks_dal() .get_sealed_miniblock_number() .await - .context("get_sealed_miniblock_number()")? + .map_err(DalError::generalize)? else { return Ok(()); }; @@ -290,8 +290,8 @@ impl ReorgDetector { .blocks_dal() .get_earliest_l1_batch_number_with_metadata() .await - .context("get_earliest_l1_batch_number_with_metadata")? - .context("all L1 batches dissapeared")?; + .map_err(DalError::generalize)? 
+ .context("all L1 batches disappeared")?; drop(storage); match self.root_hashes_match(first_l1_batch).await { Ok(true) => {} @@ -318,7 +318,7 @@ impl ReorgDetector { .blocks_dal() .get_miniblock_header(miniblock) .await - .context("get_miniblock_header()")? + .map_err(DalError::generalize)? .with_context(|| format!("Header does not exist for local miniblock #{miniblock}"))? .hash; drop(storage); @@ -347,7 +347,7 @@ impl ReorgDetector { .blocks_dal() .get_l1_batch_state_root(l1_batch) .await - .context("get_l1_batch_state_root()")? + .map_err(DalError::generalize)? .with_context(|| format!("Root hash does not exist for local batch #{l1_batch}"))?; drop(storage); diff --git a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs index e285ad9492e..75f42a0faf7 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs @@ -31,16 +31,11 @@ pub struct IoCursor { impl IoCursor { /// Loads the cursor from Postgres. pub async fn new(storage: &mut Connection<'_, Core>) -> anyhow::Result { - let last_sealed_l1_batch_number = storage - .blocks_dal() - .get_sealed_l1_batch_number() - .await - .context("Failed getting sealed L1 batch number")?; + let last_sealed_l1_batch_number = storage.blocks_dal().get_sealed_l1_batch_number().await?; let last_miniblock_header = storage .blocks_dal() .get_last_sealed_miniblock_header() - .await - .context("Failed getting sealed miniblock header")?; + .await?; if let (Some(l1_batch_number), Some(miniblock_header)) = (last_sealed_l1_batch_number, &last_miniblock_header) @@ -55,8 +50,7 @@ impl IoCursor { let snapshot_recovery = storage .snapshot_recovery_dal() .get_applied_snapshot_status() - .await - .context("Failed getting snapshot recovery info")? + .await? 
.context("Postgres contains neither blocks nor snapshot recovery info")?; let l1_batch = last_sealed_l1_batch_number.unwrap_or(snapshot_recovery.l1_batch_number) + 1; diff --git a/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs b/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs index d1f80c32776..7eda9353887 100644 --- a/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs +++ b/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs @@ -163,8 +163,7 @@ async fn is_fee_address_migrated( storage .blocks_dal() .is_fee_address_migrated(miniblock) - .await - .with_context(|| format!("Failed getting fee address for miniblock #{miniblock}"))? + .await? .with_context(|| format!("Miniblock #{miniblock} disappeared")) } diff --git a/core/lib/zksync_core/src/state_keeper/state_keeper_storage.rs b/core/lib/zksync_core/src/state_keeper/state_keeper_storage.rs index a193de53105..0cb6f2b0001 100644 --- a/core/lib/zksync_core/src/state_keeper/state_keeper_storage.rs +++ b/core/lib/zksync_core/src/state_keeper/state_keeper_storage.rs @@ -88,17 +88,12 @@ impl AsyncRocksdbCache { connection: &mut Connection<'_, Core>, ) -> anyhow::Result> { let mut dal = connection.blocks_dal(); - let Some(l1_batch_number) = dal - .get_sealed_l1_batch_number() - .await - .context("Failed to load the latest sealed L1 batch number")? - else { + let Some(l1_batch_number) = dal.get_sealed_l1_batch_number().await? else { return Ok(None); }; let (_, miniblock_number) = dal .get_miniblock_range_of_l1_batch(l1_batch_number) - .await - .context("Failed to load the miniblock range for the latest sealed L1 batch")? + .await? .context("The latest sealed L1 batch does not have a miniblock range")?; Ok(Some((miniblock_number, l1_batch_number))) } @@ -117,8 +112,7 @@ impl AsyncRocksdbCache { let snapshot_recovery = connection .snapshot_recovery_dal() .get_applied_snapshot_status() - .await - .context("Failed getting snapshot recovery info")? + .await? 
.context("Could not find snapshot, no state available")?; ( snapshot_recovery.miniblock_number, diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs index 892dddd6c47..b262c071561 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs @@ -65,8 +65,8 @@ enum UpdaterError { Internal(#[from] anyhow::Error), } -impl From for UpdaterError { - fn from(err: zksync_dal::SqlxError) -> Self { +impl From for UpdaterError { + fn from(err: zksync_dal::DalError) -> Self { Self::Internal(err.into()) } } diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index a8cb8fac015..0c7bef1ec74 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -79,8 +79,7 @@ impl ExternalIO { .await? .factory_deps_dal() .get_factory_dep(hash) - .await - .with_context(|| format!("failed getting bytecode for hash {hash:?}"))?; + .await?; Ok(match bytecode { Some(bytecode) => SystemContractCode { diff --git a/core/lib/zksync_core/src/utils/mod.rs b/core/lib/zksync_core/src/utils/mod.rs index a1718ddc3d3..0a158f4437d 100644 --- a/core/lib/zksync_core/src/utils/mod.rs +++ b/core/lib/zksync_core/src/utils/mod.rs @@ -131,8 +131,7 @@ pub(crate) async fn projected_first_l1_batch( let snapshot_recovery = storage .snapshot_recovery_dal() .get_applied_snapshot_status() - .await - .context("failed getting snapshot recovery status")?; + .await?; Ok(snapshot_recovery.map_or(L1BatchNumber(0), |recovery| recovery.l1_batch_number + 1)) } @@ -146,8 +145,7 @@ pub(crate) async fn pending_protocol_version( let last_miniblock = storage .blocks_dal() .get_last_sealed_miniblock_header() - .await - .context("failed getting last sealed miniblock")?; + .await?; if let Some(last_miniblock) = last_miniblock { return 
Ok(last_miniblock.protocol_version.unwrap_or_else(|| { // Protocol version should be set for the most recent miniblock even in cases it's not filled @@ -163,8 +161,7 @@ pub(crate) async fn pending_protocol_version( let snapshot_recovery = storage .snapshot_recovery_dal() .get_applied_snapshot_status() - .await - .context("failed getting snapshot recovery status")? + .await? .context("storage contains neither miniblocks, nor snapshot recovery info")?; Ok(snapshot_recovery.protocol_version) } diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 3ab27af7dd3..b91ab4c9426 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7540,6 +7540,7 @@ dependencies = [ "serde", "serde_json", "sqlx", + "thiserror", "tokio", "tracing", "url", From a9706294fe740cbc9af37eef8d968584a3ec4859 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Thu, 4 Apr 2024 12:02:01 +0300 Subject: [PATCH 2/9] feat: Archiving of prover in gpu_prover_queue (#1537) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add archiver for provers in gpu_prover_queue, which will move all provers, whose status was dead during some time to archive. Add availability checker for provers, which will check whether prover wasn't marked dead while being alive, and shut down it if so. ## Why ❔ To improve prover performance and prevent incidents with provers marked dead while being alive(autoscalers won't scale provers more, because they see that prover is alive) ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. - [x] Linkcheck has been run via `zk linkcheck`. 
--- checks-config/era.dic | 1 + core/lib/basic_types/src/prover_dal.rs | 18 ++- core/lib/config/src/configs/fri_prover.rs | 2 + core/lib/config/src/configs/house_keeper.rs | 17 ++- core/lib/config/src/testonly.rs | 7 +- ...be812a6d7e76df4e545d639a564992f12bbe1.json | 100 ------------- core/lib/env_config/src/fri_prover.rs | 2 + core/lib/env_config/src/house_keeper.rs | 14 +- core/lib/protobuf_config/src/house_keeper.rs | 22 +-- .../src/proto/house_keeper.proto | 26 ++-- .../protobuf_config/src/proto/prover.proto | 132 +++++++++--------- core/lib/protobuf_config/src/prover.rs | 5 + .../house_keeper/fri_gpu_prover_archiver.rs | 52 +++++++ .../house_keeper/fri_prover_jobs_archiver.rs | 6 +- core/lib/zksync_core/src/house_keeper/mod.rs | 1 + core/lib/zksync_core/src/lib.rs | 25 +++- core/lib/zksync_core/src/metrics.rs | 10 ++ .../implementations/layers/house_keeper.rs | 42 ++++-- etc/env/base/fri_prover.toml | 27 ++-- etc/env/base/house_keeper.toml | 6 +- etc/env/file_based/general.yaml | 7 +- ...47e0574419632a40119b661a31ec70f0f950b.json | 22 +++ ...aae31358088e142dff51c9f0bde8f386900d3.json | 24 ++++ ...at_column_to_prover_queue_archive.down.sql | 2 + ...d_at_column_to_prover_queue_archive.up.sql | 2 + .../src/fri_gpu_prover_queue_dal.rs | 55 +++++++- .../src/gpu_prover_availability_checker.rs | 81 +++++++++++ prover/prover_fri/src/main.rs | 16 ++- prover/prover_fri/src/metrics.rs | 15 +- 29 files changed, 499 insertions(+), 240 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-37069b0fbe07f12f6ac93d434b7be812a6d7e76df4e545d639a564992f12bbe1.json create mode 100644 core/lib/zksync_core/src/house_keeper/fri_gpu_prover_archiver.rs create mode 100644 prover/prover_dal/.sqlx/query-02bbd0a1c01747fb24a68f0ecf447e0574419632a40119b661a31ec70f0f950b.json create mode 100644 prover/prover_dal/.sqlx/query-2095e5646c382ccbc6e3bafdeddaae31358088e142dff51c9f0bde8f386900d3.json create mode 100644 
prover/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.down.sql create mode 100644 prover/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.up.sql create mode 100644 prover/prover_fri/src/gpu_prover_availability_checker.rs diff --git a/checks-config/era.dic b/checks-config/era.dic index fdd961f601e..78da85c1d90 100644 --- a/checks-config/era.dic +++ b/checks-config/era.dic @@ -927,3 +927,4 @@ StorageMarker SIGINT opentelemetry PubdataSendingMode +FriGpuProverArchiver diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs index 827d1942b6a..41ab439a15f 100644 --- a/core/lib/basic_types/src/prover_dal.rs +++ b/core/lib/basic_types/src/prover_dal.rs @@ -1,5 +1,5 @@ //! Types exposed by the prover DAL for general-purpose use. -use std::{net::IpAddr, ops::Add}; +use std::{net::IpAddr, ops::Add, str::FromStr}; use chrono::{DateTime, Duration, Utc}; @@ -204,7 +204,7 @@ pub struct JobExtendedStatistics { pub active_area: Vec, } -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum GpuProverInstanceStatus { // The instance is available for processing. Available, @@ -215,3 +215,17 @@ pub enum GpuProverInstanceStatus { // The instance is not alive anymore. 
Dead, } + +impl FromStr for GpuProverInstanceStatus { + type Err = (); + + fn from_str(s: &str) -> Result { + match s { + "available" => Ok(Self::Available), + "full" => Ok(Self::Full), + "reserved" => Ok(Self::Reserved), + "dead" => Ok(Self::Dead), + _ => Err(()), + } + } +} diff --git a/core/lib/config/src/configs/fri_prover.rs b/core/lib/config/src/configs/fri_prover.rs index 958b8c5fec2..f8b9b8adf1c 100644 --- a/core/lib/config/src/configs/fri_prover.rs +++ b/core/lib/config/src/configs/fri_prover.rs @@ -25,6 +25,8 @@ pub struct FriProverConfig { pub queue_capacity: usize, pub witness_vector_receiver_port: u16, pub zone_read_url: String, + pub availability_check_interval_in_secs: u32, + // whether to write to public GCS bucket for https://github.com/matter-labs/era-boojum-validator-cli pub shall_save_to_public_bucket: bool, pub object_store: Option, diff --git a/core/lib/config/src/configs/house_keeper.rs b/core/lib/config/src/configs/house_keeper.rs index d2f85df9bec..e1eb1337566 100644 --- a/core/lib/config/src/configs/house_keeper.rs +++ b/core/lib/config/src/configs/house_keeper.rs @@ -13,13 +13,20 @@ pub struct HouseKeeperConfig { pub prover_db_pool_size: u32, pub proof_compressor_job_retrying_interval_ms: u64, pub proof_compressor_stats_reporting_interval_ms: u64, - pub prover_job_archiver_reporting_interval_ms: Option, - pub prover_job_archiver_archiving_interval_secs: Option, + pub prover_job_archiver_archiving_interval_ms: Option, + pub prover_job_archiver_archive_after_secs: Option, + pub fri_gpu_prover_archiver_archiving_interval_ms: Option, + pub fri_gpu_prover_archiver_archive_after_secs: Option, } impl HouseKeeperConfig { - pub fn prover_job_archiver_enabled(&self) -> bool { - self.prover_job_archiver_reporting_interval_ms.is_some() - && self.prover_job_archiver_archiving_interval_secs.is_some() + pub fn prover_job_archiver_params(&self) -> Option<(u64, u64)> { + self.prover_job_archiver_archiving_interval_ms + 
.zip(self.prover_job_archiver_archive_after_secs) + } + + pub fn fri_gpu_prover_archiver_params(&self) -> Option<(u64, u64)> { + self.fri_gpu_prover_archiver_archiving_interval_ms + .zip(self.fri_gpu_prover_archiver_archive_after_secs) } } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index d92590f466b..097e1af6dbc 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -430,6 +430,7 @@ impl Distribution for EncodeDist { witness_vector_receiver_port: self.sample(rng), zone_read_url: self.sample(rng), shall_save_to_public_bucket: self.sample(rng), + availability_check_interval_in_secs: self.sample(rng), object_store: self.sample(rng), } } @@ -563,8 +564,10 @@ impl Distribution for EncodeDist { witness_generator_job_retrying_interval_ms: self.sample(rng), proof_compressor_job_retrying_interval_ms: self.sample(rng), proof_compressor_stats_reporting_interval_ms: self.sample(rng), - prover_job_archiver_reporting_interval_ms: self.sample(rng), - prover_job_archiver_archiving_interval_secs: self.sample(rng), + prover_job_archiver_archiving_interval_ms: self.sample(rng), + prover_job_archiver_archive_after_secs: self.sample(rng), + fri_gpu_prover_archiver_archiving_interval_ms: self.sample(rng), + fri_gpu_prover_archiver_archive_after_secs: self.sample(rng), } } } diff --git a/core/lib/dal/.sqlx/query-37069b0fbe07f12f6ac93d434b7be812a6d7e76df4e545d639a564992f12bbe1.json b/core/lib/dal/.sqlx/query-37069b0fbe07f12f6ac93d434b7be812a6d7e76df4e545d639a564992f12bbe1.json deleted file mode 100644 index a830db9f4c9..00000000000 --- a/core/lib/dal/.sqlx/query-37069b0fbe07f12f6ac93d434b7be812a6d7e76df4e545d639a564992f12bbe1.json +++ /dev/null @@ -1,100 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS 
\"l1_batch_number!\",\n (\n SELECT\n MAX(m2.number)\n FROM\n miniblocks m2\n WHERE\n miniblocks.l1_batch_number = m2.l1_batch_number\n ) AS \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.gas_limit,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "l1_batch_number!", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "last_batch_miniblock?", - "type_info": "Int8" - }, - { - "ordinal": 3, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 4, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 5, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 6, - "name": "fair_pubdata_price", - "type_info": "Int8" - }, - { - "ordinal": 7, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 8, - "name": "default_aa_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 9, - "name": "virtual_blocks", - "type_info": "Int8" - }, - { - "ordinal": 10, - "name": "hash", - "type_info": "Bytea" - }, - { - "ordinal": 11, - "name": "gas_limit", - "type_info": "Int8" - }, - { - "ordinal": 12, - "name": "protocol_version!", - "type_info": "Int4" - }, - { - "ordinal": 13, - "name": "fee_account_address!", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - null, - null, - false, - false, - false, - true, - true, - true, - false, - false, - true, - true, - false - ] - }, - "hash": "37069b0fbe07f12f6ac93d434b7be812a6d7e76df4e545d639a564992f12bbe1" -} diff --git 
a/core/lib/env_config/src/fri_prover.rs b/core/lib/env_config/src/fri_prover.rs index f888436dc71..373d1e6f990 100644 --- a/core/lib/env_config/src/fri_prover.rs +++ b/core/lib/env_config/src/fri_prover.rs @@ -45,6 +45,7 @@ mod tests { }, max_retries: 5, }), + availability_check_interval_in_secs: 1_800, } } @@ -65,6 +66,7 @@ mod tests { FRI_PROVER_WITNESS_VECTOR_RECEIVER_PORT="3316" FRI_PROVER_ZONE_READ_URL="http://metadata.google.internal/computeMetadata/v1/instance/zone" FRI_PROVER_SHALL_SAVE_TO_PUBLIC_BUCKET=true + FRI_PROVER_AVAILABILITY_CHECK_INTERVAL_IN_SECS="1800" OBJECT_STORE_BUCKET_BASE_URL="/base/url" OBJECT_STORE_MODE="GCSWithCredentialFile" OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH="/path/to/credentials.json" diff --git a/core/lib/env_config/src/house_keeper.rs b/core/lib/env_config/src/house_keeper.rs index 5adccc5f0d4..f23d2705bd0 100644 --- a/core/lib/env_config/src/house_keeper.rs +++ b/core/lib/env_config/src/house_keeper.rs @@ -27,8 +27,12 @@ mod tests { prover_db_pool_size: 2, proof_compressor_job_retrying_interval_ms: 30_000, proof_compressor_stats_reporting_interval_ms: 10_000, - prover_job_archiver_reporting_interval_ms: Some(1_800_000), - prover_job_archiver_archiving_interval_secs: Some(172_800), + prover_job_archiver_archiving_interval_ms: Some(1_800_000), + prover_job_archiver_archive_after_secs: Some(172_800), + // 24 hours + fri_gpu_prover_archiver_archiving_interval_ms: Some(86_400_000), + // 48 hours + fri_gpu_prover_archiver_archive_after_secs: Some(172_800), } } @@ -48,8 +52,10 @@ mod tests { HOUSE_KEEPER_PROVER_STATS_REPORTING_INTERVAL_MS="5000" HOUSE_KEEPER_PROOF_COMPRESSOR_STATS_REPORTING_INTERVAL_MS="10000" HOUSE_KEEPER_PROOF_COMPRESSOR_JOB_RETRYING_INTERVAL_MS="30000" - HOUSE_KEEPER_PROVER_JOB_ARCHIVER_REPORTING_INTERVAL_MS="1800000" - HOUSE_KEEPER_PROVER_JOB_ARCHIVER_ARCHIVING_INTERVAL_SECS="172800" + HOUSE_KEEPER_PROVER_JOB_ARCHIVER_ARCHIVING_INTERVAL_MS="1800000" + HOUSE_KEEPER_PROVER_JOB_ARCHIVER_ARCHIVE_AFTER_SECS="172800" + 
HOUSE_KEEPER_FRI_GPU_PROVER_ARCHIVER_ARCHIVING_INTERVAL_MS="86400000" + HOUSE_KEEPER_FRI_GPU_PROVER_ARCHIVER_ARCHIVE_AFTER_SECS="172800" "#; lock.set_env(config); diff --git a/core/lib/protobuf_config/src/house_keeper.rs b/core/lib/protobuf_config/src/house_keeper.rs index ef6f659fd6b..b6871de853f 100644 --- a/core/lib/protobuf_config/src/house_keeper.rs +++ b/core/lib/protobuf_config/src/house_keeper.rs @@ -42,10 +42,13 @@ impl ProtoRepr for proto::HouseKeeper { .context("proof_compressor_stats_reporting_interval_ms")?, // TODO(PLA-862): Make these 2 variables required - prover_job_archiver_reporting_interval_ms: self - .prover_job_archiver_reporting_interval_ms, - prover_job_archiver_archiving_interval_secs: self - .prover_job_archiver_archiving_interval_secs, + prover_job_archiver_archiving_interval_ms: self + .prover_job_archiver_archiving_interval_ms, + prover_job_archiver_archive_after_secs: self.prover_job_archiver_archive_after_secs, + fri_gpu_prover_archiver_archiving_interval_ms: self + .fri_gpu_prover_archiver_archiving_interval_ms, + fri_gpu_prover_archiver_archive_after_secs: self + .fri_gpu_prover_archiver_archive_after_secs, }) } @@ -73,10 +76,13 @@ impl ProtoRepr for proto::HouseKeeper { proof_compressor_stats_reporting_interval_ms: Some( this.proof_compressor_stats_reporting_interval_ms, ), - prover_job_archiver_reporting_interval_ms: this - .prover_job_archiver_reporting_interval_ms, - prover_job_archiver_archiving_interval_secs: this - .prover_job_archiver_archiving_interval_secs, + prover_job_archiver_archiving_interval_ms: this + .prover_job_archiver_archiving_interval_ms, + prover_job_archiver_archive_after_secs: this.prover_job_archiver_archive_after_secs, + fri_gpu_prover_archiver_archiving_interval_ms: this + .fri_gpu_prover_archiver_archiving_interval_ms, + fri_gpu_prover_archiver_archive_after_secs: this + .fri_gpu_prover_archiver_archive_after_secs, } } } diff --git a/core/lib/protobuf_config/src/proto/house_keeper.proto 
b/core/lib/protobuf_config/src/proto/house_keeper.proto index 05f68680c4e..dce4af95b80 100644 --- a/core/lib/protobuf_config/src/proto/house_keeper.proto +++ b/core/lib/protobuf_config/src/proto/house_keeper.proto @@ -3,16 +3,18 @@ syntax = "proto3"; package zksync.config.house_keeper; message HouseKeeper { - optional uint64 l1_batch_metrics_reporting_interval_ms = 1; // required; ms - optional uint64 gpu_prover_queue_reporting_interval_ms = 2; // required; ms - optional uint64 prover_job_retrying_interval_ms = 3; // required; ms - optional uint64 prover_stats_reporting_interval_ms = 4; // required ms - optional uint64 witness_job_moving_interval_ms = 5; // required; ms - optional uint64 witness_generator_stats_reporting_interval_ms = 6; // required; ms - optional uint64 witness_generator_job_retrying_interval_ms = 9; // required; ms - optional uint32 prover_db_pool_size = 10; // required - optional uint64 proof_compressor_job_retrying_interval_ms = 12; // required; ms - optional uint64 proof_compressor_stats_reporting_interval_ms = 13; // required; ms - optional uint64 prover_job_archiver_reporting_interval_ms = 14; // optional; ms - optional uint64 prover_job_archiver_archiving_interval_secs = 15; // optional; seconds + optional uint64 l1_batch_metrics_reporting_interval_ms = 1; // required; ms + optional uint64 gpu_prover_queue_reporting_interval_ms = 2; // required; ms + optional uint64 prover_job_retrying_interval_ms = 3; // required; ms + optional uint64 prover_stats_reporting_interval_ms = 4; // required ms + optional uint64 witness_job_moving_interval_ms = 5; // required; ms + optional uint64 witness_generator_stats_reporting_interval_ms = 6; // required; ms + optional uint64 witness_generator_job_retrying_interval_ms = 9; // required; ms + optional uint32 prover_db_pool_size = 10; // required + optional uint64 proof_compressor_job_retrying_interval_ms = 12; // required; ms + optional uint64 proof_compressor_stats_reporting_interval_ms = 13; // required; ms 
+ optional uint64 prover_job_archiver_archiving_interval_ms = 14; // optional; ms + optional uint64 prover_job_archiver_archive_after_secs = 15; // optional; seconds + optional uint64 fri_gpu_prover_archiver_archiving_interval_ms = 16; // optional; ms + optional uint64 fri_gpu_prover_archiver_archive_after_secs = 17; // optional; seconds } diff --git a/core/lib/protobuf_config/src/proto/prover.proto b/core/lib/protobuf_config/src/proto/prover.proto index da1f295a582..36700cd555e 100644 --- a/core/lib/protobuf_config/src/proto/prover.proto +++ b/core/lib/protobuf_config/src/proto/prover.proto @@ -1,99 +1,101 @@ syntax = "proto3"; + import "zksync/config/object_store.proto"; package zksync.config.prover; message ProofCompressor { - optional uint32 compression_mode = 1; // required; u8 - optional uint32 prometheus_listener_port = 2; // required; u16 - optional string prometheus_pushgateway_url = 3; // required - optional uint64 prometheus_push_interval_ms = 4; // optional; ms - optional uint32 generation_timeout_in_secs = 5; // required; s - optional uint32 max_attempts = 6; // required - optional string universal_setup_path = 7; // required; fs path - optional string universal_setup_download_url = 8; // required - optional bool verify_wrapper_proof = 9; // required + optional uint32 compression_mode = 1; // required; u8 + optional uint32 prometheus_listener_port = 2; // required; u16 + optional string prometheus_pushgateway_url = 3; // required + optional uint64 prometheus_push_interval_ms = 4; // optional; ms + optional uint32 generation_timeout_in_secs = 5; // required; s + optional uint32 max_attempts = 6; // required + optional string universal_setup_path = 7; // required; fs path + optional string universal_setup_download_url = 8; // required + optional bool verify_wrapper_proof = 9; // required } enum SetupLoadMode { - FROM_DISK = 0; - FROM_MEMORY = 1; + FROM_DISK = 0; + FROM_MEMORY = 1; } message Prover { - optional string setup_data_path = 1; // required; fs 
path? - optional uint32 prometheus_port = 2; // required; u16 - optional uint32 max_attempts = 3; // required - optional uint32 generation_timeout_in_secs = 4; // required; s - repeated uint32 base_layer_circuit_ids_to_be_verified = 5; // required - repeated uint32 recursive_layer_circuit_ids_to_be_verified = 6; // required - optional SetupLoadMode setup_load_mode = 7; // required - optional uint32 specialized_group_id = 8; // required; u8 - optional uint64 witness_vector_generator_thread_count = 9; // optional - optional uint64 queue_capacity = 10; // required - optional uint32 witness_vector_receiver_port = 11; // required; u16 - optional string zone_read_url = 12; // required - optional bool shall_save_to_public_bucket = 13; // required - optional config.object_store.ObjectStore object_store = 20; + optional string setup_data_path = 1; // required; fs path? + optional uint32 prometheus_port = 2; // required; u16 + optional uint32 max_attempts = 3; // required + optional uint32 generation_timeout_in_secs = 4; // required; s + repeated uint32 base_layer_circuit_ids_to_be_verified = 5; // required + repeated uint32 recursive_layer_circuit_ids_to_be_verified = 6; // required + optional SetupLoadMode setup_load_mode = 7; // required + optional uint32 specialized_group_id = 8; // required; u8 + optional uint64 witness_vector_generator_thread_count = 9; // optional + optional uint64 queue_capacity = 10; // required + optional uint32 witness_vector_receiver_port = 11; // required; u16 + optional string zone_read_url = 12; // required + optional uint32 availability_check_interval_in_secs = 21; // required; s + optional bool shall_save_to_public_bucket = 13; // required + optional config.object_store.ObjectStore object_store = 20; } message CircuitIdRoundTuple { - optional uint32 circuit_id = 1; // required; u8 - optional uint32 aggregation_round = 2; // required; u8 + optional uint32 circuit_id = 1; // required; u8 + optional uint32 aggregation_round = 2; // required; u8 
} message ProverGroup { - repeated CircuitIdRoundTuple group_0 = 1; - repeated CircuitIdRoundTuple group_1 = 2; - repeated CircuitIdRoundTuple group_2 = 3; - repeated CircuitIdRoundTuple group_3 = 4; - repeated CircuitIdRoundTuple group_4 = 5; - repeated CircuitIdRoundTuple group_5 = 6; - repeated CircuitIdRoundTuple group_6 = 7; - repeated CircuitIdRoundTuple group_7 = 8; - repeated CircuitIdRoundTuple group_8 = 9; - repeated CircuitIdRoundTuple group_9 = 10; - repeated CircuitIdRoundTuple group_10 = 11; - repeated CircuitIdRoundTuple group_11 = 12; - repeated CircuitIdRoundTuple group_12 = 13; + repeated CircuitIdRoundTuple group_0 = 1; + repeated CircuitIdRoundTuple group_1 = 2; + repeated CircuitIdRoundTuple group_2 = 3; + repeated CircuitIdRoundTuple group_3 = 4; + repeated CircuitIdRoundTuple group_4 = 5; + repeated CircuitIdRoundTuple group_5 = 6; + repeated CircuitIdRoundTuple group_6 = 7; + repeated CircuitIdRoundTuple group_7 = 8; + repeated CircuitIdRoundTuple group_8 = 9; + repeated CircuitIdRoundTuple group_9 = 10; + repeated CircuitIdRoundTuple group_10 = 11; + repeated CircuitIdRoundTuple group_11 = 12; + repeated CircuitIdRoundTuple group_12 = 13; } message ProverGateway { - optional string api_url = 1; // required - optional uint32 api_poll_duration_secs = 2; // required; s - optional uint32 prometheus_listener_port = 3; // required; u16 - optional string prometheus_pushgateway_url = 4; // required - optional uint64 prometheus_push_interval_ms = 5; // optional; ms + optional string api_url = 1; // required + optional uint32 api_poll_duration_secs = 2; // required; s + optional uint32 prometheus_listener_port = 3; // required; u16 + optional string prometheus_pushgateway_url = 4; // required + optional uint64 prometheus_push_interval_ms = 5; // optional; ms } message WitnessGenerator { - optional uint32 generation_timeout_in_secs = 1; // required; - optional uint32 max_attempts = 2; // required - optional uint32 blocks_proving_percentage = 3; // 
optional; 0-100 - repeated uint32 dump_arguments_for_blocks = 4; - optional uint32 last_l1_batch_to_process = 5; // optional - optional uint32 force_process_block = 6; // optional - optional bool shall_save_to_public_bucket = 7; // required - optional uint32 basic_generation_timeout_in_secs = 8; // optional; - optional uint32 leaf_generation_timeout_in_secs = 9; // optional; - optional uint32 node_generation_timeout_in_secs = 10; // optional; - optional uint32 scheduler_generation_timeout_in_secs = 11; // optional; + optional uint32 generation_timeout_in_secs = 1; // required; + optional uint32 max_attempts = 2; // required + optional uint32 blocks_proving_percentage = 3; // optional; 0-100 + repeated uint32 dump_arguments_for_blocks = 4; + optional uint32 last_l1_batch_to_process = 5; // optional + optional uint32 force_process_block = 6; // optional + optional bool shall_save_to_public_bucket = 7; // required + optional uint32 basic_generation_timeout_in_secs = 8; // optional; + optional uint32 leaf_generation_timeout_in_secs = 9; // optional; + optional uint32 node_generation_timeout_in_secs = 10; // optional; + optional uint32 scheduler_generation_timeout_in_secs = 11; // optional; } message WitnessVectorGenerator { - optional uint32 max_prover_reservation_duration_in_secs = 1; // required; s - optional uint32 prover_instance_wait_timeout_in_secs = 2; // required; s - optional uint32 prover_instance_poll_time_in_milli_secs = 3; // required; ms - optional uint32 prometheus_listener_port = 4; // required; u16 - optional string prometheus_pushgateway_url = 5; // required - optional uint64 prometheus_push_interval_ms = 6; // optional; ms - optional uint32 specialized_group_id = 7; // required; u8 + optional uint32 max_prover_reservation_duration_in_secs = 1; // required; s + optional uint32 prover_instance_wait_timeout_in_secs = 2; // required; s + optional uint32 prover_instance_poll_time_in_milli_secs = 3; // required; ms + optional uint32 
prometheus_listener_port = 4; // required; u16 + optional string prometheus_pushgateway_url = 5; // required + optional uint64 prometheus_push_interval_ms = 6; // optional; ms + optional uint32 specialized_group_id = 7; // required; u8 } message ProofDataHandler { - optional uint32 http_port = 1; // required; u16 - optional uint32 proof_generation_timeout_in_secs = 2; // required; s + optional uint32 http_port = 1; // required; u16 + optional uint32 proof_generation_timeout_in_secs = 2; // required; s } diff --git a/core/lib/protobuf_config/src/prover.rs b/core/lib/protobuf_config/src/prover.rs index 3dd582fb206..12ff2378251 100644 --- a/core/lib/protobuf_config/src/prover.rs +++ b/core/lib/protobuf_config/src/prover.rs @@ -335,6 +335,10 @@ impl ProtoRepr for proto::Prover { zone_read_url: required(&self.zone_read_url) .context("zone_read_url")? .clone(), + availability_check_interval_in_secs: *required( + &self.availability_check_interval_in_secs, + ) + .context("availability_check_interval_in_secs")?, shall_save_to_public_bucket: *required(&self.shall_save_to_public_bucket) .context("shall_save_to_public_bucket")?, object_store, @@ -365,6 +369,7 @@ impl ProtoRepr for proto::Prover { queue_capacity: Some(this.queue_capacity.try_into().unwrap()), witness_vector_receiver_port: Some(this.witness_vector_receiver_port.into()), zone_read_url: Some(this.zone_read_url.clone()), + availability_check_interval_in_secs: Some(this.availability_check_interval_in_secs), shall_save_to_public_bucket: Some(this.shall_save_to_public_bucket), object_store: this.object_store.as_ref().map(ProtoRepr::build), } diff --git a/core/lib/zksync_core/src/house_keeper/fri_gpu_prover_archiver.rs b/core/lib/zksync_core/src/house_keeper/fri_gpu_prover_archiver.rs new file mode 100644 index 00000000000..0297dc9c328 --- /dev/null +++ b/core/lib/zksync_core/src/house_keeper/fri_gpu_prover_archiver.rs @@ -0,0 +1,52 @@ +use prover_dal::{Prover, ProverDal}; +use 
zksync_db_connection::connection_pool::ConnectionPool; + +use crate::{house_keeper::periodic_job::PeriodicJob, metrics::HOUSE_KEEPER_METRICS}; + +/// FriGpuProverArchiver is a task that periodically archives old fri GPU prover records. +/// The task will archive the `dead` prover records that have not been updated for a certain amount of time. +#[derive(Debug)] +pub struct FriGpuProverArchiver { + pool: ConnectionPool, + archiving_interval_ms: u64, + archive_prover_after_secs: u64, +} + +impl FriGpuProverArchiver { + pub fn new( + pool: ConnectionPool, + archiving_interval_ms: u64, + archive_prover_after_secs: u64, + ) -> Self { + Self { + pool, + archiving_interval_ms, + archive_prover_after_secs, + } + } +} + +#[async_trait::async_trait] +impl PeriodicJob for FriGpuProverArchiver { + const SERVICE_NAME: &'static str = "FriGpuProverArchiver"; + + async fn run_routine_task(&mut self) -> anyhow::Result<()> { + let archived_provers = self + .pool + .connection() + .await + .unwrap() + .fri_gpu_prover_queue_dal() + .archive_old_provers(self.archive_prover_after_secs) + .await; + tracing::info!("Archived {:?} fri gpu prover records", archived_provers); + HOUSE_KEEPER_METRICS + .gpu_prover_archived + .inc_by(archived_provers as u64); + Ok(()) + } + + fn polling_interval_ms(&self) -> u64 { + self.archiving_interval_ms + } +} diff --git a/core/lib/zksync_core/src/house_keeper/fri_prover_jobs_archiver.rs b/core/lib/zksync_core/src/house_keeper/fri_prover_jobs_archiver.rs index 28fadd104ed..afbd7d8c005 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_prover_jobs_archiver.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_prover_jobs_archiver.rs @@ -1,7 +1,7 @@ use prover_dal::{Prover, ProverDal}; use zksync_db_connection::connection_pool::ConnectionPool; -use crate::house_keeper::periodic_job::PeriodicJob; +use crate::{house_keeper::periodic_job::PeriodicJob, metrics::HOUSE_KEEPER_METRICS}; #[derive(Debug)] pub struct FriProverJobArchiver { @@ -38,7 +38,9 @@ impl 
PeriodicJob for FriProverJobArchiver { .archive_old_jobs(self.archiving_interval_secs) .await; tracing::info!("Archived {:?} fri prover jobs", archived_jobs); - metrics::counter!("server.prover_fri.archived_jobs", archived_jobs as u64); + HOUSE_KEEPER_METRICS + .prover_job_archived + .inc_by(archived_jobs as u64); Ok(()) } diff --git a/core/lib/zksync_core/src/house_keeper/mod.rs b/core/lib/zksync_core/src/house_keeper/mod.rs index f3b5b202ee1..ca28384fab1 100644 --- a/core/lib/zksync_core/src/house_keeper/mod.rs +++ b/core/lib/zksync_core/src/house_keeper/mod.rs @@ -1,4 +1,5 @@ pub mod blocks_state_reporter; +pub mod fri_gpu_prover_archiver; pub mod fri_proof_compressor_job_retry_manager; pub mod fri_proof_compressor_queue_monitor; pub mod fri_prover_job_retry_manager; diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index bc5f7d20503..2ae7a4e64bf 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -71,6 +71,7 @@ use crate::{ genesis::GenesisParams, house_keeper::{ blocks_state_reporter::L1BatchMetricsReporter, + fri_gpu_prover_archiver::FriGpuProverArchiver, fri_proof_compressor_job_retry_manager::FriProofCompressorJobRetryManager, fri_proof_compressor_queue_monitor::FriProofCompressorStatsReporter, fri_prover_job_retry_manager::FriProverJobRetryManager, @@ -1104,20 +1105,30 @@ async fn add_house_keeper_to_task_futures( task_futures.push(tokio::spawn(task)); // TODO(PLA-862): remove after fields become required - if house_keeper_config.prover_job_archiver_enabled() { + if let Some((archiving_interval, archive_after)) = + house_keeper_config.prover_job_archiver_params() + { let fri_prover_jobs_archiver = FriProverJobArchiver::new( prover_connection_pool.clone(), - house_keeper_config - .prover_job_archiver_reporting_interval_ms - .unwrap(), - house_keeper_config - .prover_job_archiver_archiving_interval_secs - .unwrap(), + archiving_interval, + archive_after, ); let task = 
fri_prover_jobs_archiver.run(stop_receiver.clone()); task_futures.push(tokio::spawn(task)); } + if let Some((archiving_interval, archive_after)) = + house_keeper_config.fri_gpu_prover_archiver_params() + { + let fri_gpu_prover_jobs_archiver = FriGpuProverArchiver::new( + prover_connection_pool.clone(), + archiving_interval, + archive_after, + ); + let task = fri_gpu_prover_jobs_archiver.run(stop_receiver.clone()); + task_futures.push(tokio::spawn(task)); + } + let fri_prover_group_config = configs .prover_group_config .clone() diff --git a/core/lib/zksync_core/src/metrics.rs b/core/lib/zksync_core/src/metrics.rs index 56e8223b893..066f8f3e251 100644 --- a/core/lib/zksync_core/src/metrics.rs +++ b/core/lib/zksync_core/src/metrics.rs @@ -183,3 +183,13 @@ pub(crate) struct ExternalNodeMetrics { #[vise::register] pub(crate) static EN_METRICS: vise::Global = vise::Global::new(); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "house_keeper")] +pub(crate) struct HouseKeeperMetrics { + pub prover_job_archived: Counter, + pub gpu_prover_archived: Counter, +} + +#[vise::register] +pub(crate) static HOUSE_KEEPER_METRICS: vise::Global = vise::Global::new(); diff --git a/core/node/node_framework/src/implementations/layers/house_keeper.rs b/core/node/node_framework/src/implementations/layers/house_keeper.rs index 43e35103da2..f6257808fc8 100644 --- a/core/node/node_framework/src/implementations/layers/house_keeper.rs +++ b/core/node/node_framework/src/implementations/layers/house_keeper.rs @@ -5,7 +5,7 @@ use zksync_config::configs::{ FriProofCompressorConfig, FriProverConfig, FriWitnessGeneratorConfig, }; use zksync_core::house_keeper::{ - blocks_state_reporter::L1BatchMetricsReporter, + blocks_state_reporter::L1BatchMetricsReporter, fri_gpu_prover_archiver::FriGpuProverArchiver, fri_proof_compressor_job_retry_manager::FriProofCompressorJobRetryManager, fri_proof_compressor_queue_monitor::FriProofCompressorStatsReporter, fri_prover_job_retry_manager::FriProverJobRetryManager, 
@@ -112,21 +112,26 @@ impl WiringLayer for HouseKeeperLayer { waiting_to_queued_fri_witness_job_mover, })); - if self.house_keeper_config.prover_job_archiver_enabled() { - let fri_prover_job_archiver = FriProverJobArchiver::new( - prover_pool.clone(), - self.house_keeper_config - .prover_job_archiver_reporting_interval_ms - .unwrap(), - self.house_keeper_config - .prover_job_archiver_archiving_interval_secs - .unwrap(), - ); + if let Some((archiving_interval, archive_after)) = + self.house_keeper_config.prover_job_archiver_params() + { + let fri_prover_job_archiver = + FriProverJobArchiver::new(prover_pool.clone(), archiving_interval, archive_after); context.add_task(Box::new(FriProverJobArchiverTask { fri_prover_job_archiver, })); } + if let Some((archiving_interval, archive_after)) = + self.house_keeper_config.fri_gpu_prover_archiver_params() + { + let fri_prover_gpu_archiver = + FriGpuProverArchiver::new(prover_pool.clone(), archiving_interval, archive_after); + context.add_task(Box::new(FriProverGpuArchiverTask { + fri_prover_gpu_archiver, + })); + } + let scheduler_circuit_queuer = SchedulerCircuitQueuer::new( self.house_keeper_config.witness_job_moving_interval_ms, prover_pool.clone(), @@ -364,3 +369,18 @@ impl Task for FriProverJobArchiverTask { self.fri_prover_job_archiver.run(stop_receiver.0).await } } + +struct FriProverGpuArchiverTask { + fri_prover_gpu_archiver: FriGpuProverArchiver, +} + +#[async_trait::async_trait] +impl Task for FriProverGpuArchiverTask { + fn name(&self) -> &'static str { + "fri_prover_gpu_archiver" + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + self.fri_prover_gpu_archiver.run(stop_receiver.0).await + } +} diff --git a/etc/env/base/fri_prover.toml b/etc/env/base/fri_prover.toml index 94af27417ae..fc99e756cf5 100644 --- a/etc/env/base/fri_prover.toml +++ b/etc/env/base/fri_prover.toml @@ -1,14 +1,15 @@ [fri_prover] -setup_data_path="/usr/src/setup-data" -prometheus_port=3315 -max_attempts=10 
-generation_timeout_in_secs=600 -base_layer_circuit_ids_to_be_verified="1" -recursive_layer_circuit_ids_to_be_verified="1" -setup_load_mode="FromDisk" -specialized_group_id=100 -witness_vector_generator_thread_count=5 -queue_capacity=10 -witness_vector_receiver_port=3316 -zone_read_url="http://metadata.google.internal/computeMetadata/v1/instance/zone" -shall_save_to_public_bucket=true +setup_data_path = "/usr/src/setup-data" +prometheus_port = 3315 +max_attempts = 10 +generation_timeout_in_secs = 600 +base_layer_circuit_ids_to_be_verified = "1" +recursive_layer_circuit_ids_to_be_verified = "1" +setup_load_mode = "FromDisk" +specialized_group_id = 100 +witness_vector_generator_thread_count = 5 +queue_capacity = 10 +witness_vector_receiver_port = 3316 +zone_read_url = "http://metadata.google.internal/computeMetadata/v1/instance/zone" +availability_check_interval_in_secs = 10 +shall_save_to_public_bucket = true diff --git a/etc/env/base/house_keeper.toml b/etc/env/base/house_keeper.toml index 9043455491b..9596f63d062 100644 --- a/etc/env/base/house_keeper.toml +++ b/etc/env/base/house_keeper.toml @@ -9,5 +9,7 @@ prover_db_pool_size = 2 prover_stats_reporting_interval_ms = 50000 proof_compressor_job_retrying_interval_ms = 30000 proof_compressor_stats_reporting_interval_ms = 10000 -prover_job_archiver_reporting_interval_ms = 1800000 -prover_job_archiver_archiving_interval_secs = 172800 \ No newline at end of file +prover_job_archiver_archiving_interval_ms = 1800000 +prover_job_archiver_archive_after_secs = 172800 +fri_gpu_prover_archiver_archiving_interval_ms = 86400000 +fri_gpu_prover_archiver_archive_after_secs = 172800 \ No newline at end of file diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index eb52db083a1..5415c70e572 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -170,6 +170,7 @@ prover: witness_vector_generator_thread_count: 5 queue_capacity: 10 witness_vector_receiver_port: 3316 + 
availability_check_interval_in_secs: 10000 zone_read_url: http://metadata.google.internal/computeMetadata/v1/instance/zone shall_save_to_public_bucket: true witness_generator: @@ -286,8 +287,10 @@ house_keeper: prover_stats_reporting_interval_ms: 5000 proof_compressor_job_retrying_interval_ms: 30000 proof_compressor_stats_reporting_interval_ms: 10000 - prover_job_archiver_reporting_interval_ms: 1800000 - prover_job_archiver_archiving_interval_secs: 15 + prover_job_archiver_archiving_interval_ms: 1800000 + prover_job_archiver_archive_after_secs: 172800 + fri_gpu_prover_archiver_archiving_interval_ms: 86400000 + fri_gpu_prover_archiver_archive_after_secs: 172800 prometheus: listener_port: 3312 diff --git a/prover/prover_dal/.sqlx/query-02bbd0a1c01747fb24a68f0ecf447e0574419632a40119b661a31ec70f0f950b.json b/prover/prover_dal/.sqlx/query-02bbd0a1c01747fb24a68f0ecf447e0574419632a40119b661a31ec70f0f950b.json new file mode 100644 index 00000000000..013d7db9cad --- /dev/null +++ b/prover/prover_dal/.sqlx/query-02bbd0a1c01747fb24a68f0ecf447e0574419632a40119b661a31ec70f0f950b.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH deleted AS (\n DELETE FROM gpu_prover_queue_fri\n WHERE\n instance_status = 'dead'\n AND updated_at < NOW() - $1::INTERVAL\n RETURNING *, NOW() AS archived_at\n ),\n inserted_count AS (\n INSERT INTO gpu_prover_queue_fri_archive\n SELECT * FROM deleted\n )\n SELECT COUNT(*) FROM deleted\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Interval" + ] + }, + "nullable": [ + null + ] + }, + "hash": "02bbd0a1c01747fb24a68f0ecf447e0574419632a40119b661a31ec70f0f950b" +} diff --git a/prover/prover_dal/.sqlx/query-2095e5646c382ccbc6e3bafdeddaae31358088e142dff51c9f0bde8f386900d3.json b/prover/prover_dal/.sqlx/query-2095e5646c382ccbc6e3bafdeddaae31358088e142dff51c9f0bde8f386900d3.json new file mode 100644 index 00000000000..fd7c7c7874d --- /dev/null +++ 
b/prover/prover_dal/.sqlx/query-2095e5646c382ccbc6e3bafdeddaae31358088e142dff51c9f0bde8f386900d3.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n instance_status\n FROM\n gpu_prover_queue_fri\n WHERE\n instance_host = $1::TEXT::inet\n AND instance_port = $2\n AND zone = $3\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "instance_status", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Text", + "Int4", + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "2095e5646c382ccbc6e3bafdeddaae31358088e142dff51c9f0bde8f386900d3" +} diff --git a/prover/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.down.sql b/prover/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.down.sql new file mode 100644 index 00000000000..557f6f6e175 --- /dev/null +++ b/prover/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE gpu_prover_queue_fri_archive + DROP COLUMN IF EXISTS archived_at; diff --git a/prover/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.up.sql b/prover/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.up.sql new file mode 100644 index 00000000000..fa8b61e5599 --- /dev/null +++ b/prover/prover_dal/migrations/20240403070124_add_archived_at_column_to_prover_queue_archive.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE gpu_prover_queue_fri_archive + ADD COLUMN archived_at TIMESTAMP DEFAULT NULL; diff --git a/prover/prover_dal/src/fri_gpu_prover_queue_dal.rs b/prover/prover_dal/src/fri_gpu_prover_queue_dal.rs index 231f1b6599d..731d74df2ca 100644 --- a/prover/prover_dal/src/fri_gpu_prover_queue_dal.rs +++ b/prover/prover_dal/src/fri_gpu_prover_queue_dal.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::{str::FromStr, time::Duration}; use zksync_basic_types::prover_dal::{GpuProverInstanceStatus, SocketAddress}; use 
zksync_db_connection::connection::Connection; @@ -156,4 +156,57 @@ impl FriGpuProverQueueDal<'_, '_> { .await .unwrap(); } + + pub async fn get_prover_instance_status( + &mut self, + address: SocketAddress, + zone: String, + ) -> Option { + sqlx::query!( + r#" + SELECT + instance_status + FROM + gpu_prover_queue_fri + WHERE + instance_host = $1::TEXT::inet + AND instance_port = $2 + AND zone = $3 + "#, + address.host.to_string(), + i32::from(address.port), + zone + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| GpuProverInstanceStatus::from_str(&row.instance_status).unwrap()) + } + + pub async fn archive_old_provers(&mut self, archive_prover_after_secs: u64) -> usize { + let prover_max_age = + pg_interval_from_duration(Duration::from_secs(archive_prover_after_secs)); + + sqlx::query_scalar!( + r#" + WITH deleted AS ( + DELETE FROM gpu_prover_queue_fri + WHERE + instance_status = 'dead' + AND updated_at < NOW() - $1::INTERVAL + RETURNING *, NOW() AS archived_at + ), + inserted_count AS ( + INSERT INTO gpu_prover_queue_fri_archive + SELECT * FROM deleted + ) + SELECT COUNT(*) FROM deleted + "#, + &prover_max_age + ) + .fetch_one(self.storage.conn()) + .await + .unwrap() + .unwrap_or(0) as usize + } } diff --git a/prover/prover_fri/src/gpu_prover_availability_checker.rs b/prover/prover_fri/src/gpu_prover_availability_checker.rs new file mode 100644 index 00000000000..515919cff5b --- /dev/null +++ b/prover/prover_fri/src/gpu_prover_availability_checker.rs @@ -0,0 +1,81 @@ +#[cfg(feature = "gpu")] +pub mod availability_checker { + use std::time::Duration; + + use prover_dal::{ConnectionPool, Prover, ProverDal}; + use zksync_types::prover_dal::{GpuProverInstanceStatus, SocketAddress}; + + use crate::metrics::{KillingReason, METRICS}; + + /// Availability checker is a task that periodically checks the status of the prover instance in the database. 
+ /// If the prover instance is not found in the database or marked as dead, the availability checker will shut down the prover. + pub struct AvailabilityChecker { + address: SocketAddress, + zone: String, + polling_interval: Duration, + pool: ConnectionPool, + } + + impl AvailabilityChecker { + pub fn new( + address: SocketAddress, + zone: String, + polling_interval_secs: u32, + pool: ConnectionPool, + ) -> Self { + Self { + address, + zone, + polling_interval: Duration::from_secs(polling_interval_secs as u64), + pool, + } + } + + pub async fn run( + self, + stop_receiver: tokio::sync::watch::Receiver, + ) -> anyhow::Result<()> { + while !*stop_receiver.borrow() { + let status = self + .pool + .connection() + .await + .unwrap() + .fri_gpu_prover_queue_dal() + .get_prover_instance_status(self.address.clone(), self.zone.clone()) + .await; + + // If the prover instance is not found in the database or marked as dead, we should shut down the prover + match status { + None => { + METRICS.zombie_prover_instances_count[&KillingReason::Absent].inc(); + tracing::info!( + "Prover instance at address {:?}, availability zone {} was not found in the database, shutting down", + self.address, + self.zone + ); + // After returning from the task, it will shut down all the other tasks + return Ok(()); + } + Some(GpuProverInstanceStatus::Dead) => { + METRICS.zombie_prover_instances_count[&KillingReason::Dead].inc(); + tracing::info!( + "Prover instance at address {:?}, availability zone {} was found marked as dead, shutting down", + self.address, + self.zone + ); + // After returning from the task, it will shut down all the other tasks + return Ok(()); + } + Some(_) => (), + } + + tokio::time::sleep(self.polling_interval).await; + } + + tracing::info!("Availability checker was shut down"); + + Ok(()) + } + } +} diff --git a/prover/prover_fri/src/main.rs b/prover/prover_fri/src/main.rs index 841406c2833..efc552d50c2 100644 --- a/prover/prover_fri/src/main.rs +++ 
b/prover/prover_fri/src/main.rs @@ -26,6 +26,7 @@ use zksync_types::{ }; use zksync_utils::wait_for_tasks::ManagedTasks; +mod gpu_prover_availability_checker; mod gpu_prover_job_processor; mod metrics; mod prover_job_processor; @@ -256,14 +257,23 @@ async fn get_prover_tasks( zone.clone() ); let socket_listener = gpu_socket_listener::SocketListener::new( - address, + address.clone(), producer, pool.clone(), prover_config.specialized_group_id, - zone, + zone.clone(), ); + let availability_checker = + gpu_prover_availability_checker::availability_checker::AvailabilityChecker::new( + address, + zone, + prover_config.availability_check_interval_in_secs, + pool, + ); + Ok(vec![ tokio::spawn(socket_listener.listen_incoming_connections(stop_receiver.clone())), - tokio::spawn(prover.run(stop_receiver, None)), + tokio::spawn(prover.run(stop_receiver.clone(), None)), + tokio::spawn(availability_checker.run(stop_receiver.clone())), ]) } diff --git a/prover/prover_fri/src/metrics.rs b/prover/prover_fri/src/metrics.rs index b03f0233e7a..1bff9161c49 100644 --- a/prover/prover_fri/src/metrics.rs +++ b/prover/prover_fri/src/metrics.rs @@ -1,6 +1,8 @@ use std::time::Duration; -use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, LabeledFamily, Metrics}; +use vise::{ + Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Histogram, LabeledFamily, Metrics, +}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)] pub(crate) struct CircuitLabels { @@ -15,6 +17,16 @@ pub(crate) enum Layer { Base, } +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] +#[metrics(label = "reason", rename_all = "snake_case")] +#[allow(dead_code)] +pub(crate) enum KillingReason { + /// Prover was found with Dead status in the database + Dead, + /// Prover was not found in the database + Absent, +} + #[derive(Debug, Metrics)] #[metrics(prefix = "prover_fri_prover")] pub(crate) struct ProverFriMetrics { @@ -36,6 +48,7 @@ pub(crate) 
struct ProverFriMetrics { pub witness_vector_blob_time: LabeledFamily>, #[metrics(buckets = Buckets::LATENCIES, labels = ["circuit_type"])] pub blob_save_time: LabeledFamily>, + pub zombie_prover_instances_count: Family, } #[vise::register] From 688ad786e74059fb314fbd9c8a2a350c9f43e9a2 Mon Sep 17 00:00:00 2001 From: Stanislav Bezkorovainyi Date: Thu, 4 Apr 2024 11:29:03 +0200 Subject: [PATCH 3/9] fix: Fix CI by ignoring "era-compiler-llvm" and bumping h2 (#1571) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ The `era-compiler-llvm` will be open sourced soon. For now we'll just ignore it in the CI ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. 
--- Cargo.lock | 4 ++-- checks-config/links.json | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3b9473153f3..427e51e331f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2472,9 +2472,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", diff --git a/checks-config/links.json b/checks-config/links.json index ed336a66590..7790503ed4f 100644 --- a/checks-config/links.json +++ b/checks-config/links.json @@ -23,6 +23,9 @@ }, { "pattern": "^https://github\\.com/matter-labs/zksync-era/commit/" + }, + { + "pattern": "^https://github\\.com/matter-labs//era-compiler-llvm" } ], "aliveStatusCodes": [0, 200, 206, 304] From a923e11ecfecc3de9b0b2cb578939a1f877a1e8a Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Thu, 4 Apr 2024 12:35:55 +0300 Subject: [PATCH 4/9] fix(cache): use factory deps cache correctly (#1547) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Factory deps should be known only for miniblocks when it was already published ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. 
--- ...fe400fcba3bfc5cf1b7c2b801508f6673d94e.json | 23 --------- ...a4a992bee16a2d3d84be2aafc27af8468b64e.json | 28 +++++++++++ core/lib/dal/src/storage_web3_dal.rs | 15 +++--- core/lib/state/src/cache/lru_cache.rs | 6 +++ core/lib/state/src/postgres/mod.rs | 34 ++++++++++---- core/lib/state/src/postgres/tests.rs | 47 ++++++++++++++++++- 6 files changed, 111 insertions(+), 42 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-2b1136c7781bcdbd9d5d1e6f900fe400fcba3bfc5cf1b7c2b801508f6673d94e.json create mode 100644 core/lib/dal/.sqlx/query-fe3aa7ce9cd799026de57bdb943a4a992bee16a2d3d84be2aafc27af8468b64e.json diff --git a/core/lib/dal/.sqlx/query-2b1136c7781bcdbd9d5d1e6f900fe400fcba3bfc5cf1b7c2b801508f6673d94e.json b/core/lib/dal/.sqlx/query-2b1136c7781bcdbd9d5d1e6f900fe400fcba3bfc5cf1b7c2b801508f6673d94e.json deleted file mode 100644 index 58b1236e6f6..00000000000 --- a/core/lib/dal/.sqlx/query-2b1136c7781bcdbd9d5d1e6f900fe400fcba3bfc5cf1b7c2b801508f6673d94e.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n bytecode\n FROM\n factory_deps\n WHERE\n bytecode_hash = $1\n AND miniblock_number <= $2\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "bytecode", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "2b1136c7781bcdbd9d5d1e6f900fe400fcba3bfc5cf1b7c2b801508f6673d94e" -} diff --git a/core/lib/dal/.sqlx/query-fe3aa7ce9cd799026de57bdb943a4a992bee16a2d3d84be2aafc27af8468b64e.json b/core/lib/dal/.sqlx/query-fe3aa7ce9cd799026de57bdb943a4a992bee16a2d3d84be2aafc27af8468b64e.json new file mode 100644 index 00000000000..76144c3000a --- /dev/null +++ b/core/lib/dal/.sqlx/query-fe3aa7ce9cd799026de57bdb943a4a992bee16a2d3d84be2aafc27af8468b64e.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n bytecode,\n miniblock_number\n FROM\n factory_deps\n WHERE\n bytecode_hash = $1\n ", + "describe": { + "columns": [ + 
{ + "ordinal": 0, + "name": "bytecode", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "miniblock_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "fe3aa7ce9cd799026de57bdb943a4a992bee16a2d3d84be2aafc27af8468b64e" +} diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index 6583bc1410b..cc250a6cefc 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -241,30 +241,27 @@ impl StorageWeb3Dal<'_, '_> { Ok(row.map(|row| row.bytecode)) } - /// This method doesn't check if block with number equals to `block_number` - /// is present in the database. For such blocks `None` will be returned. - pub async fn get_factory_dep_unchecked( + /// Given bytecode hash, returns `bytecode` and `miniblock_number` at which it was inserted. + pub async fn get_factory_dep( &mut self, hash: H256, - block_number: MiniblockNumber, - ) -> sqlx::Result>> { + ) -> sqlx::Result, MiniblockNumber)>> { let row = sqlx::query!( r#" SELECT - bytecode + bytecode, + miniblock_number FROM factory_deps WHERE bytecode_hash = $1 - AND miniblock_number <= $2 "#, hash.as_bytes(), - i64::from(block_number.0) ) .fetch_optional(self.storage.conn()) .await?; - Ok(row.map(|row| row.bytecode)) + Ok(row.map(|row| (row.bytecode, MiniblockNumber(row.miniblock_number as u32)))) } } diff --git a/core/lib/state/src/cache/lru_cache.rs b/core/lib/state/src/cache/lru_cache.rs index 608ab4da115..0e0f3541117 100644 --- a/core/lib/state/src/cache/lru_cache.rs +++ b/core/lib/state/src/cache/lru_cache.rs @@ -100,6 +100,12 @@ mod tests { use crate::cache::{lru_cache::LruCache, *}; + impl CacheValue for Vec { + fn cache_weight(&self) -> u32 { + self.len().try_into().expect("Cached bytes are too large") + } + } + #[test] fn cache_with_zero_capacity() { let zero_cache = LruCache::>::new("test", 0); diff --git a/core/lib/state/src/postgres/mod.rs 
b/core/lib/state/src/postgres/mod.rs index 51e6014afac..de58a860630 100644 --- a/core/lib/state/src/postgres/mod.rs +++ b/core/lib/state/src/postgres/mod.rs @@ -24,12 +24,20 @@ mod metrics; #[cfg(test)] mod tests; +#[derive(Debug, Clone, PartialEq, Eq)] +struct TimestampedFactoryDep { + bytecode: Vec, + inserted_at: MiniblockNumber, +} + /// Type alias for smart contract source code cache. -type FactoryDepsCache = LruCache>; +type FactoryDepsCache = LruCache; -impl CacheValue for Vec { +impl CacheValue for TimestampedFactoryDep { fn cache_weight(&self) -> u32 { - self.len().try_into().expect("Cached bytes are too large") + (self.bytecode.len() + mem::size_of::()) + .try_into() + .expect("Cached bytes are too large") } } @@ -553,17 +561,21 @@ impl ReadStorage for PostgresStorage<'_> { .as_ref() .and_then(|caches| caches.factory_deps.get(&hash)); - let result = cached_value.or_else(|| { + let value = cached_value.or_else(|| { let mut dal = self.connection.storage_web3_dal(); let value = self .rt_handle - .block_on(dal.get_factory_dep_unchecked(hash, self.miniblock_number)) - .expect("Failed executing `load_factory_dep`"); + .block_on(dal.get_factory_dep(hash)) + .expect("Failed executing `load_factory_dep`") + .map(|(bytecode, inserted_at)| TimestampedFactoryDep { + bytecode, + inserted_at, + }); if let Some(caches) = &self.caches { // If we receive None, we won't cache it. - if let Some(dep) = value.clone() { - caches.factory_deps.insert(hash, dep); + if let Some(value) = value.clone() { + caches.factory_deps.insert(hash, value); } }; @@ -571,7 +583,11 @@ impl ReadStorage for PostgresStorage<'_> { }); latency.observe(); - result + Some( + value + .filter(|dep| dep.inserted_at <= self.miniblock_number)? 
+ .bytecode, + ) } fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { diff --git a/core/lib/state/src/postgres/tests.rs b/core/lib/state/src/postgres/tests.rs index 1d878a9c631..5e13464dc9b 100644 --- a/core/lib/state/src/postgres/tests.rs +++ b/core/lib/state/src/postgres/tests.rs @@ -231,6 +231,18 @@ fn test_factory_deps_cache(pool: &ConnectionPool, rt_handle: Handle) { ) .unwrap(); + let mut contracts = HashMap::new(); + contracts.insert(H256::from_low_u64_be(1), vec![1, 2, 3, 4]); + storage + .rt_handle + .block_on( + storage + .connection + .factory_deps_dal() + .insert_factory_deps(MiniblockNumber(1), &contracts), + ) + .unwrap(); + // Create the storage that should have the cache filled. let mut storage = PostgresStorage::new( storage.rt_handle, @@ -243,7 +255,40 @@ fn test_factory_deps_cache(pool: &ConnectionPool, rt_handle: Handle) { // Fill the cache let dep = storage.load_factory_dep(zero_addr); assert_eq!(dep, Some(vec![1, 2, 3])); - assert_eq!(caches.factory_deps.get(&zero_addr), Some(vec![1, 2, 3])); + assert_eq!( + caches.factory_deps.get(&zero_addr), + Some(TimestampedFactoryDep { + bytecode: vec![1, 2, 3], + inserted_at: MiniblockNumber(0) + }) + ); + + let dep = storage.load_factory_dep(H256::from_low_u64_be(1)); + assert_eq!(dep, Some(vec![1, 2, 3, 4])); + assert_eq!( + caches.factory_deps.get(&H256::from_low_u64_be(1)), + Some(TimestampedFactoryDep { + bytecode: vec![1, 2, 3, 4], + inserted_at: MiniblockNumber(1) + }) + ); + + // Create storage with `MiniblockNumber(0)`. + let mut storage = PostgresStorage::new( + storage.rt_handle, + storage.connection, + MiniblockNumber(0), + true, + ) + .with_caches(caches.clone()); + + // First bytecode was published at miniblock 0, so it should be visible. + let dep = storage.load_factory_dep(zero_addr); + assert_eq!(dep, Some(vec![1, 2, 3])); + + // Second bytecode was published at miniblock 1, so it shouldn't be visible. 
+ let dep = storage.load_factory_dep(H256::from_low_u64_be(1)); + assert!(dep.is_none()); } #[tokio::test] From 6da89cd5222435aa9994fb5989af75ecbe69b6fd Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Thu, 4 Apr 2024 12:49:31 +0300 Subject: [PATCH 5/9] feat(api): Add `tokens_whitelisted_for_paymaster` (#1545) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Adds `tokens_whitelisted_for_paymaster` config var to main node. - Adds `en_tokensWhitelistedForPaymaster` endpoint. ## Why ❔ Required to whitelist some tokens to be used by paymaster in addition to natively bridged tokens. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. 
--- checks-config/era.dic | 1 + core/bin/external_node/src/config/mod.rs | 2 + core/bin/external_node/src/main.rs | 40 +++++++++++++++++-- core/lib/config/src/configs/api.rs | 7 +++- core/lib/config/src/testonly.rs | 1 + core/lib/env_config/src/api.rs | 7 +++- core/lib/protobuf_config/src/api.rs | 14 ++++++- core/lib/protobuf_config/src/proto/api.proto | 1 + core/lib/web3_decl/src/namespaces/en.rs | 6 ++- .../src/api_server/execution_sandbox/mod.rs | 5 ++- .../api_server/execution_sandbox/validate.rs | 29 ++++++++++---- .../src/api_server/tx_sender/mod.rs | 28 ++++++++++++- .../web3/backend_jsonrpsee/namespaces/en.rs | 8 +++- .../src/api_server/web3/namespaces/debug.rs | 9 ++++- .../src/api_server/web3/namespaces/en.rs | 11 ++++- 15 files changed, 146 insertions(+), 23 deletions(-) diff --git a/checks-config/era.dic b/checks-config/era.dic index 78da85c1d90..b83368526be 100644 --- a/checks-config/era.dic +++ b/checks-config/era.dic @@ -190,6 +190,7 @@ sorted_timestamps known_bytecodes returndata namespaces +natively StateDiffRecord BYTES_PER_ENUMERATION_INDEX derived_key diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index bf5c80785c6..1181a68200c 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -737,6 +737,8 @@ impl From for TxSenderConfig { .optional .l1_to_l2_transactions_compatibility_mode, max_pubdata_per_batch: config.remote.max_pubdata_per_batch, + // Does not matter for EN. 
+ whitelisted_tokens_for_aa: Default::default(), } } } diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 3eb7f77c428..a0aa371de04 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -5,7 +5,7 @@ use clap::Parser; use metrics::EN_METRICS; use prometheus_exporter::PrometheusExporterConfig; use tokio::{ - sync::watch, + sync::{watch, RwLock}, task::{self, JoinHandle}, }; use zksync_concurrency::{ctx, scope}; @@ -51,7 +51,7 @@ use zksync_state::PostgresStorageCaches; use zksync_storage::RocksDB; use zksync_types::L2ChainId; use zksync_utils::wait_for_tasks::ManagedTasks; -use zksync_web3_decl::client::L2Client; +use zksync_web3_decl::{client::L2Client, namespaces::EnNamespaceClient}; use crate::{ config::{observability::observability_config_from_env, ExternalNodeConfig}, @@ -387,8 +387,14 @@ async fn run_api( } }; - let (tx_sender, vm_barrier, cache_update_handle, proxy_cache_updater_handle) = { - let tx_proxy = TxProxy::new(main_node_client); + let ( + tx_sender, + vm_barrier, + cache_update_handle, + proxy_cache_updater_handle, + whitelisted_tokens_update_handle, + ) = { + let tx_proxy = TxProxy::new(main_node_client.clone()); let proxy_cache_updater_pool = singleton_pool_builder .build() .await @@ -426,7 +432,31 @@ async fn run_api( ) }); + let whitelisted_tokens_for_aa_cache = Arc::new(RwLock::new(Vec::new())); + let whitelisted_tokens_for_aa_cache_clone = whitelisted_tokens_for_aa_cache.clone(); + let mut stop_receiver_for_task = stop_receiver.clone(); + let whitelisted_tokens_update_task = task::spawn(async move { + loop { + match main_node_client.whitelisted_tokens_for_aa().await { + Ok(tokens) => { + *whitelisted_tokens_for_aa_cache_clone.write().await = tokens; + } + Err(err) => { + tracing::error!( + "Failed to query `whitelisted_tokens_for_aa`, error: {err:?}" + ); + } + } + + // Error here corresponds to a timeout w/o `stop_receiver` changed; we're OK with this. 
+ tokio::time::timeout(Duration::from_secs(60), stop_receiver_for_task.changed()) + .await + .ok(); + } + }); + let tx_sender = tx_sender_builder + .with_whitelisted_tokens_for_aa(whitelisted_tokens_for_aa_cache) .build( fee_params_fetcher, Arc::new(vm_concurrency_limiter), @@ -439,6 +469,7 @@ async fn run_api( vm_barrier, cache_update_handle, proxy_cache_updater_handle, + whitelisted_tokens_update_task, ) }; @@ -490,6 +521,7 @@ async fn run_api( task_futures.extend(cache_update_handle); task_futures.push(proxy_cache_updater_handle); + task_futures.push(whitelisted_tokens_update_handle); Ok(()) } diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs index a83e5ef0c28..2739849444b 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -1,7 +1,7 @@ use std::{net::SocketAddr, num::NonZeroU32, time::Duration}; use serde::Deserialize; -use zksync_basic_types::H256; +use zksync_basic_types::{Address, H256}; pub use crate::configs::PrometheusConfig; @@ -100,6 +100,10 @@ pub struct Web3JsonRpcConfig { pub mempool_cache_update_interval: Option, /// Maximum number of transactions to be stored in the mempool cache. Default is 10000. pub mempool_cache_size: Option, + /// List of L2 token addresses that are white-listed to use by paymasters + /// (additionally to natively bridged tokens). + #[serde(default)] + pub whitelisted_tokens_for_aa: Vec
, } impl Web3JsonRpcConfig { @@ -137,6 +141,7 @@ impl Web3JsonRpcConfig { mempool_cache_update_interval: Default::default(), mempool_cache_size: Default::default(), tree_api_url: None, + whitelisted_tokens_for_aa: Default::default(), } } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 097e1af6dbc..7f6215fceae 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -79,6 +79,7 @@ impl Distribution for EncodeDist { tree_api_url: self.sample(rng), mempool_cache_update_interval: self.sample(rng), mempool_cache_size: self.sample(rng), + whitelisted_tokens_for_aa: self.sample_range(rng).map(|_| rng.gen()).collect(), } } } diff --git a/core/lib/env_config/src/api.rs b/core/lib/env_config/src/api.rs index 30ceb376bda..999b4457076 100644 --- a/core/lib/env_config/src/api.rs +++ b/core/lib/env_config/src/api.rs @@ -49,7 +49,7 @@ mod tests { use std::num::NonZeroU32; use super::*; - use crate::test_utils::{hash, EnvMutex}; + use crate::test_utils::{addr, hash, EnvMutex}; static MUTEX: EnvMutex = EnvMutex::new(); @@ -88,6 +88,10 @@ mod tests { tree_api_url: None, mempool_cache_update_interval: Some(50), mempool_cache_size: Some(10000), + whitelisted_tokens_for_aa: vec![ + addr("0x0000000000000000000000000000000000000001"), + addr("0x0000000000000000000000000000000000000002"), + ], }, prometheus: PrometheusConfig { listener_port: 3312, @@ -120,6 +124,7 @@ mod tests { API_WEB3_JSON_RPC_GAS_PRICE_SCALE_FACTOR=1.2 API_WEB3_JSON_RPC_REQUEST_TIMEOUT=10 API_WEB3_JSON_RPC_ACCOUNT_PKS="0x0000000000000000000000000000000000000000000000000000000000000001,0x0000000000000000000000000000000000000000000000000000000000000002" + API_WEB3_JSON_RPC_WHITELISTED_TOKENS_FOR_AA="0x0000000000000000000000000000000000000001,0x0000000000000000000000000000000000000002" API_WEB3_JSON_RPC_ESTIMATE_GAS_SCALE_FACTOR=1.0 API_WEB3_JSON_RPC_ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION=1000 API_WEB3_JSON_RPC_L1_TO_L2_TRANSACTIONS_COMPATIBILITY_MODE=true 
diff --git a/core/lib/protobuf_config/src/api.rs b/core/lib/protobuf_config/src/api.rs index 41deb5103a4..1832a2eb2a7 100644 --- a/core/lib/protobuf_config/src/api.rs +++ b/core/lib/protobuf_config/src/api.rs @@ -5,7 +5,7 @@ use zksync_protobuf::{ required, }; -use crate::{parse_h256, proto::api as proto}; +use crate::{parse_h160, parse_h256, proto::api as proto}; impl ProtoRepr for proto::Api { type Type = ApiConfig; @@ -124,6 +124,13 @@ impl ProtoRepr for proto::Web3JsonRpc { .map(|x| x.try_into()) .transpose() .context("mempool_cache_size")?, + whitelisted_tokens_for_aa: self + .whitelisted_tokens_for_aa + .iter() + .enumerate() + .map(|(i, k)| parse_h160(k).context(i)) + .collect::, _>>() + .context("account_pks")?, }) } fn build(this: &Self::Type) -> Self { @@ -177,6 +184,11 @@ impl ProtoRepr for proto::Web3JsonRpc { .websocket_requests_per_minute_limit .map(|x| x.into()), tree_api_url: this.tree_api_url.clone(), + whitelisted_tokens_for_aa: this + .whitelisted_tokens_for_aa + .iter() + .map(|k| format!("{:?}", k)) + .collect(), } } } diff --git a/core/lib/protobuf_config/src/proto/api.proto b/core/lib/protobuf_config/src/proto/api.proto index e596e9473f8..9199cac2b5c 100644 --- a/core/lib/protobuf_config/src/proto/api.proto +++ b/core/lib/protobuf_config/src/proto/api.proto @@ -34,6 +34,7 @@ message Web3JsonRpc { optional bool filters_disabled = 27; // optional optional uint64 mempool_cache_update_interval = 28; // optional optional uint64 mempool_cache_size = 29; // optional + repeated string whitelisted_tokens_for_aa = 30; // optional } diff --git a/core/lib/web3_decl/src/namespaces/en.rs b/core/lib/web3_decl/src/namespaces/en.rs index e8451eb14a1..db90ec91894 100644 --- a/core/lib/web3_decl/src/namespaces/en.rs +++ b/core/lib/web3_decl/src/namespaces/en.rs @@ -1,6 +1,6 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use zksync_config::GenesisConfig; -use zksync_types::{api::en, tokens::TokenInfo, MiniblockNumber}; +use zksync_types::{api::en, 
tokens::TokenInfo, Address, MiniblockNumber}; #[cfg_attr( all(feature = "client", feature = "server"), @@ -35,4 +35,8 @@ pub trait EnNamespace { /// Get genesis configuration #[method(name = "genesisConfig")] async fn genesis_config(&self) -> RpcResult; + + /// Get tokens that are white-listed and it can be used by paymasters. + #[method(name = "whitelistedTokensForAA")] + async fn whitelisted_tokens_for_aa(&self) -> RpcResult>; } diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs index ba464ba3cc8..5408c7ac0c3 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs @@ -6,7 +6,8 @@ use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_state::{PostgresStorage, PostgresStorageCaches, ReadStorage, StorageView}; use zksync_system_constants::PUBLISH_BYTECODE_OVERHEAD; use zksync_types::{ - api, fee_model::BatchFeeInput, AccountTreeId, L1BatchNumber, L2ChainId, MiniblockNumber, + api, fee_model::BatchFeeInput, AccountTreeId, Address, L1BatchNumber, L2ChainId, + MiniblockNumber, }; use zksync_utils::bytecode::{compress_bytecode, hash_bytecode}; @@ -217,6 +218,7 @@ pub(crate) struct TxSharedArgs { pub caches: PostgresStorageCaches, pub validation_computational_gas_limit: u32, pub chain_id: L2ChainId, + pub whitelisted_tokens_for_aa: Vec
, } impl TxSharedArgs { @@ -229,6 +231,7 @@ impl TxSharedArgs { caches: PostgresStorageCaches::new(1, 1), validation_computational_gas_limit: u32::MAX, chain_id: L2ChainId::default(), + whitelisted_tokens_for_aa: Vec::new(), } } } diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs index 5f7d94b078d..ad38d9e73ae 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs @@ -11,7 +11,7 @@ use multivm::{ MultiVMTracer, }; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_types::{l2::L2Tx, Transaction, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS}; +use zksync_types::{l2::L2Tx, Address, Transaction, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS}; use super::{ apply, @@ -50,10 +50,14 @@ impl TransactionExecutor { .connection_tagged("api") .await .context("failed acquiring DB connection")?; - let validation_params = - get_validation_params(&mut connection, &tx, computational_gas_limit) - .await - .context("failed getting validation params")?; + let validation_params = get_validation_params( + &mut connection, + &tx, + computational_gas_limit, + &shared_args.whitelisted_tokens_for_aa, + ) + .await + .context("failed getting validation params")?; drop(connection); let execution_args = TxExecutionArgs::for_validation(&tx); @@ -120,6 +124,7 @@ async fn get_validation_params( connection: &mut Connection<'_, Core>, tx: &L2Tx, computational_gas_limit: u32, + whitelisted_tokens_for_aa: &[Address], ) -> anyhow::Result { let method_latency = EXECUTION_METRICS.get_validation_params.start(); let user_address = tx.common_data.initiator_address; @@ -127,13 +132,17 @@ async fn get_validation_params( // This method assumes that the number of tokens is relatively low. When it grows // we may need to introduce some kind of caching. 
- let all_tokens = connection.tokens_dal().get_all_l2_token_addresses().await?; + let all_bridged_tokens = connection.tokens_dal().get_all_l2_token_addresses().await?; + let all_tokens: Vec<_> = all_bridged_tokens + .iter() + .chain(whitelisted_tokens_for_aa) + .collect(); EXECUTION_METRICS.tokens_amount.set(all_tokens.len()); let span = tracing::debug_span!("compute_trusted_slots_for_validation").entered(); let trusted_slots: HashSet<_> = all_tokens .iter() - .flat_map(|&token| TRUSTED_TOKEN_SLOTS.iter().map(move |&slot| (token, slot))) + .flat_map(|&token| TRUSTED_TOKEN_SLOTS.iter().map(move |&slot| (*token, slot))) .collect(); // We currently don't support any specific trusted addresses. @@ -143,7 +152,11 @@ async fn get_validation_params( // Required for working with transparent proxies. let trusted_address_slots: HashSet<_> = all_tokens .into_iter() - .flat_map(|token| TRUSTED_ADDRESS_SLOTS.iter().map(move |&slot| (token, slot))) + .flat_map(|token| { + TRUSTED_ADDRESS_SLOTS + .iter() + .map(move |&slot| (*token, slot)) + }) .collect(); EXECUTION_METRICS .trusted_address_slots_amount diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs index ed6546f1b10..4d85a7d6f28 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs @@ -11,6 +11,7 @@ use multivm::{ }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; +use tokio::sync::RwLock; use zksync_config::configs::{api::Web3JsonRpcConfig, chain::StateKeeperConfig}; use zksync_contracts::BaseSystemContracts; use zksync_dal::{ @@ -157,6 +158,8 @@ pub struct TxSenderBuilder { tx_sink: Arc, /// Batch sealer used to check whether transaction can be executed by the sequencer. sealer: Option>, + /// Cache for tokens that are white-listed for AA. 
+ whitelisted_tokens_for_aa_cache: Option>>>, } impl TxSenderBuilder { @@ -170,6 +173,7 @@ impl TxSenderBuilder { replica_connection_pool, tx_sink, sealer: None, + whitelisted_tokens_for_aa_cache: None, } } @@ -178,6 +182,11 @@ impl TxSenderBuilder { self } + pub fn with_whitelisted_tokens_for_aa(mut self, cache: Arc>>) -> Self { + self.whitelisted_tokens_for_aa_cache = Some(cache); + self + } + pub async fn build( self, batch_fee_input_provider: Arc, @@ -187,6 +196,10 @@ impl TxSenderBuilder { ) -> TxSender { // Use noop sealer if no sealer was explicitly provided. let sealer = self.sealer.unwrap_or_else(|| Arc::new(NoopSealer)); + let whitelisted_tokens_for_aa_cache = + self.whitelisted_tokens_for_aa_cache.unwrap_or_else(|| { + Arc::new(RwLock::new(self.config.whitelisted_tokens_for_aa.clone())) + }); TxSender(Arc::new(TxSenderInner { sender_config: self.config, @@ -196,6 +209,7 @@ impl TxSenderBuilder { api_contracts, vm_concurrency_limiter, storage_caches, + whitelisted_tokens_for_aa_cache, sealer, executor: TransactionExecutor::Real, })) @@ -217,6 +231,7 @@ pub struct TxSenderConfig { pub l1_to_l2_transactions_compatibility_mode: bool, pub chain_id: L2ChainId, pub max_pubdata_per_batch: u64, + pub whitelisted_tokens_for_aa: Vec
, } impl TxSenderConfig { @@ -238,6 +253,7 @@ impl TxSenderConfig { .l1_to_l2_transactions_compatibility_mode, chain_id, max_pubdata_per_batch: state_keeper_config.max_pubdata_per_batch, + whitelisted_tokens_for_aa: web3_json_config.whitelisted_tokens_for_aa.clone(), } } } @@ -254,6 +270,8 @@ pub struct TxSenderInner { pub(super) vm_concurrency_limiter: Arc, // Caches used in VM execution. storage_caches: PostgresStorageCaches, + // Cache for white-listed tokens. + pub(super) whitelisted_tokens_for_aa_cache: Arc>>, /// Batch sealer used to check whether transaction can be executed by the sequencer. sealer: Arc, pub(super) executor: TransactionExecutor, @@ -277,6 +295,10 @@ impl TxSender { self.0.storage_caches.clone() } + pub(crate) async fn read_whitelisted_tokens_for_aa_cache(&self) -> Vec
{ + self.0.whitelisted_tokens_for_aa_cache.read().await.clone() + } + async fn acquire_replica_connection(&self) -> anyhow::Result> { self.0 .replica_connection_pool @@ -397,6 +419,7 @@ impl TxSender { .sender_config .validation_computational_gas_limit, chain_id: self.0.sender_config.chain_id, + whitelisted_tokens_for_aa: self.read_whitelisted_tokens_for_aa_cache().await, } } @@ -605,7 +628,7 @@ impl TxSender { } } - let shared_args = self.shared_args_for_gas_estimate(fee_model_params); + let shared_args = self.shared_args_for_gas_estimate(fee_model_params).await; let vm_execution_cache_misses_limit = self.0.sender_config.vm_execution_cache_misses_limit; let execution_args = TxExecutionArgs::for_gas_estimate(vm_execution_cache_misses_limit, &tx, base_fee); @@ -626,7 +649,7 @@ impl TxSender { Ok((execution_output.vm, execution_output.metrics)) } - fn shared_args_for_gas_estimate(&self, fee_input: BatchFeeInput) -> TxSharedArgs { + async fn shared_args_for_gas_estimate(&self, fee_input: BatchFeeInput) -> TxSharedArgs { let config = &self.0.sender_config; TxSharedArgs { @@ -637,6 +660,7 @@ impl TxSender { base_system_contracts: self.0.api_contracts.estimate_gas.clone(), caches: self.storage_caches(), chain_id: config.chain_id, + whitelisted_tokens_for_aa: self.read_whitelisted_tokens_for_aa_cache().await, } } diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/en.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/en.rs index 90e9e218e0d..49489a58091 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/en.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/en.rs @@ -1,5 +1,5 @@ use zksync_config::GenesisConfig; -use zksync_types::{api::en, tokens::TokenInfo, MiniblockNumber}; +use zksync_types::{api::en, tokens::TokenInfo, Address, MiniblockNumber}; use zksync_web3_decl::{ jsonrpsee::core::{async_trait, RpcResult}, namespaces::en::EnNamespaceServer, @@ -39,4 
+39,10 @@ impl EnNamespaceServer for EnNamespace { .await .map_err(|err| self.current_method().map_err(err)) } + + async fn whitelisted_tokens_for_aa(&self) -> RpcResult> { + self.whitelisted_tokens_for_aa_impl() + .await + .map_err(|err| self.current_method().map_err(err)) + } } diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs index 2cb8d490a73..cfb3dab034e 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs @@ -153,7 +153,7 @@ impl DebugNamespace { ); let tx = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?; - let shared_args = self.shared_args(); + let shared_args = self.shared_args().await; let vm_permit = self .state .tx_sender @@ -211,7 +211,7 @@ impl DebugNamespace { Ok(call.into()) } - fn shared_args(&self) -> TxSharedArgs { + async fn shared_args(&self) -> TxSharedArgs { let sender_config = self.sender_config(); TxSharedArgs { operator_account: AccountTreeId::default(), @@ -220,6 +220,11 @@ impl DebugNamespace { caches: self.state.tx_sender.storage_caches().clone(), validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: sender_config.chain_id, + whitelisted_tokens_for_aa: self + .state + .tx_sender + .read_whitelisted_tokens_for_aa_cache() + .await, } } } diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs index 9911d15ce86..134e375a8ab 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs @@ -1,7 +1,7 @@ use anyhow::Context as _; use zksync_config::{configs::genesis::SharedBridge, GenesisConfig}; use zksync_dal::{CoreDal, DalError}; -use zksync_types::{api::en, tokens::TokenInfo, L1BatchNumber, MiniblockNumber, H256}; +use zksync_types::{api::en, tokens::TokenInfo, Address, L1BatchNumber, 
MiniblockNumber, H256}; use zksync_web3_decl::error::Web3Error; use crate::api_server::web3::{backend_jsonrpsee::MethodTracer, state::RpcState}; @@ -154,4 +154,13 @@ impl EnNamespace { }; Ok(config) } + + #[tracing::instrument(skip(self))] + pub async fn whitelisted_tokens_for_aa_impl(&self) -> Result, Web3Error> { + Ok(self + .state + .tx_sender + .read_whitelisted_tokens_for_aa_cache() + .await) + } } From bf604b6a523b2227cf4cf40d4f9481ec7c1572f4 Mon Sep 17 00:00:00 2001 From: Stanislav Bezkorovainyi Date: Thu, 4 Apr 2024 12:34:44 +0200 Subject: [PATCH 6/9] chore: Migrate some more functionality from 1 5 0 (#1569) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ⚠️ This PR requires DB migration ⚠️ - Pubdata costs - Circuit secp256k1_verify ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. 
--- ...617de769aac94aeabc55d58b906ca3698bc8.json} | 5 ++- ...ad3caf76ed9fed031da9e04313073af2fb4a.json} | 10 ++++- .../20240403174704_pubdata_costs.down.sql | 2 + .../20240403174704_pubdata_costs.up.sql | 2 + core/lib/dal/src/blocks_dal.rs | 44 +++++++++++-------- core/lib/dal/src/models/mod.rs | 1 + .../lib/dal/src/models/storage_oracle_info.rs | 38 ++++++++++++++++ .../src/glue/types/vm/vm_block_result.rs | 3 ++ .../types/outputs/execution_state.rs | 5 +++ core/lib/multivm/src/versions/vm_1_3_2/vm.rs | 1 + .../vm_1_4_1/tracers/circuits_capacity.rs | 1 + core/lib/multivm/src/versions/vm_1_4_1/vm.rs | 1 + .../vm_1_4_2/tracers/circuits_capacity.rs | 1 + core/lib/multivm/src/versions/vm_1_4_2/vm.rs | 1 + .../tracers/circuits_capacity.rs | 1 + .../src/versions/vm_boojum_integration/vm.rs | 1 + .../vm_latest/tracers/circuits_capacity.rs | 1 + core/lib/multivm/src/versions/vm_latest/vm.rs | 2 + core/lib/multivm/src/versions/vm_m5/vm.rs | 1 + core/lib/multivm/src/versions/vm_m6/vm.rs | 1 + .../src/versions/vm_refunds_enhancement/vm.rs | 1 + .../src/versions/vm_virtual_blocks/vm.rs | 1 + core/lib/types/src/block.rs | 8 ++++ core/lib/types/src/circuit.rs | 6 +++ core/lib/zksync_core/src/genesis.rs | 1 + .../src/state_keeper/io/seal_logic.rs | 1 + .../zksync_core/src/state_keeper/tests/mod.rs | 1 + .../witness_generator/src/basic_circuits.rs | 5 ++- 28 files changed, 122 insertions(+), 24 deletions(-) rename core/lib/dal/.sqlx/{query-cea366a9d0da60bf03c71be26862929e051270056ebf113a657a464f89c7fd57.json => query-3e3ddd6578e37d38cc03fa1df0b7617de769aac94aeabc55d58b906ca3698bc8.json} (56%) rename core/lib/dal/.sqlx/{query-03c585c7e9f918e608757496088c7e3b6bdb2a08149d5f443310607d3c78988c.json => query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json} (50%) create mode 100644 core/lib/dal/migrations/20240403174704_pubdata_costs.down.sql create mode 100644 core/lib/dal/migrations/20240403174704_pubdata_costs.up.sql create mode 100644 
core/lib/dal/src/models/storage_oracle_info.rs diff --git a/core/lib/dal/.sqlx/query-cea366a9d0da60bf03c71be26862929e051270056ebf113a657a464f89c7fd57.json b/core/lib/dal/.sqlx/query-3e3ddd6578e37d38cc03fa1df0b7617de769aac94aeabc55d58b906ca3698bc8.json similarity index 56% rename from core/lib/dal/.sqlx/query-cea366a9d0da60bf03c71be26862929e051270056ebf113a657a464f89c7fd57.json rename to core/lib/dal/.sqlx/query-3e3ddd6578e37d38cc03fa1df0b7617de769aac94aeabc55d58b906ca3698bc8.json index da21c126347..fb1478c1a62 100644 --- a/core/lib/dal/.sqlx/query-cea366a9d0da60bf03c71be26862929e051270056ebf113a657a464f89c7fd57.json +++ b/core/lib/dal/.sqlx/query-3e3ddd6578e37d38cc03fa1df0b7617de769aac94aeabc55d58b906ca3698bc8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_logs,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", + "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_logs,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n 
$7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n $21,\n NOW(),\n NOW()\n )\n ", "describe": { "columns": [], "parameters": { @@ -23,11 +23,12 @@ "Int4", "ByteaArray", "Int8Array", + "Int8Array", "Bytea", "Jsonb" ] }, "nullable": [] }, - "hash": "cea366a9d0da60bf03c71be26862929e051270056ebf113a657a464f89c7fd57" + "hash": "3e3ddd6578e37d38cc03fa1df0b7617de769aac94aeabc55d58b906ca3698bc8" } diff --git a/core/lib/dal/.sqlx/query-03c585c7e9f918e608757496088c7e3b6bdb2a08149d5f443310607d3c78988c.json b/core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json similarity index 50% rename from core/lib/dal/.sqlx/query-03c585c7e9f918e608757496088c7e3b6bdb2a08149d5f443310607d3c78988c.json rename to core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json index 9c811e9f87c..853acb9f71a 100644 --- a/core/lib/dal/.sqlx/query-03c585c7e9f918e608757496088c7e3b6bdb2a08149d5f443310607d3c78988c.json +++ b/core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json @@ -1,12 +1,17 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n storage_refunds\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n storage_refunds,\n pubdata_costs\n FROM\n l1_batches\n WHERE\n number = $1\n ", "describe": { "columns": [ { "ordinal": 0, "name": "storage_refunds", "type_info": "Int8Array" + }, + { + "ordinal": 1, + "name": "pubdata_costs", + "type_info": "Int8Array" } ], "parameters": { @@ -15,8 +20,9 @@ ] }, "nullable": [ + true, true ] }, - "hash": "03c585c7e9f918e608757496088c7e3b6bdb2a08149d5f443310607d3c78988c" + "hash": "cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a" } diff --git a/core/lib/dal/migrations/20240403174704_pubdata_costs.down.sql b/core/lib/dal/migrations/20240403174704_pubdata_costs.down.sql new file mode 100644 index 00000000000..ffd48b4ac47 --- /dev/null +++ 
b/core/lib/dal/migrations/20240403174704_pubdata_costs.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE l1_batches + DROP COLUMN IF EXISTS pubdata_costs; diff --git a/core/lib/dal/migrations/20240403174704_pubdata_costs.up.sql b/core/lib/dal/migrations/20240403174704_pubdata_costs.up.sql new file mode 100644 index 00000000000..5a2d6e95962 --- /dev/null +++ b/core/lib/dal/migrations/20240403174704_pubdata_costs.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE l1_batches + ADD COLUMN IF NOT EXISTS pubdata_costs BIGINT[]; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index b0fc0ef0299..2deb63c823b 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -12,7 +12,7 @@ use zksync_db_connection::{ }; use zksync_types::{ aggregated_operations::AggregatedActionType, - block::{BlockGasCount, L1BatchHeader, L1BatchTreeData, MiniblockHeader}, + block::{BlockGasCount, L1BatchHeader, L1BatchTreeData, MiniblockHeader, StorageOracleInfo}, circuit::CircuitStatistic, commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, U256, @@ -22,6 +22,7 @@ use crate::{ models::{ parse_protocol_version, storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageMiniblockHeader}, + storage_oracle_info::DbStorageOracleInfo, }, Core, CoreDal, }; @@ -369,14 +370,16 @@ impl BlocksDal<'_, '_> { Ok(Some(heap)) } - pub async fn get_storage_refunds( + pub async fn get_storage_oracle_info( &mut self, number: L1BatchNumber, - ) -> anyhow::Result>> { - let Some(row) = sqlx::query!( + ) -> anyhow::Result> { + let storage_oracle_info = sqlx::query_as!( + DbStorageOracleInfo, r#" SELECT - storage_refunds + storage_refunds, + pubdata_costs FROM l1_batches WHERE @@ -388,16 +391,9 @@ impl BlocksDal<'_, '_> { .report_latency() .with_arg("number", &number) .fetch_optional(self.storage) - .await? 
- else { - return Ok(None); - }; - let Some(storage_refunds) = row.storage_refunds else { - return Ok(None); - }; + .await?; - let storage_refunds: Vec<_> = storage_refunds.into_iter().map(|n| n as u32).collect(); - Ok(Some(storage_refunds)) + Ok(storage_oracle_info.and_then(DbStorageOracleInfo::into_optional_batch_oracle_info)) } pub async fn set_eth_tx_id( @@ -468,6 +464,7 @@ impl BlocksDal<'_, '_> { initial_bootloader_contents: &[(usize, U256)], predicted_block_gas: BlockGasCount, storage_refunds: &[u32], + pubdata_costs: &[i32], predicted_circuits_by_type: CircuitStatistic, // predicted number of circuits for each circuit type ) -> anyhow::Result<()> { let priority_onchain_data: Vec> = header @@ -494,6 +491,7 @@ impl BlocksDal<'_, '_> { let used_contract_hashes = serde_json::to_value(&header.used_contract_hashes) .expect("failed to serialize used_contract_hashes to JSON value"); let storage_refunds: Vec<_> = storage_refunds.iter().copied().map(i64::from).collect(); + let pubdata_costs: Vec<_> = pubdata_costs.iter().copied().map(i64::from).collect(); let mut transaction = self.storage.start_transaction().await?; sqlx::query!( @@ -518,6 +516,7 @@ impl BlocksDal<'_, '_> { protocol_version, system_logs, storage_refunds, + pubdata_costs, pubdata_input, predicted_circuits_by_type, created_at, @@ -545,6 +544,7 @@ impl BlocksDal<'_, '_> { $18, $19, $20, + $21, NOW(), NOW() ) @@ -567,6 +567,7 @@ impl BlocksDal<'_, '_> { header.protocol_version.map(|v| v as i32), &system_logs, &storage_refunds, + &pubdata_costs, pubdata_input, serde_json::to_value(predicted_circuits_by_type).unwrap(), ) @@ -2372,8 +2373,15 @@ impl BlocksDal<'_, '_> { } pub async fn insert_mock_l1_batch(&mut self, header: &L1BatchHeader) -> anyhow::Result<()> { - self.insert_l1_batch(header, &[], Default::default(), &[], Default::default()) - .await + self.insert_l1_batch( + header, + &[], + Default::default(), + &[], + &[], + Default::default(), + ) + .await } /// Deletes all miniblocks and L1 batches, 
including the genesis ones. Should only be used in tests. @@ -2480,7 +2488,7 @@ mod tests { execute: 10, }; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[], Default::default()) + .insert_l1_batch(&header, &[], predicted_gas, &[], &[], Default::default()) .await .unwrap(); @@ -2488,7 +2496,7 @@ mod tests { header.timestamp += 100; predicted_gas += predicted_gas; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[], Default::default()) + .insert_l1_batch(&header, &[], predicted_gas, &[], &[], Default::default()) .await .unwrap(); diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index 773852ba712..07941a95714 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -9,6 +9,7 @@ pub mod storage_eth_tx; pub mod storage_event; pub mod storage_fee_monitor; pub mod storage_log; +pub mod storage_oracle_info; pub mod storage_protocol_version; pub mod storage_sync; pub mod storage_transaction; diff --git a/core/lib/dal/src/models/storage_oracle_info.rs b/core/lib/dal/src/models/storage_oracle_info.rs new file mode 100644 index 00000000000..a08e89583c3 --- /dev/null +++ b/core/lib/dal/src/models/storage_oracle_info.rs @@ -0,0 +1,38 @@ +use zksync_types::block::StorageOracleInfo; + +/// The structure represents the storage oracle info stored in the database. 
+#[derive(Debug, Clone, sqlx::FromRow)] +pub(crate) struct DbStorageOracleInfo { + pub storage_refunds: Option>, + pub pubdata_costs: Option>, +} + +impl DbStorageOracleInfo { + pub(crate) fn into_optional_batch_oracle_info(self) -> Option { + let DbStorageOracleInfo { + storage_refunds, + pubdata_costs, + } = self; + + let storage_refunds: Vec = storage_refunds.map(|refunds| { + // Here we do `.try_into().unwrap()` to ensure consistency of the data + refunds + .into_iter() + .map(|refund| refund.try_into().unwrap()) + .collect() + })?; + + let pubdata_costs = pubdata_costs.map(|costs| { + // Here we do `.try_into().unwrap()` to ensure consistency of the data + costs + .into_iter() + .map(|cost| cost.try_into().unwrap()) + .collect() + }); + + Some(StorageOracleInfo { + storage_refunds, + pubdata_costs, + }) + } +} diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs index fa0cbe4c15a..de8016b69fa 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -71,6 +71,7 @@ impl GlueFrom for crate::interface::Fi cycles_used: value.full_result.cycles_used, deduplicated_events_logs: vec![], storage_refunds: Vec::new(), + pubdata_costs: Vec::new(), }, final_bootloader_memory: None, pubdata_input: None, @@ -133,6 +134,7 @@ impl GlueFrom for crate::interface::Fi cycles_used: value.full_result.cycles_used, deduplicated_events_logs: vec![], storage_refunds: Vec::new(), + pubdata_costs: Vec::new(), }, final_bootloader_memory: None, pubdata_input: None, @@ -193,6 +195,7 @@ impl GlueFrom for crate::interface: cycles_used: value.full_result.cycles_used, deduplicated_events_logs: vec![], storage_refunds: Vec::new(), + pubdata_costs: Vec::new(), }, final_bootloader_memory: None, pubdata_input: None, diff --git a/core/lib/multivm/src/interface/types/outputs/execution_state.rs b/core/lib/multivm/src/interface/types/outputs/execution_state.rs 
index 523d90b7fd6..dcd637b8564 100644 --- a/core/lib/multivm/src/interface/types/outputs/execution_state.rs +++ b/core/lib/multivm/src/interface/types/outputs/execution_state.rs @@ -31,6 +31,11 @@ pub struct CurrentExecutionState { pub deduplicated_events_logs: Vec, /// Refunds returned by `StorageOracle`. pub storage_refunds: Vec, + /// Pubdata costs returned by `StorageOracle`. + /// This field is non-empty only starting from v1.5.0. + /// Note, that it is a signed integer, because the pubdata costs can be negative, e.g. in case + /// the user rolls back a state diff. + pub pubdata_costs: Vec, } /// Bootloader Memory of the VM. diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index cc6cb579476..3ad835639da 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -180,6 +180,7 @@ impl VmInterface for Vm { // It's not applicable for vm 1.3.2 deduplicated_events_logs: vec![], storage_refunds: vec![], + pubdata_costs: Vec::new(), } } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_capacity.rs index 7b9f8f46271..0c5d93a7c6a 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_capacity.rs @@ -63,5 +63,6 @@ pub(crate) fn circuit_statistic_from_cycles(cycles: CircuitCycleStatistic) -> Ci ecrecover: cycles.ecrecover_cycles as f32 / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32, sha256: cycles.sha256_cycles as f32 / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32, + secp256k1_verify: 0.0, } } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index ff795669d64..c9cc6848d0f 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -133,6 +133,7 @@ impl VmInterface for Vm { 
.map(GlueInto::glue_into) .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), + pubdata_costs: Vec::new(), } } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs index 1d08352849f..651e0b11188 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs @@ -63,5 +63,6 @@ pub(crate) fn circuit_statistic_from_cycles(cycles: CircuitCycleStatistic) -> Ci ecrecover: cycles.ecrecover_cycles as f32 / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32, sha256: cycles.sha256_cycles as f32 / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32, + secp256k1_verify: 0.0, } } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index bbf0944a47e..5067326c15f 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -133,6 +133,7 @@ impl VmInterface for Vm { .map(GlueInto::glue_into) .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), + pubdata_costs: Vec::new(), } } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs index 71fd03e6d0a..b0ddcf821d8 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs @@ -63,5 +63,6 @@ pub(crate) fn circuit_statistic_from_cycles(cycles: CircuitCycleStatistic) -> Ci ecrecover: cycles.ecrecover_cycles as f32 / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32, sha256: cycles.sha256_cycles as f32 / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32, + secp256k1_verify: 0.0, } } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs 
b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index 257b6ff44ae..f612a62de62 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -133,6 +133,7 @@ impl VmInterface for Vm { .map(GlueInto::glue_into) .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), + pubdata_costs: Vec::new(), } } diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs index 1d08352849f..651e0b11188 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs @@ -63,5 +63,6 @@ pub(crate) fn circuit_statistic_from_cycles(cycles: CircuitCycleStatistic) -> Ci ecrecover: cycles.ecrecover_cycles as f32 / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32, sha256: cycles.sha256_cycles as f32 / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32, + secp256k1_verify: 0.0, } } diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index ef8da9e0a61..8850a7a9d21 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -133,6 +133,8 @@ impl VmInterface for Vm { .map(GlueInto::glue_into) .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), + // TODO: fix this line as soon as v1.5.0 is supported + pubdata_costs: Vec::new(), } } diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index ba24b9673b7..d302b539b1b 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -191,6 +191,7 @@ impl VmInterface for Vm { // It's not applicable for `vm5` deduplicated_events_logs: vec![], storage_refunds: vec![], + pubdata_costs: vec![], } } diff --git 
a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index 532ff47ae3f..f7b65a2de4c 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -208,6 +208,7 @@ impl VmInterface for Vm { // It's not applicable for `vm6` deduplicated_events_logs: vec![], storage_refunds: vec![], + pubdata_costs: vec![], user_l2_to_l1_logs: l2_to_l1_logs, } } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 3f8e57711a3..341ef0b1099 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -126,6 +126,7 @@ impl VmInterface for Vm { .map(GlueInto::glue_into) .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), + pubdata_costs: Vec::new(), } } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 987e6283294..9d48d46547e 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -126,6 +126,7 @@ impl VmInterface for Vm { .map(GlueInto::glue_into) .collect(), storage_refunds: Vec::new(), + pubdata_costs: Vec::new(), } } diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 58b34e899bb..8002a21ae55 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -84,6 +84,14 @@ pub struct MiniblockHeader { pub gas_limit: u64, } +/// Structure that represents the data that is returned by the storage oracle during batch execution. +pub struct StorageOracleInfo { + /// The refunds returned by the storage oracle. + pub storage_refunds: Vec, + // Pubdata costs are available only since v1.5.0, so we allow them to be optional. + pub pubdata_costs: Option>, +} + /// Data needed to execute a miniblock in the VM.
#[derive(Debug)] pub struct MiniblockExecutionData { diff --git a/core/lib/types/src/circuit.rs b/core/lib/types/src/circuit.rs index d97594913c4..dacb34c813b 100644 --- a/core/lib/types/src/circuit.rs +++ b/core/lib/types/src/circuit.rs @@ -16,6 +16,7 @@ pub struct CircuitCycleStatistic { pub keccak256_cycles: u32, pub ecrecover_cycles: u32, pub sha256_cycles: u32, + pub secp256k1_verify_cycles: u32, } impl CircuitCycleStatistic { @@ -38,6 +39,8 @@ pub struct CircuitStatistic { pub keccak256: f32, pub ecrecover: f32, pub sha256: f32, + #[serde(default)] + pub secp256k1_verify: f32, } impl CircuitStatistic { @@ -54,6 +57,7 @@ impl CircuitStatistic { + self.keccak256.ceil() as usize + self.ecrecover.ceil() as usize + self.sha256.ceil() as usize + + self.secp256k1_verify.ceil() as usize } /// Adds numbers. @@ -69,6 +73,7 @@ impl CircuitStatistic { + self.keccak256 + self.ecrecover + self.sha256 + + self.secp256k1_verify } } @@ -88,6 +93,7 @@ impl Add for CircuitStatistic { keccak256: self.keccak256 + other.keccak256, ecrecover: self.ecrecover + other.ecrecover, sha256: self.sha256 + other.sha256, + secp256k1_verify: self.secp256k1_verify + other.secp256k1_verify, } } } diff --git a/core/lib/zksync_core/src/genesis.rs b/core/lib/zksync_core/src/genesis.rs index 5ec8c794063..09e1e16cc2b 100644 --- a/core/lib/zksync_core/src/genesis.rs +++ b/core/lib/zksync_core/src/genesis.rs @@ -520,6 +520,7 @@ pub(crate) async fn create_genesis_l1_batch( &[], BlockGasCount::default(), &[], + &[], Default::default(), ) .await?; diff --git a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs index 2e38af11c4d..96ee93e8744 100644 --- a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs @@ -134,6 +134,7 @@ impl UpdatesManager { &final_bootloader_memory, self.pending_l1_gas_count(), &finished_batch.final_execution_state.storage_refunds, + 
&finished_batch.final_execution_state.pubdata_costs, self.pending_execution_metrics().circuit_statistic, ) .await diff --git a/core/lib/zksync_core/src/state_keeper/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/tests/mod.rs index dbfaac434e0..7d278b11bce 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/mod.rs @@ -110,6 +110,7 @@ pub(super) fn default_vm_batch_result() -> FinishedL1Batch { cycles_used: 0, deduplicated_events_logs: vec![], storage_refunds: Vec::new(), + pubdata_costs: Vec::new(), }, final_bootloader_memory: Some(vec![]), pubdata_input: Some(vec![]), diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index e5ebc759e99..6f424c2127f 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -598,10 +598,11 @@ async fn generate_witness( let storage_refunds = connection .blocks_dal() - .get_storage_refunds(input.block_number) + .get_storage_oracle_info(input.block_number) .await .unwrap() - .unwrap(); + .unwrap() + .storage_refunds; let mut used_bytecodes = connection .factory_deps_dal() From 76f38c9102d14b8cdf321063d3c48cd64cb3df94 Mon Sep 17 00:00:00 2001 From: Stanislav Bezkorovainyi Date: Thu, 4 Apr 2024 13:59:49 +0200 Subject: [PATCH 7/9] chore: Migrate more components including call tracer to u64 (#1556) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Migrate more parts to u64 in preparation to the v1.5.0. 
Important note: - While the internal `gas` used by the calls in the tracer could remain u32, the top level call should continue being equal to `gasLimit` requiring it to be moved to u64 - In our DB call traces are stored as `Vec` encoded by `bincode`, in other words no data about the structure is preserved and so we'll have to decode the trace based on the protocol version ## Why ❔ In preparation for v1.5.0, to reduce the diff on the v1.5.0 PR. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. --- .../system-constants-generator/src/utils.rs | 5 +- core/lib/basic_types/src/protocol_version.rs | 6 ++ ...6cd97cb00335b1ef8c613824ea7b55c076b16.json | 22 +++++ ...e3b0fd67aa1f5f7ea0de673a2fbe1f742db86.json | 22 ----- core/lib/dal/src/blocks_dal.rs | 23 +++++ core/lib/dal/src/blocks_web3_dal.rs | 11 ++- .../lib/dal/src/models/storage_transaction.rs | 33 +++++-- core/lib/dal/src/transactions_dal.rs | 44 +++++++-- .../src/glue/types/vm/vm_block_result.rs | 12 +-- .../glue/types/vm/vm_tx_execution_result.rs | 12 +-- .../types/outputs/execution_result.rs | 4 +- .../src/interface/types/outputs/statistic.rs | 2 +- .../src/tracers/call_tracer/vm_1_4_1/mod.rs | 6 +- .../src/tracers/call_tracer/vm_1_4_2/mod.rs | 6 +- .../call_tracer/vm_boojum_integration/mod.rs | 6 +- .../src/tracers/call_tracer/vm_latest/mod.rs | 6 +- .../call_tracer/vm_refunds_enhancement/mod.rs | 6 +- .../call_tracer/vm_virtual_blocks/mod.rs | 6 +- .../versions/vm_1_3_2/oracles/tracer/call.rs | 6 +- .../vm_1_4_1/implementation/statistics.rs | 2 +- .../src/versions/vm_1_4_1/tracers/refunds.rs | 4 +- .../vm_1_4_2/implementation/statistics.rs | 2 +- .../src/versions/vm_1_4_2/tests/block_tip.rs | 5 +- 
.../src/versions/vm_1_4_2/tests/refunds.rs | 10 ++- .../src/versions/vm_1_4_2/tracers/refunds.rs | 4 +- .../implementation/statistics.rs | 2 +- .../vm_boojum_integration/tracers/refunds.rs | 4 +- .../vm_latest/bootloader_state/state.rs | 4 +- .../versions/vm_latest/bootloader_state/tx.rs | 4 +- .../vm_latest/implementation/statistics.rs | 2 +- .../versions/vm_latest/implementation/tx.rs | 2 +- .../src/versions/vm_latest/tests/block_tip.rs | 5 +- .../src/versions/vm_latest/tracers/refunds.rs | 37 ++++---- .../src/versions/vm_m6/oracles/tracer/call.rs | 6 +- .../implementation/statistics.rs | 2 +- .../vm_refunds_enhancement/tracers/refunds.rs | 4 +- .../implementation/statistics.rs | 2 +- .../vm_virtual_blocks/tracers/refunds.rs | 8 +- core/lib/types/src/tx/mod.rs | 8 +- core/lib/types/src/vm_trace.rs | 89 ++++++++++++++++++- .../src/api_server/tx_sender/mod.rs | 19 +++- .../src/api_server/web3/namespaces/debug.rs | 2 +- .../src/state_keeper/batch_executor/mod.rs | 2 +- .../state_keeper/batch_executor/tests/mod.rs | 2 +- 44 files changed, 329 insertions(+), 140 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-4c9df15553b3add049d5756bfa96cd97cb00335b1ef8c613824ea7b55c076b16.json delete mode 100644 core/lib/dal/.sqlx/query-9c2a5f32c627d3a5c6f1e87b31ce3b0fd67aa1f5f7ea0de673a2fbe1f742db86.json diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 81eaaf1220c..ee8749624d8 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -333,8 +333,9 @@ pub(super) fn execute_user_txs_in_test_gas_vm( let metrics = result.get_execution_metrics(None); VmSpentResourcesResult { - gas_consumed: result.statistics.gas_used, - total_gas_paid: total_gas_paid_upfront.as_u32() - total_gas_refunded, + // It is assumed that the entire `gas_used` was spent on computation and so it safe to convert to u32 + gas_consumed: result.statistics.gas_used as u32, + 
total_gas_paid: total_gas_paid_upfront.as_u32() - total_gas_refunded as u32, pubdata_published: metrics.size() as u32, total_pubdata_paid: 0, } diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index 7d96095aa80..97419fd418f 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -121,6 +121,12 @@ impl ProtocolVersionId { pub fn is_pre_1_4_2(&self) -> bool { self < &ProtocolVersionId::Version21 } + + pub fn is_pre_1_5_0(&self) -> bool { + // In the current codebase all the protocol versions are pre-1.5.0. + // This method will be updated once the v1.5.0 is added to the server + true + } } impl Default for ProtocolVersionId { diff --git a/core/lib/dal/.sqlx/query-4c9df15553b3add049d5756bfa96cd97cb00335b1ef8c613824ea7b55c076b16.json b/core/lib/dal/.sqlx/query-4c9df15553b3add049d5756bfa96cd97cb00335b1ef8c613824ea7b55c076b16.json new file mode 100644 index 00000000000..23b98142557 --- /dev/null +++ b/core/lib/dal/.sqlx/query-4c9df15553b3add049d5756bfa96cd97cb00335b1ef8c613824ea7b55c076b16.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version\n FROM\n transactions\n LEFT JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + true + ] + }, + "hash": "4c9df15553b3add049d5756bfa96cd97cb00335b1ef8c613824ea7b55c076b16" +} diff --git a/core/lib/dal/.sqlx/query-9c2a5f32c627d3a5c6f1e87b31ce3b0fd67aa1f5f7ea0de673a2fbe1f742db86.json b/core/lib/dal/.sqlx/query-9c2a5f32c627d3a5c6f1e87b31ce3b0fd67aa1f5f7ea0de673a2fbe1f742db86.json deleted file mode 100644 index f9a53d70763..00000000000 --- a/core/lib/dal/.sqlx/query-9c2a5f32c627d3a5c6f1e87b31ce3b0fd67aa1f5f7ea0de673a2fbe1f742db86.json +++ /dev/null @@ -1,22 
+0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n miniblocks\n WHERE\n number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "timestamp", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "9c2a5f32c627d3a5c6f1e87b31ce3b0fd67aa1f5f7ea0de673a2fbe1f742db86" -} diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 2deb63c823b..8e24350c930 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -2076,6 +2076,29 @@ impl BlocksDal<'_, '_> { Ok(()) } + pub async fn get_miniblock_protocol_version_id( + &mut self, + miniblock_number: MiniblockNumber, + ) -> DalResult> { + Ok(sqlx::query!( + r#" + SELECT + protocol_version + FROM + miniblocks + WHERE + number = $1 + "#, + i64::from(miniblock_number.0) + ) + .try_map(|row| row.protocol_version.map(parse_protocol_version).transpose()) + .instrument("get_miniblock_protocol_version_id") + .with_arg("miniblock_number", &miniblock_number) + .fetch_optional(self.storage) + .await? + .flatten()) + } + pub async fn get_fee_address_for_miniblock( &mut self, number: MiniblockNumber, diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 5d756fdbc0e..4f9cc3a7b7b 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -8,7 +8,7 @@ use zksync_types::{ l2_to_l1_log::L2ToL1Log, vm_trace::Call, web3::types::{BlockHeader, U64}, - Bytes, L1BatchNumber, MiniblockNumber, H160, H2048, H256, U256, + Bytes, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H160, H2048, H256, U256, }; use zksync_utils::bigdecimal_to_u256; @@ -473,6 +473,13 @@ impl BlocksWeb3Dal<'_, '_> { &mut self, block_number: MiniblockNumber, ) -> DalResult> { + let protocol_version = self + .storage + .blocks_dal() + .get_miniblock_protocol_version_id(block_number) + .await? 
+ .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); + Ok(sqlx::query_as!( CallTrace, r#" @@ -493,7 +500,7 @@ impl BlocksWeb3Dal<'_, '_> { .fetch_all(self.storage) .await? .into_iter() - .map(Call::from) + .map(|call_trace| call_trace.into_call(protocol_version)) .collect()) } diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 56db5623fd4..9f321c07d5a 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -3,18 +3,18 @@ use std::{convert::TryInto, str::FromStr}; use bigdecimal::Zero; use sqlx::types::chrono::{DateTime, NaiveDateTime, Utc}; use zksync_types::{ - api, - api::{TransactionDetails, TransactionReceipt, TransactionStatus}, + api::{self, TransactionDetails, TransactionReceipt, TransactionStatus}, fee::Fee, l1::{OpProcessingType, PriorityQueueType}, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTxCommonData, transaction_request::PaymasterParams, - vm_trace::Call, + vm_trace::{Call, LegacyCall}, web3::types::U64, Address, Bytes, Execute, ExecuteTransactionCommon, L1TxCommonData, L2ChainId, L2TxCommonData, - Nonce, PackedEthSignature, PriorityOpId, Transaction, EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, - EIP_712_TX_TYPE, H160, H256, PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, U256, + Nonce, PackedEthSignature, PriorityOpId, ProtocolVersionId, Transaction, EIP_1559_TX_TYPE, + EIP_2930_TX_TYPE, EIP_712_TX_TYPE, H160, H256, PRIORITY_OPERATION_L2_TX_TYPE, + PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::{bigdecimal_to_u256, h256_to_account_address}; @@ -543,8 +543,25 @@ pub(crate) struct CallTrace { pub call_trace: Vec, } -impl From for Call { - fn from(call_trace: CallTrace) -> Self { - bincode::deserialize(&call_trace.call_trace).unwrap() +impl CallTrace { + pub(crate) fn into_call(self, protocol_version: ProtocolVersionId) -> Call { + if protocol_version.is_pre_1_5_0() { + let legacy_call_trace: 
LegacyCall = bincode::deserialize(&self.call_trace).unwrap(); + + legacy_call_trace.into() + } else { + bincode::deserialize(&self.call_trace).unwrap() + } + } + + pub(crate) fn from_call(call: Call, protocol_version: ProtocolVersionId) -> Self { + let call_trace = if protocol_version.is_pre_1_5_0() { + bincode::serialize(&LegacyCall::try_from(call).unwrap()) + } else { + bincode::serialize(&call) + } + .unwrap(); + + Self { call_trace } } } diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 3d42f33ea04..0d70dec6a39 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -17,13 +17,13 @@ use zksync_types::{ tx::{tx_execution_info::TxExecutionStatus, TransactionExecutionResult}, vm_trace::Call, Address, ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, MiniblockNumber, PriorityOpId, - Transaction, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, + ProtocolVersionId, Transaction, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::u256_to_big_decimal; use crate::{ models::storage_transaction::{CallTrace, StorageTransaction}, - Core, + Core, CoreDal, }; #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] @@ -501,6 +501,13 @@ impl TransactionsDal<'_, '_> { ) { { let mut transaction = self.storage.start_transaction().await.unwrap(); + let protocol_version = transaction + .blocks_dal() + .get_miniblock_protocol_version_id(miniblock_number) + .await + .unwrap() + .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); + let mut l1_hashes = Vec::with_capacity(transactions.len()); let mut l1_indices_in_block = Vec::with_capacity(transactions.len()); let mut l1_errors = Vec::with_capacity(transactions.len()); @@ -561,7 +568,8 @@ impl TransactionsDal<'_, '_> { }; if let Some(call_trace) = tx_res.call_trace() { - bytea_call_traces.push(bincode::serialize(&call_trace).unwrap()); + bytea_call_traces + .push(CallTrace::from_call(call_trace, protocol_version).call_trace); 
call_traces_tx_hashes.push(hash.0.to_vec()); } @@ -571,7 +579,8 @@ impl TransactionsDal<'_, '_> { l1_indices_in_block.push(index_in_block as i32); l1_errors.push(error.unwrap_or_default()); l1_execution_infos.push(serde_json::to_value(execution_info).unwrap()); - l1_refunded_gas.push(i64::from(*refunded_gas)); + l1_refunded_gas + .push(i64::try_from(*refunded_gas).expect("Refund exceeds i64")); l1_effective_gas_prices .push(u256_to_big_decimal(common_data.max_fee_per_gas)); } @@ -608,7 +617,8 @@ impl TransactionsDal<'_, '_> { )); l2_gas_per_pubdata_limit .push(u256_to_big_decimal(common_data.fee.gas_per_pubdata_limit)); - l2_refunded_gas.push(i64::from(*refunded_gas)); + l2_refunded_gas + .push(i64::try_from(*refunded_gas).expect("Refund exceeds i64")); } ExecuteTransactionCommon::ProtocolUpgrade(common_data) => { upgrade_hashes.push(hash.0.to_vec()); @@ -616,7 +626,8 @@ impl TransactionsDal<'_, '_> { upgrade_errors.push(error.unwrap_or_default()); upgrade_execution_infos .push(serde_json::to_value(execution_info).unwrap()); - upgrade_refunded_gas.push(i64::from(*refunded_gas)); + upgrade_refunded_gas + .push(i64::try_from(*refunded_gas).expect("Refund exceeds i64")); upgrade_effective_gas_prices .push(u256_to_big_decimal(common_data.max_fee_per_gas)); } @@ -1295,6 +1306,25 @@ impl TransactionsDal<'_, '_> { } pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult> { + let protocol_version: ProtocolVersionId = sqlx::query!( + r#" + SELECT + protocol_version + FROM + transactions + LEFT JOIN miniblocks ON transactions.miniblock_number = miniblocks.number + WHERE + transactions.hash = $1 + "#, + tx_hash.as_bytes() + ) + .instrument("get_call_trace") + .with_arg("tx_hash", &tx_hash) + .fetch_optional(self.storage) + .await? 
+ .and_then(|row| row.protocol_version.map(|v| (v as u16).try_into().unwrap())) + .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); + Ok(sqlx::query_as!( CallTrace, r#" @@ -1311,7 +1341,7 @@ impl TransactionsDal<'_, '_> { .with_arg("tx_hash", &tx_hash) .fetch_optional(self.storage) .await? - .map(Into::into)) + .map(|call_trace| call_trace.into_call(protocol_version))) } pub(crate) async fn get_tx_by_hash(&mut self, hash: H256) -> Option { diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs index de8016b69fa..0e79018bd55 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -42,7 +42,7 @@ impl GlueFrom for crate::interface::Fi cycles_used: value.block_tip_result.cycles_used, total_log_queries: value.block_tip_result.logs.total_log_queries_count, computational_gas_used: value.full_result.gas_used, - gas_used: value.full_result.gas_used, + gas_used: value.full_result.gas_used as u64, gas_remaining: value.full_result.gas_remaining, pubdata_published: 0, circuit_statistic: Default::default(), @@ -105,7 +105,7 @@ impl GlueFrom for crate::interface::Fi cycles_used: value.block_tip_result.cycles_used, total_log_queries: value.block_tip_result.logs.total_log_queries_count, computational_gas_used: value.full_result.computational_gas_used, - gas_used: value.full_result.gas_used, + gas_used: value.full_result.gas_used as u64, gas_remaining: value.full_result.gas_remaining, pubdata_published: 0, circuit_statistic: Default::default(), @@ -166,7 +166,7 @@ impl GlueFrom for crate::interface: cycles_used: value.block_tip_result.cycles_used, total_log_queries: value.block_tip_result.logs.total_log_queries_count, computational_gas_used: value.full_result.computational_gas_used, - gas_used: value.full_result.gas_used, + gas_used: value.full_result.gas_used as u64, gas_remaining: value.full_result.gas_remaining, 
pubdata_published: 0, circuit_statistic: Default::default(), @@ -241,7 +241,7 @@ impl GlueFrom cycles_used: value.full_result.cycles_used, total_log_queries: value.full_result.total_log_queries, computational_gas_used: value.full_result.computational_gas_used, - gas_used: value.full_result.gas_used, + gas_used: value.full_result.gas_used as u64, gas_remaining: value.full_result.gas_remaining, pubdata_published: 0, circuit_statistic: Default::default(), @@ -273,7 +273,7 @@ impl GlueFrom cycles_used: value.full_result.cycles_used, total_log_queries: value.full_result.total_log_queries, computational_gas_used: 0, - gas_used: value.full_result.gas_used, + gas_used: value.full_result.gas_used as u64, gas_remaining: value.full_result.gas_remaining, pubdata_published: 0, circuit_statistic: Default::default(), @@ -321,7 +321,7 @@ impl GlueFrom cycles_used: value.full_result.cycles_used, total_log_queries: value.full_result.total_log_queries, computational_gas_used: value.full_result.computational_gas_used, - gas_used: value.full_result.gas_used, + gas_used: value.full_result.gas_used as u64, gas_remaining: value.full_result.gas_remaining, pubdata_published: 0, circuit_statistic: Default::default(), diff --git a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs index 0c888cdda23..80d1ef8a294 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs @@ -13,8 +13,8 @@ impl GlueFrom for VmExecutionRes } result.refunds = Refunds { - gas_refunded: value.gas_refunded, - operator_suggested_refund: value.operator_suggested_refund, + gas_refunded: value.gas_refunded as u64, + operator_suggested_refund: value.operator_suggested_refund as u64, }; result } @@ -28,8 +28,8 @@ impl GlueFrom for VmExecutionRes } result.refunds = Refunds { - gas_refunded: value.gas_refunded, - operator_suggested_refund: value.operator_suggested_refund, 
+ gas_refunded: value.gas_refunded as u64, + operator_suggested_refund: value.operator_suggested_refund as u64, }; result } @@ -43,8 +43,8 @@ impl GlueFrom for VmExecution } result.refunds = Refunds { - gas_refunded: value.gas_refunded, - operator_suggested_refund: value.operator_suggested_refund, + gas_refunded: value.gas_refunded as u64, + operator_suggested_refund: value.operator_suggested_refund as u64, }; result } diff --git a/core/lib/multivm/src/interface/types/outputs/execution_result.rs b/core/lib/multivm/src/interface/types/outputs/execution_result.rs index a3c201e7970..3ce7d31f212 100644 --- a/core/lib/multivm/src/interface/types/outputs/execution_result.rs +++ b/core/lib/multivm/src/interface/types/outputs/execution_result.rs @@ -12,8 +12,8 @@ use crate::interface::{Halt, VmExecutionStatistics, VmRevertReason}; /// Refunds produced for the user. #[derive(Debug, Clone, Default)] pub struct Refunds { - pub gas_refunded: u32, - pub operator_suggested_refund: u32, + pub gas_refunded: u64, + pub operator_suggested_refund: u64, } /// Events/storage logs/l2->l1 logs created within transaction execution. diff --git a/core/lib/multivm/src/interface/types/outputs/statistic.rs b/core/lib/multivm/src/interface/types/outputs/statistic.rs index f61954885fd..fb99ba7e36b 100644 --- a/core/lib/multivm/src/interface/types/outputs/statistic.rs +++ b/core/lib/multivm/src/interface/types/outputs/statistic.rs @@ -8,7 +8,7 @@ pub struct VmExecutionStatistics { /// Cycles used by the VM during the tx execution. pub cycles_used: u32, /// Gas used by the VM during the tx execution. - pub gas_used: u32, + pub gas_used: u64, /// Gas remaining after the tx execution. pub gas_remaining: u32, /// Computational gas used by the VM during the tx execution. 
diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_1_4_1/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_1_4_1/mod.rs index 54809168fca..4e1e4deb729 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_1_4_1/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_1_4_1/mod.rs @@ -44,7 +44,7 @@ impl DynTracer> for CallTracer { .inner .last() .map(|call| call.ergs_remaining + current_ergs) - .unwrap_or(current_ergs); + .unwrap_or(current_ergs) as u64; let mut current_call = Call { r#type: CallType::Call(far_call.glue_into()), @@ -128,7 +128,7 @@ impl CallTracer { current_call.from = current.msg_sender; current_call.to = current.this_address; current_call.value = U256::from(current.context_u128_value); - current_call.gas = current.ergs_remaining; + current_call.gas = current.ergs_remaining as u64; } fn save_output_vm_1_4_1( @@ -194,7 +194,7 @@ impl CallTracer { current_call.farcall.gas_used = current_call .farcall .parent_gas - .saturating_sub(state.vm_local_state.callstack.current.ergs_remaining); + .saturating_sub(state.vm_local_state.callstack.current.ergs_remaining as u64); self.save_output_vm_1_4_1(state, memory, ret_opcode, &mut current_call.farcall); diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_1_4_2/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_1_4_2/mod.rs index 72ba34af9c4..d1ecd25db3a 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_1_4_2/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_1_4_2/mod.rs @@ -44,7 +44,7 @@ impl DynTracer> for CallTracer { .inner .last() .map(|call| call.ergs_remaining + current_ergs) - .unwrap_or(current_ergs); + .unwrap_or(current_ergs) as u64; let mut current_call = Call { r#type: CallType::Call(far_call.glue_into()), @@ -128,7 +128,7 @@ impl CallTracer { current_call.from = current.msg_sender; current_call.to = current.this_address; current_call.value = U256::from(current.context_u128_value); - current_call.gas = current.ergs_remaining; + current_call.gas = 
current.ergs_remaining as u64; } fn save_output_vm_1_4_2( @@ -194,7 +194,7 @@ impl CallTracer { current_call.farcall.gas_used = current_call .farcall .parent_gas - .saturating_sub(state.vm_local_state.callstack.current.ergs_remaining); + .saturating_sub(state.vm_local_state.callstack.current.ergs_remaining as u64); self.save_output_vm_1_4_2(state, memory, ret_opcode, &mut current_call.farcall); diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_boojum_integration/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_boojum_integration/mod.rs index c36bfb0f966..06f24ef9b09 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_boojum_integration/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_boojum_integration/mod.rs @@ -44,7 +44,7 @@ impl DynTracer> for CallTracer { .inner .last() .map(|call| call.ergs_remaining + current_ergs) - .unwrap_or(current_ergs); + .unwrap_or(current_ergs) as u64; let mut current_call = Call { r#type: CallType::Call(far_call.glue_into()), @@ -132,7 +132,7 @@ impl CallTracer { current_call.from = current.msg_sender; current_call.to = current.this_address; current_call.value = U256::from(current.context_u128_value); - current_call.gas = current.ergs_remaining; + current_call.gas = current.ergs_remaining as u64; } fn save_output_vm_boojum_integration( @@ -198,7 +198,7 @@ impl CallTracer { current_call.farcall.gas_used = current_call .farcall .parent_gas - .saturating_sub(state.vm_local_state.callstack.current.ergs_remaining); + .saturating_sub(state.vm_local_state.callstack.current.ergs_remaining as u64); self.save_output_vm_boojum_integration( state, diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs index 4d5a9857c93..8a1c7b725d1 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs @@ -44,7 +44,7 @@ impl DynTracer> for CallTracer { .inner .last() .map(|call| 
call.ergs_remaining + current_ergs) - .unwrap_or(current_ergs); + .unwrap_or(current_ergs) as u64; let mut current_call = Call { r#type: CallType::Call(far_call.glue_into()), @@ -128,7 +128,7 @@ impl CallTracer { current_call.from = current.msg_sender; current_call.to = current.this_address; current_call.value = U256::from(current.context_u128_value); - current_call.gas = current.ergs_remaining; + current_call.gas = current.ergs_remaining as u64; } fn save_output_latest( @@ -194,7 +194,7 @@ impl CallTracer { current_call.farcall.gas_used = current_call .farcall .parent_gas - .saturating_sub(state.vm_local_state.callstack.current.ergs_remaining); + .saturating_sub(state.vm_local_state.callstack.current.ergs_remaining as u64); self.save_output_latest(state, memory, ret_opcode, &mut current_call.farcall); diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs index 4c9a9d8e8d6..d310e459508 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs @@ -44,7 +44,7 @@ impl DynTracer> for CallTracer { .inner .last() .map(|call| call.ergs_remaining + current_ergs) - .unwrap_or(current_ergs); + .unwrap_or(current_ergs) as u64; let mut current_call = Call { r#type: CallType::Call(far_call.glue_into()), @@ -129,7 +129,7 @@ impl CallTracer { current_call.from = current.msg_sender; current_call.to = current.this_address; current_call.value = U256::from(current.context_u128_value); - current_call.gas = current.ergs_remaining; + current_call.gas = current.ergs_remaining as u64; } fn save_output_refunds_enhancement( @@ -195,7 +195,7 @@ impl CallTracer { current_call.farcall.gas_used = current_call .farcall .parent_gas - .saturating_sub(state.vm_local_state.callstack.current.ergs_remaining); + .saturating_sub(state.vm_local_state.callstack.current.ergs_remaining as u64); 
self.save_output_refunds_enhancement(state, memory, ret_opcode, &mut current_call.farcall); diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs index 7ffaab1392b..43b9b8524e6 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs @@ -43,7 +43,7 @@ impl DynTracer> for CallTracer { .inner .last() .map(|call| call.ergs_remaining + current_ergs) - .unwrap_or(current_ergs); + .unwrap_or(current_ergs) as u64; let mut current_call = Call { r#type: CallType::Call(far_call.glue_into()), @@ -127,7 +127,7 @@ impl CallTracer { current_call.from = current.msg_sender; current_call.to = current.this_address; current_call.value = U256::from(current.context_u128_value); - current_call.gas = current.ergs_remaining; + current_call.gas = current.ergs_remaining as u64; } fn save_output_virtual_blocks( @@ -194,7 +194,7 @@ impl CallTracer { current_call.farcall.gas_used = current_call .farcall .parent_gas - .saturating_sub(state.vm_local_state.callstack.current.ergs_remaining); + .saturating_sub(state.vm_local_state.callstack.current.ergs_remaining as u64); self.save_output_virtual_blocks(state, memory, ret_opcode, &mut current_call.farcall); diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs index 8160f5911a9..a3d5f622286 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs @@ -109,7 +109,7 @@ impl CallTracer { .inner .last() .map(|call| call.ergs_remaining + current.ergs_remaining) - .unwrap_or(current.ergs_remaining); + .unwrap_or(current.ergs_remaining) as u64; current_call.parent_gas = parent_gas; } @@ -177,7 +177,7 @@ impl CallTracer { current_call.from = current.msg_sender; current_call.to = current.this_address; 
current_call.value = U256::from(current.context_u128_value); - current_call.gas = current.ergs_remaining; + current_call.gas = current.ergs_remaining as u64; } fn save_output( @@ -240,7 +240,7 @@ impl CallTracer { // It's safe to unwrap here because we are sure that we have at least one call in the stack let mut current_call = self.stack.pop().unwrap(); current_call.gas_used = - current_call.parent_gas - state.vm_local_state.callstack.current.ergs_remaining; + current_call.parent_gas - state.vm_local_state.callstack.current.ergs_remaining as u64; if current_call.r#type != CallType::NearCall { self.save_output(state, memory, ret_opcode, &mut current_call); diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs index dea1b3d2925..d7119932068 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs @@ -37,7 +37,7 @@ impl Vm { .decommittment_processor .get_decommitted_bytecodes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, - gas_used: gas_remaining_before - gas_remaining_after, + gas_used: (gas_remaining_before - gas_remaining_after) as u64, gas_remaining: gas_remaining_after, computational_gas_used, total_log_queries: total_log_queries_count, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/refunds.rs index 6f942a807e7..bfb06deb28a 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/refunds.rs @@ -88,8 +88,8 @@ impl RefundsTracer { pub(crate) fn get_refunds(&self) -> Refunds { Refunds { - gas_refunded: self.refund_gas, - operator_suggested_refund: self.operator_refund.unwrap_or_default(), + gas_refunded: self.refund_gas as u64, + operator_suggested_refund: 
self.operator_refund.unwrap_or_default() as u64, } } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs index e6e1bc97fdb..3d3649750e3 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs @@ -37,7 +37,7 @@ impl Vm { .decommittment_processor .get_decommitted_bytecodes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, - gas_used: gas_remaining_before - gas_remaining_after, + gas_used: (gas_remaining_before - gas_remaining_after) as u64, gas_remaining: gas_remaining_after, computational_gas_used, total_log_queries: total_log_queries_count, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs index 16dc2617445..8578b73ccfa 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs @@ -186,7 +186,10 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - assert_eq!(ergs_before - ergs_after, result.statistics.gas_used); + assert_eq!( + (ergs_before - ergs_after) as u64, + result.statistics.gas_used + ); TestStatistics { max_used_gas: ergs_before - ergs_after, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs index 4aef01effbd..401c2c12a43 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs @@ -61,8 +61,12 @@ fn test_predetermined_refunded_gas() { let tx: TransactionData = tx.into(); // Overhead let overhead = tx.overhead_gas(); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); 
+ vm.vm.push_raw_transaction( + tx.clone(), + overhead, + result.refunds.gas_refunded as u32, + true, + ); let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); @@ -113,7 +117,7 @@ fn test_predetermined_refunded_gas() { let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); + .push_raw_transaction(tx, overhead, changed_operator_suggested_refund as u32, true); let result = vm.vm.execute(VmExecutionMode::Batch); let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/refunds.rs index 37300c635eb..503aad00c68 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/refunds.rs @@ -88,8 +88,8 @@ impl RefundsTracer { pub(crate) fn get_refunds(&self) -> Refunds { Refunds { - gas_refunded: self.refund_gas, - operator_suggested_refund: self.operator_refund.unwrap_or_default(), + gas_refunded: self.refund_gas as u64, + operator_suggested_refund: self.operator_refund.unwrap_or_default() as u64, } } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs index 10972b5408b..744ac6d4097 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs @@ -37,7 +37,7 @@ impl Vm { .decommittment_processor .get_decommitted_bytecodes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, - gas_used: gas_remaining_before - gas_remaining_after, + gas_used: 
(gas_remaining_before - gas_remaining_after) as u64, gas_remaining: gas_remaining_after, computational_gas_used, total_log_queries: total_log_queries_count, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/refunds.rs index a4e7295eca8..c23ddf47acd 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/refunds.rs @@ -90,8 +90,8 @@ impl RefundsTracer { pub(crate) fn get_refunds(&self) -> Refunds { Refunds { - gas_refunded: self.refund_gas, - operator_suggested_refund: self.operator_refund.unwrap_or_default(), + gas_refunded: self.refund_gas as u64, + operator_suggested_refund: self.operator_refund.unwrap_or_default() as u64, } } diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs index d914aacab17..c43d82b0d28 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs @@ -66,7 +66,7 @@ impl BootloaderState { } } - pub(crate) fn set_refund_for_current_tx(&mut self, refund: u32) { + pub(crate) fn set_refund_for_current_tx(&mut self, refund: u64) { let current_tx = self.current_tx(); // We can't set the refund for the latest tx or using the latest l2_block for fining tx // Because we can fill the whole batch first and then execute txs one by one @@ -100,7 +100,7 @@ impl BootloaderState { &mut self, tx: TransactionData, predefined_overhead: u32, - predefined_refund: u32, + predefined_refund: u64, compressed_bytecodes: Vec, trusted_ergs_limit: U256, chain_id: L2ChainId, diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs index 9f44d848a4e..8f14976be34 100644 --- 
a/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs @@ -12,7 +12,7 @@ pub(crate) struct BootloaderTx { /// Compressed bytecodes, which has been published during this transaction pub(crate) compressed_bytecodes: Vec, /// Refunds for this transaction - pub(crate) refund: u32, + pub(crate) refund: u64, /// Gas overhead pub(crate) gas_overhead: u32, /// Gas Limit for this transaction. It can be different from the gas limit inside the transaction @@ -24,7 +24,7 @@ pub(crate) struct BootloaderTx { impl BootloaderTx { pub(super) fn new( tx: TransactionData, - predefined_refund: u32, + predefined_refund: u64, predefined_overhead: u32, trusted_gas_limit: U256, compressed_bytecodes: Vec, diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs index 59254a88ca6..ba20ab46c56 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs @@ -37,7 +37,7 @@ impl Vm { .decommittment_processor .get_decommitted_bytecodes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, - gas_used: gas_remaining_before - gas_remaining_after, + gas_used: (gas_remaining_before - gas_remaining_after) as u64, gas_remaining: gas_remaining_after, computational_gas_used, total_log_queries: total_log_queries_count, diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs index d7d948c8b57..31b2cce33f1 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs @@ -17,7 +17,7 @@ impl Vm { &mut self, tx: TransactionData, predefined_overhead: u32, - predefined_refund: u32, + predefined_refund: u64, with_compression: bool, 
) { let timestamp = Timestamp(self.state.local_state.timestamp); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs index 5f4bdcde36a..5801270ee7e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs @@ -186,7 +186,10 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - assert_eq!(ergs_before - ergs_after, result.statistics.gas_used); + assert_eq!( + (ergs_before - ergs_after) as u64, + result.statistics.gas_used + ); TestStatistics { max_used_gas: ergs_before - ergs_after, diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tracers/refunds.rs index 24003d6e81b..b1046ee3c0e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/refunds.rs @@ -42,12 +42,12 @@ pub(crate) struct RefundsTracer { // Some(x) means that the bootloader has asked the operator // to provide the refund the user, where `x` is the refund proposed // by the bootloader itself. 
- pending_operator_refund: Option, - refund_gas: u32, - operator_refund: Option, + pending_operator_refund: Option, + refund_gas: u64, + operator_refund: Option, timestamp_initial: Timestamp, timestamp_before_cycle: Timestamp, - gas_remaining_before: u32, + computational_gas_remaining_before: u32, spent_pubdata_counter_before: u32, gas_spent_on_bytecodes_and_long_messages: u32, l1_batch: L1BatchEnv, @@ -63,7 +63,7 @@ impl RefundsTracer { operator_refund: None, timestamp_initial: Timestamp(0), timestamp_before_cycle: Timestamp(0), - gas_remaining_before: 0, + computational_gas_remaining_before: 0, spent_pubdata_counter_before: 0, gas_spent_on_bytecodes_and_long_messages: 0, l1_batch, @@ -74,7 +74,7 @@ impl RefundsTracer { } impl RefundsTracer { - fn requested_refund(&self) -> Option { + fn requested_refund(&self) -> Option { self.pending_operator_refund } @@ -82,7 +82,7 @@ impl RefundsTracer { self.pending_operator_refund = None; } - fn block_overhead_refund(&mut self) -> u32 { + fn block_overhead_refund(&mut self) -> u64 { 0 } @@ -95,13 +95,13 @@ impl RefundsTracer { pub(crate) fn tx_body_refund( &self, - bootloader_refund: u32, - gas_spent_on_pubdata: u32, - tx_gas_limit: u32, + bootloader_refund: u64, + gas_spent_on_pubdata: u64, + tx_gas_limit: u64, current_ergs_per_pubdata_byte: u32, pubdata_published: u32, tx_hash: H256, - ) -> u32 { + ) -> u64 { let total_gas_spent = tx_gas_limit - bootloader_refund; let gas_spent_on_computation = total_gas_spent @@ -153,7 +153,7 @@ impl RefundsTracer { tracing::trace!("Gas spent on pubdata: {}", gas_spent_on_pubdata); tracing::trace!("Pubdata published: {}", pubdata_published); - ceil_div_u256(refund_eth, effective_gas_price.into()).as_u32() + ceil_div_u256(refund_eth, effective_gas_price.into()).as_u64() } pub(crate) fn gas_spent_on_pubdata(&self, vm_local_state: &VmLocalState) -> u32 { @@ -176,9 +176,9 @@ impl DynTracer> for RefundsTracer { self.timestamp_before_cycle = Timestamp(state.vm_local_state.timestamp); let hook = 
VmHook::from_opcode_memory(&state, &data); match hook { - VmHook::NotifyAboutRefund => self.refund_gas = get_vm_hook_params(memory)[0].as_u32(), + VmHook::NotifyAboutRefund => self.refund_gas = get_vm_hook_params(memory)[0].as_u64(), VmHook::AskOperatorForRefund => { - self.pending_operator_refund = Some(get_vm_hook_params(memory)[0].as_u32()) + self.pending_operator_refund = Some(get_vm_hook_params(memory)[0].as_u64()) } _ => {} } @@ -191,7 +191,8 @@ impl DynTracer> for RefundsTracer { impl VmTracer for RefundsTracer { fn initialize_tracer(&mut self, state: &mut ZkSyncVmState) { self.timestamp_initial = Timestamp(state.local_state.timestamp); - self.gas_remaining_before = state.local_state.callstack.current.ergs_remaining; + self.computational_gas_remaining_before = + state.local_state.callstack.current.ergs_remaining; self.spent_pubdata_counter_before = state.local_state.spent_pubdata_counter; } @@ -230,8 +231,8 @@ impl VmTracer for RefundsTracer { self.operator_refund.is_none(), "Operator was asked for refund two times" ); - let gas_spent_on_pubdata = - self.gas_spent_on_pubdata(&state.local_state) - self.spent_pubdata_counter_before; + let gas_spent_on_pubdata = (self.gas_spent_on_pubdata(&state.local_state) + - self.spent_pubdata_counter_before) as u64; let current_tx_index = bootloader_state.current_tx(); let tx_description_offset = @@ -243,7 +244,7 @@ impl VmTracer for RefundsTracer { tx_description_offset + TX_GAS_LIMIT_OFFSET, ) .value - .as_u32(); + .as_u64(); let used_published_storage_slots = state .storage diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs index 19cbf13b275..ed47ace7b89 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs @@ -108,7 +108,7 @@ impl CallTracer { .inner .last() .map(|call| call.ergs_remaining + current.ergs_remaining) - .unwrap_or(current.ergs_remaining); + 
.unwrap_or(current.ergs_remaining) as u64; current_call.parent_gas = parent_gas; } @@ -176,7 +176,7 @@ impl CallTracer { current_call.from = current.msg_sender; current_call.to = current.this_address; current_call.value = U256::from(current.context_u128_value); - current_call.gas = current.ergs_remaining; + current_call.gas = current.ergs_remaining as u64; } fn save_output( @@ -239,7 +239,7 @@ impl CallTracer { // It's safe to unwrap here because we are sure that we have at least one call in the stack let mut current_call = self.stack.pop().unwrap(); current_call.gas_used = - current_call.parent_gas - state.vm_local_state.callstack.current.ergs_remaining; + current_call.parent_gas - state.vm_local_state.callstack.current.ergs_remaining as u64; if current_call.r#type != CallType::NearCall { self.save_output(state, memory, ret_opcode, &mut current_call); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs index c29843d7e4a..1feae1f72e2 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs @@ -36,7 +36,7 @@ impl Vm { .decommittment_processor .get_decommitted_bytecodes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, - gas_used: gas_remaining_before - gas_remaining_after, + gas_used: (gas_remaining_before - gas_remaining_after) as u64, gas_remaining: gas_remaining_after, computational_gas_used, total_log_queries: total_log_queries_count, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs index 20e799a3883..ca99e862b2d 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs +++ 
b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs @@ -85,8 +85,8 @@ impl RefundsTracer { pub(crate) fn get_refunds(&self) -> Refunds { Refunds { - gas_refunded: self.refund_gas, - operator_suggested_refund: self.operator_refund.unwrap_or_default(), + gas_refunded: self.refund_gas as u64, + operator_suggested_refund: self.operator_refund.unwrap_or_default() as u64, } } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs index 20113cf34f1..4a15b07530f 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs @@ -35,7 +35,7 @@ impl Vm { .decommittment_processor .get_decommitted_bytecodes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, - gas_used: gas_remaining_before - gas_remaining_after, + gas_used: (gas_remaining_before - gas_remaining_after) as u64, gas_remaining: gas_remaining_after, computational_gas_used, total_log_queries: total_log_queries_count, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs index 925a294d880..7b687536da5 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs @@ -71,8 +71,8 @@ impl RefundsTracer { } pub(crate) fn get_refunds(&self) -> Refunds { Refunds { - gas_refunded: self.refund_gas, - operator_suggested_refund: self.operator_refund.unwrap_or_default(), + gas_refunded: self.refund_gas as u64, + operator_suggested_refund: self.operator_refund.unwrap_or_default() as u64, } } @@ -396,8 +396,8 @@ fn pubdata_published_for_writes( impl VmTracer for RefundsTracer { fn save_results(&mut self, result: &mut VmExecutionResultAndLogs) { 
result.refunds = Refunds { - gas_refunded: self.refund_gas, - operator_suggested_refund: self.operator_refund.unwrap_or_default(), + gas_refunded: self.refund_gas as u64, + operator_suggested_refund: self.operator_refund.unwrap_or_default() as u64, }; result.statistics.pubdata_published = self.pubdata_published; } diff --git a/core/lib/types/src/tx/mod.rs b/core/lib/types/src/tx/mod.rs index 8e40795baf6..91d44acab74 100644 --- a/core/lib/types/src/tx/mod.rs +++ b/core/lib/types/src/tx/mod.rs @@ -23,8 +23,8 @@ pub struct TransactionExecutionResult { pub hash: H256, pub execution_info: ExecutionMetrics, pub execution_status: TxExecutionStatus, - pub refunded_gas: u32, - pub operator_suggested_refund: u32, + pub refunded_gas: u64, + pub operator_suggested_refund: u64, pub compressed_bytecodes: Vec, pub call_traces: Vec, pub revert_reason: Option, @@ -36,8 +36,8 @@ impl TransactionExecutionResult { None } else { Some(Call::new_high_level( - self.transaction.gas_limit().as_u32(), - self.transaction.gas_limit().as_u32() - self.refunded_gas, + self.transaction.gas_limit().as_u64(), + self.transaction.gas_limit().as_u64() - self.refunded_gas, self.transaction.execute.value, self.transaction.execute.calldata.clone(), vec![], diff --git a/core/lib/types/src/vm_trace.rs b/core/lib/types/src/vm_trace.rs index 9d36900e396..0c2c8f90acb 100644 --- a/core/lib/types/src/vm_trace.rs +++ b/core/lib/types/src/vm_trace.rs @@ -74,7 +74,9 @@ pub enum CallType { #[derive(Clone, Serialize, Deserialize)] /// Represents a call in the VM trace. -pub struct Call { +/// This version of the call represents the call structure before the 1.5.0 protocol version, where +/// all the gas-related fields were represented as `u32` instead of `u64`. +pub struct LegacyCall { /// Type of the call. pub r#type: CallType, /// Address of the caller. @@ -101,10 +103,91 @@ pub struct Call { pub calls: Vec, } +#[derive(Clone, Serialize, Deserialize)] +/// Represents a call in the VM trace. 
+pub struct Call { + /// Type of the call. + pub r#type: CallType, + /// Address of the caller. + pub from: Address, + /// Address of the callee. + pub to: Address, + /// Gas from the parent call. + pub parent_gas: u64, + /// Gas provided for the call. + pub gas: u64, + /// Gas used by the call. + pub gas_used: u64, + /// Value transferred. + pub value: U256, + /// Input data. + pub input: Vec, + /// Output data. + pub output: Vec, + /// Error message provided by vm or some unexpected errors. + pub error: Option, + /// Revert reason. + pub revert_reason: Option, + /// Subcalls. + pub calls: Vec, +} + +impl From for Call { + fn from(legacy_call: LegacyCall) -> Self { + Self { + r#type: legacy_call.r#type, + from: legacy_call.from, + to: legacy_call.to, + parent_gas: legacy_call.parent_gas as u64, + gas: legacy_call.gas as u64, + gas_used: legacy_call.gas_used as u64, + value: legacy_call.value, + input: legacy_call.input, + output: legacy_call.output, + error: legacy_call.error, + revert_reason: legacy_call.revert_reason, + calls: legacy_call.calls, + } + } +} + +#[derive(Debug, Clone)] +pub struct LegacyCallConversionOverflowError; + +impl TryFrom for LegacyCall { + type Error = LegacyCallConversionOverflowError; + + fn try_from(call: Call) -> Result { + Ok(Self { + r#type: call.r#type, + from: call.from, + to: call.to, + parent_gas: call + .parent_gas + .try_into() + .map_err(|_| LegacyCallConversionOverflowError)?, + gas: call + .gas + .try_into() + .map_err(|_| LegacyCallConversionOverflowError)?, + gas_used: call + .gas_used + .try_into() + .map_err(|_| LegacyCallConversionOverflowError)?, + value: call.value, + input: call.input, + output: call.output, + error: call.error, + revert_reason: call.revert_reason, + calls: call.calls, + }) + } +} + impl Call { pub fn new_high_level( - gas: u32, - gas_used: u32, + gas: u64, + gas_used: u64, value: U256, input: Vec, output: Vec, diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs 
b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs index 4d85a7d6f28..3607744fef4 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs @@ -607,19 +607,21 @@ impl TxSender { tx.tx_format() as u8, vm_version, ) as u64; + // We need to ensure that we never use a gas limit that is higher than the maximum allowed + let forced_gas_limit = gas_limit_with_overhead.min(get_max_batch_gas_limit(vm_version)); match &mut tx.common_data { ExecuteTransactionCommon::L1(l1_common_data) => { - l1_common_data.gas_limit = gas_limit_with_overhead.into(); + l1_common_data.gas_limit = forced_gas_limit.into(); let required_funds = l1_common_data.gas_limit * l1_common_data.max_fee_per_gas + tx.execute.value; l1_common_data.to_mint = required_funds; } ExecuteTransactionCommon::L2(l2_common_data) => { - l2_common_data.fee.gas_limit = gas_limit_with_overhead.into(); + l2_common_data.fee.gas_limit = forced_gas_limit.into(); } ExecuteTransactionCommon::ProtocolUpgrade(common_data) => { - common_data.gas_limit = gas_limit_with_overhead.into(); + common_data.gas_limit = forced_gas_limit.into(); let required_funds = common_data.gas_limit * common_data.max_fee_per_gas + tx.execute.value; @@ -882,7 +884,16 @@ impl TxSender { let full_gas_limit = match tx_body_gas_limit.overflowing_add(gas_for_bytecodes_pubdata + overhead) { - (value, false) => value, + (value, false) => { + if value > get_max_batch_gas_limit(protocol_version.into()) { + return Err(SubmitTxError::ExecutionReverted( + "exceeds block gas limit".to_string(), + vec![], + )); + } + + value + } (_, true) => { return Err(SubmitTxError::ExecutionReverted( "exceeds block gas limit".to_string(), diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs index cfb3dab034e..d869edced16 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs +++ 
b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs @@ -200,7 +200,7 @@ impl DebugNamespace { .take() .unwrap_or_default(); let call = Call::new_high_level( - tx.common_data.fee.gas_limit.as_u32(), + tx.common_data.fee.gas_limit.as_u64(), result.statistics.gas_used, tx.execute.value, tx.execute.calldata, diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs index d09a1803cdc..13b484c2081 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs @@ -82,7 +82,7 @@ impl BatchExecutorHandle { } pub(super) async fn execute_tx(&self, tx: Transaction) -> TxExecutionResult { - let tx_gas_limit = tx.gas_limit().as_u32(); + let tx_gas_limit = tx.gas_limit().as_u64(); let (response_sender, response_receiver) = oneshot::channel(); self.commands diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs index 72a3ad1d143..7beccf8c0e9 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs @@ -457,7 +457,7 @@ async fn bootloader_tip_out_of_gas() { finished_batch .block_tip_execution_result .statistics - .gas_used + .computational_gas_used - 10, ), validation_computational_gas_limit: u32::MAX, From 90eb9d8b2f3a50544e4020964dfea90b19a9894b Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Thu, 4 Apr 2024 16:37:53 +0300 Subject: [PATCH 8/9] fix(vm): Increase log demuxer cycles on far calls (#1575) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Increases log demuxer cycles on far calls ## Why ❔ Storage is read when applying far call: https://github.com/matter-labs/era-zk_evm/blob/v1.5.0/src/opcodes/execution/far_call.rs#L159, this adds 
one query to a log query queue ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. --- core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs | 2 +- .../multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs | 1 + .../multivm/src/versions/vm_1_4_2/tracers/circuits_tracer.rs | 1 + core/lib/multivm/src/versions/vm_latest/tests/circuits.rs | 2 +- .../multivm/src/versions/vm_latest/tracers/circuits_capacity.rs | 1 + .../multivm/src/versions/vm_latest/tracers/circuits_tracer.rs | 1 + 6 files changed, 6 insertions(+), 2 deletions(-) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs index 6460fe3a28d..7d0dfd1ed0e 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs @@ -33,7 +33,7 @@ fn test_circuits() { let s = res.statistics.circuit_statistic; // Check `circuit_statistic`. 
const EXPECTED: [f32; 11] = [ - 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.00226, 0.00077, 0.1195, 0.1429, 0.0, + 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.003438, 0.00077, 0.1195, 0.1429, 0.0, ]; let actual = [ (s.main_vm, "main_vm"), diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs index 651e0b11188..dc5d0caa77c 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs @@ -26,6 +26,7 @@ pub(crate) const STORAGE_WRITE_STORAGE_APPLICATION_CYCLES: u32 = 2; pub(crate) const FAR_CALL_RAM_CYCLES: u32 = 1; pub(crate) const FAR_CALL_STORAGE_SORTER_CYCLES: u32 = 1; pub(crate) const FAR_CALL_CODE_DECOMMITTER_SORTER_CYCLES: u32 = 1; +pub(crate) const FAR_CALL_LOG_DEMUXER_CYCLES: u32 = 1; // 5 RAM permutations, because: 1 to read opcode + 2 reads + 2 writes. // 2 reads and 2 writes are needed because unaligned access is implemented with diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_tracer.rs index b3b866f652a..f4045b53dd8 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_tracer.rs @@ -79,6 +79,7 @@ impl DynTracer> for Circuits self.statistics.code_decommitter_sorter_cycles += FAR_CALL_CODE_DECOMMITTER_SORTER_CYCLES; self.statistics.storage_sorter_cycles += FAR_CALL_STORAGE_SORTER_CYCLES; + self.statistics.log_demuxer_cycles += FAR_CALL_LOG_DEMUXER_CYCLES; } Opcode::UMA(UMAOpcode::AuxHeapWrite | UMAOpcode::HeapWrite) => { self.statistics.ram_permutation_cycles += UMA_WRITE_RAM_CYCLES; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs index bdacd4feadb..1242c5f10da 100644 --- 
a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs @@ -35,7 +35,7 @@ fn test_circuits() { let s = res.statistics.circuit_statistic; // Check `circuit_statistic`. const EXPECTED: [f32; 11] = [ - 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.00226, 0.00077, 0.1195, 0.1429, 0.0, + 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.003438, 0.00077, 0.1195, 0.1429, 0.0, ]; let actual = [ (s.main_vm, "main_vm"), diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs index 651e0b11188..dc5d0caa77c 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs @@ -26,6 +26,7 @@ pub(crate) const STORAGE_WRITE_STORAGE_APPLICATION_CYCLES: u32 = 2; pub(crate) const FAR_CALL_RAM_CYCLES: u32 = 1; pub(crate) const FAR_CALL_STORAGE_SORTER_CYCLES: u32 = 1; pub(crate) const FAR_CALL_CODE_DECOMMITTER_SORTER_CYCLES: u32 = 1; +pub(crate) const FAR_CALL_LOG_DEMUXER_CYCLES: u32 = 1; // 5 RAM permutations, because: 1 to read opcode + 2 reads + 2 writes. 
// 2 reads and 2 writes are needed because unaligned access is implemented with diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs index 3c4214f1409..55199811f16 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs @@ -79,6 +79,7 @@ impl DynTracer> for Circuits self.statistics.code_decommitter_sorter_cycles += FAR_CALL_CODE_DECOMMITTER_SORTER_CYCLES; self.statistics.storage_sorter_cycles += FAR_CALL_STORAGE_SORTER_CYCLES; + self.statistics.log_demuxer_cycles += FAR_CALL_LOG_DEMUXER_CYCLES; } Opcode::UMA(UMAOpcode::AuxHeapWrite | UMAOpcode::HeapWrite) => { self.statistics.ram_permutation_cycles += UMA_WRITE_RAM_CYCLES; From 9fcb87e9b126e2ad5465c6e2326d87cdc2f1a5cb Mon Sep 17 00:00:00 2001 From: Stanislav Bezkorovainyi Date: Thu, 4 Apr 2024 19:31:12 +0200 Subject: [PATCH 9/9] feat: Use config for max number of circuits (#1573) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ⚠️ This PR requires a new config variable ⚠️ Use config for managing the maximal number of circuits ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. 
--------- Co-authored-by: perekopskiy <53865202+perekopskiy@users.noreply.github.com> --- core/lib/config/src/configs/chain.rs | 6 ++ core/lib/config/src/testonly.rs | 1 + core/lib/env_config/src/chain.rs | 1 + core/lib/multivm/src/utils.rs | 20 ++++ .../src/versions/vm_1_4_2/constants.rs | 2 + core/lib/protobuf_config/src/chain.rs | 4 + .../lib/protobuf_config/src/proto/chain.proto | 1 + .../criteria/geometry_seal_criteria.rs | 96 +++++++++---------- etc/env/base/chain.toml | 4 + etc/env/file_based/general.yaml | 1 + 10 files changed, 84 insertions(+), 52 deletions(-) diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index f09c5911806..bde96012347 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -155,6 +155,11 @@ pub struct StateKeeperConfig { /// Number of keys that is processed by enum_index migration in State Keeper each L1 batch. pub enum_index_migration_chunk_size: Option, + /// The maximal number of circuits that a batch can support. + /// Note, that this number corresponds to the "base layer" circuits, i.e. it does not include + /// the recursion layers' circuits. + pub max_circuits_per_batch: usize, + // Base system contract hashes, required only for generating genesis config. 
// #PLA-811 #[deprecated(note = "Use GenesisConfig::bootloader_hash instead")] @@ -199,6 +204,7 @@ impl StateKeeperConfig { virtual_blocks_interval: 1, virtual_blocks_per_miniblock: 1, enum_index_migration_chunk_size: None, + max_circuits_per_batch: 24100, bootloader_hash: None, default_aa_hash: None, l1_batch_commit_data_generator_mode: L1BatchCommitDataGeneratorMode::Rollup, diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 7f6215fceae..c4e9e23a25e 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -172,6 +172,7 @@ impl Distribution for EncodeDist { virtual_blocks_interval: self.sample(rng), virtual_blocks_per_miniblock: self.sample(rng), enum_index_migration_chunk_size: self.sample(rng), + max_circuits_per_batch: self.sample(rng), // These values are not involved into files serialization skip them fee_account_addr: None, bootloader_hash: None, diff --git a/core/lib/env_config/src/chain.rs b/core/lib/env_config/src/chain.rs index b8554541c9c..c5182739929 100644 --- a/core/lib/env_config/src/chain.rs +++ b/core/lib/env_config/src/chain.rs @@ -105,6 +105,7 @@ mod tests { "0x0100055b041eb28aff6e3a6e0f37c31fd053fc9ef142683b05e5f0aee6934066", )), l1_batch_commit_data_generator_mode, + max_circuits_per_batch: 24100, } } diff --git a/core/lib/multivm/src/utils.rs b/core/lib/multivm/src/utils.rs index 89b61519c61..9dbb2389371 100644 --- a/core/lib/multivm/src/utils.rs +++ b/core/lib/multivm/src/utils.rs @@ -387,3 +387,23 @@ pub fn get_max_batch_gas_limit(version: VmVersion) -> u64 { VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::BLOCK_GAS_LIMIT as u64, } } + +pub fn get_max_batch_base_layer_circuits(version: VmVersion) -> usize { + match version { + VmVersion::M5WithRefunds + | VmVersion::M5WithoutRefunds + | VmVersion::M6Initial + | VmVersion::M6BugWithCompressionFixed + | VmVersion::Vm1_3_2 + | VmVersion::VmVirtualBlocks + | VmVersion::VmVirtualBlocksRefundsEnhancement + | 
VmVersion::VmBoojumIntegration + | VmVersion::Vm1_4_1 + | VmVersion::Vm1_4_2 => { + // For pre-v1.4.2 the maximal number of circuits has not been calculated, but since + // these are used only for replaying transactions, we'll reuse the same value as for v1.4.2. + // We avoid providing `0` for the old versions to avoid potential errors when working with old versions. + crate::vm_1_4_2::constants::MAX_BASE_LAYER_CIRCUITS + } + } +} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/constants.rs b/core/lib/multivm/src/versions/vm_1_4_2/constants.rs index e2bb03f56e3..79f20660c14 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/constants.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/constants.rs @@ -13,6 +13,8 @@ pub(crate) const BOOTLOADER_BATCH_TIP_OVERHEAD: u32 = 170_000_000; pub(crate) const BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD: u32 = 5000; pub(crate) const BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD: u32 = 1500; +pub(crate) const MAX_BASE_LAYER_CIRCUITS: usize = 24100; + /// The size of the bootloader memory in bytes which is used by the protocol. /// While the maximal possible size is a lot higher, we restrict ourselves to a certain limit to reduce /// the requirements on RAM. 
diff --git a/core/lib/protobuf_config/src/chain.rs b/core/lib/protobuf_config/src/chain.rs index c3a25385921..eaae1d9e79c 100644 --- a/core/lib/protobuf_config/src/chain.rs +++ b/core/lib/protobuf_config/src/chain.rs @@ -81,6 +81,9 @@ impl ProtoRepr for proto::StateKeeper { .map(|x| x.try_into()) .transpose() .context("enum_index_migration_chunk_size")?, + max_circuits_per_batch: required(&self.max_circuits_per_batch) + .and_then(|x| Ok((*x).try_into()?)) + .context("max_circuits_per_batch")?, // We need these values only for instantiating configs from environmental variables, so it's not // needed during the initialization from files @@ -122,6 +125,7 @@ impl ProtoRepr for proto::StateKeeper { .enum_index_migration_chunk_size .as_ref() .map(|x| (*x).try_into().unwrap()), + max_circuits_per_batch: Some(this.max_circuits_per_batch.try_into().unwrap()), } } } diff --git a/core/lib/protobuf_config/src/proto/chain.proto b/core/lib/protobuf_config/src/proto/chain.proto index a8eb7089a7d..bbbc1bc6af2 100644 --- a/core/lib/protobuf_config/src/proto/chain.proto +++ b/core/lib/protobuf_config/src/proto/chain.proto @@ -34,6 +34,7 @@ message StateKeeper { optional uint32 virtual_blocks_interval = 23; // required optional uint32 virtual_blocks_per_miniblock = 24; // required optional uint64 enum_index_migration_chunk_size = 26; // optional + optional uint64 max_circuits_per_batch = 27; // required } message OperationsManager { diff --git a/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs b/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs index e2a7b53c1b7..8e838dacab5 100644 --- a/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs +++ b/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs @@ -1,8 +1,8 @@ -use std::fmt; - -use multivm::utils::circuit_statistics_bootloader_batch_tip_overhead; +use multivm::utils::{ + 
circuit_statistics_bootloader_batch_tip_overhead, get_max_batch_base_layer_circuits, +}; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_types::{tx::tx_execution_info::ExecutionMetrics, ProtocolVersionId}; +use zksync_types::ProtocolVersionId; // Local uses use crate::state_keeper::seal_criteria::{SealCriterion, SealData, SealResolution}; @@ -10,19 +10,11 @@ use crate::state_keeper::seal_criteria::{SealCriterion, SealData, SealResolution // Collected vm execution metrics should fit into geometry limits. // Otherwise witness generation will fail and proof won't be generated. -#[derive(Debug, Default)] +/// Checks whether we should exclude the transaction because we don't have enough circuits for it. +#[derive(Debug)] pub struct CircuitsCriterion; -trait MetricExtractor { - const PROM_METRIC_CRITERION_NAME: &'static str; - fn limit_per_block(protocol_version: ProtocolVersionId) -> usize; - fn extract(metric: &ExecutionMetrics) -> usize; -} - -impl SealCriterion for T -where - T: MetricExtractor + fmt::Debug + Send + Sync + 'static, -{ +impl SealCriterion for CircuitsCriterion { fn should_seal( &self, config: &StateKeeperConfig, @@ -30,29 +22,41 @@ where _tx_count: usize, block_data: &SealData, tx_data: &SealData, - protocol_version_id: ProtocolVersionId, + protocol_version: ProtocolVersionId, ) -> SealResolution { - let reject_bound = (T::limit_per_block(protocol_version_id) as f64 + let max_allowed_base_layer_circuits = + get_max_batch_base_layer_circuits(protocol_version.into()); + assert!( + config.max_circuits_per_batch <= max_allowed_base_layer_circuits, + "Configured max_circuits_per_batch ({}) must be lower than the constant MAX_BASE_LAYER_CIRCUITS={} for protocol version {}", + config.max_circuits_per_batch, max_allowed_base_layer_circuits, protocol_version as u16 + ); + + let batch_tip_circuit_overhead = + circuit_statistics_bootloader_batch_tip_overhead(protocol_version.into()); + + // Double checking that it is possible to seal batches + 
assert!( + batch_tip_circuit_overhead < config.max_circuits_per_batch, + "Invalid circuit criteria" + ); + + let reject_bound = (config.max_circuits_per_batch as f64 * config.reject_tx_at_geometry_percentage) - .round(); - let close_bound = (T::limit_per_block(protocol_version_id) as f64 + .round() as usize; + let include_and_seal_bound = (config.max_circuits_per_batch as f64 * config.close_block_at_geometry_percentage) - .round(); + .round() as usize; - if T::extract(&tx_data.execution_metrics) - + circuit_statistics_bootloader_batch_tip_overhead(protocol_version_id.into()) - > reject_bound as usize - { + let used_circuits_tx = tx_data.execution_metrics.circuit_statistic.total(); + let used_circuits_batch = block_data.execution_metrics.circuit_statistic.total(); + + if used_circuits_tx + batch_tip_circuit_overhead >= reject_bound { SealResolution::Unexecutable("ZK proof cannot be generated for a transaction".into()) - } else if T::extract(&block_data.execution_metrics) - + circuit_statistics_bootloader_batch_tip_overhead(protocol_version_id.into()) - >= T::limit_per_block(protocol_version_id) + } else if used_circuits_batch + batch_tip_circuit_overhead >= config.max_circuits_per_batch { SealResolution::ExcludeAndSeal - } else if T::extract(&block_data.execution_metrics) - + circuit_statistics_bootloader_batch_tip_overhead(protocol_version_id.into()) - > close_bound as usize - { + } else if used_circuits_batch + batch_tip_circuit_overhead >= include_and_seal_bound { SealResolution::IncludeAndSeal } else { SealResolution::NoSeal @@ -60,34 +64,22 @@ where } fn prom_criterion_name(&self) -> &'static str { - T::PROM_METRIC_CRITERION_NAME - } -} - -impl MetricExtractor for CircuitsCriterion { - const PROM_METRIC_CRITERION_NAME: &'static str = "circuits"; - - fn limit_per_block(_protocol_version_id: ProtocolVersionId) -> usize { - const MAX_NUMBER_OF_CIRCUITS: usize = 24100; - - MAX_NUMBER_OF_CIRCUITS - } - - fn extract(metrics: &ExecutionMetrics) -> usize { - 
metrics.circuit_statistic.total() + "circuits_criterion" } } - #[cfg(test)] mod tests { - use zksync_types::circuit::CircuitStatistic; + use zksync_types::{circuit::CircuitStatistic, tx::ExecutionMetrics}; use super::*; + const MAX_CIRCUITS_PER_BATCH: usize = 20_000; + fn get_config() -> StateKeeperConfig { StateKeeperConfig { close_block_at_geometry_percentage: 0.9, reject_tx_at_geometry_percentage: 0.9, + max_circuits_per_batch: MAX_CIRCUITS_PER_BATCH, ..Default::default() } } @@ -182,7 +174,7 @@ mod tests { let protocol_version = ProtocolVersionId::latest(); let block_execution_metrics = ExecutionMetrics { circuit_statistic: CircuitStatistic { - main_vm: (CircuitsCriterion::limit_per_block(protocol_version) / 2) as f32, + main_vm: (MAX_CIRCUITS_PER_BATCH / 2) as f32, ..CircuitStatistic::default() }, ..ExecutionMetrics::default() @@ -195,7 +187,7 @@ mod tests { let block_execution_metrics = ExecutionMetrics { circuit_statistic: CircuitStatistic { - main_vm: (CircuitsCriterion::limit_per_block(protocol_version) + main_vm: (MAX_CIRCUITS_PER_BATCH - 1 - circuit_statistics_bootloader_batch_tip_overhead( ProtocolVersionId::latest().into(), @@ -213,7 +205,7 @@ mod tests { let block_execution_metrics = ExecutionMetrics { circuit_statistic: CircuitStatistic { - main_vm: CircuitsCriterion::limit_per_block(protocol_version) as f32, + main_vm: MAX_CIRCUITS_PER_BATCH as f32, ..CircuitStatistic::default() }, ..ExecutionMetrics::default() @@ -227,7 +219,7 @@ mod tests { let tx_execution_metrics = ExecutionMetrics { circuit_statistic: CircuitStatistic { - main_vm: CircuitsCriterion::limit_per_block(protocol_version) as f32 + main_vm: MAX_CIRCUITS_PER_BATCH as f32 * config.reject_tx_at_geometry_percentage as f32 + 1.0, ..CircuitStatistic::default() diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml index 0b152326f5c..e82565e2b1e 100644 --- a/etc/env/base/chain.toml +++ b/etc/env/base/chain.toml @@ -40,6 +40,10 @@ reject_tx_at_geometry_percentage = 0.95 # 
Configuration option for block to be sealed in case # it takes more percentage of the max block capacity than this value. reject_tx_at_eth_params_percentage = 0.95 +# The maximal number of circuits that a batch can support. +# Note, that this number corresponds to the "base layer" circuits, i.e. it does not include +# the recursion layers' circuits. +max_circuits_per_batch = 24100 # Configuration option for block to be sealed in case # it takes more percentage of the max block gas capacity than this value. diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 5415c70e572..3efa9081468 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -88,6 +88,7 @@ state_keeper: save_call_traces: true virtual_blocks_interval: 1 virtual_blocks_per_miniblock: 1 + max_circuits_per_batch: 24100 mempool: delay_interval: 100 sync_interval_ms: 10