diff --git a/core/bin/block_reverter/src/main.rs b/core/bin/block_reverter/src/main.rs
index 9a687fc890f..3e5d5595069 100644
--- a/core/bin/block_reverter/src/main.rs
+++ b/core/bin/block_reverter/src/main.rs
@@ -8,7 +8,7 @@ use zksync_config::{
 use zksync_core::block_reverter::{
     BlockReverter, BlockReverterEthConfig, BlockReverterFlags, L1ExecutedBatchesRevert, NodeRole,
 };
-use zksync_dal::{ConnectionPool, Server};
+use zksync_dal::{ConnectionPool, Core};
 use zksync_env_config::FromEnv;
 use zksync_types::{L1BatchNumber, U256};
 
@@ -96,7 +96,7 @@ async fn main() -> anyhow::Result<()> {
     let postgres_config = PostgresConfig::from_env().context("PostgresConfig::from_env()")?;
     let config = BlockReverterEthConfig::new(eth_sender, contracts, eth_client.web3_url.clone());
 
-    let connection_pool = ConnectionPool::<Server>::builder(
+    let connection_pool = ConnectionPool::<Core>::builder(
         postgres_config.master_url()?,
         postgres_config.max_connections()?,
     )
diff --git a/core/bin/contract-verifier/src/main.rs b/core/bin/contract-verifier/src/main.rs
index c8977f99daf..fda771a5e93 100644
--- a/core/bin/contract-verifier/src/main.rs
+++ b/core/bin/contract-verifier/src/main.rs
@@ -8,7 +8,7 @@ use zksync_config::{
     configs::{ObservabilityConfig, PrometheusConfig},
     ApiConfig, ContractVerifierConfig, PostgresConfig,
 };
-use zksync_dal::{ConnectionPool, Server, ServerDals};
+use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_env_config::FromEnv;
 use zksync_queued_job_processor::JobProcessor;
 use zksync_utils::wait_for_tasks::wait_for_tasks;
@@ -20,8 +20,8 @@ pub mod verifier;
 pub mod zksolc_utils;
 pub mod zkvyper_utils;
 
-async fn update_compiler_versions(connection_pool: &ConnectionPool<Server>) {
-    let mut storage = connection_pool.access_storage().await.unwrap();
+async fn update_compiler_versions(connection_pool: &ConnectionPool<Core>) {
+    let mut storage = connection_pool.connection().await.unwrap();
     let mut transaction = storage.start_transaction().await.unwrap();
 
     let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into());
@@ -134,7 +134,7 @@ async fn main() -> anyhow::Result<()> {
         ..ApiConfig::from_env().context("ApiConfig")?.prometheus
     };
     let postgres_config = PostgresConfig::from_env().context("PostgresConfig")?;
-    let pool = ConnectionPool::<Server>::singleton(
+    let pool = ConnectionPool::<Core>::singleton(
         postgres_config
             .master_url()
             .context("Master DB URL is absent")?,
diff --git a/core/bin/contract-verifier/src/verifier.rs b/core/bin/contract-verifier/src/verifier.rs
index 55ac7177f6d..c6c70b32ae9 100644
--- a/core/bin/contract-verifier/src/verifier.rs
+++ b/core/bin/contract-verifier/src/verifier.rs
@@ -12,7 +12,7 @@ use lazy_static::lazy_static;
 use regex::Regex;
 use tokio::time;
 use zksync_config::ContractVerifierConfig;
-use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
 use zksync_env_config::FromEnv;
 use zksync_queued_job_processor::{async_trait, JobProcessor};
 use zksync_types::{
@@ -42,11 +42,11 @@ enum ConstructorArgs {
 #[derive(Debug)]
 pub struct ContractVerifier {
     config: ContractVerifierConfig,
-    connection_pool: ConnectionPool<Server>,
+    connection_pool: ConnectionPool<Core>,
 }
 
 impl ContractVerifier {
-    pub fn new(config: ContractVerifierConfig, connection_pool: ConnectionPool<Server>) -> Self {
+    pub fn new(config: ContractVerifierConfig, connection_pool: ConnectionPool<Core>) -> Self {
         Self {
             config,
             connection_pool,
@@ -54,7 +54,7 @@ impl ContractVerifier {
     }
 
     async fn verify(
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         mut request: VerificationRequest,
         config: ContractVerifierConfig,
     ) -> Result<VerificationInfo, ContractVerifierError> {
@@ -429,7 +429,7 @@ impl ContractVerifier {
     }
 
     async fn process_result(
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         request_id: usize,
         verification_result: Result<VerificationInfo, ContractVerifierError>,
     ) {
@@ -471,7 +471,7 @@ impl JobProcessor for ContractVerifier {
     const BACKOFF_MULTIPLIER: u64 = 1;
 
     async fn get_next_job(&self) -> anyhow::Result<Option<Self::Job>> {
-        let mut connection = self.connection_pool.access_storage().await.unwrap();
+        let mut connection = self.connection_pool.connection().await.unwrap();
 
         // Time overhead for all operations except for compilation.
         const TIME_OVERHEAD: Duration = Duration::from_secs(10);
@@ -489,7 +489,7 @@ impl JobProcessor for ContractVerifier {
     }
 
     async fn save_failure(&self, job_id: usize, _started_at: Instant, error: String) {
-        let mut connection = self.connection_pool.access_storage().await.unwrap();
+        let mut connection = self.connection_pool.connection().await.unwrap();
 
         connection
             .contract_verification_dal()
@@ -515,7 +515,7 @@ impl JobProcessor for ContractVerifier {
         let config: ContractVerifierConfig =
             ContractVerifierConfig::from_env().context("ContractVerifierConfig")?;
-        let mut connection = connection_pool.access_storage().await.unwrap();
+        let mut connection = connection_pool.connection().await.unwrap();
 
         let job_id = job.id;
         let verification_result = Self::verify(&mut connection, job, config).await;
diff --git a/core/bin/external_node/src/init.rs b/core/bin/external_node/src/init.rs
index de8844cd2d2..472f510d4f3 100644
--- a/core/bin/external_node/src/init.rs
+++ b/core/bin/external_node/src/init.rs
@@ -3,7 +3,7 @@
 use anyhow::Context as _;
 use zksync_basic_types::{L1BatchNumber, L2ChainId};
 use zksync_core::sync_layer::genesis::perform_genesis_if_needed;
-use zksync_dal::{ConnectionPool, Server, ServerDals};
+use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_health_check::AppHealthCheck;
 use zksync_object_store::ObjectStoreFactory;
 use zksync_snapshots_applier::SnapshotsApplierConfig;
@@ -20,13 +20,13 @@ enum InitDecision {
 }
 
 pub(crate) async fn ensure_storage_initialized(
-    pool: &ConnectionPool<Server>,
+    pool: &ConnectionPool<Core>,
     main_node_client: &HttpClient,
     app_health: &AppHealthCheck,
     l2_chain_id: L2ChainId,
     consider_snapshot_recovery: bool,
 ) -> anyhow::Result<()> {
-    let mut storage = pool.access_storage_tagged("en").await?;
+    let mut storage = pool.connection_tagged("en").await?;
     let genesis_l1_batch = storage
         .blocks_dal()
         .get_l1_batch_header(L1BatchNumber(0))
@@ -67,7 +67,7 @@ pub(crate) async fn ensure_storage_initialized(
     tracing::info!("Chosen node initialization strategy: {decision:?}");
     match decision {
         InitDecision::Genesis => {
-            let mut storage = pool.access_storage_tagged("en").await?;
+            let mut storage = pool.connection_tagged("en").await?;
             perform_genesis_if_needed(&mut storage, l2_chain_id, main_node_client)
                 .await
                 .context("performing genesis failed")?;
diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs
index 2205abeb077..b3924242c16 100644
--- a/core/bin/external_node/src/main.rs
+++ b/core/bin/external_node/src/main.rs
@@ -33,7 +33,7 @@ use zksync_core::{
         MainNodeClient, SyncState,
     },
 };
-use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Server, ServerDals};
+use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core, CoreDal};
 use zksync_db_connection::healthcheck::ConnectionPoolHealthCheck;
 use zksync_health_check::{AppHealthCheck, HealthStatus, ReactiveHealthCheck};
 use zksync_state::PostgresStorageCaches;
@@ -61,7 +61,7 @@ async fn build_state_keeper(
     action_queue: ActionQueue,
     state_keeper_db_path: String,
     config: &ExternalNodeConfig,
-    connection_pool: ConnectionPool<Server>,
+    connection_pool: ConnectionPool<Core>,
     sync_state: SyncState,
     l2_erc20_bridge_addr: Address,
     miniblock_sealer_handle: MiniblockSealerHandle,
@@ -113,7 +113,7 @@ async fn build_state_keeper(
 
 async fn init_tasks(
     config: &ExternalNodeConfig,
-    connection_pool: ConnectionPool<Server>,
+    connection_pool: ConnectionPool<Core>,
     main_node_client: HttpClient,
     task_handles: &mut Vec<task::JoinHandle<anyhow::Result<()>>>,
     app_health: &AppHealthCheck,
@@ -143,7 +143,7 @@ async fn init_tasks(
     task_handles.push(tokio::spawn(async move {
         loop {
             let protocol_version = pool
-                .access_storage()
+                .connection()
                 .await
                 .unwrap()
                 .protocol_versions_dal()
@@ -222,7 +222,7 @@ async fn init_tasks(
         }
     }));
 
-    let singleton_pool_builder = ConnectionPool::<Server>::singleton(&config.postgres.database_url);
+    let singleton_pool_builder = ConnectionPool::<Core>::singleton(&config.postgres.database_url);
 
     let metadata_calculator_config = MetadataCalculatorConfig {
         db_path: config.required.merkle_tree_path.clone(),
@@ -497,13 +497,13 @@ async fn main() -> anyhow::Result<()> {
         config.consensus = None;
     }
     if let Some(threshold) = config.optional.slow_query_threshold() {
-        ConnectionPool::<Server>::global_config().set_slow_query_threshold(threshold)?;
+        ConnectionPool::<Core>::global_config().set_slow_query_threshold(threshold)?;
     }
     if let Some(threshold) = config.optional.long_connection_threshold() {
-        ConnectionPool::<Server>::global_config().set_long_connection_threshold(threshold)?;
+        ConnectionPool::<Core>::global_config().set_long_connection_threshold(threshold)?;
     }
 
-    let connection_pool = ConnectionPool::<Server>::builder(
+    let connection_pool = ConnectionPool::<Core>::builder(
         &config.postgres.database_url,
         config.postgres.max_connections,
     )
@@ -598,7 +598,7 @@ async fn main() -> anyhow::Result<()> {
     }
     if opt.revert_pending_l1_batch {
         tracing::info!("Rolling pending L1 batch back..");
-        let mut connection = connection_pool.access_storage().await?;
+        let mut connection = connection_pool.connection().await?;
         let sealed_l1_batch_number = connection
             .blocks_dal()
             .get_sealed_l1_batch_number()
diff --git a/core/bin/external_node/src/version_sync_task.rs b/core/bin/external_node/src/version_sync_task.rs
index ffb2c6eba37..524082225d6 100644
--- a/core/bin/external_node/src/version_sync_task.rs
+++ b/core/bin/external_node/src/version_sync_task.rs
@@ -2,7 +2,7 @@ use std::cmp::Ordering;
 
 use anyhow::Context;
 use zksync_basic_types::{L1BatchNumber, MiniblockNumber};
-use zksync_dal::{ConnectionPool, Server, ServerDals};
+use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_types::ProtocolVersionId;
 use zksync_web3_decl::{
     jsonrpsee::http_client::HttpClient,
@@ -27,12 +27,12 @@ pub async fn get_l1_batch_remote_protocol_version(
 
 // Synchronizes protocol version in `l1_batches` and `miniblocks` tables between EN and main node.
 pub async fn sync_versions(
-    connection_pool: ConnectionPool<Server>,
+    connection_pool: ConnectionPool<Core>,
     main_node_client: HttpClient,
 ) -> anyhow::Result<()> {
     tracing::info!("Starting syncing protocol version of blocks");
 
-    let mut connection = connection_pool.access_storage().await?;
+    let mut connection = connection_pool.connection().await?;
 
     // Load the first local batch number with version 22.
     let Some(local_first_v22_l1_batch) = connection
diff --git a/core/bin/snapshots_creator/src/creator.rs b/core/bin/snapshots_creator/src/creator.rs
index 753a9bf9402..6f28d69d454 100644
--- a/core/bin/snapshots_creator/src/creator.rs
+++ b/core/bin/snapshots_creator/src/creator.rs
@@ -5,7 +5,7 @@ use std::sync::Arc;
 use anyhow::Context as _;
 use tokio::sync::Semaphore;
 use zksync_config::SnapshotsCreatorConfig;
-use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
 use zksync_object_store::ObjectStore;
 use zksync_types::{
     snapshots::{
@@ -60,16 +60,16 @@ impl SnapshotProgress {
 #[derive(Debug)]
 pub(crate) struct SnapshotCreator {
     pub blob_store: Arc<dyn ObjectStore>,
-    pub master_pool: ConnectionPool<Server>,
-    pub replica_pool: ConnectionPool<Server>,
+    pub master_pool: ConnectionPool<Core>,
+    pub replica_pool: ConnectionPool<Core>,
     #[cfg(test)]
     pub event_listener: Box<dyn HandleEvent>,
 }
 
 impl SnapshotCreator {
-    async fn connect_to_replica(&self) -> anyhow::Result<StorageProcessor<'_, Server>> {
+    async fn connect_to_replica(&self) -> anyhow::Result<Connection<'_, Core>> {
         self.replica_pool
-            .access_storage_tagged("snapshots_creator")
+            .connection_tagged("snapshots_creator")
             .await
     }
@@ -124,7 +124,7 @@ impl SnapshotCreator {
 
         let mut master_conn = self
             .master_pool
-            .access_storage_tagged("snapshots_creator")
+            .connection_tagged("snapshots_creator")
             .await?;
         master_conn
             .snapshots_dal()
@@ -192,7 +192,7 @@ impl SnapshotCreator {
         config: &SnapshotsCreatorConfig,
         min_chunk_count: u64,
         latest_snapshot: Option<&SnapshotMetadata>,
-        conn: &mut StorageProcessor<'_, Server>,
+        conn: &mut Connection<'_, Core>,
     ) -> anyhow::Result<Option<SnapshotProgress>> {
         // We subtract 1 so that after restore, EN node has at least one L1 batch to fetch
         let sealed_l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await?;
@@ -237,7 +237,7 @@ impl SnapshotCreator {
     ) -> anyhow::Result<Option<SnapshotProgress>> {
         let mut master_conn = self
             .master_pool
-            .access_storage_tagged("snapshots_creator")
+            .connection_tagged("snapshots_creator")
             .await?;
         let latest_snapshot = master_conn
             .snapshots_dal()
@@ -302,7 +302,7 @@ impl SnapshotCreator {
 
         let mut master_conn = self
             .master_pool
-            .access_storage_tagged("snapshots_creator")
+            .connection_tagged("snapshots_creator")
             .await?;
         master_conn
             .snapshots_dal()
diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index 5f36ca1bb97..fabc2c3b1ce 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -16,7 +16,7 @@ use zksync_config::{
     configs::{ObservabilityConfig, PrometheusConfig},
     PostgresConfig, SnapshotsCreatorConfig,
 };
-use zksync_dal::{ConnectionPool, Server};
+use zksync_dal::{ConnectionPool, Core};
 use zksync_env_config::{object_store::SnapshotsObjectStoreConfig, FromEnv};
 use zksync_object_store::ObjectStoreFactory;
@@ -81,14 +81,14 @@ async fn main() -> anyhow::Result<()> {
     let creator_config =
         SnapshotsCreatorConfig::from_env().context("SnapshotsCreatorConfig::from_env")?;
 
-    let replica_pool = ConnectionPool::<Server>::builder(
+    let replica_pool = ConnectionPool::<Core>::builder(
         postgres_config.replica_url()?,
         creator_config.concurrent_queries_count,
     )
     .build()
     .await?;
 
-    let master_pool = ConnectionPool::<Server>::singleton(postgres_config.master_url()?)
+    let master_pool = ConnectionPool::<Core>::singleton(postgres_config.master_url()?)
         .build()
         .await?;
diff --git a/core/bin/snapshots_creator/src/tests.rs b/core/bin/snapshots_creator/src/tests.rs
index 9d2aab69029..ee2251d1f8a 100644
--- a/core/bin/snapshots_creator/src/tests.rs
+++ b/core/bin/snapshots_creator/src/tests.rs
@@ -10,7 +10,7 @@ use std::{
 };
 
 use rand::{thread_rng, Rng};
-use zksync_dal::{ServerDals, StorageProcessor};
+use zksync_dal::{Connection, CoreDal};
 use zksync_object_store::ObjectStore;
 use zksync_types::{
     block::{L1BatchHeader, MiniblockHeader},
@@ -61,7 +61,7 @@ impl HandleEvent for TestEventListener {
 }
 
 impl SnapshotCreator {
-    fn for_tests(blob_store: Arc<dyn ObjectStore>, pool: ConnectionPool<Server>) -> Self {
+    fn for_tests(blob_store: Arc<dyn ObjectStore>, pool: ConnectionPool<Core>) -> Self {
         Self {
             blob_store,
             master_pool: pool.clone(),
@@ -132,7 +132,7 @@ struct ExpectedOutputs {
 }
 
 async fn create_miniblock(
-    conn: &mut StorageProcessor<'_, Server>,
+    conn: &mut Connection<'_, Core>,
     miniblock_number: MiniblockNumber,
     block_logs: Vec<StorageLog>,
 ) {
@@ -162,7 +162,7 @@ async fn create_miniblock(
 }
 
 async fn create_l1_batch(
-    conn: &mut StorageProcessor<'_, Server>,
+    conn: &mut Connection<'_, Core>,
     l1_batch_number: L1BatchNumber,
     logs_for_initial_writes: &[StorageLog],
 ) {
@@ -186,7 +186,7 @@ async fn create_l1_batch(
 
 async fn prepare_postgres(
     rng: &mut impl Rng,
-    conn: &mut StorageProcessor<'_, Server>,
+    conn: &mut Connection<'_, Core>,
     block_count: u32,
 ) -> ExpectedOutputs {
     conn.protocol_versions_dal()
@@ -241,13 +241,13 @@ async fn prepare_postgres(
 
 #[tokio::test]
 async fn persisting_snapshot_metadata() {
-    let pool = ConnectionPool::<Server>::test_pool().await;
+    let pool = ConnectionPool::<Core>::test_pool().await;
     let mut rng = thread_rng();
     let object_store_factory = ObjectStoreFactory::mock();
     let object_store = object_store_factory.create_store().await;
 
     // Insert some data to Postgres.
-    let mut conn = pool.access_storage().await.unwrap();
+    let mut conn = pool.connection().await.unwrap();
     prepare_postgres(&mut rng, &mut conn, 10).await;
 
     SnapshotCreator::for_tests(object_store, pool.clone())
@@ -290,11 +290,11 @@ async fn persisting_snapshot_metadata() {
 
 #[tokio::test]
 async fn persisting_snapshot_factory_deps() {
-    let pool = ConnectionPool::<Server>::test_pool().await;
+    let pool = ConnectionPool::<Core>::test_pool().await;
     let mut rng = thread_rng();
     let object_store_factory = ObjectStoreFactory::mock();
     let object_store = object_store_factory.create_store().await;
-    let mut conn = pool.access_storage().await.unwrap();
+    let mut conn = pool.connection().await.unwrap();
     let expected_outputs = prepare_postgres(&mut rng, &mut conn, 10).await;
 
     SnapshotCreator::for_tests(object_store, pool.clone())
@@ -312,11 +312,11 @@ async fn persisting_snapshot_factory_deps() {
 
 #[tokio::test]
 async fn persisting_snapshot_logs() {
-    let pool = ConnectionPool::<Server>::test_pool().await;
+    let pool = ConnectionPool::<Core>::test_pool().await;
     let mut rng = thread_rng();
     let object_store_factory = ObjectStoreFactory::mock();
     let object_store = object_store_factory.create_store().await;
-    let mut conn = pool.access_storage().await.unwrap();
+    let mut conn = pool.connection().await.unwrap();
     let expected_outputs = prepare_postgres(&mut rng, &mut conn, 10).await;
 
     SnapshotCreator::for_tests(object_store, pool.clone())
@@ -348,11 +348,11 @@ async fn assert_storage_logs(
 
 #[tokio::test]
 async fn recovery_workflow() {
-    let pool = ConnectionPool::<Server>::test_pool().await;
+    let pool = ConnectionPool::<Core>::test_pool().await;
     let mut rng = thread_rng();
     let object_store_factory = ObjectStoreFactory::mock();
     let object_store = object_store_factory.create_store().await;
-    let mut conn = pool.access_storage().await.unwrap();
+    let mut conn = pool.connection().await.unwrap();
     let expected_outputs = prepare_postgres(&mut rng, &mut conn, 10).await;
 
     SnapshotCreator::for_tests(object_store, pool.clone())
@@ -414,11 +414,11 @@ async fn recovery_workflow() {
 
 #[tokio::test]
 async fn recovery_workflow_with_varying_chunk_size() {
-    let pool = ConnectionPool::<Server>::test_pool().await;
+    let pool = ConnectionPool::<Core>::test_pool().await;
     let mut rng = thread_rng();
     let object_store_factory = ObjectStoreFactory::mock();
     let object_store = object_store_factory.create_store().await;
-    let mut conn = pool.access_storage().await.unwrap();
+    let mut conn = pool.connection().await.unwrap();
     let expected_outputs = prepare_postgres(&mut rng, &mut conn, 10).await;
 
     SnapshotCreator::for_tests(object_store, pool.clone())
diff --git a/core/bin/verified_sources_fetcher/src/main.rs b/core/bin/verified_sources_fetcher/src/main.rs
index c24624636a6..cff20b8d859 100644
--- a/core/bin/verified_sources_fetcher/src/main.rs
+++ b/core/bin/verified_sources_fetcher/src/main.rs
@@ -1,18 +1,18 @@
 use std::io::Write;
 
 use zksync_config::PostgresConfig;
-use zksync_dal::{ConnectionPool, Server, ServerDals};
+use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_env_config::FromEnv;
 use zksync_types::contract_verification_api::SourceCodeData;
 
 #[tokio::main]
 async fn main() {
     let config = PostgresConfig::from_env().unwrap();
-    let pool = ConnectionPool::<Server>::singleton(config.replica_url().unwrap())
+    let pool = ConnectionPool::<Core>::singleton(config.replica_url().unwrap())
         .build()
         .await
         .unwrap();
-    let mut storage = pool.access_storage().await.unwrap();
+    let mut storage = pool.connection().await.unwrap();
     let reqs = storage
         .contract_verification_dal()
         .get_all_successful_requests()
diff --git a/core/lib/circuit_breaker/src/l1_txs.rs b/core/lib/circuit_breaker/src/l1_txs.rs
index b31b742c0cf..455c25d80a3 100644
--- a/core/lib/circuit_breaker/src/l1_txs.rs
+++ b/core/lib/circuit_breaker/src/l1_txs.rs
@@ -1,10 +1,10 @@
-use zksync_dal::{ConnectionPool, Server, ServerDals};
+use zksync_dal::{ConnectionPool, Core, CoreDal};
 
 use crate::{CircuitBreaker, CircuitBreakerError};
 
 #[derive(Debug)]
 pub struct FailedL1TransactionChecker {
-    pub pool: ConnectionPool<Server>,
+    pub pool: ConnectionPool<Core>,
 }
 
 #[async_trait::async_trait]
@@ -12,7 +12,7 @@ impl CircuitBreaker for FailedL1TransactionChecker {
     async fn check(&self) -> Result<(), CircuitBreakerError> {
         if self
             .pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .eth_sender_dal()
diff --git a/core/lib/circuit_breaker/src/replication_lag.rs b/core/lib/circuit_breaker/src/replication_lag.rs
index b12686e54ed..0c7c8d6df76 100644
--- a/core/lib/circuit_breaker/src/replication_lag.rs
+++ b/core/lib/circuit_breaker/src/replication_lag.rs
@@ -1,10 +1,10 @@
-use zksync_dal::{ConnectionPool, Server, ServerDals};
+use zksync_dal::{ConnectionPool, Core, CoreDal};
 
 use crate::{CircuitBreaker, CircuitBreakerError};
 
 #[derive(Debug)]
 pub struct ReplicationLagChecker {
-    pub pool: ConnectionPool<Server>,
+    pub pool: ConnectionPool<Core>,
     pub replication_lag_limit_sec: Option<u32>,
 }
 
@@ -13,7 +13,7 @@ impl CircuitBreaker for ReplicationLagChecker {
     async fn check(&self) -> Result<(), CircuitBreakerError> {
         let lag = self
             .pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .system_dal()
diff --git a/core/lib/dal/src/basic_witness_input_producer_dal.rs b/core/lib/dal/src/basic_witness_input_producer_dal.rs
index 56666a83689..5d6cc060f00 100644
--- a/core/lib/dal/src/basic_witness_input_producer_dal.rs
+++ b/core/lib/dal/src/basic_witness_input_producer_dal.rs
@@ -3,17 +3,17 @@ use std::time::{Duration, Instant};
 
 use sqlx::postgres::types::PgInterval;
 use zksync_db_connection::{
+    connection::Connection,
     instrument::InstrumentExt,
-    processor::StorageProcessor,
     utils::{duration_to_naive_time, pg_interval_from_duration},
 };
 use zksync_types::L1BatchNumber;
 
-use crate::Server;
+use crate::Core;
 
 #[derive(Debug)]
 pub struct BasicWitnessInputProducerDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 /// The amount of attempts to process a job before giving up.
diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs
index 5e8a0dc43d1..fbe6a259222 100644
--- a/core/lib/dal/src/blocks_dal.rs
+++ b/core/lib/dal/src/blocks_dal.rs
@@ -8,7 +8,7 @@ use std::{
 use anyhow::Context as _;
 use bigdecimal::{BigDecimal, FromPrimitive, ToPrimitive};
 use zksync_db_connection::{
-    instrument::InstrumentExt, interpolate_query, match_query_as, processor::StorageProcessor,
+    connection::Connection, instrument::InstrumentExt, interpolate_query, match_query_as,
 };
 use zksync_types::{
     aggregated_operations::AggregatedActionType,
@@ -21,12 +21,12 @@ use zksync_types::{
 
 use crate::{
     models::storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageMiniblockHeader},
-    Server, ServerDals,
+    Core, CoreDal,
 };
 
 #[derive(Debug)]
 pub struct BlocksDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 impl BlocksDal<'_, '_> {
@@ -2416,12 +2416,12 @@ mod tests {
     };
 
     use super::*;
-    use crate::{tests::create_miniblock_header, ConnectionPool, Server, ServerDals};
+    use crate::{tests::create_miniblock_header, ConnectionPool, Core, CoreDal};
 
     #[tokio::test]
     async fn loading_l1_batch_header() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
@@ -2476,8 +2476,8 @@ mod tests {
 
     #[tokio::test]
     async fn getting_predicted_gas() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
@@ -2537,8 +2537,8 @@ mod tests {
     #[allow(deprecated)] // that's the whole point
     #[tokio::test]
     async fn checking_fee_account_address_in_l1_batches() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         assert!(conn
             .blocks_dal()
             .check_l1_batches_have_fee_account_address()
@@ -2549,8 +2549,8 @@ mod tests {
     #[allow(deprecated)] // that's the whole point
     #[tokio::test]
     async fn ensuring_fee_account_address_for_miniblocks() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs
index d5ee2a6b0e2..0a8faa3e42b 100644
--- a/core/lib/dal/src/blocks_web3_dal.rs
+++ b/core/lib/dal/src/blocks_web3_dal.rs
@@ -1,5 +1,5 @@
 use zksync_db_connection::{
-    instrument::InstrumentExt, interpolate_query, match_query_as, processor::StorageProcessor,
+    connection::Connection, instrument::InstrumentExt, interpolate_query, match_query_as,
 };
 use zksync_system_constants::EMPTY_UNCLES_HASH;
 use zksync_types::{
@@ -16,14 +16,14 @@ use crate::{
         storage_block::{ResolvedL1BatchForMiniblock, StorageBlockDetails, StorageL1BatchDetails},
         storage_transaction::CallTrace,
     },
-    Server, ServerDals,
+    Core, CoreDal,
 };
 
 const BLOCK_GAS_LIMIT: u32 = u32::MAX;
 
 #[derive(Debug)]
 pub struct BlocksWeb3Dal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 impl BlocksWeb3Dal<'_, '_> {
@@ -656,13 +656,13 @@ mod tests {
             create_miniblock_header, create_snapshot_recovery, mock_execution_result,
             mock_l2_transaction,
         },
-        ConnectionPool, Server,
+        ConnectionPool, Core,
     };
 
     #[tokio::test]
     async fn getting_web3_block_and_tx_count() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = connection_pool.connection().await.unwrap();
         conn.blocks_dal()
             .delete_miniblocks(MiniblockNumber(0))
             .await
@@ -709,8 +709,8 @@ mod tests {
 
     #[tokio::test]
     async fn resolving_earliest_block_id() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = connection_pool.connection().await.unwrap();
 
         let miniblock_number = conn
             .blocks_web3_dal()
@@ -735,8 +735,8 @@ mod tests {
 
     #[tokio::test]
     async fn resolving_latest_block_id() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = connection_pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
@@ -801,8 +801,8 @@ mod tests {
 
     #[tokio::test]
     async fn resolving_pending_block_id_for_snapshot_recovery() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = connection_pool.connection().await.unwrap();
         let snapshot_recovery = create_snapshot_recovery();
         conn.snapshot_recovery_dal()
             .insert_initial_recovery_status(&snapshot_recovery)
@@ -819,8 +819,8 @@ mod tests {
 
     #[tokio::test]
     async fn resolving_block_by_hash() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = connection_pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
@@ -848,8 +848,8 @@ mod tests {
 
     #[tokio::test]
     async fn getting_traces_for_block() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = connection_pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs
index 95283ffa4fd..3e051ce5f5f 100644
--- a/core/lib/dal/src/consensus_dal.rs
+++ b/core/lib/dal/src/consensus_dal.rs
@@ -1,16 +1,16 @@
 use anyhow::Context as _;
 use zksync_consensus_roles::validator;
 use zksync_consensus_storage::ReplicaState;
-use zksync_db_connection::processor::StorageProcessor;
+use zksync_db_connection::connection::Connection;
 use zksync_types::MiniblockNumber;
 
 pub use crate::models::consensus::Payload;
-use crate::{Server, ServerDals};
+use crate::{Core, CoreDal};
 
 /// Storage access methods for `zksync_core::consensus` module.
 #[derive(Debug)]
 pub struct ConsensusDal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c, Server>,
+    pub storage: &'a mut Connection<'c, Core>,
 }
 
 impl ConsensusDal<'_, '_> {
@@ -330,13 +330,13 @@ mod tests {
     use zksync_consensus_roles::validator;
     use zksync_consensus_storage::ReplicaState;
 
-    use crate::{ConnectionPool, Server, ServerDals};
+    use crate::{ConnectionPool, Core, CoreDal};
 
     #[tokio::test]
     async fn replica_state_read_write() {
         let rng = &mut rand::thread_rng();
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         assert_eq!(None, conn.consensus_dal().genesis().await.unwrap());
         for n in 0..3 {
             let fork = validator::Fork {
diff --git a/core/lib/dal/src/contract_verification_dal.rs b/core/lib/dal/src/contract_verification_dal.rs
index 6dd98550212..03c6c408f65 100644
--- a/core/lib/dal/src/contract_verification_dal.rs
+++ b/core/lib/dal/src/contract_verification_dal.rs
@@ -6,7 +6,7 @@ use std::{
 
 use anyhow::Context as _;
 use sqlx::postgres::types::PgInterval;
-use zksync_db_connection::processor::StorageProcessor;
+use zksync_db_connection::connection::Connection;
 use zksync_types::{
     contract_verification_api::{
         DeployContractCalldata, VerificationIncomingRequest, VerificationInfo, VerificationRequest,
@@ -15,11 +15,11 @@ use zksync_types::{
     get_code_key, Address, CONTRACT_DEPLOYER_ADDRESS, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH,
 };
 
-use crate::{models::storage_verification_request::StorageVerificationRequest, Server};
+use crate::{models::storage_verification_request::StorageVerificationRequest, Core};
 
 #[derive(Debug)]
 pub struct ContractVerificationDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 #[derive(Debug)]
diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs
index db28c1712e5..ad1e910af12 100644
--- a/core/lib/dal/src/eth_sender_dal.rs
+++ b/core/lib/dal/src/eth_sender_dal.rs
@@ -2,7 +2,7 @@ use std::{convert::TryFrom, str::FromStr};
 
 use anyhow::Context as _;
 use sqlx::types::chrono::{DateTime, Utc};
-use zksync_db_connection::{interpolate_query, match_query_as, processor::StorageProcessor};
+use zksync_db_connection::{connection::Connection, interpolate_query, match_query_as};
 use zksync_types::{
     aggregated_operations::AggregatedActionType,
     eth_sender::{EthTx, EthTxBlobSidecar, TxHistory, TxHistoryToSend},
@@ -13,12 +13,12 @@ use crate::{
     models::storage_eth_tx::{
         L1BatchEthSenderStats, StorageEthTx, StorageTxHistory, StorageTxHistoryToSend,
     },
-    Server,
+    Core,
 };
 
 #[derive(Debug)]
 pub struct EthSenderDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 impl EthSenderDal<'_, '_> {
diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs
index f8ab1ed7b67..909ba9e815e 100644
--- a/core/lib/dal/src/events_dal.rs
+++ b/core/lib/dal/src/events_dal.rs
@@ -2,7 +2,7 @@ use std::{collections::HashMap, fmt};
 
 use sqlx::types::chrono::Utc;
 use zksync_db_connection::{
-    instrument::InstrumentExt, processor::StorageProcessor, write_str, writeln_str,
+    connection::Connection, instrument::InstrumentExt, write_str, writeln_str,
 };
 use zksync_system_constants::L1_MESSENGER_ADDRESS;
 use zksync_types::{
@@ -15,7 +15,7 @@ use zksync_types::{
 
 use crate::{
     models::storage_event::{StorageL2ToL1Log, StorageWeb3Log},
-    Server, ServerDals, SqlxError,
+    Core, CoreDal, SqlxError,
 };
 
 /// Wrapper around an optional event topic allowing to hex-format it for `COPY` instructions.
@@ -34,7 +34,7 @@ impl fmt::LowerHex for EventTopic<'_> {
 
 #[derive(Debug)]
 pub struct EventsDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 impl EventsDal<'_, '_> {
@@ -401,7 +401,7 @@ mod tests {
     use zksync_types::{Address, L1BatchNumber, ProtocolVersion};
 
     use super::*;
-    use crate::{tests::create_miniblock_header, ConnectionPool, Server};
+    use crate::{tests::create_miniblock_header, ConnectionPool, Core};
 
     fn create_vm_event(index: u8, topic_count: u8) -> VmEvent {
         assert!(topic_count <= 4);
@@ -415,8 +415,8 @@ mod tests {
 
     #[tokio::test]
     async fn storing_events() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         conn.events_dal().rollback_events(MiniblockNumber(0)).await;
         conn.blocks_dal()
             .delete_miniblocks(MiniblockNumber(0))
             .await
@@ -491,8 +491,8 @@ mod tests {
 
     #[tokio::test]
     async fn storing_l2_to_l1_logs() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         conn.events_dal()
             .rollback_l2_to_l1_logs(MiniblockNumber(0))
             .await;
diff --git a/core/lib/dal/src/events_web3_dal.rs b/core/lib/dal/src/events_web3_dal.rs
index 1098c520347..2264fc539eb 100644
--- a/core/lib/dal/src/events_web3_dal.rs
+++ b/core/lib/dal/src/events_web3_dal.rs
@@ -3,17 +3,17 @@ use sqlx::{
     query::{Query, QueryAs},
     Postgres, Row,
 };
-use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor};
+use zksync_db_connection::{connection::Connection, instrument::InstrumentExt};
 use zksync_types::{
     api::{GetLogsFilter, Log},
     Address, MiniblockNumber, H256,
 };
 
-use crate::{models::storage_event::StorageWeb3Log, Server, SqlxError};
+use crate::{models::storage_event::StorageWeb3Log, Core, SqlxError};
 
 #[derive(Debug)]
 pub struct EventsWeb3Dal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 impl EventsWeb3Dal<'_, '_> {
@@ -258,12 +258,12 @@ mod tests {
     use zksync_types::{Address, H256};
 
     use super::*;
-    use crate::{ConnectionPool, Server};
+    use crate::{ConnectionPool, Core};
 
     #[tokio::test]
     async fn test_build_get_logs_where_clause() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let storage = &mut connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let storage = &mut connection_pool.connection().await.unwrap();
         let events_web3_dal = EventsWeb3Dal { storage };
         let filter = GetLogsFilter {
             from_block: MiniblockNumber(100),
@@ -283,8 +283,8 @@ mod tests {
 
     #[tokio::test]
     async fn test_build_get_logs_with_multiple_topics_where_clause() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let storage = &mut connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let storage = &mut connection_pool.connection().await.unwrap();
         let events_web3_dal = EventsWeb3Dal { storage };
         let filter = GetLogsFilter {
             from_block: MiniblockNumber(10),
@@ -317,8 +317,8 @@ mod tests {
 
     #[tokio::test]
     async fn test_build_get_logs_with_no_address_where_clause() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let storage = &mut connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let storage = &mut connection_pool.connection().await.unwrap();
         let events_web3_dal = EventsWeb3Dal { storage };
         let filter = GetLogsFilter {
             from_block: MiniblockNumber(10),
diff --git a/core/lib/dal/src/factory_deps_dal.rs b/core/lib/dal/src/factory_deps_dal.rs
index 7c415a23e74..920e0ce0329 100644
--- a/core/lib/dal/src/factory_deps_dal.rs
+++ b/core/lib/dal/src/factory_deps_dal.rs
@@ -2,16 +2,16 @@ use std::collections::{HashMap, HashSet};
 
 use anyhow::Context as _;
 use zksync_contracts::{BaseSystemContracts, SystemContractCode};
-use zksync_db_connection::processor::StorageProcessor;
+use zksync_db_connection::connection::Connection;
 use zksync_types::{MiniblockNumber, H256, U256};
 use zksync_utils::{bytes_to_be_words, bytes_to_chunks};
 
-use crate::Server;
+use crate::Core;
 
 /// DAL methods related to factory dependencies.
 #[derive(Debug)]
 pub struct FactoryDepsDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 impl FactoryDepsDal<'_, '_> {
diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs
index 7b3336608bf..37fc8d49aec 100644
--- a/core/lib/dal/src/lib.rs
+++ b/core/lib/dal/src/lib.rs
@@ -4,8 +4,8 @@
 #![warn(clippy::cast_lossless)]
 
 pub use sqlx::{types::BigDecimal, Error as SqlxError};
-use zksync_db_connection::processor::StorageMarker;
-pub use zksync_db_connection::{connection::ConnectionPool, processor::StorageProcessor};
+use zksync_db_connection::connection::DbMarker;
+pub use zksync_db_connection::{connection::Connection, connection_pool::ConnectionPool};
 
 use crate::{
     basic_witness_input_producer_dal::BasicWitnessInputProducerDal, blocks_dal::BlocksDal,
@@ -61,7 +61,7 @@ mod private {
 
 // Here we are making the trait sealed, because it should be public to function correctly, but we don't
 // want to allow any other downstream implementations of this trait.
-pub trait ServerDals<'a>: private::Sealed
+pub trait CoreDal<'a>: private::Sealed
 where
     Self: 'a,
 {
@@ -119,14 +119,14 @@ where
 }
 
 #[derive(Clone, Debug)]
-pub struct Server;
+pub struct Core;
 
-// Implement the marker trait for the Server to be able to use it in StorageProcessor.
-impl StorageMarker for Server {}
+// Implement the marker trait for the Core to be able to use it in Connection.
+impl DbMarker for Core {}
 
 // Implement the sealed trait for the struct itself.
-impl private::Sealed for StorageProcessor<'_, Server> {}
+impl private::Sealed for Connection<'_, Core> {}
 
-impl<'a> ServerDals<'a> for StorageProcessor<'a, Server> {
+impl<'a> CoreDal<'a> for Connection<'a, Core> {
     fn transactions_dal(&mut self) -> TransactionsDal<'_, 'a> {
         TransactionsDal { storage: self }
     }
diff --git a/core/lib/dal/src/metrics.rs b/core/lib/dal/src/metrics.rs
index 28a2bb04cb2..2ec80af5db5 100644
--- a/core/lib/dal/src/metrics.rs
+++ b/core/lib/dal/src/metrics.rs
@@ -3,9 +3,9 @@ use std::time::Duration;
 
 use anyhow::Context;
 use vise::{Gauge, LabeledFamily, Metrics, Unit};
-use zksync_db_connection::connection::ConnectionPool;
+use zksync_db_connection::connection_pool::ConnectionPool;
 
-use crate::{Server, ServerDals};
+use crate::{Core, CoreDal};
 
 #[derive(Debug, Metrics)]
 #[metrics(prefix = "postgres")]
@@ -28,7 +28,7 @@ pub struct PostgresMetrics {
 static POSTGRES_METRICS: vise::Global<PostgresMetrics> = vise::Global::new();
 
 impl PostgresMetrics {
-    pub async fn run_scraping(pool: ConnectionPool<Server>, scrape_interval: Duration) {
+    pub async fn run_scraping(pool: ConnectionPool<Core>, scrape_interval: Duration) {
         let scrape_timeout = Duration::from_secs(1).min(scrape_interval / 2);
         loop {
             match tokio::time::timeout(scrape_timeout, Self::scrape(&pool)).await {
@@ -44,9 +44,9 @@ impl PostgresMetrics {
         }
     }
 
-    async fn scrape(pool: &ConnectionPool<Server>) -> anyhow::Result<()> {
+    async fn scrape(pool: &ConnectionPool<Core>) -> anyhow::Result<()> {
         let mut storage = pool
-            .access_storage_tagged("postgres_metrics")
+            .connection_tagged("postgres_metrics")
             .await
             .context("cannot acquire Postgres connection")?;
         let table_sizes = storage
diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs
index 5543a2daceb..5c173475145 100644
--- a/core/lib/dal/src/proof_generation_dal.rs
+++ b/core/lib/dal/src/proof_generation_dal.rs
@@ -2,14 +2,14 @@ use std::time::Duration;
 
 use strum::{Display, EnumString};
-use zksync_db_connection::{processor::StorageProcessor, utils::pg_interval_from_duration};
+use zksync_db_connection::{connection::Connection, utils::pg_interval_from_duration};
 use zksync_types::L1BatchNumber;
 
-use crate::{Server, SqlxError};
+use crate::{Core, SqlxError};
 
 #[derive(Debug)]
 pub struct ProofGenerationDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 #[derive(Debug, EnumString, Display)]
diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs
index 65e283146ce..13100470736 100644
--- a/core/lib/dal/src/protocol_versions_dal.rs
+++ b/core/lib/dal/src/protocol_versions_dal.rs
@@ -2,7 +2,7 @@ use std::convert::TryInto;
 
 use anyhow::Context as _;
 use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes};
-use zksync_db_connection::processor::StorageProcessor;
+use zksync_db_connection::connection::Connection;
 use zksync_types::{
     protocol_upgrade::{ProtocolUpgradeTx, ProtocolVersion},
     protocol_version::{L1VerifierConfig, VerifierParams},
@@ -11,12 +11,12 @@ use zksync_types::{
 
 use crate::{
     models::storage_protocol_version::{protocol_version_from_storage, StorageProtocolVersion},
-    Server, ServerDals,
+    Core, CoreDal,
 };
 
 #[derive(Debug)]
 pub struct ProtocolVersionsDal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c, Server>,
+    pub storage: &'a mut Connection<'c, Core>,
 }
 
 impl ProtocolVersionsDal<'_, '_> {
diff --git a/core/lib/dal/src/protocol_versions_web3_dal.rs b/core/lib/dal/src/protocol_versions_web3_dal.rs
index cc3efb802bf..fd5ee2e9d30 100644
--- a/core/lib/dal/src/protocol_versions_web3_dal.rs
+++ b/core/lib/dal/src/protocol_versions_web3_dal.rs
@@ -1,11 +1,11 @@
-use zksync_db_connection::processor::StorageProcessor;
+use zksync_db_connection::connection::Connection;
 use zksync_types::api::ProtocolVersion;
 
-use crate::{models::storage_protocol_version::StorageProtocolVersion, Server};
+use crate::{models::storage_protocol_version::StorageProtocolVersion, Core};
 
 #[derive(Debug)]
 pub struct ProtocolVersionsWeb3Dal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c, Server>,
+    pub storage: &'a mut Connection<'c, Core>,
 }
 
 impl ProtocolVersionsWeb3Dal<'_, '_> {
diff --git a/core/lib/dal/src/snapshot_recovery_dal.rs b/core/lib/dal/src/snapshot_recovery_dal.rs
index 27bcecca1cd..e4d082c8eb9 100644
--- a/core/lib/dal/src/snapshot_recovery_dal.rs
+++ b/core/lib/dal/src/snapshot_recovery_dal.rs
@@ -1,13 +1,13 @@
-use zksync_db_connection::processor::StorageProcessor;
+use zksync_db_connection::connection::Connection;
 use zksync_types::{
     snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256,
 };
 
-use crate::Server;
+use crate::Core;
 
 #[derive(Debug)]
 pub struct SnapshotRecoveryDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 impl SnapshotRecoveryDal<'_, '_> {
@@ -106,12 +106,12 @@ mod tests {
         snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256,
     };
 
-    use crate::{ConnectionPool, Server, ServerDals};
+    use crate::{ConnectionPool, Core, CoreDal};
 
     #[tokio::test]
     async fn manipulating_snapshot_recovery_table() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = connection_pool.connection().await.unwrap();
         let mut applied_status_dal = conn.snapshot_recovery_dal();
         let empty_status = applied_status_dal
             .get_applied_snapshot_status()
diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index 44055c39e75..32e388770e7 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -1,14 +1,14 @@
-use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor};
+use zksync_db_connection::{connection::Connection, instrument::InstrumentExt};
 use zksync_types::{
     snapshots::SnapshotStorageLog, AccountTreeId, Address, L1BatchNumber, MiniblockNumber,
     StorageKey, H256,
 };
 
-use crate::Server;
+use crate::Core;
 
 #[derive(Debug)]
 pub struct SnapshotsCreatorDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 impl SnapshotsCreatorDal<'_, '_> {
@@ -146,12 +146,12 @@ mod tests {
     use zksync_types::StorageLog;
 
     use super::*;
-    use crate::{ConnectionPool, Server, ServerDals};
+    use crate::{ConnectionPool, Core, CoreDal};
 
     #[tokio::test]
     async fn getting_storage_log_chunks_basics() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
 
         let logs = (0..100).map(|i| {
             let key = StorageKey::new(
@@ -222,7 +222,7 @@ mod tests {
     }
 
     async fn assert_logs_for_snapshot(
-        conn: &mut StorageProcessor<'_, Server>,
+        conn: &mut Connection<'_, Core>,
         miniblock_number: MiniblockNumber,
         l1_batch_number: L1BatchNumber,
         expected_logs: &[StorageLog],
@@ -262,8 +262,8 @@ mod tests {
 
     #[tokio::test]
     async fn phantom_writes_are_filtered_out() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
 
         let key = StorageKey::new(AccountTreeId::default(), H256::repeat_byte(1));
         let phantom_writes = vec![
diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs
index cc707837b2e..1e621a01017 100644
--- a/core/lib/dal/src/snapshots_dal.rs
+++ b/core/lib/dal/src/snapshots_dal.rs
@@ -1,10 +1,10 @@
-use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor};
+use zksync_db_connection::{connection::Connection, instrument::InstrumentExt};
 use zksync_types::{
     snapshots::{AllSnapshots, SnapshotMetadata},
     L1BatchNumber,
 };
 
-use crate::Server;
+use crate::Core;
 
 #[derive(Debug, sqlx::FromRow)]
 struct StorageSnapshotMetadata {
@@ -29,7 +29,7 @@ impl From<StorageSnapshotMetadata> for SnapshotMetadata {
 
 #[derive(Debug)]
 pub struct SnapshotsDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 impl SnapshotsDal<'_, '_> {
@@ -171,12 +171,12 @@ impl SnapshotsDal<'_, '_> {
 mod tests {
     use zksync_types::L1BatchNumber;
 
-    use crate::{ConnectionPool, Server, ServerDals};
+    use crate::{ConnectionPool, Core, CoreDal};
 
     #[tokio::test]
     async fn adding_snapshot() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         let mut dal = conn.snapshots_dal();
         let l1_batch_number = L1BatchNumber(100);
         dal.add_snapshot(l1_batch_number, 2, "gs:///bucket/factory_deps.bin")
@@ -215,8 +215,8 @@ mod tests {
 
     #[tokio::test]
     async fn adding_files() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         let mut dal = conn.snapshots_dal();
         let l1_batch_number = L1BatchNumber(100);
         dal.add_snapshot(l1_batch_number, 2, "gs:///bucket/factory_deps.bin")
diff --git a/core/lib/dal/src/storage_dal.rs b/core/lib/dal/src/storage_dal.rs
index b61a4b17020..703fd3706d4 100644
--- a/core/lib/dal/src/storage_dal.rs
+++ b/core/lib/dal/src/storage_dal.rs
@@ -1,14 +1,14 @@
 use std::collections::HashMap;
 
 use itertools::Itertools;
-use zksync_db_connection::processor::StorageProcessor;
+use zksync_db_connection::connection::Connection;
 use zksync_types::{StorageKey, StorageLog, StorageValue, H256};
 
-use crate::Server;
+use crate::Core;
 
 #[derive(Debug)]
 pub struct StorageDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 #[deprecated(note = "Soft-removed in favor of `storage_logs`; don't use")]
@@ -99,13 +99,13 @@ mod tests {
     use zksync_types::{AccountTreeId, Address};
 
     use super::*;
-    use crate::{ConnectionPool, Server, ServerDals};
+    use crate::{ConnectionPool, Core, CoreDal};
 
     #[allow(deprecated)]
    #[tokio::test]
     async fn applying_storage_logs() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
 
         let account = AccountTreeId::new(Address::repeat_byte(1));
         let first_key = StorageKey::new(account, H256::zero());
diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs
index e69ac87313d..5a3daba0dcb 100644
--- a/core/lib/dal/src/storage_logs_dal.rs
+++ b/core/lib/dal/src/storage_logs_dal.rs
@@ -2,7 +2,7 @@ use std::{collections::HashMap, ops, time::Instant};
 
 use sqlx::types::chrono::Utc;
 use zksync_db_connection::{
-    instrument::InstrumentExt, processor::StorageProcessor, write_str, writeln_str,
+    connection::Connection, instrument::InstrumentExt, write_str, writeln_str,
 };
 use zksync_types::{
     get_code_key, snapshots::SnapshotStorageLog, AccountTreeId, Address, L1BatchNumber,
@@ -10,11 +10,11 @@ use zksync_types::{
 };
 
 pub use crate::models::storage_log::{DbStorageLog, StorageRecoveryLogEntry};
-use crate::{Server, ServerDals};
+use crate::{Core, CoreDal};
 
 #[derive(Debug)]
 pub struct StorageLogsDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 impl StorageLogsDal<'_, '_> {
@@ -761,13 +761,9 @@ mod tests {
     use zksync_types::{block::L1BatchHeader, ProtocolVersion, ProtocolVersionId};
 
     use super::*;
-    use crate::{tests::create_miniblock_header, ConnectionPool, Server};
+    use crate::{tests::create_miniblock_header, ConnectionPool, Core};
 
-    async fn insert_miniblock(
-        conn: &mut StorageProcessor<'_, Server>,
-        number: u32,
-        logs: Vec<StorageLog>,
-    ) {
+    async fn insert_miniblock(conn: &mut Connection<'_, Core>, number: u32, logs: Vec<StorageLog>) {
         let header = L1BatchHeader::new(
             L1BatchNumber(number),
             0,
@@ -798,8 +794,8 @@ mod tests {
 
     #[tokio::test]
     async fn inserting_storage_logs() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
@@ -843,7 +839,7 @@ mod tests {
     }
 
     async fn test_rollback(
-        conn: &mut StorageProcessor<'_, Server>,
+        conn: &mut Connection<'_, Core>,
         key: StorageKey,
         second_key: StorageKey,
     ) {
@@ -921,8 +917,8 @@ mod tests {
 
     #[tokio::test]
     async fn getting_storage_logs_for_revert() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
@@ -971,8 +967,8 @@ mod tests {
 
     #[tokio::test]
     async fn reverting_keys_without_initial_write() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
@@ -1039,8 +1035,8 @@ mod tests {
 
     #[tokio::test]
     async fn getting_starting_entries_in_chunks() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         let sorted_hashed_keys = prepare_tree_entries(&mut conn, 100).await;
 
         let key_ranges = [
@@ -1072,7 +1068,7 @@ mod tests {
         }
     }
 
-    async fn prepare_tree_entries(conn: &mut StorageProcessor<'_, Server>, count: u8) -> Vec<H256> {
+    async fn prepare_tree_entries(conn: &mut Connection<'_, Core>, count: u8) -> Vec<H256> {
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
@@ -1100,8 +1096,8 @@ mod tests {
 
     #[tokio::test]
     async fn getting_tree_entries() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         let sorted_hashed_keys = prepare_tree_entries(&mut conn, 10).await;
 
         let key_range = H256::zero()..=H256::repeat_byte(0xff);
@@ -1142,8 +1138,8 @@ mod tests {
             FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH,
         );
 
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         // If deployment fails then two writes are issued, one that writes `bytecode_hash` to the "correct" value,
         // and the next write reverts its value back to `FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH`.
         conn.storage_logs_dal()
diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs
index a8033e8b7e9..cfb8071a7a0 100644
--- a/core/lib/dal/src/storage_logs_dedup_dal.rs
+++ b/core/lib/dal/src/storage_logs_dedup_dal.rs
@@ -1,7 +1,7 @@
 use std::collections::HashSet;
 
 use sqlx::types::chrono::Utc;
-use zksync_db_connection::processor::StorageProcessor;
+use zksync_db_connection::connection::Connection;
 use zksync_types::{
     snapshots::SnapshotStorageLog, zk_evm_types::LogQuery, AccountTreeId, Address, L1BatchNumber,
     StorageKey, H256,
@@ -9,11 +9,11 @@ use zksync_types::{
 use zksync_utils::u256_to_h256;
 
 pub use crate::models::storage_log::DbInitialWrite;
-use crate::Server;
+use crate::Core;
 
 #[derive(Debug)]
 pub struct StorageLogsDedupDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 impl StorageLogsDedupDal<'_, '_> {
diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs
index 928402b48fc..75cbc936604 100644
--- a/core/lib/dal/src/storage_web3_dal.rs
+++ b/core/lib/dal/src/storage_web3_dal.rs
@@ -1,6 +1,6 @@
 use std::collections::HashMap;
 
-use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor};
+use zksync_db_connection::{connection::Connection, instrument::InstrumentExt};
 use zksync_types::{
     get_code_key, get_nonce_key,
     utils::{decompose_full_nonce, storage_key_for_standard_token_balance},
@@ -9,11 +9,11 @@ use zksync_types::{
 };
 use zksync_utils::h256_to_u256;
 
-use crate::{models::storage_block::ResolvedL1BatchForMiniblock, Server, ServerDals, SqlxError};
+use crate::{models::storage_block::ResolvedL1BatchForMiniblock, Core, CoreDal, SqlxError};
 
 #[derive(Debug)]
 pub struct StorageWeb3Dal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }
 
 impl StorageWeb3Dal<'_, '_> {
@@ -271,13 +271,13 @@ mod tests {
     use super::*;
     use crate::{
         tests::{create_miniblock_header, create_snapshot_recovery},
-        ConnectionPool, Server, ServerDals,
+        ConnectionPool, Core, CoreDal,
     };
 
     #[tokio::test]
     async fn resolving_l1_batch_number_of_miniblock() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
@@ -343,8 +343,8 @@ mod tests {
 
     #[tokio::test]
     async fn resolving_l1_batch_number_of_miniblock_with_snapshot_recovery() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs
index 590cae3b40e..f704608c89a 100644
--- a/core/lib/dal/src/sync_dal.rs
+++ b/core/lib/dal/src/sync_dal.rs
@@ -1,17 +1,17 @@
 use zksync_db_connection::{
-    instrument::InstrumentExt, metrics::MethodLatency, processor::StorageProcessor,
+    connection::Connection, instrument::InstrumentExt, metrics::MethodLatency,
 };
 use zksync_types::{api::en, MiniblockNumber};
 
 use crate::{
     models::storage_sync::{StorageSyncBlock, SyncBlock},
-    Server, ServerDals,
+    Core, CoreDal,
 };
 
 /// DAL subset dedicated to the EN synchronization.
 #[derive(Debug)]
 pub struct SyncDal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c, Server>,
+    pub storage: &'a mut Connection<'c, Core>,
 }
 
 impl SyncDal<'_, '_> {
@@ -119,13 +119,13 @@ mod tests {
         create_miniblock_header, create_snapshot_recovery, mock_execution_result,
         mock_l2_transaction,
     },
-        ConnectionPool, Server,
+        ConnectionPool, Core,
     };
 
     #[tokio::test]
     async fn sync_block_basics() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
 
         // Simulate genesis.
         conn.protocol_versions_dal()
@@ -240,8 +240,8 @@ mod tests {
 
     #[tokio::test]
     async fn sync_block_after_snapshot_recovery() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
 
         // Simulate snapshot recovery.
         conn.protocol_versions_dal()
diff --git a/core/lib/dal/src/system_dal.rs b/core/lib/dal/src/system_dal.rs
index b72ec249aa0..7797e69d616 100644
--- a/core/lib/dal/src/system_dal.rs
+++ b/core/lib/dal/src/system_dal.rs
@@ -1,7 +1,7 @@
 use std::collections::HashMap;

 use sqlx::Row;
-use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor};
+use zksync_db_connection::{connection::Connection, instrument::InstrumentExt};

 #[derive(Debug)]
 pub(crate) struct TableSize {
@@ -10,10 +10,10 @@ pub(crate) struct TableSize {
     pub relation_size: u64,
     pub total_size: u64,
 }
-use crate::Server;
+use crate::Core;

 pub struct SystemDal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c, Server>,
+    pub storage: &'a mut Connection<'c, Core>,
 }

 impl SystemDal<'_, '_> {
diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs
index 96270121eb6..bd0e54ee6cf 100644
--- a/core/lib/dal/src/tests/mod.rs
+++ b/core/lib/dal/src/tests/mod.rs
@@ -1,7 +1,7 @@
 use std::time::Duration;

 use zksync_contracts::BaseSystemContractsHashes;
-use zksync_db_connection::connection::ConnectionPool;
+use zksync_db_connection::connection_pool::ConnectionPool;
 use zksync_types::{
     block::{MiniblockHasher, MiniblockHeader},
     fee::{Fee, TransactionExecutionMetrics},
@@ -21,7 +21,7 @@ use crate::{
     protocol_versions_dal::ProtocolVersionsDal,
     transactions_dal::{L2TxSubmissionResult, TransactionsDal},
     transactions_web3_dal::TransactionsWeb3Dal,
-    Server,
+    Core,
 };

 const DEFAULT_GAS_PER_PUBDATA: u32 = 100;
@@ -165,8 +165,8 @@ pub(crate) fn create_snapshot_recovery() -> SnapshotRecoveryStatus {

 #[tokio::test]
 async fn workflow_with_submit_tx_equal_hashes() {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
-    let storage = &mut connection_pool.access_storage().await.unwrap();
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
+    let storage = &mut connection_pool.connection().await.unwrap();
     let mut transactions_dal = TransactionsDal { storage };

     let tx = mock_l2_transaction();
@@ -185,8 +185,8 @@ async fn workflow_with_submit_tx_equal_hashes() {

 #[tokio::test]
 async fn workflow_with_submit_tx_diff_hashes() {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
-    let storage = &mut connection_pool.access_storage().await.unwrap();
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
+    let storage = &mut connection_pool.connection().await.unwrap();
     let mut transactions_dal = TransactionsDal { storage };

     let tx = mock_l2_transaction();
@@ -212,8 +212,8 @@ async fn workflow_with_submit_tx_diff_hashes() {

 #[tokio::test]
 async fn remove_stuck_txs() {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
-    let storage = &mut connection_pool.access_storage().await.unwrap();
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
+    let storage = &mut connection_pool.connection().await.unwrap();
     let mut protocol_versions_dal = ProtocolVersionsDal { storage };
     protocol_versions_dal
         .save_protocol_version_with_tx(Default::default())
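The `tests/mod.rs` hunk above highlights the module reshuffle that the rest of this diff implements: the `processor` module is gone, and the pool type moved into its own `connection_pool` module. In import terms, a sketch of old versus new paths per the renames in this diff:

```rust
// Before: the connection type lived in `processor`, the pool in `connection`.
// use zksync_db_connection::connection::ConnectionPool;
// use zksync_db_connection::processor::{StorageMarker, StorageProcessor};

// After: `connection` holds `Connection` + `DbMarker`; the pool has its own module.
use zksync_db_connection::connection::{Connection, DbMarker};
use zksync_db_connection::connection_pool::ConnectionPool;
```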
diff --git a/core/lib/dal/src/tokens_dal.rs b/core/lib/dal/src/tokens_dal.rs
index 2456e72ed87..389df3cb4be 100644
--- a/core/lib/dal/src/tokens_dal.rs
+++ b/core/lib/dal/src/tokens_dal.rs
@@ -1,12 +1,12 @@
 use sqlx::types::chrono::Utc;
-use zksync_db_connection::{processor::StorageProcessor, write_str, writeln_str};
+use zksync_db_connection::{connection::Connection, write_str, writeln_str};
 use zksync_types::{tokens::TokenInfo, Address, MiniblockNumber};

-use crate::{Server, ServerDals};
+use crate::{Core, CoreDal};

 #[derive(Debug)]
 pub struct TokensDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }

 impl TokensDal<'_, '_> {
@@ -112,7 +112,7 @@ mod tests {
     use zksync_types::{get_code_key, tokens::TokenMetadata, StorageLog, H256};

     use super::*;
-    use crate::{ConnectionPool, Server, ServerDals};
+    use crate::{ConnectionPool, Core, CoreDal};

     fn test_token_info() -> TokenInfo {
         TokenInfo {
@@ -140,8 +140,8 @@ mod tests {

     #[tokio::test]
     async fn adding_and_getting_tokens() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut storage = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut storage = pool.connection().await.unwrap();
         let tokens = [test_token_info(), eth_token_info()];
         storage.tokens_dal().add_tokens(&tokens).await.unwrap();
@@ -187,8 +187,8 @@ mod tests {

     #[tokio::test]
     async fn rolling_back_tokens() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut storage = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut storage = pool.connection().await.unwrap();

         let eth_info = eth_token_info();
         let eth_deployment_log =
@@ -257,7 +257,7 @@ mod tests {
         );
     }

-    async fn test_getting_all_tokens(storage: &mut StorageProcessor<'_, Server>) {
+    async fn test_getting_all_tokens(storage: &mut Connection<'_, Core>) {
         for at_miniblock in [None, Some(MiniblockNumber(2)), Some(MiniblockNumber(100))] {
             let all_tokens = storage
                 .tokens_web3_dal()
@@ -281,8 +281,8 @@ mod tests {

     #[tokio::test]
     async fn rolling_back_tokens_with_failed_deployment() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut storage = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut storage = pool.connection().await.unwrap();

         let test_info = test_token_info();
diff --git a/core/lib/dal/src/tokens_web3_dal.rs b/core/lib/dal/src/tokens_web3_dal.rs
index efad971b4fb..f7c51ead6d3 100644
--- a/core/lib/dal/src/tokens_web3_dal.rs
+++ b/core/lib/dal/src/tokens_web3_dal.rs
@@ -1,10 +1,10 @@
-use zksync_db_connection::processor::StorageProcessor;
+use zksync_db_connection::connection::Connection;
 use zksync_types::{
     tokens::{TokenInfo, TokenMetadata},
     Address, MiniblockNumber,
 };

-use crate::{Server, ServerDals};
+use crate::{Core, CoreDal};

 #[derive(Debug)]
 struct StorageTokenInfo {
@@ -31,7 +31,7 @@ impl From<StorageTokenInfo> for TokenInfo {

 #[derive(Debug)]
 pub struct TokensWeb3Dal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }

 impl TokensWeb3Dal<'_, '_> {
diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs
index a4a4ffbe7bd..6a0114a71db 100644
--- a/core/lib/dal/src/transactions_dal.rs
+++ b/core/lib/dal/src/transactions_dal.rs
@@ -5,7 +5,7 @@ use bigdecimal::BigDecimal;
 use itertools::Itertools;
 use sqlx::{error, types::chrono::NaiveDateTime};
 use zksync_db_connection::{
-    instrument::InstrumentExt, processor::StorageProcessor, utils::pg_interval_from_duration,
+    connection::Connection, instrument::InstrumentExt, utils::pg_interval_from_duration,
 };
 use zksync_types::{
     block::MiniblockExecutionData,
@@ -22,7 +22,7 @@ use zksync_utils::u256_to_big_decimal;

 use crate::{
     models::storage_transaction::{CallTrace, StorageTransaction},
-    Server,
+    Core,
 };

 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
@@ -48,7 +48,7 @@ impl fmt::Display for L2TxSubmissionResult {

 #[derive(Debug)]
 pub struct TransactionsDal<'c, 'a> {
-    pub(crate) storage: &'c mut StorageProcessor<'a, Server>,
+    pub(crate) storage: &'c mut Connection<'a, Core>,
 }

 type TxLocations = Vec<(MiniblockNumber, Vec<(H256, u32, u16)>)>;
@@ -1338,13 +1338,13 @@ mod tests {
     use super::*;
     use crate::{
         tests::{create_miniblock_header, mock_execution_result, mock_l2_transaction},
-        ConnectionPool, Server, ServerDals,
+        ConnectionPool, Core, CoreDal,
     };

     #[tokio::test]
     async fn getting_call_trace_for_transaction() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = connection_pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs
index 718b609354a..83c8d22247c 100644
--- a/core/lib/dal/src/transactions_web3_dal.rs
+++ b/core/lib/dal/src/transactions_web3_dal.rs
@@ -1,6 +1,6 @@
 use sqlx::types::chrono::NaiveDateTime;
 use zksync_db_connection::{
-    instrument::InstrumentExt, interpolate_query, match_query_as, processor::StorageProcessor,
+    connection::Connection, instrument::InstrumentExt, interpolate_query, match_query_as,
 };
 use zksync_types::{
     api, api::TransactionReceipt, Address, L2ChainId, MiniblockNumber, Transaction,
@@ -12,7 +12,7 @@ use crate::{
         StorageApiTransaction, StorageTransaction, StorageTransactionDetails,
         StorageTransactionReceipt,
     },
-    Server, ServerDals, SqlxError,
+    Core, CoreDal, SqlxError,
 };

 #[derive(Debug, Clone, Copy)]
@@ -23,7 +23,7 @@ enum TransactionSelector<'a> {

 #[derive(Debug)]
 pub struct TransactionsWeb3Dal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Server>,
+    pub(crate) storage: &'a mut Connection<'c, Core>,
 }

 impl TransactionsWeb3Dal<'_, '_> {
@@ -397,10 +397,10 @@ mod tests {
     use super::*;
     use crate::{
         tests::{create_miniblock_header, mock_execution_result, mock_l2_transaction},
-        ConnectionPool, Server, ServerDals,
+        ConnectionPool, Core, CoreDal,
     };

-    async fn prepare_transactions(conn: &mut StorageProcessor<'_, Server>, txs: Vec<L2Tx>) {
+    async fn prepare_transactions(conn: &mut Connection<'_, Core>, txs: Vec<L2Tx>) {
         conn.blocks_dal()
             .delete_miniblocks(MiniblockNumber(0))
             .await
@@ -434,8 +434,8 @@ mod tests {

     #[tokio::test]
     async fn getting_transaction() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = connection_pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
@@ -484,8 +484,8 @@ mod tests {

     #[tokio::test]
     async fn getting_receipts() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = connection_pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
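The `prepare_transactions` helper above shows the idiom for factoring out test setup: helpers borrow the connection as `&mut Connection<'_, Core>` rather than owning it. A sketch in the same style (the helper itself is hypothetical, but its calls are taken verbatim from this hunk):

```rust
use zksync_dal::{Connection, Core, CoreDal};
use zksync_types::MiniblockNumber;

// Borrowing mutably lets many helpers share one pooled connection in sequence.
async fn wipe_miniblocks(conn: &mut Connection<'_, Core>) {
    conn.blocks_dal()
        .delete_miniblocks(MiniblockNumber(0))
        .await
        .unwrap();
}
```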
@@ -512,8 +512,8 @@ mod tests {

     #[tokio::test]
     async fn getting_miniblock_transactions() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = connection_pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
@@ -539,8 +539,8 @@ mod tests {

     #[tokio::test]
     async fn getting_next_nonce_by_initiator_account() {
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = connection_pool.connection().await.unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
@@ -609,8 +609,8 @@ mod tests {
     #[tokio::test]
     async fn getting_next_nonce_by_initiator_account_after_snapshot_recovery() {
         // Emulate snapshot recovery: no transactions with past nonces are present in the storage
-        let connection_pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = connection_pool.access_storage().await.unwrap();
+        let connection_pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = connection_pool.connection().await.unwrap();
         let initiator = Address::repeat_byte(1);
         let next_nonce = conn
             .transactions_web3_dal()
diff --git a/core/lib/db_connection/src/connection.rs b/core/lib/db_connection/src/connection.rs
index a9330c00cdf..d288244a4dc 100644
--- a/core/lib/db_connection/src/connection.rs
+++ b/core/lib/db_connection/src/connection.rs
@@ -1,468 +1,264 @@
 use std::{
-    env, fmt,
-    future::Future,
-    marker::PhantomData,
+    collections::HashMap,
+    fmt,
     panic::Location,
     sync::{
-        atomic::{AtomicU64, Ordering},
-        Arc,
+        atomic::{AtomicUsize, Ordering},
+        Mutex,
     },
-    time::Duration,
+    time::{Instant, SystemTime},
 };

-use anyhow::Context as _;
-use rand::Rng;
 use sqlx::{
-    pool::PoolConnection,
-    postgres::{PgConnectOptions, PgPool, PgPoolOptions, Postgres},
+    pool::PoolConnection, types::chrono, Connection as _, PgConnection, Postgres, Transaction,
 };

-use crate::{
-    metrics::CONNECTION_METRICS,
-    processor::{StorageMarker, StorageProcessor, StorageProcessorTags, TracedConnections},
-};
-
-/// Builder for [`ConnectionPool`]s.
-#[derive(Clone)]
-pub struct ConnectionPoolBuilder<SM: StorageMarker> {
-    database_url: String,
-    max_size: u32,
-    acquire_timeout: Duration,
-    statement_timeout: Option<Duration>,
-    _marker: PhantomData<SM>,
-}
+use crate::{connection_pool::ConnectionPool, metrics::CONNECTION_METRICS, utils::InternalMarker};

-impl<SM: StorageMarker> fmt::Debug for ConnectionPoolBuilder<SM> {
-    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        // Database URL is potentially sensitive, thus we omit it.
-        formatter
-            .debug_struct("ConnectionPoolBuilder")
-            .field("max_size", &self.max_size)
-            .field("acquire_timeout", &self.acquire_timeout)
-            .field("statement_timeout", &self.statement_timeout)
-            .finish()
-    }
+/// Tags that can be associated with a connection.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub struct ConnectionTags {
+    pub requester: &'static str,
+    pub location: &'static Location<'static>,
 }

-impl<SM: StorageMarker> ConnectionPoolBuilder<SM> {
-    /// Overrides the maximum number of connections that can be allocated by the pool.
-    pub fn set_max_size(&mut self, max_size: u32) -> &mut Self {
-        self.max_size = max_size;
-        self
-    }
-
-    /// Sets the acquire timeout for a single connection attempt. There are multiple attempts (currently 3)
-    /// before `access_storage*` methods return an error. If not specified, the acquire timeout will not be set.
-    pub fn set_acquire_timeout(&mut self, timeout: Option<Duration>) -> &mut Self {
-        if let Some(timeout) = timeout {
-            self.acquire_timeout = timeout;
-        }
-        self
-    }
-
-    /// Sets the statement timeout for the pool. See [Postgres docs] for semantics.
-    /// If not specified, the statement timeout will not be set.
-    ///
-    /// [Postgres docs]: https://www.postgresql.org/docs/14/runtime-config-client.html
-    pub fn set_statement_timeout(&mut self, timeout: Option<Duration>) -> &mut Self {
-        self.statement_timeout = timeout;
-        self
-    }
-
-    /// Returns the maximum number of connections that can be allocated by the pool.
-    pub fn max_size(&self) -> u32 {
-        self.max_size
-    }
-
-    /// Builds a connection pool from this builder.
-    pub async fn build(&self) -> anyhow::Result<ConnectionPool<SM>> {
-        let options = PgPoolOptions::new()
-            .max_connections(self.max_size)
-            .acquire_timeout(self.acquire_timeout);
-        let mut connect_options: PgConnectOptions = self
-            .database_url
-            .parse()
-            .context("Failed parsing database URL")?;
-        if let Some(timeout) = self.statement_timeout {
-            let timeout_string = format!("{}s", timeout.as_secs());
-            connect_options = connect_options.options([("statement_timeout", timeout_string)]);
-        }
-        let pool = options
-            .connect_with(connect_options)
-            .await
-            .context("Failed connecting to database")?;
-        tracing::info!("Created DB pool with parameters {self:?}");
-        Ok(ConnectionPool {
-            database_url: self.database_url.clone(),
-            inner: pool,
-            max_size: self.max_size,
-            traced_connections: None,
-            _marker: Default::default(),
-        })
-    }
-
-    /// Builds a connection pool that has a single connection.
-    pub async fn build_singleton(&self) -> anyhow::Result<ConnectionPool<SM>> {
-        let singleton_builder = Self {
-            database_url: self.database_url.clone(),
-            max_size: 1,
-            acquire_timeout: self.acquire_timeout,
-            statement_timeout: self.statement_timeout,
-            _marker: self._marker,
-        };
-        singleton_builder.build().await
+impl ConnectionTags {
+    pub fn display(this: Option<&Self>) -> &(dyn fmt::Display + Send + Sync) {
+        this.map_or(&"not tagged", |tags| tags)
     }
 }

-#[derive(Debug)]
-pub struct TestTemplate(url::Url);
-
-impl TestTemplate {
-    fn db_name(&self) -> &str {
-        self.0.path().strip_prefix('/').unwrap()
-    }
-
-    fn url(&self, db_name: &str) -> url::Url {
-        let mut url = self.0.clone();
-        url.set_path(db_name);
-        url
-    }
-
-    async fn connect_to(db_url: &url::Url) -> sqlx::Result<sqlx::PgConnection> {
-        use sqlx::Connection as _;
-        let mut attempts = 20;
-        loop {
-            match sqlx::PgConnection::connect(db_url.as_ref()).await {
-                Ok(conn) => return Ok(conn),
-                Err(err) => {
-                    attempts -= 1;
-                    if attempts == 0 {
-                        return Err(err);
-                    }
-                }
-            }
-            tokio::time::sleep(Duration::from_millis(200)).await;
-        }
-    }
-
-    /// Obtains the test database URL from the environment variable.
-    pub fn empty() -> anyhow::Result<TestTemplate> {
-        let db_url = env::var("TEST_DATABASE_URL").context(
-            "TEST_DATABASE_URL must be set. Normally, this is done by the 'zk' tool. \
-            Make sure that you are running the tests with 'zk test rust' command or equivalent.",
-        )?;
-        Ok(Self(db_url.parse()?))
-    }
-
-    /// Closes the connection pool, disallows connecting to the underlying db,
-    /// so that the db can be used as a template.
-    pub async fn freeze<SM: StorageMarker>(pool: ConnectionPool<SM>) -> anyhow::Result<Self> {
-        use sqlx::Executor as _;
-        let mut conn = pool.acquire_connection_retried(None).await?;
-        conn.execute(
-            "UPDATE pg_database SET datallowconn = false WHERE datname = current_database()",
+impl fmt::Display for ConnectionTags {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            formatter,
+            "requested by `{}` at {}:{}",
+            self.requester,
+            self.location.file(),
+            self.location.line()
         )
-        .await
-        .context("SET dataallowconn = false")?;
-        drop(conn);
-        pool.inner.close().await;
-        Ok(Self(pool.database_url.parse()?))
     }
+}

-    /// Constructs a new temporary database (with a randomized name)
-    /// by cloning the database template pointed by TEST_DATABASE_URL env var.
-    /// The template is expected to have all migrations from dal/migrations applied.
-    /// For efficiency, the Postgres container of TEST_DATABASE_URL should be
-    /// configured with option "fsync=off" - it disables waiting for disk synchronization
-    /// whenever you write to the DBs, therefore making it as fast as an in-memory Postgres instance.
-    /// The database is not cleaned up automatically, but rather the whole Postgres
-    /// container is recreated whenever you call "zk test rust".
-    pub async fn create_db<SM: StorageMarker>(
-        &self,
-        connections: u32,
-    ) -> anyhow::Result<ConnectionPoolBuilder<SM>> {
-        use sqlx::Executor as _;
-
-        let mut conn = Self::connect_to(&self.url(""))
-            .await
-            .context("connect_to()")?;
-        let db_old = self.db_name();
-        let db_new = format!("test-{}", rand::thread_rng().gen::<u64>());
-        conn.execute(format!("CREATE DATABASE \"{db_new}\" WITH TEMPLATE \"{db_old}\"").as_str())
-            .await
-            .context("CREATE DATABASE")?;
+struct TracedConnectionInfo {
+    tags: Option<ConnectionTags>,
+    created_at: Instant,
+}

-        Ok(ConnectionPool::<SM>::builder(
-            self.url(&db_new).as_ref(),
-            connections,
-        ))
+impl fmt::Debug for TracedConnectionInfo {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let timestamp: chrono::DateTime<chrono::Utc> =
+            (SystemTime::now() - self.created_at.elapsed()).into();
+        let tags_display = ConnectionTags::display(self.tags.as_ref());
+        write!(formatter, "[{timestamp} - {tags_display}]")
     }
 }

-/// Global DB connection parameters applied to all [`ConnectionPool`] instances.
-#[derive(Debug)]
-pub struct GlobalConnectionPoolConfig {
-    // We consider millisecond precision to be enough for config purposes.
-    long_connection_threshold_ms: AtomicU64,
-    slow_query_threshold_ms: AtomicU64,
+/// Traced active connections for a connection pool.
+#[derive(Default)]
+pub struct TracedConnections {
+    connections: Mutex<HashMap<usize, TracedConnectionInfo>>,
+    next_id: AtomicUsize,
 }

-impl GlobalConnectionPoolConfig {
-    const fn new() -> Self {
-        Self {
-            long_connection_threshold_ms: AtomicU64::new(5_000), // 5 seconds
-            slow_query_threshold_ms: AtomicU64::new(100), // 0.1 seconds
+impl fmt::Debug for TracedConnections {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if let Ok(guard) = self.connections.lock() {
+            formatter.debug_set().entries(guard.values()).finish()
+        } else {
+            formatter.write_str("(poisoned)")
         }
     }
 }

-    pub(crate) fn long_connection_threshold(&self) -> Duration {
-        Duration::from_millis(self.long_connection_threshold_ms.load(Ordering::Relaxed))
-    }
-
-    pub(crate) fn slow_query_threshold(&self) -> Duration {
-        Duration::from_millis(self.slow_query_threshold_ms.load(Ordering::Relaxed))
-    }
-
-    /// Sets the threshold for the DB connection lifetime to denote a connection as long-living and log its details.
-    pub fn set_long_connection_threshold(&self, threshold: Duration) -> anyhow::Result<&Self> {
-        let millis = u64::try_from(threshold.as_millis())
-            .context("long_connection_threshold is unreasonably large")?;
-        self.long_connection_threshold_ms
-            .store(millis, Ordering::Relaxed);
-        tracing::info!("Set long connection threshold to {threshold:?}");
-        Ok(self)
+impl TracedConnections {
+    fn acquire(&self, tags: Option<ConnectionTags>, created_at: Instant) -> usize {
+        let id = self.next_id.fetch_add(1, Ordering::SeqCst);
+        let mut guard = self
+            .connections
+            .lock()
+            .expect("`TracedConnections` is poisoned");
+        let info = TracedConnectionInfo { tags, created_at };
+        guard.insert(id, info);
+        id
     }

-    /// Sets the threshold to denote a DB query as "slow" and log its details.
-    pub fn set_slow_query_threshold(&self, threshold: Duration) -> anyhow::Result<&Self> {
-        let millis = u64::try_from(threshold.as_millis())
-            .context("slow_query_threshold is unreasonably large")?;
-        self.slow_query_threshold_ms
-            .store(millis, Ordering::Relaxed);
-        tracing::info!("Set slow query threshold to {threshold:?}");
-        Ok(self)
+    fn mark_as_dropped(&self, connection_id: usize) {
+        let mut guard = self
+            .connections
+            .lock()
+            .expect("`TracedConnections` is poisoned");
+        guard.remove(&connection_id);
     }
 }

-#[derive(Clone)]
-pub struct ConnectionPool<SM: StorageMarker> {
-    pub(crate) inner: PgPool,
-    database_url: String,
-    max_size: u32,
-    pub(crate) traced_connections: Option<Arc<TracedConnections>>,
-    _marker: PhantomData<SM>,
+struct PooledConnection<'a> {
+    connection: PoolConnection<Postgres>,
+    tags: Option<ConnectionTags>,
+    created_at: Instant,
+    traced: Option<(&'a TracedConnections, usize)>,
 }

-impl<SM: StorageMarker> fmt::Debug for ConnectionPool<SM> {
+impl fmt::Debug for PooledConnection<'_> {
     fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        // We don't print the `database_url`, as is may contain
-        // sensitive information (e.g. database password).
         formatter
-            .debug_struct("ConnectionPool")
-            .field("max_size", &self.max_size)
+            .debug_struct("PooledConnection")
+            .field("tags", &self.tags)
+            .field("created_at", &self.created_at)
             .finish_non_exhaustive()
     }
 }

-impl<SM: StorageMarker> ConnectionPool<SM> {
-    const TEST_ACQUIRE_TIMEOUT: Duration = Duration::from_secs(10);
-
-    /// Returns a reference to the global configuration parameters applied for all DB pools. For consistency, these parameters
-    /// should be changed early in the app life cycle.
-    pub fn global_config() -> &'static GlobalConnectionPoolConfig {
-        static CONFIG: GlobalConnectionPoolConfig = GlobalConnectionPoolConfig::new();
-        &CONFIG
+impl Drop for PooledConnection<'_> {
+    fn drop(&mut self) {
+        if let Some(tags) = &self.tags {
+            let lifetime = self.created_at.elapsed();
+            CONNECTION_METRICS.lifetime[&tags.requester].observe(lifetime);
+
+            if lifetime
+                > ConnectionPool::<InternalMarker>::global_config().long_connection_threshold()
+            {
+                let file = tags.location.file();
+                let line = tags.location.line();
+                tracing::info!(
+                    "Long-living connection for `{}` created at {file}:{line}: {lifetime:?}",
+                    tags.requester
+                );
+            }
+        }
+        if let Some((connections, id)) = self.traced {
+            connections.mark_as_dropped(id);
+        }
     }
+}

-    /// Creates a test pool with a reasonably large number of connections.
-    ///
-    /// Test pools trace their active connections. If acquiring a connection fails (e.g., with a timeout),
-    /// the returned error will contain information on all active connections.
-    pub async fn test_pool() -> ConnectionPool<SM> {
-        const DEFAULT_CONNECTIONS: u32 = 50; // Expected to be enough for any unit test.
-        Self::constrained_test_pool(DEFAULT_CONNECTIONS).await
-    }
-
-    /// Same as [`Self::test_pool()`], but with a configurable number of connections. This is useful to test
-    /// behavior of components that rely on singleton / constrained pools in production.
-    pub async fn constrained_test_pool(connections: u32) -> ConnectionPool<SM> {
-        assert!(connections > 0, "Number of connections must be positive");
-        let mut builder = TestTemplate::empty()
-            .expect("failed creating test template")
-            .create_db(connections)
-            .await
-            .expect("failed creating database for tests");
-        let mut pool = builder
-            .set_acquire_timeout(Some(Self::TEST_ACQUIRE_TIMEOUT))
-            .build()
-            .await
-            .expect("cannot build connection pool");
-        pool.traced_connections = Some(Arc::default());
-        pool
-    }
+#[derive(Debug)]
+enum ConnectionInner<'a> {
+    Pooled(PooledConnection<'a>),
+    Transaction {
+        transaction: Transaction<'a, Postgres>,
+        tags: Option<&'a ConnectionTags>,
+    },
+}

-    /// Initializes a builder for connection pools.
-    pub fn builder(database_url: &str, max_pool_size: u32) -> ConnectionPoolBuilder<SM> {
-        ConnectionPoolBuilder {
-            database_url: database_url.to_string(),
-            max_size: max_pool_size,
-            acquire_timeout: Duration::from_secs(30), // Default value used by `sqlx`
-            statement_timeout: None,
+/// Marker trait for restricting using all possible types as a storage marker.
+pub trait DbMarker {}
+
+/// A `Connection` is the main storage interaction point.
+/// It holds down the connection (either direct or pooled) to the database
+/// and provides methods to obtain different storage schemas.
+#[derive(Debug)]
+pub struct Connection<'a, DB: DbMarker> {
+    inner: ConnectionInner<'a>,
+    _marker: std::marker::PhantomData<DB>,
+}

-            _marker: Default::default(),
-        }
-    }
-
-    /// Initializes a builder for connection pools with a single connection. This is equivalent
-    /// to calling `Self::builder(db_url, 1)`.
-    pub fn singleton(database_url: &str) -> ConnectionPoolBuilder<SM> {
-        Self::builder(database_url, 1)
-    }
+impl<'a, DB: DbMarker> Connection<'a, DB> {
+    /// Creates a `Connection` using a pool of connections.
+    /// This method borrows one of the connections from the pool, and releases it
+    /// after `drop`.
+    pub(crate) fn from_pool(
+        connection: PoolConnection<Postgres>,
+        tags: Option<ConnectionTags>,
+        traced_connections: Option<&'a TracedConnections>,
+    ) -> Self {
+        let created_at = Instant::now();
+        let inner = ConnectionInner::Pooled(PooledConnection {
+            connection,
+            tags,
+            created_at,
+            traced: traced_connections.map(|connections| {
+                let id = connections.acquire(tags, created_at);
+                (connections, id)
+            }),
+        });
+        Self {
+            inner,
+            _marker: Default::default(),
+        }
+    }

-    /// Returns the maximum number of connections in this pool specified during its creation.
-    /// This number may be distinct from the current number of connections in the pool (including
-    /// idle ones).
-    pub fn max_size(&self) -> u32 {
-        self.max_size
-    }
+    pub async fn start_transaction(&mut self) -> sqlx::Result<Connection<'_, DB>> {
+        let (conn, tags) = self.conn_and_tags();
+        let inner = ConnectionInner::Transaction {
+            transaction: conn.begin().await?,
+            tags,
+        };
+        Ok(Connection {
+            inner,
+            _marker: Default::default(),
+        })
+    }
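`DbMarker` (née `StorageMarker`) introduced above is what keeps pools for different databases apart at the type level. A sketch of how a new database flavor would plug in; `MyDb` is made up, though `Core` in `zksync_dal` is presumably defined the same way:

```rust
use zksync_db_connection::{
    connection::{Connection, DbMarker},
    connection_pool::ConnectionPool,
};

// A zero-sized marker type identifies the database/schema; implementing
// `DbMarker` lets it parameterize pools and connections.
#[derive(Debug)]
pub struct MyDb; // hypothetical marker

impl DbMarker for MyDb {}

pub type MyPool = ConnectionPool<MyDb>;
pub type MyConnection<'a> = Connection<'a, MyDb>;
```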
-    /// Creates a `StorageProcessor` entity over a recoverable connection.
-    /// Upon a database outage connection will block the thread until
-    /// it will be able to recover the connection (or, if connection cannot
-    /// be restored after several retries, this will be considered as
-    /// irrecoverable database error and result in panic).
-    ///
-    /// This method is intended to be used in crucial contexts, where the
-    /// database access is must-have (e.g. block committer).
-    pub async fn access_storage(&self) -> anyhow::Result<StorageProcessor<'_, SM>> {
-        self.access_storage_inner(None).await
+    /// Checks if the `Connection` is currently within a database transaction.
+    pub fn in_transaction(&self) -> bool {
+        matches!(self.inner, ConnectionInner::Transaction { .. })
     }

-    /// A version of `access_storage` that would also expose the duration of the connection
-    /// acquisition tagged to the `requester` name. It also tracks the caller location for the purposes
-    /// of logging (e.g., long-living connections) and debugging (when used with a test connection pool).
-    ///
-    /// WARN: This method should not be used if it will result in too many time series (e.g.
-    /// from witness generators or provers), otherwise Prometheus won't be able to handle it.
-    #[track_caller] // In order to use it, we have to de-sugar `async fn`
-    pub fn access_storage_tagged(
-        &self,
-        requester: &'static str,
-    ) -> impl Future<Output = anyhow::Result<StorageProcessor<'_, SM>>> + '_ {
-        let location = Location::caller();
-        async move {
-            let tags = StorageProcessorTags {
-                requester,
-                location,
-            };
-            self.access_storage_inner(Some(tags)).await
+    pub async fn commit(self) -> sqlx::Result<()> {
+        if let ConnectionInner::Transaction {
+            transaction: postgres,
+            ..
+        } = self.inner
+        {
+            postgres.commit().await
+        } else {
+            panic!("Connection::commit can only be invoked after calling Connection::start_transaction");
         }
     }

-    async fn access_storage_inner(
-        &self,
-        tags: Option<StorageProcessorTags>,
-    ) -> anyhow::Result<StorageProcessor<'_, SM>> {
-        let acquire_latency = CONNECTION_METRICS.acquire.start();
-        let conn = self
-            .acquire_connection_retried(tags.as_ref())
-            .await
-            .context("acquire_connection_retried()")?;
-        let elapsed = acquire_latency.observe();
-        if let Some(tags) = &tags {
-            CONNECTION_METRICS.acquire_tagged[&tags.requester].observe(elapsed);
-        }
-
-        Ok(StorageProcessor::<SM>::from_pool(
-            conn,
-            tags,
-            self.traced_connections.as_deref(),
-        ))
+    pub fn conn(&mut self) -> &mut PgConnection {
+        self.conn_and_tags().0
     }

-    async fn acquire_connection_retried(
-        &self,
-        tags: Option<&StorageProcessorTags>,
-    ) -> anyhow::Result<PoolConnection<Postgres>> {
-        const DB_CONNECTION_RETRIES: usize = 3;
-        const AVG_BACKOFF_INTERVAL: Duration = Duration::from_secs(1);
-
-        for _ in 0..DB_CONNECTION_RETRIES {
-            CONNECTION_METRICS
-                .pool_size
-                .observe(self.inner.size() as usize);
-            CONNECTION_METRICS.pool_idle.observe(self.inner.num_idle());
-
-            let connection = self.inner.acquire().await;
-            let connection_err = match connection {
-                Ok(connection) => return Ok(connection),
-                Err(err) => err,
-            };
-
-            Self::report_connection_error(&connection_err);
-            // Slightly randomize back-off interval so that we don't end up stampeding the DB.
-            let jitter = rand::thread_rng().gen_range(0.8..1.2);
-            let backoff_interval = AVG_BACKOFF_INTERVAL.mul_f32(jitter);
-            let tags_display = StorageProcessorTags::display(tags);
-            tracing::warn!(
-                "Failed to get connection to DB ({tags_display}), backing off for {backoff_interval:?}: {connection_err}"
-            );
-            tokio::time::sleep(backoff_interval).await;
-        }
-
-        // Attempting to get the pooled connection for the last time
-        match self.inner.acquire().await {
-            Ok(conn) => Ok(conn),
-            Err(err) => {
-                Self::report_connection_error(&err);
-                let tags_display = StorageProcessorTags::display(tags);
-                if let Some(traced_connections) = &self.traced_connections {
-                    anyhow::bail!(
-                        "Run out of retries getting a DB connection ({tags_display}), last error: {err}\n\
-                         Active connections: {traced_connections:#?}"
-                    );
-                } else {
-                    anyhow::bail!("Run out of retries getting a DB connection ({tags_display}), last error: {err}");
-                }
-            }
-        }
-    }
-
-    fn report_connection_error(err: &sqlx::Error) {
-        CONNECTION_METRICS.pool_acquire_error[&err.into()].inc();
+    pub fn conn_and_tags(&mut self) -> (&mut PgConnection, Option<&ConnectionTags>) {
+        match &mut self.inner {
+            ConnectionInner::Pooled(pooled) => (&mut pooled.connection, pooled.tags.as_ref()),
+            ConnectionInner::Transaction { transaction, tags } => (transaction, *tags),
+        }
     }
 }

 #[cfg(test)]
 mod tests {
-    use assert_matches::assert_matches;
+    use crate::{connection_pool::ConnectionPool, utils::InternalMarker};

-    use super::*;
-    use crate::utils::InternalMarker;
+    #[tokio::test]
+    async fn processor_tags_propagate_to_transactions() {
+        let pool = ConnectionPool::<InternalMarker>::constrained_test_pool(1).await;
+        let mut connection = pool.connection_tagged("test").await.unwrap();
+        assert!(!connection.in_transaction());
+        let original_tags = *connection.conn_and_tags().1.unwrap();
+        assert_eq!(original_tags.requester, "test");
+
+        let mut transaction = connection.start_transaction().await.unwrap();
+        let transaction_tags = *transaction.conn_and_tags().1.unwrap();
+        assert_eq!(transaction_tags, original_tags);
+    }
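The test above also doubles as documentation for the transaction flow on the renamed type. As a standalone sketch, assuming the `zksync_dal` re-exports used elsewhere in this diff:

```rust
use zksync_dal::{Connection, Core};

// A transaction is itself a `Connection<'_, Core>`, so DAL calls work on it
// unchanged; dropping it without `commit()` rolls the transaction back.
async fn atomic_update(conn: &mut Connection<'_, Core>) -> sqlx::Result<()> {
    let mut transaction = conn.start_transaction().await?;
    debug_assert!(transaction.in_transaction());
    // ... issue DAL calls on `transaction` here ...
    transaction.commit().await
}
```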
     #[tokio::test]
-    async fn setting_statement_timeout() {
-        let db_url = TestTemplate::empty()
-            .unwrap()
-            .create_db::<InternalMarker>(1)
-            .await
-            .unwrap()
-            .database_url;
+    async fn tracing_connections() {
+        let pool = ConnectionPool::<InternalMarker>::constrained_test_pool(1).await;
+        let connection = pool.connection_tagged("test").await.unwrap();
+        let traced = pool.traced_connections.as_deref().unwrap();
+        {
+            let traced = traced.connections.lock().unwrap();
+            assert_eq!(traced.len(), 1);
+            let tags = traced.values().next().unwrap().tags.unwrap();
+            assert_eq!(tags.requester, "test");
+            assert!(tags.location.file().contains("connection.rs"), "{tags:?}");
+        }
+        drop(connection);

-        let pool = ConnectionPool::<InternalMarker>::singleton(&db_url)
-            .set_statement_timeout(Some(Duration::from_secs(1)))
-            .build()
-            .await
-            .unwrap();
+        {
+            let traced = traced.connections.lock().unwrap();
+            assert!(traced.is_empty());
+        }

-        let mut storage = pool.access_storage().await.unwrap();
-        let err = sqlx::query("SELECT pg_sleep(2)")
-            .map(drop)
-            .fetch_optional(storage.conn())
-            .await
-            .unwrap_err();
-        assert_matches!(
-            err,
-            sqlx::Error::Database(db_err) if db_err.message().contains("statement timeout")
-        );
+        let _connection = pool.connection_tagged("test").await.unwrap();
+        let err = format!("{:?}", pool.connection().await.unwrap_err());
+        // Matching strings in error messages is an anti-pattern, but we really want to test DevEx here.
+        assert!(err.contains("Active connections"), "{err}");
+        assert!(err.contains("requested by `test`"), "{err}");
     }
 }
diff --git a/core/lib/db_connection/src/connection_pool.rs b/core/lib/db_connection/src/connection_pool.rs
new file mode 100644
index 00000000000..682d6176eeb
--- /dev/null
+++ b/core/lib/db_connection/src/connection_pool.rs
@@ -0,0 +1,468 @@
+use std::{
+    env, fmt,
+    future::Future,
+    marker::PhantomData,
+    panic::Location,
+    sync::{
+        atomic::{AtomicU64, Ordering},
+        Arc,
+    },
+    time::Duration,
+};
+
+use anyhow::Context as _;
+use rand::Rng;
+use sqlx::{
+    pool::PoolConnection,
+    postgres::{PgConnectOptions, PgPool, PgPoolOptions, Postgres},
+};
+
+use crate::{
+    connection::{Connection, ConnectionTags, DbMarker, TracedConnections},
+    metrics::CONNECTION_METRICS,
+};
+
+/// Builder for [`ConnectionPool`]s.
+#[derive(Clone)]
+pub struct ConnectionPoolBuilder<DB: DbMarker> {
+    database_url: String,
+    max_size: u32,
+    acquire_timeout: Duration,
+    statement_timeout: Option<Duration>,
+    _marker: PhantomData<DB>,
+}
+
+impl<DB: DbMarker> fmt::Debug for ConnectionPoolBuilder<DB> {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Database URL is potentially sensitive, thus we omit it.
+        formatter
+            .debug_struct("ConnectionPoolBuilder")
+            .field("max_size", &self.max_size)
+            .field("acquire_timeout", &self.acquire_timeout)
+            .field("statement_timeout", &self.statement_timeout)
+            .finish()
+    }
+}
+
+impl<DB: DbMarker> ConnectionPoolBuilder<DB> {
+    /// Overrides the maximum number of connections that can be allocated by the pool.
+    pub fn set_max_size(&mut self, max_size: u32) -> &mut Self {
+        self.max_size = max_size;
+        self
+    }
+
+    /// Sets the acquire timeout for a single connection attempt. There are multiple attempts (currently 3)
+    /// before `connection*` methods return an error. If not specified, the acquire timeout will not be set.
+    pub fn set_acquire_timeout(&mut self, timeout: Option<Duration>) -> &mut Self {
+        if let Some(timeout) = timeout {
+            self.acquire_timeout = timeout;
+        }
+        self
+    }
+
+    /// Sets the statement timeout for the pool. See [Postgres docs] for semantics.
+    /// If not specified, the statement timeout will not be set.
+    ///
+    /// [Postgres docs]: https://www.postgresql.org/docs/14/runtime-config-client.html
+    pub fn set_statement_timeout(&mut self, timeout: Option<Duration>) -> &mut Self {
+        self.statement_timeout = timeout;
+        self
+    }
+
+    /// Returns the maximum number of connections that can be allocated by the pool.
+    pub fn max_size(&self) -> u32 {
+        self.max_size
+    }
+
+    /// Builds a connection pool from this builder.
+    pub async fn build(&self) -> anyhow::Result<ConnectionPool<DB>> {
+        let options = PgPoolOptions::new()
+            .max_connections(self.max_size)
+            .acquire_timeout(self.acquire_timeout);
+        let mut connect_options: PgConnectOptions = self
+            .database_url
+            .parse()
+            .context("Failed parsing database URL")?;
+        if let Some(timeout) = self.statement_timeout {
+            let timeout_string = format!("{}s", timeout.as_secs());
+            connect_options = connect_options.options([("statement_timeout", timeout_string)]);
+        }
+        let pool = options
+            .connect_with(connect_options)
+            .await
+            .context("Failed connecting to database")?;
+        tracing::info!("Created DB pool with parameters {self:?}");
+        Ok(ConnectionPool {
+            database_url: self.database_url.clone(),
+            inner: pool,
+            max_size: self.max_size,
+            traced_connections: None,
+            _marker: Default::default(),
+        })
+    }
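For reference, the builder above is chained the way the binaries in this PR use it. A hedged sketch of the wiring (the URL and sizes are placeholders; in the real binaries they come from `PostgresConfig`):

```rust
use std::time::Duration;

use zksync_dal::{ConnectionPool, Core};

async fn make_pool(database_url: &str) -> anyhow::Result<ConnectionPool<Core>> {
    ConnectionPool::<Core>::builder(database_url, 50)
        .set_acquire_timeout(Some(Duration::from_secs(30)))
        .set_statement_timeout(Some(Duration::from_secs(10)))
        .build()
        .await
}
```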
+    /// Builds a connection pool that has a single connection.
+    pub async fn build_singleton(&self) -> anyhow::Result<ConnectionPool<DB>> {
+        let singleton_builder = Self {
+            database_url: self.database_url.clone(),
+            max_size: 1,
+            acquire_timeout: self.acquire_timeout,
+            statement_timeout: self.statement_timeout,
+            _marker: self._marker,
+        };
+        singleton_builder.build().await
+    }
+}
+
+#[derive(Debug)]
+pub struct TestTemplate(url::Url);
+
+impl TestTemplate {
+    fn db_name(&self) -> &str {
+        self.0.path().strip_prefix('/').unwrap()
+    }
+
+    fn url(&self, db_name: &str) -> url::Url {
+        let mut url = self.0.clone();
+        url.set_path(db_name);
+        url
+    }
+
+    async fn connect_to(db_url: &url::Url) -> sqlx::Result<sqlx::PgConnection> {
+        use sqlx::Connection as _;
+        let mut attempts = 20;
+        loop {
+            match sqlx::PgConnection::connect(db_url.as_ref()).await {
+                Ok(conn) => return Ok(conn),
+                Err(err) => {
+                    attempts -= 1;
+                    if attempts == 0 {
+                        return Err(err);
+                    }
+                }
+            }
+            tokio::time::sleep(Duration::from_millis(200)).await;
+        }
+    }
+
+    /// Obtains the test database URL from the environment variable.
+    pub fn empty() -> anyhow::Result<TestTemplate> {
+        let db_url = env::var("TEST_DATABASE_URL").context(
+            "TEST_DATABASE_URL must be set. Normally, this is done by the 'zk' tool. \
+            Make sure that you are running the tests with 'zk test rust' command or equivalent.",
+        )?;
+        Ok(Self(db_url.parse()?))
+    }
+
+    /// Closes the connection pool, disallows connecting to the underlying db,
+    /// so that the db can be used as a template.
+    pub async fn freeze<DB: DbMarker>(pool: ConnectionPool<DB>) -> anyhow::Result<Self> {
+        use sqlx::Executor as _;
+        let mut conn = pool.acquire_connection_retried(None).await?;
+        conn.execute(
+            "UPDATE pg_database SET datallowconn = false WHERE datname = current_database()",
+        )
+        .await
+        .context("SET dataallowconn = false")?;
+        drop(conn);
+        pool.inner.close().await;
+        Ok(Self(pool.database_url.parse()?))
+    }
+
+    /// Constructs a new temporary database (with a randomized name)
+    /// by cloning the database template pointed by TEST_DATABASE_URL env var.
+    /// The template is expected to have all migrations from dal/migrations applied.
+    /// For efficiency, the Postgres container of TEST_DATABASE_URL should be
+    /// configured with option "fsync=off" - it disables waiting for disk synchronization
+    /// whenever you write to the DBs, therefore making it as fast as an in-memory Postgres instance.
+    /// The database is not cleaned up automatically, but rather the whole Postgres
+    /// container is recreated whenever you call "zk test rust".
+    pub async fn create_db<DB: DbMarker>(
+        &self,
+        connections: u32,
+    ) -> anyhow::Result<ConnectionPoolBuilder<DB>> {
+        use sqlx::Executor as _;
+
+        let mut conn = Self::connect_to(&self.url(""))
+            .await
+            .context("connect_to()")?;
+        let db_old = self.db_name();
+        let db_new = format!("test-{}", rand::thread_rng().gen::<u64>());
+        conn.execute(format!("CREATE DATABASE \"{db_new}\" WITH TEMPLATE \"{db_old}\"").as_str())
+            .await
+            .context("CREATE DATABASE")?;
+
+        Ok(ConnectionPool::<DB>::builder(
+            self.url(&db_new).as_ref(),
+            connections,
+        ))
+    }
+}
+
+/// Global DB connection parameters applied to all [`ConnectionPool`] instances.
+#[derive(Debug)]
+pub struct GlobalConnectionPoolConfig {
+    // We consider millisecond precision to be enough for config purposes.
+    long_connection_threshold_ms: AtomicU64,
+    slow_query_threshold_ms: AtomicU64,
+}
+
+impl GlobalConnectionPoolConfig {
+    const fn new() -> Self {
+        Self {
+            long_connection_threshold_ms: AtomicU64::new(5_000), // 5 seconds
+            slow_query_threshold_ms: AtomicU64::new(100), // 0.1 seconds
+        }
+    }
+
+    pub(crate) fn long_connection_threshold(&self) -> Duration {
+        Duration::from_millis(self.long_connection_threshold_ms.load(Ordering::Relaxed))
+    }
+
+    pub(crate) fn slow_query_threshold(&self) -> Duration {
+        Duration::from_millis(self.slow_query_threshold_ms.load(Ordering::Relaxed))
+    }
+
+    /// Sets the threshold for the DB connection lifetime to denote a connection as long-living and log its details.
+    pub fn set_long_connection_threshold(&self, threshold: Duration) -> anyhow::Result<&Self> {
+        let millis = u64::try_from(threshold.as_millis())
+            .context("long_connection_threshold is unreasonably large")?;
+        self.long_connection_threshold_ms
+            .store(millis, Ordering::Relaxed);
+        tracing::info!("Set long connection threshold to {threshold:?}");
+        Ok(self)
+    }
+
+    /// Sets the threshold to denote a DB query as "slow" and log its details.
+    pub fn set_slow_query_threshold(&self, threshold: Duration) -> anyhow::Result<&Self> {
+        let millis = u64::try_from(threshold.as_millis())
+            .context("slow_query_threshold is unreasonably large")?;
+        self.slow_query_threshold_ms
+            .store(millis, Ordering::Relaxed);
+        tracing::info!("Set slow query threshold to {threshold:?}");
+        Ok(self)
+    }
+}
+
+#[derive(Clone)]
+pub struct ConnectionPool<DB: DbMarker> {
+    pub(crate) inner: PgPool,
+    database_url: String,
+    max_size: u32,
+    pub(crate) traced_connections: Option<Arc<TracedConnections>>,
+    _marker: PhantomData<DB>,
+}
+
+impl<DB: DbMarker> fmt::Debug for ConnectionPool<DB> {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // We don't print the `database_url`, as is may contain
+        // sensitive information (e.g. database password).
+        formatter
+            .debug_struct("ConnectionPool")
+            .field("max_size", &self.max_size)
+            .finish_non_exhaustive()
+    }
+}
+
+impl<DB: DbMarker> ConnectionPool<DB> {
+    const TEST_ACQUIRE_TIMEOUT: Duration = Duration::from_secs(10);
+
+    /// Returns a reference to the global configuration parameters applied for all DB pools. For consistency, these parameters
+    /// should be changed early in the app life cycle.
+    pub fn global_config() -> &'static GlobalConnectionPoolConfig {
+        static CONFIG: GlobalConnectionPoolConfig = GlobalConnectionPoolConfig::new();
+        &CONFIG
+    }
+
+    /// Creates a test pool with a reasonably large number of connections.
+    ///
+    /// Test pools trace their active connections. If acquiring a connection fails (e.g., with a timeout),
+    /// the returned error will contain information on all active connections.
+    pub async fn test_pool() -> ConnectionPool<DB> {
+        const DEFAULT_CONNECTIONS: u32 = 50; // Expected to be enough for any unit test.
+        Self::constrained_test_pool(DEFAULT_CONNECTIONS).await
+    }
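Since `global_config()` returns process-wide state, the setters above are meant to run once, early in the app life cycle. A usage sketch (the threshold values are arbitrary):

```rust
use std::time::Duration;

use zksync_dal::{ConnectionPool, Core};

// Tune when a connection counts as "long-living" and a query as "slow".
fn tune_db_observability() -> anyhow::Result<()> {
    ConnectionPool::<Core>::global_config()
        .set_long_connection_threshold(Duration::from_secs(10))?
        .set_slow_query_threshold(Duration::from_millis(250))?;
    Ok(())
}
```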
+    /// Same as [`Self::test_pool()`], but with a configurable number of connections. This is useful to test
+    /// behavior of components that rely on singleton / constrained pools in production.
+    pub async fn constrained_test_pool(connections: u32) -> ConnectionPool<DB> {
+        assert!(connections > 0, "Number of connections must be positive");
+        let mut builder = TestTemplate::empty()
+            .expect("failed creating test template")
+            .create_db(connections)
+            .await
+            .expect("failed creating database for tests");
+        let mut pool = builder
+            .set_acquire_timeout(Some(Self::TEST_ACQUIRE_TIMEOUT))
+            .build()
+            .await
+            .expect("cannot build connection pool");
+        pool.traced_connections = Some(Arc::default());
+        pool
+    }
+
+    /// Initializes a builder for connection pools.
+    pub fn builder(database_url: &str, max_pool_size: u32) -> ConnectionPoolBuilder<DB> {
+        ConnectionPoolBuilder {
+            database_url: database_url.to_string(),
+            max_size: max_pool_size,
+            acquire_timeout: Duration::from_secs(30), // Default value used by `sqlx`
+            statement_timeout: None,
+            _marker: Default::default(),
+        }
+    }
+
+    /// Initializes a builder for connection pools with a single connection. This is equivalent
+    /// to calling `Self::builder(db_url, 1)`.
+    pub fn singleton(database_url: &str) -> ConnectionPoolBuilder<DB> {
+        Self::builder(database_url, 1)
+    }
+
+    /// Returns the maximum number of connections in this pool specified during its creation.
+    /// This number may be distinct from the current number of connections in the pool (including
+    /// idle ones).
+    pub fn max_size(&self) -> u32 {
+        self.max_size
+    }
+
+    /// Creates a `Connection` entity over a recoverable connection.
+    /// Upon a database outage connection will block the thread until
+    /// it will be able to recover the connection (or, if connection cannot
+    /// be restored after several retries, this will be considered as
+    /// irrecoverable database error and result in panic).
+    ///
+    /// This method is intended to be used in crucial contexts, where the
+    /// database access is must-have (e.g. block committer).
+    pub async fn connection(&self) -> anyhow::Result<Connection<'_, DB>> {
+        self.connection_inner(None).await
+    }
+
+    /// A version of `connection` that would also expose the duration of the connection
+    /// acquisition tagged to the `requester` name. It also tracks the caller location for the purposes
+    /// of logging (e.g., long-living connections) and debugging (when used with a test connection pool).
+    ///
+    /// WARN: This method should not be used if it will result in too many time series (e.g.
+    /// from witness generators or provers), otherwise Prometheus won't be able to handle it.
+    #[track_caller] // In order to use it, we have to de-sugar `async fn`
+    pub fn connection_tagged(
+        &self,
+        requester: &'static str,
+    ) -> impl Future<Output = anyhow::Result<Connection<'_, DB>>> + '_ {
+        let location = Location::caller();
+        async move {
+            let tags = ConnectionTags {
+                requester,
+                location,
+            };
+            self.connection_inner(Some(tags)).await
+        }
+    }
+
+    async fn connection_inner(
+        &self,
+        tags: Option<ConnectionTags>,
+    ) -> anyhow::Result<Connection<'_, DB>> {
+        let acquire_latency = CONNECTION_METRICS.acquire.start();
+        let conn = self
+            .acquire_connection_retried(tags.as_ref())
+            .await
+            .context("acquire_connection_retried()")?;
+        let elapsed = acquire_latency.observe();
+        if let Some(tags) = &tags {
+            CONNECTION_METRICS.acquire_tagged[&tags.requester].observe(elapsed);
+        }
+
+        Ok(Connection::<DB>::from_pool(
+            conn,
+            tags,
+            self.traced_connections.as_deref(),
+        ))
+    }
+
+    async fn acquire_connection_retried(
+        &self,
+        tags: Option<&ConnectionTags>,
+    ) -> anyhow::Result<PoolConnection<Postgres>> {
+        const DB_CONNECTION_RETRIES: usize = 3;
+        const AVG_BACKOFF_INTERVAL: Duration = Duration::from_secs(1);
+
+        for _ in 0..DB_CONNECTION_RETRIES {
+            CONNECTION_METRICS
+                .pool_size
+                .observe(self.inner.size() as usize);
+            CONNECTION_METRICS.pool_idle.observe(self.inner.num_idle());
+
+            let connection = self.inner.acquire().await;
+            let connection_err = match connection {
+                Ok(connection) => return Ok(connection),
+                Err(err) => err,
+            };
+
+            Self::report_connection_error(&connection_err);
+            // Slightly randomize back-off interval so that we don't end up stampeding the DB.
+            let jitter = rand::thread_rng().gen_range(0.8..1.2);
+            let backoff_interval = AVG_BACKOFF_INTERVAL.mul_f32(jitter);
+            let tags_display = ConnectionTags::display(tags);
+            tracing::warn!(
+                "Failed to get connection to DB ({tags_display}), backing off for {backoff_interval:?}: {connection_err}"
+            );
+            tokio::time::sleep(backoff_interval).await;
+        }
+
+        // Attempting to get the pooled connection for the last time
+        match self.inner.acquire().await {
+            Ok(conn) => Ok(conn),
+            Err(err) => {
+                Self::report_connection_error(&err);
+                let tags_display = ConnectionTags::display(tags);
+                if let Some(traced_connections) = &self.traced_connections {
+                    anyhow::bail!(
+                        "Ran out of retries getting a DB connection ({tags_display}), last error: {err}\n\
+                         Active connections: {traced_connections:#?}"
+                    );
+                } else {
+                    anyhow::bail!("Ran out of retries getting a DB connection ({tags_display}), last error: {err}");
+                }
+            }
+        }
+    }
+
+    fn report_connection_error(err: &sqlx::Error) {
+        CONNECTION_METRICS.pool_acquire_error[&err.into()].inc();
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use assert_matches::assert_matches;
+
+    use super::*;
+    use crate::utils::InternalMarker;
+
+    #[tokio::test]
+    async fn setting_statement_timeout() {
+        let db_url = TestTemplate::empty()
+            .unwrap()
+            .create_db::<InternalMarker>(1)
+            .await
+            .unwrap()
+            .database_url;
+
+        let pool = ConnectionPool::<InternalMarker>::singleton(&db_url)
+            .set_statement_timeout(Some(Duration::from_secs(1)))
+            .build()
+            .await
+            .unwrap();
+
+        let mut storage = pool.connection().await.unwrap();
+        let err = sqlx::query("SELECT pg_sleep(2)")
+            .map(drop)
+            .fetch_optional(storage.conn())
+            .await
+            .unwrap_err();
+        assert_matches!(
+            err,
+            sqlx::Error::Database(db_err) if db_err.message().contains("statement timeout")
+        );
+    }
+}
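`connection_tagged` is the debugging-oriented twin of `connection()`: it stamps the requester name and caller location onto the connection, which then surfaces in the long-living-connection logs and the "Active connections" error shown above. A sketch; the `"eth_sender"` tag is an arbitrary example:

```rust
use zksync_dal::{ConnectionPool, Core};

async fn tagged_access(pool: &ConnectionPool<Core>) -> anyhow::Result<()> {
    // Thanks to `#[track_caller]`, failures and long-living connections are
    // reported as "requested by `eth_sender` at <file>:<line>".
    let _conn = pool.connection_tagged("eth_sender").await?;
    Ok(())
}
```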
diff --git a/core/lib/db_connection/src/healthcheck.rs b/core/lib/db_connection/src/healthcheck.rs
index cfc2a71c245..81be78a64f1 100644
--- a/core/lib/db_connection/src/healthcheck.rs
+++ b/core/lib/db_connection/src/healthcheck.rs
@@ -1,7 +1,7 @@
 use serde::Serialize;
 use zksync_health_check::{async_trait, CheckHealth, Health, HealthStatus};

-use crate::{connection::ConnectionPool, processor::StorageMarker};
+use crate::{connection::DbMarker, connection_pool::ConnectionPool};

 #[derive(Debug, Serialize)]
 struct ConnectionPoolHealthDetails {
@@ -10,7 +10,7 @@ struct ConnectionPoolHealthDetails {
 }

 impl ConnectionPoolHealthDetails {
-    fn new<SM: StorageMarker>(pool: &ConnectionPool<SM>) -> Self {
+    fn new<DB: DbMarker>(pool: &ConnectionPool<DB>) -> Self {
         Self {
             pool_size: pool.inner.size(),
             max_size: pool.max_size(),
@@ -22,18 +22,18 @@ impl ConnectionPoolHealthDetails {
 // This guarantees that the app can use its main "communication" channel.
 // Used in the /health endpoint
 #[derive(Clone, Debug)]
-pub struct ConnectionPoolHealthCheck<SM: StorageMarker> {
-    connection_pool: ConnectionPool<SM>,
+pub struct ConnectionPoolHealthCheck<DB: DbMarker> {
+    connection_pool: ConnectionPool<DB>,
 }

-impl<SM: StorageMarker> ConnectionPoolHealthCheck<SM> {
-    pub fn new(connection_pool: ConnectionPool<SM>) -> ConnectionPoolHealthCheck<SM> {
+impl<DB: DbMarker> ConnectionPoolHealthCheck<DB> {
+    pub fn new(connection_pool: ConnectionPool<DB>) -> ConnectionPoolHealthCheck<DB> {
         Self { connection_pool }
     }
 }

 #[async_trait]
-impl<SM: StorageMarker + Send + Sync> CheckHealth for ConnectionPoolHealthCheck<SM> {
+impl<DB: DbMarker + Send + Sync> CheckHealth for ConnectionPoolHealthCheck<DB> {
     fn name(&self) -> &'static str {
         "connection_pool"
     }
@@ -41,7 +41,7 @@ impl CheckHealth for ConnectionPoolHealthCheck {
     async fn check_health(&self) -> Health {
         // This check is rather feeble, plan to make reliable here:
         // https://linear.app/matterlabs/issue/PLA-255/revamp-db-connection-health-check
-        match self.connection_pool.access_storage().await {
+        match self.connection_pool.connection().await {
             Ok(_) => {
                 let details = ConnectionPoolHealthDetails::new(&self.connection_pool);
                 Health::from(HealthStatus::Ready).with_details(details)
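Wiring the renamed health check stays a one-liner; a sketch, under the assumption that callers construct it the same way as before the rename:

```rust
use zksync_db_connection::healthcheck::ConnectionPoolHealthCheck;
use zksync_dal::{ConnectionPool, Core};

// The check acquires a connection via the renamed `connection()` accessor.
fn db_health_check(pool: ConnectionPool<Core>) -> ConnectionPoolHealthCheck<Core> {
    ConnectionPoolHealthCheck::new(pool)
}
```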
diff --git a/core/lib/db_connection/src/instrument.rs b/core/lib/db_connection/src/instrument.rs
index 76517c06255..a441c69d927 100644
--- a/core/lib/db_connection/src/instrument.rs
+++ b/core/lib/db_connection/src/instrument.rs
@@ -21,9 +21,9 @@ use sqlx::{
 use tokio::time::Instant;

 use crate::{
-    connection::ConnectionPool,
+    connection::{Connection, ConnectionTags, DbMarker},
+    connection_pool::ConnectionPool,
     metrics::REQUEST_METRICS,
-    processor::{StorageMarker, StorageProcessor, StorageProcessorTags},
     utils::InternalMarker,
 };

@@ -121,7 +121,7 @@ impl<'a> InstrumentedData<'a> {
     async fn fetch<R>(
         self,
-        connection_tags: Option<&StorageProcessorTags>,
+        connection_tags: Option<&ConnectionTags>,
         query_future: impl Future<Output = Result<R, sqlx::Error>>,
     ) -> Result<R, sqlx::Error> {
         let Self {
@@ -142,7 +142,7 @@ impl<'a> InstrumentedData<'a> {
         let output = match output {
             Ok(output) => output,
             Err(_) => {
-                let connection_tags = StorageProcessorTags::display(connection_tags);
+                let connection_tags = ConnectionTags::display(connection_tags);
                 if slow_query_reporting_enabled {
                     tracing::warn!(
                         "Query {name}{args} called at {file}:{line} [{connection_tags}] is executing for more than {slow_query_threshold:?}",
@@ -161,7 +161,7 @@ impl<'a> InstrumentedData<'a> {
             REQUEST_METRICS.request[&name].observe(elapsed);
         }

-        let connection_tags = StorageProcessorTags::display(connection_tags);
+        let connection_tags = ConnectionTags::display(connection_tags);
         if let Err(err) = &output {
             tracing::warn!(
                 "Query {name}{args} called at {file}:{line} [{connection_tags}] has resulted in error: {err}",
@@ -222,18 +222,18 @@ where
     A: 'q + IntoArguments<'q, Postgres>,
 {
     /// Executes an SQL statement using this query.
-    pub async fn execute<SM: StorageMarker>(
+    pub async fn execute<DB: DbMarker>(
         self,
-        storage: &mut StorageProcessor<'_, SM>,
+        storage: &mut Connection<'_, DB>,
     ) -> sqlx::Result<PgQueryResult> {
         let (conn, tags) = storage.conn_and_tags();
         self.data.fetch(tags, self.query.execute(conn)).await
     }

     /// Fetches an optional row using this query.
-    pub async fn fetch_optional<SM: StorageMarker>(
+    pub async fn fetch_optional<DB: DbMarker>(
         self,
-        storage: &mut StorageProcessor<'_, SM>,
+        storage: &mut Connection<'_, DB>,
     ) -> Result<Option<PgRow>, sqlx::Error> {
         let (conn, tags) = storage.conn_and_tags();
         self.data.fetch(tags, self.query.fetch_optional(conn)).await
@@ -246,9 +246,9 @@ where
     O: Send + Unpin + for<'r> FromRow<'r, PgRow>,
 {
     /// Fetches all rows using this query and collects them into a `Vec`.
-    pub async fn fetch_all<SM: StorageMarker>(
+    pub async fn fetch_all<DB: DbMarker>(
         self,
-        storage: &mut StorageProcessor<'_, SM>,
+        storage: &mut Connection<'_, DB>,
     ) -> sqlx::Result<Vec<O>> {
         let (conn, tags) = storage.conn_and_tags();
         self.data.fetch(tags, self.query.fetch_all(conn)).await
@@ -262,27 +262,27 @@ where
     A: 'q + Send + IntoArguments<'q, Postgres>,
 {
     /// Fetches an optional row using this query.
-    pub async fn fetch_optional<SM: StorageMarker>(
+    pub async fn fetch_optional<DB: DbMarker>(
         self,
-        storage: &mut StorageProcessor<'_, SM>,
+        storage: &mut Connection<'_, DB>,
     ) -> sqlx::Result<Option<O>> {
         let (conn, tags) = storage.conn_and_tags();
         self.data.fetch(tags, self.query.fetch_optional(conn)).await
     }

     /// Fetches a single row using this query.
-    pub async fn fetch_one<SM: StorageMarker>(
+    pub async fn fetch_one<DB: DbMarker>(
         self,
-        storage: &mut StorageProcessor<'_, SM>,
+        storage: &mut Connection<'_, DB>,
     ) -> sqlx::Result<O> {
         let (conn, tags) = storage.conn_and_tags();
         self.data.fetch(tags, self.query.fetch_one(conn)).await
     }

     /// Fetches all rows using this query and collects them into a `Vec`.
-    pub async fn fetch_all<SM: StorageMarker>(
+    pub async fn fetch_all<DB: DbMarker>(
         self,
-        storage: &mut StorageProcessor<'_, SM>,
+        storage: &mut Connection<'_, DB>,
     ) -> sqlx::Result<Vec<O>> {
         let (conn, tags) = storage.conn_and_tags();
         self.data.fetch(tags, self.query.fetch_all(conn)).await
@@ -294,14 +294,14 @@ mod tests {
     use zksync_basic_types::{MiniblockNumber, H256};

     use super::*;
-    use crate::{connection::ConnectionPool, utils::InternalMarker};
+    use crate::{connection_pool::ConnectionPool, utils::InternalMarker};

     #[tokio::test]
     async fn instrumenting_erroneous_query() {
         let pool = ConnectionPool::<InternalMarker>::test_pool().await;
         // Add `vlog::init()` here to debug this test
-        let mut conn = pool.access_storage().await.unwrap();
+        let mut conn = pool.connection().await.unwrap();
         sqlx::query("WHAT")
             .map(drop)
             .instrument("erroneous")
@@ -317,7 +317,7 @@ mod tests {
         let pool = ConnectionPool::<InternalMarker>::test_pool().await;
         // Add `vlog::init()` here to debug this test
-        let mut conn = pool.access_storage().await.unwrap();
+        let mut conn = pool.connection().await.unwrap();
         sqlx::query("SELECT pg_sleep(1.5)")
             .map(drop)
             .instrument("slow")
diff --git a/core/lib/db_connection/src/lib.rs b/core/lib/db_connection/src/lib.rs
index 2e05648b2b7..04a6cf7ac10 100644
--- a/core/lib/db_connection/src/lib.rs
+++ b/core/lib/db_connection/src/lib.rs
@@ -1,8 +1,8 @@
 pub mod connection;
+pub mod connection_pool;
 pub mod healthcheck;
 pub mod instrument;
 pub mod metrics;
-pub mod processor;
 #[macro_use]
 pub mod macro_utils;
 pub mod utils;
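The instrumentation entry point these signatures serve is the `.instrument()` adapter shown in the tests above. A minimal sketch of a call site (the query string and name are placeholders; `with_arg`, used elsewhere in this crate's tests, is omitted here):

```rust
use zksync_db_connection::{connection::Connection, instrument::InstrumentExt};
use zksync_dal::Core;

// Naming the query feeds the per-query latency metrics; the connection's tags
// flow into the slow-query and error logs handled by `InstrumentedData::fetch`.
async fn ping(conn: &mut Connection<'_, Core>) -> sqlx::Result<()> {
    sqlx::query("SELECT 1")
        .map(drop)
        .instrument("ping")
        .fetch_optional(conn)
        .await?;
    Ok(())
}
```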
diff --git a/core/lib/db_connection/src/processor.rs b/core/lib/db_connection/src/processor.rs
deleted file mode 100644
index d0bf1e4eb38..00000000000
--- a/core/lib/db_connection/src/processor.rs
+++ /dev/null
@@ -1,262 +0,0 @@
-use std::{
-    collections::HashMap,
-    fmt,
-    panic::Location,
-    sync::{
-        atomic::{AtomicUsize, Ordering},
-        Mutex,
-    },
-    time::{Instant, SystemTime},
-};
-
-use sqlx::{pool::PoolConnection, types::chrono, Connection, PgConnection, Postgres, Transaction};
-
-use crate::{connection::ConnectionPool, metrics::CONNECTION_METRICS, utils::InternalMarker};
-
-/// Tags that can be associated with a connection.
-#[derive(Debug, Clone, Copy, PartialEq)]
-pub struct StorageProcessorTags {
-    pub requester: &'static str,
-    pub location: &'static Location<'static>,
-}
-
-impl StorageProcessorTags {
-    pub fn display(this: Option<&Self>) -> &(dyn fmt::Display + Send + Sync) {
-        this.map_or(&"not tagged", |tags| tags)
-    }
-}
-
-impl fmt::Display for StorageProcessorTags {
-    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(
-            formatter,
-            "requested by `{}` at {}:{}",
-            self.requester,
-            self.location.file(),
-            self.location.line()
-        )
-    }
-}
-
-struct TracedConnectionInfo {
-    tags: Option<StorageProcessorTags>,
-    created_at: Instant,
-}
-
-impl fmt::Debug for TracedConnectionInfo {
-    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let timestamp: chrono::DateTime<chrono::Utc> =
-            (SystemTime::now() - self.created_at.elapsed()).into();
-        let tags_display = StorageProcessorTags::display(self.tags.as_ref());
-        write!(formatter, "[{timestamp} - {tags_display}]")
-    }
-}
-
-/// Traced active connections for a connection pool.
-#[derive(Default)]
-pub struct TracedConnections {
-    connections: Mutex<HashMap<usize, TracedConnectionInfo>>,
-    next_id: AtomicUsize,
-}
-
-impl fmt::Debug for TracedConnections {
-    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        if let Ok(guard) = self.connections.lock() {
-            formatter.debug_set().entries(guard.values()).finish()
-        } else {
-            formatter.write_str("(poisoned)")
-        }
-    }
-}
-
-impl TracedConnections {
-    fn acquire(&self, tags: Option<StorageProcessorTags>, created_at: Instant) -> usize {
-        let id = self.next_id.fetch_add(1, Ordering::SeqCst);
-        let mut guard = self
-            .connections
-            .lock()
-            .expect("`TracedConnections` is poisoned");
-        let info = TracedConnectionInfo { tags, created_at };
-        guard.insert(id, info);
-        id
-    }
-
-    fn mark_as_dropped(&self, connection_id: usize) {
-        let mut guard = self
-            .connections
-            .lock()
-            .expect("`TracedConnections` is poisoned");
-        guard.remove(&connection_id);
-    }
-}
-
-struct PooledStorageProcessor<'a> {
-    connection: PoolConnection<Postgres>,
-    tags: Option<StorageProcessorTags>,
-    created_at: Instant,
-    traced: Option<(&'a TracedConnections, usize)>,
-}
-
-impl fmt::Debug for PooledStorageProcessor<'_> {
-    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        formatter
-            .debug_struct("PooledStorageProcessor")
-            .field("tags", &self.tags)
-            .field("created_at", &self.created_at)
-            .finish_non_exhaustive()
-    }
-}
-
-impl Drop for PooledStorageProcessor<'_> {
-    fn drop(&mut self) {
-        if let Some(tags) = &self.tags {
-            let lifetime = self.created_at.elapsed();
-            CONNECTION_METRICS.lifetime[&tags.requester].observe(lifetime);
-
-            if lifetime
-                > ConnectionPool::<InternalMarker>::global_config().long_connection_threshold()
-            {
-                let file = tags.location.file();
-                let line = tags.location.line();
-                tracing::info!(
-                    "Long-living connection for `{}` created at {file}:{line}: {lifetime:?}",
-                    tags.requester
-                );
-            }
-        }
-        if let Some((connections, id)) = self.traced {
-            connections.mark_as_dropped(id);
-        }
-    }
-}
-
-#[derive(Debug)]
-enum StorageProcessorInner<'a> {
-    Pooled(PooledStorageProcessor<'a>),
-    Transaction {
-        transaction: Transaction<'a, Postgres>,
-        tags: Option<&'a StorageProcessorTags>,
-    },
-}
as a storage marker. -pub trait StorageMarker {} - -/// Storage processor is the main storage interaction point. -/// It holds down the connection (either direct or pooled) to the database -/// and provide methods to obtain different storage schema. -#[derive(Debug)] -pub struct StorageProcessor<'a, SM: StorageMarker> { - inner: StorageProcessorInner<'a>, - _marker: std::marker::PhantomData, -} - -impl<'a, SM: StorageMarker> StorageProcessor<'a, SM> { - /// Creates a `StorageProcessor` using a pool of connections. - /// This method borrows one of the connections from the pool, and releases it - /// after `drop`. - pub(crate) fn from_pool( - connection: PoolConnection, - tags: Option, - traced_connections: Option<&'a TracedConnections>, - ) -> Self { - let created_at = Instant::now(); - let inner = StorageProcessorInner::Pooled(PooledStorageProcessor { - connection, - tags, - created_at, - traced: traced_connections.map(|connections| { - let id = connections.acquire(tags, created_at); - (connections, id) - }), - }); - Self { - inner, - _marker: Default::default(), - } - } - - pub async fn start_transaction(&mut self) -> sqlx::Result> { - let (conn, tags) = self.conn_and_tags(); - let inner = StorageProcessorInner::Transaction { - transaction: conn.begin().await?, - tags, - }; - Ok(StorageProcessor { - inner, - _marker: Default::default(), - }) - } - - /// Checks if the `StorageProcessor` is currently within database transaction. - pub fn in_transaction(&self) -> bool { - matches!(self.inner, StorageProcessorInner::Transaction { .. }) - } - - pub async fn commit(self) -> sqlx::Result<()> { - if let StorageProcessorInner::Transaction { - transaction: postgres, - .. - } = self.inner - { - postgres.commit().await - } else { - panic!("StorageProcessor::commit can only be invoked after calling StorageProcessor::begin_transaction"); - } - } - - pub fn conn(&mut self) -> &mut PgConnection { - self.conn_and_tags().0 - } - - pub fn conn_and_tags(&mut self) -> (&mut PgConnection, Option<&StorageProcessorTags>) { - match &mut self.inner { - StorageProcessorInner::Pooled(pooled) => (&mut pooled.connection, pooled.tags.as_ref()), - StorageProcessorInner::Transaction { transaction, tags } => (transaction, *tags), - } - } -} - -#[cfg(test)] -mod tests { - use crate::{connection::ConnectionPool, utils::InternalMarker}; - - #[tokio::test] - async fn processor_tags_propagate_to_transactions() { - let pool = ConnectionPool::::constrained_test_pool(1).await; - let mut connection = pool.access_storage_tagged("test").await.unwrap(); - assert!(!connection.in_transaction()); - let original_tags = *connection.conn_and_tags().1.unwrap(); - assert_eq!(original_tags.requester, "test"); - - let mut transaction = connection.start_transaction().await.unwrap(); - let transaction_tags = *transaction.conn_and_tags().1.unwrap(); - assert_eq!(transaction_tags, original_tags); - } - - #[tokio::test] - async fn tracing_connections() { - let pool = ConnectionPool::::constrained_test_pool(1).await; - let connection = pool.access_storage_tagged("test").await.unwrap(); - let traced = pool.traced_connections.as_deref().unwrap(); - { - let traced = traced.connections.lock().unwrap(); - assert_eq!(traced.len(), 1); - let tags = traced.values().next().unwrap().tags.unwrap(); - assert_eq!(tags.requester, "test"); - assert!(tags.location.file().contains("processor.rs"), "{tags:?}"); - } - drop(connection); - - { - let traced = traced.connections.lock().unwrap(); - assert!(traced.is_empty()); - } - - let _connection = 
pool.access_storage_tagged("test").await.unwrap(); - let err = format!("{:?}", pool.access_storage().await.unwrap_err()); - // Matching strings in error messages is an anti-pattern, but we really want to test DevEx here. - assert!(err.contains("Active connections"), "{err}"); - assert!(err.contains("requested by `test`"), "{err}"); - } -} diff --git a/core/lib/db_connection/src/utils.rs b/core/lib/db_connection/src/utils.rs index bed4f0ffea6..2b067dea2f0 100644 --- a/core/lib/db_connection/src/utils.rs +++ b/core/lib/db_connection/src/utils.rs @@ -2,12 +2,12 @@ use std::time::Duration; use sqlx::{postgres::types::PgInterval, types::chrono::NaiveTime}; -use crate::processor::StorageMarker; +use crate::connection::DbMarker; #[derive(Debug)] pub(crate) struct InternalMarker; -impl StorageMarker for InternalMarker {} +impl DbMarker for InternalMarker {} pub fn duration_to_naive_time(duration: Duration) -> NaiveTime { let total_seconds = duration.as_secs() as u32; diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs index 97a344e44ce..181f4aec9d6 100644 --- a/core/lib/snapshots_applier/src/lib.rs +++ b/core/lib/snapshots_applier/src/lib.rs @@ -6,7 +6,7 @@ use anyhow::Context as _; use async_trait::async_trait; use serde::Serialize; use tokio::sync::Semaphore; -use zksync_dal::{ConnectionPool, Server, ServerDals, SqlxError, StorageProcessor}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, SqlxError}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_object_store::{ObjectStore, ObjectStoreError}; use zksync_types::{ @@ -186,7 +186,7 @@ impl SnapshotsApplierConfig { /// - Storage contains at least one L1 batch pub async fn run( self, - connection_pool: &ConnectionPool, + connection_pool: &ConnectionPool, main_node_client: &dyn SnapshotsApplierMainNodeClient, blob_store: &dyn ObjectStore, ) -> anyhow::Result<()> { @@ -234,7 +234,7 @@ impl SnapshotsApplierConfig { /// Applying application-level storage snapshots to the Postgres storage. #[derive(Debug)] struct SnapshotsApplier<'a> { - connection_pool: &'a ConnectionPool, + connection_pool: &'a ConnectionPool, main_node_client: &'a dyn SnapshotsApplierMainNodeClient, blob_store: &'a dyn ObjectStore, applied_snapshot_status: SnapshotRecoveryStatus, @@ -246,7 +246,7 @@ struct SnapshotsApplier<'a> { impl<'a> SnapshotsApplier<'a> { /// Recovers [`SnapshotRecoveryStatus`] from the storage and the main node. 
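// A minimal illustrative sketch of the calling convention after this rename,
// assuming the `zksync_dal` re-exports introduced in this PR (`Connection`,
// `ConnectionPool`, `Core`, `CoreDal`); the tag string mirrors the applier code below.
async fn tagged_connection_example(
    pool: &zksync_dal::ConnectionPool<zksync_dal::Core>,
) -> anyhow::Result<()> {
    // `connection_tagged` replaces the old `access_storage_tagged`; the tag shows
    // up in connection metrics and in "long-living connection" log messages.
    let mut storage = pool.connection_tagged("snapshots_applier").await?;
    // Transactions keep the `start_transaction` / `commit` pattern.
    let transaction = storage.start_transaction().await?;
    transaction.commit().await?;
    Ok(())
}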
async fn prepare_applied_snapshot_status( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, main_node_client: &dyn SnapshotsApplierMainNodeClient, ) -> Result<(SnapshotRecoveryStatus, bool), SnapshotsApplierError> { let latency = @@ -307,7 +307,7 @@ impl<'a> SnapshotsApplier<'a> { } async fn load_snapshot( - connection_pool: &'a ConnectionPool, + connection_pool: &'a ConnectionPool, main_node_client: &'a dyn SnapshotsApplierMainNodeClient, blob_store: &'a dyn ObjectStore, health_updater: &'a HealthUpdater, @@ -315,7 +315,7 @@ impl<'a> SnapshotsApplier<'a> { health_updater.update(HealthStatus::Ready.into()); let mut storage = connection_pool - .access_storage_tagged("snapshots_applier") + .connection_tagged("snapshots_applier") .await?; let mut storage_transaction = storage.start_transaction().await.map_err(|err| { SnapshotsApplierError::db(err, "failed starting initial DB transaction") @@ -428,7 +428,7 @@ impl<'a> SnapshotsApplier<'a> { async fn recover_factory_deps( &mut self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, ) -> Result<(), SnapshotsApplierError> { let latency = METRICS.initial_stage_duration[&InitialStage::ApplyFactoryDeps].start(); @@ -472,7 +472,7 @@ impl<'a> SnapshotsApplier<'a> { &self, chunk_id: u64, storage_logs: &[SnapshotStorageLog], - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, ) -> Result<(), SnapshotsApplierError> { storage .storage_logs_dedup_dal() @@ -490,7 +490,7 @@ impl<'a> SnapshotsApplier<'a> { &self, chunk_id: u64, storage_logs: &[SnapshotStorageLog], - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, ) -> Result<(), SnapshotsApplierError> { storage .storage_logs_dal() @@ -542,7 +542,7 @@ impl<'a> SnapshotsApplier<'a> { let mut storage = self .connection_pool - .access_storage_tagged("snapshots_applier") + .connection_tagged("snapshots_applier") .await?; let mut storage_transaction = storage.start_transaction().await.map_err(|err| { let context = format!("cannot start DB transaction for storage logs chunk {chunk_id}"); @@ -608,7 +608,7 @@ impl<'a> SnapshotsApplier<'a> { let mut storage = self .connection_pool - .access_storage_tagged("snapshots_applier") + .connection_tagged("snapshots_applier") .await?; // This DB query is slow, but this is fine for verification purposes. let total_log_count = storage @@ -643,7 +643,7 @@ impl<'a> SnapshotsApplier<'a> { // Check whether tokens are already recovered. 
let mut storage = self .connection_pool - .access_storage_tagged("snapshots_applier") + .connection_tagged("snapshots_applier") .await?; let all_token_addresses = storage .tokens_dal() @@ -670,7 +670,7 @@ impl<'a> SnapshotsApplier<'a> { let l2_addresses = tokens.iter().map(|token| token.l2_address); let mut storage = self .connection_pool - .access_storage_tagged("snapshots_applier") + .connection_tagged("snapshots_applier") .await?; let filtered_addresses = storage .storage_logs_dal() diff --git a/core/lib/snapshots_applier/src/tests/mod.rs b/core/lib/snapshots_applier/src/tests/mod.rs index c721c0daaf1..5029b5e60ee 100644 --- a/core/lib/snapshots_applier/src/tests/mod.rs +++ b/core/lib/snapshots_applier/src/tests/mod.rs @@ -24,9 +24,9 @@ async fn snapshots_creator_can_successfully_recover_db( with_object_store_errors: bool, ) { let pool = if let Some(pool_size) = pool_size { - ConnectionPool::::constrained_test_pool(pool_size).await + ConnectionPool::::constrained_test_pool(pool_size).await } else { - ConnectionPool::::test_pool().await + ConnectionPool::::test_pool().await }; let expected_status = mock_recovery_status(); let storage_logs = random_storage_logs(expected_status.l1_batch_number, 200); @@ -56,7 +56,7 @@ async fn snapshots_creator_can_successfully_recover_db( .await .unwrap(); - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); let mut recovery_dal = storage.snapshot_recovery_dal(); let current_db_status = recovery_dal.get_applied_snapshot_status().await.unwrap(); @@ -98,10 +98,10 @@ async fn snapshots_creator_can_successfully_recover_db( #[tokio::test] async fn applier_errors_after_genesis() { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; // We don't want to depend on the core crate, so instead we cheaply emulate it. 
- let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); storage .protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -154,7 +154,7 @@ async fn applier_errors_after_genesis() { #[tokio::test] async fn applier_errors_without_snapshots() { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let object_store_factory = ObjectStoreFactory::mock(); let object_store = object_store_factory.create_store().await; let client = MockMainNodeClient::default(); @@ -167,7 +167,7 @@ async fn applier_errors_without_snapshots() { #[tokio::test] async fn applier_returns_error_on_fatal_object_store_error() { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let expected_status = mock_recovery_status(); let storage_logs = random_storage_logs(expected_status.l1_batch_number, 100); let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; @@ -189,7 +189,7 @@ async fn applier_returns_error_on_fatal_object_store_error() { #[tokio::test] async fn applier_returns_error_after_too_many_object_store_retries() { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let expected_status = mock_recovery_status(); let storage_logs = random_storage_logs(expected_status.l1_batch_number, 100); let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; @@ -211,7 +211,7 @@ async fn applier_returns_error_after_too_many_object_store_retries() { #[tokio::test] async fn recovering_tokens() { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let expected_status = mock_recovery_status(); let tokens = mock_tokens(); let mut storage_logs = random_storage_logs(expected_status.l1_batch_number, 200); @@ -235,7 +235,7 @@ async fn recovering_tokens() { .unwrap(); // Check that tokens are successfully restored. - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); let recovered_tokens = storage .tokens_web3_dal() .get_all_tokens(None) diff --git a/core/lib/state/src/postgres/mod.rs b/core/lib/state/src/postgres/mod.rs index 6e48124c002..958434742c0 100644 --- a/core/lib/state/src/postgres/mod.rs +++ b/core/lib/state/src/postgres/mod.rs @@ -11,7 +11,7 @@ use tokio::{ watch, }, }; -use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_types::{L1BatchNumber, MiniblockNumber, StorageKey, StorageValue, H256}; use self::metrics::{Method, ValuesUpdateStage, CACHE_METRICS, STORAGE_METRICS}; @@ -150,7 +150,7 @@ impl ValuesCache { &self, from_miniblock: MiniblockNumber, to_miniblock: MiniblockNumber, - connection: &mut StorageProcessor<'_, Server>, + connection: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { const MAX_MINIBLOCKS_LAG: u32 = 5; @@ -295,7 +295,7 @@ impl PostgresStorageCaches { pub fn configure_storage_values_cache( &mut self, capacity: u64, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, ) -> PostgresStorageCachesTask { assert!( capacity > 0, @@ -344,7 +344,7 @@ impl PostgresStorageCaches { /// An asynchronous task that updates the VM storage values cache. 
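// A hedged sketch of wiring up the values cache with the renamed pool type.
// `PostgresStorageCaches::new` and `configure_storage_values_cache` are used the
// same way in the tests below; treating the returned task as exposing an async
// `run(stop_receiver)` entry point is an assumption here.
async fn start_values_cache(
    pool: zksync_dal::ConnectionPool<zksync_dal::Core>,
) -> anyhow::Result<()> {
    let (_stop_sender, stop_receiver) = tokio::sync::watch::channel(false);
    // First argument: factory deps cache capacity; second: initial writes cache.
    let mut caches = zksync_state::PostgresStorageCaches::new(128 * 1_024 * 1_024, 1_024);
    // A non-zero capacity enables the VM values cache; the returned task keeps it
    // in sync with newly sealed miniblocks.
    let task = caches.configure_storage_values_cache(1_024 * 1_024, pool);
    tokio::spawn(task.run(stop_receiver));
    Ok(())
}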
#[derive(Debug)] pub struct PostgresStorageCachesTask { - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, values_cache: ValuesCache, command_receiver: UnboundedReceiver, } @@ -369,7 +369,7 @@ impl PostgresStorageCachesTask { } let mut connection = self .connection_pool - .access_storage_tagged("values_cache_updater") + .connection_tagged("values_cache_updater") .await?; self.values_cache .update(current_miniblock, to_miniblock, &mut connection) @@ -391,7 +391,7 @@ impl PostgresStorageCachesTask { #[derive(Debug)] pub struct PostgresStorage<'a> { rt_handle: Handle, - connection: StorageProcessor<'a, Server>, + connection: Connection<'a, Core>, miniblock_number: MiniblockNumber, l1_batch_number_for_miniblock: L1BatchNumber, pending_l1_batch_number: L1BatchNumber, @@ -407,7 +407,7 @@ impl<'a> PostgresStorage<'a> { /// Panics on Postgres errors. pub fn new( rt_handle: Handle, - connection: StorageProcessor<'a, Server>, + connection: Connection<'a, Core>, block_number: MiniblockNumber, consider_new_l1_batch: bool, ) -> Self { @@ -429,7 +429,7 @@ impl<'a> PostgresStorage<'a> { /// Propagates Postgres errors. pub async fn new_async( rt_handle: Handle, - mut connection: StorageProcessor<'a, Server>, + mut connection: Connection<'a, Core>, block_number: MiniblockNumber, consider_new_l1_batch: bool, ) -> anyhow::Result> { diff --git a/core/lib/state/src/postgres/tests.rs b/core/lib/state/src/postgres/tests.rs index 8dc8f439128..17638cf89bd 100644 --- a/core/lib/state/src/postgres/tests.rs +++ b/core/lib/state/src/postgres/tests.rs @@ -14,11 +14,11 @@ use super::*; use crate::test_utils::{create_l1_batch, create_miniblock, gen_storage_logs, prepare_postgres}; fn test_postgres_storage_basics( - pool: &ConnectionPool, + pool: &ConnectionPool, rt_handle: Handle, cache_initial_writes: bool, ) { - let mut connection = rt_handle.block_on(pool.access_storage()).unwrap(); + let mut connection = rt_handle.block_on(pool.connection()).unwrap(); rt_handle.block_on(prepare_postgres(&mut connection)); let mut storage = PostgresStorage::new(rt_handle, connection, MiniblockNumber(0), true); if cache_initial_writes { @@ -126,7 +126,7 @@ fn test_postgres_storage_basics( #[tokio::test] async fn postgres_storage_basics() { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; tokio::task::spawn_blocking(move || { test_postgres_storage_basics(&pool, Handle::current(), false); }) @@ -136,7 +136,7 @@ async fn postgres_storage_basics() { #[tokio::test] async fn postgres_storage_with_initial_writes_cache() { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; tokio::task::spawn_blocking(move || { test_postgres_storage_basics(&pool, Handle::current(), true); }) @@ -145,11 +145,11 @@ async fn postgres_storage_with_initial_writes_cache() { } fn test_postgres_storage_after_sealing_miniblock( - pool: &ConnectionPool, + pool: &ConnectionPool, rt_handle: Handle, consider_new_l1_batch: bool, ) { - let mut connection = rt_handle.block_on(pool.access_storage()).unwrap(); + let mut connection = rt_handle.block_on(pool.connection()).unwrap(); rt_handle.block_on(prepare_postgres(&mut connection)); let new_logs = gen_storage_logs(20..30); @@ -192,7 +192,7 @@ fn test_postgres_storage_after_sealing_miniblock( #[tokio::test] async fn postgres_storage_after_sealing_miniblock() { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; tokio::task::spawn_blocking(move || { 
println!("Considering new L1 batch"); test_postgres_storage_after_sealing_miniblock(&pool, Handle::current(), true); @@ -203,8 +203,8 @@ async fn postgres_storage_after_sealing_miniblock() { .unwrap(); } -fn test_factory_deps_cache(pool: &ConnectionPool, rt_handle: Handle) { - let mut connection = rt_handle.block_on(pool.access_storage()).unwrap(); +fn test_factory_deps_cache(pool: &ConnectionPool, rt_handle: Handle) { + let mut connection = rt_handle.block_on(pool.connection()).unwrap(); rt_handle.block_on(prepare_postgres(&mut connection)); let caches = PostgresStorageCaches::new(128 * 1_024 * 1_024, 1_024); @@ -248,15 +248,15 @@ fn test_factory_deps_cache(pool: &ConnectionPool, rt_handle: Handle) { #[tokio::test] async fn using_factory_deps_cache() { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let handle = Handle::current(); tokio::task::spawn_blocking(move || test_factory_deps_cache(&pool, handle)) .await .unwrap(); } -fn test_initial_writes_cache(pool: &ConnectionPool, rt_handle: Handle) { - let connection = rt_handle.block_on(pool.access_storage()).unwrap(); +fn test_initial_writes_cache(pool: &ConnectionPool, rt_handle: Handle) { + let connection = rt_handle.block_on(pool.connection()).unwrap(); let caches = PostgresStorageCaches::new(1_024, 4 * 1_024 * 1_024); let mut storage = PostgresStorage::new(rt_handle, connection, MiniblockNumber(0), false) .with_caches(caches.clone()); @@ -354,7 +354,7 @@ fn test_initial_writes_cache(pool: &ConnectionPool, rt_handle: Handle) { #[tokio::test] async fn using_initial_writes_cache() { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let handle = Handle::current(); tokio::task::spawn_blocking(move || test_initial_writes_cache(&pool, handle)) .await @@ -384,7 +384,7 @@ impl ValuesCache { } } -fn test_values_cache(pool: &ConnectionPool, rt_handle: Handle) { +fn test_values_cache(pool: &ConnectionPool, rt_handle: Handle) { let mut caches = PostgresStorageCaches::new(1_024, 1_024); let _ = caches.configure_storage_values_cache(1_024 * 1_024, pool.clone()); // We cannot use an update task since it requires having concurrent DB connections @@ -393,7 +393,7 @@ fn test_values_cache(pool: &ConnectionPool, rt_handle: Handle) { let old_miniblock_assertions = values_cache.assertions(MiniblockNumber(0)); let new_miniblock_assertions = values_cache.assertions(MiniblockNumber(1)); - let mut connection = rt_handle.block_on(pool.access_storage()).unwrap(); + let mut connection = rt_handle.block_on(pool.connection()).unwrap(); rt_handle.block_on(prepare_postgres(&mut connection)); let mut storage = PostgresStorage::new(rt_handle, connection, MiniblockNumber(0), false) @@ -499,7 +499,7 @@ fn test_values_cache(pool: &ConnectionPool, rt_handle: Handle) { #[tokio::test] async fn using_values_cache() { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let handle = Handle::current(); tokio::task::spawn_blocking(move || test_values_cache(&pool, handle)) .await @@ -510,14 +510,14 @@ async fn using_values_cache() { /// on randomly generated `read_value()` queries. 
fn mini_fuzz_values_cache_inner( rng: &mut impl Rng, - pool: &ConnectionPool, + pool: &ConnectionPool, mut rt_handle: Handle, ) { let mut caches = PostgresStorageCaches::new(1_024, 1_024); let _ = caches.configure_storage_values_cache(1_024 * 1_024, pool.clone()); let values_cache = caches.values.as_ref().unwrap().cache.clone(); - let mut connection = rt_handle.block_on(pool.access_storage()).unwrap(); + let mut connection = rt_handle.block_on(pool.connection()).unwrap(); rt_handle.block_on(prepare_postgres(&mut connection)); let queried_keys: Vec<_> = gen_storage_logs(0..100) @@ -596,7 +596,7 @@ fn mini_fuzz_values_cache_inner( #[tokio::test] async fn mini_fuzz_values_cache() { const RNG_SEED: u64 = 123; - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let handle = Handle::current(); let mut rng = StdRng::seed_from_u64(RNG_SEED); diff --git a/core/lib/state/src/rocksdb/mod.rs b/core/lib/state/src/rocksdb/mod.rs index 3b9c8f78c2b..6ecb521582f 100644 --- a/core/lib/state/src/rocksdb/mod.rs +++ b/core/lib/state/src/rocksdb/mod.rs @@ -30,7 +30,7 @@ use std::{ use anyhow::Context as _; use itertools::{Either, Itertools}; use tokio::sync::watch; -use zksync_dal::{Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, Core, CoreDal}; use zksync_storage::{db::NamedColumnFamily, RocksDB}; use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256, U256}; use zksync_utils::{h256_to_u256, u256_to_h256}; @@ -165,7 +165,7 @@ impl RocksbStorageBuilder { /// in Postgres. pub async fn synchronize( self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, stop_receiver: &watch::Receiver, ) -> anyhow::Result> { let mut inner = self.0; @@ -183,7 +183,7 @@ impl RocksbStorageBuilder { /// Propagates RocksDB and Postgres errors. 
pub async fn rollback( mut self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, last_l1_batch_to_keep: L1BatchNumber, ) -> anyhow::Result<()> { self.0.rollback(storage, last_l1_batch_to_keep).await @@ -230,7 +230,7 @@ impl RocksdbStorage { async fn update_from_postgres( &mut self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, stop_receiver: &watch::Receiver, ) -> Result<(), RocksdbSyncError> { let mut current_l1_batch_number = self @@ -316,7 +316,7 @@ impl RocksdbStorage { async fn apply_storage_logs( &mut self, storage_logs: HashMap, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { let db = self.db.clone(); let processed_logs = @@ -357,7 +357,7 @@ impl RocksdbStorage { async fn save_missing_enum_indices( &self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { let (true, Some(start_from)) = ( self.enum_index_migration_chunk_size > 0, @@ -481,7 +481,7 @@ impl RocksdbStorage { async fn rollback( &mut self, - connection: &mut StorageProcessor<'_, Server>, + connection: &mut Connection<'_, Core>, last_l1_batch_to_keep: L1BatchNumber, ) -> anyhow::Result<()> { tracing::info!("Rolling back state keeper storage to L1 batch #{last_l1_batch_to_keep}..."); diff --git a/core/lib/state/src/rocksdb/recovery.rs b/core/lib/state/src/rocksdb/recovery.rs index d26c32bc5f5..c3e286b107c 100644 --- a/core/lib/state/src/rocksdb/recovery.rs +++ b/core/lib/state/src/rocksdb/recovery.rs @@ -4,7 +4,7 @@ use std::ops; use anyhow::Context as _; use tokio::sync::watch; -use zksync_dal::{storage_logs_dal::StorageRecoveryLogEntry, Server, ServerDals, StorageProcessor}; +use zksync_dal::{storage_logs_dal::StorageRecoveryLogEntry, Connection, Core, CoreDal}; use zksync_types::{ snapshots::{uniform_hashed_keys_chunk, SnapshotRecoveryStatus}, L1BatchNumber, MiniblockNumber, H256, @@ -30,7 +30,7 @@ impl RocksdbStorage { /// Returns the next L1 batch that should be fed to the storage. pub(super) async fn ensure_ready( &mut self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, desired_log_chunk_size: u64, stop_receiver: &watch::Receiver, ) -> Result { @@ -65,7 +65,7 @@ impl RocksdbStorage { /// (it would be considered complete even if it failed in the middle). async fn recover_from_snapshot( &mut self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, snapshot_recovery: &SnapshotRecoveryStatus, desired_log_chunk_size: u64, stop_receiver: &watch::Receiver, @@ -140,7 +140,7 @@ impl RocksdbStorage { async fn recover_factory_deps( &mut self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, snapshot_recovery: &SnapshotRecoveryStatus, ) -> anyhow::Result<()> { // We don't expect that many factory deps; that's why we recover factory deps in any case. 
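// A minimal sketch of the catch-up flow exercised by the tests below: open the
// RocksDB cache and replay committed batches from Postgres into it. Error handling
// is simplified; per the builder API above, `synchronize` yields `None` if the
// stop signal fires mid-way.
async fn catch_up_rocksdb(
    path: &std::path::Path,
    storage: &mut zksync_dal::Connection<'_, zksync_dal::Core>,
) -> anyhow::Result<()> {
    let (_stop_sender, stop_receiver) = tokio::sync::watch::channel(false);
    let builder = zksync_state::RocksdbStorage::builder(path).await?;
    // `synchronize` consumes the builder and returns the synced storage.
    let _rocksdb = builder.synchronize(storage, &stop_receiver).await?;
    Ok(())
}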
@@ -169,7 +169,7 @@ impl RocksdbStorage { } async fn load_key_chunks( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, snapshot_recovery: &SnapshotRecoveryStatus, desired_log_chunk_size: u64, ) -> anyhow::Result> { @@ -219,7 +219,7 @@ impl RocksdbStorage { async fn recover_logs_chunk( &mut self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, snapshot_miniblock: MiniblockNumber, key_chunk: ops::RangeInclusive, ) -> anyhow::Result<()> { diff --git a/core/lib/state/src/rocksdb/tests.rs b/core/lib/state/src/rocksdb/tests.rs index 52f35e6f58b..7027f7672a4 100644 --- a/core/lib/state/src/rocksdb/tests.rs +++ b/core/lib/state/src/rocksdb/tests.rs @@ -5,7 +5,7 @@ use std::fmt; use assert_matches::assert_matches; use tempfile::TempDir; use test_casing::test_casing; -use zksync_dal::{ConnectionPool, Server}; +use zksync_dal::{ConnectionPool, Core}; use zksync_types::{MiniblockNumber, StorageLog}; use super::*; @@ -76,10 +76,7 @@ async fn rocksdb_storage_basics() { } } -async fn sync_test_storage( - dir: &TempDir, - conn: &mut StorageProcessor<'_, Server>, -) -> RocksdbStorage { +async fn sync_test_storage(dir: &TempDir, conn: &mut Connection<'_, Core>) -> RocksdbStorage { let (_stop_sender, stop_receiver) = watch::channel(false); RocksdbStorage::builder(dir.path()) .await @@ -92,8 +89,8 @@ async fn sync_test_storage( #[tokio::test] async fn rocksdb_storage_syncing_with_postgres() { - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); prepare_postgres(&mut conn).await; let storage_logs = gen_storage_logs(20..40); create_miniblock(&mut conn, MiniblockNumber(1), storage_logs.clone()).await; @@ -110,8 +107,8 @@ async fn rocksdb_storage_syncing_with_postgres() { #[tokio::test] async fn rocksdb_storage_syncing_fault_tolerance() { - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); prepare_postgres(&mut conn).await; let storage_logs = gen_storage_logs(100..200); for (i, block_logs) in storage_logs.chunks(20).enumerate() { @@ -159,7 +156,7 @@ async fn rocksdb_storage_syncing_fault_tolerance() { } async fn insert_factory_deps( - conn: &mut StorageProcessor<'_, Server>, + conn: &mut Connection<'_, Core>, miniblock_number: MiniblockNumber, indices: impl Iterator, ) { @@ -174,8 +171,8 @@ async fn insert_factory_deps( #[tokio::test] async fn rocksdb_storage_revert() { - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); prepare_postgres(&mut conn).await; let storage_logs = gen_storage_logs(20..40); create_miniblock(&mut conn, MiniblockNumber(1), storage_logs[..10].to_vec()).await; @@ -245,8 +242,8 @@ async fn rocksdb_storage_revert() { #[tokio::test] async fn rocksdb_enum_index_migration() { - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); prepare_postgres(&mut conn).await; let storage_logs = gen_storage_logs(20..40); create_miniblock(&mut conn, MiniblockNumber(1), storage_logs.clone()).await; @@ -330,8 +327,8 @@ async fn 
rocksdb_enum_index_migration() { #[test_casing(4, [RocksdbStorage::DESIRED_LOG_CHUNK_SIZE, 20, 5, 1])] #[tokio::test] async fn low_level_snapshot_recovery(log_chunk_size: u64) { - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); let (snapshot_recovery, mut storage_logs) = prepare_postgres_for_snapshot_recovery(&mut conn).await; @@ -362,8 +359,8 @@ async fn low_level_snapshot_recovery(log_chunk_size: u64) { #[tokio::test] async fn recovering_factory_deps_from_snapshot() { - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); let (snapshot_recovery, _) = prepare_postgres_for_snapshot_recovery(&mut conn).await; let mut all_factory_deps = HashMap::new(); @@ -389,8 +386,8 @@ async fn recovering_factory_deps_from_snapshot() { #[tokio::test] async fn recovering_from_snapshot_and_following_logs() { - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); let (snapshot_recovery, mut storage_logs) = prepare_postgres_for_snapshot_recovery(&mut conn).await; @@ -456,8 +453,8 @@ async fn recovering_from_snapshot_and_following_logs() { #[tokio::test] async fn recovery_fault_tolerance() { - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); let (_, storage_logs) = prepare_postgres_for_snapshot_recovery(&mut conn).await; let log_chunk_size = storage_logs.len() as u64 / 5; diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs index 420a79ae089..317b9402fe1 100644 --- a/core/lib/state/src/test_utils.rs +++ b/core/lib/state/src/test_utils.rs @@ -2,7 +2,7 @@ use std::ops; -use zksync_dal::{Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, Core, CoreDal}; use zksync_types::{ block::{L1BatchHeader, MiniblockHeader}, snapshots::SnapshotRecoveryStatus, @@ -10,7 +10,7 @@ use zksync_types::{ StorageKey, StorageLog, H256, }; -pub(crate) async fn prepare_postgres(conn: &mut StorageProcessor<'_, Server>) { +pub(crate) async fn prepare_postgres(conn: &mut Connection<'_, Core>) { if conn.blocks_dal().is_genesis_needed().await.unwrap() { conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -68,7 +68,7 @@ pub(crate) fn gen_storage_logs(indices: ops::Range) -> Vec { #[allow(clippy::default_trait_access)] // ^ `BaseSystemContractsHashes::default()` would require a new direct dependency pub(crate) async fn create_miniblock( - conn: &mut StorageProcessor<'_, Server>, + conn: &mut Connection<'_, Core>, miniblock_number: MiniblockNumber, block_logs: Vec, ) { @@ -100,7 +100,7 @@ pub(crate) async fn create_miniblock( #[allow(clippy::default_trait_access)] // ^ `BaseSystemContractsHashes::default()` would require a new direct dependency pub(crate) async fn create_l1_batch( - conn: &mut StorageProcessor<'_, Server>, + conn: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, logs_for_initial_writes: &[StorageLog], ) { @@ -123,7 +123,7 @@ pub(crate) async fn create_l1_batch( } pub(crate) async fn prepare_postgres_for_snapshot_recovery( - conn: 
&mut StorageProcessor<'_, Server>, + conn: &mut Connection<'_, Core>, ) -> (SnapshotRecoveryStatus, Vec) { conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) diff --git a/core/lib/vm_utils/src/lib.rs b/core/lib/vm_utils/src/lib.rs index 12b3be659e5..96ca056281a 100644 --- a/core/lib/vm_utils/src/lib.rs +++ b/core/lib/vm_utils/src/lib.rs @@ -7,7 +7,7 @@ use multivm::{ VmInstance, }; use tokio::runtime::Handle; -use zksync_dal::{Server, StorageProcessor}; +use zksync_dal::{Connection, Core}; use zksync_state::{PostgresStorage, StoragePtr, StorageView, WriteStorage}; use zksync_types::{L1BatchNumber, L2ChainId, Transaction}; @@ -21,7 +21,7 @@ pub type VmAndStorage<'a> = ( pub fn create_vm( rt_handle: Handle, l1_batch_number: L1BatchNumber, - mut connection: StorageProcessor<'_, Server>, + mut connection: Connection<'_, Core>, l2_chain_id: L2ChainId, ) -> anyhow::Result { let l1_batch_params_provider = rt_handle diff --git a/core/lib/vm_utils/src/storage.rs b/core/lib/vm_utils/src/storage.rs index bc22ec7e99f..b38fc57518f 100644 --- a/core/lib/vm_utils/src/storage.rs +++ b/core/lib/vm_utils/src/storage.rs @@ -7,7 +7,7 @@ use multivm::{ zk_evm_latest::ethereum_types::H256, }; use zksync_contracts::BaseSystemContracts; -use zksync_dal::{Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, Core, CoreDal}; use zksync_types::{ block::MiniblockHeader, fee_model::BatchFeeInput, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, ZKPORTER_IS_AVAILABLE, @@ -89,7 +89,7 @@ pub struct L1BatchParamsProvider { } impl L1BatchParamsProvider { - pub async fn new(storage: &mut StorageProcessor<'_, Server>) -> anyhow::Result { + pub async fn new(storage: &mut Connection<'_, Core>) -> anyhow::Result { let snapshot = storage .snapshot_recovery_dal() .get_applied_snapshot_status() @@ -101,7 +101,7 @@ impl L1BatchParamsProvider { /// if necessary. pub async fn wait_for_l1_batch_params( &self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, number: L1BatchNumber, ) -> anyhow::Result<(H256, u64)> { let first_l1_batch = if let Some(snapshot) = &self.snapshot { @@ -122,7 +122,7 @@ impl L1BatchParamsProvider { } async fn wait_for_l1_batch_params_unchecked( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, number: L1BatchNumber, ) -> anyhow::Result<(H256, u64)> { // If the state root is not known yet, this duration will be used to back off in the while loops @@ -148,7 +148,7 @@ impl L1BatchParamsProvider { pub async fn load_l1_batch_protocol_version( &self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, ) -> anyhow::Result> { if let Some(snapshot) = &self.snapshot { @@ -172,7 +172,7 @@ impl L1BatchParamsProvider { /// Returns a header of the first miniblock in the specified L1 batch regardless of whether the batch is sealed or not. 
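// A hedged usage sketch for `L1BatchParamsProvider` as changed above: resolve the
// state root hash and timestamp of a batch. The import path is assumed from this
// file's location (`core/lib/vm_utils/src/storage.rs`); the helper name is illustrative.
async fn wait_for_params(
    storage: &mut zksync_dal::Connection<'_, zksync_dal::Core>,
    number: zksync_types::L1BatchNumber,
) -> anyhow::Result<(zksync_types::H256, u64)> {
    // `new` reads the applied snapshot status once, so later lookups can tell
    // pruned batches apart from batches that are not sealed yet.
    let provider = vm_utils::storage::L1BatchParamsProvider::new(storage).await?;
    // Polls until the batch's state root hash is available (it can lag sealing).
    provider.wait_for_l1_batch_params(storage, number).await
}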
pub async fn load_first_miniblock_in_batch( &self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, ) -> anyhow::Result> { let miniblock_number = self @@ -196,7 +196,7 @@ impl L1BatchParamsProvider { #[doc(hidden)] // public for testing purposes pub async fn load_number_of_first_miniblock_in_batch( &self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, ) -> anyhow::Result> { if l1_batch_number == L1BatchNumber(0) { @@ -232,7 +232,7 @@ impl L1BatchParamsProvider { /// Loads VM-related L1 batch parameters for the specified batch. pub async fn load_l1_batch_params( &self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, first_miniblock_in_batch: &FirstMiniblockInBatch, validation_computational_gas_limit: u32, chain_id: L2ChainId, diff --git a/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs b/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs index 75db409dfe0..7d52970d228 100644 --- a/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs +++ b/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs @@ -1,16 +1,16 @@ use actix_web::web; -use zksync_dal::{ConnectionPool, Server}; +use zksync_dal::{ConnectionPool, Core}; #[derive(Debug, Clone)] pub struct RestApi { - pub(super) master_connection_pool: ConnectionPool, - pub(super) replica_connection_pool: ConnectionPool, + pub(super) master_connection_pool: ConnectionPool, + pub(super) replica_connection_pool: ConnectionPool, } impl RestApi { pub fn new( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, + master_connection_pool: ConnectionPool, + replica_connection_pool: ConnectionPool, ) -> Self { Self { master_connection_pool, diff --git a/core/lib/zksync_core/src/api_server/contract_verification/api_impl.rs b/core/lib/zksync_core/src/api_server/contract_verification/api_impl.rs index b164d640a64..2660888e5c6 100644 --- a/core/lib/zksync_core/src/api_server/contract_verification/api_impl.rs +++ b/core/lib/zksync_core/src/api_server/contract_verification/api_impl.rs @@ -3,7 +3,7 @@ use actix_web::{ HttpResponse, Result as ActixResult, }; use serde::Serialize; -use zksync_dal::ServerDals; +use zksync_dal::CoreDal; use zksync_types::{contract_verification_api::VerificationIncomingRequest, Address}; use super::{api_decl::RestApi, metrics::METRICS}; @@ -36,7 +36,7 @@ impl RestApi { } let mut storage = self_ .master_connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await .unwrap(); @@ -68,7 +68,7 @@ impl RestApi { let method_latency = METRICS.call[&"contract_verification_request_status"].start(); let status = self_ .replica_connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await .unwrap() .contract_verification_dal() @@ -88,7 +88,7 @@ impl RestApi { let method_latency = METRICS.call[&"contract_verification_zksolc_versions"].start(); let versions = self_ .replica_connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await .unwrap() .contract_verification_dal() @@ -105,7 +105,7 @@ impl RestApi { let method_latency = METRICS.call[&"contract_verification_solc_versions"].start(); let versions = self_ .replica_connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await .unwrap() .contract_verification_dal() @@ -122,7 +122,7 @@ impl RestApi { let method_latency = 
METRICS.call[&"contract_verification_zkvyper_versions"].start(); let versions = self_ .replica_connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await .unwrap() .contract_verification_dal() @@ -139,7 +139,7 @@ impl RestApi { let method_latency = METRICS.call[&"contract_verification_vyper_versions"].start(); let versions = self_ .replica_connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await .unwrap() .contract_verification_dal() @@ -160,7 +160,7 @@ impl RestApi { let info = self_ .replica_connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await .unwrap() .contract_verification_dal() diff --git a/core/lib/zksync_core/src/api_server/contract_verification/mod.rs b/core/lib/zksync_core/src/api_server/contract_verification/mod.rs index 3f6dd27e73b..931f12abb4c 100644 --- a/core/lib/zksync_core/src/api_server/contract_verification/mod.rs +++ b/core/lib/zksync_core/src/api_server/contract_verification/mod.rs @@ -43,8 +43,8 @@ fn start_server(api: RestApi, bind_to: SocketAddr) -> Server { /// Start HTTP REST API pub fn start_server_thread_detached( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, + master_connection_pool: ConnectionPool, + replica_connection_pool: ConnectionPool, api_config: ContractVerificationApiConfig, mut stop_receiver: watch::Receiver, ) -> JoinHandle> { diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs index 2159bf85186..bc8bfb27c98 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs @@ -16,7 +16,7 @@ use multivm::{ VmInstance, }; use tokio::runtime::Handle; -use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_state::{PostgresStorage, ReadStorage, StoragePtr, StorageView, WriteStorage}; use zksync_system_constants::{ SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, @@ -51,7 +51,7 @@ struct Sandbox<'a> { impl<'a> Sandbox<'a> { async fn new( - mut connection: StorageProcessor<'a, Server>, + mut connection: Connection<'a, Core>, shared_args: TxSharedArgs, execution_args: &'a TxExecutionArgs, block_args: BlockArgs, @@ -108,7 +108,7 @@ impl<'a> Sandbox<'a> { } async fn load_l2_block_info( - connection: &mut StorageProcessor<'_, Server>, + connection: &mut Connection<'_, Core>, is_pending_block: bool, resolved_block_info: &ResolvedBlockInfo, ) -> anyhow::Result<(L2BlockEnv, Option)> { @@ -291,7 +291,7 @@ pub(super) fn apply_vm_in_sandbox( // current L1 prices for gas or pubdata. adjust_pubdata_price: bool, execution_args: &TxExecutionArgs, - connection_pool: &ConnectionPool, + connection_pool: &ConnectionPool, tx: Transaction, block_args: BlockArgs, apply: impl FnOnce( @@ -304,7 +304,7 @@ pub(super) fn apply_vm_in_sandbox( let rt_handle = vm_permit.rt_handle(); let connection = rt_handle - .block_on(connection_pool.access_storage_tagged("api")) + .block_on(connection_pool.connection_tagged("api")) .context("failed acquiring DB connection")?; let connection_acquire_time = stage_started_at.elapsed(); // We don't want to emit too many logs. @@ -353,7 +353,7 @@ struct StoredL2BlockInfo { impl StoredL2BlockInfo { /// If `miniblock_hash` is `None`, it needs to be fetched from the storage. 
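// A minimal sketch of the sync-to-async bridging used by `apply_vm_in_sandbox`
// above: VM execution happens on a blocking thread, so the DB connection is
// acquired via `Handle::block_on`. The function name is illustrative.
fn acquire_connection_blocking(
    rt_handle: tokio::runtime::Handle,
    pool: zksync_dal::ConnectionPool<zksync_dal::Core>,
) -> anyhow::Result<()> {
    use anyhow::Context as _;

    let _connection = rt_handle
        .block_on(pool.connection_tagged("api"))
        .context("failed acquiring DB connection")?;
    // ... build `PostgresStorage` on top of the connection and run the VM ...
    Ok(())
}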
async fn new( - connection: &mut StorageProcessor<'_, Server>, + connection: &mut Connection<'_, Core>, miniblock_number: MiniblockNumber, miniblock_hash: Option, ) -> anyhow::Result { @@ -427,7 +427,7 @@ impl BlockArgs { async fn resolve_block_info( &self, - connection: &mut StorageProcessor<'_, Server>, + connection: &mut Connection<'_, Core>, ) -> anyhow::Result { let (state_l2_block_number, vm_l1_batch_number, l1_batch_timestamp); diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs index 03936c3822e..b3440984b32 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs @@ -8,7 +8,7 @@ use multivm::{ MultiVMTracer, }; use tracing::{span, Level}; -use zksync_dal::{ConnectionPool, Server}; +use zksync_dal::{ConnectionPool, Core}; use zksync_types::{ fee::TransactionExecutionMetrics, l2::L2Tx, ExecuteTransactionCommon, Nonce, PackedEthSignature, Transaction, U256, @@ -108,7 +108,7 @@ impl TransactionExecutor { // current L1 prices for gas or pubdata. adjust_pubdata_price: bool, execution_args: TxExecutionArgs, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, tx: Transaction, block_args: BlockArgs, custom_tracers: Vec, @@ -169,7 +169,7 @@ impl TransactionExecutor { &self, vm_permit: VmPermit, shared_args: TxSharedArgs, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, mut tx: L2Tx, block_args: BlockArgs, vm_execution_cache_misses_limit: Option, diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs index 224afd08847..c540ca56dd2 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs @@ -2,7 +2,7 @@ use std::{sync::Arc, time::Duration}; use anyhow::Context as _; use tokio::runtime::Handle; -use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_state::{PostgresStorage, PostgresStorageCaches, ReadStorage, StorageView}; use zksync_system_constants::PUBLISH_BYTECODE_OVERHEAD; use zksync_types::{ @@ -149,7 +149,7 @@ impl VmConcurrencyLimiter { } async fn get_pending_state( - connection: &mut StorageProcessor<'_, Server>, + connection: &mut Connection<'_, Core>, ) -> anyhow::Result<(api::BlockId, MiniblockNumber)> { let block_id = api::BlockId::Number(api::BlockNumber::Pending); let resolved_block_number = connection @@ -164,7 +164,7 @@ async fn get_pending_state( /// Returns the number of the pubdata that the transaction will spend on factory deps. 
pub(super) async fn get_pubdata_for_factory_deps( _vm_permit: &VmPermit, - connection_pool: &ConnectionPool, + connection_pool: &ConnectionPool, factory_deps: &[Vec], storage_caches: PostgresStorageCaches, ) -> anyhow::Result { @@ -173,7 +173,7 @@ pub(super) async fn get_pubdata_for_factory_deps( } let mut storage = connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await .context("failed acquiring DB connection")?; let (_, block_number) = get_pending_state(&mut storage).await?; @@ -184,7 +184,7 @@ pub(super) async fn get_pubdata_for_factory_deps( let factory_deps = factory_deps.to_vec(); tokio::task::spawn_blocking(move || { let connection = rt_handle - .block_on(connection_pool.access_storage_tagged("api")) + .block_on(connection_pool.connection_tagged("api")) .context("failed acquiring DB connection")?; let storage = PostgresStorage::new(rt_handle, connection, block_number, false) .with_caches(storage_caches); @@ -243,7 +243,7 @@ pub(crate) struct BlockStartInfo { } impl BlockStartInfo { - pub async fn new(storage: &mut StorageProcessor<'_, Server>) -> anyhow::Result { + pub async fn new(storage: &mut Connection<'_, Core>) -> anyhow::Result { let snapshot_recovery = storage .snapshot_recovery_dal() .get_applied_snapshot_status() @@ -296,9 +296,7 @@ pub(crate) struct BlockArgs { } impl BlockArgs { - pub(crate) async fn pending( - connection: &mut StorageProcessor<'_, Server>, - ) -> anyhow::Result { + pub(crate) async fn pending(connection: &mut Connection<'_, Core>) -> anyhow::Result { let (block_id, resolved_block_number) = get_pending_state(connection).await?; Ok(Self { block_id, @@ -309,7 +307,7 @@ impl BlockArgs { /// Loads block information from DB. pub async fn new( - connection: &mut StorageProcessor<'_, Server>, + connection: &mut Connection<'_, Core>, block_id: api::BlockId, start_info: BlockStartInfo, ) -> Result { diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs index 144c3b45268..f062c8df3c4 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs @@ -11,8 +11,8 @@ use crate::{ #[tokio::test] async fn creating_block_args() { - let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await .unwrap(); @@ -66,8 +66,8 @@ async fn creating_block_args() { #[tokio::test] async fn creating_block_args_after_snapshot_recovery() { - let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -158,8 +158,8 @@ async fn creating_block_args_after_snapshot_recovery() { #[tokio::test] async fn instantiating_vm() { - let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await .unwrap(); @@ -173,7 +173,7 @@ async fn instantiating_vm() { 
test_instantiating_vm(pool.clone(), block_args).await; } -async fn test_instantiating_vm(pool: ConnectionPool, block_args: BlockArgs) { +async fn test_instantiating_vm(pool: ConnectionPool, block_args: BlockArgs) { let (vm_concurrency_limiter, _) = VmConcurrencyLimiter::new(1); let vm_permit = vm_concurrency_limiter.acquire().await.unwrap(); let transaction = create_l2_transaction(10, 100).into(); diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs index 0bc4db366d5..f403a5c17ec 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs @@ -10,7 +10,7 @@ use multivm::{ vm_latest::HistoryDisabled, MultiVMTracer, }; -use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_types::{l2::L2Tx, Transaction, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS}; use super::{ @@ -33,7 +33,7 @@ pub(crate) enum ValidationError { impl TransactionExecutor { pub(crate) async fn validate_tx_in_sandbox( &self, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, vm_permit: VmPermit, tx: L2Tx, shared_args: TxSharedArgs, @@ -47,7 +47,7 @@ impl TransactionExecutor { let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::ValidateInSandbox].start(); let mut connection = connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await .context("failed acquiring DB connection")?; let validation_params = @@ -117,7 +117,7 @@ impl TransactionExecutor { /// trusted to change between validation and execution in general case, but /// sometimes we can safely rely on them to not change often. 
async fn get_validation_params( - connection: &mut StorageProcessor<'_, Server>, + connection: &mut Connection<'_, Core>, tx: &L2Tx, computational_gas_limit: u32, ) -> anyhow::Result { diff --git a/core/lib/zksync_core/src/api_server/tree/tests.rs b/core/lib/zksync_core/src/api_server/tree/tests.rs index a279253816a..d5932293823 100644 --- a/core/lib/zksync_core/src/api_server/tree/tests.rs +++ b/core/lib/zksync_core/src/api_server/tree/tests.rs @@ -4,7 +4,7 @@ use std::net::Ipv4Addr; use assert_matches::assert_matches; use tempfile::TempDir; -use zksync_dal::{ConnectionPool, Server}; +use zksync_dal::{ConnectionPool, Core}; use super::*; use crate::metadata_calculator::tests::{ @@ -13,7 +13,7 @@ use crate::metadata_calculator::tests::{ #[tokio::test] async fn merkle_tree_api() { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; let api_addr = (Ipv4Addr::LOCALHOST, 0).into(); @@ -75,7 +75,7 @@ async fn merkle_tree_api() { #[tokio::test] async fn local_merkle_tree_client() { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; diff --git a/core/lib/zksync_core/src/api_server/tx_sender/master_pool_sink.rs b/core/lib/zksync_core/src/api_server/tx_sender/master_pool_sink.rs index 355055371d9..ebe03341cdb 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/master_pool_sink.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/master_pool_sink.rs @@ -1,4 +1,4 @@ -use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, Server, ServerDals}; +use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, Core, CoreDal}; use zksync_types::{fee::TransactionExecutionMetrics, l2::L2Tx}; use super::{tx_sink::TxSink, SubmitTxError}; @@ -7,11 +7,11 @@ use crate::metrics::{TxStage, APP_METRICS}; /// Wrapper for the master DB pool that allows to submit transactions to the mempool. #[derive(Debug)] pub struct MasterPoolSink { - master_pool: ConnectionPool, + master_pool: ConnectionPool, } impl MasterPoolSink { - pub fn new(master_pool: ConnectionPool) -> Self { + pub fn new(master_pool: ConnectionPool) -> Self { Self { master_pool } } } @@ -25,7 +25,7 @@ impl TxSink for MasterPoolSink { ) -> Result { let submission_res_handle = self .master_pool - .access_storage_tagged("api") + .connection_tagged("api") .await? .transactions_dal() .insert_transaction_l2(tx, execution_metrics) diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs index 2ecc982afab..7edc5dac457 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs @@ -11,7 +11,7 @@ use multivm::{ use zksync_config::configs::{api::Web3JsonRpcConfig, chain::StateKeeperConfig}; use zksync_contracts::BaseSystemContracts; use zksync_dal::{ - transactions_dal::L2TxSubmissionResult, ConnectionPool, Server, ServerDals, StorageProcessor, + transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, }; use zksync_state::PostgresStorageCaches; use zksync_types::{ @@ -149,7 +149,7 @@ pub struct TxSenderBuilder { /// Shared TxSender configuration. 
config: TxSenderConfig, /// Connection pool for read requests. - replica_connection_pool: ConnectionPool, + replica_connection_pool: ConnectionPool, /// Sink to be used to persist transactions. tx_sink: Arc, /// Batch sealer used to check whether transaction can be executed by the sequencer. @@ -159,7 +159,7 @@ pub struct TxSenderBuilder { impl TxSenderBuilder { pub fn new( config: TxSenderConfig, - replica_connection_pool: ConnectionPool, + replica_connection_pool: ConnectionPool, tx_sink: Arc, ) -> Self { Self { @@ -242,7 +242,7 @@ pub struct TxSenderInner { pub(super) sender_config: TxSenderConfig, /// Sink to be used to persist transactions. pub tx_sink: Arc, - pub replica_connection_pool: ConnectionPool, + pub replica_connection_pool: ConnectionPool, // Used to keep track of gas prices for the fee ticker. pub batch_fee_input_provider: Arc, pub(super) api_contracts: ApiContracts, @@ -273,10 +273,10 @@ impl TxSender { self.0.storage_caches.clone() } - async fn acquire_replica_connection(&self) -> anyhow::Result> { + async fn acquire_replica_connection(&self) -> anyhow::Result> { self.0 .replica_connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await .context("failed acquiring connection to replica DB") } diff --git a/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs b/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs index c83a6d5ee8b..3a7f93f13ab 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs @@ -6,7 +6,7 @@ use std::{ }; use tokio::sync::{watch, RwLock}; -use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, Server, ServerDals}; +use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, Core, CoreDal}; use zksync_types::{ api::{BlockId, Transaction, TransactionDetails, TransactionId}, fee::TransactionExecutionMetrics, @@ -64,7 +64,7 @@ impl TxCache { async fn run_updates( self, - pool: ConnectionPool, + pool: ConnectionPool, stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { const UPDATE_INTERVAL: Duration = Duration::from_secs(1); @@ -79,7 +79,7 @@ impl TxCache { let inner = self.inner.read().await; inner.nonces_by_account.keys().copied().collect() }; - let mut storage = pool.access_storage_tagged("api").await?; + let mut storage = pool.connection_tagged("api").await?; let nonces_for_accounts = storage .storage_web3_dal() .get_nonces_for_addresses(&addresses) @@ -204,7 +204,7 @@ impl TxProxy { pub fn run_account_nonce_sweeper( &self, - pool: ConnectionPool, + pool: ConnectionPool, stop_receiver: watch::Receiver, ) -> impl Future> { let tx_cache = self.tx_cache.clone(); diff --git a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs index e62033a6357..d78730ed353 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs @@ -10,7 +10,7 @@ use crate::{ }; pub(crate) async fn create_test_tx_sender( - pool: ConnectionPool, + pool: ConnectionPool, l2_chain_id: L2ChainId, tx_executor: TransactionExecutor, ) -> (TxSender, VmConcurrencyBarrier) { @@ -39,8 +39,8 @@ pub(crate) async fn create_test_tx_sender( async fn getting_nonce_for_account() { let l2_chain_id = L2ChainId::default(); let test_address = Address::repeat_byte(1); - let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::::test_pool().await; + let mut storage = 
     ensure_genesis_state(&mut storage, l2_chain_id, &GenesisParams::mock())
         .await
         .unwrap();
@@ -86,8 +86,8 @@ async fn getting_nonce_for_account() {
 async fn getting_nonce_for_account_after_snapshot_recovery() {
     const SNAPSHOT_MINIBLOCK_NUMBER: MiniblockNumber = MiniblockNumber(42);
-    let pool = ConnectionPool::<Server>::test_pool().await;
-    let mut storage = pool.access_storage().await.unwrap();
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let mut storage = pool.connection().await.unwrap();
     let test_address = Address::repeat_byte(1);
     let other_address = Address::repeat_byte(2);
     let nonce_logs = [
diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs
index 40b545c25b0..f6bae130616 100644
--- a/core/lib/zksync_core/src/api_server/web3/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/mod.rs
@@ -9,7 +9,7 @@ use tokio::{
     task::JoinHandle,
 };
 use tower_http::{cors::CorsLayer, metrics::InFlightRequestsLayer};
-use zksync_dal::{ConnectionPool, Server};
+use zksync_dal::{ConnectionPool, Core};
 use zksync_health_check::{HealthStatus, HealthUpdater, ReactiveHealthCheck};
 use zksync_types::MiniblockNumber;
 use zksync_web3_decl::{
@@ -123,8 +123,8 @@ struct OptionalApiParams {
 /// maintenance tasks.
 #[derive(Debug)]
 pub struct ApiServer {
-    pool: ConnectionPool<Server>,
-    updaters_pool: ConnectionPool<Server>,
+    pool: ConnectionPool<Core>,
+    updaters_pool: ConnectionPool<Core>,
     health_updater: Arc,
     config: InternalApiConfig,
     transport: ApiTransport,
@@ -137,8 +137,8 @@ pub struct ApiServer {
 #[derive(Debug)]
 pub struct ApiBuilder {
-    pool: ConnectionPool<Server>,
-    updaters_pool: ConnectionPool<Server>,
+    pool: ConnectionPool<Core>,
+    updaters_pool: ConnectionPool<Core>,
     config: InternalApiConfig,
     polling_interval: Duration,
     // Mandatory params that must be set using builder methods.
@@ -154,7 +154,7 @@ pub struct ApiBuilder {
 impl ApiBuilder {
     const DEFAULT_POLLING_INTERVAL: Duration = Duration::from_millis(200);
-    pub fn jsonrpsee_backend(config: InternalApiConfig, pool: ConnectionPool<Server>) -> Self {
+    pub fn jsonrpsee_backend(config: InternalApiConfig, pool: ConnectionPool<Core>) -> Self {
         Self {
             updaters_pool: pool.clone(),
             pool,
@@ -182,7 +182,7 @@ impl ApiBuilder {
     /// such as last mined block number or account nonces. This pool is used to execute
     /// in a background task. If not called, the main pool will be used. If the API server is under high load,
     /// it may make sense to supply a single-connection pool to reduce pool contention with the API methods.
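As the doc comment above suggests, a dedicated single-connection updaters pool can reduce contention under load. A wiring sketch, assuming the crate-internal `ApiBuilder` and `InternalApiConfig` types from this file (`build_api` itself is illustrative):

```rust
use zksync_dal::{ConnectionPool, Core};

// `ApiBuilder` and `InternalApiConfig` are the crate-internal types above.
fn build_api(
    config: InternalApiConfig,
    main_pool: ConnectionPool<Core>,
    updaters_pool: ConnectionPool<Core>,
) -> ApiBuilder {
    // Without `with_updaters_pool`, the main pool doubles as the updaters pool.
    ApiBuilder::jsonrpsee_backend(config, main_pool).with_updaters_pool(updaters_pool)
}
```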
- pub fn with_updaters_pool(mut self, pool: ConnectionPool) -> Self { + pub fn with_updaters_pool(mut self, pool: ConnectionPool) -> Self { self.updaters_pool = pool; self } @@ -298,7 +298,7 @@ impl ApiServer { self, last_sealed_miniblock: SealedMiniblockNumber, ) -> anyhow::Result { - let mut storage = self.updaters_pool.access_storage_tagged("api").await?; + let mut storage = self.updaters_pool.connection_tagged("api").await?; let start_info = BlockStartInfo::new(&mut storage).await?; drop(storage); diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs index fe9539c7c80..1edfa1f7f9f 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use anyhow::Context as _; use multivm::{interface::ExecutionResult, vm_latest::constants::BLOCK_GAS_LIMIT}; use once_cell::sync::OnceCell; -use zksync_dal::ServerDals; +use zksync_dal::CoreDal; use zksync_system_constants::MAX_ENCODED_TX_SIZE; use zksync_types::{ api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, @@ -67,11 +67,7 @@ impl DebugNamespace { let only_top_call = options .map(|options| options.tracer_config.only_top_call) .unwrap_or(false); - let mut connection = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut connection = self.state.connection_pool.connection_tagged("api").await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; self.current_method() .set_block_diff(self.state.last_sealed_miniblock.diff(block_number)); @@ -114,11 +110,7 @@ impl DebugNamespace { let only_top_call = options .map(|options| options.tracer_config.only_top_call) .unwrap_or(false); - let mut connection = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut connection = self.state.connection_pool.connection_tagged("api").await?; let call_trace = connection .transactions_dal() .get_call_trace(tx_hash) @@ -147,11 +139,7 @@ impl DebugNamespace { .map(|options| options.tracer_config.only_top_call) .unwrap_or(false); - let mut connection = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut connection = self.state.connection_pool.connection_tagged("api").await?; let block_args = self .state .resolve_block_args(&mut connection, block_id) diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs index 85cc254afa4..ede0fe04a08 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs @@ -1,5 +1,5 @@ use anyhow::Context as _; -use zksync_dal::ServerDals; +use zksync_dal::CoreDal; use zksync_types::{api::en, tokens::TokenInfo, MiniblockNumber}; use zksync_web3_decl::error::Web3Error; @@ -21,7 +21,7 @@ impl EnNamespace { let Some(genesis) = self .state .connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await? 
.consensus_dal() .genesis() @@ -44,11 +44,7 @@ impl EnNamespace { block_number: MiniblockNumber, include_transactions: bool, ) -> Result, Web3Error> { - let mut storage = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut storage = self.state.connection_pool.connection_tagged("api").await?; Ok(storage .sync_dal() .sync_block(block_number, include_transactions) @@ -61,11 +57,7 @@ impl EnNamespace { &self, block_number: Option, ) -> Result, Web3Error> { - let mut storage = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut storage = self.state.connection_pool.connection_tagged("api").await?; Ok(storage .tokens_web3_dal() .get_all_tokens(block_number) diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs index 05bab4c535c..2a254866100 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -1,5 +1,5 @@ use anyhow::Context as _; -use zksync_dal::ServerDals; +use zksync_dal::CoreDal; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ api::{ @@ -44,11 +44,7 @@ impl EthNamespace { #[tracing::instrument(skip(self))] pub async fn get_block_number_impl(&self) -> Result { - let mut storage = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut storage = self.state.connection_pool.connection_tagged("api").await?; let block_number = storage .blocks_dal() .get_sealed_miniblock_number() @@ -67,11 +63,7 @@ impl EthNamespace { let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); self.current_method().set_block_id(block_id); - let mut connection = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut connection = self.state.connection_pool.connection_tagged("api").await?; let block_args = self .state .resolve_block_args(&mut connection, block_id) @@ -154,11 +146,7 @@ impl EthNamespace { let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); self.current_method().set_block_id(block_id); - let mut connection = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut connection = self.state.connection_pool.connection_tagged("api").await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; let balance = connection @@ -230,11 +218,7 @@ impl EthNamespace { self.current_method().set_block_id(block_id); self.state.start_info.ensure_not_pruned(block_id)?; - let mut storage = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut storage = self.state.connection_pool.connection_tagged("api").await?; let Some(block_number) = self .state .resolve_block_unchecked(&mut storage, block_id) @@ -296,11 +280,7 @@ impl EthNamespace { self.current_method().set_block_id(block_id); self.state.start_info.ensure_not_pruned(block_id)?; - let mut storage = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut storage = self.state.connection_pool.connection_tagged("api").await?; let Some(block_number) = self .state .resolve_block_unchecked(&mut storage, block_id) @@ -328,11 +308,7 @@ impl EthNamespace { self.current_method().set_block_id(block_id); self.state.start_info.ensure_not_pruned(block_id)?; - let mut storage = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut storage = 
self.state.connection_pool.connection_tagged("api").await?; let Some(block_number) = self .state .resolve_block_unchecked(&mut storage, block_id) @@ -368,11 +344,7 @@ impl EthNamespace { let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); self.current_method().set_block_id(block_id); - let mut connection = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut connection = self.state.connection_pool.connection_tagged("api").await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; self.set_block_diff(block_number); @@ -400,11 +372,7 @@ impl EthNamespace { self.current_method().set_block_id(block_id); let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(idx)); - let mut connection = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut connection = self.state.connection_pool.connection_tagged("api").await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; self.set_block_diff(block_number); let value = connection @@ -425,11 +393,7 @@ impl EthNamespace { let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); self.current_method().set_block_id(block_id); - let mut connection = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut connection = self.state.connection_pool.connection_tagged("api").await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; self.set_block_diff(block_number); @@ -471,11 +435,7 @@ impl EthNamespace { &self, id: TransactionId, ) -> Result, Web3Error> { - let mut storage = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut storage = self.state.connection_pool.connection_tagged("api").await?; let chain_id = self.state.api_config.l2_chain_id; let mut transaction = match id { TransactionId::Hash(hash) => storage @@ -520,7 +480,7 @@ impl EthNamespace { let receipts = self .state .connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await? 
.transactions_web3_dal() .get_transaction_receipts(&[hash]) @@ -536,11 +496,7 @@ impl EthNamespace { .installed_filters .as_ref() .ok_or(Web3Error::NotImplemented)?; - let mut storage = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut storage = self.state.connection_pool.connection_tagged("api").await?; let last_block_number = storage .blocks_dal() .get_sealed_miniblock_number() @@ -688,11 +644,7 @@ impl EthNamespace { .min(self.state.api_config.fee_history_limit) .max(1); - let mut connection = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut connection = self.state.connection_pool.connection_tagged("api").await?; let newest_miniblock = self .state .resolve_block(&mut connection, BlockId::Number(newest_block)) @@ -733,11 +685,7 @@ impl EthNamespace { ) -> Result { Ok(match typed_filter { TypedFilter::Blocks(from_block) => { - let mut conn = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut conn = self.state.connection_pool.connection_tagged("api").await?; let (block_hashes, last_block_number) = conn .blocks_web3_dal() .get_block_hashes_since(*from_block, self.state.api_config.req_entities_limit) @@ -753,11 +701,7 @@ impl EthNamespace { } TypedFilter::PendingTransactions(from_timestamp_excluded) => { - let mut conn = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut conn = self.state.connection_pool.connection_tagged("api").await?; let (tx_hashes, last_timestamp) = conn .transactions_web3_dal() .get_pending_txs_hashes_after( @@ -810,11 +754,7 @@ impl EthNamespace { topics, }; - let mut storage = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut storage = self.state.connection_pool.connection_tagged("api").await?; // Check if there is more than one block in range and there are more than `req_entities_limit` logs that satisfies filter. // In this case we should return error and suggest requesting logs with smaller block range. 
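All of these namespace handlers share one access pattern: take a tagged connection, run a query, and release the connection promptly. A standalone sketch of that pattern (the function name and logging are illustrative; the `Option`-style return of `get_sealed_miniblock_number` is inferred from its usage above):

```rust
use zksync_dal::{ConnectionPool, Core, CoreDal};

async fn sealed_miniblock(pool: &ConnectionPool<Core>) -> anyhow::Result<()> {
    let mut storage = pool.connection_tagged("api").await?;
    let number = storage.blocks_dal().get_sealed_miniblock_number().await?;
    drop(storage); // hand the connection back before doing unrelated work
    tracing::info!("last sealed miniblock: {number:?}");
    Ok(())
}
```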
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs index 5f713d65732..49b54df5782 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs @@ -1,5 +1,5 @@ use anyhow::Context as _; -use zksync_dal::ServerDals; +use zksync_dal::CoreDal; use zksync_types::{ snapshots::{AllSnapshots, SnapshotHeader, SnapshotStorageLogsChunkMetadata}, L1BatchNumber, @@ -23,11 +23,7 @@ impl SnapshotsNamespace { } pub async fn get_all_snapshots_impl(&self) -> Result { - let mut storage_processor = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut storage_processor = self.state.connection_pool.connection_tagged("api").await?; let mut snapshots_dal = storage_processor.snapshots_dal(); Ok(snapshots_dal .get_all_complete_snapshots() @@ -39,11 +35,7 @@ impl SnapshotsNamespace { &self, l1_batch_number: L1BatchNumber, ) -> Result, Web3Error> { - let mut storage_processor = self - .state - .connection_pool - .access_storage_tagged("api") - .await?; + let mut storage_processor = self.state.connection_pool.connection_tagged("api").await?; let snapshot_metadata = storage_processor .snapshots_dal() .get_snapshot_metadata(l1_batch_number) diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index 3dd8a850c68..4d2123234a4 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -1,7 +1,7 @@ use std::{collections::HashMap, convert::TryInto}; use anyhow::Context as _; -use zksync_dal::{Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, Core, CoreDal}; use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ @@ -45,12 +45,8 @@ impl ZksNamespace { &self.state.current_method } - async fn access_storage(&self) -> Result, Web3Error> { - Ok(self - .state - .connection_pool - .access_storage_tagged("api") - .await?) + async fn connection(&self) -> Result, Web3Error> { + Ok(self.state.connection_pool.connection_tagged("api").await?) 
} #[tracing::instrument(skip(self, request))] @@ -141,7 +137,7 @@ impl ZksNamespace { from: u32, limit: u8, ) -> Result, Web3Error> { - let mut storage = self.access_storage().await?; + let mut storage = self.connection().await?; let tokens = storage .tokens_web3_dal() .get_well_known_tokens() @@ -168,7 +164,7 @@ impl ZksNamespace { &self, address: Address, ) -> Result, Web3Error> { - let mut storage = self.access_storage().await?; + let mut storage = self.connection().await?; let tokens = storage .tokens_dal() .get_all_l2_token_addresses() @@ -215,7 +211,7 @@ impl ZksNamespace { l2_log_position: Option, ) -> Result, Web3Error> { self.state.start_info.ensure_not_pruned(block_number)?; - let mut storage = self.access_storage().await?; + let mut storage = self.connection().await?; let Some(l1_batch_number) = storage .blocks_web3_dal() .get_l1_batch_number_of_miniblock(block_number) @@ -275,7 +271,7 @@ impl ZksNamespace { async fn get_l2_to_l1_log_proof_inner( &self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, index_in_filtered_logs: usize, log_filter: impl Fn(&L2ToL1Log) -> bool, @@ -326,7 +322,7 @@ impl ZksNamespace { tx_hash: H256, index: Option, ) -> Result, Web3Error> { - let mut storage = self.access_storage().await?; + let mut storage = self.connection().await?; let Some((l1_batch_number, l1_batch_tx_index)) = storage .blocks_web3_dal() .get_l1_batch_info_for_tx(tx_hash) @@ -349,7 +345,7 @@ impl ZksNamespace { #[tracing::instrument(skip(self))] pub async fn get_l1_batch_number_impl(&self) -> Result { - let mut storage = self.access_storage().await?; + let mut storage = self.connection().await?; let l1_batch_number = storage .blocks_dal() .get_sealed_l1_batch_number() @@ -365,7 +361,7 @@ impl ZksNamespace { batch: L1BatchNumber, ) -> Result, Web3Error> { self.state.start_info.ensure_not_pruned(batch)?; - let mut storage = self.access_storage().await?; + let mut storage = self.connection().await?; let range = storage .blocks_web3_dal() .get_miniblock_range_of_l1_batch(batch) @@ -380,7 +376,7 @@ impl ZksNamespace { block_number: MiniblockNumber, ) -> Result, Web3Error> { self.state.start_info.ensure_not_pruned(block_number)?; - let mut storage = self.access_storage().await?; + let mut storage = self.connection().await?; Ok(storage .blocks_web3_dal() .get_block_details(block_number) @@ -394,7 +390,7 @@ impl ZksNamespace { block_number: MiniblockNumber, ) -> Result, Web3Error> { self.state.start_info.ensure_not_pruned(block_number)?; - let mut storage = self.access_storage().await?; + let mut storage = self.connection().await?; Ok(storage .transactions_web3_dal() .get_raw_miniblock_transactions(block_number) @@ -407,7 +403,7 @@ impl ZksNamespace { &self, hash: H256, ) -> Result, Web3Error> { - let mut storage = self.access_storage().await?; + let mut storage = self.connection().await?; let mut tx_details = storage .transactions_web3_dal() .get_transaction_details(hash) @@ -427,7 +423,7 @@ impl ZksNamespace { batch_number: L1BatchNumber, ) -> Result, Web3Error> { self.state.start_info.ensure_not_pruned(batch_number)?; - let mut storage = self.access_storage().await?; + let mut storage = self.connection().await?; Ok(storage .blocks_web3_dal() .get_l1_batch_details(batch_number) @@ -440,7 +436,7 @@ impl ZksNamespace { &self, hash: H256, ) -> Result>, Web3Error> { - let mut storage = self.access_storage().await?; + let mut storage = self.connection().await?; Ok(storage .factory_deps_dal() .get_factory_dep(hash) @@ -475,7 
+471,7 @@ impl ZksNamespace { &self, version_id: Option, ) -> Result, Web3Error> { - let mut storage = self.access_storage().await?; + let mut storage = self.connection().await?; let protocol_version = match version_id { Some(id) => { storage diff --git a/core/lib/zksync_core/src/api_server/web3/pubsub.rs b/core/lib/zksync_core/src/api_server/web3/pubsub.rs index 4a46a0e1841..57c7d962a75 100644 --- a/core/lib/zksync_core/src/api_server/web3/pubsub.rs +++ b/core/lib/zksync_core/src/api_server/web3/pubsub.rs @@ -7,7 +7,7 @@ use tokio::{ task::JoinHandle, time::{interval, Duration}, }; -use zksync_dal::{ConnectionPool, Server, ServerDals}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_types::{MiniblockNumber, H128, H256}; use zksync_web3_decl::{ jsonrpsee::{ @@ -51,7 +51,7 @@ pub(super) enum PubSubEvent { #[derive(Debug)] struct PubSubNotifier { sender: broadcast::Sender>, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, polling_interval: Duration, events_sender: Option>, } @@ -60,9 +60,9 @@ impl PubSubNotifier { async fn get_starting_miniblock_number(&self) -> anyhow::Result { let mut storage = self .connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await - .context("access_storage_tagged")?; + .context("connection_tagged")?; let sealed_miniblock_number = storage .blocks_dal() .get_sealed_miniblock_number() @@ -127,9 +127,9 @@ impl PubSubNotifier { last_block_number: MiniblockNumber, ) -> anyhow::Result> { self.connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await - .context("access_storage_tagged")? + .context("connection_tagged")? .blocks_web3_dal() .get_block_headers_after(last_block_number) .await @@ -165,9 +165,9 @@ impl PubSubNotifier { last_time: chrono::NaiveDateTime, ) -> anyhow::Result<(Vec, Option)> { self.connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await - .context("access_storage_tagged")? + .context("connection_tagged")? .transactions_web3_dal() .get_pending_txs_hashes_after(last_time, None) .await @@ -205,9 +205,9 @@ impl PubSubNotifier { async fn new_logs(&self, last_block_number: MiniblockNumber) -> anyhow::Result> { self.connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await - .context("access_storage_tagged")? + .context("connection_tagged")? .events_web3_dal() .get_all_logs(last_block_number) .await @@ -415,7 +415,7 @@ impl EthSubscribe { /// Spawns notifier tasks. This should be called once per instance. 
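The renamed `ZksNamespace::connection` helper above is a thin wrapper; the same shape works for any state struct holding a pool. A sketch with an illustrative `State` type and the error simplified to `anyhow::Error`:

```rust
use zksync_dal::{Connection, ConnectionPool, Core};

struct State {
    connection_pool: ConnectionPool<Core>,
}

impl State {
    // Borrows the pool for the lifetime of the returned connection.
    async fn connection(&self) -> anyhow::Result<Connection<'_, Core>> {
        Ok(self.connection_pool.connection_tagged("api").await?)
    }
}
```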
pub fn spawn_notifiers( &self, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, polling_interval: Duration, stop_receiver: watch::Receiver, ) -> Vec>> { diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs index 78ed8dfdad4..d1242747d25 100644 --- a/core/lib/zksync_core/src/api_server/web3/state.rs +++ b/core/lib/zksync_core/src/api_server/web3/state.rs @@ -12,7 +12,7 @@ use lru::LruCache; use tokio::sync::{watch, Mutex}; use vise::GaugeGuard; use zksync_config::configs::{api::Web3JsonRpcConfig, chain::NetworkConfig, ContractsConfig}; -use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_types::{ api, l2::L2Tx, transaction_request::CallRequest, Address, L1BatchNumber, L1ChainId, L2ChainId, MiniblockNumber, H256, U256, U64, @@ -134,7 +134,7 @@ impl SealedMiniblockNumber { /// Creates a handle to the last sealed miniblock number together with a task that will update /// it on a schedule. pub fn new( - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, update_interval: Duration, stop_receiver: watch::Receiver, ) -> (Self, impl Future>) { @@ -148,7 +148,7 @@ impl SealedMiniblockNumber { return Ok(()); } - let mut connection = connection_pool.access_storage_tagged("api").await.unwrap(); + let mut connection = connection_pool.connection_tagged("api").await.unwrap(); let Some(last_sealed_miniblock) = connection .blocks_dal() .get_sealed_miniblock_number() @@ -202,7 +202,7 @@ impl SealedMiniblockNumber { pub(crate) struct RpcState { pub(super) current_method: Arc, pub(super) installed_filters: Option>>, - pub(super) connection_pool: ConnectionPool, + pub(super) connection_pool: ConnectionPool, pub(super) tree_api: Option>, pub(super) tx_sender: TxSender, pub(super) sync_state: Option, @@ -239,7 +239,7 @@ impl RpcState { /// Resolves the specified block ID to a block number, which is guaranteed to be present in the node storage. pub(crate) async fn resolve_block( &self, - connection: &mut StorageProcessor<'_, Server>, + connection: &mut Connection<'_, Core>, block: api::BlockId, ) -> Result { self.start_info.ensure_not_pruned(block)?; @@ -260,7 +260,7 @@ impl RpcState { /// non-existing blocks. pub(crate) async fn resolve_block_unchecked( &self, - connection: &mut StorageProcessor<'_, Server>, + connection: &mut Connection<'_, Core>, block: api::BlockId, ) -> Result, Web3Error> { self.start_info.ensure_not_pruned(block)?; @@ -279,7 +279,7 @@ impl RpcState { pub(crate) async fn resolve_block_args( &self, - connection: &mut StorageProcessor<'_, Server>, + connection: &mut Connection<'_, Core>, block: api::BlockId, ) -> Result { BlockArgs::new(connection, block, self.start_info) @@ -301,7 +301,7 @@ impl RpcState { let block_number = block_number.unwrap_or(api::BlockNumber::Latest); let block_id = api::BlockId::Number(block_number); - let mut conn = self.connection_pool.access_storage_tagged("api").await?; + let mut conn = self.connection_pool.connection_tagged("api").await?; Ok(self.resolve_block(&mut conn, block_id).await.unwrap()) // ^ `unwrap()` is safe: `resolve_block_id(api::BlockId::Number(_))` can only return `None` // if called with an explicit number, and we've handled this case earlier. @@ -322,7 +322,7 @@ impl RpcState { (Some(block_hash), None, None) => { let block_number = self .connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await? 
.blocks_web3_dal() .resolve_block_id(api::BlockId::Hash(block_hash)) @@ -347,7 +347,7 @@ impl RpcState { ) -> Result { let pending_block = self .connection_pool - .access_storage_tagged("api") + .connection_tagged("api") .await? .blocks_web3_dal() .resolve_block_id(api::BlockId::Number(api::BlockNumber::Pending)) @@ -371,7 +371,7 @@ impl RpcState { if call_request.nonce.is_some() { return Ok(()); } - let mut connection = self.connection_pool.access_storage_tagged("api").await?; + let mut connection = self.connection_pool.connection_tagged("api").await?; let latest_block_id = api::BlockId::Number(api::BlockNumber::Latest); let latest_block_number = self.resolve_block(&mut connection, latest_block_id).await?; diff --git a/core/lib/zksync_core/src/api_server/web3/tests/debug.rs b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs index 386175668ca..b8813bc6ffe 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs @@ -34,9 +34,9 @@ struct TraceBlockTest(MiniblockNumber); #[async_trait] impl HttpTest for TraceBlockTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let tx_results = [0, 1, 2].map(execute_l2_transaction_with_traces); - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let new_miniblock = store_miniblock(&mut storage, self.0, &tx_results).await?; drop(storage); @@ -97,9 +97,9 @@ struct TraceBlockFlatTest(MiniblockNumber); #[async_trait] impl HttpTest for TraceBlockFlatTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let tx_results = [0, 1, 2].map(execute_l2_transaction_with_traces); - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let _new_miniblock = store_miniblock(&mut storage, self.0, &tx_results).await?; drop(storage); @@ -173,9 +173,9 @@ struct TraceTransactionTest; #[async_trait] impl HttpTest for TraceTransactionTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let tx_results = [execute_l2_transaction_with_traces(0)]; - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; store_miniblock(&mut storage, MiniblockNumber(1), &tx_results).await?; drop(storage); @@ -212,7 +212,7 @@ impl HttpTest for TraceBlockTestWithSnapshotRecovery { StorageInitialization::empty_recovery() } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let snapshot_miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK; let missing_miniblock_numbers = [ MiniblockNumber(0), diff --git a/core/lib/zksync_core/src/api_server/web3/tests/filters.rs b/core/lib/zksync_core/src/api_server/web3/tests/filters.rs index 1a57d57de54..93e44f3f473 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/filters.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/filters.rs @@ -22,7 +22,7 @@ impl HttpTest for BasicFilterChangesTest { } } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: 
&HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let block_filter_id = client.new_block_filter().await?; let tx_filter_id = client.new_pending_transaction_filter().await?; @@ -32,7 +32,7 @@ impl HttpTest for BasicFilterChangesTest { let tx_result = execute_l2_transaction(create_l2_transaction(1, 2)); let new_tx_hash = tx_result.hash; let new_miniblock = store_miniblock( - &mut pool.access_storage().await?, + &mut pool.connection().await?, if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 2 } else { @@ -104,7 +104,7 @@ impl HttpTest for LogFilterChangesTest { } } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let all_logs_filter_id = client.new_filter(Filter::default()).await?; let address_filter = Filter { address: Some(Address::repeat_byte(23).into()), @@ -117,7 +117,7 @@ impl HttpTest for LogFilterChangesTest { }; let topics_filter_id = client.new_filter(topics_filter).await?; - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let next_local_miniblock = if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0 + 2 } else { @@ -175,7 +175,7 @@ struct LogFilterChangesWithBlockBoundariesTest; #[async_trait] impl HttpTest for LogFilterChangesWithBlockBoundariesTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let lower_bound_filter = Filter { from_block: Some(api::BlockNumber::Number(2.into())), ..Filter::default() @@ -193,7 +193,7 @@ impl HttpTest for LogFilterChangesWithBlockBoundariesTest { }; let bounded_filter_id = client.new_filter(bounded_filter).await?; - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let (_, events) = store_events(&mut storage, 1, 0).await?; drop(storage); let events: Vec<_> = events.iter().collect(); @@ -218,7 +218,7 @@ impl HttpTest for LogFilterChangesWithBlockBoundariesTest { assert_eq!(bounded_logs, upper_bound_logs); // Add another miniblock with events to the storage. - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let (_, new_events) = store_events(&mut storage, 2, 4).await?; drop(storage); let new_events: Vec<_> = new_events.iter().collect(); @@ -236,7 +236,7 @@ impl HttpTest for LogFilterChangesWithBlockBoundariesTest { // Add miniblock #3. It should not be picked up by the bounded and upper bound filters, // and should be picked up by the lower bound filter. 
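Test code follows the same rename: pools are built with `ConnectionPool::<Core>::test_pool()` and handed out via `connection()`. A minimal standalone test sketch (the seeding step is elided):

```rust
use zksync_dal::{ConnectionPool, Core};

#[tokio::test]
async fn test_pool_smoke() {
    let pool = ConnectionPool::<Core>::test_pool().await;
    let storage = pool.connection().await.unwrap();
    // Seed state here (e.g. `ensure_genesis_state` as in the tests above),
    // then drop the connection before spawning the server under test.
    drop(storage);
}
```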
- let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let (_, new_events) = store_events(&mut storage, 3, 8).await?; drop(storage); let new_events: Vec<_> = new_events.iter().collect(); @@ -279,11 +279,7 @@ struct DisableFiltersTest; #[async_trait] impl HttpTest for DisableFiltersTest { - async fn test( - &self, - client: &HttpClient, - _pool: &ConnectionPool, - ) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { let filter = Filter { from_block: Some(api::BlockNumber::Number(2.into())), ..Filter::default() diff --git a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs index 964eef15de6..c837b1e4e1b 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs @@ -15,9 +15,7 @@ use zksync_config::configs::{ chain::{NetworkConfig, StateKeeperConfig}, ContractsConfig, }; -use zksync_dal::{ - transactions_dal::L2TxSubmissionResult, ConnectionPool, ServerDals, StorageProcessor, -}; +use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, CoreDal}; use zksync_health_check::CheckHealth; use zksync_types::{ api, @@ -108,7 +106,7 @@ impl ApiServerHandles { pub(crate) async fn spawn_http_server( api_config: InternalApiConfig, - pool: ConnectionPool, + pool: ConnectionPool, tx_executor: MockTransactionExecutor, method_tracer: Arc, stop_receiver: watch::Receiver, @@ -128,7 +126,7 @@ pub(crate) async fn spawn_http_server( async fn spawn_ws_server( api_config: InternalApiConfig, - pool: ConnectionPool, + pool: ConnectionPool, stop_receiver: watch::Receiver, websocket_requests_per_minute_limit: Option, ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { @@ -147,7 +145,7 @@ async fn spawn_ws_server( async fn spawn_server( transport: ApiTransportLabel, api_config: InternalApiConfig, - pool: ConnectionPool, + pool: ConnectionPool, websocket_requests_per_minute_limit: Option, tx_executor: MockTransactionExecutor, method_tracer: Arc, @@ -203,7 +201,7 @@ trait HttpTest: Send + Sync { Arc::default() } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()>; + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()>; /// Overrides the `filters_disabled` configuration parameter for HTTP server startup fn filters_disabled(&self) -> bool { @@ -235,7 +233,7 @@ impl StorageInitialization { async fn prepare_storage( &self, network_config: &NetworkConfig, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { match self { Self::Genesis => { @@ -271,9 +269,9 @@ impl StorageInitialization { } async fn test_http_server(test: impl HttpTest) { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let network_config = NetworkConfig::for_tests(); - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); test.storage_initialization() .prepare_storage(&network_config, &mut storage) .await @@ -342,7 +340,7 @@ fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { /// Stores miniblock #1 with a single transaction and returns the miniblock header + transaction hash. 
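Helpers like `store_miniblock` below now take `&mut Connection<'_, Core>` instead of `&mut StorageProcessor<'_, Server>`; any utility following suit looks like this (illustrative assertion helper, assuming `get_sealed_miniblock_number` returns an `Option` as its usage above suggests):

```rust
use zksync_dal::{Connection, Core, CoreDal};
use zksync_types::MiniblockNumber;

async fn assert_sealed_at_least(
    storage: &mut Connection<'_, Core>,
    number: MiniblockNumber,
) -> anyhow::Result<()> {
    let sealed = storage.blocks_dal().get_sealed_miniblock_number().await?;
    anyhow::ensure!(sealed >= Some(number), "miniblock {number:?} not sealed");
    Ok(())
}
```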
async fn store_miniblock( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, number: MiniblockNumber, transaction_results: &[TransactionExecutionResult], ) -> anyhow::Result { @@ -368,7 +366,7 @@ async fn store_miniblock( } async fn seal_l1_batch( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, number: L1BatchNumber, ) -> anyhow::Result<()> { let header = create_l1_batch(number.0); @@ -393,7 +391,7 @@ async fn seal_l1_batch( } async fn store_events( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, miniblock_number: u32, start_idx: u32, ) -> anyhow::Result<(IncludedTxLocation, Vec)> { @@ -453,11 +451,7 @@ struct HttpServerBasicsTest; #[async_trait] impl HttpTest for HttpServerBasicsTest { - async fn test( - &self, - client: &HttpClient, - _pool: &ConnectionPool, - ) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { let block_number = client.get_block_number().await?; assert_eq!(block_number, U64::from(0)); @@ -487,11 +481,7 @@ impl HttpTest for BlockMethodsWithSnapshotRecovery { StorageInitialization::empty_recovery() } - async fn test( - &self, - client: &HttpClient, - _pool: &ConnectionPool, - ) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { let block = client.get_block_by_number(1_000.into(), false).await?; assert!(block.is_none()); @@ -564,11 +554,7 @@ impl HttpTest for L1BatchMethodsWithSnapshotRecovery { StorageInitialization::empty_recovery() } - async fn test( - &self, - client: &HttpClient, - _pool: &ConnectionPool, - ) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { let miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; let l1_batch_number = StorageInitialization::SNAPSHOT_RECOVERY_BATCH + 1; assert_eq!( @@ -659,11 +645,7 @@ impl HttpTest for StorageAccessWithSnapshotRecovery { StorageInitialization::Recovery { logs, factory_deps } } - async fn test( - &self, - client: &HttpClient, - _pool: &ConnectionPool, - ) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { let address = Address::repeat_byte(1); let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; for number in [0, 1, first_local_miniblock.0 - 1] { @@ -704,9 +686,9 @@ struct TransactionCountTest; #[async_trait] impl HttpTest for TransactionCountTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let test_address = Address::repeat_byte(11); - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let mut miniblock_number = MiniblockNumber(0); for nonce in [0, 1] { let mut committed_tx = create_l2_transaction(10, 200); @@ -801,7 +783,7 @@ impl HttpTest for TransactionCountAfterSnapshotRecoveryTest { } } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let test_address = Address::repeat_byte(11); let pending_count = client.get_transaction_count(test_address, None).await?; assert_eq!(pending_count, 3.into()); @@ -809,7 +791,7 @@ impl HttpTest for TransactionCountAfterSnapshotRecoveryTest { let mut pending_tx = 
create_l2_transaction(10, 200); pending_tx.common_data.initiator_address = test_address; pending_tx.common_data.nonce = Nonce(3); - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; storage .transactions_dal() .insert_transaction_l2(pending_tx, TransactionExecutionMetrics::default()) @@ -855,8 +837,8 @@ struct TransactionReceiptsTest; #[async_trait] impl HttpTest for TransactionReceiptsTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { - let mut storage = pool.access_storage().await?; + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + let mut storage = pool.connection().await?; let miniblock_number = MiniblockNumber(1); let tx1 = create_l2_transaction(10, 200); @@ -913,11 +895,11 @@ impl AllAccountBalancesTest { #[async_trait] impl HttpTest for AllAccountBalancesTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let balances = client.get_all_account_balances(Self::ADDRESS).await?; assert_eq!(balances, HashMap::new()); - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; store_miniblock(&mut storage, MiniblockNumber(1), &[]).await?; let eth_balance_key = storage_key_for_eth_balance(&Self::ADDRESS); @@ -985,11 +967,7 @@ impl HttpTest for RpcCallsTracingTest { self.tracer.clone() } - async fn test( - &self, - client: &HttpClient, - _pool: &ConnectionPool, - ) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { let block_number = client.get_block_number().await?; assert_eq!(block_number, U64::from(0)); diff --git a/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs index bb73962a355..42b0cd3913e 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs @@ -27,8 +27,8 @@ impl SnapshotBasicsTest { #[async_trait] impl HttpTest for SnapshotBasicsTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { - let mut storage = pool.access_storage().await.unwrap(); + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + let mut storage = pool.connection().await.unwrap(); store_miniblock( &mut storage, MiniblockNumber(1), diff --git a/core/lib/zksync_core/src/api_server/web3/tests/vm.rs b/core/lib/zksync_core/src/api_server/web3/tests/vm.rs index 316b07e9681..e4364d0404a 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/vm.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/vm.rs @@ -50,11 +50,7 @@ impl HttpTest for CallTest { Self::create_executor(MiniblockNumber(0)) } - async fn test( - &self, - client: &HttpClient, - _pool: &ConnectionPool, - ) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { let call_result = client.call(Self::call_request(b"pending"), None).await?; assert_eq!(call_result.0, b"output"); @@ -106,11 +102,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { CallTest::create_executor(first_local_miniblock) } - async fn test( - &self, - client: &HttpClient, - _pool: &ConnectionPool, - ) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { let 
call_result = client .call(CallTest::call_request(b"pending"), None) .await?; @@ -226,10 +218,10 @@ impl HttpTest for SendRawTransactionTest { tx_executor } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { if !self.snapshot_recovery { // Manually set sufficient balance for the transaction account. - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; storage .storage_logs_dal() .append_storage_logs( @@ -288,11 +280,7 @@ impl HttpTest for TraceCallTest { CallTest::create_executor(MiniblockNumber(0)) } - async fn test( - &self, - client: &HttpClient, - _pool: &ConnectionPool, - ) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { let call_request = CallTest::call_request(b"pending"); let call_result = client.trace_call(call_request.clone(), None, None).await?; Self::assert_debug_call(&call_request, &call_result); @@ -357,11 +345,7 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { CallTest::create_executor(number) } - async fn test( - &self, - client: &HttpClient, - _pool: &ConnectionPool, - ) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { let call_request = CallTest::call_request(b"pending"); let call_result = client.trace_call(call_request.clone(), None, None).await?; TraceCallTest::assert_debug_call(&call_request, &call_result); @@ -447,7 +431,7 @@ impl HttpTest for EstimateGasTest { tx_executor } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let l2_transaction = create_l2_transaction(10, 100); for threshold in [10_000, 50_000, 100_000, 1_000_000] { self.gas_limit_threshold.store(threshold, Ordering::Relaxed); @@ -468,7 +452,7 @@ impl HttpTest for EstimateGasTest { if !self.snapshot_recovery { // Manually set sufficient balance for the transaction account. 
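The balance-seeding step here can be factored out. A sketch assuming `storage_key_for_eth_balance` lives in `zksync_types::utils` and `u256_to_h256` in `zksync_utils` (paths assumed), with error handling left loose because the exact return type of `append_storage_logs` is not visible in this patch:

```rust
use zksync_dal::{ConnectionPool, Core, CoreDal};
use zksync_types::{
    utils::storage_key_for_eth_balance, Address, MiniblockNumber, StorageLog, H256, U256,
};
use zksync_utils::u256_to_h256;

async fn seed_eth_balance(pool: &ConnectionPool<Core>, account: Address) -> anyhow::Result<()> {
    let key = storage_key_for_eth_balance(&account);
    // 1 ETH, as a raw storage write against the genesis miniblock.
    let log = StorageLog::new_write_log(key, u256_to_h256(U256::exp10(18)));
    let mut storage = pool.connection().await?;
    let _ = storage
        .storage_logs_dal()
        .append_storage_logs(MiniblockNumber(0), &[(H256::zero(), vec![log])])
        .await; // error handling elided; adjust to the DAL's actual return type
    Ok(())
}
```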
let storage_log = SendRawTransactionTest::balance_storage_log(); - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; storage .storage_logs_dal() .append_storage_logs(MiniblockNumber(0), &[(H256::zero(), vec![storage_log])]) diff --git a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs index cbaa52645de..ff0f4237ef3 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs @@ -96,8 +96,8 @@ async fn wait_for_notifier_miniblock( #[tokio::test] async fn notifiers_start_after_snapshot_recovery() { - let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); prepare_recovery_snapshot( &mut storage, StorageInitialization::SNAPSHOT_RECOVERY_BATCH, @@ -152,7 +152,7 @@ trait WsTest: Send + Sync { async fn test( &self, client: &WsClient, - pool: &ConnectionPool, + pool: &ConnectionPool, pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()>; @@ -162,12 +162,12 @@ trait WsTest: Send + Sync { } async fn test_ws_server(test: impl WsTest) { - let pool = ConnectionPool::::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let network_config = NetworkConfig::for_tests(); let contracts_config = ContractsConfig::for_tests(); let web3_config = Web3JsonRpcConfig::for_tests(); let api_config = InternalApiConfig::new(&network_config, &web3_config, &contracts_config); - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); test.storage_initialization() .prepare_storage(&network_config, &mut storage) .await @@ -202,7 +202,7 @@ impl WsTest for WsServerCanStartTest { async fn test( &self, client: &WsClient, - _pool: &ConnectionPool, + _pool: &ConnectionPool, _pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { let block_number = client.get_block_number().await?; @@ -243,7 +243,7 @@ impl WsTest for BasicSubscriptionsTest { async fn test( &self, client: &WsClient, - pool: &ConnectionPool, + pool: &ConnectionPool, mut pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { // Wait for the notifiers to get initialized so that they don't skip notifications @@ -266,7 +266,7 @@ impl WsTest for BasicSubscriptionsTest { .await?; wait_for_subscription(&mut pub_sub_events, SubscriptionType::Txs).await; - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let tx_result = execute_l2_transaction(create_l2_transaction(1, 2)); let new_tx_hash = tx_result.hash; let miniblock_number = if self.snapshot_recovery { @@ -382,7 +382,7 @@ impl WsTest for LogSubscriptionsTest { async fn test( &self, client: &WsClient, - pool: &ConnectionPool, + pool: &ConnectionPool, mut pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { let LogSubscriptions { @@ -391,7 +391,7 @@ impl WsTest for LogSubscriptionsTest { mut topic_subscription, } = LogSubscriptions::new(client, &mut pub_sub_events).await?; - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let next_miniblock_number = if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0 + 2 } else { @@ -471,7 +471,7 @@ impl WsTest for LogSubscriptionsWithNewBlockTest { async fn test( &self, client: &WsClient, - pool: &ConnectionPool, + pool: &ConnectionPool, mut 
pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { let LogSubscriptions { @@ -480,7 +480,7 @@ impl WsTest for LogSubscriptionsWithNewBlockTest { .. } = LogSubscriptions::new(client, &mut pub_sub_events).await?; - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let (_, events) = store_events(&mut storage, 1, 0).await?; drop(storage); let events: Vec<_> = events.iter().collect(); @@ -489,7 +489,7 @@ impl WsTest for LogSubscriptionsWithNewBlockTest { assert_logs_match(&all_logs, &events); // Create a new block and wait for the pub-sub notifier to run. - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let (_, new_events) = store_events(&mut storage, 2, 4).await?; drop(storage); let new_events: Vec<_> = new_events.iter().collect(); @@ -519,7 +519,7 @@ impl WsTest for LogSubscriptionsWithManyBlocksTest { async fn test( &self, client: &WsClient, - pool: &ConnectionPool, + pool: &ConnectionPool, mut pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { let LogSubscriptions { @@ -529,7 +529,7 @@ impl WsTest for LogSubscriptionsWithManyBlocksTest { } = LogSubscriptions::new(client, &mut pub_sub_events).await?; // Add two blocks in the storage atomically. - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let mut transaction = storage.start_transaction().await?; let (_, events) = store_events(&mut transaction, 1, 0).await?; let events: Vec<_> = events.iter().collect(); @@ -565,14 +565,14 @@ impl WsTest for LogSubscriptionsWithDelayTest { async fn test( &self, client: &WsClient, - pool: &ConnectionPool, + pool: &ConnectionPool, mut pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { // Wait until notifiers are initialized. wait_for_notifiers(&mut pub_sub_events, &[SubscriptionType::Logs]).await; // Store a miniblock w/o subscriptions being present. - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; store_events(&mut storage, 1, 0).await?; drop(storage); @@ -600,7 +600,7 @@ impl WsTest for LogSubscriptionsWithDelayTest { wait_for_subscription(&mut pub_sub_events, SubscriptionType::Logs).await; } - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let (_, new_events) = store_events(&mut storage, 2, 4).await?; drop(storage); let new_events: Vec<_> = new_events.iter().collect(); @@ -612,7 +612,7 @@ impl WsTest for LogSubscriptionsWithDelayTest { // Check the behavior of remaining subscriptions if a subscription is dropped. 
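Earlier in this hunk, two blocks are stored atomically by running both writes on one DB transaction. In isolation the pattern is (assuming a `commit` method on the transaction, as used throughout the crate):

```rust
use zksync_dal::{ConnectionPool, Core};

async fn write_two_blocks_atomically(pool: &ConnectionPool<Core>) -> anyhow::Result<()> {
    let mut storage = pool.connection().await?;
    let mut transaction = storage.start_transaction().await?;
    // ... store miniblocks #1 and #2 on `transaction` here ...
    transaction.commit().await?;
    Ok(())
}
```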
all_logs_subscription.unsubscribe().await?; - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let (_, new_events) = store_events(&mut storage, 3, 8).await?; drop(storage); @@ -635,7 +635,7 @@ impl WsTest for RateLimitingTest { async fn test( &self, client: &WsClient, - _pool: &ConnectionPool, + _pool: &ConnectionPool, _pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { client.chain_id().await.unwrap(); @@ -672,7 +672,7 @@ impl WsTest for BatchGetsRateLimitedTest { async fn test( &self, client: &WsClient, - _pool: &ConnectionPool, + _pool: &ConnectionPool, _pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { client.chain_id().await.unwrap(); diff --git a/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs b/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs index d4acd742381..0c44997faf7 100644 --- a/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs +++ b/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs @@ -6,7 +6,7 @@ use multivm::interface::{L2BlockEnv, VmInterface}; use tokio::{runtime::Handle, task::JoinHandle}; use vm_utils::{create_vm, execute_tx}; use zksync_dal::{ - basic_witness_input_producer_dal::JOB_MAX_ATTEMPT, ConnectionPool, Server, ServerDals, + basic_witness_input_producer_dal::JOB_MAX_ATTEMPT, ConnectionPool, Core, CoreDal, }; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_queued_job_processor::JobProcessor; @@ -22,14 +22,14 @@ mod metrics; /// to be run only using the object store information, having no other external dependency. #[derive(Debug)] pub struct BasicWitnessInputProducer { - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, l2_chain_id: L2ChainId, object_store: Arc, } impl BasicWitnessInputProducer { pub async fn new( - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, store_factory: &ObjectStoreFactory, l2_chain_id: L2ChainId, ) -> anyhow::Result { @@ -44,11 +44,11 @@ impl BasicWitnessInputProducer { rt_handle: Handle, l1_batch_number: L1BatchNumber, started_at: Instant, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, l2_chain_id: L2ChainId, ) -> anyhow::Result { let mut connection = rt_handle - .block_on(connection_pool.access_storage()) + .block_on(connection_pool.connection()) .context("failed to get connection for BasicWitnessInputProducer")?; let miniblocks_execution_data = rt_handle.block_on( @@ -114,7 +114,7 @@ impl JobProcessor for BasicWitnessInputProducer { const SERVICE_NAME: &'static str = "basic_witness_input_producer"; async fn get_next_job(&self) -> anyhow::Result> { - let mut connection = self.connection_pool.access_storage().await?; + let mut connection = self.connection_pool.connection().await?; let l1_batch_to_process = connection .basic_witness_input_producer_dal() .get_next_basic_witness_input_producer_job() @@ -126,7 +126,7 @@ impl JobProcessor for BasicWitnessInputProducer { async fn save_failure(&self, job_id: Self::JobId, started_at: Instant, error: String) { let attempts = self .connection_pool - .access_storage() + .connection() .await .unwrap() .basic_witness_input_producer_dal() @@ -176,7 +176,7 @@ impl JobProcessor for BasicWitnessInputProducer { .observe(upload_started_at.elapsed()); let mut connection = self .connection_pool - .access_storage() + .connection() .await .context("failed to acquire DB connection for BasicWitnessInputProducer")?; let mut transaction = connection @@ -203,7 +203,7 @@ impl JobProcessor for 
BasicWitnessInputProducer { async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { let mut connection = self .connection_pool - .access_storage() + .connection() .await .context("failed to acquire DB connection for BasicWitnessInputProducer")?; connection diff --git a/core/lib/zksync_core/src/block_reverter/mod.rs b/core/lib/zksync_core/src/block_reverter/mod.rs index 214d1756252..439ae3fc43f 100644 --- a/core/lib/zksync_core/src/block_reverter/mod.rs +++ b/core/lib/zksync_core/src/block_reverter/mod.rs @@ -5,7 +5,7 @@ use serde::Serialize; use tokio::time::sleep; use zksync_config::{ContractsConfig, ETHSenderConfig}; use zksync_contracts::zksync_contract; -use zksync_dal::{ConnectionPool, Server, ServerDals}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_state::RocksdbStorage; @@ -100,7 +100,7 @@ pub struct BlockReverter { state_keeper_cache_path: String, merkle_tree_path: String, eth_config: Option, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, executed_batches_revert_mode: L1ExecutedBatchesRevert, } @@ -110,7 +110,7 @@ impl BlockReverter { state_keeper_cache_path: String, merkle_tree_path: String, eth_config: Option, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, executed_batches_revert_mode: L1ExecutedBatchesRevert, ) -> Self { Self { @@ -137,7 +137,7 @@ impl BlockReverter { self.executed_batches_revert_mode, L1ExecutedBatchesRevert::Disallowed ) { - let mut storage = self.connection_pool.access_storage().await.unwrap(); + let mut storage = self.connection_pool.connection().await.unwrap(); let last_executed_l1_batch = storage .blocks_dal() .get_number_of_last_l1_batch_executed_on_eth() @@ -167,7 +167,7 @@ impl BlockReverter { if rollback_tree { let storage_root_hash = self .connection_pool - .access_storage() + .connection() .await .unwrap() .blocks_dal() @@ -224,7 +224,7 @@ impl BlockReverter { .expect("Failed initializing state keeper cache"); if sk_cache.l1_batch_number().await > Some(last_l1_batch_to_keep + 1) { - let mut storage = self.connection_pool.access_storage().await.unwrap(); + let mut storage = self.connection_pool.connection().await.unwrap(); tracing::info!("Rolling back state keeper cache..."); sk_cache .rollback(&mut storage, last_l1_batch_to_keep) @@ -239,7 +239,7 @@ impl BlockReverter { /// If `node_role` is `Main` a consensus hard-fork is performed. 
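The reverter keeps a `ConnectionPool<Core>` in its struct and takes a fresh connection per step; the executed-batch guard above boils down to something like this (illustrative free function; the `Option<L1BatchNumber>` return is inferred from the DAL method's name and usage):

```rust
use zksync_dal::{ConnectionPool, Core, CoreDal};
use zksync_types::L1BatchNumber;

async fn last_executed_batch(
    pool: &ConnectionPool<Core>,
) -> anyhow::Result<Option<L1BatchNumber>> {
    let mut storage = pool.connection().await?;
    Ok(storage
        .blocks_dal()
        .get_number_of_last_l1_batch_executed_on_eth()
        .await?)
}
```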
async fn rollback_postgres(&self, last_l1_batch_to_keep: L1BatchNumber) { tracing::info!("rolling back postgres data..."); - let mut storage = self.connection_pool.access_storage().await.unwrap(); + let mut storage = self.connection_pool.connection().await.unwrap(); let mut transaction = storage.start_transaction().await.unwrap(); let (_, last_miniblock_to_keep) = transaction @@ -453,7 +453,7 @@ impl BlockReverter { pub async fn clear_failed_l1_transactions(&self) { tracing::info!("clearing failed L1 transactions..."); self.connection_pool - .access_storage() + .connection() .await .unwrap() .eth_sender_dal() diff --git a/core/lib/zksync_core/src/commitment_generator/mod.rs b/core/lib/zksync_core/src/commitment_generator/mod.rs index cd87f2f8b71..ac76a25b790 100644 --- a/core/lib/zksync_core/src/commitment_generator/mod.rs +++ b/core/lib/zksync_core/src/commitment_generator/mod.rs @@ -6,7 +6,7 @@ use metrics::{CommitmentStage, METRICS}; use multivm::zk_evm_latest::ethereum_types::U256; use tokio::{sync::watch, task::JoinHandle}; use zksync_commitment_utils::{bootloader_initial_content_commitment, events_queue_commitment}; -use zksync_dal::{ConnectionPool, Server, ServerDals}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_l1_contract_interface::i_executor::commit::kzg::pubdata_to_blob_commitments; use zksync_types::{ @@ -23,12 +23,12 @@ const SLEEP_INTERVAL: Duration = Duration::from_millis(100); #[derive(Debug)] pub struct CommitmentGenerator { - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, health_updater: HealthUpdater, } impl CommitmentGenerator { - pub fn new(connection_pool: ConnectionPool) -> Self { + pub fn new(connection_pool: ConnectionPool) -> Self { Self { connection_pool, health_updater: ReactiveHealthCheck::new("commitment_generator").1, @@ -46,7 +46,7 @@ impl CommitmentGenerator { ) -> anyhow::Result { let mut connection = self .connection_pool - .access_storage_tagged("commitment_generator") + .connection_tagged("commitment_generator") .await?; let events_queue_from_db = connection .blocks_dal() @@ -127,7 +127,7 @@ impl CommitmentGenerator { let mut connection = self .connection_pool - .access_storage_tagged("commitment_generator") + .connection_tagged("commitment_generator") .await?; let header = connection .blocks_dal() @@ -277,7 +277,7 @@ impl CommitmentGenerator { let latency = METRICS.generate_commitment_latency_stage[&CommitmentStage::SaveResults].start(); self.connection_pool - .access_storage_tagged("commitment_generator") + .connection_tagged("commitment_generator") .await? .blocks_dal() .save_l1_batch_commitment_artifacts(l1_batch_number, &artifacts) @@ -305,7 +305,7 @@ impl CommitmentGenerator { let Some(l1_batch_number) = self .connection_pool - .access_storage_tagged("commitment_generator") + .connection_tagged("commitment_generator") .await? 
             .blocks_dal()
             .get_next_l1_batch_ready_for_commitment_generation()
diff --git a/core/lib/zksync_core/src/consensus/storage/mod.rs b/core/lib/zksync_core/src/consensus/storage/mod.rs
index 318ec17c845..7335f797eff 100644
--- a/core/lib/zksync_core/src/consensus/storage/mod.rs
+++ b/core/lib/zksync_core/src/consensus/storage/mod.rs
@@ -5,7 +5,7 @@ use zksync_concurrency::{ctx, error::Wrap as _, sync, time};
 use zksync_consensus_bft::PayloadManager;
 use zksync_consensus_roles::validator;
 use zksync_consensus_storage::{BlockStoreState, PersistentBlockStore, ReplicaState, ReplicaStore};
-use zksync_dal::{consensus_dal::Payload, ConnectionPool, Server, ServerDals, StorageProcessor};
+use zksync_dal::{consensus_dal::Payload, ConnectionPool, Core, CoreDal};
 use zksync_types::MiniblockNumber;
 #[cfg(test)]
@@ -19,8 +19,8 @@ use crate::{
     },
 };
-/// Context-aware `zksync_dal::StorageProcessor` wrapper.
-pub(super) struct Connection<'a>(pub(super) StorageProcessor<'a, Server>);
+/// Context-aware `zksync_dal::Connection` wrapper.
+pub(super) struct Connection<'a>(pub(super) zksync_dal::Connection<'a, Core>);
 impl<'a> Connection<'a> {
     /// Wrapper for `start_transaction()`.
@@ -177,7 +177,7 @@ impl Cursor {
 /// Wrapper of `ConnectionPool` implementing `ReplicaStore` and `PayloadManager`.
 #[derive(Clone, Debug)]
-pub struct Store(pub ConnectionPool<Server>);
+pub struct Store(pub ConnectionPool<Core>);
 /// Wrapper of `ConnectionPool` implementing `PersistentBlockStore`.
 #[derive(Debug)]
@@ -196,11 +196,10 @@ impl Store {
         }
     }
-    /// Wrapper for `access_storage_tagged()`.
+    /// Wrapper for `connection_tagged()`.
     pub(super) async fn access<'a>(&'a self, ctx: &ctx::Ctx) -> ctx::Result<Connection<'a>> {
         Ok(Connection(
-            ctx.wait(self.0.access_storage_tagged("consensus"))
-                .await??,
+            ctx.wait(self.0.connection_tagged("consensus")).await??,
         ))
     }
diff --git a/core/lib/zksync_core/src/consensus/storage/testonly.rs b/core/lib/zksync_core/src/consensus/storage/testonly.rs
index 9c08b2524a5..64ea99281ea 100644
--- a/core/lib/zksync_core/src/consensus/storage/testonly.rs
+++ b/core/lib/zksync_core/src/consensus/storage/testonly.rs
@@ -64,7 +64,7 @@ impl Store {
     pub(crate) async fn from_genesis() -> Self {
         let pool = ConnectionPool::test_pool().await;
         {
-            let mut storage = pool.access_storage().await.unwrap();
+            let mut storage = pool.connection().await.unwrap();
             ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock())
                 .await
                 .unwrap();
@@ -76,7 +76,7 @@ impl Store {
     pub(crate) async fn from_snapshot(snapshot: Snapshot) -> Self {
         let pool = ConnectionPool::test_pool().await;
         {
-            let mut storage = pool.access_storage().await.unwrap();
+            let mut storage = pool.connection().await.unwrap();
             recover(&mut storage, snapshot).await;
         }
         Self(pool)
diff --git a/core/lib/zksync_core/src/consensus/testonly.rs b/core/lib/zksync_core/src/consensus/testonly.rs
index e30fc401a49..769f81e0883 100644
--- a/core/lib/zksync_core/src/consensus/testonly.rs
+++ b/core/lib/zksync_core/src/consensus/testonly.rs
@@ -7,7 +7,7 @@ use zksync_concurrency::{ctx, error::Wrap as _, limiter, scope, sync, time};
 use zksync_config::configs;
 use zksync_consensus_roles::validator;
 use zksync_contracts::BaseSystemContractsHashes;
-use zksync_dal::ServerDals;
+use zksync_dal::CoreDal;
 use zksync_types::{
     api, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2ChainId, MiniblockNumber,
     ProtocolVersionId, H256,
diff --git a/core/lib/zksync_core/src/consistency_checker/mod.rs b/core/lib/zksync_core/src/consistency_checker/mod.rs
index 60df7be6d86..5a2ac3d14d9 100644
--- a/core/lib/zksync_core/src/consistency_checker/mod.rs
+++ b/core/lib/zksync_core/src/consistency_checker/mod.rs
@@ -4,7 +4,7 @@ use anyhow::Context as _;
 use serde::Serialize;
 use tokio::sync::watch;
 use zksync_contracts::PRE_BOOJUM_COMMIT_FUNCTION;
-use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
 use zksync_eth_client::{
     clients::QueryClient, CallFunctionArgs, Error as L1ClientError, EthInterface,
 };
@@ -149,7 +149,7 @@ impl LocalL1BatchCommitData {
     /// Returns `Ok(None)` if Postgres doesn't contain all data necessary to check L1 commitment
     /// for the specified batch.
     async fn new(
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         batch_number: L1BatchNumber,
     ) -> anyhow::Result<Option<Self>> {
         let Some(storage_l1_batch) = storage
@@ -253,7 +253,7 @@ pub struct ConsistencyChecker {
     l1_client: Box<dyn EthInterface>,
     event_handler: Box<dyn HandleConsistencyCheckerEvent>,
     l1_data_mismatch_behavior: L1DataMismatchBehavior,
-    pool: ConnectionPool<Server>,
+    pool: ConnectionPool<Core>,
     health_check: ReactiveHealthCheck,
 }
@@ -263,7 +263,7 @@ impl ConsistencyChecker {
     pub fn new(
         web3_url: &str,
         max_batches_to_recheck: u32,
-        pool: ConnectionPool<Server>,
+        pool: ConnectionPool<Core>,
     ) -> anyhow::Result<Self> {
         let web3 = QueryClient::new(web3_url).context("cannot create L1 Web3 client")?;
         let (health_check, health_updater) = ConsistencyCheckerHealthUpdater::new();
@@ -436,7 +436,7 @@ impl ConsistencyChecker {
     async fn last_committed_batch(&self) -> anyhow::Result<Option<L1BatchNumber>> {
         Ok(self
             .pool
-            .access_storage()
+            .connection()
             .await?
             .blocks_dal()
             .get_number_of_last_l1_batch_committed_on_eth()
@@ -522,7 +522,7 @@ impl ConsistencyChecker {
                 break;
             }
-            let mut storage = self.pool.access_storage().await?;
+            let mut storage = self.pool.connection().await?;
             // The batch might be already committed but not yet processed by the external node's tree
             // OR the batch might be processed by the external node's tree but not yet committed.
             // We need both.
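Aside (not part of the patch): a minimal sketch of how the renamed API reads end to end, assuming the signatures shown in the hunks above; `example_last_committed_batch` is a hypothetical helper mirroring `ConsistencyChecker::last_committed_batch`.

    // Hedged sketch only: the post-rename connection flow.
    use anyhow::Context as _;
    use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
    use zksync_types::L1BatchNumber;

    async fn example_last_committed_batch(
        pool: &ConnectionPool<Core>,
    ) -> anyhow::Result<Option<L1BatchNumber>> {
        // `connection()` is the renamed `access_storage()`; it yields a `Connection<'_, Core>`.
        let mut storage: Connection<'_, Core> = pool
            .connection()
            .await
            .context("failed to acquire a DB connection")?;
        // DAL accessors such as `blocks_dal()` now come from the `CoreDal` extension trait.
        Ok(storage
            .blocks_dal()
            .get_number_of_last_l1_batch_committed_on_eth()
            .await?)
    }
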
diff --git a/core/lib/zksync_core/src/consistency_checker/tests/mod.rs b/core/lib/zksync_core/src/consistency_checker/tests/mod.rs
index ac3f9154cda..b34255b9fe7 100644
--- a/core/lib/zksync_core/src/consistency_checker/tests/mod.rs
+++ b/core/lib/zksync_core/src/consistency_checker/tests/mod.rs
@@ -6,7 +6,7 @@ use assert_matches::assert_matches;
 use once_cell::sync::Lazy;
 use test_casing::{test_casing, Product};
 use tokio::sync::mpsc;
-use zksync_dal::StorageProcessor;
+use zksync_dal::Connection;
 use zksync_eth_client::{clients::MockEthereum, Options};
 use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo;
 use zksync_types::{
@@ -72,7 +72,7 @@ fn build_commit_tx_input_data(batches: &[L1BatchWithMetadata]) -> Vec<u8> {
     encoded
 }
-fn create_mock_checker(client: MockEthereum, pool: ConnectionPool<Server>) -> ConsistencyChecker {
+fn create_mock_checker(client: MockEthereum, pool: ConnectionPool<Core>) -> ConsistencyChecker {
     let (health_check, health_updater) = ConsistencyCheckerHealthUpdater::new();
     ConsistencyChecker {
         contract: zksync_contracts::zksync_contract(),
@@ -230,7 +230,7 @@ enum SaveAction<'a> {
 impl SaveAction<'_> {
     async fn apply(
         self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         commit_tx_hash_by_l1_batch: &HashMap<L1BatchNumber, H256>,
     ) {
         match self {
@@ -361,8 +361,8 @@ async fn normal_checker_function(
 ) {
     println!("Using save_actions_mapper={mapper_name}");
-    let pool = ConnectionPool::<Server>::test_pool().await;
-    let mut storage = pool.access_storage().await.unwrap();
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let mut storage = pool.connection().await.unwrap();
     ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock())
         .await
         .unwrap();
@@ -431,8 +431,8 @@ async fn checker_processes_pre_boojum_batches(
 ) {
     println!("Using save_actions_mapper={mapper_name}");
-    let pool = ConnectionPool::<Server>::test_pool().await;
-    let mut storage = pool.access_storage().await.unwrap();
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let mut storage = pool.connection().await.unwrap();
     let genesis_params = GenesisParams {
         protocol_version: PRE_BOOJUM_PROTOCOL_VERSION,
         ..GenesisParams::mock()
@@ -504,8 +504,8 @@ async fn checker_processes_pre_boojum_batches(
 #[test_casing(2, [false, true])]
 #[tokio::test]
 async fn checker_functions_after_snapshot_recovery(delay_batch_insertion: bool) {
-    let pool = ConnectionPool::<Server>::test_pool().await;
-    let mut storage = pool.access_storage().await.unwrap();
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let mut storage = pool.connection().await.unwrap();
     storage
         .protocol_versions_dal()
         .save_protocol_version_with_tx(ProtocolVersion::default())
@@ -677,8 +677,8 @@ impl IncorrectDataKind {
 // ^ `snapshot_recovery = true` is tested below; we don't want to run it with all incorrect data kinds
 #[tokio::test]
 async fn checker_detects_incorrect_tx_data(kind: IncorrectDataKind, snapshot_recovery: bool) {
-    let pool = ConnectionPool::<Server>::test_pool().await;
-    let mut storage = pool.access_storage().await.unwrap();
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let mut storage = pool.connection().await.unwrap();
     if snapshot_recovery {
         storage
             .protocol_versions_dal()
diff --git a/core/lib/zksync_core/src/eth_sender/aggregator.rs b/core/lib/zksync_core/src/eth_sender/aggregator.rs
index 92c5c16fdb4..8c6bd7cd861 100644
--- a/core/lib/zksync_core/src/eth_sender/aggregator.rs
+++ b/core/lib/zksync_core/src/eth_sender/aggregator.rs
@@ -2,7 +2,7 @@ use std::sync::Arc;
 use zksync_config::configs::eth_sender::{ProofLoadingMode, ProofSendingMode, SenderConfig};
 use zksync_contracts::BaseSystemContractsHashes;
-use zksync_dal::{Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, Core, CoreDal};
 use zksync_l1_contract_interface::i_executor::methods::{
     CommitBatches, ExecuteBatches, ProveBatches,
 };
@@ -108,7 +108,7 @@ impl Aggregator {
     pub async fn get_next_ready_operation(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         base_system_contracts_hashes: BaseSystemContractsHashes,
         protocol_version_id: ProtocolVersionId,
         l1_verifier_config: L1VerifierConfig,
@@ -156,7 +156,7 @@ impl Aggregator {
     async fn get_execute_operations(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         limit: usize,
         last_sealed_l1_batch: L1BatchNumber,
     ) -> Option {
@@ -182,7 +182,7 @@ impl Aggregator {
     async fn get_commit_operation(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         limit: usize,
         last_sealed_batch: L1BatchNumber,
         base_system_contracts_hashes: BaseSystemContractsHashes,
@@ -243,7 +243,7 @@ impl Aggregator {
     }
     async fn load_dummy_proof_operations(
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         limit: usize,
         is_4844_mode: bool,
     ) -> Vec {
@@ -287,7 +287,7 @@ impl Aggregator {
     }
     async fn load_real_proof_operation(
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         l1_verifier_config: L1VerifierConfig,
         proof_loading_mode: &ProofLoadingMode,
         blob_store: &dyn ObjectStore,
@@ -381,7 +381,7 @@ impl Aggregator {
     async fn prepare_dummy_proof_operation(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         ready_for_proof_l1_batches: Vec<L1BatchWithMetadata>,
         last_sealed_l1_batch: L1BatchNumber,
     ) -> Option {
@@ -410,7 +410,7 @@ impl Aggregator {
     async fn get_proof_operation(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         limit: usize,
         last_sealed_l1_batch: L1BatchNumber,
         l1_verifier_config: L1VerifierConfig,
@@ -473,7 +473,7 @@ impl Aggregator {
     }
     async fn extract_ready_subrange(
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         publish_criteria: &mut [Box<dyn L1BatchPublishCriterion>],
         unpublished_l1_batches: Vec<L1BatchWithMetadata>,
         last_sealed_l1_batch: L1BatchNumber,
diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs
index e067524925e..8a281a2364d 100644
--- a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs
+++ b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs
@@ -3,7 +3,7 @@ use std::{convert::TryInto, sync::Arc};
 use tokio::sync::watch;
 use zksync_config::configs::eth_sender::SenderConfig;
 use zksync_contracts::BaseSystemContractsHashes;
-use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
 use zksync_eth_client::{BoundEthInterface, CallFunctionArgs};
 use zksync_l1_contract_interface::{
     i_executor::commit::kzg::{KzgInfo, ZK_SYNC_BYTES_PER_BLOB},
@@ -62,7 +62,7 @@ pub struct EthTxAggregator {
     /// transactions. The `Some` then contains the address of this custom operator
     /// address.
     custom_commit_sender_addr: Option<Address>,
-    pool: ConnectionPool<Server>,
+    pool: ConnectionPool<Core>,
 }
 struct TxData {
@@ -73,7 +73,7 @@ struct TxData {
 impl EthTxAggregator {
     #[allow(clippy::too_many_arguments)]
     pub async fn new(
-        pool: ConnectionPool<Server>,
+        pool: ConnectionPool<Core>,
         config: SenderConfig,
         aggregator: Aggregator,
         eth_client: Arc<dyn BoundEthInterface>,
@@ -119,7 +119,7 @@ impl EthTxAggregator {
     pub async fn run(mut self, stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
         let pool = self.pool.clone();
         loop {
-            let mut storage = pool.access_storage_tagged("eth_sender").await.unwrap();
+            let mut storage = pool.connection_tagged("eth_sender").await.unwrap();
             if *stop_receiver.borrow() {
                 tracing::info!("Stop signal received, eth_tx_aggregator is shutting down");
@@ -342,7 +342,7 @@ impl EthTxAggregator {
     #[tracing::instrument(skip(self, storage))]
     async fn loop_iteration(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
     ) -> Result<(), ETHSenderError> {
         let MulticallData {
             base_system_contracts_hashes,
@@ -385,7 +385,7 @@ impl EthTxAggregator {
     }
     async fn report_eth_tx_saving(
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         aggregated_op: AggregatedOperation,
         tx: &EthTx,
     ) {
@@ -519,7 +519,7 @@ impl EthTxAggregator {
     pub(super) async fn save_eth_tx(
         &self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         aggregated_op: &AggregatedOperation,
         contracts_are_pre_shared_bridge: bool,
     ) -> Result<EthTx, ETHSenderError> {
@@ -569,7 +569,7 @@ impl EthTxAggregator {
     async fn get_next_nonce(
         &self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         from_addr: Option<Address>,
     ) -> Result<u64, ETHSenderError> {
         let db_nonce = storage
diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs
index ed34114f6e9..5a081ef343d 100644
--- a/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs
+++ b/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs
@@ -3,7 +3,7 @@ use std::{sync::Arc, time::Duration};
 use anyhow::Context as _;
 use tokio::sync::watch;
 use zksync_config::configs::eth_sender::SenderConfig;
-use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
 use zksync_eth_client::{
     encode_blob_tx_with_sidecar, BoundEthInterface, Error, EthInterface, ExecutedTxStatus,
     Options, RawTransactionBytes, SignedCallResult,
@@ -58,12 +58,12 @@ pub struct EthTxManager {
     ethereum_gateway_blobs: Option<Arc<dyn BoundEthInterface>>,
     config: SenderConfig,
     gas_adjuster: Arc<dyn L1TxParamsProvider>,
-    pool: ConnectionPool<Server>,
+    pool: ConnectionPool<Core>,
 }
 impl EthTxManager {
     pub fn new(
-        pool: ConnectionPool<Server>,
+        pool: ConnectionPool<Core>,
         config: SenderConfig,
         gas_adjuster: Arc<dyn L1TxParamsProvider>,
         ethereum_gateway: Arc<dyn BoundEthInterface>,
@@ -90,7 +90,7 @@ impl EthTxManager {
     async fn check_all_sending_attempts(
         &self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         op: &EthTx,
     ) -> Option<ExecutedTxStatus> {
         // Checking history items, starting from most recently sent.
@@ -118,7 +118,7 @@ impl EthTxManager {
     async fn calculate_fee(
         &self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         tx: &EthTx,
         time_in_mempool: u32,
     ) -> Result {
@@ -193,7 +193,7 @@ impl EthTxManager {
     async fn increase_priority_fee(
         &self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         eth_tx_id: u32,
         base_fee_per_gas: u64,
     ) -> Result {
@@ -229,7 +229,7 @@ impl EthTxManager {
     pub(crate) async fn send_eth_tx(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         tx: &EthTx,
         time_in_mempool: u32,
         current_block: L1BlockNumber,
@@ -297,7 +297,7 @@ impl EthTxManager {
     async fn send_raw_transaction(
         &self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         tx_history_id: u32,
         raw_tx: RawTransactionBytes,
         current_block: L1BlockNumber,
@@ -416,7 +416,7 @@ impl EthTxManager {
     // returns the one that has to be resent (if there is one).
     pub(super) async fn monitor_inflight_transactions(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         l1_block_numbers: L1BlockNumbers,
     ) -> Result<Option<(EthTx, u32)>, ETHSenderError> {
         METRICS.track_block_numbers(&l1_block_numbers);
@@ -454,7 +454,7 @@ impl EthTxManager {
     async fn monitor_inflight_transactions_inner(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         l1_block_numbers: L1BlockNumbers,
         operator_nonce: OperatorNonce,
         operator_address: Option<Address>,
@@ -581,7 +581,7 @@ impl EthTxManager {
     async fn send_unsent_txs(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         l1_block_numbers: L1BlockNumbers,
     ) {
         for tx in storage.eth_sender_dal().get_unsent_txs().await.unwrap() {
@@ -623,7 +623,7 @@ impl EthTxManager {
     async fn apply_tx_status(
         &self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         tx: &EthTx,
         tx_status: ExecutedTxStatus,
         finalized_block: L1BlockNumber,
@@ -646,7 +646,7 @@ impl EthTxManager {
     pub async fn fail_tx(
         &self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         tx: &EthTx,
         tx_status: ExecutedTxStatus,
     ) {
@@ -674,7 +674,7 @@ impl EthTxManager {
     pub async fn confirm_tx(
         &self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         tx: &EthTx,
         tx_status: ExecutedTxStatus,
     ) {
@@ -730,7 +730,7 @@ impl EthTxManager {
             .get_l1_block_numbers()
             .await
             .context("get_l1_block_numbers()")?;
-        let mut storage = pool.access_storage_tagged("eth_sender").await.unwrap();
+        let mut storage = pool.connection_tagged("eth_sender").await.unwrap();
         self.send_unsent_txs(&mut storage, l1_block_numbers).await;
     }
@@ -738,7 +738,7 @@ impl EthTxManager {
         // will never check in-flight txs status
         let mut last_known_l1_block = L1BlockNumber(0);
         loop {
-            let mut storage = pool.access_storage_tagged("eth_sender").await.unwrap();
+            let mut storage = pool.connection_tagged("eth_sender").await.unwrap();
             if *stop_receiver.borrow() {
                 tracing::info!("Stop signal received, eth_tx_manager is shutting down");
@@ -761,7 +761,7 @@ impl EthTxManager {
     async fn send_new_eth_txs(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         current_block: L1BlockNumber,
     ) {
         let number_inflight_txs = storage
@@ -792,7 +792,7 @@ impl EthTxManager {
     #[tracing::instrument(skip(self, storage))]
     async fn loop_iteration(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         previous_block: L1BlockNumber,
     ) -> Result {
         let l1_block_numbers = self.get_l1_block_numbers().await?;
diff --git a/core/lib/zksync_core/src/eth_sender/metrics.rs b/core/lib/zksync_core/src/eth_sender/metrics.rs
index c2ae78db447..055413d2383 100644
--- a/core/lib/zksync_core/src/eth_sender/metrics.rs
+++ b/core/lib/zksync_core/src/eth_sender/metrics.rs
@@ -3,7 +3,7 @@ use std::{fmt, time::Duration};
 use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics};
-use zksync_dal::{Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, Core, CoreDal};
 use zksync_types::{aggregated_operations::AggregatedActionType, eth_sender::EthTx};
 use zksync_utils::time::seconds_since_epoch;
@@ -113,7 +113,7 @@ impl EthSenderMetrics {
     pub async fn track_eth_tx_metrics(
         &self,
-        connection: &mut StorageProcessor<'_, Server>,
+        connection: &mut Connection<'_, Core>,
         l1_stage: BlockL1Stage,
         tx: &EthTx,
     ) {
diff --git a/core/lib/zksync_core/src/eth_sender/publish_criterion.rs b/core/lib/zksync_core/src/eth_sender/publish_criterion.rs
index 933547147e8..28168c4e6df 100644
--- a/core/lib/zksync_core/src/eth_sender/publish_criterion.rs
+++ b/core/lib/zksync_core/src/eth_sender/publish_criterion.rs
@@ -2,7 +2,7 @@ use std::fmt;
 use async_trait::async_trait;
 use chrono::Utc;
-use zksync_dal::{Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, Core, CoreDal};
 use zksync_l1_contract_interface::{i_executor::structures::CommitBatchInfo, Tokenizable};
 use zksync_types::{
     aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, ethabi,
@@ -21,7 +21,7 @@ pub trait L1BatchPublishCriterion: fmt::Debug + Send + Sync {
     /// Otherwise, returns the number of the last L1 batch that needs to be published.
     async fn last_l1_batch_to_publish(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         consecutive_l1_batches: &[L1BatchWithMetadata],
         last_sealed_l1_batch: L1BatchNumber,
     ) -> Option<L1BatchNumber>;
@@ -42,7 +42,7 @@ impl L1BatchPublishCriterion for NumberCriterion {
     async fn last_l1_batch_to_publish(
         &mut self,
-        _storage: &mut StorageProcessor<'_, Server>,
+        _storage: &mut Connection<'_, Core>,
         consecutive_l1_batches: &[L1BatchWithMetadata],
         _last_sealed_l1_batch: L1BatchNumber,
     ) -> Option<L1BatchNumber> {
@@ -88,7 +88,7 @@ impl L1BatchPublishCriterion for TimestampDeadlineCriterion {
     async fn last_l1_batch_to_publish(
         &mut self,
-        _storage: &mut StorageProcessor<'_, Server>,
+        _storage: &mut Connection<'_, Core>,
         consecutive_l1_batches: &[L1BatchWithMetadata],
         last_sealed_l1_batch: L1BatchNumber,
     ) -> Option<L1BatchNumber> {
@@ -133,7 +133,7 @@ impl GasCriterion {
     async fn get_gas_amount(
         &self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         batch_number: L1BatchNumber,
     ) -> u32 {
         storage
@@ -152,7 +152,7 @@ impl L1BatchPublishCriterion for GasCriterion {
     async fn last_l1_batch_to_publish(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         consecutive_l1_batches: &[L1BatchWithMetadata],
         _last_sealed_l1_batch: L1BatchNumber,
     ) -> Option<L1BatchNumber> {
@@ -210,7 +210,7 @@ impl L1BatchPublishCriterion for DataSizeCriterion {
     async fn last_l1_batch_to_publish(
         &mut self,
-        _storage: &mut StorageProcessor<'_, Server>,
+        _storage: &mut Connection<'_, Core>,
         consecutive_l1_batches: &[L1BatchWithMetadata],
         _last_sealed_l1_batch: L1BatchNumber,
     ) -> Option<L1BatchNumber> {
diff --git a/core/lib/zksync_core/src/eth_sender/tests.rs b/core/lib/zksync_core/src/eth_sender/tests.rs
index b142fc19fe2..d2cb198a4eb 100644
--- a/core/lib/zksync_core/src/eth_sender/tests.rs
+++ b/core/lib/zksync_core/src/eth_sender/tests.rs
@@ -7,7 +7,7 @@ use zksync_config::{
     configs::eth_sender::{ProofSendingMode, PubdataSendingMode, SenderConfig},
     ContractsConfig, ETHSenderConfig, GasAdjusterConfig,
 };
-use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
 use zksync_eth_client::{clients::MockEthereum, EthInterface};
 use zksync_l1_contract_interface::i_executor::methods::{
     CommitBatches, ExecuteBatches, ProveBatches,
@@ -64,7 +64,7 @@ fn mock_multicall_response() -> Token {
 #[derive(Debug)]
 struct EthSenderTester {
-    conn: ConnectionPool<Server>,
+    conn: ConnectionPool<Core>,
     gateway: Arc<MockEthereum>,
     manager: MockEthTxManager,
     aggregator: EthTxAggregator,
@@ -76,7 +76,7 @@ impl EthSenderTester {
     const MAX_BASE_FEE_SAMPLES: usize = 3;
     async fn new(
-        connection_pool: ConnectionPool<Server>,
+        connection_pool: ConnectionPool<Core>,
         history: Vec<u64>,
         non_ordering_confirmations: bool,
         aggregator_operate_4844_mode: bool,
@@ -158,8 +158,8 @@ impl EthSenderTester {
         }
     }
-    async fn storage(&self) -> StorageProcessor<'_, Server> {
-        self.conn.access_storage().await.unwrap()
+    async fn storage(&self) -> Connection<'_, Core> {
+        self.conn.connection().await.unwrap()
     }
     async fn get_block_numbers(&self) -> L1BlockNumbers {
@@ -177,7 +177,7 @@
 #[test_casing(2, [false, true])]
 #[tokio::test]
 async fn confirm_many(aggregator_operate_4844_mode: bool) -> anyhow::Result<()> {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     let mut tester = EthSenderTester::new(
         connection_pool,
         vec![10; 100],
@@ -192,7 +192,7 @@ async fn confirm_many(aggregator_operate_4844_mode: bool) -> anyhow::Result<()>
     let tx = tester
         .aggregator
         .save_eth_tx(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             &DUMMY_OPERATION,
             true,
         )
@@ -200,7 +200,7 @@ async fn confirm_many(aggregator_operate_4844_mode: bool) -> anyhow::Result<()>
     let hash = tester
         .manager
         .send_eth_tx(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             &tx,
             0,
             L1BlockNumber(tester.gateway.block_number("").await?.as_u32()),
@@ -232,7 +232,7 @@ async fn confirm_many(aggregator_operate_4844_mode: bool) -> anyhow::Result<()>
     let to_resend = tester
         .manager
         .monitor_inflight_transactions(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             tester.get_block_numbers().await,
         )
         .await?;
@@ -259,7 +259,7 @@
 // Tests that we resend first un-mined transaction every block with an increased gas price.
 #[tokio::test]
 async fn resend_each_block() -> anyhow::Result<()> {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     let mut tester =
         EthSenderTester::new(connection_pool, vec![7, 6, 5, 5, 5, 2, 1], false, false).await;
@@ -271,7 +271,7 @@ async fn resend_each_block() -> anyhow::Result<()> {
     let tx = tester
         .aggregator
         .save_eth_tx(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             &DUMMY_OPERATION,
             true,
         )
@@ -279,12 +279,7 @@ async fn resend_each_block() -> anyhow::Result<()> {
     let hash = tester
         .manager
-        .send_eth_tx(
-            &mut tester.conn.access_storage().await.unwrap(),
-            &tx,
-            0,
-            block,
-        )
+        .send_eth_tx(&mut tester.conn.connection().await.unwrap(), &tx, 0, block)
         .await?;
     // check that we sent something and stored it in the db
@@ -321,17 +316,14 @@ async fn resend_each_block() -> anyhow::Result<()> {
     let (to_resend, _) = tester
         .manager
-        .monitor_inflight_transactions(
-            &mut tester.conn.access_storage().await.unwrap(),
-            block_numbers,
-        )
+        .monitor_inflight_transactions(&mut tester.conn.connection().await.unwrap(), block_numbers)
         .await?
         .unwrap();
     let resent_hash = tester
         .manager
         .send_eth_tx(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             &to_resend,
             1,
             block_numbers.latest,
@@ -371,12 +363,12 @@
 // we won't mark it as confirmed but also won't resend it.
 #[tokio::test]
 async fn dont_resend_already_mined() -> anyhow::Result<()> {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], false, false).await;
     let tx = tester
         .aggregator
         .save_eth_tx(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             &DUMMY_OPERATION,
             true,
         )
@@ -386,7 +378,7 @@ async fn dont_resend_already_mined() -> anyhow::Result<()> {
     let hash = tester
         .manager
         .send_eth_tx(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             &tx,
             0,
             L1BlockNumber(tester.gateway.block_number("").await.unwrap().as_u32()),
@@ -416,7 +408,7 @@ async fn dont_resend_already_mined() -> anyhow::Result<()> {
     let to_resend = tester
         .manager
         .monitor_inflight_transactions(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             tester.get_block_numbers().await,
         )
         .await?;
@@ -442,7 +434,7 @@
 #[tokio::test]
 async fn three_scenarios() -> anyhow::Result<()> {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     let mut tester =
         EthSenderTester::new(connection_pool.clone(), vec![100; 100], false, false).await;
     let mut hashes = vec![];
@@ -451,7 +443,7 @@ async fn three_scenarios() -> anyhow::Result<()> {
         let tx = tester
             .aggregator
             .save_eth_tx(
-                &mut tester.conn.access_storage().await.unwrap(),
+                &mut tester.conn.connection().await.unwrap(),
                 &DUMMY_OPERATION,
                 true,
             )
@@ -461,7 +453,7 @@ async fn three_scenarios() -> anyhow::Result<()> {
         let hash = tester
             .manager
             .send_eth_tx(
-                &mut tester.conn.access_storage().await.unwrap(),
+                &mut tester.conn.connection().await.unwrap(),
                 &tx,
                 0,
                 L1BlockNumber(tester.gateway.block_number("").await.unwrap().as_u32()),
@@ -487,7 +479,7 @@ async fn three_scenarios() -> anyhow::Result<()> {
     let (to_resend, _) = tester
         .manager
         .monitor_inflight_transactions(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             tester.get_block_numbers().await,
         )
         .await?
@@ -515,14 +507,14 @@ async fn three_scenarios() -> anyhow::Result<()> {
 #[should_panic(expected = "We can't operate after tx fail")]
 #[tokio::test]
 async fn failed_eth_tx() {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     let mut tester =
         EthSenderTester::new(connection_pool.clone(), vec![100; 100], false, false).await;
     let tx = tester
         .aggregator
         .save_eth_tx(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             &DUMMY_OPERATION,
             true,
         )
@@ -532,7 +524,7 @@ async fn failed_eth_tx() {
     let hash = tester
         .manager
         .send_eth_tx(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             &tx,
             0,
             L1BlockNumber(tester.gateway.block_number("").await.unwrap().as_u32()),
@@ -547,7 +539,7 @@ async fn failed_eth_tx() {
     tester
         .manager
         .monitor_inflight_transactions(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             tester.get_block_numbers().await,
         )
         .await
@@ -587,7 +579,7 @@ fn l1_batch_with_metadata(header: L1BatchHeader) -> L1BatchWithMetadata {
 #[tokio::test]
 async fn correct_order_for_confirmations() -> anyhow::Result<()> {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], true, false).await;
     insert_genesis_protocol_version(&tester).await;
     let genesis_l1_batch = insert_l1_batch(&tester, L1BatchNumber(0)).await;
@@ -648,7 +640,7 @@ async fn correct_order_for_confirmations() -> anyhow::Result<()> {
 #[tokio::test]
 async fn skipped_l1_batch_at_the_start() -> anyhow::Result<()> {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], true, false).await;
     insert_genesis_protocol_version(&tester).await;
     let genesis_l1_batch = insert_l1_batch(&tester, L1BatchNumber(0)).await;
@@ -741,7 +733,7 @@ async fn skipped_l1_batch_at_the_start() -> anyhow::Result<()> {
 #[tokio::test]
 async fn skipped_l1_batch_in_the_middle() -> anyhow::Result<()> {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], true, false).await;
     insert_genesis_protocol_version(&tester).await;
     let genesis_l1_batch = insert_l1_batch(&tester, L1BatchNumber(0)).await;
@@ -828,7 +820,7 @@ async fn skipped_l1_batch_in_the_middle() -> anyhow::Result<()> {
 #[tokio::test]
 async fn test_parse_multicall_data() {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     let tester = EthSenderTester::new(connection_pool, vec![100; 100], false, false).await;
     assert!(tester
@@ -893,7 +885,7 @@ async fn test_parse_multicall_data() {
 #[tokio::test]
 async fn get_multicall_data() {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], false, false).await;
     let multicall_data = tester.aggregator.get_multicall_data().await;
     assert!(multicall_data.is_ok());
@@ -988,7 +980,7 @@ async fn send_operation(
     let tx = tester
         .aggregator
         .save_eth_tx(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             &aggregated_operation,
             true,
         )
@@ -998,7 +990,7 @@ async fn send_operation(
     let hash = tester
         .manager
         .send_eth_tx(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             &tx,
             0,
             L1BlockNumber(tester.gateway.block_number("").await.unwrap().as_u32()),
@@ -1019,7 +1011,7 @@ async fn confirm_tx(tester: &mut EthSenderTester, hash: H256) {
     tester
         .manager
         .monitor_inflight_transactions(
-            &mut tester.conn.access_storage().await.unwrap(),
+            &mut tester.conn.connection().await.unwrap(),
             tester.get_block_numbers().await,
         )
         .await
diff --git a/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs b/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs
index 6e33715f6e6..1a157cf6d52 100644
--- a/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs
+++ b/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs
@@ -1,6 +1,6 @@ use std::{convert::TryFrom, time::Instant};
-use zksync_dal::{Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, Core, CoreDal};
 use zksync_types::{
     ethabi::Contract, protocol_upgrade::GovernanceOperation, web3::types::Log, Address,
     ProtocolUpgrade, ProtocolVersionId, H256,
@@ -41,7 +41,7 @@ impl GovernanceUpgradesEventProcessor {
 impl EventProcessor for GovernanceUpgradesEventProcessor {
     async fn process_events(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         client: &dyn EthClient,
         events: Vec<Log>,
     ) -> Result<(), Error> {
diff --git a/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs b/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs
index 30172e835a9..1d945331f97 100644
--- a/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs
+++ b/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs
@@ -1,6 +1,6 @@ use std::fmt;
-use zksync_dal::{Server, StorageProcessor};
+use zksync_dal::{Connection, Core};
 use zksync_types::{web3::types::Log, H256};
 use crate::eth_watch::client::{Error, EthClient};
@@ -14,7 +14,7 @@ pub trait EventProcessor: 'static + fmt::Debug + Send + Sync {
     /// Processes given events
     async fn process_events(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         client: &dyn EthClient,
         events: Vec<Log>,
     ) -> Result<(), Error>;
diff --git a/core/lib/zksync_core/src/eth_watch/event_processors/priority_ops.rs b/core/lib/zksync_core/src/eth_watch/event_processors/priority_ops.rs
index 033215def65..c9f0561b54c 100644
--- a/core/lib/zksync_core/src/eth_watch/event_processors/priority_ops.rs
+++ b/core/lib/zksync_core/src/eth_watch/event_processors/priority_ops.rs
@@ -1,7 +1,7 @@ use std::convert::TryFrom;
 use zksync_contracts::zksync_contract;
-use zksync_dal::{Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, Core, CoreDal};
 use zksync_types::{l1::L1Tx, web3::types::Log, PriorityOpId, H256};
 use crate::{
@@ -36,7 +36,7 @@ impl PriorityOpsEventProcessor {
 impl EventProcessor for PriorityOpsEventProcessor {
     async fn process_events(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         _client: &dyn EthClient,
         events: Vec<Log>,
     ) -> Result<(), Error> {
diff --git a/core/lib/zksync_core/src/eth_watch/event_processors/upgrades.rs b/core/lib/zksync_core/src/eth_watch/event_processors/upgrades.rs
index 4ea2dc3960d..fbffe0f15bb 100644
--- a/core/lib/zksync_core/src/eth_watch/event_processors/upgrades.rs
+++ b/core/lib/zksync_core/src/eth_watch/event_processors/upgrades.rs
@@ -1,6 +1,6 @@ use std::convert::TryFrom;
-use zksync_dal::{Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, Core, CoreDal};
 use zksync_types::{web3::types::Log, ProtocolUpgrade, ProtocolVersionId, H256};
 use crate::eth_watch::{
@@ -32,7 +32,7 @@ impl UpgradesEventProcessor {
 impl EventProcessor for UpgradesEventProcessor {
     async fn process_events(
         &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
         client: &dyn EthClient,
         events: Vec<Log>,
     ) -> Result<(), Error> {
diff --git a/core/lib/zksync_core/src/eth_watch/mod.rs b/core/lib/zksync_core/src/eth_watch/mod.rs
index 3b7847888c4..c0328953326 100644
--- a/core/lib/zksync_core/src/eth_watch/mod.rs
+++ b/core/lib/zksync_core/src/eth_watch/mod.rs
@@ -8,7 +8,7 @@ use std::{sync::Arc, time::Duration};
 use tokio::{sync::watch, task::JoinHandle};
 use zksync_config::ETHWatchConfig;
-use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
 use zksync_eth_client::EthInterface;
 use zksync_system_constants::PRIORITY_EXPIRATION;
 use zksync_types::{
@@ -45,7 +45,7 @@ pub struct EthWatch {
     event_processors: Vec<Box<dyn EventProcessor>>,
     last_processed_ethereum_block: u64,
-    pool: ConnectionPool<Server>,
+    pool: ConnectionPool<Core>,
 }
 impl EthWatch {
@@ -53,10 +53,10 @@ impl EthWatch {
         diamond_proxy_address: Address,
         governance_contract: Option<Contract>,
         mut client: Box<dyn EthClient>,
-        pool: ConnectionPool<Server>,
+        pool: ConnectionPool<Core>,
         poll_interval: Duration,
     ) -> Self {
-        let mut storage = pool.access_storage_tagged("eth_watch").await.unwrap();
+        let mut storage = pool.connection_tagged("eth_watch").await.unwrap();
         let state = Self::initialize_state(&*client, &mut storage).await;
@@ -98,7 +98,7 @@ impl EthWatch {
     async fn initialize_state(
         client: &dyn EthClient,
-        storage: &mut StorageProcessor<'_, Server>,
+        storage: &mut Connection<'_, Core>,
     ) -> EthWatchState {
         let next_expected_priority_id: PriorityOpId = storage
             .transactions_dal()
@@ -147,7 +147,7 @@ impl EthWatch {
             timer.tick().await;
             METRICS.eth_poll.inc();
-            let mut storage = pool.access_storage_tagged("eth_watch").await.unwrap();
+            let mut storage = pool.connection_tagged("eth_watch").await.unwrap();
             if let Err(error) = self.loop_iteration(&mut storage).await {
                 // This is an error because otherwise we could potentially miss a priority operation
                 // thus entering priority mode, which is not desired.
@@ -162,10 +162,7 @@ impl EthWatch {
     }
     #[tracing::instrument(skip(self, storage))]
-    async fn loop_iteration(
-        &mut self,
-        storage: &mut StorageProcessor<'_, Server>,
-    ) -> Result<(), Error> {
+    async fn loop_iteration(&mut self, storage: &mut Connection<'_, Core>) -> Result<(), Error> {
         let stage_latency = METRICS.poll_eth_node[&PollStage::Request].start();
         let to_block = self.client.finalized_block_number().await?;
         if to_block <= self.last_processed_ethereum_block {
@@ -194,7 +191,7 @@
 pub async fn start_eth_watch(
     config: ETHWatchConfig,
-    pool: ConnectionPool<Server>,
+    pool: ConnectionPool<Core>,
     eth_gateway: Arc<dyn EthInterface>,
     diamond_proxy_addr: Address,
     governance: (Contract, Address),
diff --git a/core/lib/zksync_core/src/eth_watch/tests.rs b/core/lib/zksync_core/src/eth_watch/tests.rs
index 0f08a4bd193..9a0c91c0687 100644
--- a/core/lib/zksync_core/src/eth_watch/tests.rs
+++ b/core/lib/zksync_core/src/eth_watch/tests.rs
@@ -2,7 +2,7 @@ use std::{collections::HashMap, convert::TryInto, sync::Arc};
 use tokio::sync::RwLock;
 use zksync_contracts::{governance_contract, zksync_contract};
-use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
 use zksync_types::{
     ethabi::{encode, Hash, Token},
     l1::{L1Tx, OpProcessingType, PriorityQueueType},
@@ -202,7 +202,7 @@ fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx
 #[tokio::test]
 async fn test_normal_operation_l1_txs() {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     setup_db(&connection_pool).await;
     let mut client = FakeEthClient::new();
@@ -215,7 +215,7 @@ async fn test_normal_operation_l1_txs() {
     )
     .await;
-    let mut storage = connection_pool.access_storage().await.unwrap();
+    let mut storage = connection_pool.connection().await.unwrap();
     client
         .add_transactions(&[build_l1_tx(0, 10), build_l1_tx(1, 14), build_l1_tx(2, 18)])
         .await;
@@ -250,7 +250,7 @@
 #[tokio::test]
 async fn test_normal_operation_upgrades() {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     setup_db(&connection_pool).await;
     let mut client = FakeEthClient::new();
@@ -263,7 +263,7 @@ async fn test_normal_operation_upgrades() {
     )
     .await;
-    let mut storage = connection_pool.access_storage().await.unwrap();
+    let mut storage = connection_pool.connection().await.unwrap();
     client
         .add_diamond_upgrades(&[
             (
@@ -311,7 +311,7 @@
 #[tokio::test]
 async fn test_gap_in_upgrades() {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     setup_db(&connection_pool).await;
     let mut client = FakeEthClient::new();
@@ -324,7 +324,7 @@ async fn test_gap_in_upgrades() {
     )
     .await;
-    let mut storage = connection_pool.access_storage().await.unwrap();
+    let mut storage = connection_pool.connection().await.unwrap();
     client
         .add_diamond_upgrades(&[(
             ProtocolUpgrade {
@@ -350,7 +350,7 @@
 #[tokio::test]
 async fn test_normal_operation_governance_upgrades() {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     setup_db(&connection_pool).await;
     let mut client = FakeEthClient::new();
@@ -363,7 +363,7 @@ async fn test_normal_operation_governance_upgrades() {
     )
     .await;
-    let mut storage = connection_pool.access_storage().await.unwrap();
+    let mut storage = connection_pool.connection().await.unwrap();
     client
         .add_governance_upgrades(&[
             (
@@ -412,7 +412,7 @@
 #[tokio::test]
 #[should_panic]
 async fn test_gap_in_single_batch() {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     setup_db(&connection_pool).await;
     let mut client = FakeEthClient::new();
@@ -425,7 +425,7 @@ async fn test_gap_in_single_batch() {
     )
     .await;
-    let mut storage = connection_pool.access_storage().await.unwrap();
+    let mut storage = connection_pool.connection().await.unwrap();
     client
         .add_transactions(&[
             build_l1_tx(0, 10),
@@ -442,7 +442,7 @@
 #[tokio::test]
 #[should_panic]
 async fn test_gap_between_batches() {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     setup_db(&connection_pool).await;
     let mut client = FakeEthClient::new();
@@ -455,7 +455,7 @@ async fn test_gap_between_batches() {
     )
     .await;
-    let mut storage = connection_pool.access_storage().await.unwrap();
+    let mut storage = connection_pool.connection().await.unwrap();
     client
         .add_transactions(&[
             // this goes to the first batch
@@ -477,7 +477,7 @@
 #[tokio::test]
 async fn test_overlapping_batches() {
-    let connection_pool = ConnectionPool::<Server>::test_pool().await;
+    let connection_pool = ConnectionPool::<Core>::test_pool().await;
     setup_db(&connection_pool).await;
     let mut client = FakeEthClient::new();
@@ -490,7 +490,7 @@ async fn test_overlapping_batches() {
     )
     .await;
-    let mut storage = connection_pool.access_storage().await.unwrap();
+    let mut storage = connection_pool.connection().await.unwrap();
     client
         .add_transactions(&[
             // this goes to the first batch
@@ -523,7 +523,7 @@
     assert_eq!(tx.common_data.serial_id.0, 4);
 }
-async fn get_all_db_txs(storage: &mut StorageProcessor<'_, Server>) -> Vec<Transaction> {
+async fn get_all_db_txs(storage: &mut Connection<'_, Core>) -> Vec<Transaction> {
     storage.transactions_dal().reset_mempool().await.unwrap();
     storage
         .transactions_dal()
@@ -761,9 +761,9 @@ fn upgrade_into_diamond_cut(upgrade: ProtocolUpgrade) -> Token {
     ])
 }
-async fn setup_db(connection_pool: &ConnectionPool<Server>) {
+async fn setup_db(connection_pool: &ConnectionPool<Core>) {
     connection_pool
-        .access_storage()
+        .connection()
         .await
         .unwrap()
         .protocol_versions_dal()
diff --git a/core/lib/zksync_core/src/fee_model.rs b/core/lib/zksync_core/src/fee_model.rs
index cfeeea167d9..21d2635a524 100644
--- a/core/lib/zksync_core/src/fee_model.rs
+++ b/core/lib/zksync_core/src/fee_model.rs
@@ -1,6 +1,6 @@ use std::{fmt, sync::Arc};
-use zksync_dal::{ConnectionPool, Server, ServerDals};
+use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_types::{
     fee_model::{
         BatchFeeInput, FeeModelConfig, FeeModelConfigV2, FeeParams, FeeParamsV1, FeeParamsV2,
@@ -84,13 +84,13 @@ impl MainNodeFeeInputProvider {
 #[derive(Debug)]
 pub(crate) struct ApiFeeInputProvider {
     inner: Arc<dyn BatchFeeModelInputProvider>,
-    connection_pool: ConnectionPool<Server>,
+    connection_pool: ConnectionPool<Core>,
 }
 impl ApiFeeInputProvider {
     pub fn new(
         inner: Arc<dyn BatchFeeModelInputProvider>,
-        connection_pool: ConnectionPool<Server>,
+        connection_pool: ConnectionPool<Core>,
     ) -> Self {
         Self {
             inner,
@@ -112,7 +112,7 @@ impl BatchFeeModelInputProvider for ApiFeeInputProvider {
             .await;
         let last_miniblock_params = self
             .connection_pool
-            .access_storage_tagged("api_fee_input_provider")
.connection_tagged("api_fee_input_provider") .await .unwrap() .blocks_dal() diff --git a/core/lib/zksync_core/src/genesis.rs b/core/lib/zksync_core/src/genesis.rs index d27c9540f51..7514c4577aa 100644 --- a/core/lib/zksync_core/src/genesis.rs +++ b/core/lib/zksync_core/src/genesis.rs @@ -9,7 +9,7 @@ use multivm::{ zk_evm_latest::aux_structures::{LogQuery as MultiVmLogQuery, Timestamp as MultiVMTimestamp}, }; use zksync_contracts::{BaseSystemContracts, SET_CHAIN_ID_EVENT}; -use zksync_dal::{Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, Core, CoreDal}; use zksync_eth_client::{clients::QueryClient, EthInterface}; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_system_constants::PRIORITY_EXPIRATION; @@ -58,7 +58,7 @@ impl GenesisParams { } pub async fn ensure_genesis_state( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, zksync_chain_id: L2ChainId, genesis_params: &GenesisParams, ) -> anyhow::Result { @@ -154,7 +154,7 @@ pub async fn ensure_genesis_state( // The code of the bootloader should not be deployed anywhere anywhere in the kernel space (i.e. addresses below 2^16) // because in this case we will have to worry about protecting it. async fn insert_base_system_contracts_to_factory_deps( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, contracts: &BaseSystemContracts, ) -> anyhow::Result<()> { let factory_deps = [&contracts.bootloader, &contracts.default_aa] @@ -170,7 +170,7 @@ async fn insert_base_system_contracts_to_factory_deps( } async fn insert_system_contracts( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, contracts: &[DeployedContract], chain_id: L2ChainId, ) -> anyhow::Result<()> { @@ -285,7 +285,7 @@ async fn insert_system_contracts( #[allow(clippy::too_many_arguments)] pub(crate) async fn create_genesis_l1_batch( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, first_validator_address: Address, chain_id: L2ChainId, protocol_version: ProtocolVersionId, @@ -362,7 +362,7 @@ pub(crate) async fn create_genesis_l1_batch( Ok(()) } -async fn add_eth_token(transaction: &mut StorageProcessor<'_, Server>) -> anyhow::Result<()> { +async fn add_eth_token(transaction: &mut Connection<'_, Core>) -> anyhow::Result<()> { assert!(transaction.in_transaction()); // sanity check let eth_token = TokenInfo { l1_address: ETHEREUM_ADDRESS, @@ -388,7 +388,7 @@ async fn add_eth_token(transaction: &mut StorageProcessor<'_, Server>) -> anyhow } async fn save_genesis_l1_batch_metadata( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, commitment: L1BatchCommitment, genesis_root_hash: H256, rollup_last_leaf_index: u64, @@ -423,7 +423,7 @@ pub(crate) async fn save_set_chain_id_tx( eth_client_url: &str, diamond_proxy_address: Address, state_transition_manager_address: Address, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { let eth_client = QueryClient::new(eth_client_url)?; let to = eth_client.block_number("fetch_chain_id_tx").await?.as_u64(); @@ -456,15 +456,15 @@ pub(crate) async fn save_set_chain_id_tx( #[cfg(test)] mod tests { - use zksync_dal::{ConnectionPool, Server, ServerDals}; + use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_types::system_contracts::get_system_smart_contracts; use super::*; #[tokio::test] async fn running_genesis() { - let pool = ConnectionPool::::test_pool().await; - let mut conn = 
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         conn.blocks_dal().delete_genesis().await.unwrap();
         let params = GenesisParams {
@@ -495,8 +495,8 @@ mod tests {
     #[tokio::test]
     async fn running_genesis_with_big_chain_id() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         conn.blocks_dal().delete_genesis().await.unwrap();
         let params = GenesisParams {
@@ -521,8 +521,8 @@ mod tests {
     #[tokio::test]
     async fn running_genesis_with_non_latest_protocol_version() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
-        let mut conn = pool.access_storage().await.unwrap();
+        let pool = ConnectionPool::<Core>::test_pool().await;
+        let mut conn = pool.connection().await.unwrap();
         let params = GenesisParams {
             protocol_version: ProtocolVersionId::Version10,
             ..GenesisParams::mock()
diff --git a/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs b/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs
index 75b10875e8c..38a9c410185 100644
--- a/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs
+++ b/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs
@@ -1,5 +1,5 @@ use async_trait::async_trait;
-use zksync_dal::{ConnectionPool, Server, ServerDals};
+use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_utils::time::seconds_since_epoch;
 use crate::{
@@ -10,11 +10,11 @@
 pub struct L1BatchMetricsReporter {
     reporting_interval_ms: u64,
-    connection_pool: ConnectionPool<Server>,
+    connection_pool: ConnectionPool<Core>,
 }
 impl L1BatchMetricsReporter {
-    pub fn new(reporting_interval_ms: u64, connection_pool: ConnectionPool<Server>) -> Self {
+    pub fn new(reporting_interval_ms: u64, connection_pool: ConnectionPool<Core>) -> Self {
         Self {
             reporting_interval_ms,
             connection_pool,
@@ -23,7 +23,7 @@ impl L1BatchMetricsReporter {
     async fn report_metrics(&self) {
         let mut block_metrics = vec![];
-        let mut conn = self.connection_pool.access_storage().await.unwrap();
+        let mut conn = self.connection_pool.connection().await.unwrap();
         let last_l1_batch = conn
             .blocks_dal()
             .get_sealed_l1_batch_number()
diff --git a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_job_retry_manager.rs b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_job_retry_manager.rs
index 7081e64a1ae..9168ac938cb 100644
--- a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_job_retry_manager.rs
+++ b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_job_retry_manager.rs
@@ -1,7 +1,7 @@ use std::time::Duration;
 use async_trait::async_trait;
-use prover_dal::{Prover, ProverDals};
+use prover_dal::{Prover, ProverDal};
 use zksync_dal::ConnectionPool;
 use crate::house_keeper::periodic_job::PeriodicJob;
@@ -38,7 +38,7 @@ impl PeriodicJob for FriProofCompressorJobRetryManager {
     async fn run_routine_task(&mut self) -> anyhow::Result<()> {
         let stuck_jobs = self
             .pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_proof_compressor_dal()
diff --git a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs
index 7c90d490c08..c81a4a352a8 100644
--- a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs
+++ b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs
@@ -1,5 +1,5 @@
 use async_trait::async_trait;
-use prover_dal::{Prover, ProverDals};
+use prover_dal::{Prover, ProverDal};
 use zksync_dal::ConnectionPool;
 use zksync_types::prover_dal::JobCountStatistics;
@@ -22,7 +22,7 @@ impl FriProofCompressorStatsReporter {
     }
     async fn get_job_statistics(pool: &ConnectionPool<Prover>) -> JobCountStatistics {
-        pool.access_storage()
+        pool.connection()
             .await
             .unwrap()
             .fri_proof_compressor_dal()
@@ -62,7 +62,7 @@
         let oldest_not_compressed_batch = self
             .pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_proof_compressor_dal()
diff --git a/core/lib/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs b/core/lib/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs
index 1feec2cc74c..ffe18e50718 100644
--- a/core/lib/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs
+++ b/core/lib/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs
@@ -1,7 +1,7 @@ use std::time::Duration;
 use async_trait::async_trait;
-use prover_dal::{Prover, ProverDals};
+use prover_dal::{Prover, ProverDal};
 use zksync_dal::ConnectionPool;
 use crate::house_keeper::periodic_job::PeriodicJob;
@@ -38,7 +38,7 @@ impl PeriodicJob for FriProverJobRetryManager {
     async fn run_routine_task(&mut self) -> anyhow::Result<()> {
         let stuck_jobs = self
             .pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_prover_jobs_dal()
diff --git a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs
index 91a42c482a1..d8f276afe3f 100644
--- a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs
+++ b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs
@@ -1,7 +1,7 @@ use async_trait::async_trait;
-use prover_dal::{Prover, ProverDals};
+use prover_dal::{Prover, ProverDal};
 use zksync_config::configs::fri_prover_group::FriProverGroupConfig;
-use zksync_dal::{ConnectionPool, Server, ServerDals};
+use zksync_dal::{ConnectionPool, Core, CoreDal};
 use crate::house_keeper::periodic_job::PeriodicJob;
@@ -9,7 +9,7 @@
 pub struct FriProverStatsReporter {
     reporting_interval_ms: u64,
     prover_connection_pool: ConnectionPool<Prover>,
-    db_connection_pool: ConnectionPool<Server>,
+    db_connection_pool: ConnectionPool<Core>,
     config: FriProverGroupConfig,
 }
@@ -17,7 +17,7 @@
     pub fn new(
         reporting_interval_ms: u64,
         prover_connection_pool: ConnectionPool<Prover>,
-        db_connection_pool: ConnectionPool<Server>,
+        db_connection_pool: ConnectionPool<Core>,
         config: FriProverGroupConfig,
     ) -> Self {
         Self {
@@ -35,7 +35,7 @@
     const SERVICE_NAME: &'static str = "FriProverStatsReporter";
     async fn run_routine_task(&mut self) -> anyhow::Result<()> {
-        let mut conn = self.prover_connection_pool.access_storage().await.unwrap();
+        let mut conn = self.prover_connection_pool.connection().await.unwrap();
         let stats = conn.fri_prover_jobs_dal().get_prover_jobs_stats().await;
         for ((circuit_id, aggregation_round), stats) in stats.into_iter() {
@@ -90,7 +90,7 @@
         // FIXME: refactor metrics here
-        let mut db_conn = self.db_connection_pool.access_storage().await.unwrap();
+        let mut db_conn = self.db_connection_pool.connection().await.unwrap();
         let oldest_unpicked_batch = match db_conn
             .proof_generation_dal()
diff --git a/core/lib/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs b/core/lib/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs
index 3494d7e37de..3f763ba17cc 100644
--- a/core/lib/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs
+++ b/core/lib/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs
@@ -1,5 +1,5 @@ use async_trait::async_trait;
-use prover_dal::{Prover, ProverDals};
+use prover_dal::{Prover, ProverDal};
 use zksync_dal::ConnectionPool;
 use crate::house_keeper::periodic_job::PeriodicJob;
@@ -19,7 +19,7 @@ impl SchedulerCircuitQueuer {
     }
     pub async fn queue_scheduler_circuit_jobs(&mut self) {
-        let mut conn = self.pool.access_storage().await.unwrap();
+        let mut conn = self.pool.connection().await.unwrap();
         let l1_batch_numbers = conn
             .fri_scheduler_dependency_tracker_dal()
             .get_l1_batches_ready_for_queuing()
diff --git a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs
index 9afadec6c32..9035fd70280 100644
--- a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs
+++ b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs
@@ -1,7 +1,7 @@ use std::time::Duration;
 use async_trait::async_trait;
-use prover_dal::{Prover, ProverDals};
+use prover_dal::{Prover, ProverDal};
 use zksync_dal::ConnectionPool;
 use crate::house_keeper::periodic_job::PeriodicJob;
@@ -32,7 +32,7 @@ impl FriWitnessGeneratorJobRetryManager {
     pub async fn requeue_stuck_witness_inputs_jobs(&mut self) {
         let stuck_jobs = self
             .pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_witness_generator_dal()
@@ -48,7 +48,7 @@ impl FriWitnessGeneratorJobRetryManager {
     pub async fn requeue_stuck_leaf_aggregations_jobs(&mut self) {
         let stuck_jobs = self
             .pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_witness_generator_dal()
@@ -67,7 +67,7 @@ impl FriWitnessGeneratorJobRetryManager {
     pub async fn requeue_stuck_node_aggregations_jobs(&mut self) {
         let stuck_jobs = self
             .pool
-            .access_storage()
+            .connection()
            .await
             .unwrap()
             .fri_witness_generator_dal()
@@ -86,7 +86,7 @@ impl FriWitnessGeneratorJobRetryManager {
     pub async fn requeue_stuck_scheduler_jobs(&mut self) {
         let stuck_jobs = self
             .pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_witness_generator_dal()
diff --git a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs
index 83dba44af52..11c4bf23909 100644
--- a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs
+++ b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs
@@ -1,7 +1,7 @@ use std::collections::HashMap;
 use async_trait::async_trait;
-use prover_dal::{Prover, ProverDals};
+use prover_dal::{Prover, ProverDal};
 use zksync_dal::ConnectionPool;
 use zksync_types::{basic_fri_types::AggregationRound, prover_dal::JobCountStatistics};
@@ -24,7 +24,7 @@ impl FriWitnessGeneratorStatsReporter {
     }
     async fn get_job_statistics(&self) -> HashMap<AggregationRound, JobCountStatistics> {
-        let mut conn = self.pool.access_storage().await.unwrap();
+        let mut conn = self.pool.connection().await.unwrap();
         HashMap::from([
             (
                 AggregationRound::BasicCircuits,
diff --git a/core/lib/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs b/core/lib/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs
index eadcc59c0c0..adbd1d36ae6 100644
a/core/lib/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs +++ b/core/lib/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs @@ -1,5 +1,5 @@ use async_trait::async_trait; -use prover_dal::{Prover, ProverDals}; +use prover_dal::{Prover, ProverDal}; use zksync_dal::ConnectionPool; use crate::house_keeper::periodic_job::PeriodicJob; @@ -19,7 +19,7 @@ impl WaitingToQueuedFriWitnessJobMover { } async fn move_leaf_aggregation_jobs(&mut self) { - let mut conn = self.pool.access_storage().await.unwrap(); + let mut conn = self.pool.connection().await.unwrap(); let l1_batch_numbers = conn .fri_witness_generator_dal() .move_leaf_aggregation_jobs_from_waiting_to_queued() @@ -41,7 +41,7 @@ impl WaitingToQueuedFriWitnessJobMover { pub async fn move_node_aggregation_jobs_from_waiting_to_queued( &mut self, ) -> Vec<(i64, u8, u16)> { - let mut conn = self.pool.access_storage().await.unwrap(); + let mut conn = self.pool.connection().await.unwrap(); let mut jobs = conn .fri_witness_generator_dal() .move_depth_zero_node_aggregation_jobs() diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index 08e08e8b4e1..ae5eb38b64e 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -33,7 +33,7 @@ use zksync_config::{ ApiConfig, ContractsConfig, DBConfig, ETHSenderConfig, PostgresConfig, }; use zksync_contracts::{governance_contract, BaseSystemContracts}; -use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Server, ServerDals}; +use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core, CoreDal}; use zksync_db_connection::healthcheck::ConnectionPoolHealthCheck; use zksync_eth_client::{ clients::{PKSigningClient, QueryClient}, @@ -117,11 +117,11 @@ pub async fn genesis_init( wait_for_set_chain_id: bool, ) -> anyhow::Result<()> { let db_url = postgres_config.master_url()?; - let pool = ConnectionPool::<Server>::singleton(db_url) + let pool = ConnectionPool::<Core>::singleton(db_url) .build() .await .context("failed to build connection_pool")?; - let mut storage = pool.access_storage().await.context("access_storage()")?; + let mut storage = pool.connection().await.context("connection()")?; let operator_address = PackedEthSignature::address_from_private_key( &eth_sender .sender @@ -204,11 +204,11 @@ pub async fn genesis_init( pub async fn is_genesis_needed(postgres_config: &PostgresConfig) -> bool { let db_url = postgres_config.master_url().unwrap(); - let pool = ConnectionPool::<Server>::singleton(db_url) + let pool = ConnectionPool::<Core>::singleton(db_url) .build() .await .expect("failed to build connection_pool"); - let mut storage = pool.access_storage().await.expect("access_storage()"); + let mut storage = pool.connection().await.expect("connection()"); storage.blocks_dal().is_genesis_needed().await.unwrap() } @@ -317,22 +317,21 @@ pub async fn initialize_components( let postgres_config = configs.postgres_config.clone().context("postgres_config")?; if let Some(threshold) = postgres_config.slow_query_threshold() { - ConnectionPool::<Server>::global_config().set_slow_query_threshold(threshold)?; + ConnectionPool::<Core>::global_config().set_slow_query_threshold(threshold)?; } if let Some(threshold) = postgres_config.long_connection_threshold() { - ConnectionPool::<Server>::global_config().set_long_connection_threshold(threshold)?; + ConnectionPool::<Core>::global_config().set_long_connection_threshold(threshold)?; } let pool_size = postgres_config.max_connections()?; - let connection_pool = - ConnectionPool::<Server>::builder(postgres_config.master_url()?, pool_size) -
.build() - .await - .context("failed to build connection_pool")?; + let connection_pool = ConnectionPool::<Core>::builder(postgres_config.master_url()?, pool_size) + .build() + .await + .context("failed to build connection_pool")?; // We're most interested in setting acquire / statement timeouts for the API server, which puts the most load // on Postgres. let replica_connection_pool = - ConnectionPool::<Server>::builder(postgres_config.replica_url()?, pool_size) + ConnectionPool::<Core>::builder(postgres_config.replica_url()?, pool_size) .set_acquire_timeout(postgres_config.acquire_timeout()) .set_statement_timeout(postgres_config.statement_timeout()) .build() @@ -631,7 +630,7 @@ pub async fn initialize_components( if components.contains(&Component::EthWatcher) { let started_at = Instant::now(); tracing::info!("initializing ETH-Watcher"); - let eth_watch_pool = ConnectionPool::<Server>::singleton(postgres_config.master_url()?) + let eth_watch_pool = ConnectionPool::<Core>::singleton(postgres_config.master_url()?) .build() .await .context("failed to build eth_watch_pool")?; @@ -660,7 +659,7 @@ pub async fn initialize_components( if components.contains(&Component::EthTxAggregator) { let started_at = Instant::now(); tracing::info!("initializing ETH-TxAggregator"); - let eth_sender_pool = ConnectionPool::<Server>::singleton(postgres_config.master_url()?) + let eth_sender_pool = ConnectionPool::<Core>::singleton(postgres_config.master_url()?) .build() .await .context("failed to build eth_sender_pool")?; @@ -707,7 +706,7 @@ pub async fn initialize_components( if components.contains(&Component::EthTxManager) { let started_at = Instant::now(); tracing::info!("initializing ETH-TxManager"); - let eth_manager_pool = ConnectionPool::<Server>::singleton(postgres_config.master_url()?) + let eth_manager_pool = ConnectionPool::<Core>::singleton(postgres_config.master_url()?) .build() .await .context("failed to build eth_manager_pool")?; @@ -750,7 +749,7 @@ pub async fn initialize_components( if components.contains(&Component::BasicWitnessInputProducer) { let singleton_connection_pool = - ConnectionPool::<Server>::singleton(postgres_config.master_url()?) + ConnectionPool::<Core>::singleton(postgres_config.master_url()?) .build() .await .context("failed to build singleton connection_pool")?; @@ -790,7 +789,7 @@ pub async fn initialize_components( if components.contains(&Component::CommitmentGenerator) { let commitment_generator_pool = - ConnectionPool::<Server>::singleton(postgres_config.master_url()?) + ConnectionPool::<Core>::singleton(postgres_config.master_url()?) .build() .await .context("failed to build commitment_generator_pool")?; @@ -826,14 +825,14 @@ async fn add_state_keeper_to_task_futures( object_store: Arc<dyn ObjectStore>, stop_receiver: watch::Receiver<bool>, ) -> anyhow::Result<()> { - let pool_builder = ConnectionPool::<Server>::singleton(postgres_config.master_url()?); + let pool_builder = ConnectionPool::<Core>::singleton(postgres_config.master_url()?); let state_keeper_pool = pool_builder .build() .await .context("failed to build state_keeper_pool")?; let mempool = { let mut storage = state_keeper_pool - .access_storage() + .connection() .await .context("Access storage to build mempool")?; let mempool = MempoolGuard::from_storage(&mut storage, mempool_config.capacity).await; @@ -974,7 +973,7 @@ async fn run_tree( let tree_health_check = metadata_calculator.tree_health_check(); app_health.insert_component(tree_health_check); - let pool = ConnectionPool::<Server>::singleton(postgres_config.master_url()?) + let pool = ConnectionPool::<Core>::singleton(postgres_config.master_url()?)
.build() .await .context("failed to build connection pool")?; @@ -989,7 +988,7 @@ async fn run_tree( async fn add_basic_witness_input_producer_to_task_futures( task_futures: &mut Vec<JoinHandle<anyhow::Result<()>>>, - connection_pool: &ConnectionPool<Server>, + connection_pool: &ConnectionPool<Core>, store_factory: &ObjectStoreFactory, l2_chain_id: L2ChainId, stop_receiver: watch::Receiver<bool>, @@ -1022,7 +1021,7 @@ async fn add_house_keeper_to_task_futures( .clone() .context("house_keeper_config")?; let postgres_config = configs.postgres_config.clone().context("postgres_config")?; - let connection_pool = ConnectionPool::<Server>::builder( + let connection_pool = ConnectionPool::<Core>::builder( postgres_config.replica_url()?, postgres_config.max_connections()?, ) @@ -1127,7 +1126,7 @@ async fn add_house_keeper_to_task_futures( fn build_storage_caches( configs: &TempConfigStore, - replica_connection_pool: &ConnectionPool<Server>, + replica_connection_pool: &ConnectionPool<Core>, task_futures: &mut Vec<JoinHandle<anyhow::Result<()>>>, stop_receiver: watch::Receiver<bool>, ) -> anyhow::Result<PostgresStorageCaches> { @@ -1153,8 +1152,8 @@ async fn build_tx_sender( tx_sender_config: &TxSenderConfig, web3_json_config: &Web3JsonRpcConfig, state_keeper_config: &StateKeeperConfig, - replica_pool: ConnectionPool<Server>, - master_pool: ConnectionPool<Server>, + replica_pool: ConnectionPool<Core>, + master_pool: ConnectionPool<Core>, batch_fee_model_input_provider: Arc<dyn BatchFeeModelInputProvider>, storage_caches: PostgresStorageCaches, ) -> (TxSender, VmConcurrencyBarrier) { @@ -1193,8 +1192,8 @@ async fn run_http_api( state_keeper_config: &StateKeeperConfig, internal_api: &InternalApiConfig, api_config: &ApiConfig, - master_connection_pool: ConnectionPool<Server>, - replica_connection_pool: ConnectionPool<Server>, + master_connection_pool: ConnectionPool<Core>, + replica_connection_pool: ConnectionPool<Core>, stop_receiver: watch::Receiver<bool>, batch_fee_model_input_provider: Arc<dyn BatchFeeModelInputProvider>, with_debug_namespace: bool, @@ -1217,7 +1216,7 @@ async fn run_http_api( } namespaces.push(Namespace::Snapshots); - let updaters_pool = ConnectionPool::<Server>::builder(postgres_config.replica_url()?, 2) + let updaters_pool = ConnectionPool::<Core>::builder(postgres_config.replica_url()?, 2) .build() .await .context("failed to build last_miniblock_pool")?; @@ -1258,8 +1257,8 @@ async fn run_ws_api( internal_api: &InternalApiConfig, api_config: &ApiConfig, batch_fee_model_input_provider: Arc<dyn BatchFeeModelInputProvider>, - master_connection_pool: ConnectionPool<Server>, - replica_connection_pool: ConnectionPool<Server>, + master_connection_pool: ConnectionPool<Core>, + replica_connection_pool: ConnectionPool<Core>, stop_receiver: watch::Receiver<bool>, storage_caches: PostgresStorageCaches, ) -> anyhow::Result<()> { @@ -1273,7 +1272,7 @@ async fn run_ws_api( storage_caches, ) .await; - let last_miniblock_pool = ConnectionPool::<Server>::singleton(postgres_config.replica_url()?) + let last_miniblock_pool = ConnectionPool::<Core>::singleton(postgres_config.replica_url()?) .build() .await .context("failed to build last_miniblock_pool")?; @@ -1325,7 +1324,7 @@ async fn circuit_breakers_for_components( .iter() .any(|c| matches!(c, Component::EthTxAggregator | Component::EthTxManager)) { - let pool = ConnectionPool::<Server>::singleton(postgres_config.replica_url()?) + let pool = ConnectionPool::<Core>::singleton(postgres_config.replica_url()?) .build() .await .context("failed to build a connection pool")?; @@ -1338,7 +1337,7 @@ async fn circuit_breakers_for_components( Component::HttpApi | Component::WsApi | Component::ContractVerificationApi ) }) { - let pool = ConnectionPool::<Server>::singleton(postgres_config.replica_url()?) + let pool = ConnectionPool::<Core>::singleton(postgres_config.replica_url()?)
.build() .await?; circuit_breakers.push(Box::new(ReplicationLagChecker { diff --git a/core/lib/zksync_core/src/metadata_calculator/helpers.rs b/core/lib/zksync_core/src/metadata_calculator/helpers.rs index e07972e8247..bc826682575 100644 --- a/core/lib/zksync_core/src/metadata_calculator/helpers.rs +++ b/core/lib/zksync_core/src/metadata_calculator/helpers.rs @@ -14,7 +14,7 @@ use serde::{Deserialize, Serialize}; use tokio::sync::mpsc; use tokio::sync::watch; use zksync_config::configs::database::MerkleTreeMode; -use zksync_dal::{Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, Core, CoreDal}; use zksync_health_check::{Health, HealthStatus}; use zksync_merkle_tree::{ domain::{TreeMetadata, ZkSyncTree, ZkSyncTreeReader}, @@ -399,7 +399,7 @@ pub(crate) struct L1BatchWithLogs { impl L1BatchWithLogs { pub async fn new( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, ) -> Option<Self> { tracing::debug!("Loading storage logs data for L1 batch #{l1_batch_number}"); @@ -478,7 +478,7 @@ impl L1BatchWithLogs { #[cfg(test)] mod tests { use tempfile::TempDir; - use zksync_dal::{ConnectionPool, Server}; + use zksync_dal::{ConnectionPool, Core}; use zksync_prover_interface::inputs::PrepareBasicCircuitsJob; use zksync_types::{L2ChainId, StorageKey, StorageLog}; @@ -491,7 +491,7 @@ mod tests { impl L1BatchWithLogs { /// Old, slower method of loading storage logs. We want to test its equivalence to the new implementation. async fn slow( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, ) -> Option<Self> { let header = storage @@ -560,9 +560,9 @@ mod tests { #[tokio::test] async fn loaded_logs_equivalence_basics() { - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; ensure_genesis_state( - &mut pool.access_storage().await.unwrap(), + &mut pool.connection().await.unwrap(), L2ChainId::from(270), &GenesisParams::mock(), ) @@ -570,7 +570,7 @@ mod tests { .unwrap(); reset_db_state(&pool, 5).await; - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); for l1_batch_number in 0..=5 { let l1_batch_number = L1BatchNumber(l1_batch_number); let batch_with_logs = L1BatchWithLogs::new(&mut storage, l1_batch_number) @@ -585,8 +585,8 @@ mod tests { #[tokio::test] async fn loaded_logs_equivalence_with_zero_no_op_logs() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) .await .unwrap(); @@ -621,7 +621,7 @@ mod tests { } async fn assert_log_equivalence( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, tree: &mut AsyncTree, l1_batch_number: L1BatchNumber, ) { @@ -676,8 +676,8 @@ mod tests { #[tokio::test] async fn loaded_logs_equivalence_with_non_zero_no_op_logs() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) .await .unwrap(); @@ -723,8 +723,8 @@ mod tests { #[tokio::test] async fn loaded_logs_equivalence_with_protective_reads() { - let pool =
ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) .await .unwrap(); diff --git a/core/lib/zksync_core/src/metadata_calculator/mod.rs b/core/lib/zksync_core/src/metadata_calculator/mod.rs index a4bcb919158..dffaae63680 100644 --- a/core/lib/zksync_core/src/metadata_calculator/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/mod.rs @@ -12,7 +12,7 @@ use zksync_config::configs::{ chain::OperationsManagerConfig, database::{MerkleTreeConfig, MerkleTreeMode}, }; -use zksync_dal::{ConnectionPool, Server}; +use zksync_dal::{ConnectionPool, Core}; use zksync_health_check::{HealthUpdater, ReactiveHealthCheck}; use zksync_object_store::ObjectStore; @@ -143,7 +143,7 @@ impl MetadataCalculator { pub async fn run( self, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, stop_receiver: watch::Receiver<bool>, ) -> anyhow::Result<()> { let tree = self.create_tree().await?; diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs index cbe088509b8..a918d1a67f3 100644 --- a/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs @@ -34,7 +34,7 @@ use anyhow::Context as _; use async_trait::async_trait; use futures::future; use tokio::sync::{watch, Mutex, Semaphore}; -use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_health_check::HealthUpdater; use zksync_merkle_tree::TreeEntry; use zksync_types::{ @@ -121,13 +121,13 @@ impl SnapshotParameters { const DESIRED_CHUNK_SIZE: u64 = 200_000; async fn new( - pool: &ConnectionPool<Server>, + pool: &ConnectionPool<Core>, recovery: &SnapshotRecoveryStatus, ) -> anyhow::Result<Self> { let miniblock = recovery.miniblock_number; let expected_root_hash = recovery.l1_batch_root_hash; - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let log_count = storage .storage_logs_dal() .get_storage_logs_row_count(miniblock) @@ -159,7 +159,7 @@ impl GenericAsyncTree { /// if necessary. pub async fn ensure_ready( self, - pool: &ConnectionPool<Server>, + pool: &ConnectionPool<Core>, stop_receiver: &watch::Receiver<bool>, health_updater: &HealthUpdater, ) -> anyhow::Result<Option<AsyncTree>> { @@ -210,7 +210,7 @@ impl AsyncTreeRecovery { mut self, snapshot: SnapshotParameters, mut options: RecoveryOptions<'_>, - pool: &ConnectionPool<Server>, + pool: &ConnectionPool<Core>, stop_receiver: &watch::Receiver<bool>, ) -> anyhow::Result<Option<AsyncTree>> { let chunk_count = options.chunk_count; @@ -221,7 +221,7 @@ impl AsyncTreeRecovery { "Recovering Merkle tree from Postgres snapshot in {chunk_count} concurrent chunks" ); - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; let remaining_chunks = self .filter_chunks(&mut storage, snapshot.miniblock, &chunks) .await?; @@ -271,7 +271,7 @@ impl AsyncTreeRecovery { /// Filters out `key_chunks` for which recovery was successfully performed.
async fn filter_chunks( &mut self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, snapshot_miniblock: MiniblockNumber, key_chunks: &[ops::RangeInclusive<H256>], ) -> anyhow::Result<Vec<ops::RangeInclusive<H256>>> { @@ -318,12 +318,12 @@ impl AsyncTreeRecovery { tree: &Mutex<AsyncTreeRecovery>, snapshot_miniblock: MiniblockNumber, key_chunk: ops::RangeInclusive<H256>, - pool: &ConnectionPool<Server>, + pool: &ConnectionPool<Core>, stop_receiver: &watch::Receiver<bool>, ) -> anyhow::Result<()> { let acquire_connection_latency = RECOVERY_METRICS.chunk_latency[&ChunkRecoveryStage::AcquireConnection].start(); - let mut storage = pool.access_storage().await?; + let mut storage = pool.connection().await?; acquire_connection_latency.observe(); if *stop_receiver.borrow() { @@ -392,9 +392,9 @@ impl AsyncTreeRecovery { } async fn get_snapshot_recovery( - pool: &ConnectionPool<Server>, + pool: &ConnectionPool<Core>, ) -> anyhow::Result<Option<SnapshotRecoveryStatus>> { - let mut storage = pool.access_storage_tagged("metadata_calculator").await?; + let mut storage = pool.connection_tagged("metadata_calculator").await?; Ok(storage .snapshot_recovery_dal() .get_applied_snapshot_status() diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs index 6a4118b3a86..8145cfe8c25 100644 --- a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs @@ -10,7 +10,7 @@ use zksync_config::configs::{ chain::OperationsManagerConfig, database::{MerkleTreeConfig, MerkleTreeMode}, }; -use zksync_dal::ServerDals; +use zksync_dal::CoreDal; use zksync_health_check::{CheckHealth, HealthStatus, ReactiveHealthCheck}; use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction}; use zksync_types::{L1BatchNumber, L2ChainId, ProtocolVersionId, StorageLog}; @@ -60,7 +60,7 @@ async fn create_tree_recovery(path: PathBuf, l1_batch: L1BatchNumber) -> AsyncTr #[tokio::test] async fn basic_recovery_workflow() { - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let snapshot_recovery = prepare_recovery_snapshot_with_genesis(&pool, &temp_dir).await; let snapshot = SnapshotParameters::new(&pool, &snapshot_recovery) .await @@ -94,10 +94,10 @@ async fn basic_recovery_workflow() { } async fn prepare_recovery_snapshot_with_genesis( - pool: &ConnectionPool<Server>, + pool: &ConnectionPool<Core>, temp_dir: &TempDir, ) -> SnapshotRecoveryStatus { - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) .await .unwrap(); @@ -173,7 +173,7 @@ impl HandleRecoveryEvent for TestEventListener { #[test_casing(3, [5, 7, 8])] #[tokio::test] async fn recovery_fault_tolerance(chunk_count: u64) { - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let snapshot_recovery = prepare_recovery_snapshot_with_genesis(&pool, &temp_dir).await; @@ -239,10 +239,10 @@ impl RecoveryWorkflowCase { #[test_casing(2, RecoveryWorkflowCase::ALL)] #[tokio::test] async fn entire_recovery_workflow(case: RecoveryWorkflowCase) { - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; // Emulate the recovered view of Postgres. Unlike with previous tests, we don't perform genesis.
let snapshot_logs = gen_storage_logs(100..300, 1).pop().unwrap(); - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot( &mut storage, L1BatchNumber(23), diff --git a/core/lib/zksync_core/src/metadata_calculator/tests.rs b/core/lib/zksync_core/src/metadata_calculator/tests.rs index 2040ef1c311..29096380b18 100644 --- a/core/lib/zksync_core/src/metadata_calculator/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/tests.rs @@ -10,7 +10,7 @@ use zksync_config::configs::{ chain::OperationsManagerConfig, database::{MerkleTreeConfig, MerkleTreeMode}, }; -use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_health_check::{CheckHealth, HealthStatus}; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; @@ -42,7 +42,7 @@ where #[tokio::test] async fn genesis_creation() { - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; @@ -58,7 +58,7 @@ async fn genesis_creation() { #[tokio::test] async fn basic_workflow() { - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); @@ -85,8 +85,8 @@ async fn basic_workflow() { assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(2)); } -async fn expected_tree_hash(pool: &ConnectionPool<Server>) -> H256 { - let mut storage = pool.access_storage().await.unwrap(); +async fn expected_tree_hash(pool: &ConnectionPool<Core>) -> H256 { + let mut storage = pool.connection().await.unwrap(); let sealed_l1_batch_number = storage .blocks_dal() .get_sealed_l1_batch_number() .await @@ -104,7 +104,7 @@ #[tokio::test] async fn status_receiver_has_correct_states() { - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let (mut calculator, _) = setup_calculator(temp_dir.path(), &pool).await; @@ -152,7 +152,7 @@ async fn status_receiver_has_correct_states() { #[tokio::test] async fn multi_l1_batch_workflow() { - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; // Collect all storage logs in a single L1 batch let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); @@ -188,7 +188,7 @@ async fn multi_l1_batch_workflow() { #[tokio::test] async fn running_metadata_calculator_with_additional_blocks() { - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let calculator = setup_lightweight_calculator(temp_dir.path(), &pool).await; @@ -210,7 +210,7 @@ async fn running_metadata_calculator_with_additional_blocks() { // Add some new blocks to the storage. let new_logs = gen_storage_logs(100..200, 10); - extend_db_state(&mut pool.access_storage().await.unwrap(), new_logs).await; + extend_db_state(&mut pool.connection().await.unwrap(), new_logs).await; // Wait until these blocks are processed.
The calculator may have spurious delays, // thus we wait in a loop. @@ -238,7 +238,7 @@ async fn running_metadata_calculator_with_additional_blocks() { #[tokio::test] async fn shutting_down_calculator() { - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let (merkle_tree_config, mut operation_config) = create_config(temp_dir.path(), MerkleTreeMode::Lightweight); @@ -263,7 +263,7 @@ async fn test_postgres_backup_recovery( sleep_between_batches: bool, insert_batch_without_metadata: bool, ) { - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let calculator = setup_lightweight_calculator(temp_dir.path(), &pool).await; reset_db_state(&pool, 5).await; @@ -271,7 +271,7 @@ async fn test_postgres_backup_recovery( // Simulate recovery from a DB snapshot in which some newer L1 batches are erased. let last_batch_after_recovery = L1BatchNumber(3); - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); let removed_batches = remove_l1_batches(&mut storage, last_batch_after_recovery).await; if insert_batch_without_metadata { @@ -304,7 +304,7 @@ async fn test_postgres_backup_recovery( assert_eq!(next_l1_batch, last_batch_after_recovery + 1); // Re-insert L1 batches to the storage after recovery. - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); for batch_header in &removed_batches { let mut txn = storage.start_transaction().await.unwrap(); txn.blocks_dal() @@ -354,7 +354,7 @@ async fn postgres_backup_recovery_with_excluded_metadata() { pub(crate) async fn setup_calculator( db_path: &Path, - pool: &ConnectionPool<Server>, + pool: &ConnectionPool<Core>, ) -> (MetadataCalculator, Arc<dyn ObjectStore>) { let store_factory = ObjectStoreFactory::mock(); let store = store_factory.create_store().await; @@ -367,7 +367,7 @@ pub(crate) async fn setup_calculator( async fn setup_lightweight_calculator( db_path: &Path, - pool: &ConnectionPool<Server>, + pool: &ConnectionPool<Core>, ) -> MetadataCalculator { let (db_config, operation_config) = create_config(db_path, MerkleTreeMode::Lightweight); setup_calculator_with_options(&db_config, &operation_config, pool, None).await @@ -392,7 +392,7 @@ fn create_config( async fn setup_calculator_with_options( merkle_tree_config: &MerkleTreeConfig, operation_config: &OperationsManagerConfig, - pool: &ConnectionPool<Server>, + pool: &ConnectionPool<Core>, object_store: Option<Arc<dyn ObjectStore>>, ) -> MetadataCalculator { let calculator_config = @@ -401,7 +401,7 @@ async fn setup_calculator_with_options( .await .unwrap(); - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); if storage.blocks_dal().is_genesis_needed().await.unwrap() { ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) .await @@ -416,7 +416,7 @@ fn path_to_string(path: &Path) -> String { pub(crate) async fn run_calculator( mut calculator: MetadataCalculator, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, ) -> H256 { let (stop_sx, stop_rx) = watch::channel(false); let (delay_sx, mut delay_rx) = mpsc::unbounded_channel(); @@ -438,8 +438,8 @@ pub(crate) async fn run_calculator( delayer_handle.await.unwrap() } -pub(crate) async fn reset_db_state(pool: &ConnectionPool<Server>, num_batches: usize) { - let mut storage =
pool.access_storage().await.unwrap(); +pub(crate) async fn reset_db_state(pool: &ConnectionPool<Core>, num_batches: usize) { + let mut storage = pool.connection().await.unwrap(); // Drops all L1 batches (except the L1 batch with number 0) and their storage logs. storage .storage_logs_dal() @@ -472,7 +472,7 @@ pub(crate) async fn reset_db_state(pool: &ConnectionPool<Server>, num_batches: u } pub(super) async fn extend_db_state( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, new_logs: impl IntoIterator<Item = Vec<StorageLog>>, ) { let mut storage = storage.start_transaction().await.unwrap(); @@ -487,7 +487,7 @@ pub(super) async fn extend_db_state( } pub(super) async fn extend_db_state_from_l1_batch( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, next_l1_batch: L1BatchNumber, new_logs: impl IntoIterator<Item = Vec<StorageLog>>, ) { @@ -525,7 +525,7 @@ pub(super) async fn extend_db_state_from_l1_batch( } async fn insert_initial_writes_for_batch( - connection: &mut StorageProcessor<'_, Server>, + connection: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, ) { let written_non_zero_slots: Vec<_> = connection @@ -596,7 +596,7 @@ pub(crate) fn gen_storage_logs( } async fn remove_l1_batches( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, last_l1_batch_to_keep: L1BatchNumber, ) -> Vec<L1BatchHeader> { let sealed_l1_batch_number = storage @@ -632,8 +632,8 @@ async fn remove_l1_batches( #[tokio::test] async fn deduplication_works_as_expected() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) .await .unwrap(); diff --git a/core/lib/zksync_core/src/metadata_calculator/updater.rs b/core/lib/zksync_core/src/metadata_calculator/updater.rs index b8c422d9536..8105d7692fe 100644 --- a/core/lib/zksync_core/src/metadata_calculator/updater.rs +++ b/core/lib/zksync_core/src/metadata_calculator/updater.rs @@ -5,7 +5,7 @@ use std::{ops, sync::Arc, time::Instant}; use anyhow::Context as _; use futures::{future, FutureExt}; use tokio::sync::watch; -use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_health_check::HealthUpdater; use zksync_merkle_tree::domain::TreeMetadata; use zksync_object_store::ObjectStore; @@ -86,7 +86,7 @@ impl TreeUpdater { /// is slow for whatever reason.
async fn process_multiple_batches( &mut self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, l1_batch_numbers: ops::RangeInclusive<L1BatchNumber>, ) -> L1BatchNumber { let start = Instant::now(); @@ -167,7 +167,7 @@ impl TreeUpdater { async fn step( &mut self, - mut storage: StorageProcessor<'_, Server>, + mut storage: Connection<'_, Core>, next_l1_batch_to_seal: &mut L1BatchNumber, ) { let Some(last_sealed_l1_batch) = storage @@ -199,7 +199,7 @@ impl TreeUpdater { pub async fn loop_updating_tree( mut self, delayer: Delayer, - pool: &ConnectionPool<Server>, + pool: &ConnectionPool<Core>, mut stop_receiver: watch::Receiver<bool>, health_updater: HealthUpdater, ) -> anyhow::Result<()> { @@ -208,7 +208,7 @@ impl TreeUpdater { else { return Ok(()); // Stop signal received }; - let mut storage = pool.access_storage_tagged("metadata_calculator").await?; + let mut storage = pool.connection_tagged("metadata_calculator").await?; // Ensure genesis creation let tree = &mut self.tree; @@ -277,7 +277,7 @@ impl TreeUpdater { tracing::info!("Stop signal received, metadata_calculator is shutting down"); break; } - let storage = pool.access_storage_tagged("metadata_calculator").await?; + let storage = pool.connection_tagged("metadata_calculator").await?; let snapshot = *next_l1_batch_to_seal; self.step(storage, &mut next_l1_batch_to_seal).await; @@ -312,7 +312,7 @@ impl TreeUpdater { } async fn check_initial_writes_consistency( - connection: &mut StorageProcessor<'_, Server>, + connection: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, tree_initial_writes: &[InitialStorageWrite], ) { diff --git a/core/lib/zksync_core/src/proof_data_handler/mod.rs b/core/lib/zksync_core/src/proof_data_handler/mod.rs index c0f6af1fecf..e5975e9caf7 100644 --- a/core/lib/zksync_core/src/proof_data_handler/mod.rs +++ b/core/lib/zksync_core/src/proof_data_handler/mod.rs @@ -7,7 +7,7 @@ use zksync_config::{ configs::{proof_data_handler::ProtocolVersionLoadingMode, ProofDataHandlerConfig}, ContractsConfig, }; -use zksync_dal::{ConnectionPool, Server}; +use zksync_dal::{ConnectionPool, Core}; use zksync_object_store::ObjectStore; use zksync_prover_interface::api::{ProofGenerationDataRequest, SubmitProofRequest}; use zksync_types::{ @@ -35,7 +35,7 @@ pub async fn run_server( config: ProofDataHandlerConfig, contracts_config: ContractsConfig, blob_store: Arc<dyn ObjectStore>, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, mut stop_receiver: watch::Receiver<bool>, ) -> anyhow::Result<()> { let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port)); diff --git a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs index 9a2b2e61f21..3cae0c7428d 100644 --- a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs +++ b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs @@ -9,7 +9,7 @@ use axum::{ use zksync_config::configs::{ proof_data_handler::ProtocolVersionLoadingMode, ProofDataHandlerConfig, }; -use zksync_dal::{ConnectionPool, Server, ServerDals, SqlxError}; +use zksync_dal::{ConnectionPool, Core, CoreDal, SqlxError}; use zksync_object_store::{ObjectStore, ObjectStoreError}; use zksync_prover_interface::api::{ ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, @@ -27,7 +27,7 @@ use zksync_utils::u256_to_h256; #[derive(Clone)] pub(crate) struct RequestProcessor { blob_store: Arc<dyn ObjectStore>, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, config: ProofDataHandlerConfig, l1_verifier_config: Option<L1VerifierConfig>, } @@ -67,7 +67,7
@@ impl IntoResponse for RequestProcessorError { impl RequestProcessor { pub(crate) fn new( blob_store: Arc<dyn ObjectStore>, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, config: ProofDataHandlerConfig, l1_verifier_config: Option<L1VerifierConfig>, ) -> Self { @@ -87,7 +87,7 @@ impl RequestProcessor { let l1_batch_number_result = self .pool - .access_storage() + .connection() .await .unwrap() .proof_generation_dal() @@ -110,7 +110,7 @@ impl RequestProcessor { let header = self .pool - .access_storage() + .connection() .await .unwrap() .blocks_dal() @@ -124,7 +124,7 @@ impl RequestProcessor { let fri_protocol_version = FriProtocolVersionId::from(protocol_version); (self .pool - .access_storage() + .connection() .await .unwrap() .protocol_versions_dal() @@ -146,7 +146,7 @@ impl RequestProcessor { let storage_batch = self .pool - .access_storage() + .connection() .await .unwrap() .blocks_dal() @@ -198,7 +198,7 @@ impl RequestProcessor { let events_queue_state_from_prover = H256::from_slice(&proof.aggregation_result_coords[3]); - let mut storage = self.pool.access_storage().await.unwrap(); + let mut storage = self.pool.connection().await.unwrap(); let l1_batch = storage .blocks_dal() @@ -267,7 +267,7 @@ impl RequestProcessor { } SubmitProofRequest::SkippedProofGeneration => { self.pool - .access_storage() + .connection() .await .unwrap() .proof_generation_dal() diff --git a/core/lib/zksync_core/src/reorg_detector/mod.rs b/core/lib/zksync_core/src/reorg_detector/mod.rs index a943e6aab45..d7ba01abc0d 100644 --- a/core/lib/zksync_core/src/reorg_detector/mod.rs +++ b/core/lib/zksync_core/src/reorg_detector/mod.rs @@ -3,7 +3,7 @@ use std::{fmt, time::Duration}; use anyhow::Context as _; use async_trait::async_trait; use tokio::sync::watch; -use zksync_dal::{ConnectionPool, Server, ServerDals}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_types::{L1BatchNumber, MiniblockNumber, H256}; use zksync_web3_decl::{ @@ -215,7 +215,7 @@ impl HandleReorgDetectorEvent for HealthUpdater { pub struct ReorgDetector { client: Box<dyn MainNodeClient>, event_handler: Box<dyn HandleReorgDetectorEvent>, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, sleep_interval: Duration, health_check: ReactiveHealthCheck, } @@ -223,7 +223,7 @@ pub struct ReorgDetector { impl ReorgDetector { const DEFAULT_SLEEP_INTERVAL: Duration = Duration::from_secs(5); - pub fn new(client: HttpClient, pool: ConnectionPool<Server>) -> Self { + pub fn new(client: HttpClient, pool: ConnectionPool<Core>) -> Self { let (health_check, health_updater) = ReactiveHealthCheck::new("reorg_detector"); Self { client: Box::new(client), @@ -241,11 +241,7 @@ impl ReorgDetector { /// Returns `Ok(())` if no reorg was detected. /// Returns `Err::ReorgDetected()` if a reorg was detected. pub async fn check_consistency(&mut self) -> Result<(), Error> { - let mut storage = self - .pool - .access_storage() - .await - .context("access_storage()")?; + let mut storage = self.pool.connection().await.context("connection()")?; let Some(local_l1_batch) = storage .blocks_dal() .get_last_l1_batch_number_with_metadata() @@ -289,11 +285,7 @@ impl ReorgDetector { // Check that the first L1 batch matches, to make sure that // we are actually tracking the same chain as the main node.
- let mut storage = self - .pool - .access_storage() - .await - .context("access_storage()")?; + let mut storage = self.pool.connection().await.context("connection()")?; let first_l1_batch = storage .blocks_dal() .get_earliest_l1_batch_number_with_metadata() @@ -321,11 +313,7 @@ impl ReorgDetector { &self, miniblock: MiniblockNumber, ) -> Result<bool, Error> { - let mut storage = self - .pool - .access_storage() - .await - .context("access_storage()")?; + let mut storage = self.pool.connection().await.context("connection()")?; let local_hash = storage .blocks_dal() .get_miniblock_header(miniblock) @@ -354,11 +342,7 @@ impl ReorgDetector { /// Compares root hashes of the latest local batch and of the same batch from the main node. async fn root_hashes_match(&self, l1_batch: L1BatchNumber) -> Result<bool, Error> { - let mut storage = self - .pool - .access_storage() - .await - .context("access_storage()")?; + let mut storage = self.pool.connection().await.context("connection()")?; let local_hash = storage .blocks_dal() .get_l1_batch_state_root(l1_batch) diff --git a/core/lib/zksync_core/src/reorg_detector/tests.rs b/core/lib/zksync_core/src/reorg_detector/tests.rs index 57fa3eff6b5..af3c62eb81b 100644 --- a/core/lib/zksync_core/src/reorg_detector/tests.rs +++ b/core/lib/zksync_core/src/reorg_detector/tests.rs @@ -8,7 +8,7 @@ use std::{ use assert_matches::assert_matches; use test_casing::{test_casing, Product}; use tokio::sync::mpsc; -use zksync_dal::{ServerDals, StorageProcessor}; +use zksync_dal::{Connection, CoreDal}; use zksync_types::{ block::{MiniblockHasher, MiniblockHeader}, L2ChainId, ProtocolVersion, @@ -21,7 +21,7 @@ use crate::{ utils::testonly::{create_l1_batch, create_miniblock}, }; -async fn store_miniblock(storage: &mut StorageProcessor<'_, Server>, number: u32, hash: H256) { +async fn store_miniblock(storage: &mut Connection<'_, Core>, number: u32, hash: H256) { let header = MiniblockHeader { hash, ..create_miniblock(number) @@ -33,7 +33,7 @@ async fn store_miniblock(storage: &mut StorageProcessor<'_, Server>, number: u32 .unwrap(); } -async fn seal_l1_batch(storage: &mut StorageProcessor<'_, Server>, number: u32, hash: H256) { +async fn seal_l1_batch(storage: &mut Connection<'_, Core>, number: u32, hash: H256) { let header = create_l1_batch(number); storage .blocks_dal() @@ -152,7 +152,7 @@ impl HandleReorgDetectorEvent for mpsc::UnboundedSender<(MiniblockNumber, L1Batc } } -fn create_mock_detector(client: MockMainNodeClient, pool: ConnectionPool<Server>) -> ReorgDetector { +fn create_mock_detector(client: MockMainNodeClient, pool: ConnectionPool<Core>) -> ReorgDetector { let (health_check, health_updater) = ReactiveHealthCheck::new("reorg_detector"); ReorgDetector { client: Box::new(client), @@ -166,8 +166,8 @@ fn create_mock_detector(client: MockMainNodeClient, pool: ConnectionPool<Server> #[test_casing(4, Product(([false, true], [false, true])))] #[tokio::test] async fn normal_reorg_function(snapshot_recovery: bool, with_transient_errors: bool) { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let mut client = MockMainNodeClient::default(); if snapshot_recovery { storage @@ -250,8 +250,8 @@ async fn normal_reorg_function(snapshot_recovery: bool, with_transient_errors: b #[tokio::test] async fn detector_stops_on_fatal_rpc_error() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool =
ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await .unwrap(); @@ -267,8 +267,8 @@ async fn detector_stops_on_fatal_rpc_error() { #[tokio::test] async fn reorg_is_detected_on_batch_hash_mismatch() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let genesis_root_hash = ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await @@ -313,8 +313,8 @@ async fn reorg_is_detected_on_batch_hash_mismatch() { #[tokio::test] async fn reorg_is_detected_on_miniblock_hash_mismatch() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let mut client = MockMainNodeClient::default(); let genesis_root_hash = ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) @@ -384,10 +384,10 @@ async fn reorg_is_detected_on_historic_batch_hash_mismatch( (1_u32..=10, last_correct_batch) }; - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; let earliest_l1_batch_number = l1_batch_numbers.start() - 1; { - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); storage .protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -422,7 +422,7 @@ async fn reorg_is_detected_on_historic_batch_hash_mismatch( let mut miniblock_and_l1_batch_hashes: Vec<_> = miniblock_and_l1_batch_hashes.collect(); if matches!(storage_update_strategy, StorageUpdateStrategy::Prefill) { - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); for &(number, miniblock_hash, l1_batch_hash) in &miniblock_and_l1_batch_hashes { store_miniblock(&mut storage, number, miniblock_hash).await; seal_l1_batch(&mut storage, number, l1_batch_hash).await; @@ -438,7 +438,7 @@ async fn reorg_is_detected_on_historic_batch_hash_mismatch( if matches!(storage_update_strategy, StorageUpdateStrategy::Sequential) { tokio::spawn(async move { - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); let mut last_number = earliest_l1_batch_number; while let Some((miniblock, l1_batch)) = block_update_receiver.recv().await { if miniblock == MiniblockNumber(last_number) @@ -463,8 +463,8 @@ async fn reorg_is_detected_on_historic_batch_hash_mismatch( #[tokio::test] async fn stopping_reorg_detector_while_waiting_for_l1_batch() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); assert!(storage.blocks_dal().is_genesis_needed().await.unwrap()); drop(storage); @@ -478,8 +478,8 @@ async fn stopping_reorg_detector_while_waiting_for_l1_batch() { #[tokio::test] async fn detector_errors_on_earliest_batch_hash_mismatch() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let genesis_root_hash = ensure_genesis_state(&mut storage,
L2ChainId::default(), &GenesisParams::mock()) .await @@ -503,7 +503,7 @@ async fn detector_errors_on_earliest_batch_hash_mismatch() { #[tokio::test] async fn detector_errors_on_earliest_batch_hash_mismatch_with_snapshot_recovery() { - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; let mut client = MockMainNodeClient::default(); client .l1_batch_root_hashes @@ -515,7 +515,7 @@ async fn detector_errors_on_earliest_batch_hash_mismatch_with_snapshot_recovery( tokio::spawn(async move { tokio::time::sleep(Duration::from_millis(20)).await; - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); storage .protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -532,8 +532,8 @@ async fn detector_errors_on_earliest_batch_hash_mismatch_with_snapshot_recovery( #[tokio::test] async fn reorg_is_detected_without_waiting_for_main_node_to_catch_up() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let genesis_root_hash = ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/main_executor.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/main_executor.rs index 5850321e57c..0d1eae39075 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/main_executor.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/main_executor.rs @@ -12,7 +12,7 @@ use multivm::{ }; use once_cell::sync::OnceCell; use tokio::sync::{mpsc, watch}; -use zksync_dal::{ConnectionPool, Server}; +use zksync_dal::{ConnectionPool, Core}; use zksync_state::{RocksdbStorage, StorageView, WriteStorage}; use zksync_types::{vm_trace::Call, Transaction, U256}; use zksync_utils::bytecode::CompressedBytecodeInfo; @@ -31,7 +31,7 @@ use crate::{ #[derive(Debug, Clone)] pub struct MainBatchExecutor { state_keeper_db_path: String, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, save_call_traces: bool, max_allowed_tx_gas_limit: U256, upload_witness_inputs_to_gcs: bool, @@ -42,7 +42,7 @@ pub struct MainBatchExecutor { impl MainBatchExecutor { pub fn new( state_keeper_db_path: String, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, max_allowed_tx_gas_limit: U256, save_call_traces: bool, upload_witness_inputs_to_gcs: bool, @@ -73,11 +73,7 @@ impl BatchExecutor for MainBatchExecutor { .await .expect("Failed initializing state keeper storage"); secondary_storage.enable_enum_index_migration(self.enum_index_migration_chunk_size); - let mut conn = self - .pool - .access_storage_tagged("state_keeper") - .await - .unwrap(); + let mut conn = self.pool.connection_tagged("state_keeper").await.unwrap(); let secondary_storage = secondary_storage .synchronize(&mut conn, stop_receiver) .await diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs index 6e5adb65894..1fc7289cf94 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs @@ -1,6 +1,6 @@ use assert_matches::assert_matches; use test_casing::test_casing; -use zksync_dal::{ConnectionPool, Server}; +use zksync_dal::{ConnectionPool, Core}; use zksync_test_account::Account; use zksync_types::{get_nonce_key,
utils::storage_key_for_eth_balance, PriorityOpId}; @@ -32,7 +32,7 @@ fn assert_reverted(execution_result: &TxExecutionResult) { /// Checks that we can successfully execute a single L2 tx in batch executor. #[tokio::test] async fn execute_l2_tx() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); tester.genesis().await; @@ -75,7 +75,7 @@ impl SnapshotRecoveryMutation { #[tokio::test] async fn execute_l2_tx_after_snapshot_recovery(mutation: Option<SnapshotRecoveryMutation>) { let mut alice = Account::random(); - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut storage_snapshot = StorageSnapshot::new(&connection_pool, &mut alice, 10).await; assert!(storage_snapshot.storage_logs.len() > 10); // sanity check @@ -99,7 +99,7 @@ async fn execute_l2_tx_after_snapshot_recovery(mutation: Option<SnapshotRecover - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -116,7 +116,7 @@ async fn execute_l1_tx() { /// Checks that we can successfully execute a single L2 tx and a single L1 tx in batch executor. #[tokio::test] async fn execute_l2_and_l1_txs() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -136,7 +136,7 @@ async fn execute_l2_and_l1_txs() { /// Checks that we can successfully rollback the transaction and execute it once again. #[tokio::test] async fn rollback() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -180,7 +180,7 @@ async fn rollback() { /// Checks that incorrect transactions are marked as rejected. #[tokio::test] async fn reject_tx() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -196,7 +196,7 @@ async fn reject_tx() { /// Checks that tx with too big gas limit is correctly rejected. #[tokio::test] async fn too_big_gas_limit() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -243,7 +243,7 @@ async fn too_big_gas_limit() { /// Checks that we can't execute the same transaction twice. #[tokio::test] async fn tx_cant_be_reexecuted() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -263,7 +263,7 @@ async fn tx_cant_be_reexecuted() { /// Checks that we can deploy and call the loadnext contract.
#[tokio::test] async fn deploy_and_call_loadtest() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -289,7 +289,7 @@ async fn deploy_and_call_loadtest() { /// Checks that a tx that is reverted by the VM still can be included into a batch. #[tokio::test] async fn execute_reverted_tx() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -316,7 +316,7 @@ async fn execute_reverted_tx() { /// a batch with different operations, both successful and not. #[tokio::test] async fn execute_realistic_scenario() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut alice = Account::random(); let mut bob = Account::random(); @@ -366,7 +366,7 @@ async fn execute_realistic_scenario() { /// Checks that we handle the bootloader out of gas error on execution phase. #[tokio::test] async fn bootloader_out_of_gas_for_any_tx() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::with_config( @@ -392,7 +392,7 @@ async fn bootloader_out_of_gas_for_any_tx() { #[tokio::test] #[ignore] // This test fails. async fn bootloader_tip_out_of_gas() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut alice = Account::random(); let mut tester = Tester::new(connection_pool); diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index 9faffa960a2..28214c22075 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -11,7 +11,7 @@ use tempfile::TempDir; use tokio::sync::watch; use zksync_config::configs::chain::StateKeeperConfig; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_dal::{ConnectionPool, Server, ServerDals}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_test_account::{Account, DeployContractsTx, TxType}; use zksync_types::{ block::MiniblockHasher, ethabi::Token, fee::Fee, snapshots::SnapshotRecoveryStatus, @@ -67,16 +67,16 @@ impl TestConfig { pub(super) struct Tester { fee_account: Address, db_dir: TempDir, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, config: TestConfig, } impl Tester { - pub(super) fn new(pool: ConnectionPool<Server>) -> Self { + pub(super) fn new(pool: ConnectionPool<Core>) -> Self { Self::with_config(pool, TestConfig::new()) } - pub(super) fn with_config(pool: ConnectionPool<Server>, config: TestConfig) -> Self { + pub(super) fn with_config(pool: ConnectionPool<Core>, config: TestConfig) -> Self { Self { fee_account: Address::repeat_byte(0x01), db_dir: TempDir::new().unwrap(), @@ -157,11 +157,7 @@ impl Tester { /// Performs the genesis in the storage.
pub(super) async fn genesis(&self) { - let mut storage = self - .pool - .access_storage_tagged("state_keeper") - .await - .unwrap(); + let mut storage = self.pool.connection_tagged("state_keeper").await.unwrap(); if storage.blocks_dal().is_genesis_needed().await.unwrap() { create_genesis_l1_batch( &mut storage, @@ -180,11 +176,7 @@ impl Tester { /// Adds funds for specified account list. /// Expects genesis to be performed (i.e. `setup_storage` called beforehand). pub(super) async fn fund(&self, addresses: &[Address]) { - let mut storage = self - .pool - .access_storage_tagged("state_keeper") - .await - .unwrap(); + let mut storage = self.pool.connection_tagged("state_keeper").await.unwrap(); let eth_amount = U256::from(10u32).pow(U256::from(32)); //10^32 wei @@ -376,7 +368,7 @@ pub(super) struct StorageSnapshot { impl StorageSnapshot { /// Generates a new snapshot by executing the specified number of transactions, each in a separate miniblock. pub async fn new( - connection_pool: &ConnectionPool<Server>, + connection_pool: &ConnectionPool<Core>, alice: &mut Account, transaction_count: u32, ) -> Self { @@ -384,7 +376,7 @@ impl StorageSnapshot { tester.genesis().await; tester.fund(&[alice.address()]).await; - let mut storage = connection_pool.access_storage().await.unwrap(); + let mut storage = connection_pool.connection().await.unwrap(); let all_logs = storage .snapshots_creator_dal() .get_storage_logs_chunk( @@ -457,7 +449,7 @@ impl StorageSnapshot { ) .finalize(ProtocolVersionId::latest()); - let mut storage = connection_pool.access_storage().await.unwrap(); + let mut storage = connection_pool.connection().await.unwrap(); storage.blocks_dal().delete_genesis().await.unwrap(); Self { miniblock_number: MiniblockNumber(l2_block_env.number), @@ -469,13 +461,13 @@ impl StorageSnapshot { } /// Recovers storage from this snapshot. - pub async fn recover(self, connection_pool: &ConnectionPool<Server>) -> SnapshotRecoveryStatus { + pub async fn recover(self, connection_pool: &ConnectionPool<Core>) -> SnapshotRecoveryStatus { let snapshot_logs: Vec<_> = self .storage_logs .into_iter() .map(|(key, value)| StorageLog::new_write_log(key, value)) .collect(); - let mut storage = connection_pool.access_storage().await.unwrap(); + let mut storage = connection_pool.connection().await.unwrap(); let mut snapshot = prepare_recovery_snapshot( &mut storage, L1BatchNumber(1), diff --git a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs index 5303af221e5..1ba0890867a 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs @@ -2,7 +2,7 @@ use std::time::Duration; use anyhow::Context; use multivm::interface::{L1BatchEnv, SystemEnv}; -use zksync_dal::{Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, Core, CoreDal}; use zksync_types::{L1BatchNumber, MiniblockNumber, H256}; use super::PendingBatchData; @@ -30,7 +30,7 @@ pub(crate) struct IoCursor { impl IoCursor { /// Loads the cursor from Postgres. - pub async fn new(storage: &mut StorageProcessor<'_, Server>) -> anyhow::Result<Self> { + pub async fn new(storage: &mut Connection<'_, Core>) -> anyhow::Result<Self> { let last_sealed_l1_batch_number = storage .blocks_dal() .get_sealed_l1_batch_number() .await @@ -88,7 +88,7 @@ impl IoCursor { /// /// Propagates DB errors. Also returns an error if environment doesn't correspond to a pending L1 batch.
pub(crate) async fn load_pending_batch( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, system_env: SystemEnv, l1_batch_env: L1BatchEnv, ) -> anyhow::Result<PendingBatchData> { diff --git a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs index 246a70da775..c95de933a6c 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs @@ -8,7 +8,7 @@ use std::{collections::HashMap, ops}; use futures::FutureExt; use vm_utils::storage::L1BatchParamsProvider; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::{ConnectionPool, Server}; +use zksync_dal::{ConnectionPool, Core}; use zksync_types::{ block::MiniblockHasher, fee::TransactionExecutionMetrics, L2ChainId, ProtocolVersion, ProtocolVersionId, @@ -35,8 +35,8 @@ fn test_poll_iters() { #[tokio::test] async fn creating_io_cursor_with_genesis() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await .unwrap(); @@ -66,8 +66,8 @@ async fn creating_io_cursor_with_genesis() { #[tokio::test] async fn creating_io_cursor_with_snapshot_recovery() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -100,8 +100,8 @@ async fn creating_io_cursor_with_snapshot_recovery() { #[tokio::test] async fn waiting_for_l1_batch_params_with_genesis() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let genesis_root_hash = ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await @@ -128,7 +128,7 @@ async fn waiting_for_l1_batch_params_with_genesis() { assert!((&mut wait_future).now_or_never().is_none()); let expected_hash = H256::repeat_byte(1); - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); storage .blocks_dal() .set_l1_batch_hash(L1BatchNumber(1), expected_hash) @@ -141,8 +141,8 @@ async fn waiting_for_l1_batch_params_with_genesis() { #[tokio::test] async fn waiting_for_l1_batch_params_after_snapshot_recovery() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -175,7 +175,7 @@ async fn waiting_for_l1_batch_params_after_snapshot_recovery() { assert!((&mut wait_future).now_or_never().is_none()); let expected_hash = H256::repeat_byte(1); - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); storage .blocks_dal() .set_l1_batch_hash(new_l1_batch.number, expected_hash) @@ -188,8 +188,8 @@ async fn waiting_for_l1_batch_params_after_snapshot_recovery() { #[tokio::test] async fn
getting_first_miniblock_in_batch_with_genesis() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await .unwrap(); @@ -236,7 +236,7 @@ async fn getting_first_miniblock_in_batch_with_genesis() { async fn assert_first_miniblock_numbers( provider: &L1BatchParamsProvider, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, batches_and_miniblocks: &HashMap<L1BatchNumber, Result<Option<MiniblockNumber>, ()>>, ) { for (&batch, &expected_miniblock) in batches_and_miniblocks { @@ -260,8 +260,8 @@ async fn assert_first_miniblock_numbers( #[tokio::test] async fn getting_first_miniblock_in_batch_after_snapshot_recovery() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -308,8 +308,8 @@ async fn getting_first_miniblock_in_batch_after_snapshot_recovery() { #[tokio::test] async fn loading_pending_batch_with_genesis() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let genesis_params = GenesisParams::mock(); ensure_genesis_state(&mut storage, L2ChainId::default(), &genesis_params) .await @@ -354,7 +354,7 @@ async fn loading_pending_batch_with_genesis() { } async fn store_pending_miniblocks( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, numbers: ops::RangeInclusive<u32>, contract_hashes: BaseSystemContractsHashes, ) { @@ -381,8 +381,8 @@ async fn store_pending_miniblocks( #[tokio::test] async fn loading_pending_batch_after_snapshot_recovery() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -441,8 +441,8 @@ async fn loading_pending_batch_after_snapshot_recovery() { #[tokio::test] async fn getting_batch_version_with_genesis() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let mut genesis_params = GenesisParams::mock(); genesis_params.protocol_version = ProtocolVersionId::Version5; ensure_genesis_state(&mut storage, L2ChainId::default(), &genesis_params) @@ -482,8 +482,8 @@ async fn getting_batch_version_with_genesis() { #[tokio::test] async fn getting_batch_version_after_snapshot_recovery() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; diff --git a/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs b/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs index
9708c7cc131..160d64a0297 100644 --- a/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs +++ b/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs @@ -6,12 +6,12 @@ use std::time::{Duration, Instant}; use anyhow::Context as _; use tokio::sync::watch; -use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_types::MiniblockNumber; /// Runs the migration for pending miniblocks. pub(crate) async fn migrate_pending_miniblocks( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { let started_at = Instant::now(); tracing::info!("Started migrating `fee_account_address` for pending miniblocks"); @@ -40,7 +40,7 @@ pub(crate) async fn migrate_pending_miniblocks( /// Runs the migration for non-pending miniblocks. Should be run as a background task. pub(crate) async fn migrate_miniblocks( - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, last_miniblock: MiniblockNumber, stop_receiver: watch::Receiver<bool>, ) -> anyhow::Result<()> { @@ -48,7 +48,7 @@ pub(crate) async fn migrate_miniblocks( // Since snapshot recovery is later than the fee address migration in terms of code versioning, // the migration is always no-op in case of snapshot recovery; all miniblocks added after recovery are guaranteed // to have their fee address set. - let mut storage = pool.access_storage_tagged("state_keeper").await?; + let mut storage = pool.connection_tagged("state_keeper").await?; if storage .snapshot_recovery_dal() .get_applied_snapshot_status() @@ -82,7 +82,7 @@ struct MigrationOutput { /// It's important for the `chunk_size` to be a constant; this ensures that each chunk is migrated atomically. async fn migrate_miniblocks_inner( - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, last_miniblock: MiniblockNumber, chunk_size: u32, sleep_interval: Duration, ) -> anyhow::Result<MigrationOutput> { anyhow::ensure!(chunk_size > 0, "Chunk size must be positive"); - let mut storage = pool.access_storage_tagged("state_keeper").await?; + let mut storage = pool.connection_tagged("state_keeper").await?; #[allow(deprecated)] let l1_batches_have_fee_account_address = storage .blocks_dal() @@ -114,7 +114,7 @@ async fn migrate_miniblocks_inner( let chunk_end = last_miniblock.min(chunk_start + chunk_size - 1); let chunk = chunk_start..=chunk_end; - let mut storage = pool.access_storage_tagged("state_keeper").await?; + let mut storage = pool.connection_tagged("state_keeper").await?; let is_chunk_migrated = is_fee_address_migrated(&mut storage, chunk_start).await?; if is_chunk_migrated { @@ -153,7 +153,7 @@ async fn migrate_miniblocks_inner( #[allow(deprecated)] async fn is_fee_address_migrated( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, miniblock: MiniblockNumber, ) -> anyhow::Result<bool> { storage @@ -175,7 +175,7 @@ mod tests { use super::*; use crate::utils::testonly::create_miniblock; - async fn prepare_storage(storage: &mut StorageProcessor<'_, Server>) { + async fn prepare_storage(storage: &mut Connection<'_, Core>) { storage .protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -216,7 +216,7 @@ mod tests { } } - async fn assert_migration(storage: &mut StorageProcessor<'_, Server>) { + async fn assert_migration(storage: &mut Connection<'_, Core>) { for number in 0..5 { assert!(is_fee_address_migrated(storage, MiniblockNumber(number)) .await @@ -237,8 +237,8 @@ mod
tests { #[tokio::test] async fn migration_basics(chunk_size: u32) { // Replicate providing a pool with a single connection. - let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); prepare_storage(&mut storage).await; drop(storage); @@ -256,7 +256,7 @@ mod tests { assert_eq!(result.miniblocks_affected, 5); // Check that all blocks are migrated. - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); assert_migration(&mut storage).await; drop(storage); @@ -277,8 +277,8 @@ mod tests { #[test_casing(3, [1, 2, 3])] #[tokio::test] async fn stopping_and_resuming_migration(chunk_size: u32) { - let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); prepare_storage(&mut storage).await; drop(storage); @@ -309,15 +309,15 @@ mod tests { .unwrap(); assert_eq!(result.miniblocks_affected, 5 - u64::from(chunk_size)); - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); assert_migration(&mut storage).await; } #[test_casing(3, [1, 2, 3])] #[tokio::test] async fn new_blocks_added_during_migration(chunk_size: u32) { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); prepare_storage(&mut storage).await; let (_stop_sender, stop_receiver) = watch::channel(true); // signal stop right away diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index 5a04dcaac04..ab4cef13f08 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -13,7 +13,7 @@ use multivm::{ }; use vm_utils::storage::{l1_batch_params, L1BatchParamsProvider}; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_dal::{ConnectionPool, Server, ServerDals}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_mempool::L2TxFilter; use zksync_object_store::ObjectStore; use zksync_types::{ @@ -47,7 +47,7 @@ use crate::{ #[derive(Debug)] pub struct MempoolIO { mempool: MempoolGuard, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, object_store: Arc<dyn ObjectStore>, timeout_sealer: TimeoutSealer, filter: L2TxFilter, @@ -91,7 +91,7 @@ impl StateKeeperIO for MempoolIO { } async fn load_pending_batch(&mut self) -> anyhow::Result<Option<PendingBatchData>> { - let mut storage = self.pool.access_storage_tagged("state_keeper").await?; + let mut storage = self.pool.connection_tagged("state_keeper").await?; let pending_miniblock_header = self .l1_batch_params_provider @@ -178,7 +178,7 @@ impl StateKeeperIO for MempoolIO { self.current_l1_batch_number.0, self.filter.fee_input ); - let mut storage = self.pool.access_storage_tagged("state_keeper").await?; + let mut storage = self.pool.connection_tagged("state_keeper").await?; let (base_system_contracts, protocol_version) = storage .protocol_versions_dal() .base_system_contracts_by_timestamp(current_timestamp) @@ -273,7 +273,7 @@ impl StateKeeperIO for MempoolIO { self.mempool.rollback(rejected); // Mark tx as rejected in the storage.
- let mut storage = self.pool.access_storage_tagged("state_keeper").await?; + let mut storage = self.pool.connection_tagged("state_keeper").await?; KEEPER_METRICS.rejected_transactions.inc(); tracing::warn!( "transaction {} is rejected with error: {error}", @@ -332,7 +332,7 @@ impl StateKeeperIO for MempoolIO { } let pool = self.pool.clone(); - let mut storage = pool.access_storage_tagged("state_keeper").await?; + let mut storage = pool.connection_tagged("state_keeper").await?; let fictive_miniblock = updates_manager .seal_l1_batch( @@ -349,7 +349,7 @@ impl StateKeeperIO for MempoolIO { } async fn load_previous_batch_version_id(&mut self) -> anyhow::Result<ProtocolVersionId> { - let mut storage = self.pool.access_storage_tagged("state_keeper").await?; + let mut storage = self.pool.connection_tagged("state_keeper").await?; let prev_l1_batch_number = self.current_l1_batch_number - 1; self.l1_batch_params_provider .load_l1_batch_protocol_version(&mut storage, prev_l1_batch_number) @@ -364,7 +364,7 @@ impl StateKeeperIO for MempoolIO { &mut self, version_id: ProtocolVersionId, ) -> anyhow::Result<Option<ProtocolUpgradeTx>> { - let mut storage = self.pool.access_storage_tagged("state_keeper").await?; + let mut storage = self.pool.connection_tagged("state_keeper").await?; Ok(storage .protocol_versions_dal() .get_protocol_upgrade_tx(version_id) @@ -426,7 +426,7 @@ impl MempoolIO { object_store: Arc<dyn ObjectStore>, miniblock_sealer_handle: MiniblockSealerHandle, batch_fee_input_provider: Arc<dyn BatchFeeModelInputProvider>, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, config: &StateKeeperConfig, delay_interval: Duration, l2_erc20_bridge_addr: Address, @@ -442,7 +442,7 @@ impl MempoolIO { "Virtual blocks per miniblock must be positive" ); - let mut storage = pool.access_storage_tagged("state_keeper").await?; + let mut storage = pool.connection_tagged("state_keeper").await?; let cursor = IoCursor::new(&mut storage) .await .context("failed initializing I/O cursor")?; @@ -493,7 +493,7 @@ impl MempoolIO { ); let wait_latency = KEEPER_METRICS.wait_for_prev_hash_time.start(); - let mut storage = self.pool.access_storage_tagged("state_keeper").await?; + let mut storage = self.pool.connection_tagged("state_keeper").await?; let prev_l1_batch_number = self.current_l1_batch_number - 1; let (batch_hash, _) = self .l1_batch_params_provider diff --git a/core/lib/zksync_core/src/state_keeper/io/mod.rs b/core/lib/zksync_core/src/state_keeper/io/mod.rs index b57a33b1f42..747b576029e 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mod.rs @@ -6,7 +6,7 @@ use std::{ use async_trait::async_trait; use multivm::interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}; use tokio::sync::{mpsc, oneshot}; -use zksync_dal::{ConnectionPool, Server}; +use zksync_dal::{ConnectionPool, Core}; use zksync_types::{ block::MiniblockExecutionData, protocol_upgrade::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, L1BatchNumber, MiniblockNumber, ProtocolVersionId, @@ -215,7 +215,7 @@ impl MiniblockSealerHandle { /// Component responsible for sealing miniblocks (i.e., storing their data to Postgres). #[derive(Debug)] pub struct MiniblockSealer { - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, is_sync: bool, // Weak sender handle to get queue capacity stats. commands_sender: mpsc::WeakSender<Completable<MiniblockSealCommand>>, } impl MiniblockSealer { /// Creates a sealer that will use the provided Postgres connection and will have the specified /// `command_capacity` for unprocessed sealing commands.
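Reviewer sketch for the `MiniblockSealer` hunks above, ahead of the `new` constructor that follows: the `Completable` payload behind `commands_sender` is not shown in this diff, so the shape below (including field names) is an assumption; it only illustrates the submit-then-await-completion pattern that makes a `command_capacity` of 0 behave synchronously.

```rust
use tokio::sync::{mpsc, oneshot};

// Assumed shape of the queued payload; only the `commands_sender` field
// name is visible in the diff above.
struct Completable<T> {
    command: T,
    completion_sender: oneshot::Sender<()>,
}

async fn submit_and_wait<T>(commands: &mpsc::Sender<Completable<T>>, command: T) {
    let (completion_sender, completion_receiver) = oneshot::channel();
    commands
        .send(Completable { command, completion_sender })
        .await
        .expect("sealer loop dropped its receiver");
    // Awaiting the completion turns submission into a synchronous call,
    // which is how a zero `command_capacity` degenerates into sync sealing.
    completion_receiver.await.expect("command dropped before sealing");
}
```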
pub fn new( - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, mut command_capacity: usize, ) -> (Self, MiniblockSealerHandle) { let is_sync = command_capacity == 0; @@ -265,11 +265,7 @@ impl MiniblockSealer { // Commands must be processed sequentially: a later miniblock cannot be saved before // an earlier one. while let Some(completable) = self.next_command().await { - let mut conn = self - .pool - .access_storage_tagged("state_keeper") - .await - .unwrap(); + let mut conn = self.pool.connection_tagged("state_keeper").await.unwrap(); completable.command.seal(&mut conn).await; if let Some(delta) = miniblock_seal_delta { MINIBLOCK_METRICS.seal_delta.observe(delta.elapsed()); diff --git a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs index 01034bb947b..64949448556 100644 --- a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs @@ -8,7 +8,7 @@ use multivm::{ interface::{FinishedL1Batch, L1BatchEnv}, utils::get_max_gas_per_pubdata_byte, }; -use zksync_dal::{Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, Core, CoreDal}; use zksync_types::{ block::{unpack_block_info, L1BatchHeader, MiniblockHeader}, event::{extract_added_tokens, extract_long_l2_to_l1_messages}, @@ -48,7 +48,7 @@ impl UpdatesManager { #[must_use = "fictive miniblock must be used to update I/O params"] pub(crate) async fn seal_l1_batch( mut self, - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, current_miniblock_number: MiniblockNumber, l1_batch_env: &L1BatchEnv, finished_batch: FinishedL1Batch, @@ -268,11 +268,11 @@ impl UpdatesManager { } impl MiniblockSealCommand { - pub async fn seal(&self, storage: &mut StorageProcessor<'_, Server>) { + pub async fn seal(&self, storage: &mut Connection<'_, Core>) { self.seal_inner(storage, false).await; } - async fn insert_transactions(&self, transaction: &mut StorageProcessor<'_, Server>) { + async fn insert_transactions(&self, transaction: &mut Connection<'_, Core>) { for tx_result in &self.miniblock.executed_transactions { let tx = tx_result.transaction.clone(); match &tx.common_data { @@ -315,7 +315,7 @@ impl MiniblockSealCommand { /// one for sending fees to the operator). /// /// `l2_erc20_bridge_addr` is required to extract the information on newly added tokens. - async fn seal_inner(&self, storage: &mut StorageProcessor<'_, Server>, is_fictive: bool) { + async fn seal_inner(&self, storage: &mut Connection<'_, Core>, is_fictive: bool) { self.assert_valid_miniblock(is_fictive); let mut transaction = storage.start_transaction().await.unwrap(); diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index 0508203d428..5f8cef21dbc 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -3,7 +3,7 @@ use std::time::Duration; use futures::FutureExt; use multivm::utils::derive_base_fee_and_gas_per_pubdata; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::{ConnectionPool, Server, ServerDals}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_mempool::L2TxFilter; use zksync_types::{ block::{BlockGasCount, MiniblockHasher}, @@ -34,7 +34,7 @@ mod tester; /// Ensure that MempoolIO.filter is correctly initialized right after mempool initialization.
#[tokio::test] async fn test_filter_initialization() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let tester = Tester::new(); // Genesis is needed for proper mempool initialization. @@ -48,7 +48,7 @@ async fn test_filter_initialization() { /// Ensure that MempoolIO.filter is modified correctly if there is a pending batch upon mempool initialization. #[tokio::test] async fn test_filter_with_pending_batch() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut tester = Tester::new(); tester.genesis(&connection_pool).await; @@ -93,7 +93,7 @@ async fn test_filter_with_pending_batch() { /// Ensure that `MempoolIO.filter` is modified correctly if there is no pending batch. #[tokio::test] async fn test_filter_with_no_pending_batch() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let tester = Tester::new(); tester.genesis(&connection_pool).await; @@ -134,7 +134,7 @@ async fn test_filter_with_no_pending_batch() { } async fn test_timestamps_are_distinct( - connection_pool: ConnectionPool<Server>, + connection_pool: ConnectionPool<Core>, prev_miniblock_timestamp: u64, delay_prev_miniblock_compared_to_batch: bool, ) { @@ -171,35 +171,35 @@ async fn test_timestamps_are_distinct( #[tokio::test] async fn l1_batch_timestamp_basics() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let current_timestamp = seconds_since_epoch(); test_timestamps_are_distinct(connection_pool, current_timestamp, false).await; } #[tokio::test] async fn l1_batch_timestamp_with_clock_skew() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let current_timestamp = seconds_since_epoch(); test_timestamps_are_distinct(connection_pool, current_timestamp + 2, false).await; } #[tokio::test] async fn l1_batch_timestamp_respects_prev_miniblock() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let current_timestamp = seconds_since_epoch(); test_timestamps_are_distinct(connection_pool, current_timestamp, true).await; } #[tokio::test] async fn l1_batch_timestamp_respects_prev_miniblock_with_clock_skew() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let current_timestamp = seconds_since_epoch(); test_timestamps_are_distinct(connection_pool, current_timestamp + 2, true).await; } #[tokio::test] async fn processing_storage_logs_when_sealing_miniblock() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let mut miniblock = MiniblockUpdates::new(0, 1, H256::zero(), 1, ProtocolVersionId::latest()); let tx = create_transaction(10, 100); @@ -261,7 +261,7 @@ async fn processing_storage_logs_when_sealing_miniblock() { l2_erc20_bridge_addr: Address::default(), pre_insert_txs: false, }; - let mut conn = connection_pool.access_storage().await.unwrap(); + let mut conn = connection_pool.connection().await.unwrap(); conn.protocol_versions_dal()
.save_protocol_version_with_tx(Default::default()) .await; @@ -296,7 +296,7 @@ async fn processing_storage_logs_when_sealing_miniblock() { #[tokio::test] async fn processing_events_when_sealing_miniblock() { - let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let l1_batch_number = L1BatchNumber(2); let mut miniblock = MiniblockUpdates::new(0, 1, H256::zero(), 1, ProtocolVersionId::latest()); @@ -339,7 +339,7 @@ async fn processing_events_when_sealing_miniblock() { l2_erc20_bridge_addr: Address::default(), pre_insert_txs: false, }; - let mut conn = pool.access_storage().await.unwrap(); + let mut conn = pool.connection().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(Default::default()) .await; @@ -359,14 +359,14 @@ async fn processing_events_when_sealing_miniblock() { } async fn test_miniblock_and_l1_batch_processing( - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, miniblock_sealer_capacity: usize, ) { let tester = Tester::new(); // Genesis is needed for proper mempool initialization. tester.genesis(&pool).await; - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); // Save metadata for the genesis L1 batch so that we don't hang in `seal_l1_batch`. storage .blocks_dal() @@ -404,7 +404,7 @@ async fn test_miniblock_and_l1_batch_processing( .unwrap(); // Check that miniblock #1 and L1 batch #1 are persisted. - let mut conn = pool.access_storage().await.unwrap(); + let mut conn = pool.connection().await.unwrap(); assert_eq!( conn.blocks_dal() .get_sealed_miniblock_number() @@ -423,20 +423,20 @@ async fn test_miniblock_and_l1_batch_processing( #[tokio::test] async fn miniblock_and_l1_batch_processing() { - let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let pool = ConnectionPool::<Core>::constrained_test_pool(1).await; test_miniblock_and_l1_batch_processing(pool, 1).await; } #[tokio::test] async fn miniblock_and_l1_batch_processing_with_sync_sealer() { - let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let pool = ConnectionPool::<Core>::constrained_test_pool(1).await; test_miniblock_and_l1_batch_processing(pool, 0).await; } #[tokio::test] async fn miniblock_processing_after_snapshot_recovery() { - let connection_pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = connection_pool.access_storage().await.unwrap(); + let connection_pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = connection_pool.connection().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; let tester = Tester::new(); @@ -575,7 +575,7 @@ async fn miniblock_processing_after_snapshot_recovery() { #[tokio::test] async fn miniblock_sealer_handle_blocking() { - let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let (mut sealer, mut sealer_handle) = MiniblockSealer::new(pool, 1); // The first command should be successfully submitted immediately. @@ -632,7 +632,7 @@ async fn miniblock_sealer_handle_blocking() { #[tokio::test] async fn miniblock_sealer_handle_parallel_processing() { - let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let (mut sealer, mut sealer_handle) = MiniblockSealer::new(pool, 5); // 5 miniblock sealing commands can be submitted without blocking.
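The test hunks above all follow the same mechanical substitution; collected in one illustrative helper (not code from this PR, though every call in it appears verbatim in the diff):

```rust
use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};

// `ConnectionPool<Server>` -> `ConnectionPool<Core>`,
// `access_storage()` -> `connection()`,
// `access_storage_tagged("...")` -> `connection_tagged("...")`,
// and DAL accessors now come from the renamed `CoreDal` trait.
async fn genesis_is_needed(pool: &ConnectionPool<Core>) -> anyhow::Result<bool> {
    let mut storage: Connection<'_, Core> = pool.connection().await?;
    Ok(storage.blocks_dal().is_genesis_needed().await?)
}
```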
@@ -659,7 +659,7 @@ async fn miniblock_sealer_handle_parallel_processing() { /// Ensure that subsequent miniblocks that belong to the same L1 batch have different timestamps #[tokio::test] async fn different_timestamp_for_miniblocks_in_same_batch() { - let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Core>::constrained_test_pool(1).await; let tester = Tester::new(); // Genesis is needed for proper mempool initialization. diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs index 3ce1cb284a1..8807fa37dbc 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs @@ -8,7 +8,7 @@ use zksync_config::{ GasAdjusterConfig, }; use zksync_contracts::BaseSystemContracts; -use zksync_dal::{ConnectionPool, Server, ServerDals}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_eth_client::clients::MockEthereum; use zksync_object_store::ObjectStoreFactory; use zksync_types::{ @@ -91,7 +91,7 @@ impl Tester { pub(super) async fn create_test_mempool_io( &self, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, miniblock_sealer_capacity: usize, ) -> (MempoolIO, MempoolGuard) { let gas_adjuster = Arc::new(self.create_gas_adjuster().await); @@ -138,8 +138,8 @@ impl Tester { self.current_timestamp = timestamp; } - pub(super) async fn genesis(&self, pool: &ConnectionPool<Server>) { - let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap(); + pub(super) async fn genesis(&self, pool: &ConnectionPool<Core>) { + let mut storage = pool.connection_tagged("state_keeper").await.unwrap(); if storage.blocks_dal().is_genesis_needed().await.unwrap() { create_genesis_l1_batch( &mut storage, @@ -157,12 +157,12 @@ impl Tester { pub(super) async fn insert_miniblock( &self, - pool: &ConnectionPool<Server>, + pool: &ConnectionPool<Core>, number: u32, base_fee_per_gas: u64, fee_input: BatchFeeInput, ) -> TransactionExecutionResult { - let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap(); + let mut storage = pool.connection_tagged("state_keeper").await.unwrap(); let tx = create_l2_transaction(10, 100); storage .transactions_dal() @@ -193,12 +193,12 @@ impl Tester { pub(super) async fn insert_sealed_batch( &self, - pool: &ConnectionPool<Server>, + pool: &ConnectionPool<Core>, number: u32, tx_results: &[TransactionExecutionResult], ) { let batch_header = create_l1_batch(number); - let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap(); + let mut storage = pool.connection_tagged("state_keeper").await.unwrap(); storage .blocks_dal() .insert_mock_l1_batch(&batch_header) diff --git a/core/lib/zksync_core/src/state_keeper/keeper.rs b/core/lib/zksync_core/src/state_keeper/keeper.rs index 84f8291299e..cf36df4a1ed 100644 --- a/core/lib/zksync_core/src/state_keeper/keeper.rs +++ b/core/lib/zksync_core/src/state_keeper/keeper.rs @@ -8,7 +8,7 @@ use std::{ use anyhow::Context as _; use multivm::interface::{Halt, L1BatchEnv, SystemEnv}; use tokio::sync::watch; -use zksync_dal::{ConnectionPool, Server}; +use zksync_dal::{ConnectionPool, Core}; use zksync_types::{ block::MiniblockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, protocol_version::ProtocolVersionId, storage_writes_deduplicator::StorageWritesDeduplicator, @@ -86,7 +86,7 @@ impl ZkSyncStateKeeper { /// Temporary method to migrate fee addresses from L1 batches to miniblocks.
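Before the migration entry point below, a note on the chunking scheme from the `fee_address_migration.rs` hunks earlier: `migrate_miniblocks_inner` walks `0..=last_miniblock` in fixed-size chunks and skips chunks that are already migrated. The pure function here (a hypothetical helper, not PR code) isolates that arithmetic:

```rust
use std::ops::RangeInclusive;

// Mirrors `last_miniblock.min(chunk_start + chunk_size - 1)` from the diff.
fn chunk_ranges(
    last_miniblock: u32,
    chunk_size: u32,
) -> impl Iterator<Item = RangeInclusive<u32>> {
    assert!(chunk_size > 0, "Chunk size must be positive");
    (0..=last_miniblock)
        .step_by(chunk_size as usize)
        .map(move |chunk_start| chunk_start..=last_miniblock.min(chunk_start + chunk_size - 1))
}

#[test]
fn chunks_cover_all_miniblocks() {
    let chunks: Vec<_> = chunk_ranges(5, 2).collect();
    assert_eq!(chunks, vec![0..=1, 2..=3, 4..=5]);
}
```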
pub fn run_fee_address_migration( &self, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, ) -> impl Future<Output = anyhow::Result<()>> { let last_miniblock = self.io.current_miniblock_number() - 1; let stop_receiver = self.stop_receiver.clone(); diff --git a/core/lib/zksync_core/src/state_keeper/mempool_actor.rs b/core/lib/zksync_core/src/state_keeper/mempool_actor.rs index 813f4c18e26..c18d084fa9b 100644 --- a/core/lib/zksync_core/src/state_keeper/mempool_actor.rs +++ b/core/lib/zksync_core/src/state_keeper/mempool_actor.rs @@ -6,7 +6,7 @@ use multivm::utils::derive_base_fee_and_gas_per_pubdata; use tokio::sync::mpsc; use tokio::sync::watch; use zksync_config::configs::chain::MempoolConfig; -use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_mempool::L2TxFilter; #[cfg(test)] use zksync_types::H256; @@ -35,7 +35,7 @@ pub async fn l2_tx_filter( #[derive(Debug)] pub struct MempoolFetcher { mempool: MempoolGuard, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, batch_fee_input_provider: Arc<dyn BatchFeeModelInputProvider>, sync_interval: Duration, sync_batch_size: usize, @@ -49,7 +49,7 @@ impl MempoolFetcher { mempool: MempoolGuard, batch_fee_input_provider: Arc<dyn BatchFeeModelInputProvider>, config: &MempoolConfig, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, ) -> Self { Self { mempool, @@ -64,7 +64,7 @@ impl MempoolFetcher { } pub async fn run(mut self, stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> { - let mut storage = self.pool.access_storage_tagged("state_keeper").await?; + let mut storage = self.pool.connection_tagged("state_keeper").await?; if let Some(stuck_tx_timeout) = self.stuck_tx_timeout { let removed_txs = storage .transactions_dal() @@ -86,7 +86,7 @@ impl MempoolFetcher { break; } let latency = KEEPER_METRICS.mempool_sync.start(); - let mut storage = self.pool.access_storage_tagged("state_keeper").await?; + let mut storage = self.pool.connection_tagged("state_keeper").await?; let mempool_info = self.mempool.get_mempool_info(); let protocol_version = pending_protocol_version(&mut storage) .await @@ -131,7 +131,7 @@ impl MempoolFetcher { /// Loads nonces for all distinct `transactions` initiators from the storage. async fn get_transaction_nonces( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, transactions: &[Transaction], ) -> anyhow::Result<HashMap<Address, Nonce>> { let (nonce_keys, address_by_nonce_key): (Vec<_>, HashMap<_, _>) = transactions @@ -183,8 +183,8 @@ mod tests { #[tokio::test] async fn getting_transaction_nonces() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let transaction = create_l2_transaction(10, 100); let transaction_initiator = transaction.initiator_account(); @@ -217,8 +217,8 @@ mod tests { #[tokio::test] async fn syncing_mempool_basics() { - let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await .unwrap(); @@ -244,7 +244,7 @@ mod tests { // Add a new transaction to the storage.
let transaction = create_l2_transaction(base_fee, gas_per_pubdata); let transaction_hash = transaction.hash(); - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); storage .transactions_dal() .insert_transaction_l2(transaction, TransactionExecutionMetrics::default()) @@ -274,8 +274,8 @@ mod tests { #[tokio::test] async fn ignoring_transaction_with_insufficient_fee() { - let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await .unwrap(); @@ -298,7 +298,7 @@ mod tests { // Add a transaction with insufficient fee to the storage. let transaction = create_l2_transaction(base_fee / 2, gas_per_pubdata / 2); - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); storage .transactions_dal() .insert_transaction_l2(transaction, TransactionExecutionMetrics::default()) @@ -314,8 +314,8 @@ mod tests { #[tokio::test] async fn ignoring_transaction_with_old_nonce() { - let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await .unwrap(); @@ -344,7 +344,7 @@ mod tests { let transaction_hash = transaction.hash(); let nonce_key = get_nonce_key(&transaction.initiator_account()); let nonce_log = StorageLog::new_write_log(nonce_key, u256_to_h256(42.into())); - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); storage .storage_logs_dal() .append_storage_logs(MiniblockNumber(0), &[(H256::zero(), vec![nonce_log])]) diff --git a/core/lib/zksync_core/src/state_keeper/mod.rs b/core/lib/zksync_core/src/state_keeper/mod.rs index 1e45fd78305..8037d00dd22 100644 --- a/core/lib/zksync_core/src/state_keeper/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/mod.rs @@ -5,7 +5,7 @@ use zksync_config::{ configs::chain::{MempoolConfig, NetworkConfig, StateKeeperConfig}, ContractsConfig, DBConfig, }; -use zksync_dal::{ConnectionPool, Server}; +use zksync_dal::{ConnectionPool, Core}; use zksync_object_store::ObjectStore; pub use self::{ @@ -37,7 +37,7 @@ pub(crate) async fn create_state_keeper( db_config: &DBConfig, network_config: &NetworkConfig, mempool_config: &MempoolConfig, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, mempool: MempoolGuard, batch_fee_input_provider: Arc<dyn BatchFeeModelInputProvider>, miniblock_sealer_handle: MiniblockSealerHandle, diff --git a/core/lib/zksync_core/src/state_keeper/types.rs b/core/lib/zksync_core/src/state_keeper/types.rs index 73120bd4154..d785f93c1ed 100644 --- a/core/lib/zksync_core/src/state_keeper/types.rs +++ b/core/lib/zksync_core/src/state_keeper/types.rs @@ -4,7 +4,7 @@ use std::{ }; use multivm::interface::VmExecutionResultAndLogs; -use zksync_dal::{Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, Core, CoreDal}; use zksync_mempool::{L2TxFilter, MempoolInfo, MempoolStore}; use zksync_types::{ block::BlockGasCount, tx::ExecutionMetrics, Address, Nonce, PriorityOpId, Transaction, @@ -17,10 +17,7 @@ use crate::gas_tracker::{gas_count_from_metrics, gas_count_from_tx_and_metrics}; pub struct
MempoolGuard(Arc<Mutex<MempoolStore>>); impl MempoolGuard { - pub async fn from_storage( - storage_processor: &mut StorageProcessor<'_, Server>, - capacity: u64, - ) -> Self { + pub async fn from_storage(storage_processor: &mut Connection<'_, Core>, capacity: u64) -> Self { let next_priority_id = storage_processor .transactions_dal() .next_priority_id() diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs index adbfcb52f64..830c4a6291c 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs @@ -9,7 +9,7 @@ use serde::Serialize; #[cfg(test)] use tokio::sync::mpsc; use tokio::sync::watch; -use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_types::{ aggregated_operations::AggregatedActionType, api, L1BatchNumber, MiniblockNumber, H256, @@ -126,7 +126,7 @@ struct UpdaterCursor { } impl UpdaterCursor { - async fn new(storage: &mut StorageProcessor<'_, Server>) -> anyhow::Result<Self> { + async fn new(storage: &mut Connection<'_, Core>) -> anyhow::Result<Self> { let first_l1_batch_number = projected_first_l1_batch(storage).await?; // Use the snapshot L1 batch, or the genesis batch if we are not using a snapshot. Technically, the snapshot L1 batch // is not necessarily proven / executed yet, but since it and earlier batches are not stored, it serves @@ -243,7 +243,7 @@ impl UpdaterCursor { #[derive(Debug)] pub struct BatchStatusUpdater { client: Box<dyn MainNodeClient>, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, health_updater: HealthUpdater, sleep_interval: Duration, /// Test-only sender of status changes each time they are produced and applied to the storage. @@ -254,13 +254,13 @@ pub struct BatchStatusUpdater { impl BatchStatusUpdater { const DEFAULT_SLEEP_INTERVAL: Duration = Duration::from_secs(5); - pub fn new(client: HttpClient, pool: ConnectionPool<Server>) -> Self { + pub fn new(client: HttpClient, pool: ConnectionPool<Core>) -> Self { Self::from_parts(Box::new(client), pool, Self::DEFAULT_SLEEP_INTERVAL) } fn from_parts( client: Box<dyn MainNodeClient>, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, sleep_interval: Duration, ) -> Self { Self { @@ -278,7 +278,7 @@ impl BatchStatusUpdater { } pub async fn run(self, stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> { - let mut storage = self.pool.access_storage_tagged("sync_layer").await?; + let mut storage = self.pool.connection_tagged("sync_layer").await?; let mut cursor = UpdaterCursor::new(&mut storage).await?; drop(storage); tracing::info!("Initialized batch status updater cursor: {cursor:?}"); @@ -327,7 +327,7 @@ impl BatchStatusUpdater { let total_latency = EN_METRICS.update_batch_statuses.start(); let Some(last_sealed_batch) = self .pool - .access_storage_tagged("sync_layer") + .connection_tagged("sync_layer") .await?
.blocks_dal() .get_sealed_l1_batch_number() @@ -390,7 +390,7 @@ impl BatchStatusUpdater { changes: StatusChanges, ) -> anyhow::Result<()> { let total_latency = EN_METRICS.batch_status_updater_loop_iteration.start(); - let mut connection = self.pool.access_storage_tagged("sync_layer").await?; + let mut connection = self.pool.connection_tagged("sync_layer").await?; let mut transaction = connection.start_transaction().await?; let last_sealed_batch = transaction .blocks_dal() diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs index 50aa78898b7..7d4a34f333f 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs @@ -6,7 +6,7 @@ use chrono::TimeZone; use test_casing::{test_casing, Product}; use tokio::sync::{watch, Mutex}; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::StorageProcessor; +use zksync_dal::Connection; use zksync_types::{Address, L2ChainId, ProtocolVersionId}; use super::*; @@ -16,7 +16,7 @@ use crate::{ utils::testonly::{create_l1_batch, create_miniblock, prepare_recovery_snapshot}, }; -async fn seal_l1_batch(storage: &mut StorageProcessor<'_, Server>, number: L1BatchNumber) { +async fn seal_l1_batch(storage: &mut Connection<'_, Core>, number: L1BatchNumber) { let mut storage = storage.start_transaction().await.unwrap(); // Insert a mock miniblock so that `get_block_details()` will return values. let miniblock = create_miniblock(number.0); @@ -105,7 +105,7 @@ impl L1BatchStagesMap { } } - async fn assert_storage(&self, storage: &mut StorageProcessor<'_, Server>) { + async fn assert_storage(&self, storage: &mut Connection<'_, Core>) { for (number, stage) in self.iter() { let local_details = storage .blocks_web3_dal() @@ -214,7 +214,7 @@ fn mock_change(number: L1BatchNumber) -> BatchStatusChange { fn mock_updater( client: MockMainNodeClient, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, ) -> (BatchStatusUpdater, mpsc::UnboundedReceiver<StatusChanges>) { let (changes_sender, changes_receiver) = mpsc::unbounded_channel(); let mut updater = @@ -225,8 +225,8 @@ fn mock_updater( #[tokio::test] async fn updater_cursor_for_storage_with_genesis_block() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await .unwrap(); @@ -260,8 +260,8 @@ async fn updater_cursor_for_storage_with_genesis_block() { #[tokio::test] async fn updater_cursor_after_snapshot_recovery() { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; let cursor = UpdaterCursor::new(&mut storage).await.unwrap(); @@ -273,8 +273,8 @@ async fn updater_cursor_after_snapshot_recovery() { #[test_casing(4, Product(([false, true], [false, true])))] #[tokio::test] async fn normal_updater_operation(snapshot_recovery: bool, async_batches: bool) { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let
first_batch_number = if snapshot_recovery { prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; L1BatchNumber(24) @@ -316,7 +316,7 @@ async fn normal_updater_operation(snapshot_recovery: bool, async_batches: bool) let batches_task = if async_batches { let pool = pool.clone(); tokio::spawn(async move { - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); for &number in &batch_numbers { seal_l1_batch(&mut storage, number).await; tokio::time::sleep(Duration::from_millis(15)).await; @@ -345,8 +345,8 @@ async fn normal_updater_operation(snapshot_recovery: bool, async_batches: bool) #[test_casing(2, [false, true])] #[tokio::test] async fn updater_with_gradual_main_node_updates(snapshot_recovery: bool) { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let first_batch_number = if snapshot_recovery { prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; L1BatchNumber(24) @@ -416,10 +416,7 @@ async fn updater_with_gradual_main_node_updates(snapshot_recovery: bool) { test_resuming_updater(pool, target_batch_stages).await; } -async fn test_resuming_updater( - pool: ConnectionPool<Server>, - initial_batch_stages: L1BatchStagesMap, -) { +async fn test_resuming_updater(pool: ConnectionPool<Core>, initial_batch_stages: L1BatchStagesMap) { let target_batch_stages = L1BatchStagesMap::new( initial_batch_stages.first_batch_number, vec![L1BatchStage::Executed; 6], @@ -439,7 +436,7 @@ async fn test_resuming_updater( } } - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); target_batch_stages.assert_storage(&mut storage).await; stop_sender.send_replace(true); updater_task.await.unwrap().expect("updater failed"); diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index fa4e492e769..54d1c8325d9 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -5,7 +5,7 @@ use async_trait::async_trait; use multivm::interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}; use vm_utils::storage::{l1_batch_params, L1BatchParamsProvider}; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; -use zksync_dal::{ConnectionPool, Server, ServerDals}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_types::{ ethabi::Address, fee_model::BatchFeeInput, protocol_upgrade::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, L1BatchNumber, L2ChainId, MiniblockNumber, @@ -44,7 +44,7 @@ const POLL_INTERVAL: Duration = Duration::from_millis(100); #[derive(Debug)] pub struct ExternalIO { miniblock_sealer_handle: MiniblockSealerHandle, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, current_l1_batch_number: L1BatchNumber, current_miniblock_number: MiniblockNumber, @@ -65,7 +65,7 @@ impl ExternalIO { #[allow(clippy::too_many_arguments)] pub async fn new( miniblock_sealer_handle: MiniblockSealerHandle, - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, actions: ActionQueue, sync_state: SyncState, main_node_client: Box<dyn MainNodeClient>, @@ -73,7 +73,7 @@ impl ExternalIO { validation_computational_gas_limit: u32, chain_id: L2ChainId, ) -> anyhow::Result<Self> { - let mut storage = pool.access_storage_tagged("sync_layer").await?; + let mut storage =
pool.connection_tagged("sync_layer").await?; let cursor = IoCursor::new(&mut storage) .await .context("failed initializing I/O cursor")?; @@ -127,7 +127,7 @@ impl ExternalIO { "Getting previous L1 batch hash for L1 batch #{}", self.current_l1_batch_number ); - let mut storage = self.pool.access_storage_tagged("sync_layer").await?; + let mut storage = self.pool.connection_tagged("sync_layer").await?; let wait_latency = KEEPER_METRICS.wait_for_prev_hash_time.start(); let prev_l1_batch_number = self.current_l1_batch_number - 1; let (hash, _) = self @@ -147,7 +147,7 @@ impl ExternalIO { ) -> anyhow::Result { let base_system_contracts = self .pool - .access_storage_tagged("sync_layer") + .connection_tagged("sync_layer") .await? .protocol_versions_dal() .load_base_system_contracts_by_version_id(id as u16) @@ -166,7 +166,7 @@ impl ExternalIO { .context("failed to fetch protocol version from the main node")? .context("protocol version is missing on the main node")?; self.pool - .access_storage_tagged("sync_layer") + .connection_tagged("sync_layer") .await? .protocol_versions_dal() .save_protocol_version( @@ -204,7 +204,7 @@ impl ExternalIO { async fn get_base_system_contract(&self, hash: H256) -> anyhow::Result { let bytecode = self .pool - .access_storage_tagged("sync_layer") + .connection_tagged("sync_layer") .await? .factory_deps_dal() .get_factory_dep(hash) @@ -228,7 +228,7 @@ impl ExternalIO { .context("failed to fetch base system contract bytecode from the main node")? .context("base system contract is missing on the main node")?; self.pool - .access_storage_tagged("sync_layer") + .connection_tagged("sync_layer") .await? .factory_deps_dal() .insert_factory_deps( @@ -270,7 +270,7 @@ impl StateKeeperIO for ExternalIO { } async fn load_pending_batch(&mut self) -> anyhow::Result> { - let mut storage = self.pool.access_storage_tagged("sync_layer").await?; + let mut storage = self.pool.connection_tagged("sync_layer").await?; let pending_miniblock_header = self .l1_batch_params_provider @@ -526,7 +526,7 @@ impl StateKeeperIO for ExternalIO { // We cannot start sealing an L1 batch until we've sealed all miniblocks included in it. 
self.miniblock_sealer_handle.wait_for_all_commands().await; - let mut storage = self.pool.access_storage_tagged("sync_layer").await?; + let mut storage = self.pool.connection_tagged("sync_layer").await?; let fictive_miniblock = updates_manager .seal_l1_batch( &mut storage, @@ -545,7 +545,7 @@ impl StateKeeperIO for ExternalIO { } async fn load_previous_batch_version_id(&mut self) -> anyhow::Result<ProtocolVersionId> { - let mut storage = self.pool.access_storage_tagged("sync_layer").await?; + let mut storage = self.pool.connection_tagged("sync_layer").await?; let prev_l1_batch_number = self.current_l1_batch_number - 1; self.l1_batch_params_provider .load_l1_batch_protocol_version(&mut storage, prev_l1_batch_number) diff --git a/core/lib/zksync_core/src/sync_layer/fetcher.rs b/core/lib/zksync_core/src/sync_layer/fetcher.rs index e8fe5196824..455912006d0 100644 --- a/core/lib/zksync_core/src/sync_layer/fetcher.rs +++ b/core/lib/zksync_core/src/sync_layer/fetcher.rs @@ -1,5 +1,5 @@ use anyhow::Context as _; -use zksync_dal::{Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, Core, CoreDal}; use zksync_types::{ api::en::SyncBlock, block::MiniblockHasher, helpers::unix_timestamp_ms, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, @@ -102,9 +102,7 @@ impl TryFrom<SyncBlock> for FetchedBlock { impl IoCursor { /// Loads this cursor from storage and modifies it to account for the pending L1 batch if necessary. - pub(crate) async fn for_fetcher( - storage: &mut StorageProcessor<'_, Server>, - ) -> anyhow::Result<Self> { + pub(crate) async fn for_fetcher(storage: &mut Connection<'_, Core>) -> anyhow::Result<Self> { let mut this = Self::new(storage).await?; // It's important to know whether we have opened a new batch already or just sealed the previous one. // Depending on it, we must either insert `OpenBatch` item into the queue, or not. diff --git a/core/lib/zksync_core/src/sync_layer/genesis.rs b/core/lib/zksync_core/src/sync_layer/genesis.rs index d2db0c6b594..2e31d45b18f 100644 --- a/core/lib/zksync_core/src/sync_layer/genesis.rs +++ b/core/lib/zksync_core/src/sync_layer/genesis.rs @@ -1,6 +1,6 @@ use anyhow::Context as _; use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SystemContractCode}; -use zksync_dal::{Server, ServerDals, StorageProcessor}; +use zksync_dal::{Connection, Core, CoreDal}; use zksync_types::{ block::DeployedContract, protocol_version::L1VerifierConfig, system_contracts::get_system_smart_contracts, AccountTreeId, L1BatchNumber, L2ChainId, H256, @@ -10,7 +10,7 @@ use super::client::MainNodeClient; use crate::genesis::{ensure_genesis_state, GenesisParams}; pub async fn perform_genesis_if_needed( - storage: &mut StorageProcessor<'_, Server>, + storage: &mut Connection<'_, Core>, zksync_chain_id: L2ChainId, client: &dyn MainNodeClient, ) -> anyhow::Result<()> { diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 5e03e328337..2f04aacaa81 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -9,7 +9,7 @@ use std::{ use test_casing::test_casing; use tokio::{sync::watch, task::JoinHandle}; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_types::{ api, block::MiniblockHasher, @@ -56,7 +56,7 @@ pub(super) struct StateKeeperHandles { impl StateKeeperHandles { /// `tx_hashes` are grouped by the L1 batch.
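The doc comment above describes the grouping contract for `tx_hashes` in the constructor that follows: one group per L1 batch, in order. An illustrative (hypothetical) value:

```rust
use zksync_types::H256;

fn example_tx_hashes() -> Vec<Vec<H256>> {
    vec![
        vec![H256::repeat_byte(1), H256::repeat_byte(2)], // L1 batch #1
        vec![H256::repeat_byte(3)],                       // L1 batch #2
    ]
}
```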
pub async fn new( - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, main_node_client: MockMainNodeClient, actions: ActionQueue, tx_hashes: &[&[H256]], @@ -125,7 +125,7 @@ impl StateKeeperHandles { } } -async fn ensure_genesis(storage: &mut StorageProcessor<'_, Server>) { +async fn ensure_genesis(storage: &mut Connection<'_, Core>) { if storage.blocks_dal().is_genesis_needed().await.unwrap() { ensure_genesis_state(storage, L2ChainId::default(), &GenesisParams::mock()) .await @@ -163,8 +163,8 @@ fn genesis_snapshot_recovery_status() -> SnapshotRecoveryStatus { #[test_casing(2, [false, true])] #[tokio::test] async fn external_io_basics(snapshot_recovery: bool) { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let snapshot = if snapshot_recovery { prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await } else { @@ -235,8 +235,8 @@ async fn external_io_basics(snapshot_recovery: bool) { #[test_casing(2, [false, true])] #[tokio::test] async fn external_io_works_without_local_protocol_version(snapshot_recovery: bool) { - let pool = ConnectionPool::<Server>::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); + let pool = ConnectionPool::<Core>::test_pool().await; + let mut storage = pool.connection().await.unwrap(); let snapshot = if snapshot_recovery { prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await } else { @@ -314,10 +314,10 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo } pub(super) async fn run_state_keeper_with_multiple_miniblocks( - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, snapshot_recovery: bool, ) -> (SnapshotRecoveryStatus, Vec<H256>) { - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); let snapshot = if snapshot_recovery { prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await } else { @@ -374,7 +374,7 @@ pub(super) async fn run_state_keeper_with_multiple_miniblocks( #[test_casing(2, [false, true])] #[tokio::test] async fn external_io_with_multiple_miniblocks(snapshot_recovery: bool) { - let pool = ConnectionPool::<Server>::test_pool().await; + let pool = ConnectionPool::<Core>::test_pool().await; let (snapshot, tx_hashes) = run_state_keeper_with_multiple_miniblocks(pool.clone(), snapshot_recovery).await; assert_eq!(tx_hashes.len(), 8); @@ -384,7 +384,7 @@ async fn external_io_with_multiple_miniblocks(snapshot_recovery: bool) { (snapshot.miniblock_number + 1, &tx_hashes[..5]), (snapshot.miniblock_number + 2, &tx_hashes[5..]), ]; - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); for (number, expected_tx_hashes) in tx_hashes_by_miniblock { let miniblock = storage .blocks_dal() .get_miniblock_header(number) @@ -413,7 +413,7 @@ async fn external_io_with_multiple_miniblocks(snapshot_recovery: bool) { } async fn test_external_io_recovery( - pool: ConnectionPool<Server>, + pool: ConnectionPool<Core>, snapshot: &SnapshotRecoveryStatus, mut tx_hashes: Vec<H256>, ) { @@ -448,7 +448,7 @@ async fn test_external_io_recovery( .wait(|state| state.get_local_block() == snapshot.miniblock_number + 3) .await; - let mut storage = pool.access_storage().await.unwrap(); + let mut storage = pool.connection().await.unwrap(); let miniblock = storage .blocks_dal() .get_miniblock_header(snapshot.miniblock_number + 3) @@ -459,9 +459,9 @@
     assert_eq!(miniblock.timestamp, snapshot.miniblock_timestamp + 3);
 }
 
-pub(super) async fn mock_l1_batch_hash_computation(pool: ConnectionPool<Server>, number: u32) {
+pub(super) async fn mock_l1_batch_hash_computation(pool: ConnectionPool<Core>, number: u32) {
     loop {
-        let mut storage = pool.access_storage().await.unwrap();
+        let mut storage = pool.connection().await.unwrap();
         let last_l1_batch_number = storage
             .blocks_dal()
             .get_sealed_l1_batch_number()
@@ -484,10 +484,10 @@ pub(super) async fn mock_l1_batch_hash_computation(pool: ConnectionPool<Server>,
 
 /// Returns tx hashes of all generated transactions, grouped by the L1 batch.
 pub(super) async fn run_state_keeper_with_multiple_l1_batches(
-    pool: ConnectionPool<Server>,
+    pool: ConnectionPool<Core>,
     snapshot_recovery: bool,
 ) -> (SnapshotRecoveryStatus, Vec<Vec<H256>>) {
-    let mut storage = pool.access_storage().await.unwrap();
+    let mut storage = pool.connection().await.unwrap();
     let snapshot = if snapshot_recovery {
         prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await
     } else {
@@ -551,10 +551,10 @@ pub(super) async fn run_state_keeper_with_multiple_l1_batches(
 
 #[tokio::test]
 async fn external_io_with_multiple_l1_batches() {
-    let pool = ConnectionPool::<Server>::test_pool().await;
+    let pool = ConnectionPool::<Core>::test_pool().await;
     run_state_keeper_with_multiple_l1_batches(pool.clone(), false).await;
 
-    let mut storage = pool.access_storage().await.unwrap();
+    let mut storage = pool.connection().await.unwrap();
     let l1_batch_header = storage
         .blocks_dal()
         .get_l1_batch_header(L1BatchNumber(1))
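
The test updates follow one pattern throughout: construct a typed test pool, then grab a connection. A hedged sketch of that pattern (`pool_smoke_test` is illustrative; like the tests above, it requires a test Postgres instance for `test_pool()`):

    use zksync_dal::{ConnectionPool, Core, CoreDal};

    #[tokio::test]
    async fn pool_smoke_test() {
        // `ConnectionPool` is generic over the DB marker type after this change.
        let pool = ConnectionPool::<Core>::test_pool().await;
        // `connection()` replaces `access_storage()`.
        let mut storage = pool.connection().await.unwrap();
        let sealed = storage.blocks_dal().get_sealed_l1_batch_number().await;
        assert!(sealed.is_ok());
    }
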
diff --git a/core/lib/zksync_core/src/utils/mod.rs b/core/lib/zksync_core/src/utils/mod.rs
index 7e667d7a02b..645d7422ff7 100644
--- a/core/lib/zksync_core/src/utils/mod.rs
+++ b/core/lib/zksync_core/src/utils/mod.rs
@@ -9,7 +9,7 @@ use std::{
 use anyhow::Context as _;
 use async_trait::async_trait;
 use tokio::sync::watch;
-use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
 use zksync_types::{L1BatchNumber, ProtocolVersionId};
 
 #[cfg(test)]
@@ -58,7 +58,7 @@ pub(crate) async fn binary_search_with(
 ///
 /// Returns the number of the *earliest* L1 batch, or `None` if the stop signal is received.
 pub(crate) async fn wait_for_l1_batch(
-    pool: &ConnectionPool<Server>,
+    pool: &ConnectionPool<Core>,
     poll_interval: Duration,
     stop_receiver: &mut watch::Receiver<bool>,
 ) -> anyhow::Result<Option<L1BatchNumber>> {
@@ -67,7 +67,7 @@ pub(crate) async fn wait_for_l1_batch(
         return Ok(None);
     }
 
-    let mut storage = pool.access_storage().await?;
+    let mut storage = pool.connection().await?;
     let sealed_l1_batch_number = storage.blocks_dal().get_earliest_l1_batch_number().await?;
     drop(storage);
 
@@ -89,7 +89,7 @@ pub(crate) async fn wait_for_l1_batch(
 ///
 /// Returns the number of the *earliest* L1 batch with metadata, or `None` if the stop signal is received.
 pub(crate) async fn wait_for_l1_batch_with_metadata(
-    pool: &ConnectionPool<Server>,
+    pool: &ConnectionPool<Core>,
     poll_interval: Duration,
     stop_receiver: &mut watch::Receiver<bool>,
 ) -> anyhow::Result<Option<L1BatchNumber>> {
@@ -98,7 +98,7 @@ pub(crate) async fn wait_for_l1_batch_with_metadata(
         return Ok(None);
     }
 
-    let mut storage = pool.access_storage().await?;
+    let mut storage = pool.connection().await?;
     let sealed_l1_batch_number = storage
         .blocks_dal()
         .get_earliest_l1_batch_number_with_metadata()
@@ -120,7 +120,7 @@ pub(crate) async fn wait_for_l1_batch_with_metadata(
 /// Returns the projected number of the first locally available L1 batch. The L1 batch is **not**
 /// guaranteed to be present in the storage!
 pub(crate) async fn projected_first_l1_batch(
-    storage: &mut StorageProcessor<'_, Server>,
+    storage: &mut Connection<'_, Core>,
 ) -> anyhow::Result<L1BatchNumber> {
     let snapshot_recovery = storage
         .snapshot_recovery_dal()
@@ -133,7 +133,7 @@ pub(crate) async fn projected_first_l1_batch(
 /// Obtains a protocol version projected to be applied for the next miniblock. This is either the version used by the last
 /// sealed miniblock, or (if there are no miniblocks), one referenced in the snapshot recovery record.
 pub(crate) async fn pending_protocol_version(
-    storage: &mut StorageProcessor<'_, Server>,
+    storage: &mut Connection<'_, Core>,
 ) -> anyhow::Result<ProtocolVersionId> {
     static WARNED_ABOUT_NO_VERSION: AtomicBool = AtomicBool::new(false);
 
@@ -181,13 +181,13 @@ mod tests {
 
     #[tokio::test]
     async fn waiting_for_l1_batch_success() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
+        let pool = ConnectionPool::<Core>::test_pool().await;
         let (_stop_sender, mut stop_receiver) = watch::channel(false);
 
         let pool_copy = pool.clone();
         tokio::spawn(async move {
             tokio::time::sleep(Duration::from_millis(25)).await;
-            let mut storage = pool_copy.access_storage().await.unwrap();
+            let mut storage = pool_copy.connection().await.unwrap();
             ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock())
                 .await
                 .unwrap();
@@ -201,7 +201,7 @@ mod tests {
 
     #[tokio::test]
     async fn waiting_for_l1_batch_cancellation() {
-        let pool = ConnectionPool::<Server>::test_pool().await;
+        let pool = ConnectionPool::<Core>::test_pool().await;
         let (stop_sender, mut stop_receiver) = watch::channel(false);
 
         tokio::spawn(async move {
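
Usage sketch for the polling helper patched above (hedged: `wait_for_l1_batch` is crate-private, so this only illustrates the signature shown in the hunk; `wait_example` is a hypothetical wrapper):

    use std::time::Duration;
    use tokio::sync::watch;
    use zksync_dal::{ConnectionPool, Core};

    async fn wait_example(pool: ConnectionPool<Core>) -> anyhow::Result<()> {
        let (_stop_sender, mut stop_receiver) = watch::channel(false);
        // Polls Postgres every 50 ms until the earliest L1 batch shows up,
        // or returns `Ok(None)` once the stop signal flips to `true`.
        let earliest =
            wait_for_l1_batch(&pool, Duration::from_millis(50), &mut stop_receiver).await?;
        tracing::info!("earliest L1 batch: {earliest:?}");
        Ok(())
    }
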
diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs
index 452bb4c9783..202b50515e9 100644
--- a/core/lib/zksync_core/src/utils/testonly.rs
+++ b/core/lib/zksync_core/src/utils/testonly.rs
@@ -3,7 +3,7 @@ use std::collections::HashMap;
 
 use multivm::utils::get_max_gas_per_pubdata_byte;
 use zksync_contracts::BaseSystemContractsHashes;
-use zksync_dal::{Server, ServerDals, StorageProcessor};
+use zksync_dal::{Connection, Core, CoreDal};
 use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction};
 use zksync_system_constants::ZKPORTER_IS_AVAILABLE;
 use zksync_types::{
@@ -201,7 +201,7 @@ impl Snapshot {
 
 /// Prepares a recovery snapshot without performing genesis.
 pub(crate) async fn prepare_recovery_snapshot(
-    storage: &mut StorageProcessor<'_, Server>,
+    storage: &mut Connection<'_, Core>,
     l1_batch: L1BatchNumber,
     miniblock: MiniblockNumber,
     storage_logs: &[StorageLog],
@@ -210,7 +210,7 @@ pub(crate) async fn prepare_recovery_snapshot(
 }
 
 /// Takes a storage snapshot at the last sealed L1 batch.
-pub(crate) async fn snapshot(storage: &mut StorageProcessor<'_, Server>) -> Snapshot {
+pub(crate) async fn snapshot(storage: &mut Connection<'_, Core>) -> Snapshot {
     let l1_batch = storage
         .blocks_dal()
         .get_sealed_l1_batch_number()
@@ -259,7 +259,7 @@ pub(crate) async fn snapshot(storage: &mut StorageProcessor<'_, Server>) -> Snap
 
 /// Recovers storage from a snapshot.
 /// Miniblock and L1 batch are intentionally **not** inserted into the storage.
 pub(crate) async fn recover(
-    storage: &mut StorageProcessor<'_, Server>,
+    storage: &mut Connection<'_, Core>,
     snapshot: Snapshot,
 ) -> SnapshotRecoveryStatus {
     let mut storage = storage.start_transaction().await.unwrap();
diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs
index 105e2c424da..43fad775034 100644
--- a/core/node/node_framework/src/implementations/layers/eth_watch.rs
+++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs
@@ -3,7 +3,7 @@ use std::time::Duration;
 use zksync_config::{ContractsConfig, ETHWatchConfig};
 use zksync_contracts::governance_contract;
 use zksync_core::eth_watch::{client::EthHttpQueryClient, EthWatch};
-use zksync_dal::{ConnectionPool, Server};
+use zksync_dal::{ConnectionPool, Core};
 use zksync_types::{ethabi::Contract, Address};
 
 use crate::{
@@ -60,7 +60,7 @@ impl WiringLayer for EthWatchLayer {
 
 #[derive(Debug)]
 struct EthWatchTask {
-    main_pool: ConnectionPool<Server>,
+    main_pool: ConnectionPool<Core>,
     client: EthHttpQueryClient,
     governance_contract: Option<Contract>,
     diamond_proxy_address: Address,
diff --git a/core/node/node_framework/src/implementations/layers/house_keeper.rs b/core/node/node_framework/src/implementations/layers/house_keeper.rs
index 16b8029c733..20d0902199e 100644
--- a/core/node/node_framework/src/implementations/layers/house_keeper.rs
+++ b/core/node/node_framework/src/implementations/layers/house_keeper.rs
@@ -16,7 +16,7 @@ use zksync_core::house_keeper::{
     periodic_job::PeriodicJob,
     waiting_to_queued_fri_witness_job_mover::WaitingToQueuedFriWitnessJobMover,
 };
-use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Server};
+use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core};
 
 use crate::{
     implementations::resources::pools::{ProverPoolResource, ReplicaPoolResource},
@@ -167,7 +167,7 @@ impl WiringLayer for HouseKeeperLayer {
 
 #[derive(Debug)]
 struct PoolForMetricsTask {
-    pool_for_metrics: ConnectionPool<Server>,
+    pool_for_metrics: ConnectionPool<Core>,
 }
 
 #[async_trait::async_trait]
diff --git a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs
index 7a7e4d8d976..bd7bba618ba 100644
--- a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs
+++ b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs
@@ -1,5 +1,5 @@
 use zksync_core::metadata_calculator::{MetadataCalculator, MetadataCalculatorConfig};
-use zksync_dal::{ConnectionPool, Server};
+use zksync_dal::{ConnectionPool, Core};
 use zksync_storage::RocksDB;
 
 use crate::{
@@ -26,7 +26,7 @@ pub struct MetadataCalculatorLayer(pub MetadataCalculatorConfig);
 #[derive(Debug)]
 pub struct MetadataCalculatorTask {
     metadata_calculator: MetadataCalculator,
-    main_pool: ConnectionPool<Server>,
+    main_pool: ConnectionPool<Core>,
 }
 
 #[async_trait::async_trait]
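
The `PoolForMetricsTask` above only carries a pool; its run body presumably hands that pool to the Postgres metrics scraper imported at the top of the file. A hedged sketch (the `run_scraping` entry point and the 60 s interval are assumptions, not shown in these hunks):

    use std::time::Duration;
    use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core};

    async fn scrape_postgres_metrics(pool_for_metrics: ConnectionPool<Core>) {
        // Periodically samples DB-level stats (e.g. table sizes) into metrics.
        PostgresMetrics::run_scraping(pool_for_metrics, Duration::from_secs(60)).await;
    }
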
diff --git a/core/node/node_framework/src/implementations/layers/pools_layer.rs b/core/node/node_framework/src/implementations/layers/pools_layer.rs
index 5c2c7706bc0..83ddd947b10 100644
--- a/core/node/node_framework/src/implementations/layers/pools_layer.rs
+++ b/core/node/node_framework/src/implementations/layers/pools_layer.rs
@@ -1,6 +1,6 @@
 use prover_dal::Prover;
 use zksync_config::configs::PostgresConfig;
-use zksync_dal::{ConnectionPool, Server};
+use zksync_dal::{ConnectionPool, Core};
 
 use crate::{
     implementations::resources::pools::{
@@ -75,7 +75,7 @@ impl WiringLayer for PoolsLayer {
         }
 
         if self.with_master {
-            let mut master_pool = ConnectionPool::<Server>::builder(
+            let mut master_pool = ConnectionPool::<Core>::builder(
                 self.config.master_url()?,
                 self.config.max_connections()?,
             );
@@ -84,7 +84,7 @@ impl WiringLayer for PoolsLayer {
         }
 
         if self.with_replica {
-            let mut replica_pool = ConnectionPool::<Server>::builder(
+            let mut replica_pool = ConnectionPool::<Core>::builder(
                 self.config.replica_url()?,
                 self.config.max_connections()?,
             );
diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs
index 493a3dcc1be..87388c0b95c 100644
--- a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs
+++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs
@@ -2,7 +2,7 @@ use std::sync::Arc;
 
 use zksync_config::{configs::ProofDataHandlerConfig, ContractsConfig};
 use zksync_core::proof_data_handler;
-use zksync_dal::{ConnectionPool, Server};
+use zksync_dal::{ConnectionPool, Core};
 use zksync_object_store::ObjectStore;
 
 use crate::{
@@ -65,7 +65,7 @@ struct ProofDataHandlerTask {
     proof_data_handler_config: ProofDataHandlerConfig,
     contracts_config: ContractsConfig,
     blob_store: Arc<dyn ObjectStore>,
-    main_pool: ConnectionPool<Server>,
+    main_pool: ConnectionPool<Core>,
 }
 
 #[async_trait::async_trait]
diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs
index 8d0044db772..057bc95ea67 100644
--- a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs
+++ b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs
@@ -54,7 +54,7 @@ impl MempoolIOLayer {
             .await
             .context("Get connection pool")?;
         let mut storage = connection_pool
-            .access_storage()
+            .connection()
             .await
             .context("Access storage to build mempool")?;
         let mempool = MempoolGuard::from_storage(&mut storage, self.mempool_config.capacity).await;
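
The builder calls above show how the marker type threads through pool construction. A hedged sketch of the pattern (the URL and size parameters are placeholders; `build_master_pool` is illustrative only):

    use zksync_dal::{ConnectionPool, Core};

    async fn build_master_pool(
        database_url: &str,
        max_connections: u32,
    ) -> anyhow::Result<ConnectionPool<Core>> {
        // The marker type (`Core` here, `Prover` for the prover DB) is part of the
        // pool's type, so mixing up the two databases is a compile-time error.
        ConnectionPool::<Core>::builder(database_url, max_connections)
            .build()
            .await
    }
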
diff --git a/core/node/node_framework/src/implementations/resources/pools.rs b/core/node/node_framework/src/implementations/resources/pools.rs
index 39866723783..da71007f6e4 100644
--- a/core/node/node_framework/src/implementations/resources/pools.rs
+++ b/core/node/node_framework/src/implementations/resources/pools.rs
@@ -4,8 +4,8 @@ use std::sync::{
 };
 
 use prover_dal::Prover;
-use zksync_dal::{ConnectionPool, Server};
-use zksync_db_connection::connection::ConnectionPoolBuilder;
+use zksync_dal::{ConnectionPool, Core};
+use zksync_db_connection::connection_pool::ConnectionPoolBuilder;
 
 use crate::resource::Resource;
 
@@ -13,7 +13,7 @@ use crate::resource::Resource;
 #[derive(Debug, Clone)]
 pub struct MasterPoolResource {
     connections_count: Arc<AtomicU32>,
-    builder: ConnectionPoolBuilder<Server>,
+    builder: ConnectionPoolBuilder<Core>,
 }
 
 impl Resource for MasterPoolResource {
@@ -23,14 +23,14 @@ impl Resource for MasterPoolResource {
 }
 
 impl MasterPoolResource {
-    pub fn new(builder: ConnectionPoolBuilder<Server>) -> Self {
+    pub fn new(builder: ConnectionPoolBuilder<Core>) -> Self {
         Self {
             connections_count: Arc::new(AtomicU32::new(0)),
             builder,
         }
     }
 
-    pub async fn get(&self) -> anyhow::Result<ConnectionPool<Server>> {
+    pub async fn get(&self) -> anyhow::Result<ConnectionPool<Core>> {
         let result = self.builder.build().await;
 
         if result.is_ok() {
@@ -45,11 +45,11 @@ impl MasterPoolResource {
         result
     }
 
-    pub async fn get_singleton(&self) -> anyhow::Result<ConnectionPool<Server>> {
+    pub async fn get_singleton(&self) -> anyhow::Result<ConnectionPool<Core>> {
         self.get_custom(1).await
     }
 
-    pub async fn get_custom(&self, size: u32) -> anyhow::Result<ConnectionPool<Server>> {
+    pub async fn get_custom(&self, size: u32) -> anyhow::Result<ConnectionPool<Core>> {
         let result = self.builder.clone().set_max_size(size).build().await;
 
         if result.is_ok() {
@@ -68,7 +68,7 @@ impl MasterPoolResource {
 #[derive(Debug, Clone)]
 pub struct ReplicaPoolResource {
     connections_count: Arc<AtomicU32>,
-    builder: ConnectionPoolBuilder<Server>,
+    builder: ConnectionPoolBuilder<Core>,
 }
 
 impl Resource for ReplicaPoolResource {
@@ -78,14 +78,14 @@ impl Resource for ReplicaPoolResource {
 }
 
 impl ReplicaPoolResource {
-    pub fn new(builder: ConnectionPoolBuilder<Server>) -> Self {
+    pub fn new(builder: ConnectionPoolBuilder<Core>) -> Self {
         Self {
             connections_count: Arc::new(AtomicU32::new(0)),
             builder,
         }
     }
 
-    pub async fn get(&self) -> anyhow::Result<ConnectionPool<Server>> {
+    pub async fn get(&self) -> anyhow::Result<ConnectionPool<Core>> {
         let result = self.builder.build().await;
 
         if result.is_ok() {
@@ -100,11 +100,11 @@ impl ReplicaPoolResource {
         result
    }
 
-    pub async fn get_singleton(&self) -> anyhow::Result<ConnectionPool<Server>> {
+    pub async fn get_singleton(&self) -> anyhow::Result<ConnectionPool<Core>> {
         self.get_custom(1).await
     }
 
-    pub async fn get_custom(&self, size: u32) -> anyhow::Result<ConnectionPool<Server>> {
+    pub async fn get_custom(&self, size: u32) -> anyhow::Result<ConnectionPool<Core>> {
         let result = self.builder.clone().set_max_size(size).build().await;
 
         if result.is_ok() {
diff --git a/prover/proof_fri_compressor/src/compressor.rs b/prover/proof_fri_compressor/src/compressor.rs
index aefda558c3a..0e7fced7673 100644
--- a/prover/proof_fri_compressor/src/compressor.rs
+++ b/prover/proof_fri_compressor/src/compressor.rs
@@ -3,7 +3,7 @@ use std::{sync::Arc, time::Instant};
 use anyhow::Context as _;
 use async_trait::async_trait;
 use circuit_sequencer_api::proof::FinalProof;
-use prover_dal::{ConnectionPool, Prover, ProverDals};
+use prover_dal::{ConnectionPool, Prover, ProverDal};
 use tokio::task::JoinHandle;
 use zkevm_test_harness::proof_wrapper_utils::{wrap_proof, WrapperConfig};
 use zkevm_test_harness_1_3_3::{
@@ -131,7 +131,7 @@ impl JobProcessor for ProofCompressor {
     const SERVICE_NAME: &'static str = "ProofCompressor";
 
     async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> {
-        let mut conn = self.pool.access_storage().await.unwrap();
+        let mut conn = self.pool.connection().await.unwrap();
         let pod_name = get_current_pod_name();
         let Some(l1_batch_number) = conn
             .fri_proof_compressor_dal()
@@ -170,7 +170,7 @@ impl JobProcessor for ProofCompressor {
 
     async fn save_failure(&self, job_id: Self::JobId, _started_at: Instant, error: String) {
         self.pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_proof_compressor_dal()
@@ -224,7 +224,7 @@ impl JobProcessor for ProofCompressor {
             .observe(blob_save_started_at.elapsed());
 
         self.pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_proof_compressor_dal()
@@ -240,7 +240,7 @@ impl JobProcessor for ProofCompressor {
     async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result<u32> {
         let mut prover_storage = self
             .pool
-            .access_storage()
+            .connection()
             .await
             .context("failed to acquire DB connection for ProofCompressor")?;
         prover_storage
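
On the prover side, the trait rename (`ProverDals` to `ProverDal`) keeps the accessor pattern intact: a `Connection<'_, Prover>` hands out one DAL struct per table group. A hedged sketch (`inspect_prover_db` is illustrative only):

    use prover_dal::{ConnectionPool, Prover, ProverDal};

    async fn inspect_prover_db(pool: &ConnectionPool<Prover>) -> anyhow::Result<()> {
        let mut conn = pool.connection().await?;
        // Each `*_dal()` accessor borrows the connection mutably, so only one DAL
        // struct can be alive at a time.
        let _compressor_dal = conn.fri_proof_compressor_dal();
        Ok(())
    }
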
diff --git a/prover/prover_dal/src/fri_gpu_prover_queue_dal.rs b/prover/prover_dal/src/fri_gpu_prover_queue_dal.rs
index 1d31f306c0f..231f1b6599d 100644
--- a/prover/prover_dal/src/fri_gpu_prover_queue_dal.rs
+++ b/prover/prover_dal/src/fri_gpu_prover_queue_dal.rs
@@ -1,13 +1,13 @@
 use std::time::Duration;
 
 use zksync_basic_types::prover_dal::{GpuProverInstanceStatus, SocketAddress};
-use zksync_db_connection::processor::StorageProcessor;
+use zksync_db_connection::connection::Connection;
 
 use crate::{pg_interval_from_duration, Prover};
 
 #[derive(Debug)]
 pub struct FriGpuProverQueueDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Prover>,
+    pub(crate) storage: &'a mut Connection<'c, Prover>,
 }
 
 impl FriGpuProverQueueDal<'_, '_> {
diff --git a/prover/prover_dal/src/fri_proof_compressor_dal.rs b/prover/prover_dal/src/fri_proof_compressor_dal.rs
index ffcba4a8bf3..01231d33b00 100644
--- a/prover/prover_dal/src/fri_proof_compressor_dal.rs
+++ b/prover/prover_dal/src/fri_proof_compressor_dal.rs
@@ -7,13 +7,13 @@ use zksync_basic_types::{
     prover_dal::{JobCountStatistics, StuckJobs},
     L1BatchNumber,
 };
-use zksync_db_connection::processor::StorageProcessor;
+use zksync_db_connection::connection::Connection;
 
 use crate::{duration_to_naive_time, pg_interval_from_duration, Prover};
 
 #[derive(Debug)]
 pub struct FriProofCompressorDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Prover>,
+    pub(crate) storage: &'a mut Connection<'c, Prover>,
 }
 
 #[derive(Debug, EnumString, Display)]
diff --git a/prover/prover_dal/src/fri_protocol_versions_dal.rs b/prover/prover_dal/src/fri_protocol_versions_dal.rs
index acf4a35d1d4..d6f192d9af4 100644
--- a/prover/prover_dal/src/fri_protocol_versions_dal.rs
+++ b/prover/prover_dal/src/fri_protocol_versions_dal.rs
@@ -1,13 +1,13 @@
 use std::convert::TryFrom;
 
 use zksync_basic_types::protocol_version::{FriProtocolVersionId, L1VerifierConfig};
-use zksync_db_connection::processor::StorageProcessor;
+use zksync_db_connection::connection::Connection;
 
 use crate::Prover;
 
 #[derive(Debug)]
 pub struct FriProtocolVersionsDal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c, Prover>,
+    pub storage: &'a mut Connection<'c, Prover>,
 }
 
 impl FriProtocolVersionsDal<'_, '_> {
diff --git a/prover/prover_dal/src/fri_prover_dal.rs b/prover/prover_dal/src/fri_prover_dal.rs
index e7eba9f11ba..b2a1011d135 100644
--- a/prover/prover_dal/src/fri_prover_dal.rs
+++ b/prover/prover_dal/src/fri_prover_dal.rs
@@ -8,14 +8,14 @@ use zksync_basic_types::{
     L1BatchNumber,
 };
 use zksync_db_connection::{
-    instrument::InstrumentExt, metrics::MethodLatency, processor::StorageProcessor,
+    connection::Connection, instrument::InstrumentExt, metrics::MethodLatency,
 };
 
 use crate::{duration_to_naive_time, pg_interval_from_duration, Prover};
 
 #[derive(Debug)]
 pub struct FriProverDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Prover>,
+    pub(crate) storage: &'a mut Connection<'c, Prover>,
 }
 
 impl FriProverDal<'_, '_> {
diff --git a/prover/prover_dal/src/fri_scheduler_dependency_tracker_dal.rs b/prover/prover_dal/src/fri_scheduler_dependency_tracker_dal.rs
index 0182e038877..5e47fb1ec06 100644
--- a/prover/prover_dal/src/fri_scheduler_dependency_tracker_dal.rs
+++ b/prover/prover_dal/src/fri_scheduler_dependency_tracker_dal.rs
@@ -1,13 +1,13 @@
 use zksync_basic_types::{
     basic_fri_types::FinalProofIds, prover_dal::EIP_4844_CIRCUIT_ID, L1BatchNumber,
 };
-use zksync_db_connection::processor::StorageProcessor;
+use zksync_db_connection::connection::Connection;
 
 use crate::Prover;
 
 #[derive(Debug)]
 pub struct FriSchedulerDependencyTrackerDal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c, Prover>,
+    pub storage: &'a mut Connection<'c, Prover>,
 }
 
 impl FriSchedulerDependencyTrackerDal<'_, '_> {
diff --git a/prover/prover_dal/src/fri_witness_generator_dal.rs b/prover/prover_dal/src/fri_witness_generator_dal.rs
index f9be7912ebd..703b4aa2a5e 100644
--- a/prover/prover_dal/src/fri_witness_generator_dal.rs
+++ b/prover/prover_dal/src/fri_witness_generator_dal.rs
@@ -10,13 +10,13 @@ use zksync_basic_types::{
     },
     L1BatchNumber,
 };
-use zksync_db_connection::{metrics::MethodLatency, processor::StorageProcessor};
+use zksync_db_connection::{connection::Connection, metrics::MethodLatency};
 
 use crate::{duration_to_naive_time, pg_interval_from_duration, Prover};
 
 #[derive(Debug)]
 pub struct FriWitnessGeneratorDal<'a, 'c> {
-    pub(crate) storage: &'a mut StorageProcessor<'c, Prover>,
+    pub(crate) storage: &'a mut Connection<'c, Prover>,
 }
 
 #[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)]
diff --git a/prover/prover_dal/src/lib.rs b/prover/prover_dal/src/lib.rs
index af030f7d0da..011ffd8e08e 100644
--- a/prover/prover_dal/src/lib.rs
+++ b/prover/prover_dal/src/lib.rs
@@ -1,7 +1,7 @@
-use zksync_db_connection::processor::StorageMarker;
+use zksync_db_connection::connection::DbMarker;
 pub use zksync_db_connection::{
-    connection::ConnectionPool,
-    processor::StorageProcessor,
+    connection::Connection,
+    connection_pool::ConnectionPool,
     utils::{duration_to_naive_time, pg_interval_from_duration},
 };
 
@@ -27,7 +27,7 @@ mod private {
 
 // Here we are making the trait sealed, because it should be public to function correctly, but we don't
 // want to allow any other downstream implementations of this trait.
-pub trait ProverDals<'a>: private::Sealed
+pub trait ProverDal<'a>: private::Sealed
 where
     Self: 'a,
 {
@@ -47,12 +47,12 @@ where
 #[derive(Clone, Debug)]
 pub struct Prover;
 
-// Implement the marker trait for the Prover to be able to use it in StorageProcessor.
-impl StorageMarker for Prover {}
+// Implement the marker trait for the Prover to be able to use it in Connection.
+impl DbMarker for Prover {}
 
 // Implement the sealed trait for the StorageProcessor.
-impl private::Sealed for StorageProcessor<'_, Prover> {}
+impl private::Sealed for Connection<'_, Prover> {}
 
-impl<'a> ProverDals<'a> for StorageProcessor<'a, Prover> {
+impl<'a> ProverDal<'a> for Connection<'a, Prover> {
     fn fri_witness_generator_dal(&mut self) -> FriWitnessGeneratorDal<'_, 'a> {
         FriWitnessGeneratorDal { storage: self }
     }
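
The `private::Sealed` supertrait used above is the standard sealing idiom. A self-contained sketch of the pattern with generic names (not the crate's own types):

    mod private {
        pub trait Sealed {}
    }

    /// Publicly nameable, but only this crate can implement it, because the
    /// `Sealed` supertrait lives in a private module.
    pub trait QueueDal: private::Sealed {
        fn queue_len(&self) -> usize;
    }

    pub struct Connection {
        pending: Vec<u32>,
    }

    impl private::Sealed for Connection {}

    impl QueueDal for Connection {
        fn queue_len(&self) -> usize {
            self.pending.len()
        }
    }
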
diff --git a/prover/prover_fri/src/gpu_prover_job_processor.rs b/prover/prover_fri/src/gpu_prover_job_processor.rs
index 481dda7e4f9..c2748f005de 100644
--- a/prover/prover_fri/src/gpu_prover_job_processor.rs
+++ b/prover/prover_fri/src/gpu_prover_job_processor.rs
@@ -3,7 +3,7 @@ pub mod gpu_prover {
     use std::{collections::HashMap, sync::Arc, time::Instant};
 
     use anyhow::Context as _;
-    use prover_dal::{ConnectionPool, ProverDals};
+    use prover_dal::{ConnectionPool, ProverDal};
     use shivini::{
         gpu_proof_config::GpuProofConfig, gpu_prove_from_external_witness_data, ProverContext,
     };
@@ -224,7 +224,7 @@ pub mod gpu_prover {
                 Ok(item) => {
                     if is_full {
                         self.prover_connection_pool
-                            .access_storage()
+                            .connection()
                             .await
                             .unwrap()
                             .fri_gpu_prover_queue_dal()
@@ -249,7 +249,7 @@ pub mod gpu_prover {
 
         async fn save_failure(&self, job_id: Self::JobId, _started_at: Instant, error: String) {
             self.prover_connection_pool
-                .access_storage()
+                .connection()
                 .await
                 .unwrap()
                 .fri_prover_jobs_dal()
@@ -281,7 +281,7 @@ pub mod gpu_prover {
         ) -> anyhow::Result<()> {
             METRICS.gpu_total_proving_time.observe(started_at.elapsed());
 
-            let mut storage_processor = self.prover_connection_pool.access_storage().await.unwrap();
+            let mut storage_processor = self.prover_connection_pool.connection().await.unwrap();
             save_proof(
                 job_id,
                 started_at,
@@ -302,7 +302,7 @@ pub mod gpu_prover {
         async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result<u32> {
             let mut prover_storage = self
                 .prover_connection_pool
-                .access_storage()
+                .connection()
                 .await
                 .context("failed to acquire DB connection for Prover")?;
             prover_storage
diff --git a/prover/prover_fri/src/main.rs b/prover/prover_fri/src/main.rs
index dc1a459253b..29dcf5ea693 100644
--- a/prover/prover_fri/src/main.rs
+++ b/prover/prover_fri/src/main.rs
@@ -4,7 +4,7 @@ use std::{future::Future, sync::Arc};
 use anyhow::Context as _;
 use local_ip_address::local_ip;
 use prometheus_exporter::PrometheusExporterConfig;
-use prover_dal::{ConnectionPool, Prover, ProverDals};
+use prover_dal::{ConnectionPool, Prover, ProverDal};
 use tokio::{
     sync::{oneshot, watch::Receiver},
     task::JoinHandle,
@@ -44,7 +44,7 @@ async fn graceful_shutdown(port: u16) -> anyhow::Result
     let zone = get_zone(zone_url).await.context("get_zone()")?;
     let address = SocketAddress { host, port };
     Ok(async move {
-        pool.access_storage()
+        pool.connection()
            .await
            .unwrap()
            .fri_gpu_prover_queue_dal()
diff --git a/prover/prover_fri/src/prover_job_processor.rs b/prover/prover_fri/src/prover_job_processor.rs
index 6067496b495..8f45aa5621a 100644
--- a/prover/prover_fri/src/prover_job_processor.rs
+++ b/prover/prover_fri/src/prover_job_processor.rs
@@ -2,7 +2,7 @@ use std::{collections::HashMap, sync::Arc, time::Instant};
 
 use anyhow::Context as _;
 use circuit_definitions::{circuit_definitions::eip4844::EIP4844Circuit, eip4844_proof_config};
-use prover_dal::{ConnectionPool, ProverDals};
+use prover_dal::{ConnectionPool, ProverDal};
 use tokio::task::JoinHandle;
 use zkevm_test_harness::prover_utils::{
     prove_base_layer_circuit, prove_eip4844_circuit, prove_recursion_layer_circuit,
@@ -231,7 +231,7 @@ impl JobProcessor for Prover {
     const SERVICE_NAME: &'static str = "FriCpuProver";
 
     async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> {
-        let mut storage = self.prover_connection_pool.access_storage().await.unwrap();
+        let mut storage = self.prover_connection_pool.connection().await.unwrap();
         let Some(prover_job) = fetch_next_circuit(
             &mut storage,
             &*self.blob_store,
@@ -247,7 +247,7 @@ impl JobProcessor for Prover {
 
     async fn save_failure(&self, job_id: Self::JobId, _started_at: Instant, error: String) {
         self.prover_connection_pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_prover_jobs_dal()
@@ -279,7 +279,7 @@ impl JobProcessor for Prover {
     ) -> anyhow::Result<()> {
         METRICS.cpu_total_proving_time.observe(started_at.elapsed());
 
-        let mut storage_processor = self.prover_connection_pool.access_storage().await.unwrap();
+        let mut storage_processor = self.prover_connection_pool.connection().await.unwrap();
         save_proof(
             job_id,
             started_at,
@@ -300,7 +300,7 @@ impl JobProcessor for Prover {
     async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result<u32> {
         let mut prover_storage = self
             .prover_connection_pool
-            .access_storage()
+            .connection()
             .await
             .context("failed to acquire DB connection for Prover")?;
         prover_storage
diff --git a/prover/prover_fri/src/socket_listener.rs b/prover/prover_fri/src/socket_listener.rs
index 75f5f4dab46..d6abc3678b4 100644
--- a/prover/prover_fri/src/socket_listener.rs
+++ b/prover/prover_fri/src/socket_listener.rs
@@ -3,7 +3,7 @@ pub mod gpu_socket_listener {
     use std::{net::SocketAddr, time::Instant};
 
     use anyhow::Context as _;
-    use prover_dal::{ConnectionPool, Prover, ProverDals};
+    use prover_dal::{ConnectionPool, Prover, ProverDal};
     use tokio::{
         io::copy,
         net::{TcpListener, TcpStream},
@@ -55,7 +55,7 @@ pub mod gpu_socket_listener {
         let _lock = self.queue.lock().await;
 
         self.pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_gpu_prover_queue_dal()
@@ -141,7 +141,7 @@ pub mod gpu_socket_listener {
         };
 
         self.pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_gpu_prover_queue_dal()
diff --git a/prover/prover_fri/src/utils.rs b/prover/prover_fri/src/utils.rs
index 86f24f9f40b..8bb58ed4cc0 100644
--- a/prover/prover_fri/src/utils.rs
+++ b/prover/prover_fri/src/utils.rs
@@ -2,7 +2,7 @@
 
 use std::{sync::Arc, time::Instant};
 
-use prover_dal::{Prover, ProverDals, StorageProcessor};
+use prover_dal::{Connection, Prover, ProverDal};
 use tokio::sync::Mutex;
 use zkevm_test_harness::prover_utils::{
     verify_base_layer_proof, verify_eip4844_proof, verify_recursion_layer_proof,
@@ -66,7 +66,7 @@ pub async fn save_proof(
     blob_store: &dyn ObjectStore,
     public_blob_store: Option<&dyn ObjectStore>,
     shall_save_to_public_bucket: bool,
-    storage_processor: &mut StorageProcessor<'_, Prover>,
+    storage_processor: &mut Connection<'_, Prover>,
 ) {
     tracing::info!(
         "Successfully proven job: {}, total time taken: {:?}",
diff --git a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs
index 926177dcf77..2aeb4309cf6 100644
--- a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs
+++ b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs
@@ -1,5 +1,5 @@
 use async_trait::async_trait;
-use prover_dal::ProverDals;
+use prover_dal::ProverDal;
 use zksync_prover_interface::api::{
     ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse,
 };
@@ -13,7 +13,7 @@ impl PeriodicApiStruct {
             .put(data.l1_batch_number, &data.data)
             .await
             .expect("Failed to save proof generation data to GCS");
-        let mut connection = self.pool.access_storage().await.unwrap();
+        let mut connection = self.pool.connection().await.unwrap();
         connection
             .fri_protocol_versions_dal()
             .save_prover_protocol_version(data.fri_protocol_version_id, data.l1_verifier_config)
diff --git a/prover/prover_fri_gateway/src/proof_submitter.rs b/prover/prover_fri_gateway/src/proof_submitter.rs
index add9657c48a..1c5850d31a0 100644
--- a/prover/prover_fri_gateway/src/proof_submitter.rs
+++ b/prover/prover_fri_gateway/src/proof_submitter.rs
@@ -1,5 +1,5 @@
 use async_trait::async_trait;
-use prover_dal::{fri_proof_compressor_dal::ProofCompressionJobStatus, ProverDals};
+use prover_dal::{fri_proof_compressor_dal::ProofCompressionJobStatus, ProverDal};
 use zksync_prover_interface::api::{SubmitProofRequest, SubmitProofResponse};
 use zksync_types::L1BatchNumber;
 
@@ -9,7 +9,7 @@ impl PeriodicApiStruct {
     async fn next_submit_proof_request(&self) -> Option<(L1BatchNumber, SubmitProofRequest)> {
         let (l1_batch_number, status) = self
             .pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_proof_compressor_dal()
@@ -37,7 +37,7 @@ impl PeriodicApiStruct {
 
     async fn save_successful_sent_proof(&self, l1_batch_number: L1BatchNumber) {
         self.pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_proof_compressor_dal()
diff --git a/prover/prover_fri_utils/src/lib.rs b/prover/prover_fri_utils/src/lib.rs
index 66e30876f92..8a57871e8a2 100644
--- a/prover/prover_fri_utils/src/lib.rs
+++ b/prover/prover_fri_utils/src/lib.rs
@@ -1,6 +1,6 @@
 use std::time::Instant;
 
-use prover_dal::{Prover, ProverDals, StorageProcessor};
+use prover_dal::{Connection, Prover, ProverDal};
 use zksync_object_store::ObjectStore;
 use zksync_prover_fri_types::{
     circuit_definitions::{
@@ -25,7 +25,7 @@ pub mod region_fetcher;
 pub mod socket_utils;
 
 pub async fn fetch_next_circuit(
-    storage: &mut StorageProcessor<'_, Prover>,
+    storage: &mut Connection<'_, Prover>,
     blob_store: &dyn ObjectStore,
     circuit_ids_for_round_to_be_proven: &Vec<CircuitIdRoundTuple>,
     vk_commitments: &L1VerifierConfig,
diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs
index af9c674c43d..4f7256a4bc0 100644
--- a/prover/witness_generator/src/basic_circuits.rs
+++ b/prover/witness_generator/src/basic_circuits.rs
@@ -19,7 +19,7 @@ use multivm::vm_latest::{
     constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle,
 };
 use prover_dal::{
-    fri_witness_generator_dal::FriWitnessJobStatus, ConnectionPool, Prover, ProverDals,
+    fri_witness_generator_dal::FriWitnessJobStatus, ConnectionPool, Prover, ProverDal,
 };
 use rand::Rng;
 use serde::{Deserialize, Serialize};
@@ -29,7 +29,7 @@ use zkevm_test_harness::{
     zkevm_circuits::eip_4844::input::EIP4844OutputDataWitness,
 };
 use zksync_config::configs::FriWitnessGeneratorConfig;
-use zksync_dal::{Server, ServerDals};
+use zksync_dal::{Core, CoreDal};
 use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory, StoredObject};
 use zksync_prover_fri_types::{
     circuit_definitions::{
@@ -105,7 +105,7 @@ pub struct BasicWitnessGenerator {
     config: Arc<FriWitnessGeneratorConfig>,
     object_store: Arc<dyn ObjectStore>,
     public_blob_store: Option<Arc<dyn ObjectStore>>,
-    connection_pool: ConnectionPool<Server>,
+    connection_pool: ConnectionPool<Core>,
     prover_connection_pool: ConnectionPool<Prover>,
     protocol_versions: Vec<FriProtocolVersionId>,
 }
@@ -115,7 +115,7 @@ impl BasicWitnessGenerator {
         config: FriWitnessGeneratorConfig,
         store_factory: &ObjectStoreFactory,
         public_blob_store: Option<Arc<dyn ObjectStore>>,
-        connection_pool: ConnectionPool<Server>,
+        connection_pool: ConnectionPool<Core>,
         prover_connection_pool: ConnectionPool<Prover>,
         protocol_versions: Vec<FriProtocolVersionId>,
     ) -> Self {
@@ -131,7 +131,7 @@ impl BasicWitnessGenerator {
 
     async fn process_job_impl(
         object_store: Arc<dyn ObjectStore>,
-        connection_pool: ConnectionPool<Server>,
+        connection_pool: ConnectionPool<Core>,
         prover_connection_pool: ConnectionPool<Prover>,
         basic_job: BasicWitnessGeneratorJob,
         started_at: Instant,
@@ -159,7 +159,7 @@ impl BasicWitnessGenerator {
                     blocks_proving_percentage
                 );
 
-                let mut prover_storage = prover_connection_pool.access_storage().await.unwrap();
+                let mut prover_storage = prover_connection_pool.connection().await.unwrap();
                 let mut transaction = prover_storage.start_transaction().await.unwrap();
                 transaction
                     .fri_proof_compressor_dal()
@@ -206,7 +206,7 @@ impl JobProcessor for BasicWitnessGenerator {
     const SERVICE_NAME: &'static str = "fri_basic_circuit_witness_generator";
 
     async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> {
-        let mut prover_connection = self.prover_connection_pool.access_storage().await.unwrap();
+        let mut prover_connection = self.prover_connection_pool.connection().await.unwrap();
         let last_l1_batch_to_process = self.config.last_l1_batch_to_process();
         let pod_name = get_current_pod_name();
         match prover_connection
@@ -237,7 +237,7 @@ impl JobProcessor for BasicWitnessGenerator {
 
     async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () {
         self.prover_connection_pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_witness_generator_dal()
@@ -315,7 +315,7 @@ impl JobProcessor for BasicWitnessGenerator {
     async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result<u32> {
         let mut prover_storage = self
             .prover_connection_pool
-            .access_storage()
+            .connection()
             .await
             .context("failed to acquire DB connection for BasicWitnessGenerator")?;
         prover_storage
@@ -331,7 +331,7 @@ impl JobProcessor for BasicWitnessGenerator {
 async fn process_basic_circuits_job(
     object_store: &dyn ObjectStore,
     config: Arc<FriWitnessGeneratorConfig>,
-    connection_pool: ConnectionPool<Server>,
+    connection_pool: ConnectionPool<Core>,
     started_at: Instant,
     block_number: L1BatchNumber,
     job: PrepareBasicCircuitsJob,
@@ -372,7 +372,7 @@ async fn update_database(
     block_number: L1BatchNumber,
     blob_urls: BlobUrls,
 ) {
-    let mut prover_connection = prover_connection_pool.access_storage().await.unwrap();
+    let mut prover_connection = prover_connection_pool.connection().await.unwrap();
     let protocol_version_id = prover_connection
         .fri_witness_generator_dal()
         .protocol_version_for_l1_batch(block_number)
@@ -487,11 +487,11 @@ async fn save_recursion_queue(
 // If making changes to this method, consider moving this logic to the DAL layer and make
 // `PrepareBasicCircuitsJob` have all fields of `BasicCircuitWitnessGeneratorInput`.
 async fn build_basic_circuits_witness_generator_input(
-    connection_pool: &ConnectionPool<Server>,
+    connection_pool: &ConnectionPool<Core>,
     witness_merkle_input: PrepareBasicCircuitsJob,
     l1_batch_number: L1BatchNumber,
 ) -> BasicCircuitWitnessGeneratorInput {
-    let mut connection = connection_pool.access_storage().await.unwrap();
+    let mut connection = connection_pool.connection().await.unwrap();
     let block_header = connection
         .blocks_dal()
         .get_l1_batch_header(l1_batch_number)
@@ -531,7 +531,7 @@ async fn generate_witness(
     block_number: L1BatchNumber,
     object_store: &dyn ObjectStore,
     config: Arc<FriWitnessGeneratorConfig>,
-    connection_pool: ConnectionPool<Server>,
+    connection_pool: ConnectionPool<Core>,
     input: BasicCircuitWitnessGeneratorInput,
     eip_4844_blobs: Eip4844Blobs,
 ) -> (
@@ -545,7 +545,7 @@ async fn generate_witness(
     >,
     BlockAuxilaryOutputWitness<GoldilocksField>,
 ) {
-    let mut connection = connection_pool.access_storage().await.unwrap();
+    let mut connection = connection_pool.connection().await.unwrap();
     let header = connection
         .blocks_dal()
         .get_l1_batch_header(input.block_number)
@@ -667,9 +667,7 @@ async fn generate_witness(
     let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1);
 
     let make_circuits = tokio::task::spawn_blocking(move || {
-        let connection = rt_handle
-            .block_on(connection_pool.access_storage())
-            .unwrap();
+        let connection = rt_handle.block_on(connection_pool.connection()).unwrap();
 
         let storage = PostgresStorage::new(rt_handle, connection, last_miniblock_number, true);
         let storage_view = StorageView::new(storage).to_rc_ptr();
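
Several hunks above pair `connection()` with `start_transaction()`. The post-rename shape of that pattern, as a hedged sketch (`transactional_update` is illustrative, and `commit` is the usual counterpart, not shown in these hunks):

    use prover_dal::{ConnectionPool, Prover, ProverDal};

    async fn transactional_update(pool: &ConnectionPool<Prover>) -> anyhow::Result<()> {
        let mut conn = pool.connection().await?;
        let mut transaction = conn.start_transaction().await?;
        // ... DAL writes issued on `transaction` commit or roll back together ...
        transaction.commit().await?;
        Ok(())
    }
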
diff --git a/prover/witness_generator/src/leaf_aggregation.rs b/prover/witness_generator/src/leaf_aggregation.rs
index 13ab1475c69..ffc6e5c26a8 100644
--- a/prover/witness_generator/src/leaf_aggregation.rs
+++ b/prover/witness_generator/src/leaf_aggregation.rs
@@ -2,7 +2,7 @@ use std::{sync::Arc, time::Instant};
 
 use anyhow::Context as _;
 use async_trait::async_trait;
-use prover_dal::{Prover, ProverDals};
+use prover_dal::{Prover, ProverDal};
 use zkevm_test_harness::witness::recursive_aggregation::{
     compute_leaf_params, create_leaf_witnesses,
 };
@@ -115,7 +115,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator {
     const SERVICE_NAME: &'static str = "fri_leaf_aggregation_witness_generator";
 
     async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> {
-        let mut prover_connection = self.prover_connection_pool.access_storage().await.unwrap();
+        let mut prover_connection = self.prover_connection_pool.connection().await.unwrap();
         let pod_name = get_current_pod_name();
         let Some(metadata) = prover_connection
             .fri_witness_generator_dal()
@@ -135,7 +135,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator {
 
     async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () {
         self.prover_connection_pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_witness_generator_dal()
@@ -191,7 +191,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator {
     async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result<u32> {
         let mut prover_storage = self
             .prover_connection_pool
-            .access_storage()
+            .connection()
             .await
             .context("failed to acquire DB connection for LeafAggregationWitnessGenerator")?;
         prover_storage
@@ -296,7 +296,7 @@ async fn update_database(
         block_number.0,
         circuit_id,
     );
-    let mut prover_connection = prover_connection_pool.access_storage().await.unwrap();
+    let mut prover_connection = prover_connection_pool.connection().await.unwrap();
     let mut transaction = prover_connection.start_transaction().await.unwrap();
     let number_of_dependent_jobs = blob_urls.circuit_ids_and_urls.len();
     let protocol_version_id = transaction
diff --git a/prover/witness_generator/src/main.rs b/prover/witness_generator/src/main.rs
index 429631d9180..9174e7ce9c6 100644
--- a/prover/witness_generator/src/main.rs
+++ b/prover/witness_generator/src/main.rs
@@ -5,7 +5,7 @@ use std::time::Instant;
 use anyhow::{anyhow, Context as _};
 use futures::{channel::mpsc, executor::block_on, SinkExt};
 use prometheus_exporter::PrometheusExporterConfig;
-use prover_dal::{ConnectionPool, Prover, ProverDals};
+use prover_dal::{ConnectionPool, Prover, ProverDal};
 use structopt::StructOpt;
 use tokio::sync::watch;
 use zksync_config::{
@@ -36,7 +36,7 @@ mod utils;
 
 #[cfg(not(target_env = "msvc"))]
 use jemallocator::Jemalloc;
-use zksync_dal::Server;
+use zksync_dal::Core;
 
 #[cfg(not(target_env = "msvc"))]
 #[global_allocator]
@@ -97,7 +97,7 @@ async fn main() -> anyhow::Result<()> {
         FriWitnessGeneratorConfig::from_env().context("FriWitnessGeneratorConfig::from_env()")?;
     let prometheus_config = PrometheusConfig::from_env().context("PrometheusConfig::from_env()")?;
     let postgres_config = PostgresConfig::from_env().context("PostgresConfig::from_env()")?;
-    let connection_pool = ConnectionPool::<Server>::builder(
+    let connection_pool = ConnectionPool::<Core>::builder(
         postgres_config.master_url()?,
         postgres_config.max_connections()?,
     )
@@ -111,7 +111,7 @@ async fn main() -> anyhow::Result<()> {
     let (stop_sender, stop_receiver) = watch::channel(false);
     let vk_commitments = get_cached_commitments();
     let protocol_versions = prover_connection_pool
-        .access_storage()
+        .connection()
         .await
         .unwrap()
         .fri_protocol_versions_dal()
diff --git a/prover/witness_generator/src/node_aggregation.rs b/prover/witness_generator/src/node_aggregation.rs
index b479de99eeb..17a9cfd1e58 100644
--- a/prover/witness_generator/src/node_aggregation.rs
+++ b/prover/witness_generator/src/node_aggregation.rs
@@ -2,7 +2,7 @@ use std::{sync::Arc, time::Instant};
 
 use anyhow::Context as _;
 use async_trait::async_trait;
-use prover_dal::{Prover, ProverDals};
+use prover_dal::{Prover, ProverDal};
 use zkevm_test_harness::witness::recursive_aggregation::{
     compute_node_vk_commitment, create_node_witnesses,
 };
@@ -148,7 +148,7 @@ impl JobProcessor for NodeAggregationWitnessGenerator {
     const SERVICE_NAME: &'static str = "fri_node_aggregation_witness_generator";
 
     async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> {
-        let mut prover_connection = self.prover_connection_pool.access_storage().await.unwrap();
+        let mut prover_connection = self.prover_connection_pool.connection().await.unwrap();
         let pod_name = get_current_pod_name();
         let Some(metadata) = prover_connection
             .fri_witness_generator_dal()
@@ -168,7 +168,7 @@ impl JobProcessor for NodeAggregationWitnessGenerator {
 
     async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () {
         self.prover_connection_pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_witness_generator_dal()
@@ -217,7 +217,7 @@ impl JobProcessor for NodeAggregationWitnessGenerator {
     async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result<u32> {
         let mut prover_storage = self
             .prover_connection_pool
-            .access_storage()
+            .connection()
             .await
             .context("failed to acquire DB connection for NodeAggregationWitnessGenerator")?;
         prover_storage
@@ -291,7 +291,7 @@ async fn update_database(
     blob_urls: BlobUrls,
     shall_continue_node_aggregations: bool,
 ) {
-    let mut prover_connection = prover_connection_pool.access_storage().await.unwrap();
+    let mut prover_connection = prover_connection_pool.connection().await.unwrap();
     let mut transaction = prover_connection.start_transaction().await.unwrap();
     let dependent_jobs = blob_urls.circuit_ids_and_urls.len();
     let protocol_version_id = transaction
diff --git a/prover/witness_generator/src/scheduler.rs b/prover/witness_generator/src/scheduler.rs
index 865844ec49e..86f039632f1 100644
--- a/prover/witness_generator/src/scheduler.rs
+++ b/prover/witness_generator/src/scheduler.rs
@@ -9,7 +9,7 @@ use circuit_definitions::{
     },
     eip4844_proof_config,
 };
-use prover_dal::{Prover, ProverDals};
+use prover_dal::{Prover, ProverDal};
 use zksync_config::configs::FriWitnessGeneratorConfig;
 use zksync_dal::ConnectionPool;
 use zksync_object_store::{ObjectStore, ObjectStoreFactory};
@@ -133,7 +133,7 @@ impl JobProcessor for SchedulerWitnessGenerator {
     const SERVICE_NAME: &'static str = "fri_scheduler_witness_generator";
 
     async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> {
-        let mut prover_connection = self.prover_connection_pool.access_storage().await.unwrap();
+        let mut prover_connection = self.prover_connection_pool.connection().await.unwrap();
         let pod_name = get_current_pod_name();
         let Some(l1_batch_number) = prover_connection
             .fri_witness_generator_dal()
@@ -157,7 +157,7 @@ impl JobProcessor for SchedulerWitnessGenerator {
 
     async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () {
         self.prover_connection_pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_witness_generator_dal()
@@ -196,7 +196,7 @@ impl JobProcessor for SchedulerWitnessGenerator {
         WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::Scheduler.into()]
             .observe(blob_save_started_at.elapsed());
 
-        let mut prover_connection = self.prover_connection_pool.access_storage().await.unwrap();
+        let mut prover_connection = self.prover_connection_pool.connection().await.unwrap();
         let mut transaction = prover_connection.start_transaction().await.unwrap();
         let protocol_version_id = transaction
             .fri_witness_generator_dal()
@@ -232,7 +232,7 @@ impl JobProcessor for SchedulerWitnessGenerator {
     async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result<u32> {
         let mut prover_storage = self
             .prover_connection_pool
-            .access_storage()
+            .connection()
             .await
             .context("failed to acquire DB connection for SchedulerWitnessGenerator")?;
         prover_storage
diff --git a/prover/witness_vector_generator/src/generator.rs b/prover/witness_vector_generator/src/generator.rs
index cc8213877c4..e7cb9cfff95 100644
--- a/prover/witness_vector_generator/src/generator.rs
+++ b/prover/witness_vector_generator/src/generator.rs
@@ -6,7 +6,7 @@ use std::{
 
 use anyhow::Context as _;
 use async_trait::async_trait;
-use prover_dal::{ConnectionPool, Prover, ProverDals};
+use prover_dal::{ConnectionPool, Prover, ProverDal};
 use tokio::{task::JoinHandle, time::sleep};
 use zksync_config::configs::FriWitnessVectorGeneratorConfig;
 use zksync_object_store::ObjectStore;
@@ -97,7 +97,7 @@ impl JobProcessor for WitnessVectorGenerator {
     const SERVICE_NAME: &'static str = "WitnessVectorGenerator";
 
     async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> {
-        let mut storage = self.pool.access_storage().await.unwrap();
+        let mut storage = self.pool.connection().await.unwrap();
         let Some(job) = fetch_next_circuit(
             &mut storage,
             &*self.blob_store,
@@ -113,7 +113,7 @@ impl JobProcessor for WitnessVectorGenerator {
 
     async fn save_failure(&self, job_id: Self::JobId, _started_at: Instant, error: String) {
         self.pool
-            .access_storage()
+            .connection()
             .await
             .unwrap()
             .fri_prover_jobs_dal()
@@ -157,7 +157,7 @@ impl JobProcessor for WitnessVectorGenerator {
         while now.elapsed() < self.config.prover_instance_wait_timeout() {
             let prover = self
                 .pool
-                .access_storage()
+                .connection()
                 .await
                 .unwrap()
                 .fri_gpu_prover_queue_dal()
@@ -216,7 +216,7 @@ impl JobProcessor for WitnessVectorGenerator {
     async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result<u32> {
         let mut prover_storage = self
             .pool
-            .access_storage()
+            .connection()
             .await
             .context("failed to acquire DB connection for WitnessVectorGenerator")?;
         prover_storage
@@ -246,7 +246,7 @@ async fn handle_send_result(
 
     METRICS.blob_sending_time[&blob_size_in_mb.to_string()].observe(*elapsed);
 
-    pool.access_storage()
+    pool.connection()
         .await
         .unwrap()
         .fri_prover_jobs_dal()
@@ -261,7 +261,7 @@ async fn handle_send_result(
     );
 
     // mark prover instance in `gpu_prover_queue` dead
-    pool.access_storage()
+    pool.connection()
         .await
         .unwrap()
         .fri_gpu_prover_queue_dal()
@@ -273,7 +273,7 @@ async fn handle_send_result(
         .await;
 
     // mark the job as failed
-    pool.access_storage()
+    pool.connection()
         .await
        .unwrap()
        .fri_prover_jobs_dal()
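
Taken as a whole, every hunk in this diff applies the same rename table; a compact cheat sheet, with all entries taken directly from the hunks above:

    // Old name / call                        New name / call
    // ---------------------------------------------------------------------
    // StorageProcessor<'_, Server>           Connection<'_, Core>
    // StorageProcessor<'_, Prover>           Connection<'_, Prover>
    // ServerDals                             CoreDal
    // ProverDals                             ProverDal
    // StorageMarker                          DbMarker
    // pool.access_storage()                  pool.connection()
    // pool.access_storage_tagged("tag")      pool.connection_tagged("tag")
    // zksync_db_connection::processor::*     zksync_db_connection::connection::*
    // zksync_db_connection::connection::ConnectionPoolBuilder
    //                                        zksync_db_connection::connection_pool::ConnectionPoolBuilder
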