chore: Merge main into kl factory #1740

Merged: 10 commits, Apr 19, 2024
5 changes: 5 additions & 0 deletions .dockerignore
@@ -39,3 +39,8 @@ contracts/.git
!bellman-cuda
!prover/vk_setup_data_generator_server_fri/data/
!.github/release-please/manifest.json

!etc/env/file_based
!etc/env/dev.toml
!etc/env/consensus_secrets.yaml
!etc/env/consensus_config.yaml
3 changes: 0 additions & 3 deletions .github/workflows/build-local-node-docker.yml
@@ -56,16 +56,13 @@ jobs:
- name: init
run: |
ci_run git config --global --add safe.directory /usr/src/zksync
ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen
ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts
ci_run git config --global --add safe.directory /usr/src/zksync/contracts

ci_run zk
ci_run zk run yarn
ci_run cp etc/tokens/{test,localhost}.json
ci_run zk compiler all
ci_run zk contract build
ci_run zk f yarn run l2-contracts build

- name: update-image
run: |
45 changes: 44 additions & 1 deletion Cargo.lock

Generated file; diff not rendered.

3 changes: 3 additions & 0 deletions Cargo.toml
@@ -14,6 +14,7 @@ members = [
"core/node/node_framework",
"core/node/block_reverter",
"core/node/commitment_generator",
"core/node/house_keeper",
# Libraries
"core/lib/db_connection",
"core/lib/zksync_core",
@@ -86,6 +87,7 @@ clap = "4.2.2"
codegen = "0.2.0"
criterion = "0.4.0"
ctrlc = "3.1"
derive_more = "=1.0.0-beta.6"
envy = "0.4"
ethabi = "18.0.0"
flate2 = "1.0.28"
@@ -226,3 +228,4 @@ zksync_eth_watch = { path = "core/node/eth_watch" }
zksync_shared_metrics = { path = "core/node/shared_metrics" }
zksync_block_reverter = { path = "core/node/block_reverter"}
zksync_commitment_generator = { path = "core/node/commitment_generator" }
zksync_house_keeper = { path = "core/node/house_keeper" }
17 changes: 10 additions & 7 deletions core/bin/external_node/src/config/mod.rs
@@ -26,6 +26,7 @@ use zksync_core::{
#[cfg(test)]
use zksync_dal::{ConnectionPool, Core};
use zksync_protobuf_config::proto;
use zksync_snapshots_applier::SnapshotsApplierConfig;
use zksync_types::{api::BridgeAddresses, fee_model::FeeParams, ETHEREUM_ADDRESS};
use zksync_web3_decl::{
client::L2Client,
@@ -312,9 +313,6 @@ pub(crate) struct OptionalENConfig {
// Other config settings
/// Port on which the Prometheus exporter server is listening.
pub prometheus_port: Option<u16>,
/// Number of keys that is processed by enum_index migration in State Keeper each L1 batch.
#[serde(default = "OptionalENConfig::default_enum_index_migration_chunk_size")]
pub enum_index_migration_chunk_size: usize,
/// Capacity of the queue for asynchronous miniblock sealing. Once this many miniblocks are queued,
/// sealing will block until some of the miniblocks from the queue are processed.
/// 0 means that sealing is synchronous; this is mostly useful for performance comparison, testing etc.
@@ -344,6 +342,11 @@ pub(crate) struct OptionalENConfig {
/// This is an experimental and incomplete feature; do not use unless you know what you're doing.
#[serde(default)]
pub snapshots_recovery_enabled: bool,
/// Maximum concurrency factor for the concurrent parts of snapshot recovery for Postgres. It may be useful to
/// reduce this factor to about 5 if snapshot recovery overloads I/O capacity of the node. Conversely,
/// if I/O capacity of your infra is high, you may increase concurrency to speed up Postgres recovery.
#[serde(default = "OptionalENConfig::default_snapshots_recovery_postgres_max_concurrency")]
pub snapshots_recovery_postgres_max_concurrency: NonZeroUsize,

#[serde(default = "OptionalENConfig::default_pruning_chunk_size")]
pub pruning_chunk_size: u32,
@@ -457,10 +460,6 @@ impl OptionalENConfig {
10
}

const fn default_enum_index_migration_chunk_size() -> usize {
5000
}

const fn default_miniblock_seal_queue_capacity() -> usize {
10
}
@@ -485,6 +484,10 @@
L1BatchCommitDataGeneratorMode::Rollup
}

fn default_snapshots_recovery_postgres_max_concurrency() -> NonZeroUsize {
SnapshotsApplierConfig::default().max_concurrency
}

const fn default_pruning_chunk_size() -> u32 {
10
}
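For readers unfamiliar with the pattern above: the new `snapshots_recovery_postgres_max_concurrency` option delegates its serde default to a function, so the config cannot drift from the library default. A minimal sketch of that pattern, assuming `serde` (with derive) and `serde_json` as dependencies; the struct name `RecoveryOptions` and the placeholder value of 10 are illustrative stand-ins, not the real `SnapshotsApplierConfig` value:

```rust
use std::num::NonZeroUsize;

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct RecoveryOptions {
    // Delegating the serde default to a function keeps the configured default
    // in sync with the library default instead of hard-coding it twice.
    #[serde(default = "default_max_concurrency")]
    snapshots_recovery_postgres_max_concurrency: NonZeroUsize,
}

fn default_max_concurrency() -> NonZeroUsize {
    // Placeholder for `SnapshotsApplierConfig::default().max_concurrency`.
    NonZeroUsize::new(10).unwrap()
}

fn main() {
    // A config with the field omitted falls back to the default.
    let opts: RecoveryOptions = serde_json::from_str("{}").unwrap();
    assert_eq!(opts.snapshots_recovery_postgres_max_concurrency.get(), 10);
}
```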
20 changes: 11 additions & 9 deletions core/bin/external_node/src/init.rs
@@ -6,7 +6,7 @@ use zksync_core::sync_layer::genesis::perform_genesis_if_needed;
use zksync_dal::{ConnectionPool, Core, CoreDal};
use zksync_health_check::AppHealthCheck;
use zksync_object_store::ObjectStoreFactory;
use zksync_snapshots_applier::SnapshotsApplierConfig;
use zksync_snapshots_applier::{SnapshotsApplierConfig, SnapshotsApplierTask};
use zksync_web3_decl::client::BoxedL2Client;

use crate::config::read_snapshots_recovery_config;
@@ -20,7 +20,7 @@ enum InitDecision {
}

pub(crate) async fn ensure_storage_initialized(
pool: &ConnectionPool<Core>,
pool: ConnectionPool<Core>,
main_node_client: BoxedL2Client,
app_health: &AppHealthCheck,
l2_chain_id: L2ChainId,
@@ -89,13 +89,15 @@ pub(crate) async fn ensure_storage_initialized(
.await;

let config = SnapshotsApplierConfig::default();
app_health.insert_component(config.health_check());
config
.run(
pool,
&main_node_client.for_component("snapshot_recovery"),
&blob_store,
)
let snapshots_applier_task = SnapshotsApplierTask::new(
config,
pool,
Box::new(main_node_client.for_component("snapshot_recovery")),
blob_store,
);
app_health.insert_component(snapshots_applier_task.health_check());
snapshots_applier_task
.run()
.await
.context("snapshot recovery failed")?;
tracing::info!("Snapshot recovery is complete");
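The hunk above replaces `SnapshotsApplierConfig::run(...)`, which borrowed its dependencies, with a `SnapshotsApplierTask` that owns them and whose health check is registered before it starts. A minimal sketch of that shape, with simplified stand-in types rather than the real `zksync_snapshots_applier` API, assuming `tokio` and `anyhow` as dependencies:

```rust
use anyhow::Context as _;

// Stand-ins for the real health-check types.
#[derive(Clone)]
struct HealthCheck;

struct AppHealthCheck;

impl AppHealthCheck {
    fn insert_component(&self, _check: HealthCheck) {}
}

struct SnapshotsApplierTask {
    health: HealthCheck,
    // In the real task, the owned pool, main-node client, and blob store
    // would live here as fields instead of being borrowed by `run`.
}

impl SnapshotsApplierTask {
    fn new() -> Self {
        Self { health: HealthCheck }
    }

    // The health check is available before the task starts running.
    fn health_check(&self) -> HealthCheck {
        self.health.clone()
    }

    // `run` takes `self` by value: the task owns its dependencies for its
    // entire lifetime, so no borrows need to outlive the caller.
    async fn run(self) -> anyhow::Result<()> {
        Ok(())
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let app_health = AppHealthCheck;
    let task = SnapshotsApplierTask::new();
    app_health.insert_component(task.health_check());
    task.run().await.context("snapshot recovery failed")?;
    Ok(())
}
```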
35 changes: 25 additions & 10 deletions core/bin/external_node/src/main.rs
@@ -90,11 +90,8 @@ async fn build_state_keeper(
// We only need call traces on the external node if the `debug_` namespace is enabled.
let save_call_traces = config.optional.api_namespaces().contains(&Namespace::Debug);

let (storage_factory, task) = AsyncRocksdbCache::new(
connection_pool.clone(),
state_keeper_db_path,
config.optional.enum_index_migration_chunk_size,
);
let (storage_factory, task) =
AsyncRocksdbCache::new(connection_pool.clone(), state_keeper_db_path);
let mut stop_receiver_clone = stop_receiver.clone();
task_handles.push(tokio::task::spawn(async move {
let result = task.run(stop_receiver_clone.clone()).await;
@@ -147,9 +144,27 @@ async fn run_tree(
memtable_capacity: config.optional.merkle_tree_memtable_capacity(),
stalled_writes_timeout: config.optional.merkle_tree_stalled_writes_timeout(),
};
let metadata_calculator = MetadataCalculator::new(metadata_calculator_config, None)
.await
.context("failed initializing metadata calculator")?;

let max_concurrency = config
.optional
.snapshots_recovery_postgres_max_concurrency
.get();
let max_concurrency = u32::try_from(max_concurrency).with_context(|| {
format!("snapshot recovery max concurrency ({max_concurrency}) is too large")
})?;
let recovery_pool = ConnectionPool::builder(
tree_pool.database_url(),
max_concurrency.min(config.postgres.max_connections),
)
.build()
.await
.context("failed creating DB pool for Merkle tree recovery")?;

let metadata_calculator =
MetadataCalculator::new(metadata_calculator_config, None, tree_pool, recovery_pool)
.await
.context("failed initializing metadata calculator")?;

let tree_reader = Arc::new(metadata_calculator.tree_reader());
app_health.insert_component(metadata_calculator.tree_health_check());

@@ -166,7 +181,7 @@
}));
}

let tree_handle = task::spawn(metadata_calculator.run(tree_pool, stop_receiver));
let tree_handle = task::spawn(metadata_calculator.run(stop_receiver));

task_futures.push(tree_handle);
Ok(tree_reader)
@@ -909,7 +924,7 @@

// Make sure that the node storage is initialized either via genesis or snapshot recovery.
ensure_storage_initialized(
&connection_pool,
connection_pool.clone(),
main_node_client.clone(),
&app_health,
config.remote.l2_chain_id,
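The new recovery pool in `run_tree` is sized by converting the configured `NonZeroUsize` concurrency to `u32` (failing loudly on overflow) and clamping it to the global Postgres connection limit. A minimal sketch of that check as a free function, assuming `anyhow` as a dependency; `recovery_pool_size` is a hypothetical name, and the real code inlines this logic around `ConnectionPool::builder`:

```rust
use std::num::NonZeroUsize;

use anyhow::Context as _;

// Hypothetical helper mirroring the checks above; the real code feeds the
// result straight into the connection-pool builder.
fn recovery_pool_size(
    configured: NonZeroUsize,
    postgres_max_connections: u32,
) -> anyhow::Result<u32> {
    let n = configured.get();
    let n = u32::try_from(n)
        .with_context(|| format!("snapshot recovery max concurrency ({n}) is too large"))?;
    // Never let the recovery pool exceed the node's overall connection budget.
    Ok(n.min(postgres_max_connections))
}

fn main() -> anyhow::Result<()> {
    // With 25 configured workers but only 10 allowed connections, the pool
    // is capped at 10.
    assert_eq!(recovery_pool_size(NonZeroUsize::new(25).unwrap(), 10)?, 10);
    Ok(())
}
```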
12 changes: 6 additions & 6 deletions core/bin/external_node/src/tests.rs
@@ -5,7 +5,7 @@ use test_casing::test_casing;
use zksync_basic_types::protocol_version::ProtocolVersionId;
use zksync_core::genesis::{insert_genesis_batch, GenesisParams};
use zksync_eth_client::clients::MockEthereum;
use zksync_types::{api, ethabi, fee_model::FeeParams, L1BatchNumber, MiniblockNumber, H256};
use zksync_types::{api, ethabi, fee_model::FeeParams, L1BatchNumber, L2BlockNumber, H256};
use zksync_web3_decl::client::{BoxedL2Client, MockL2Client};

use super::*;
@@ -101,20 +101,20 @@ async fn external_node_basics(components_str: &'static str) {
let _guard = vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging
let temp_dir = tempfile::TempDir::new().unwrap();

// Simplest case to mock: the EN already has a genesis L1 batch / miniblock, and it's the only L1 batch / miniblock
// Simplest case to mock: the EN already has a genesis L1 batch / L2 block, and it's the only L1 batch / L2 block
// in the network.
let connection_pool = ConnectionPool::test_pool().await;
let singleton_pool_builder = ConnectionPool::singleton(connection_pool.database_url());
let mut storage = connection_pool.connection().await.unwrap();
let genesis_params = insert_genesis_batch(&mut storage, &GenesisParams::mock())
.await
.unwrap();
let genesis_miniblock = storage
let genesis_l2_block = storage
.blocks_dal()
.get_miniblock_header(MiniblockNumber(0))
.get_l2_block_header(L2BlockNumber(0))
.await
.unwrap()
.expect("No genesis miniblock");
.expect("No genesis L2 block");
drop(storage);

let components: ComponentsToRun = components_str.parse().unwrap();
@@ -149,7 +149,7 @@
assert_eq!(number, api::BlockNumber::Number(0.into()));
Ok(serde_json::to_value(
api::Block::<api::TransactionVariant> {
hash: genesis_miniblock.hash,
hash: genesis_l2_block.hash,
..api::Block::default()
},
)?)