From 08eedb434238c0043438fd4c988b5778973102f6 Mon Sep 17 00:00:00 2001 From: pompon0 Date: Tue, 9 Apr 2024 15:29:01 +0200 Subject: [PATCH 01/29] chore: bumped consensus and vise deps (#1618) bumped consensus and vise deps --- Cargo.lock | 35 ++++++++++++++++++----------------- Cargo.toml | 24 ++++++++++++------------ prover/Cargo.lock | 27 ++++++++++++++------------- prover/Cargo.toml | 2 +- 4 files changed, 45 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4708e6e7d61..12dca9f4bc4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1195,7 +1195,8 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "compile-fmt" version = "0.1.0" -source = "git+https://github.com/slowli/compile-fmt.git?rev=c6a41c846c9a6f70cdba4b44c9f3922242ffcf12#c6a41c846c9a6f70cdba4b44c9f3922242ffcf12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bed69047ed42e52c7e38d6421eeb8ceefb4f2a2b52eed59137f7bad7908f6800" [[package]] name = "concurrent-queue" @@ -4642,9 +4643,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.21.2" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" +checksum = "c1ca959da22a332509f2a73ae9e5f23f9dcfc31fd3a54d71f159495bd5909baa" dependencies = [ "dtoa", "itoa", @@ -7233,7 +7234,7 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vise" version = "0.1.0" -source = "git+https://github.com/matter-labs/vise.git?rev=1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1#1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" +source = "git+https://github.com/matter-labs/vise.git?rev=a5bb80c9ce7168663114ee30e794d6dc32159ee4#a5bb80c9ce7168663114ee30e794d6dc32159ee4" dependencies = [ "compile-fmt", "elsa", @@ -7246,7 +7247,7 @@ dependencies = [ [[package]] name = "vise-exporter" version = "0.1.0" -source = "git+https://github.com/matter-labs/vise.git?rev=1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1#1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" +source = "git+https://github.com/matter-labs/vise.git?rev=a5bb80c9ce7168663114ee30e794d6dc32159ee4#a5bb80c9ce7168663114ee30e794d6dc32159ee4" dependencies = [ "hyper", "metrics-exporter-prometheus", @@ -7259,7 +7260,7 @@ dependencies = [ [[package]] name = "vise-macros" version = "0.1.0" -source = "git+https://github.com/matter-labs/vise.git?rev=1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1#1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" +source = "git+https://github.com/matter-labs/vise.git?rev=a5bb80c9ce7168663114ee30e794d6dc32159ee4#a5bb80c9ce7168663114ee30e794d6dc32159ee4" dependencies = [ "proc-macro2 1.0.69", "quote 1.0.33", @@ -8076,7 +8077,7 @@ dependencies = [ [[package]] name = "zksync_concurrency" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "once_cell", @@ -8106,7 +8107,7 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = 
"git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "async-trait", @@ -8127,7 +8128,7 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "blst", @@ -8145,7 +8146,7 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "rand 0.8.5", @@ -8165,7 +8166,7 @@ dependencies = [ [[package]] name = "zksync_consensus_network" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "async-trait", @@ -8190,7 +8191,7 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "bit-vec", @@ -8210,7 +8211,7 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "async-trait", @@ -8228,7 +8229,7 @@ dependencies = [ [[package]] name = "zksync_consensus_sync_blocks" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "thiserror", @@ -8243,7 +8244,7 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "rand 0.8.5", "thiserror", @@ -8697,7 +8698,7 @@ dependencies = [ [[package]] name = "zksync_protobuf" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = 
"git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "bit-vec", @@ -8717,7 +8718,7 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "heck 0.5.0", diff --git a/Cargo.toml b/Cargo.toml index 588ea8dbd11..5284954d01d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -159,24 +159,24 @@ circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", git = "https: circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.5.0" } crypto_codegen = { package = "codegen", git = "https://github.com/matter-labs/solidity_plonk_verifier.git", branch = "dev" } kzg = { package = "kzg", git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.2" } -vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" } -vise-exporter = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" } +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "a5bb80c9ce7168663114ee30e794d6dc32159ee4" } +vise-exporter = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "a5bb80c9ce7168663114ee30e794d6dc32159ee4" } zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc2" } zk_evm_1_3_1 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.1-rc2" } zk_evm_1_3_3 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc2" } zk_evm_1_4_0 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.0" } zk_evm_1_4_1 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.1" } zk_evm_1_5_0 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.5.0" } -zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "8833a9b7fef89d1ffc5c08d53a3560164bc1c694" } -zksync_consensus_bft = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "8833a9b7fef89d1ffc5c08d53a3560164bc1c694" } -zksync_consensus_crypto = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "8833a9b7fef89d1ffc5c08d53a3560164bc1c694" } -zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "8833a9b7fef89d1ffc5c08d53a3560164bc1c694" } -zksync_consensus_network = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "8833a9b7fef89d1ffc5c08d53a3560164bc1c694" } -zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "8833a9b7fef89d1ffc5c08d53a3560164bc1c694" } -zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "8833a9b7fef89d1ffc5c08d53a3560164bc1c694" } -zksync_consensus_utils = { version = "0.1.0", git = 
"https://github.com/matter-labs/era-consensus.git", rev = "8833a9b7fef89d1ffc5c08d53a3560164bc1c694" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "8833a9b7fef89d1ffc5c08d53a3560164bc1c694" } -zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "8833a9b7fef89d1ffc5c08d53a3560164bc1c694" } +zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } +zksync_consensus_bft = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } +zksync_consensus_crypto = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } +zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } +zksync_consensus_network = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } +zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } +zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } # "Local" dependencies multivm = { path = "core/lib/multivm" } diff --git a/prover/Cargo.lock b/prover/Cargo.lock index a0d1b1dcb84..a0f470ce63e 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -1079,7 +1079,8 @@ dependencies = [ [[package]] name = "compile-fmt" version = "0.1.0" -source = "git+https://github.com/slowli/compile-fmt.git?rev=c6a41c846c9a6f70cdba4b44c9f3922242ffcf12#c6a41c846c9a6f70cdba4b44c9f3922242ffcf12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bed69047ed42e52c7e38d6421eeb8ceefb4f2a2b52eed59137f7bad7908f6800" [[package]] name = "const-decoder" @@ -4141,9 +4142,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.21.2" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" +checksum = "c1ca959da22a332509f2a73ae9e5f23f9dcfc31fd3a54d71f159495bd5909baa" dependencies = [ "dtoa", "itoa", @@ -6607,7 +6608,7 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vise" version = "0.1.0" -source = "git+https://github.com/matter-labs/vise.git?rev=1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1#1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" +source = "git+https://github.com/matter-labs/vise.git?rev=a5bb80c9ce7168663114ee30e794d6dc32159ee4#a5bb80c9ce7168663114ee30e794d6dc32159ee4" dependencies = [ "compile-fmt", "elsa", @@ -6620,7 +6621,7 @@ dependencies = [ [[package]] name = "vise-exporter" version = "0.1.0" -source = 
"git+https://github.com/matter-labs/vise.git?rev=1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1#1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" +source = "git+https://github.com/matter-labs/vise.git?rev=a5bb80c9ce7168663114ee30e794d6dc32159ee4#a5bb80c9ce7168663114ee30e794d6dc32159ee4" dependencies = [ "hyper", "metrics-exporter-prometheus", @@ -6633,7 +6634,7 @@ dependencies = [ [[package]] name = "vise-macros" version = "0.1.0" -source = "git+https://github.com/matter-labs/vise.git?rev=1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1#1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" +source = "git+https://github.com/matter-labs/vise.git?rev=a5bb80c9ce7168663114ee30e794d6dc32159ee4#a5bb80c9ce7168663114ee30e794d6dc32159ee4" dependencies = [ "proc-macro2 1.0.78", "quote 1.0.35", @@ -7475,7 +7476,7 @@ dependencies = [ [[package]] name = "zksync_concurrency" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "once_cell", @@ -7505,7 +7506,7 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "blst", @@ -7523,7 +7524,7 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "bit-vec", @@ -7543,7 +7544,7 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "async-trait", @@ -7561,7 +7562,7 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "rand 0.8.5", "thiserror", @@ -7747,7 +7748,7 @@ dependencies = [ [[package]] name = "zksync_protobuf" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "bit-vec", @@ -7767,7 +7768,7 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" version = "0.1.0" -source = 
"git+https://github.com/matter-labs/era-consensus.git?rev=8833a9b7fef89d1ffc5c08d53a3560164bc1c694#8833a9b7fef89d1ffc5c08d53a3560164bc1c694" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" dependencies = [ "anyhow", "heck 0.5.0", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 2e9ee65ce7a..5968bdccf65 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -66,7 +66,7 @@ tokio = "1" toml_edit = "0.14.4" tracing = "0.1" tracing-subscriber = { version = "0.3" } -vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" } +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "a5bb80c9ce7168663114ee30e794d6dc32159ee4" } vk_setup_data_generator_server_fri = { path = "vk_setup_data_generator_server_fri" } vlog = { path = "../core/lib/vlog" } zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.1" } From ef12df73a891579a87903055acae02a25da03ff6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Grze=C5=9Bkiewicz?= Date: Tue, 9 Apr 2024 16:15:28 +0200 Subject: [PATCH 02/29] fix(en): improved tree recovery logs (#1619) Signed-off-by: tomg10 --- .../zksync_core/src/metadata_calculator/recovery/mod.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs index d1211ba1b86..c5d96197ead 100644 --- a/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs @@ -28,6 +28,7 @@ use std::{ fmt, ops, sync::atomic::{AtomicU64, Ordering}, + time::Instant, }; use anyhow::Context as _; @@ -213,6 +214,7 @@ impl AsyncTreeRecovery { pool: &ConnectionPool, stop_receiver: &watch::Receiver, ) -> anyhow::Result> { + let start_time = Instant::now(); let chunk_count = options.chunk_count; let chunks: Vec<_> = (0..chunk_count) .map(|chunk_id| uniform_hashed_keys_chunk(chunk_id, chunk_count)) @@ -261,9 +263,10 @@ impl AsyncTreeRecovery { snapshot.expected_root_hash ); let tree = tree.finalize().await; - let finalize_latency = finalize_latency.observe(); + finalize_latency.observe(); tracing::info!( - "Finished tree recovery in {finalize_latency:?}; resuming normal tree operation" + "Tree recovery has finished, the recovery took {:?}! resuming normal tree operation", + start_time.elapsed() ); Ok(Some(tree)) } From c0f3104b63b32f681cb11233d3d41efd09b888a7 Mon Sep 17 00:00:00 2001 From: Joonatan Saarhelo Date: Tue, 9 Apr 2024 15:19:21 +0100 Subject: [PATCH 03/29] fix: instruction count diff always N/A in VM perf comparison (#1608) Instruction count diff should now be correct. The problem wasn't found initially because counting instructions legitimately fails if the PR starts from a version where that wasn't supported yet. Also found and fixed a bug that hid performance improvements. 
--- .github/workflows/vm-perf-comparison.yml | 4 ++-- core/tests/vm-benchmark/src/compare_iai_results.rs | 2 +- core/tests/vm-benchmark/src/instruction_counts.rs | 12 +++++++++++- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index 555d13c9300..a6b2b71ce60 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -46,7 +46,7 @@ jobs: ci_run zk ci_run zk compiler system-contracts ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai - ci_run cd core/tests/vm_benchmark && cargo run --release --bin instruction-counts | tee base-opcodes || touch base-opcodes + ci_run cargo run --package vm-benchmark --release --bin instruction-counts | tee base-opcodes || touch base-opcodes ci_run yarn workspace system-contracts clean - name: checkout PR @@ -58,7 +58,7 @@ jobs: ci_run zk ci_run zk compiler system-contracts ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai - ci_run cd core/tests/vm_benchmark && cargo run --release --bin instruction-counts | tee pr-opcodes || touch pr-opcodes + ci_run cargo run --package vm-benchmark --release --bin instruction-counts | tee pr-opcodes || touch pr-opcodes EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) echo "speedup<<$EOF" >> $GITHUB_OUTPUT diff --git a/core/tests/vm-benchmark/src/compare_iai_results.rs b/core/tests/vm-benchmark/src/compare_iai_results.rs index 7ffadfc76be..b9b6440704c 100644 --- a/core/tests/vm-benchmark/src/compare_iai_results.rs +++ b/core/tests/vm-benchmark/src/compare_iai_results.rs @@ -25,7 +25,7 @@ fn main() { .intersection(&iai_after.keys().collect()) .flat_map(|&name| { let diff = percent_difference(iai_before[name], iai_after[name]); - if diff > 2. { + if diff.abs() > 2. { Some((name, format!("{:+.1}%", diff))) } else { None diff --git a/core/tests/vm-benchmark/src/instruction_counts.rs b/core/tests/vm-benchmark/src/instruction_counts.rs index 11f5d4c5ff6..a80d8a7ffd6 100644 --- a/core/tests/vm-benchmark/src/instruction_counts.rs +++ b/core/tests/vm-benchmark/src/instruction_counts.rs @@ -1,9 +1,19 @@ //! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed. +use std::path::Path; + use vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; fn main() { - for path in std::fs::read_dir("deployment_benchmarks").unwrap() { + // using source file location because this is just a script, the binary isn't meant to be reused + let benchmark_folder = Path::new(file!()) + .parent() + .unwrap() + .parent() + .unwrap() + .join("deployment_benchmarks"); + + for path in std::fs::read_dir(benchmark_folder).unwrap() { let path = path.unwrap().path(); let test_contract = std::fs::read(&path).expect("failed to read file"); From d32a01918b2e42b8187fb6740b510b9b8798cafe Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Tue, 9 Apr 2024 18:08:14 +0300 Subject: [PATCH 04/29] feat: remove unused variables in prover configs (#1564) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Remove unused variables from prover configs ## Why ❔ To keep the codebase clean ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
- [x] Spellcheck has been run via `zk spellcheck`. - [x] Linkcheck has been run via `zk linkcheck`. --- core/bin/zksync_server/src/main.rs | 3 +- core/lib/config/src/configs/fri_prover.rs | 3 -- core/lib/config/src/configs/mod.rs | 2 - .../config/src/configs/witness_generator.rs | 45 ----------------- core/lib/config/src/testonly.rs | 32 ------------ core/lib/env_config/src/fri_prover.rs | 6 --- core/lib/env_config/src/lib.rs | 1 - core/lib/env_config/src/witness_generator.rs | 50 ------------------- .../protobuf_config/src/proto/prover.proto | 4 +- core/lib/protobuf_config/src/prover.rs | 28 ----------- core/lib/zksync_core/src/proto/mod.proto | 2 +- .../zksync_core/src/temp_config_store/mod.rs | 3 +- etc/env/base/fri_prover.toml | 3 -- etc/env/base/witness_generator.toml | 7 --- etc/env/dev.toml | 1 - etc/env/file_based/general.yaml | 3 -- 16 files changed, 4 insertions(+), 189 deletions(-) delete mode 100644 core/lib/config/src/configs/witness_generator.rs delete mode 100644 core/lib/env_config/src/witness_generator.rs delete mode 100644 etc/env/base/witness_generator.toml diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index b6d6a3e7af8..1d8daf1b857 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -13,7 +13,7 @@ use zksync_config::{ house_keeper::HouseKeeperConfig, ContractsConfig, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, ObservabilityConfig, - PrometheusConfig, ProofDataHandlerConfig, WitnessGeneratorConfig, + PrometheusConfig, ProofDataHandlerConfig, }, ApiConfig, ContractVerifierConfig, DBConfig, ETHConfig, ETHWatchConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, @@ -269,7 +269,6 @@ fn load_env_config() -> anyhow::Result { fri_witness_generator_config: FriWitnessGeneratorConfig::from_env().ok(), prometheus_config: PrometheusConfig::from_env().ok(), proof_data_handler_config: ProofDataHandlerConfig::from_env().ok(), - witness_generator_config: WitnessGeneratorConfig::from_env().ok(), api_config: ApiConfig::from_env().ok(), db_config: DBConfig::from_env().ok(), eth_sender_config: ETHConfig::from_env().ok(), diff --git a/core/lib/config/src/configs/fri_prover.rs b/core/lib/config/src/configs/fri_prover.rs index f8b9b8adf1c..8afc40e9ca0 100644 --- a/core/lib/config/src/configs/fri_prover.rs +++ b/core/lib/config/src/configs/fri_prover.rs @@ -17,11 +17,8 @@ pub struct FriProverConfig { pub prometheus_port: u16, pub max_attempts: u32, pub generation_timeout_in_secs: u16, - pub base_layer_circuit_ids_to_be_verified: Vec, - pub recursive_layer_circuit_ids_to_be_verified: Vec, pub setup_load_mode: SetupLoadMode, pub specialized_group_id: u8, - pub witness_vector_generator_thread_count: Option, pub queue_capacity: usize, pub witness_vector_receiver_port: u16, pub zone_read_url: String, diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 19905e5d757..94fe69a441d 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -18,7 +18,6 @@ pub use self::{ proof_data_handler::ProofDataHandlerConfig, snapshots_creator::SnapshotsCreatorConfig, utils::PrometheusConfig, - witness_generator::WitnessGeneratorConfig, }; pub mod api; @@ -43,6 +42,5 @@ pub mod proof_data_handler; pub mod snapshots_creator; pub mod utils; pub mod wallets; -pub mod witness_generator; const BYTES_IN_MEGABYTE: usize = 1_024 * 1_024; diff --git 
a/core/lib/config/src/configs/witness_generator.rs b/core/lib/config/src/configs/witness_generator.rs deleted file mode 100644 index 010742e4b04..00000000000 --- a/core/lib/config/src/configs/witness_generator.rs +++ /dev/null @@ -1,45 +0,0 @@ -use std::time::Duration; - -// Built-in uses -// External uses -use serde::Deserialize; - -#[derive(Debug, Deserialize, Clone, PartialEq)] -pub enum BasicWitnessGeneratorDataSource { - FromPostgres, - FromPostgresShadowBlob, - FromBlob, -} - -/// Configuration for the witness generation -#[derive(Debug, Deserialize, Clone, PartialEq)] -pub struct WitnessGeneratorConfig { - /// Max time for witness to be generated - pub generation_timeout_in_secs: u16, - /// Currently only a single (largest) key is supported. - pub initial_setup_key_path: String, - /// https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key - pub key_download_url: String, - /// Max attempts for generating witness - pub max_attempts: u32, - // Percentage of the blocks that gets proven in the range [0.0, 1.0] - // when 0.0 implies all blocks are skipped and 1.0 implies all blocks are proven. - pub blocks_proving_percentage: Option, - pub dump_arguments_for_blocks: Vec, - // Optional l1 batch number to process block until(inclusive). - // This parameter is used in case of performing circuit upgrades(VK/Setup keys), - // to not let witness-generator pick new job and finish all the existing jobs with old circuit. - pub last_l1_batch_to_process: Option, - /// Where will basic Witness Generator load its data from - pub data_source: BasicWitnessGeneratorDataSource, -} - -impl WitnessGeneratorConfig { - pub fn witness_generation_timeout(&self) -> Duration { - Duration::from_secs(self.generation_timeout_in_secs as u64) - } - - pub fn last_l1_batch_to_process(&self) -> u32 { - self.last_l1_batch_to_process.unwrap_or(u32::MAX) - } -} diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 4c23573881d..39c1af11810 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -422,11 +422,8 @@ impl Distribution for EncodeDist { prometheus_port: self.sample(rng), max_attempts: self.sample(rng), generation_timeout_in_secs: self.sample(rng), - base_layer_circuit_ids_to_be_verified: self.sample_collect(rng), - recursive_layer_circuit_ids_to_be_verified: self.sample_collect(rng), setup_load_mode: self.sample(rng), specialized_group_id: self.sample(rng), - witness_vector_generator_thread_count: self.sample(rng), queue_capacity: self.sample(rng), witness_vector_receiver_port: self.sample(rng), zone_read_url: self.sample(rng), @@ -622,35 +619,6 @@ impl Distribution for EncodeDist { } } -impl Distribution for EncodeDist { - fn sample( - &self, - rng: &mut R, - ) -> configs::witness_generator::BasicWitnessGeneratorDataSource { - type T = configs::witness_generator::BasicWitnessGeneratorDataSource; - match rng.gen_range(0..2) { - 0 => T::FromPostgres, - 1 => T::FromPostgresShadowBlob, - _ => T::FromBlob, - } - } -} - -impl Distribution for EncodeDist { - fn sample(&self, rng: &mut R) -> configs::WitnessGeneratorConfig { - configs::WitnessGeneratorConfig { - generation_timeout_in_secs: self.sample(rng), - initial_setup_key_path: self.sample(rng), - key_download_url: self.sample(rng), - max_attempts: self.sample(rng), - blocks_proving_percentage: self.sample(rng), - dump_arguments_for_blocks: self.sample_collect(rng), - last_l1_batch_to_process: self.sample(rng), - data_source: self.sample(rng), - } - } -} - impl Distribution for 
EncodeDist { fn sample(&self, rng: &mut R) -> configs::ObservabilityConfig { configs::ObservabilityConfig { diff --git a/core/lib/env_config/src/fri_prover.rs b/core/lib/env_config/src/fri_prover.rs index 373d1e6f990..2a08472b680 100644 --- a/core/lib/env_config/src/fri_prover.rs +++ b/core/lib/env_config/src/fri_prover.rs @@ -28,11 +28,8 @@ mod tests { prometheus_port: 3315, max_attempts: 10, generation_timeout_in_secs: 300, - base_layer_circuit_ids_to_be_verified: vec![1, 5], - recursive_layer_circuit_ids_to_be_verified: vec![1, 2, 3], setup_load_mode: SetupLoadMode::FromDisk, specialized_group_id: 10, - witness_vector_generator_thread_count: Some(5), queue_capacity: 10, witness_vector_receiver_port: 3316, zone_read_url: "http://metadata.google.internal/computeMetadata/v1/instance/zone" @@ -57,11 +54,8 @@ mod tests { FRI_PROVER_PROMETHEUS_PORT="3315" FRI_PROVER_MAX_ATTEMPTS="10" FRI_PROVER_GENERATION_TIMEOUT_IN_SECS="300" - FRI_PROVER_BASE_LAYER_CIRCUIT_IDS_TO_BE_VERIFIED="1,5" - FRI_PROVER_RECURSIVE_LAYER_CIRCUIT_IDS_TO_BE_VERIFIED="1,2,3" FRI_PROVER_SETUP_LOAD_MODE="FromDisk" FRI_PROVER_SPECIALIZED_GROUP_ID="10" - FRI_PROVER_WITNESS_VECTOR_GENERATOR_THREAD_COUNT="5" FRI_PROVER_QUEUE_CAPACITY="10" FRI_PROVER_WITNESS_VECTOR_RECEIVER_PORT="3316" FRI_PROVER_ZONE_READ_URL="http://metadata.google.internal/computeMetadata/v1/instance/zone" diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs index a937cc89a3f..f6290020f38 100644 --- a/core/lib/env_config/src/lib.rs +++ b/core/lib/env_config/src/lib.rs @@ -20,7 +20,6 @@ mod observability; mod proof_data_handler; mod snapshots_creator; mod utils; -mod witness_generator; mod genesis; #[cfg(test)] diff --git a/core/lib/env_config/src/witness_generator.rs b/core/lib/env_config/src/witness_generator.rs deleted file mode 100644 index 28d7a9bbbd9..00000000000 --- a/core/lib/env_config/src/witness_generator.rs +++ /dev/null @@ -1,50 +0,0 @@ -use zksync_config::configs::WitnessGeneratorConfig; - -use crate::{envy_load, FromEnv}; - -impl FromEnv for WitnessGeneratorConfig { - fn from_env() -> anyhow::Result { - envy_load("witness", "WITNESS_") - } -} - -#[cfg(test)] -mod tests { - use zksync_config::configs::witness_generator::BasicWitnessGeneratorDataSource; - - use super::*; - use crate::test_utils::EnvMutex; - - static MUTEX: EnvMutex = EnvMutex::new(); - - fn expected_config() -> WitnessGeneratorConfig { - WitnessGeneratorConfig { - generation_timeout_in_secs: 900_u16, - initial_setup_key_path: "key".to_owned(), - key_download_url: "value".to_owned(), - max_attempts: 4, - blocks_proving_percentage: Some(30), - dump_arguments_for_blocks: vec![2, 3], - last_l1_batch_to_process: None, - data_source: BasicWitnessGeneratorDataSource::FromBlob, - } - } - - #[test] - fn from_env() { - let mut lock = MUTEX.lock(); - let config = r#" - WITNESS_GENERATION_TIMEOUT_IN_SECS=900 - WITNESS_INITIAL_SETUP_KEY_PATH="key" - WITNESS_KEY_DOWNLOAD_URL="value" - WITNESS_MAX_ATTEMPTS=4 - WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS="2,3" - WITNESS_BLOCKS_PROVING_PERCENTAGE="30" - WITNESS_DATA_SOURCE="FromBlob" - "#; - lock.set_env(config); - - let actual = WitnessGeneratorConfig::from_env().unwrap(); - assert_eq!(actual, expected_config()); - } -} diff --git a/core/lib/protobuf_config/src/proto/prover.proto b/core/lib/protobuf_config/src/proto/prover.proto index 36700cd555e..a365a2e2886 100644 --- a/core/lib/protobuf_config/src/proto/prover.proto +++ b/core/lib/protobuf_config/src/proto/prover.proto @@ -26,17 +26,15 @@ message Prover { optional uint32 
prometheus_port = 2; // required; u16 optional uint32 max_attempts = 3; // required optional uint32 generation_timeout_in_secs = 4; // required; s - repeated uint32 base_layer_circuit_ids_to_be_verified = 5; // required - repeated uint32 recursive_layer_circuit_ids_to_be_verified = 6; // required optional SetupLoadMode setup_load_mode = 7; // required optional uint32 specialized_group_id = 8; // required; u8 - optional uint64 witness_vector_generator_thread_count = 9; // optional optional uint64 queue_capacity = 10; // required optional uint32 witness_vector_receiver_port = 11; // required; u16 optional string zone_read_url = 12; // required optional uint32 availability_check_interval_in_secs = 21; // required; s optional bool shall_save_to_public_bucket = 13; // required optional config.object_store.ObjectStore object_store = 20; + reserved 5, 6, 9; reserved "base_layer_circuit_ids_to_be_verified", "recursive_layer_circuit_ids_to_be_verified", "witness_vector_generator_thread_count"; } diff --git a/core/lib/protobuf_config/src/prover.rs b/core/lib/protobuf_config/src/prover.rs index 12ff2378251..fc13db9d7d9 100644 --- a/core/lib/protobuf_config/src/prover.rs +++ b/core/lib/protobuf_config/src/prover.rs @@ -304,16 +304,6 @@ impl ProtoRepr for proto::Prover { generation_timeout_in_secs: required(&self.generation_timeout_in_secs) .and_then(|x| Ok((*x).try_into()?)) .context("generation_timeout_in_secs")?, - base_layer_circuit_ids_to_be_verified: self - .base_layer_circuit_ids_to_be_verified - .iter() - .map(|a| *a as u8) - .collect(), - recursive_layer_circuit_ids_to_be_verified: self - .recursive_layer_circuit_ids_to_be_verified - .iter() - .map(|a| *a as u8) - .collect(), setup_load_mode: required(&self.setup_load_mode) .and_then(|x| Ok(proto::SetupLoadMode::try_from(*x)?)) .context("setup_load_mode")? 
@@ -321,11 +311,6 @@ impl ProtoRepr for proto::Prover { specialized_group_id: required(&self.specialized_group_id) .and_then(|x| Ok((*x).try_into()?)) .context("specialized_group_id")?, - witness_vector_generator_thread_count: self - .witness_vector_generator_thread_count - .map(|x| x.try_into()) - .transpose() - .context("witness_vector_generator_thread_count")?, queue_capacity: required(&self.queue_capacity) .and_then(|x| Ok((*x).try_into()?)) .context("queue_capacity")?, @@ -351,21 +336,8 @@ impl ProtoRepr for proto::Prover { prometheus_port: Some(this.prometheus_port.into()), max_attempts: Some(this.max_attempts), generation_timeout_in_secs: Some(this.generation_timeout_in_secs.into()), - base_layer_circuit_ids_to_be_verified: this - .base_layer_circuit_ids_to_be_verified - .iter() - .map(|a| *a as u32) - .collect(), - recursive_layer_circuit_ids_to_be_verified: this - .recursive_layer_circuit_ids_to_be_verified - .iter() - .map(|a| *a as u32) - .collect(), setup_load_mode: Some(proto::SetupLoadMode::new(&this.setup_load_mode).into()), specialized_group_id: Some(this.specialized_group_id.into()), - witness_vector_generator_thread_count: this - .witness_vector_generator_thread_count - .map(|x| x.try_into().unwrap()), queue_capacity: Some(this.queue_capacity.try_into().unwrap()), witness_vector_receiver_port: Some(this.witness_vector_receiver_port.into()), zone_read_url: Some(this.zone_read_url.clone()), diff --git a/core/lib/zksync_core/src/proto/mod.proto b/core/lib/zksync_core/src/proto/mod.proto index 596eb505db7..7876c28c641 100644 --- a/core/lib/zksync_core/src/proto/mod.proto +++ b/core/lib/zksync_core/src/proto/mod.proto @@ -5,5 +5,5 @@ package zksync.core; import "zksync/core/consensus.proto"; message Secrets { - optional consensus.Secrets consensus = 1; // optional + optional consensus.Secrets consensus = 1; // optional } diff --git a/core/lib/zksync_core/src/temp_config_store/mod.rs b/core/lib/zksync_core/src/temp_config_store/mod.rs index 2c5769f1f86..f471a7b251c 100644 --- a/core/lib/zksync_core/src/temp_config_store/mod.rs +++ b/core/lib/zksync_core/src/temp_config_store/mod.rs @@ -11,7 +11,7 @@ use zksync_config::{ wallets::{AddressWallet, EthSender, StateKeeper, Wallet, Wallets}, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, GeneralConfig, - ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, WitnessGeneratorConfig, + ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, }, ApiConfig, ContractVerifierConfig, DBConfig, ETHConfig, ETHWatchConfig, GasAdjusterConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, @@ -56,7 +56,6 @@ pub struct TempConfigStore { pub fri_witness_generator_config: Option, pub prometheus_config: Option, pub proof_data_handler_config: Option, - pub witness_generator_config: Option, pub api_config: Option, pub db_config: Option, pub eth_sender_config: Option, diff --git a/etc/env/base/fri_prover.toml b/etc/env/base/fri_prover.toml index fc99e756cf5..7183d2c848c 100644 --- a/etc/env/base/fri_prover.toml +++ b/etc/env/base/fri_prover.toml @@ -3,11 +3,8 @@ setup_data_path = "/usr/src/setup-data" prometheus_port = 3315 max_attempts = 10 generation_timeout_in_secs = 600 -base_layer_circuit_ids_to_be_verified = "1" -recursive_layer_circuit_ids_to_be_verified = "1" setup_load_mode = "FromDisk" specialized_group_id = 100 -witness_vector_generator_thread_count = 5 queue_capacity = 10 witness_vector_receiver_port = 3316 zone_read_url = 
"http://metadata.google.internal/computeMetadata/v1/instance/zone" diff --git a/etc/env/base/witness_generator.toml b/etc/env/base/witness_generator.toml deleted file mode 100644 index c5df54919ad..00000000000 --- a/etc/env/base/witness_generator.toml +++ /dev/null @@ -1,7 +0,0 @@ -[witness] -generation_timeout_in_secs=900 -initial_setup_key_path="./keys/setup/setup_2^22.key" -key_download_url="https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^22.key" -max_attempts=1 -dump_arguments_for_blocks="2,3" -data_source="FromPostgres" \ No newline at end of file diff --git a/etc/env/dev.toml b/etc/env/dev.toml index e95ad7b9e9e..b50b6ecc5bf 100644 --- a/etc/env/dev.toml +++ b/etc/env/dev.toml @@ -14,7 +14,6 @@ base = [ 'base/nfs.toml', 'base/rust.toml', 'base/private.toml', - 'base/witness_generator.toml', 'base/house_keeper.toml', 'base/fri_prover.toml', 'base/fri_witness_generator.toml', diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 2cf7a420465..c251c667d33 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -162,11 +162,8 @@ prover: prometheus_port: 3315 max_attempts: 10 generation_timeout_in_secs: 600 - base_layer_circuit_ids_to_be_verified: [ 1 ] - recursive_layer_circuit_ids_to_be_verified: [ 1 ] setup_load_mode: FROM_DISK specialized_group_id: 100 - witness_vector_generator_thread_count: 5 queue_capacity: 10 witness_vector_receiver_port: 3316 availability_check_interval_in_secs: 10000 From e3b826030c952d6326f2ab9a971462297ec26cf1 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 9 Apr 2024 18:32:09 +0300 Subject: [PATCH 05/29] chore(db): Wrap `sqlx` errors in DAL (misc queries) (#1607) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Continues wrapping `sqlx` errors in the core DAL crate, dealing with most queries (other than a couple of large components: Ethereum sender and contracts verifier). ## Why ❔ See #1522 for the reasoning. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. - [x] Linkcheck has been run via `zk linkcheck`. 
--- core/lib/circuit_breaker/src/lib.rs | 4 +- core/lib/circuit_breaker/src/metrics.rs | 4 +- .../circuit_breaker/src/replication_lag.rs | 21 +- core/lib/config/src/configs/chain.rs | 5 + ...fd17f833fb15b58b0110c7cc566946db98e76.json | 94 --------- ...5ede2be9770b3799a9ab64fe9690b6eb0a48b.json | 94 +++++++++ .../src/basic_witness_input_producer_dal.rs | 11 +- core/lib/dal/src/blocks_dal.rs | 180 +++++++++--------- core/lib/dal/src/blocks_web3_dal.rs | 37 ++-- core/lib/dal/src/events_web3_dal.rs | 110 ++++++----- core/lib/dal/src/factory_deps_dal.rs | 15 +- .../lib/dal/src/protocol_versions_web3_dal.rs | 30 +-- core/lib/dal/src/snapshots_dal.rs | 7 +- core/lib/dal/src/storage_logs_dedup_dal.rs | 6 +- core/lib/dal/src/storage_web3_dal.rs | 14 +- core/lib/dal/src/system_dal.rs | 14 +- core/lib/dal/src/transactions_dal.rs | 98 +++------- core/lib/db_connection/src/instrument.rs | 49 ++++- core/lib/state/src/rocksdb/mod.rs | 5 +- core/lib/vm_utils/src/storage.rs | 4 +- .../src/api_server/execution_sandbox/apply.rs | 4 +- .../src/api_server/execution_sandbox/mod.rs | 8 +- .../src/api_server/web3/namespaces/eth.rs | 6 +- .../src/api_server/web3/namespaces/zks.rs | 21 +- .../zksync_core/src/api_server/web3/state.rs | 25 +-- .../src/basic_witness_input_producer/mod.rs | 27 +-- .../src/house_keeper/blocks_state_reporter.rs | 32 ++-- core/lib/zksync_core/src/lib.rs | 2 +- .../src/metadata_calculator/helpers.rs | 3 +- .../src/state_keeper/io/common/mod.rs | 3 +- .../state_keeper/io/fee_address_migration.rs | 12 +- .../src/state_keeper/mempool_actor.rs | 6 +- .../lib/zksync_core/src/sync_layer/fetcher.rs | 7 +- .../node/node_framework/examples/main_node.rs | 2 +- .../implementations/layers/web3_api/server.rs | 8 +- 35 files changed, 481 insertions(+), 487 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-d70cfc158e31dd2d5c942d24f81fd17f833fb15b58b0110c7cc566946db98e76.json create mode 100644 core/lib/dal/.sqlx/query-dcb51063c12341785e57f221e2d5ede2be9770b3799a9ab64fe9690b6eb0a48b.json diff --git a/core/lib/circuit_breaker/src/lib.rs b/core/lib/circuit_breaker/src/lib.rs index ad2f3e20a79..adb56db1f3e 100644 --- a/core/lib/circuit_breaker/src/lib.rs +++ b/core/lib/circuit_breaker/src/lib.rs @@ -33,8 +33,8 @@ impl CircuitBreakers { pub enum CircuitBreakerError { #[error("System has failed L1 transaction")] FailedL1Transaction, - #[error("Replication lag ({0}) is above the threshold ({1})")] - ReplicationLag(u32, u32), + #[error("Replication lag ({lag:?}) is above the threshold ({threshold:?})")] + ReplicationLag { lag: Duration, threshold: Duration }, #[error("Internal error running circuit breaker checks")] Internal(#[from] anyhow::Error), } diff --git a/core/lib/circuit_breaker/src/metrics.rs b/core/lib/circuit_breaker/src/metrics.rs index 8bd622d6f73..32b67c2417e 100644 --- a/core/lib/circuit_breaker/src/metrics.rs +++ b/core/lib/circuit_breaker/src/metrics.rs @@ -1,12 +1,14 @@ //! Circuit breaker metrics. +use std::time::Duration; + use vise::{Gauge, Global, Metrics}; #[derive(Debug, Metrics)] #[metrics(prefix = "circuit_breaker")] pub(crate) struct CircuitBreakerMetrics { /// Replication lag for Postgres in seconds. 
- pub replication_lag: Gauge, + pub replication_lag: Gauge, } #[vise::register] diff --git a/core/lib/circuit_breaker/src/replication_lag.rs b/core/lib/circuit_breaker/src/replication_lag.rs index ad55bb99402..001f46d2436 100644 --- a/core/lib/circuit_breaker/src/replication_lag.rs +++ b/core/lib/circuit_breaker/src/replication_lag.rs @@ -1,12 +1,13 @@ -use anyhow::Context as _; -use zksync_dal::{ConnectionPool, Core, CoreDal}; +use std::time::Duration; + +use zksync_dal::{ConnectionPool, Core, CoreDal, DalError}; use crate::{metrics::METRICS, CircuitBreaker, CircuitBreakerError}; #[derive(Debug)] pub struct ReplicationLagChecker { pub pool: ConnectionPool, - pub replication_lag_limit_sec: Option, + pub replication_lag_limit: Option, } #[async_trait::async_trait] @@ -21,15 +22,15 @@ impl CircuitBreaker for ReplicationLagChecker { .connection_tagged("circuit_breaker") .await? .system_dal() - .get_replication_lag_sec() + .get_replication_lag() .await - .context("failed getting replication lag")?; - METRICS.replication_lag.set(lag.into()); + .map_err(DalError::generalize)?; + METRICS.replication_lag.set(lag); - match self.replication_lag_limit_sec { - Some(replication_lag_limit_sec) if lag > replication_lag_limit_sec => Err( - CircuitBreakerError::ReplicationLag(lag, replication_lag_limit_sec), - ), + match self.replication_lag_limit { + Some(threshold) if lag > threshold => { + Err(CircuitBreakerError::ReplicationLag { lag, threshold }) + } _ => Ok(()), } } diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 10e62681283..42c4063243c 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -239,6 +239,11 @@ impl CircuitBreakerConfig { pub fn http_req_retry_interval(&self) -> Duration { Duration::from_secs(self.http_req_retry_interval_sec as u64) } + + pub fn replication_lag_limit(&self) -> Option { + self.replication_lag_limit_sec + .map(|limit| Duration::from_secs(limit.into())) + } } #[derive(Debug, Deserialize, Clone, PartialEq)] diff --git a/core/lib/dal/.sqlx/query-d70cfc158e31dd2d5c942d24f81fd17f833fb15b58b0110c7cc566946db98e76.json b/core/lib/dal/.sqlx/query-d70cfc158e31dd2d5c942d24f81fd17f833fb15b58b0110c7cc566946db98e76.json deleted file mode 100644 index bff9c151373..00000000000 --- a/core/lib/dal/.sqlx/query-d70cfc158e31dd2d5c942d24f81fd17f833fb15b58b0110c7cc566946db98e76.json +++ /dev/null @@ -1,94 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n events_select AS (\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx\n FROM\n events\n WHERE\n miniblock_number > $1\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n )\n SELECT\n miniblocks.hash AS \"block_hash?\",\n address AS \"address!\",\n topic1 AS \"topic1!\",\n topic2 AS \"topic2!\",\n topic3 AS \"topic3!\",\n topic4 AS \"topic4!\",\n value AS \"value!\",\n miniblock_number AS \"miniblock_number!\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n tx_hash AS \"tx_hash!\",\n tx_index_in_block AS \"tx_index_in_block!\",\n event_index_in_block AS \"event_index_in_block!\",\n event_index_in_tx AS \"event_index_in_tx!\"\n FROM\n events_select\n INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "block_hash?", - "type_info": "Bytea" - }, - { - "ordinal": 1, - 
"name": "address!", - "type_info": "Bytea" - }, - { - "ordinal": 2, - "name": "topic1!", - "type_info": "Bytea" - }, - { - "ordinal": 3, - "name": "topic2!", - "type_info": "Bytea" - }, - { - "ordinal": 4, - "name": "topic3!", - "type_info": "Bytea" - }, - { - "ordinal": 5, - "name": "topic4!", - "type_info": "Bytea" - }, - { - "ordinal": 6, - "name": "value!", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "miniblock_number!", - "type_info": "Int8" - }, - { - "ordinal": 8, - "name": "l1_batch_number?", - "type_info": "Int8" - }, - { - "ordinal": 9, - "name": "tx_hash!", - "type_info": "Bytea" - }, - { - "ordinal": 10, - "name": "tx_index_in_block!", - "type_info": "Int4" - }, - { - "ordinal": 11, - "name": "event_index_in_block!", - "type_info": "Int4" - }, - { - "ordinal": 12, - "name": "event_index_in_tx!", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - true, - false, - false, - false, - false - ] - }, - "hash": "d70cfc158e31dd2d5c942d24f81fd17f833fb15b58b0110c7cc566946db98e76" -} diff --git a/core/lib/dal/.sqlx/query-dcb51063c12341785e57f221e2d5ede2be9770b3799a9ab64fe9690b6eb0a48b.json b/core/lib/dal/.sqlx/query-dcb51063c12341785e57f221e2d5ede2be9770b3799a9ab64fe9690b6eb0a48b.json new file mode 100644 index 00000000000..0ee5b247c33 --- /dev/null +++ b/core/lib/dal/.sqlx/query-dcb51063c12341785e57f221e2d5ede2be9770b3799a9ab64fe9690b6eb0a48b.json @@ -0,0 +1,94 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n events_select AS (\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx\n FROM\n events\n WHERE\n miniblock_number > $1\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n )\n SELECT\n miniblocks.hash AS \"block_hash?\",\n address AS \"address!\",\n topic1 AS \"topic1!\",\n topic2 AS \"topic2!\",\n topic3 AS \"topic3!\",\n topic4 AS \"topic4!\",\n value AS \"value!\",\n miniblock_number AS \"miniblock_number!\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n tx_hash AS \"tx_hash!\",\n tx_index_in_block AS \"tx_index_in_block!\",\n event_index_in_block AS \"event_index_in_block!\",\n event_index_in_tx AS \"event_index_in_tx!\"\n FROM\n events_select\n INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "block_hash?", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "address!", + "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "topic1!", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "topic2!", + "type_info": "Bytea" + }, + { + "ordinal": 4, + "name": "topic3!", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "topic4!", + "type_info": "Bytea" + }, + { + "ordinal": 6, + "name": "value!", + "type_info": "Bytea" + }, + { + "ordinal": 7, + "name": "miniblock_number!", + "type_info": "Int8" + }, + { + "ordinal": 8, + "name": "l1_batch_number?", + "type_info": "Int8" + }, + { + "ordinal": 9, + "name": "tx_hash!", + "type_info": "Bytea" + }, + { + "ordinal": 10, + "name": "tx_index_in_block!", + "type_info": "Int4" + }, + { + "ordinal": 11, + "name": "event_index_in_block!", + "type_info": "Int4" + }, + { + "ordinal": 12, + "name": "event_index_in_tx!", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + 
"nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + true, + false, + false, + false, + false + ] + }, + "hash": "dcb51063c12341785e57f221e2d5ede2be9770b3799a9ab64fe9690b6eb0a48b" +} diff --git a/core/lib/dal/src/basic_witness_input_producer_dal.rs b/core/lib/dal/src/basic_witness_input_producer_dal.rs index f207dfa77c4..2a30226a761 100644 --- a/core/lib/dal/src/basic_witness_input_producer_dal.rs +++ b/core/lib/dal/src/basic_witness_input_producer_dal.rs @@ -129,7 +129,7 @@ impl BasicWitnessInputProducerDal<'_, '_> { pub async fn get_basic_witness_input_producer_job_attempts( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let attempts = sqlx::query!( r#" SELECT @@ -141,7 +141,9 @@ impl BasicWitnessInputProducerDal<'_, '_> { "#, i64::from(l1_batch_number.0), ) - .fetch_optional(self.storage.conn()) + .instrument("get_basic_witness_input_producer_job_attempts") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_optional(self.storage) .await? .map(|job| job.attempts as u32); @@ -218,13 +220,14 @@ impl BasicWitnessInputProducerDal<'_, '_> { /// These functions should only be used for tests. impl BasicWitnessInputProducerDal<'_, '_> { - pub async fn delete_all_jobs(&mut self) -> sqlx::Result<()> { + pub async fn delete_all_jobs(&mut self) -> DalResult<()> { sqlx::query!( r#" DELETE FROM basic_witness_input_producer_jobs "# ) - .execute(self.storage.conn()) + .instrument("delete_all_bwip_jobs") + .execute(self.storage) .await?; Ok(()) } diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index d930784612c..aa66cfe251e 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -403,7 +403,7 @@ impl BlocksDal<'_, '_> { number_range: ops::RangeInclusive, eth_tx_id: u32, aggregation_type: AggregatedActionType, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { match aggregation_type { AggregatedActionType::Commit => { sqlx::query!( @@ -419,7 +419,10 @@ impl BlocksDal<'_, '_> { i64::from(number_range.start().0), i64::from(number_range.end().0) ) - .execute(self.storage.conn()) + .instrument("set_eth_tx_id#commit") + .with_arg("number_range", &number_range) + .with_arg("eth_tx_id", ð_tx_id) + .execute(self.storage) .await?; } AggregatedActionType::PublishProofOnchain => { @@ -436,7 +439,10 @@ impl BlocksDal<'_, '_> { i64::from(number_range.start().0), i64::from(number_range.end().0) ) - .execute(self.storage.conn()) + .instrument("set_eth_tx_id#prove") + .with_arg("number_range", &number_range) + .with_arg("eth_tx_id", ð_tx_id) + .execute(self.storage) .await?; } AggregatedActionType::Execute => { @@ -453,7 +459,10 @@ impl BlocksDal<'_, '_> { i64::from(number_range.start().0), i64::from(number_range.end().0) ) - .execute(self.storage.conn()) + .instrument("set_eth_tx_id#execute") + .with_arg("number_range", &number_range) + .with_arg("eth_tx_id", ð_tx_id) + .execute(self.storage) .await?; } } @@ -1018,7 +1027,7 @@ impl BlocksDal<'_, '_> { /// Returns the number of the last L1 batch for which an Ethereum commit tx was sent and confirmed. pub async fn get_number_of_last_l1_batch_committed_on_eth( &mut self, - ) -> Result, sqlx::Error> { + ) -> DalResult> { Ok(sqlx::query!( r#" SELECT @@ -1034,13 +1043,14 @@ impl BlocksDal<'_, '_> { 1 "# ) - .fetch_optional(self.storage.conn()) + .instrument("get_number_of_last_l1_batch_committed_on_eth") + .fetch_optional(self.storage) .await? 
.map(|row| L1BatchNumber(row.number as u32))) } /// Returns the number of the last L1 batch for which an Ethereum prove tx exists in the database. - pub async fn get_last_l1_batch_with_prove_tx(&mut self) -> sqlx::Result { + pub async fn get_last_l1_batch_with_prove_tx(&mut self) -> DalResult { let row = sqlx::query!( r#" SELECT @@ -1051,7 +1061,8 @@ impl BlocksDal<'_, '_> { eth_prove_tx_id IS NOT NULL "# ) - .fetch_one(self.storage.conn()) + .instrument("get_last_l1_batch_with_prove_tx") + .fetch_one(self.storage) .await?; Ok(L1BatchNumber(row.number as u32)) @@ -1060,7 +1071,7 @@ impl BlocksDal<'_, '_> { pub async fn get_eth_commit_tx_id( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let row = sqlx::query!( r#" SELECT @@ -1072,7 +1083,9 @@ impl BlocksDal<'_, '_> { "#, i64::from(l1_batch_number.0) ) - .fetch_optional(self.storage.conn()) + .instrument("get_eth_commit_tx_id") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_optional(self.storage) .await?; Ok(row.and_then(|row| row.eth_commit_tx_id.map(|n| n as u64))) @@ -1081,7 +1094,7 @@ impl BlocksDal<'_, '_> { /// Returns the number of the last L1 batch for which an Ethereum prove tx was sent and confirmed. pub async fn get_number_of_last_l1_batch_proven_on_eth( &mut self, - ) -> sqlx::Result> { + ) -> DalResult> { Ok(sqlx::query!( r#" SELECT @@ -1097,7 +1110,8 @@ impl BlocksDal<'_, '_> { 1 "# ) - .fetch_optional(self.storage.conn()) + .instrument("get_number_of_last_l1_batch_proven_on_eth") + .fetch_optional(self.storage) .await? .map(|record| L1BatchNumber(record.number as u32))) } @@ -1105,7 +1119,7 @@ impl BlocksDal<'_, '_> { /// Returns the number of the last L1 batch for which an Ethereum execute tx was sent and confirmed. pub async fn get_number_of_last_l1_batch_executed_on_eth( &mut self, - ) -> sqlx::Result> { + ) -> DalResult> { Ok(sqlx::query!( r#" SELECT @@ -1121,7 +1135,8 @@ impl BlocksDal<'_, '_> { 1 "# ) - .fetch_optional(self.storage.conn()) + .instrument("get_number_of_last_l1_batch_executed_on_eth") + .fetch_optional(self.storage) .await? .map(|row| L1BatchNumber(row.number as u32))) } @@ -1209,25 +1224,6 @@ impl BlocksDal<'_, '_> { Ok(l1_batches) } - pub async fn set_skip_proof_for_l1_batch( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> sqlx::Result<()> { - sqlx::query!( - r#" - UPDATE l1_batches - SET - skip_proof = TRUE - WHERE - number = $1 - "#, - i64::from(l1_batch_number.0) - ) - .execute(self.storage.conn()) - .await?; - Ok(()) - } - /// This method returns batches that are committed on L1 and witness jobs for them are skipped. pub async fn get_skipped_for_proof_l1_batches( &mut self, @@ -1695,7 +1691,7 @@ impl BlocksDal<'_, '_> { pub async fn get_l1_batch_state_root_and_timestamp( &mut self, number: L1BatchNumber, - ) -> Result, sqlx::Error> { + ) -> DalResult> { let Some(row) = sqlx::query!( r#" SELECT @@ -1708,7 +1704,9 @@ impl BlocksDal<'_, '_> { "#, i64::from(number.0) ) - .fetch_optional(self.storage.conn()) + .instrument("get_l1_batch_state_root_and_timestamp") + .with_arg("number", &number) + .fetch_optional(self.storage) .await? else { return Ok(None); @@ -1968,11 +1966,12 @@ impl BlocksDal<'_, '_> { /// Returns `true` if there exists a non-sealed batch (i.e. there is one+ stored miniblock that isn't assigned /// to any batch yet). 
- pub async fn pending_batch_exists(&mut self) -> sqlx::Result { + pub async fn pending_batch_exists(&mut self) -> DalResult { let count = sqlx::query_scalar!( "SELECT COUNT(miniblocks.number) FROM miniblocks WHERE l1_batch_number IS NULL" ) - .fetch_one(self.storage.conn()) + .instrument("pending_batch_exists") + .fetch_one(self.storage) .await? .unwrap_or(0); @@ -1981,7 +1980,7 @@ impl BlocksDal<'_, '_> { // methods used for measuring Eth tx stage transition latencies // and emitting metrics base on these measured data - pub async fn oldest_uncommitted_batch_timestamp(&mut self) -> sqlx::Result> { + pub async fn oldest_uncommitted_batch_timestamp(&mut self) -> DalResult> { Ok(sqlx::query!( r#" SELECT @@ -1997,12 +1996,13 @@ impl BlocksDal<'_, '_> { 1 "#, ) - .fetch_optional(self.storage.conn()) + .instrument("oldest_uncommitted_batch_timestamp") + .fetch_optional(self.storage) .await? .map(|row| row.timestamp as u64)) } - pub async fn oldest_unproved_batch_timestamp(&mut self) -> sqlx::Result> { + pub async fn oldest_unproved_batch_timestamp(&mut self) -> DalResult> { Ok(sqlx::query!( r#" SELECT @@ -2018,12 +2018,13 @@ impl BlocksDal<'_, '_> { 1 "#, ) - .fetch_optional(self.storage.conn()) + .instrument("oldest_unproved_batch_timestamp") + .fetch_optional(self.storage) .await? .map(|row| row.timestamp as u64)) } - pub async fn oldest_unexecuted_batch_timestamp(&mut self) -> Result, sqlx::Error> { + pub async fn oldest_unexecuted_batch_timestamp(&mut self) -> DalResult> { Ok(sqlx::query!( r#" SELECT @@ -2039,7 +2040,8 @@ impl BlocksDal<'_, '_> { 1 "#, ) - .fetch_optional(self.storage.conn()) + .instrument("oldest_unexecuted_batch_timestamp") + .fetch_optional(self.storage) .await? .map(|row| row.timestamp as u64)) } @@ -2153,30 +2155,10 @@ impl BlocksDal<'_, '_> { Ok(Some(Address::from_slice(&row.fee_account_address))) } - pub async fn get_virtual_blocks_for_miniblock( - &mut self, - miniblock_number: MiniblockNumber, - ) -> sqlx::Result> { - Ok(sqlx::query!( - r#" - SELECT - virtual_blocks - FROM - miniblocks - WHERE - number = $1 - "#, - miniblock_number.0 as i32 - ) - .fetch_optional(self.storage.conn()) - .await? - .map(|row| row.virtual_blocks as u32)) - } - pub async fn get_first_l1_batch_number_for_version( &mut self, protocol_version: ProtocolVersionId, - ) -> sqlx::Result> { + ) -> DalResult> { Ok(sqlx::query!( r#" SELECT @@ -2188,7 +2170,9 @@ impl BlocksDal<'_, '_> { "#, protocol_version as i32 ) - .fetch_optional(self.storage.conn()) + .instrument("get_first_l1_batch_number_for_version") + .with_arg("protocol_version", &protocol_version) + .fetch_optional(self.storage) .await? 
.and_then(|row| row.min) .map(|min| L1BatchNumber(min as u32))) @@ -2198,7 +2182,7 @@ impl BlocksDal<'_, '_> { &mut self, l1_batch_range: ops::RangeInclusive, protocol_version: ProtocolVersionId, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { sqlx::query!( r#" UPDATE l1_batches @@ -2211,7 +2195,10 @@ impl BlocksDal<'_, '_> { i64::from(l1_batch_range.start().0), i64::from(l1_batch_range.end().0), ) - .execute(self.storage.conn()) + .instrument("reset_protocol_version_for_l1_batches") + .with_arg("l1_batch_range", &l1_batch_range) + .with_arg("protocol_version", &protocol_version) + .execute(self.storage) .await?; Ok(()) } @@ -2220,7 +2207,7 @@ impl BlocksDal<'_, '_> { &mut self, miniblock_range: ops::RangeInclusive, protocol_version: ProtocolVersionId, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { sqlx::query!( r#" UPDATE miniblocks @@ -2233,7 +2220,10 @@ impl BlocksDal<'_, '_> { i64::from(miniblock_range.start().0), i64::from(miniblock_range.end().0), ) - .execute(self.storage.conn()) + .instrument("reset_protocol_version_for_miniblocks") + .with_arg("miniblock_range", &miniblock_range) + .with_arg("protocol_version", &protocol_version) + .execute(self.storage) .await?; Ok(()) } @@ -2292,7 +2282,7 @@ impl BlocksDal<'_, '_> { /// Copies `fee_account_address` for pending miniblocks (ones without an associated L1 batch) /// from the last L1 batch. Returns the number of affected rows. - pub async fn copy_fee_account_address_for_pending_miniblocks(&mut self) -> sqlx::Result { + pub async fn copy_fee_account_address_for_pending_miniblocks(&mut self) -> DalResult { let execution_result = sqlx::query!( r#" UPDATE miniblocks @@ -2312,13 +2302,14 @@ impl BlocksDal<'_, '_> { AND fee_account_address = '\x0000000000000000000000000000000000000000'::bytea "# ) - .execute(self.storage.conn()) + .instrument("copy_fee_account_address_for_pending_miniblocks") + .execute(self.storage) .await?; Ok(execution_result.rows_affected()) } - pub async fn check_l1_batches_have_fee_account_address(&mut self) -> sqlx::Result { + pub async fn check_l1_batches_have_fee_account_address(&mut self) -> DalResult { let count = sqlx::query_scalar!( r#" SELECT COUNT(*) @@ -2326,7 +2317,8 @@ impl BlocksDal<'_, '_> { WHERE table_name = 'l1_batches' AND column_name = 'fee_account_address' "# ) - .fetch_one(self.storage.conn()) + .instrument("check_l1_batches_have_fee_account_address") + .fetch_one(self.storage) .await? 
.unwrap_or(0); @@ -2338,7 +2330,7 @@ impl BlocksDal<'_, '_> { pub async fn copy_fee_account_address_for_miniblocks( &mut self, numbers: ops::RangeInclusive, - ) -> sqlx::Result { + ) -> DalResult { let execution_result = sqlx::query!( r#" UPDATE miniblocks @@ -2354,7 +2346,9 @@ impl BlocksDal<'_, '_> { i64::from(numbers.start().0), i64::from(numbers.end().0) ) - .execute(self.storage.conn()) + .instrument("copy_fee_account_address_for_miniblocks") + .with_arg("numbers", &numbers) + .execute(self.storage) .await?; Ok(execution_result.rows_affected()) @@ -2365,7 +2359,7 @@ impl BlocksDal<'_, '_> { &mut self, l1_batch: L1BatchNumber, fee_account_address: Address, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { sqlx::query!( r#" UPDATE l1_batches @@ -2377,7 +2371,10 @@ impl BlocksDal<'_, '_> { fee_account_address.as_bytes(), i64::from(l1_batch.0) ) - .execute(self.storage.conn()) + .instrument("set_l1_batch_fee_address") + .with_arg("l1_batch", &l1_batch) + .with_arg("fee_account_address", &fee_account_address) + .execute(self.storage) .await?; Ok(()) } @@ -2388,9 +2385,9 @@ impl BlocksDal<'_, '_> { // The actual l1 batch hash is only set by the metadata calculator. pub async fn set_l1_batch_hash( &mut self, - batch_num: L1BatchNumber, + batch_number: L1BatchNumber, hash: H256, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { sqlx::query!( r#" UPDATE l1_batches @@ -2400,9 +2397,12 @@ impl BlocksDal<'_, '_> { number = $2 "#, hash.as_bytes(), - i64::from(batch_num.0) + i64::from(batch_number.0) ) - .execute(self.storage.conn()) + .instrument("set_l1_batch_hash") + .with_arg("batch_number", &batch_number) + .with_arg("hash", &hash) + .execute(self.storage) .await?; Ok(()) } @@ -2420,19 +2420,11 @@ impl BlocksDal<'_, '_> { } /// Deletes all miniblocks and L1 batches, including the genesis ones. Should only be used in tests. 
- pub async fn delete_genesis(&mut self) -> anyhow::Result<()> { - self.delete_miniblocks_inner(None) - .await - .context("delete_miniblocks_inner()")?; - self.delete_l1_batches_inner(None) - .await - .context("delete_l1_batches_inner()")?; - self.delete_initial_writes_inner(None) - .await - .context("delete_initial_writes_inner()")?; - self.delete_logs_inner() - .await - .context("delete_logs_inner()")?; + pub async fn delete_genesis(&mut self) -> DalResult<()> { + self.delete_miniblocks_inner(None).await?; + self.delete_l1_batches_inner(None).await?; + self.delete_initial_writes_inner(None).await?; + self.delete_logs_inner().await?; Ok(()) } } diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 5af1e4def1c..3e5d8ca918c 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -32,7 +32,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_api_block( &mut self, block_number: MiniblockNumber, - ) -> sqlx::Result>> { + ) -> DalResult>> { let rows = sqlx::query!( r#" SELECT @@ -59,7 +59,9 @@ impl BlocksWeb3Dal<'_, '_> { "#, i64::from(block_number.0) ) - .fetch_all(self.storage.conn()) + .instrument("get_api_block") + .with_arg("block_number", &block_number) + .fetch_all(self.storage) .await?; let block = rows.into_iter().fold(None, |prev_block, row| { @@ -106,7 +108,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_block_tx_count( &mut self, block_number: MiniblockNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let tx_count = sqlx::query_scalar!( r#" SELECT l1_tx_count + l2_tx_count AS tx_count FROM miniblocks @@ -114,7 +116,9 @@ impl BlocksWeb3Dal<'_, '_> { "#, i64::from(block_number.0) ) - .fetch_optional(self.storage.conn()) + .instrument("get_block_tx_count") + .with_arg("block_number", &block_number) + .fetch_optional(self.storage) .await? .flatten(); @@ -206,7 +210,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn resolve_block_id( &mut self, block_id: api::BlockId, - ) -> sqlx::Result> { + ) -> DalResult> { struct BlockNumberRow { number: Option, } @@ -256,7 +260,11 @@ impl BlocksWeb3Dal<'_, '_> { } ); - let row = query.fetch_optional(self.storage.conn()).await?; + let row = query + .instrument("resolve_block_id") + .with_arg("block_id", &block_id) + .fetch_optional(self.storage) + .await?; let block_number = row .and_then(|row| row.number) .map(|number| MiniblockNumber(number as u32)); @@ -270,7 +278,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_expected_l1_batch_timestamp( &mut self, l1_batch_number: &ResolvedL1BatchForMiniblock, - ) -> sqlx::Result> { + ) -> DalResult> { if let Some(miniblock_l1_batch) = l1_batch_number.miniblock_l1_batch { Ok(sqlx::query!( r#" @@ -287,7 +295,9 @@ impl BlocksWeb3Dal<'_, '_> { "#, i64::from(miniblock_l1_batch.0) ) - .fetch_optional(self.storage.conn()) + .instrument("get_expected_l1_batch_timestamp#sealed_miniblock") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_optional(self.storage) .await? .map(|row| row.timestamp as u64)) } else { @@ -300,6 +310,7 @@ impl BlocksWeb3Dal<'_, '_> { } else { l1_batch_number.pending_l1_batch - 1 }; + Ok(sqlx::query!( r#" SELECT @@ -328,7 +339,9 @@ impl BlocksWeb3Dal<'_, '_> { "#, i64::from(prev_l1_batch_number.0) ) - .fetch_optional(self.storage.conn()) + .instrument("get_expected_l1_batch_timestamp#pending_miniblock") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_optional(self.storage) .await? 
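Taken out of diff form, the conversion applied to each query above follows one shape; the sketch below uses a hypothetical `get_batch_timestamp` method (its name and SQL are invented), while the imports and combinators are the ones this patch introduces. Where a single method issues several distinct queries, the instrumentation name gains a `#variant` suffix, as in `set_eth_tx_id#commit` / `set_eth_tx_id#prove` / `set_eth_tx_id#execute`.

```rust
use zksync_dal::Core;
use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt};
use zksync_types::L1BatchNumber;

// Hypothetical DAL method showing the target shape: `instrument` attaches a
// static query name (used for latency metrics and error reports), `with_arg`
// records argument values for those reports, and the instrumented fetch takes
// the whole `Connection` rather than a raw `conn()`.
pub async fn get_batch_timestamp(
    storage: &mut Connection<'_, Core>,
    number: L1BatchNumber,
) -> DalResult<Option<u64>> {
    Ok(sqlx::query!(
        "SELECT timestamp FROM l1_batches WHERE number = $1",
        i64::from(number.0)
    )
    .instrument("get_batch_timestamp")
    .with_arg("number", &number)
    .fetch_optional(storage)
    .await?
    .map(|row| row.timestamp as u64))
}
```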
.map(|row| row.timestamp as u64)) } @@ -337,7 +350,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_miniblock_hash( &mut self, block_number: MiniblockNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let hash = sqlx::query!( r#" SELECT @@ -349,7 +362,9 @@ impl BlocksWeb3Dal<'_, '_> { "#, i64::from(block_number.0) ) - .fetch_optional(self.storage.conn()) + .instrument("get_miniblock_hash") + .with_arg("block_number", &block_number) + .fetch_optional(self.storage) .await? .map(|row| H256::from_slice(&row.hash)); Ok(hash) diff --git a/core/lib/dal/src/events_web3_dal.rs b/core/lib/dal/src/events_web3_dal.rs index bcf8525b412..43c1d08c5b8 100644 --- a/core/lib/dal/src/events_web3_dal.rs +++ b/core/lib/dal/src/events_web3_dal.rs @@ -184,62 +184,60 @@ impl EventsWeb3Dal<'_, '_> { } pub async fn get_all_logs(&mut self, from_block: MiniblockNumber) -> DalResult> { - { - let db_logs: Vec = sqlx::query_as!( - StorageWeb3Log, - r#" - WITH - events_select AS ( - SELECT - address, - topic1, - topic2, - topic3, - topic4, - value, - miniblock_number, - tx_hash, - tx_index_in_block, - event_index_in_block, - event_index_in_tx - FROM - events - WHERE - miniblock_number > $1 - ORDER BY - miniblock_number ASC, - event_index_in_block ASC - ) - SELECT - miniblocks.hash AS "block_hash?", - address AS "address!", - topic1 AS "topic1!", - topic2 AS "topic2!", - topic3 AS "topic3!", - topic4 AS "topic4!", - value AS "value!", - miniblock_number AS "miniblock_number!", - miniblocks.l1_batch_number AS "l1_batch_number?", - tx_hash AS "tx_hash!", - tx_index_in_block AS "tx_index_in_block!", - event_index_in_block AS "event_index_in_block!", - event_index_in_tx AS "event_index_in_tx!" - FROM - events_select - INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number - ORDER BY - miniblock_number ASC, - event_index_in_block ASC - "#, - i64::from(from_block.0) - ) - .instrument("get_all_logs") - .with_arg("from_block", &from_block) - .fetch_all(self.storage) - .await?; - let logs = db_logs.into_iter().map(Into::into).collect(); - Ok(logs) - } + let db_logs: Vec = sqlx::query_as!( + StorageWeb3Log, + r#" + WITH + events_select AS ( + SELECT + address, + topic1, + topic2, + topic3, + topic4, + value, + miniblock_number, + tx_hash, + tx_index_in_block, + event_index_in_block, + event_index_in_tx + FROM + events + WHERE + miniblock_number > $1 + ORDER BY + miniblock_number ASC, + event_index_in_block ASC + ) + SELECT + miniblocks.hash AS "block_hash?", + address AS "address!", + topic1 AS "topic1!", + topic2 AS "topic2!", + topic3 AS "topic3!", + topic4 AS "topic4!", + value AS "value!", + miniblock_number AS "miniblock_number!", + miniblocks.l1_batch_number AS "l1_batch_number?", + tx_hash AS "tx_hash!", + tx_index_in_block AS "tx_index_in_block!", + event_index_in_block AS "event_index_in_block!", + event_index_in_tx AS "event_index_in_tx!" 
+ FROM + events_select + INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number + ORDER BY + miniblock_number ASC, + event_index_in_block ASC + "#, + i64::from(from_block.0) + ) + .instrument("get_all_logs") + .with_arg("from_block", &from_block) + .fetch_all(self.storage) + .await?; + let logs = db_logs.into_iter().map(Into::into).collect(); + Ok(logs) } } diff --git a/core/lib/dal/src/factory_deps_dal.rs b/core/lib/dal/src/factory_deps_dal.rs index 7b963b49350..66d86816845 100644 --- a/core/lib/dal/src/factory_deps_dal.rs +++ b/core/lib/dal/src/factory_deps_dal.rs @@ -143,7 +143,7 @@ impl FactoryDepsDal<'_, '_> { pub async fn get_factory_deps_for_revert( &mut self, block_number: MiniblockNumber, - ) -> sqlx::Result> { + ) -> DalResult> { Ok(sqlx::query!( r#" SELECT @@ -155,7 +155,9 @@ impl FactoryDepsDal<'_, '_> { "#, i64::from(block_number.0) ) - .fetch_all(self.storage.conn()) + .instrument("get_factory_deps_for_revert") + .with_arg("block_number", &block_number) + .fetch_all(self.storage) .await? .into_iter() .map(|row| H256::from_slice(&row.bytecode_hash)) @@ -163,10 +165,7 @@ impl FactoryDepsDal<'_, '_> { } /// Removes all factory deps with a miniblock number strictly greater than the specified `block_number`. - pub async fn rollback_factory_deps( - &mut self, - block_number: MiniblockNumber, - ) -> sqlx::Result<()> { + pub async fn rollback_factory_deps(&mut self, block_number: MiniblockNumber) -> DalResult<()> { sqlx::query!( r#" DELETE FROM factory_deps @@ -175,7 +174,9 @@ impl FactoryDepsDal<'_, '_> { "#, i64::from(block_number.0) ) - .execute(self.storage.conn()) + .instrument("rollback_factory_deps") + .with_arg("block_number", &block_number) + .execute(self.storage) .await?; Ok(()) } diff --git a/core/lib/dal/src/protocol_versions_web3_dal.rs b/core/lib/dal/src/protocol_versions_web3_dal.rs index fd5ee2e9d30..974cdc824da 100644 --- a/core/lib/dal/src/protocol_versions_web3_dal.rs +++ b/core/lib/dal/src/protocol_versions_web3_dal.rs @@ -1,4 +1,4 @@ -use zksync_db_connection::connection::Connection; +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; use zksync_types::api::ProtocolVersion; use crate::{models::storage_protocol_version::StorageProtocolVersion, Core}; @@ -9,8 +9,11 @@ pub struct ProtocolVersionsWeb3Dal<'a, 'c> { } impl ProtocolVersionsWeb3Dal<'_, '_> { - pub async fn get_protocol_version_by_id(&mut self, version_id: u16) -> Option { - let storage_protocol_version: Option = sqlx::query_as!( + pub async fn get_protocol_version_by_id( + &mut self, + version_id: u16, + ) -> DalResult> { + let storage_protocol_version = sqlx::query_as!( StorageProtocolVersion, r#" SELECT @@ -22,15 +25,16 @@ impl ProtocolVersionsWeb3Dal<'_, '_> { "#, i32::from(version_id) ) - .fetch_optional(self.storage.conn()) - .await - .unwrap(); + .instrument("get_protocol_version_by_id") + .with_arg("version_id", &version_id) + .fetch_optional(self.storage) + .await?; - storage_protocol_version.map(ProtocolVersion::from) + Ok(storage_protocol_version.map(ProtocolVersion::from)) } - pub async fn get_latest_protocol_version(&mut self) -> ProtocolVersion { - let storage_protocol_version: StorageProtocolVersion = sqlx::query_as!( + pub async fn get_latest_protocol_version(&mut self) -> DalResult { + let storage_protocol_version = sqlx::query_as!( StorageProtocolVersion, r#" SELECT @@ -43,10 +47,10 @@ impl ProtocolVersionsWeb3Dal<'_, '_> { 1 "#, ) - .fetch_one(self.storage.conn()) - .await - .unwrap(); + 
.instrument("get_latest_protocol_version") + .fetch_one(self.storage) + .await?; - ProtocolVersion::from(storage_protocol_version) + Ok(ProtocolVersion::from(storage_protocol_version)) } } diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs index 36010b9a7de..4fcfd019de1 100644 --- a/core/lib/dal/src/snapshots_dal.rs +++ b/core/lib/dal/src/snapshots_dal.rs @@ -84,7 +84,7 @@ impl SnapshotsDal<'_, '_> { l1_batch_number: L1BatchNumber, chunk_id: u64, storage_logs_filepath: &str, - ) -> sqlx::Result<()> { + ) -> DalResult<()> { sqlx::query!( r#" UPDATE snapshots @@ -98,7 +98,10 @@ impl SnapshotsDal<'_, '_> { chunk_id as i32 + 1, storage_logs_filepath, ) - .execute(self.storage.conn()) + .instrument("add_storage_logs_filepath_for_snapshot") + .with_arg("l1_batch_number", &l1_batch_number) + .with_arg("chunk_id", &chunk_id) + .execute(self.storage) .await?; Ok(()) diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs index 86dc3417c19..f204df1da29 100644 --- a/core/lib/dal/src/storage_logs_dedup_dal.rs +++ b/core/lib/dal/src/storage_logs_dedup_dal.rs @@ -127,7 +127,7 @@ impl StorageLogsDedupDal<'_, '_> { pub async fn get_protective_reads_for_l1_batch( &mut self, l1_batch_number: L1BatchNumber, - ) -> sqlx::Result> { + ) -> DalResult> { let rows = sqlx::query!( r#" SELECT @@ -140,7 +140,9 @@ impl StorageLogsDedupDal<'_, '_> { "#, i64::from(l1_batch_number.0) ) - .fetch_all(self.storage.conn()) + .instrument("get_protective_reads_for_l1_batch") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_all(self.storage) .await?; Ok(rows diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index 149b8f129d2..ec1b862b8ea 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -9,7 +9,7 @@ use zksync_types::{ }; use zksync_utils::h256_to_u256; -use crate::{models::storage_block::ResolvedL1BatchForMiniblock, Core, CoreDal, SqlxError}; +use crate::{models::storage_block::ResolvedL1BatchForMiniblock, Core, CoreDal}; #[derive(Debug)] pub struct StorageWeb3Dal<'a, 'c> { @@ -133,7 +133,7 @@ impl StorageWeb3Dal<'_, '_> { pub async fn resolve_l1_batch_number_of_miniblock( &mut self, miniblock_number: MiniblockNumber, - ) -> Result { + ) -> DalResult { let row = sqlx::query!( r#" SELECT @@ -163,7 +163,9 @@ impl StorageWeb3Dal<'_, '_> { "#, i64::from(miniblock_number.0) ) - .fetch_one(self.storage.conn()) + .instrument("resolve_l1_batch_number_of_miniblock") + .with_arg("miniblock_number", &miniblock_number) + .fetch_one(self.storage) .await?; Ok(ResolvedL1BatchForMiniblock { @@ -245,7 +247,7 @@ impl StorageWeb3Dal<'_, '_> { pub async fn get_factory_dep( &mut self, hash: H256, - ) -> sqlx::Result, MiniblockNumber)>> { + ) -> DalResult, MiniblockNumber)>> { let row = sqlx::query!( r#" SELECT @@ -258,7 +260,9 @@ impl StorageWeb3Dal<'_, '_> { "#, hash.as_bytes(), ) - .fetch_optional(self.storage.conn()) + .instrument("get_factory_dep") + .with_arg("hash", &hash) + .fetch_optional(self.storage) .await?; Ok(row.map(|row| (row.bytecode, MiniblockNumber(row.miniblock_number as u32)))) diff --git a/core/lib/dal/src/system_dal.rs b/core/lib/dal/src/system_dal.rs index a1cf48219dd..f6dda53b158 100644 --- a/core/lib/dal/src/system_dal.rs +++ b/core/lib/dal/src/system_dal.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::{collections::HashMap, time::Duration}; use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; @@ 
-12,12 +12,13 @@ pub(crate) struct TableSize { pub total_size: u64, } +#[derive(Debug)] pub struct SystemDal<'a, 'c> { - pub storage: &'a mut Connection<'c, Core>, + pub(crate) storage: &'a mut Connection<'c, Core>, } impl SystemDal<'_, '_> { - pub async fn get_replication_lag_sec(&mut self) -> sqlx::Result { + pub async fn get_replication_lag(&mut self) -> DalResult { // NOTE: lag (seconds) has a special meaning here // (it is not the same that `replay_lag/write_lag/flush_lag` from `pg_stat_replication` view) // and it is only useful when synced column is false, @@ -33,12 +34,13 @@ impl SystemDal<'_, '_> { )::INT AS LAG "# ) - .fetch_one(self.storage.conn()) + .instrument("get_replication_lag") + .fetch_one(self.storage) .await?; Ok(match row.synced { - Some(false) => row.lag.unwrap_or(0) as u32, - _ => 0, // We are synced, no lag + Some(false) => Duration::from_secs(row.lag.unwrap_or(0) as u64), + _ => Duration::ZERO, // We are synced, no lag }) } diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 1541b55a3a1..00b82d8475e 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -1,6 +1,5 @@ use std::{collections::HashMap, fmt, time::Duration}; -use anyhow::Context as _; use bigdecimal::BigDecimal; use itertools::Itertools; use sqlx::types::chrono::NaiveDateTime; @@ -56,8 +55,6 @@ pub struct TransactionsDal<'c, 'a> { pub(crate) storage: &'c mut Connection<'a, Core>, } -type TxLocations = Vec<(MiniblockNumber, Vec<(H256, u32, u16)>)>; - impl TransactionsDal<'_, '_> { pub async fn insert_transaction_l1( &mut self, @@ -1149,7 +1146,7 @@ impl TransactionsDal<'_, '_> { Ok(transactions) } - pub async fn reset_mempool(&mut self) -> sqlx::Result<()> { + pub async fn reset_mempool(&mut self) -> DalResult<()> { sqlx::query!( r#" UPDATE transactions @@ -1159,7 +1156,8 @@ impl TransactionsDal<'_, '_> { in_mempool = TRUE "# ) - .execute(self.storage.conn()) + .instrument("reset_mempool") + .execute(self.storage) .await?; Ok(()) } @@ -1233,9 +1231,7 @@ impl TransactionsDal<'_, '_> { /// These are the transactions that are included to some miniblock, /// but not included to L1 batch. The order of the transactions is the same as it was /// during the previous execution. 
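Returning `Duration` from `get_replication_lag` (previously `get_replication_lag_sec` with raw seconds) lets the circuit breaker compare against a typed limit instead of juggling units. A hedged caller-side sketch; the free function and the `system_dal()` accessor are assumptions for illustration, only the types follow this patch:

```rust
use std::time::Duration;

use zksync_dal::{Connection, Core, CoreDal};

// Illustrative replication-lag check against a `Duration`-typed limit.
async fn check_replication_lag(
    storage: &mut Connection<'_, Core>,
    replication_lag_limit: Option<Duration>, // mirrors the `ReplicationLagChecker` field
) -> anyhow::Result<()> {
    let lag = storage.system_dal().get_replication_lag().await?; // assumed accessor name
    if let Some(limit) = replication_lag_limit {
        anyhow::ensure!(lag <= limit, "replication lag {lag:?} exceeds the limit {limit:?}");
    }
    Ok(())
}
```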
- pub async fn get_miniblocks_to_reexecute( - &mut self, - ) -> anyhow::Result> { + pub async fn get_miniblocks_to_reexecute(&mut self) -> DalResult> { let transactions = sqlx::query_as!( StorageTransaction, r#" @@ -1251,7 +1247,8 @@ impl TransactionsDal<'_, '_> { index_in_block "#, ) - .fetch_all(self.storage.conn()) + .instrument("get_miniblocks_to_reexecute#transactions") + .fetch_all(self.storage) .await?; self.map_transactions_to_execution_data(transactions).await @@ -1263,7 +1260,7 @@ impl TransactionsDal<'_, '_> { pub async fn get_miniblocks_to_execute_for_l1_batch( &mut self, l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { + ) -> DalResult> { let transactions = sqlx::query_as!( StorageTransaction, r#" @@ -1279,7 +1276,9 @@ impl TransactionsDal<'_, '_> { "#, i64::from(l1_batch_number.0) ) - .fetch_all(self.storage.conn()) + .instrument("get_miniblocks_to_execute_for_l1_batch") + .with_arg("l1_batch_number", &l1_batch_number) + .fetch_all(self.storage) .await?; self.map_transactions_to_execution_data(transactions).await @@ -1288,7 +1287,7 @@ impl TransactionsDal<'_, '_> { async fn map_transactions_to_execution_data( &mut self, transactions: Vec, - ) -> anyhow::Result> { + ) -> DalResult> { let transactions_by_miniblock: Vec<(MiniblockNumber, Vec)> = transactions .into_iter() .group_by(|tx| tx.miniblock_number.unwrap()) @@ -1322,13 +1321,19 @@ impl TransactionsDal<'_, '_> { i64::from(from_miniblock.0), i64::from(to_miniblock.0) ) - .fetch_all(self.storage.conn()) + .instrument("map_transactions_to_execution_data#miniblocks") + .with_arg("from_miniblock", &from_miniblock) + .with_arg("to_miniblock", &to_miniblock) + .fetch_all(self.storage) .await?; - anyhow::ensure!( - miniblock_data.len() == transactions_by_miniblock.len(), - "Not enough miniblock data retrieved" - ); + if miniblock_data.len() != transactions_by_miniblock.len() { + let err = Instrumented::new("map_transactions_to_execution_data") + .with_arg("transactions_by_miniblock", &transactions_by_miniblock) + .with_arg("miniblock_data", &miniblock_data) + .constraint_error(anyhow::anyhow!("not enough miniblock data retrieved")); + return Err(err); + } let prev_miniblock_hashes = sqlx::query!( r#" @@ -1345,7 +1350,10 @@ impl TransactionsDal<'_, '_> { i64::from(from_miniblock.0) - 1, i64::from(to_miniblock.0) - 1, ) - .fetch_all(self.storage.conn()) + .instrument("map_transactions_to_execution_data#prev_miniblock_hashes") + .with_arg("from_miniblock", &(from_miniblock - 1)) + .with_arg("to_miniblock", &(to_miniblock - 1)) + .fetch_all(self.storage) .await?; let prev_miniblock_hashes: HashMap<_, _> = prev_miniblock_hashes @@ -1378,14 +1386,10 @@ impl TransactionsDal<'_, '_> { "#, prev_miniblock_number.0 as i32 ) - .fetch_optional(self.storage.conn()) - .await? - .with_context(|| { - format!( - "miniblock #{prev_miniblock_number} is not in storage, and its hash is not \ - in snapshot recovery data" - ) - })?; + .instrument("map_transactions_to_execution_data#snapshot_recovery") + .with_arg("prev_miniblock_number", &prev_miniblock_number) + .fetch_one(self.storage) + .await?; H256::from_slice(&row.miniblock_hash) } }; @@ -1401,48 +1405,6 @@ impl TransactionsDal<'_, '_> { Ok(data) } - pub async fn get_tx_locations(&mut self, l1_batch_number: L1BatchNumber) -> TxLocations { - { - sqlx::query!( - r#" - SELECT - miniblock_number AS "miniblock_number!", - hash, - index_in_block AS "index_in_block!", - l1_batch_tx_index AS "l1_batch_tx_index!" 
- FROM - transactions - WHERE - l1_batch_number = $1 - ORDER BY - miniblock_number, - index_in_block - "#, - i64::from(l1_batch_number.0) - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .group_by(|tx| tx.miniblock_number) - .into_iter() - .map(|(miniblock_number, rows)| { - ( - MiniblockNumber(miniblock_number as u32), - rows.map(|row| { - ( - H256::from_slice(&row.hash), - row.index_in_block as u32, - row.l1_batch_tx_index as u16, - ) - }) - .collect::>(), - ) - }) - .collect() - } - } - pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult> { let protocol_version: ProtocolVersionId = sqlx::query!( r#" diff --git a/core/lib/db_connection/src/instrument.rs b/core/lib/db_connection/src/instrument.rs index a0fb29f71e5..06226db5f28 100644 --- a/core/lib/db_connection/src/instrument.rs +++ b/core/lib/db_connection/src/instrument.rs @@ -15,7 +15,7 @@ use std::{fmt, future::Future, panic::Location}; use sqlx::{ postgres::{PgCopyIn, PgQueryResult, PgRow}, - query::{Map, Query, QueryAs}, + query::{Map, Query, QueryAs, QueryScalar}, FromRow, IntoArguments, PgConnection, Postgres, }; use tokio::time::Instant; @@ -94,6 +94,19 @@ where } } +impl<'q, O, A> InstrumentExt for QueryScalar<'q, Postgres, O, A> +where + A: 'q + IntoArguments<'q, Postgres>, +{ + #[track_caller] + fn instrument(self, name: &'static str) -> Instrumented<'static, Self> { + Instrumented { + query: self, + data: InstrumentedData::new(name, Location::caller()), + } + } +} + impl<'q, F, O, A> InstrumentExt for Map<'q, Postgres, F, A> where F: FnMut(PgRow) -> Result + Send, @@ -298,6 +311,18 @@ impl<'a> Instrumented<'a, ()> { .into() } + /// Wraps a provided application-level data constraint error. + pub fn constraint_error(&self, err: anyhow::Error) -> DalError { + let err = err.context("application-level data constraint violation"); + DalRequestError::new( + sqlx::Error::Decode(err.into()), + self.data.name, + self.data.location, + ) + .with_args(self.data.args.to_owned()) + .into() + } + pub fn with(self, query: Q) -> Instrumented<'a, Q> { Instrumented { query, @@ -364,6 +389,28 @@ where } } +impl<'q, O, A> Instrumented<'_, QueryScalar<'q, Postgres, O, A>> +where + A: 'q + IntoArguments<'q, Postgres>, + O: Send + Unpin, + (O,): for<'r> FromRow<'r, PgRow>, +{ + /// Fetches an optional row using this query. + pub async fn fetch_optional( + self, + storage: &mut Connection<'_, DB>, + ) -> DalResult> { + let (conn, tags) = storage.conn_and_tags(); + self.data.fetch(tags, self.query.fetch_optional(conn)).await + } + + /// Fetches a single row using this query. 
+ pub async fn fetch_one(self, storage: &mut Connection<'_, DB>) -> DalResult { + let (conn, tags) = storage.conn_and_tags(); + self.data.fetch(tags, self.query.fetch_one(conn)).await + } +} + impl<'q, F, O, A> Instrumented<'_, Map<'q, Postgres, F, A>> where F: FnMut(PgRow) -> Result + Send, diff --git a/core/lib/state/src/rocksdb/mod.rs b/core/lib/state/src/rocksdb/mod.rs index 36607ae3593..cb8c3bfd30c 100644 --- a/core/lib/state/src/rocksdb/mod.rs +++ b/core/lib/state/src/rocksdb/mod.rs @@ -523,10 +523,7 @@ impl RocksdbStorage { let factory_deps = connection .factory_deps_dal() .get_factory_deps_for_revert(last_miniblock_to_keep) - .await - .with_context(|| { - format!("failed fetching factory deps for miniblock #{last_miniblock_to_keep}") - })?; + .await?; tracing::info!( "Got {} factory deps, took {:?}", factory_deps.len(), diff --git a/core/lib/vm_utils/src/storage.rs b/core/lib/vm_utils/src/storage.rs index 9c90452ba3c..d0870f9bb1e 100644 --- a/core/lib/vm_utils/src/storage.rs +++ b/core/lib/vm_utils/src/storage.rs @@ -7,7 +7,7 @@ use multivm::{ zk_evm_latest::ethereum_types::H256, }; use zksync_contracts::BaseSystemContracts; -use zksync_dal::{Connection, Core, CoreDal}; +use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_types::{ block::MiniblockHeader, fee_model::BatchFeeInput, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, ZKPORTER_IS_AVAILABLE, @@ -268,7 +268,7 @@ impl L1BatchParamsProvider { .blocks_web3_dal() .get_miniblock_hash(prev_miniblock_number) .await - .context("failed getting hash for previous miniblock")? + .map_err(DalError::generalize)? .context("previous miniblock disappeared from storage")?, }; tracing::info!( diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs index 8b895c17d65..8c6231f32f4 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs @@ -16,7 +16,7 @@ use multivm::{ VmInstance, }; use tokio::runtime::Handle; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; use zksync_state::{PostgresStorage, ReadStorage, StoragePtr, StorageView, WriteStorage}; use zksync_system_constants::{ SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, @@ -386,7 +386,7 @@ impl StoredL2BlockInfo { .blocks_web3_dal() .get_miniblock_hash(miniblock_number) .await - .with_context(|| format!("failed getting hash for miniblock #{miniblock_number}"))? + .map_err(DalError::generalize)? .with_context(|| format!("miniblock #{miniblock_number} not present in storage"))? 
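`DalError::generalize` is the counterpart used at call sites that need an `anyhow::Error`: the structured DAL error already carries the query name, caller location, and arguments, so no context string has to be re-attached for the database failure itself. A minimal sketch mirroring the `StoredL2BlockInfo` call above (the wrapper function is hypothetical):

```rust
use anyhow::Context as _;
use zksync_dal::{Connection, Core, CoreDal, DalError};
use zksync_types::{MiniblockNumber, H256};

// Only the "value is absent" case still needs a human-written context string;
// the DB-error case is covered by the instrumented `DalError` itself.
async fn existing_miniblock_hash(
    storage: &mut Connection<'_, Core>,
    number: MiniblockNumber,
) -> anyhow::Result<H256> {
    storage
        .blocks_web3_dal()
        .get_miniblock_hash(number)
        .await
        .map_err(DalError::generalize)?
        .with_context(|| format!("miniblock #{number} not present in storage"))
}
```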
}; diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs index bad188a808b..1e858f630f7 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs @@ -2,7 +2,7 @@ use std::{sync::Arc, time::Duration}; use anyhow::Context as _; use tokio::runtime::Handle; -use zksync_dal::{Connection, Core, CoreDal}; +use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_state::PostgresStorageCaches; use zksync_types::{ api, fee_model::BatchFeeInput, AccountTreeId, Address, L1BatchNumber, L2ChainId, @@ -155,7 +155,7 @@ async fn get_pending_state( .blocks_web3_dal() .resolve_block_id(block_id) .await - .with_context(|| format!("failed resolving block ID {block_id:?}"))? + .map_err(DalError::generalize)? .context("pending block should always be present in Postgres")?; Ok((block_id, resolved_block_number)) } @@ -279,7 +279,7 @@ impl BlockArgs { .blocks_web3_dal() .resolve_block_id(block_id) .await - .with_context(|| format!("failed resolving block ID {block_id:?}"))?; + .map_err(DalError::generalize)?; let Some(resolved_block_number) = resolved_block_number else { return Err(BlockArgsError::Missing); }; @@ -295,7 +295,7 @@ impl BlockArgs { .blocks_web3_dal() .get_expected_l1_batch_timestamp(&l1_batch) .await - .with_context(|| format!("failed getting timestamp for {l1_batch:?}"))? + .map_err(DalError::generalize)? .context("missing timestamp for non-pending block")?; Ok(Self { block_id, diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs index 15265d80ba2..bc50abac48b 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -230,7 +230,7 @@ impl EthNamespace { .blocks_web3_dal() .get_api_block(block_number) .await - .with_context(|| format!("get_api_block({block_number})"))? + .map_err(DalError::generalize)? else { return Ok(None); }; @@ -292,7 +292,7 @@ impl EthNamespace { .blocks_web3_dal() .get_block_tx_count(block_number) .await - .with_context(|| format!("get_block_tx_count({block_number})"))?; + .map_err(DalError::generalize)?; if tx_count.is_some() { self.set_block_diff(block_number); // only report block diff for existing miniblocks @@ -320,7 +320,7 @@ impl EthNamespace { .blocks_web3_dal() .get_api_block(block_number) .await - .with_context(|| format!("get_api_block({block_number})"))? + .map_err(DalError::generalize)? else { return Ok(None); }; diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index 7ccca7f2bee..06f96ee046c 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -468,19 +468,20 @@ impl ZksNamespace { version_id: Option, ) -> Result, Web3Error> { let mut storage = self.state.acquire_connection().await?; - let protocol_version = match version_id { - Some(id) => { - storage - .protocol_versions_web3_dal() - .get_protocol_version_by_id(id) - .await - } - None => Some( + let protocol_version = if let Some(id) = version_id { + storage + .protocol_versions_web3_dal() + .get_protocol_version_by_id(id) + .await + .map_err(DalError::generalize)? 
+ } else { + Some( storage .protocol_versions_web3_dal() .get_latest_protocol_version() - .await, - ), + .await + .map_err(DalError::generalize)?, + ) }; Ok(protocol_version) } diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs index 238bceb585f..45839ae2798 100644 --- a/core/lib/zksync_core/src/api_server/web3/state.rs +++ b/core/lib/zksync_core/src/api_server/web3/state.rs @@ -286,7 +286,7 @@ impl RpcState { .blocks_web3_dal() .resolve_block_id(block) .await - .context("resolve_block_id")? + .map_err(DalError::generalize)? .ok_or(Web3Error::NoBlock) } @@ -312,7 +312,7 @@ impl RpcState { .blocks_web3_dal() .resolve_block_id(block) .await - .context("resolve_block_id")?), + .map_err(DalError::generalize)?), } } @@ -359,15 +359,10 @@ impl RpcState { pub async fn resolve_filter_block_hash(&self, filter: &mut Filter) -> Result<(), Web3Error> { match (filter.block_hash, filter.from_block, filter.to_block) { (Some(block_hash), None, None) => { + let mut storage = self.acquire_connection().await?; let block_number = self - .acquire_connection() - .await? - .blocks_web3_dal() - .resolve_block_id(api::BlockId::Hash(block_hash)) - .await - .context("resolve_block_id")? - .ok_or(Web3Error::NoBlock)?; - + .resolve_block(&mut storage, api::BlockId::Hash(block_hash)) + .await?; filter.from_block = Some(api::BlockNumber::Number(block_number.0.into())); filter.to_block = Some(api::BlockNumber::Number(block_number.0.into())); Ok(()) @@ -383,13 +378,13 @@ impl RpcState { &self, filter: &Filter, ) -> Result { + let mut connection = self.acquire_connection().await?; let pending_block = self - .acquire_connection() + .resolve_block_unchecked( + &mut connection, + api::BlockId::Number(api::BlockNumber::Pending), + ) .await? - .blocks_web3_dal() - .resolve_block_id(api::BlockId::Number(api::BlockNumber::Pending)) - .await - .context("resolve_block_id")? 
.context("Pending block number shouldn't be None")?; let block_number = match filter.from_block { Some(api::BlockNumber::Number(number)) => { diff --git a/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs b/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs index 30ef53c0163..8cdcda9fe93 100644 --- a/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs +++ b/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs @@ -174,23 +174,13 @@ impl JobProcessor for BasicWitnessInputProducer { METRICS .upload_input_time .observe(upload_started_at.elapsed()); - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to acquire DB connection for BasicWitnessInputProducer")?; - let mut transaction = connection - .start_transaction() - .await - .context("failed to acquire DB transaction for BasicWitnessInputProducer")?; + let mut connection = self.connection_pool.connection().await?; + let mut transaction = connection.start_transaction().await?; transaction .basic_witness_input_producer_dal() .mark_job_as_successful(job_id, started_at, &object_path) .await?; - transaction - .commit() - .await - .context("failed to commit DB transaction for BasicWitnessInputProducer")?; + transaction.commit().await?; METRICS.block_number_processed.set(job_id.0 as i64); Ok(()) } @@ -200,16 +190,11 @@ impl JobProcessor for BasicWitnessInputProducer { } async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to acquire DB connection for BasicWitnessInputProducer")?; - connection + let mut connection = self.connection_pool.connection().await?; + Ok(connection .basic_witness_input_producer_dal() .get_basic_witness_input_producer_job_attempts(*job_id) .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for BasicWitnessInputProducer") + .map(|attempts| attempts.unwrap_or(0))?) 
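Because `DalError` converts into `anyhow::Error` through `?`, the job-processor plumbing above can drop its hand-written `.context("failed to acquire DB connection ...")` strings entirely. A condensed sketch of the resulting shape (the free function is hypothetical; the calls are the ones used in `get_job_attempts`):

```rust
use zksync_dal::{ConnectionPool, Core, CoreDal};
use zksync_types::L1BatchNumber;

// Plain `?` suffices: pool and DAL errors are already self-describing.
async fn job_attempts(
    pool: &ConnectionPool<Core>,
    job_id: L1BatchNumber,
) -> anyhow::Result<u32> {
    let mut conn = pool.connection().await?;
    let attempts = conn
        .basic_witness_input_producer_dal()
        .get_basic_witness_input_producer_job_attempts(job_id)
        .await?;
    Ok(attempts.unwrap_or(0))
}
```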
} } diff --git a/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs b/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs index bdff4b25001..99b4fa4f5bc 100644 --- a/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs +++ b/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs @@ -19,14 +19,10 @@ impl L1BatchMetricsReporter { } } - async fn report_metrics(&self) { + async fn report_metrics(&self) -> anyhow::Result<()> { let mut block_metrics = vec![]; - let mut conn = self.connection_pool.connection().await.unwrap(); - let last_l1_batch = conn - .blocks_dal() - .get_sealed_l1_batch_number() - .await - .unwrap(); + let mut conn = self.connection_pool.connection().await?; + let last_l1_batch = conn.blocks_dal().get_sealed_l1_batch_number().await?; if let Some(number) = last_l1_batch { block_metrics.push((number, BlockStage::Sealed)); } @@ -34,13 +30,12 @@ impl L1BatchMetricsReporter { let last_l1_batch_with_metadata = conn .blocks_dal() .get_last_l1_batch_number_with_metadata() - .await - .unwrap(); + .await?; if let Some(number) = last_l1_batch_with_metadata { block_metrics.push((number, BlockStage::MetadataCalculated)); } - let eth_stats = conn.eth_sender_dal().get_eth_l1_batches().await.unwrap(); + let eth_stats = conn.eth_sender_dal().get_eth_l1_batches().await?; for (tx_type, l1_batch) in eth_stats.saved { let stage = BlockStage::L1 { l1_stage: BlockL1Stage::Saved, @@ -66,18 +61,13 @@ impl L1BatchMetricsReporter { let oldest_uncommitted_batch_timestamp = conn .blocks_dal() .oldest_uncommitted_batch_timestamp() - .await - .unwrap(); - let oldest_unproved_batch_timestamp = conn - .blocks_dal() - .oldest_unproved_batch_timestamp() - .await - .unwrap(); + .await?; + let oldest_unproved_batch_timestamp = + conn.blocks_dal().oldest_unproved_batch_timestamp().await?; let oldest_unexecuted_batch_timestamp = conn .blocks_dal() .oldest_unexecuted_batch_timestamp() - .await - .unwrap(); + .await?; let now = seconds_since_epoch(); @@ -94,6 +84,7 @@ impl L1BatchMetricsReporter { APP_METRICS.blocks_state_block_eth_stage_latency[&L1StageLatencyLabel::UnexecutedBlock] .set(now.saturating_sub(timestamp)); } + Ok(()) } } @@ -102,8 +93,7 @@ impl PeriodicJob for L1BatchMetricsReporter { const SERVICE_NAME: &'static str = "L1BatchMetricsReporter"; async fn run_routine_task(&mut self) -> anyhow::Result<()> { - self.report_metrics().await; - Ok(()) + self.report_metrics().await } fn polling_interval_ms(&self) -> u64 { diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index 912d632bfef..0a873006923 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -1379,7 +1379,7 @@ async fn circuit_breakers_for_components( circuit_breakers .insert(Box::new(ReplicationLagChecker { pool, - replication_lag_limit_sec: circuit_breaker_config.replication_lag_limit_sec, + replication_lag_limit: circuit_breaker_config.replication_lag_limit(), })) .await; } diff --git a/core/lib/zksync_core/src/metadata_calculator/helpers.rs b/core/lib/zksync_core/src/metadata_calculator/helpers.rs index ec067dfc2ca..cd2da81f612 100644 --- a/core/lib/zksync_core/src/metadata_calculator/helpers.rs +++ b/core/lib/zksync_core/src/metadata_calculator/helpers.rs @@ -440,8 +440,7 @@ impl L1BatchWithLogs { let protective_reads = storage .storage_logs_dedup_dal() .get_protective_reads_for_l1_batch(l1_batch_number) - .await - .context("cannot fetch protective reads")?; + .await?; if protective_reads.is_empty() { tracing::warn!( "Protective reads for L1 
batch #{l1_batch_number} are empty. This is highly unlikely \ diff --git a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs index 75f42a0faf7..d84930dcb8c 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs @@ -89,8 +89,7 @@ pub(crate) async fn load_pending_batch( let pending_miniblocks = storage .transactions_dal() .get_miniblocks_to_reexecute() - .await - .context("failed loading miniblocks for re-execution")?; + .await?; let first_pending_miniblock = pending_miniblocks .first() .context("no pending miniblocks; was environment loaded for a correct L1 batch number?")?; diff --git a/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs b/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs index 3b2c5ffcf73..a42b4ea4477 100644 --- a/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs +++ b/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs @@ -20,8 +20,7 @@ pub(crate) async fn migrate_pending_miniblocks( let l1_batches_have_fee_account_address = storage .blocks_dal() .check_l1_batches_have_fee_account_address() - .await - .context("failed getting metadata for l1_batches table")?; + .await?; if !l1_batches_have_fee_account_address { tracing::info!("`l1_batches.fee_account_address` column is removed; assuming that the migration is complete"); return Ok(()); @@ -31,8 +30,7 @@ pub(crate) async fn migrate_pending_miniblocks( let rows_affected = storage .blocks_dal() .copy_fee_account_address_for_pending_miniblocks() - .await - .context("failed migrating `fee_account_address` for pending miniblocks")?; + .await?; let elapsed = started_at.elapsed(); tracing::info!("Migrated `fee_account_address` for {rows_affected} miniblocks in {elapsed:?}"); Ok(()) @@ -99,8 +97,7 @@ async fn migrate_miniblocks_inner( let l1_batches_have_fee_account_address = storage .blocks_dal() .check_l1_batches_have_fee_account_address() - .await - .context("Failed getting metadata for l1_batches table")?; + .await?; drop(storage); if !l1_batches_have_fee_account_address { tracing::info!("`l1_batches.fee_account_address` column is removed; assuming that the migration is complete"); @@ -130,8 +127,7 @@ async fn migrate_miniblocks_inner( let rows_affected = storage .blocks_dal() .copy_fee_account_address_for_miniblocks(chunk.clone()) - .await - .with_context(|| format!("Failed migrating miniblocks chunk {chunk:?}"))?; + .await?; tracing::debug!("Migrated {rows_affected} miniblocks in chunk {chunk:?}"); miniblocks_affected += rows_affected; } diff --git a/core/lib/zksync_core/src/state_keeper/mempool_actor.rs b/core/lib/zksync_core/src/state_keeper/mempool_actor.rs index 45d220e32c2..d789434594f 100644 --- a/core/lib/zksync_core/src/state_keeper/mempool_actor.rs +++ b/core/lib/zksync_core/src/state_keeper/mempool_actor.rs @@ -73,11 +73,7 @@ impl MempoolFetcher { .context("failed removing stuck transactions")?; tracing::info!("Number of stuck txs was removed: {removed_txs}"); } - storage - .transactions_dal() - .reset_mempool() - .await - .context("failed resetting mempool")?; + storage.transactions_dal().reset_mempool().await?; drop(storage); loop { diff --git a/core/lib/zksync_core/src/sync_layer/fetcher.rs b/core/lib/zksync_core/src/sync_layer/fetcher.rs index 99053f3d139..3e4df147155 100644 --- a/core/lib/zksync_core/src/sync_layer/fetcher.rs +++ b/core/lib/zksync_core/src/sync_layer/fetcher.rs @@ -1,4 +1,3 @@ -use anyhow::Context as 
_;
 use zksync_dal::{Connection, Core, CoreDal};
 use zksync_shared_metrics::{TxStage, APP_METRICS};
 use zksync_types::{
@@ -104,11 +103,7 @@ impl IoCursor {
         let mut this = Self::new(storage).await?;
         // It's important to know whether we have opened a new batch already or just sealed the previous one.
         // Depending on it, we must either insert `OpenBatch` item into the queue, or not.
-        let was_new_batch_open = storage
-            .blocks_dal()
-            .pending_batch_exists()
-            .await
-            .context("Failed checking whether pending L1 batch exists")?;
+        let was_new_batch_open = storage.blocks_dal().pending_batch_exists().await?;
         if !was_new_batch_open {
             this.l1_batch -= 1; // Should continue from the last L1 batch present in the storage
         }
diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs
index d84c623a9de..5d51c294aad 100644
--- a/core/node/node_framework/examples/main_node.rs
+++ b/core/node/node_framework/examples/main_node.rs
@@ -275,7 +275,7 @@ impl MainNodeBuilder {
             websocket_requests_per_minute_limit: Some(
                 rpc_config.websocket_requests_per_minute_limit(),
             ),
-            replication_lag_limit_sec: circuit_breaker_config.replication_lag_limit_sec,
+            replication_lag_limit: circuit_breaker_config.replication_lag_limit(),
         };
         self.node.add_layer(Web3ServerLayer::ws(
             rpc_config.ws_port,
diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server.rs
index 217d25d3255..8a5739e2662 100644
--- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs
+++ b/core/node/node_framework/src/implementations/layers/web3_api/server.rs
@@ -1,4 +1,4 @@
-use std::num::NonZeroU32;
+use std::{num::NonZeroU32, time::Duration};
 use tokio::{sync::oneshot, task::JoinHandle};
 use zksync_circuit_breaker::replication_lag::ReplicationLagChecker;
@@ -27,7 +27,7 @@ pub struct Web3ServerOptionalConfig {
     pub response_body_size_limit: Option,
     pub websocket_requests_per_minute_limit: Option,
     // used by circuit breaker.
-    pub replication_lag_limit_sec: Option,
+    pub replication_lag_limit: Option,
 }
 impl Web3ServerOptionalConfig {
@@ -144,7 +144,7 @@ impl WiringLayer for Web3ServerLayer {
         if let Some(sync_state) = sync_state {
             api_builder = api_builder.with_sync_state(sync_state);
         }
-        let replication_lag_limit_sec = self.optional_config.replication_lag_limit_sec;
+        let replication_lag_limit = self.optional_config.replication_lag_limit;
         api_builder = self.optional_config.apply(api_builder);
         let server = api_builder.build()?;
@@ -161,7 +161,7 @@ impl WiringLayer for Web3ServerLayer {
             .breakers
             .insert(Box::new(ReplicationLagChecker {
                 pool: replica_pool,
-                replication_lag_limit_sec,
+                replication_lag_limit,
             }))
             .await;

From b2f21fb1d72e65a738db9f7bc9f162a410d36c9b Mon Sep 17 00:00:00 2001
From: Lech <88630083+Artemka374@users.noreply.github.com>
Date: Wed, 10 Apr 2024 08:04:38 +0300
Subject: [PATCH 06/29] feat: fix availability checker (#1574)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Use a `Notify` to signal when the prover has finished initializing (a minimal sketch of this handshake appears a little further below). Make the `availability_check_interval_in_secs` config value optional.

## Why ❔

Sometimes the prover is shut down before it has marked itself as available.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
- [x] Spellcheck has been run via `zk spellcheck`. - [x] Linkcheck has been run via `zk linkcheck`. --- core/lib/config/src/configs/fri_prover.rs | 2 +- core/lib/env_config/src/fri_prover.rs | 2 +- .../protobuf_config/src/proto/prover.proto | 2 +- core/lib/protobuf_config/src/prover.rs | 7 +-- .../src/gpu_prover_availability_checker.rs | 6 ++- prover/prover_fri/src/main.rs | 47 ++++++++++++++----- prover/prover_fri/src/socket_listener.rs | 10 ++-- 7 files changed, 50 insertions(+), 26 deletions(-) diff --git a/core/lib/config/src/configs/fri_prover.rs b/core/lib/config/src/configs/fri_prover.rs index 8afc40e9ca0..b19d72e40b8 100644 --- a/core/lib/config/src/configs/fri_prover.rs +++ b/core/lib/config/src/configs/fri_prover.rs @@ -22,7 +22,7 @@ pub struct FriProverConfig { pub queue_capacity: usize, pub witness_vector_receiver_port: u16, pub zone_read_url: String, - pub availability_check_interval_in_secs: u32, + pub availability_check_interval_in_secs: Option, // whether to write to public GCS bucket for https://github.com/matter-labs/era-boojum-validator-cli pub shall_save_to_public_bucket: bool, diff --git a/core/lib/env_config/src/fri_prover.rs b/core/lib/env_config/src/fri_prover.rs index 2a08472b680..c6d4626d9f6 100644 --- a/core/lib/env_config/src/fri_prover.rs +++ b/core/lib/env_config/src/fri_prover.rs @@ -42,7 +42,7 @@ mod tests { }, max_retries: 5, }), - availability_check_interval_in_secs: 1_800, + availability_check_interval_in_secs: Some(1_800), } } diff --git a/core/lib/protobuf_config/src/proto/prover.proto b/core/lib/protobuf_config/src/proto/prover.proto index a365a2e2886..dac7d1221c4 100644 --- a/core/lib/protobuf_config/src/proto/prover.proto +++ b/core/lib/protobuf_config/src/proto/prover.proto @@ -31,7 +31,7 @@ message Prover { optional uint64 queue_capacity = 10; // required optional uint32 witness_vector_receiver_port = 11; // required; u16 optional string zone_read_url = 12; // required - optional uint32 availability_check_interval_in_secs = 21; // required; s + optional uint32 availability_check_interval_in_secs = 21; // optional; s optional bool shall_save_to_public_bucket = 13; // required optional config.object_store.ObjectStore object_store = 20; reserved 5, 6, 9; reserved "base_layer_circuit_ids_to_be_verified", "recursive_layer_circuit_ids_to_be_verified", "witness_vector_generator_thread_count"; diff --git a/core/lib/protobuf_config/src/prover.rs b/core/lib/protobuf_config/src/prover.rs index fc13db9d7d9..f82685fdb90 100644 --- a/core/lib/protobuf_config/src/prover.rs +++ b/core/lib/protobuf_config/src/prover.rs @@ -320,10 +320,7 @@ impl ProtoRepr for proto::Prover { zone_read_url: required(&self.zone_read_url) .context("zone_read_url")? 
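Isolated from the surrounding wiring, the initialization handshake described in the PR body works as follows; a runnable, stripped-down sketch in which the task bodies are placeholders and only the `Notify` usage mirrors the patch:

```rust
use std::sync::Arc;

use tokio::sync::Notify;

async fn socket_listener(init_notifier: Arc<Notify>) {
    // ... bind the port and register the prover instance as available ...
    init_notifier.notify_one(); // stores a permit even if nobody is waiting yet
}

async fn availability_checker(init_notifier: Arc<Notify>) {
    // Blocks until the listener has registered the prover, so the checker can
    // no longer observe (and kill) an instance that simply hasn't started yet.
    init_notifier.notified().await;
    // ... poll the instance status in a loop ...
}

#[tokio::main]
async fn main() {
    let notifier = Arc::new(Notify::new());
    let checker = tokio::spawn(availability_checker(notifier.clone()));
    socket_listener(notifier).await;
    checker.await.unwrap();
}
```

Since `notify_one` stores a permit when no task is currently waiting, the handshake is race-free regardless of which task gets scheduled first.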
.clone(), - availability_check_interval_in_secs: *required( - &self.availability_check_interval_in_secs, - ) - .context("availability_check_interval_in_secs")?, + availability_check_interval_in_secs: self.availability_check_interval_in_secs, shall_save_to_public_bucket: *required(&self.shall_save_to_public_bucket) .context("shall_save_to_public_bucket")?, object_store, @@ -341,7 +338,7 @@ impl ProtoRepr for proto::Prover { queue_capacity: Some(this.queue_capacity.try_into().unwrap()), witness_vector_receiver_port: Some(this.witness_vector_receiver_port.into()), zone_read_url: Some(this.zone_read_url.clone()), - availability_check_interval_in_secs: Some(this.availability_check_interval_in_secs), + availability_check_interval_in_secs: this.availability_check_interval_in_secs, shall_save_to_public_bucket: Some(this.shall_save_to_public_bucket), object_store: this.object_store.as_ref().map(ProtoRepr::build), } diff --git a/prover/prover_fri/src/gpu_prover_availability_checker.rs b/prover/prover_fri/src/gpu_prover_availability_checker.rs index 515919cff5b..027c7a4b07a 100644 --- a/prover/prover_fri/src/gpu_prover_availability_checker.rs +++ b/prover/prover_fri/src/gpu_prover_availability_checker.rs @@ -1,8 +1,9 @@ #[cfg(feature = "gpu")] pub mod availability_checker { - use std::time::Duration; + use std::{sync::Arc, time::Duration}; use prover_dal::{ConnectionPool, Prover, ProverDal}; + use tokio::sync::Notify; use zksync_types::prover_dal::{GpuProverInstanceStatus, SocketAddress}; use crate::metrics::{KillingReason, METRICS}; @@ -34,7 +35,10 @@ pub mod availability_checker { pub async fn run( self, stop_receiver: tokio::sync::watch::Receiver, + init_notifier: Arc, ) -> anyhow::Result<()> { + init_notifier.notified().await; + while !*stop_receiver.borrow() { let status = self .pool diff --git a/prover/prover_fri/src/main.rs b/prover/prover_fri/src/main.rs index efc552d50c2..5d5d11a091a 100644 --- a/prover/prover_fri/src/main.rs +++ b/prover/prover_fri/src/main.rs @@ -7,7 +7,7 @@ use local_ip_address::local_ip; use prometheus_exporter::PrometheusExporterConfig; use prover_dal::{ConnectionPool, Prover, ProverDal}; use tokio::{ - sync::{oneshot, watch::Receiver}, + sync::{oneshot, watch::Receiver, Notify}, task::JoinHandle, }; use zksync_config::configs::{ @@ -142,6 +142,9 @@ async fn main() -> anyhow::Result<()> { .await .context("failed to build a connection pool")?; let port = prover_config.witness_vector_receiver_port; + + let notify = Arc::new(Notify::new()); + let prover_tasks = get_prover_tasks( prover_config, stop_receiver.clone(), @@ -149,11 +152,13 @@ async fn main() -> anyhow::Result<()> { public_blob_store, pool, circuit_ids_for_round_to_be_proven, + notify, ) .await .context("get_prover_tasks()")?; let mut tasks = vec![tokio::spawn(exporter_config.run(stop_receiver))]; + tasks.extend(prover_tasks); let mut tasks = ManagedTasks::new(tasks); @@ -176,6 +181,7 @@ async fn main() -> anyhow::Result<()> { Ok(()) } +#[allow(clippy::too_many_arguments)] #[cfg(not(feature = "gpu"))] async fn get_prover_tasks( prover_config: FriProverConfig, @@ -184,6 +190,7 @@ async fn get_prover_tasks( public_blob_store: Option>, pool: ConnectionPool, circuit_ids_for_round_to_be_proven: Vec, + _init_notifier: Arc, ) -> anyhow::Result>>> { use zksync_vk_setup_data_server_fri::commitment_utils::get_cached_commitments; @@ -210,6 +217,7 @@ async fn get_prover_tasks( Ok(vec![tokio::spawn(prover.run(stop_receiver, None))]) } +#[allow(clippy::too_many_arguments)] #[cfg(feature = "gpu")] async fn get_prover_tasks( 
prover_config: FriProverConfig, @@ -218,6 +226,7 @@ async fn get_prover_tasks( public_blob_store: Option>, pool: ConnectionPool, circuit_ids_for_round_to_be_proven: Vec, + init_notifier: Arc, ) -> anyhow::Result>>> { use gpu_prover_job_processor::gpu_prover; use socket_listener::gpu_socket_listener; @@ -263,17 +272,29 @@ async fn get_prover_tasks( prover_config.specialized_group_id, zone.clone(), ); - let availability_checker = - gpu_prover_availability_checker::availability_checker::AvailabilityChecker::new( - address, - zone, - prover_config.availability_check_interval_in_secs, - pool, - ); - - Ok(vec![ - tokio::spawn(socket_listener.listen_incoming_connections(stop_receiver.clone())), + + let mut tasks = vec![ + tokio::spawn( + socket_listener + .listen_incoming_connections(stop_receiver.clone(), init_notifier.clone()), + ), tokio::spawn(prover.run(stop_receiver.clone(), None)), - tokio::spawn(availability_checker.run(stop_receiver.clone())), - ]) + ]; + + // TODO(PLA-874): remove the check after making the availability checker required + if let Some(check_interval) = prover_config.availability_check_interval_in_secs { + let availability_checker = + gpu_prover_availability_checker::availability_checker::AvailabilityChecker::new( + address, + zone, + check_interval, + pool, + ); + + tasks.push(tokio::spawn( + availability_checker.run(stop_receiver.clone(), init_notifier), + )); + } + + Ok(tasks) } diff --git a/prover/prover_fri/src/socket_listener.rs b/prover/prover_fri/src/socket_listener.rs index d6abc3678b4..7fd67290e23 100644 --- a/prover/prover_fri/src/socket_listener.rs +++ b/prover/prover_fri/src/socket_listener.rs @@ -1,13 +1,13 @@ #[cfg(feature = "gpu")] pub mod gpu_socket_listener { - use std::{net::SocketAddr, time::Instant}; + use std::{net::SocketAddr, sync::Arc, time::Instant}; use anyhow::Context as _; use prover_dal::{ConnectionPool, Prover, ProverDal}; use tokio::{ io::copy, net::{TcpListener, TcpStream}, - sync::watch, + sync::{watch, Notify}, }; use zksync_object_store::bincode; use zksync_prover_fri_types::WitnessVectorArtifacts; @@ -42,7 +42,7 @@ pub mod gpu_socket_listener { zone, } } - async fn init(&self) -> anyhow::Result { + async fn init(&self, init_notifier: Arc) -> anyhow::Result { let listening_address = SocketAddr::new(self.address.host, self.address.port); tracing::info!( "Starting assembly receiver at host: {}, port: {}", @@ -65,14 +65,16 @@ pub mod gpu_socket_listener { self.zone.clone(), ) .await; + init_notifier.notify_one(); Ok(listener) } pub async fn listen_incoming_connections( self, stop_receiver: watch::Receiver, + init_notifier: Arc, ) -> anyhow::Result<()> { - let listener = self.init().await.context("init()")?; + let listener = self.init(init_notifier).await.context("init()")?; let mut now = Instant::now(); loop { if *stop_receiver.borrow() { From 713f56b14433a39e8cc431be3150a9abe574984f Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Wed, 10 Apr 2024 13:17:27 +0300 Subject: [PATCH 07/29] feat: Finalize fee address migration (#1617) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Removes code for fee address migration. Drops `l1_batches.fee_account_address` ## Why ❔ Finalize fee address migration, remove code that is not needed ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. 
From 713f56b14433a39e8cc431be3150a9abe574984f Mon Sep 17 00:00:00 2001
From: perekopskiy <53865202+perekopskiy@users.noreply.github.com>
Date: Wed, 10 Apr 2024 13:17:27 +0300
Subject: [PATCH 07/29] feat: Finalize fee address migration (#1617)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Removes the code for the fee address migration and drops the `l1_batches.fee_account_address` column.

## Why ❔

Finalizes the fee address migration and removes code that is no longer needed.

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
- [ ] Spellcheck has been run via `zk spellcheck`.
- [ ] Linkcheck has been run via `zk linkcheck`.
---
 core/bin/external_node/src/main.rs            |   3 -
 ...4e7dfacfcf625e057b99924c245de03c2888c.json |  15 -
 ...5d95fc47eb6bedc32f4d824aac9b2b6f96faf.json |  15 -
 ...9360a7eab2efc70d97b901f8948f6909d4cd2.json |  22 --
 ...c116162becce06841476128f864b6028129df.json |  12 -
 ...c0d68195346daf5f38d102eab1c1b73b0f82f.json |  20 -
 ...op_l1_batches_fee_account_address.down.sql |   2 +
 ...drop_l1_batches_fee_account_address.up.sql |   1 +
 core/lib/dal/src/blocks_dal.rs                | 334 +---------------
 core/lib/dal/src/blocks_web3_dal.rs           |  13 +-
 core/lib/dal/src/sync_dal.rs                  |  15 +-
 core/lib/zksync_core/src/lib.rs               |   5 +-
 .../state_keeper/io/fee_address_migration.rs  | 362 ------------------
 .../src/state_keeper/io/mempool.rs            |   3 +-
 .../zksync_core/src/state_keeper/io/mod.rs    |   1 -
 .../zksync_core/src/state_keeper/keeper.rs    |  22 +-
 .../zksync_core/src/sync_layer/external_io.rs |   5 +-
 infrastructure/zk/src/database.ts             |   2 +-
 18 files changed, 16 insertions(+), 836 deletions(-)
 delete mode 100644 core/lib/dal/.sqlx/query-0034bc1041d9ba7d3c681be6dfc4e7dfacfcf625e057b99924c245de03c2888c.json
 delete mode 100644 core/lib/dal/.sqlx/query-08737d11b3e5067a2468013ec6e5d95fc47eb6bedc32f4d824aac9b2b6f96faf.json
 delete mode 100644 core/lib/dal/.sqlx/query-3c531ad0631090934ed46c538249360a7eab2efc70d97b901f8948f6909d4cd2.json
 delete mode 100644 core/lib/dal/.sqlx/query-c4426ae84862e720673485e3b59c116162becce06841476128f864b6028129df.json
 delete mode 100644 core/lib/dal/.sqlx/query-eab03e888f20020462ede2cd59fc0d68195346daf5f38d102eab1c1b73b0f82f.json
 create mode 100644 core/lib/dal/migrations/20240409102923_drop_l1_batches_fee_account_address.down.sql
 create mode 100644 core/lib/dal/migrations/20240409102923_drop_l1_batches_fee_account_address.up.sql
 delete mode 100644 core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs

diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs
index 73e42d54042..68f507429cc 100644
--- a/core/bin/external_node/src/main.rs
+++ b/core/bin/external_node/src/main.rs
@@ -267,8 +267,6 @@ async fn run_core(
         }
     }));
 
-    let fee_address_migration_handle =
-        task::spawn(state_keeper.run_fee_address_migration(connection_pool.clone()));
     let sk_handle = task::spawn(state_keeper.run());
     let fee_params_fetcher_handle =
         tokio::spawn(fee_params_fetcher.clone().run(stop_receiver.clone()));
@@ -347,7 +345,6 @@ async fn run_core(
 
     task_handles.extend([
         sk_handle,
-        fee_address_migration_handle,
         fee_params_fetcher_handle,
         consistency_checker_handle,
         commitment_generator_handle,
diff --git a/core/lib/dal/.sqlx/query-0034bc1041d9ba7d3c681be6dfc4e7dfacfcf625e057b99924c245de03c2888c.json b/core/lib/dal/.sqlx/query-0034bc1041d9ba7d3c681be6dfc4e7dfacfcf625e057b99924c245de03c2888c.json
deleted file mode 100644
index cf78a74dfb3..00000000000
--- a/core/lib/dal/.sqlx/query-0034bc1041d9ba7d3c681be6dfc4e7dfacfcf625e057b99924c245de03c2888c.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n UPDATE l1_batches\n SET\n fee_account_address = $1::bytea\n WHERE\n number = $2\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Left": [
-        "Bytea",
-        "Int8"
-      ]
-    },
-    "nullable": []
-  },
-  "hash": "0034bc1041d9ba7d3c681be6dfc4e7dfacfcf625e057b99924c245de03c2888c"
-}
diff --git a/core/lib/dal/.sqlx/query-08737d11b3e5067a2468013ec6e5d95fc47eb6bedc32f4d824aac9b2b6f96faf.json
b/core/lib/dal/.sqlx/query-08737d11b3e5067a2468013ec6e5d95fc47eb6bedc32f4d824aac9b2b6f96faf.json deleted file mode 100644 index 2e83c0036b9..00000000000 --- a/core/lib/dal/.sqlx/query-08737d11b3e5067a2468013ec6e5d95fc47eb6bedc32f4d824aac9b2b6f96faf.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE miniblocks\n SET\n fee_account_address = l1_batches.fee_account_address\n FROM\n l1_batches\n WHERE\n l1_batches.number = miniblocks.l1_batch_number\n AND miniblocks.number BETWEEN $1 AND $2\n AND miniblocks.fee_account_address = '\\x0000000000000000000000000000000000000000'::bytea\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "08737d11b3e5067a2468013ec6e5d95fc47eb6bedc32f4d824aac9b2b6f96faf" -} diff --git a/core/lib/dal/.sqlx/query-3c531ad0631090934ed46c538249360a7eab2efc70d97b901f8948f6909d4cd2.json b/core/lib/dal/.sqlx/query-3c531ad0631090934ed46c538249360a7eab2efc70d97b901f8948f6909d4cd2.json deleted file mode 100644 index d8f17c7f772..00000000000 --- a/core/lib/dal/.sqlx/query-3c531ad0631090934ed46c538249360a7eab2efc70d97b901f8948f6909d4cd2.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batches.fee_account_address\n FROM\n l1_batches\n INNER JOIN miniblocks ON miniblocks.l1_batch_number = l1_batches.number\n WHERE\n miniblocks.number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "fee_account_address", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "3c531ad0631090934ed46c538249360a7eab2efc70d97b901f8948f6909d4cd2" -} diff --git a/core/lib/dal/.sqlx/query-c4426ae84862e720673485e3b59c116162becce06841476128f864b6028129df.json b/core/lib/dal/.sqlx/query-c4426ae84862e720673485e3b59c116162becce06841476128f864b6028129df.json deleted file mode 100644 index 4b69afd7e9c..00000000000 --- a/core/lib/dal/.sqlx/query-c4426ae84862e720673485e3b59c116162becce06841476128f864b6028129df.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE miniblocks\n SET\n fee_account_address = (\n SELECT\n l1_batches.fee_account_address\n FROM\n l1_batches\n ORDER BY\n l1_batches.number DESC\n LIMIT\n 1\n )\n WHERE\n l1_batch_number IS NULL\n AND fee_account_address = '\\x0000000000000000000000000000000000000000'::bytea\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [] - }, - "nullable": [] - }, - "hash": "c4426ae84862e720673485e3b59c116162becce06841476128f864b6028129df" -} diff --git a/core/lib/dal/.sqlx/query-eab03e888f20020462ede2cd59fc0d68195346daf5f38d102eab1c1b73b0f82f.json b/core/lib/dal/.sqlx/query-eab03e888f20020462ede2cd59fc0d68195346daf5f38d102eab1c1b73b0f82f.json deleted file mode 100644 index 5cd05036f98..00000000000 --- a/core/lib/dal/.sqlx/query-eab03e888f20020462ede2cd59fc0d68195346daf5f38d102eab1c1b73b0f82f.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT COUNT(*)\n FROM information_schema.columns\n WHERE table_name = 'l1_batches' AND column_name = 'fee_account_address'\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - null - ] - }, - "hash": "eab03e888f20020462ede2cd59fc0d68195346daf5f38d102eab1c1b73b0f82f" -} diff --git a/core/lib/dal/migrations/20240409102923_drop_l1_batches_fee_account_address.down.sql 
b/core/lib/dal/migrations/20240409102923_drop_l1_batches_fee_account_address.down.sql new file mode 100644 index 00000000000..165c607e1b6 --- /dev/null +++ b/core/lib/dal/migrations/20240409102923_drop_l1_batches_fee_account_address.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE l1_batches + ADD COLUMN IF NOT EXISTS fee_account_address BYTEA DEFAULT '\x0000000000000000000000000000000000000000'::bytea; diff --git a/core/lib/dal/migrations/20240409102923_drop_l1_batches_fee_account_address.up.sql b/core/lib/dal/migrations/20240409102923_drop_l1_batches_fee_account_address.up.sql new file mode 100644 index 00000000000..a0c7512961b --- /dev/null +++ b/core/lib/dal/migrations/20240409102923_drop_l1_batches_fee_account_address.up.sql @@ -0,0 +1 @@ +ALTER TABLE l1_batches DROP COLUMN IF EXISTS fee_account_address; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index aa66cfe251e..418bdad3b22 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -713,16 +713,7 @@ impl BlocksDal<'_, '_> { .fetch_optional(self.storage) .await?; - let Some(header) = header else { - return Ok(None); - }; - let mut header = MiniblockHeader::from(header); - // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration - #[allow(deprecated)] - self.maybe_load_fee_address(&mut header.fee_account_address, header.number) - .await?; - - Ok(Some(header)) + Ok(header.map(Into::into)) } pub async fn get_miniblock_header( @@ -761,16 +752,7 @@ impl BlocksDal<'_, '_> { .fetch_optional(self.storage) .await?; - let Some(header) = header else { - return Ok(None); - }; - let mut header = MiniblockHeader::from(header); - // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration - #[allow(deprecated)] - self.maybe_load_fee_address(&mut header.fee_account_address, header.number) - .await?; - - Ok(Some(header)) + Ok(header.map(Into::into)) } pub async fn mark_miniblocks_as_executed_in_l1_batch( @@ -2116,22 +2098,6 @@ impl BlocksDal<'_, '_> { pub async fn get_fee_address_for_miniblock( &mut self, number: MiniblockNumber, - ) -> DalResult> { - let Some(mut fee_account_address) = self.raw_fee_address_for_miniblock(number).await? - else { - return Ok(None); - }; - - // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration - #[allow(deprecated)] - self.maybe_load_fee_address(&mut fee_account_address, number) - .await?; - Ok(Some(fee_account_address)) - } - - async fn raw_fee_address_for_miniblock( - &mut self, - number: MiniblockNumber, ) -> DalResult> { let Some(row) = sqlx::query!( r#" @@ -2144,7 +2110,7 @@ impl BlocksDal<'_, '_> { "#, number.0 as i32 ) - .instrument("raw_fee_address_for_miniblock") + .instrument("get_fee_address_for_miniblock") .with_arg("number", &number) .fetch_optional(self.storage) .await? @@ -2229,157 +2195,6 @@ impl BlocksDal<'_, '_> { } } -/// Temporary methods for migrating `fee_account_address`. -#[deprecated(note = "will be removed after the fee address migration is complete")] -impl BlocksDal<'_, '_> { - pub(crate) async fn maybe_load_fee_address( - &mut self, - fee_address: &mut Address, - miniblock_number: MiniblockNumber, - ) -> DalResult<()> { - if *fee_address != Address::default() { - return Ok(()); - } - - // This clause should be triggered only for non-migrated miniblock rows. After `fee_account_address` - // is filled for all miniblocks, it won't be called; thus, `fee_account_address` column could be removed - // from `l1_batches` even with this code present. 
- let Some(row) = sqlx::query!( - r#" - SELECT - l1_batches.fee_account_address - FROM - l1_batches - INNER JOIN miniblocks ON miniblocks.l1_batch_number = l1_batches.number - WHERE - miniblocks.number = $1 - "#, - miniblock_number.0 as i32 - ) - .instrument("maybe_load_fee_address") - .with_arg("miniblock_number", &miniblock_number) - .fetch_optional(self.storage) - .await? - else { - return Ok(()); - }; - - *fee_address = Address::from_slice(&row.fee_account_address); - Ok(()) - } - - /// Checks whether `fee_account_address` is migrated for the specified miniblock. Returns - /// `Ok(None)` if the miniblock doesn't exist. - pub async fn is_fee_address_migrated( - &mut self, - number: MiniblockNumber, - ) -> DalResult> { - Ok(self - .raw_fee_address_for_miniblock(number) - .await? - .map(|address| address != Address::default())) - } - - /// Copies `fee_account_address` for pending miniblocks (ones without an associated L1 batch) - /// from the last L1 batch. Returns the number of affected rows. - pub async fn copy_fee_account_address_for_pending_miniblocks(&mut self) -> DalResult { - let execution_result = sqlx::query!( - r#" - UPDATE miniblocks - SET - fee_account_address = ( - SELECT - l1_batches.fee_account_address - FROM - l1_batches - ORDER BY - l1_batches.number DESC - LIMIT - 1 - ) - WHERE - l1_batch_number IS NULL - AND fee_account_address = '\x0000000000000000000000000000000000000000'::bytea - "# - ) - .instrument("copy_fee_account_address_for_pending_miniblocks") - .execute(self.storage) - .await?; - - Ok(execution_result.rows_affected()) - } - - pub async fn check_l1_batches_have_fee_account_address(&mut self) -> DalResult { - let count = sqlx::query_scalar!( - r#" - SELECT COUNT(*) - FROM information_schema.columns - WHERE table_name = 'l1_batches' AND column_name = 'fee_account_address' - "# - ) - .instrument("check_l1_batches_have_fee_account_address") - .fetch_one(self.storage) - .await? - .unwrap_or(0); - - Ok(count > 0) - } - - /// Copies `fee_account_address` for miniblocks in the given range from the L1 batch they belong to. - /// Returns the number of affected rows. - pub async fn copy_fee_account_address_for_miniblocks( - &mut self, - numbers: ops::RangeInclusive, - ) -> DalResult { - let execution_result = sqlx::query!( - r#" - UPDATE miniblocks - SET - fee_account_address = l1_batches.fee_account_address - FROM - l1_batches - WHERE - l1_batches.number = miniblocks.l1_batch_number - AND miniblocks.number BETWEEN $1 AND $2 - AND miniblocks.fee_account_address = '\x0000000000000000000000000000000000000000'::bytea - "#, - i64::from(numbers.start().0), - i64::from(numbers.end().0) - ) - .instrument("copy_fee_account_address_for_miniblocks") - .with_arg("numbers", &numbers) - .execute(self.storage) - .await?; - - Ok(execution_result.rows_affected()) - } - - /// Sets `fee_account_address` for an L1 batch. Should only be used in tests. - pub async fn set_l1_batch_fee_address( - &mut self, - l1_batch: L1BatchNumber, - fee_account_address: Address, - ) -> DalResult<()> { - sqlx::query!( - r#" - UPDATE l1_batches - SET - fee_account_address = $1::bytea - WHERE - number = $2 - "#, - fee_account_address.as_bytes(), - i64::from(l1_batch.0) - ) - .instrument("set_l1_batch_fee_address") - .with_arg("l1_batch", &l1_batch) - .with_arg("fee_account_address", &fee_account_address) - .execute(self.storage) - .await?; - Ok(()) - } -} - /// These methods should only be used for tests. impl BlocksDal<'_, '_> { // The actual l1 batch hash is only set by the metadata calculator. 
@@ -2438,7 +2253,7 @@ mod tests { }; use super::*; - use crate::{tests::create_miniblock_header, ConnectionPool, Core, CoreDal}; + use crate::{ConnectionPool, Core, CoreDal}; #[tokio::test] async fn loading_l1_batch_header() { @@ -2557,145 +2372,4 @@ mod tests { assert_eq!(gas, 3 * expected_gas); } } - - #[allow(deprecated)] // that's the whole point - #[tokio::test] - async fn checking_fee_account_address_in_l1_batches() { - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - assert!(conn - .blocks_dal() - .check_l1_batches_have_fee_account_address() - .await - .unwrap()); - } - - #[allow(deprecated)] // that's the whole point - #[tokio::test] - async fn ensuring_fee_account_address_for_miniblocks() { - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - conn.protocol_versions_dal() - .save_protocol_version_with_tx(&ProtocolVersion::default()) - .await - .unwrap(); - - for number in [1, 2] { - let l1_batch = L1BatchHeader::new( - L1BatchNumber(number), - 100, - BaseSystemContractsHashes { - bootloader: H256::repeat_byte(1), - default_aa: H256::repeat_byte(42), - }, - ProtocolVersionId::latest(), - ); - let miniblock = MiniblockHeader { - fee_account_address: Address::default(), - ..create_miniblock_header(number) - }; - conn.blocks_dal() - .insert_miniblock(&miniblock) - .await - .unwrap(); - conn.blocks_dal() - .insert_mock_l1_batch(&l1_batch) - .await - .unwrap(); - conn.blocks_dal() - .mark_miniblocks_as_executed_in_l1_batch(L1BatchNumber(number)) - .await - .unwrap(); - - assert_eq!( - conn.blocks_dal() - .is_fee_address_migrated(miniblock.number) - .await - .unwrap(), - Some(false) - ); - } - - // Manually set `fee_account_address` for the inserted L1 batches. - conn.blocks_dal() - .set_l1_batch_fee_address(L1BatchNumber(1), Address::repeat_byte(0x23)) - .await - .unwrap(); - conn.blocks_dal() - .set_l1_batch_fee_address(L1BatchNumber(2), Address::repeat_byte(0x42)) - .await - .unwrap(); - - // Add a pending miniblock. - let miniblock = MiniblockHeader { - fee_account_address: Address::default(), - ..create_miniblock_header(3) - }; - conn.blocks_dal() - .insert_miniblock(&miniblock) - .await - .unwrap(); - - let rows_affected = conn - .blocks_dal() - .copy_fee_account_address_for_miniblocks(MiniblockNumber(0)..=MiniblockNumber(100)) - .await - .unwrap(); - - assert_eq!(rows_affected, 2); - let first_miniblock_addr = conn - .blocks_dal() - .raw_fee_address_for_miniblock(MiniblockNumber(1)) - .await - .unwrap() - .expect("No fee address for block #1"); - assert_eq!(first_miniblock_addr, Address::repeat_byte(0x23)); - let second_miniblock_addr = conn - .blocks_dal() - .raw_fee_address_for_miniblock(MiniblockNumber(2)) - .await - .unwrap() - .expect("No fee address for block #1"); - assert_eq!(second_miniblock_addr, Address::repeat_byte(0x42)); - // The pending miniblock should not be affected. 
- let pending_miniblock_addr = conn - .blocks_dal() - .raw_fee_address_for_miniblock(MiniblockNumber(3)) - .await - .unwrap() - .expect("No fee address for block #3"); - assert_eq!(pending_miniblock_addr, Address::default()); - assert_eq!( - conn.blocks_dal() - .is_fee_address_migrated(MiniblockNumber(3)) - .await - .unwrap(), - Some(false) - ); - - let rows_affected = conn - .blocks_dal() - .copy_fee_account_address_for_pending_miniblocks() - .await - .unwrap(); - assert_eq!(rows_affected, 1); - - let pending_miniblock_addr = conn - .blocks_dal() - .raw_fee_address_for_miniblock(MiniblockNumber(3)) - .await - .unwrap() - .expect("No fee address for block #3"); - assert_eq!(pending_miniblock_addr, Address::repeat_byte(0x42)); - - for number in 1..=3 { - assert_eq!( - conn.blocks_dal() - .is_fee_address_migrated(MiniblockNumber(number)) - .await - .unwrap(), - Some(true) - ); - } - } } diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 3e5d8ca918c..f42408b2cdd 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -614,18 +614,7 @@ impl BlocksWeb3Dal<'_, '_> { .fetch_optional(self.storage) .await?; - let Some(storage_block_details) = storage_block_details else { - return Ok(None); - }; - let mut details = api::BlockDetails::from(storage_block_details); - - // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration - #[allow(deprecated)] - self.storage - .blocks_dal() - .maybe_load_fee_address(&mut details.operator_address, details.number) - .await?; - Ok(Some(details)) + Ok(storage_block_details.map(Into::into)) } pub async fn get_l1_batch_details( diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 4d23aa89ca5..60fe7235ff8 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -19,7 +19,7 @@ impl SyncDal<'_, '_> { &mut self, block_number: MiniblockNumber, ) -> DalResult> { - let Some(mut block) = sqlx::query_as!( + let block = sqlx::query_as!( StorageSyncBlock, r#" SELECT @@ -68,18 +68,9 @@ impl SyncDal<'_, '_> { .instrument("sync_dal_sync_block.block") .with_arg("block_number", &block_number) .fetch_optional(self.storage) - .await? 
-        else {
-            return Ok(None);
-        };
+        .await?;
 
-        // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration
-        #[allow(deprecated)]
-        self.storage
-            .blocks_dal()
-            .maybe_load_fee_address(&mut block.fee_account_address, block.number)
-            .await?;
-        Ok(Some(block))
+        Ok(block)
     }
 
     pub async fn sync_block(
diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs
index 0a873006923..5a9956237f3 100644
--- a/core/lib/zksync_core/src/lib.rs
+++ b/core/lib/zksync_core/src/lib.rs
@@ -849,7 +849,7 @@ async fn add_state_keeper_to_task_futures(
         db_config,
         l2chain_id,
         mempool_config,
-        state_keeper_pool.clone(),
+        state_keeper_pool,
         mempool.clone(),
         batch_fee_input_provider.clone(),
         OutputHandler::new(Box::new(persistence)),
@@ -863,9 +863,6 @@ async fn add_state_keeper_to_task_futures(
         stop_receiver_clone.changed().await?;
         result
     }));
-    task_futures.push(tokio::spawn(
-        state_keeper.run_fee_address_migration(state_keeper_pool),
-    ));
     task_futures.push(tokio::spawn(state_keeper.run()));
 
     let mempool_fetcher_pool = pool_builder
diff --git a/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs b/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs
deleted file mode 100644
index a42b4ea4477..00000000000
--- a/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs
+++ /dev/null
@@ -1,362 +0,0 @@
-//! Temporary module for migrating fee addresses from L1 batches to miniblocks.
-
-// FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration
-
-use std::time::{Duration, Instant};
-
-use anyhow::Context as _;
-use tokio::sync::watch;
-use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
-use zksync_types::MiniblockNumber;
-
-/// Runs the migration for pending miniblocks.
-pub(crate) async fn migrate_pending_miniblocks(
-    storage: &mut Connection<'_, Core>,
-) -> anyhow::Result<()> {
-    let started_at = Instant::now();
-    tracing::info!("Started migrating `fee_account_address` for pending miniblocks");
-
-    #[allow(deprecated)]
-    let l1_batches_have_fee_account_address = storage
-        .blocks_dal()
-        .check_l1_batches_have_fee_account_address()
-        .await?;
-    if !l1_batches_have_fee_account_address {
-        tracing::info!("`l1_batches.fee_account_address` column is removed; assuming that the migration is complete");
-        return Ok(());
-    }
-
-    #[allow(deprecated)]
-    let rows_affected = storage
-        .blocks_dal()
-        .copy_fee_account_address_for_pending_miniblocks()
-        .await?;
-    let elapsed = started_at.elapsed();
-    tracing::info!("Migrated `fee_account_address` for {rows_affected} miniblocks in {elapsed:?}");
-    Ok(())
-}
-
-/// Runs the migration for non-pending miniblocks. Should be run as a background task.
-pub(crate) async fn migrate_miniblocks(
-    pool: ConnectionPool<Core>,
-    stop_receiver: watch::Receiver<bool>,
-) -> anyhow::Result<()> {
-    // `migrate_miniblocks_inner` assumes that miniblocks start from the genesis (i.e., no snapshot recovery).
-    // Since snapshot recovery is later than the fee address migration in terms of code versioning,
-    // the migration is always a no-op in case of snapshot recovery; all miniblocks added after recovery are guaranteed
-    // to have their fee address set.
-    let mut storage = pool.connection_tagged("state_keeper").await?;
-    if storage
-        .snapshot_recovery_dal()
-        .get_applied_snapshot_status()
-        .await?
- .is_some() - { - tracing::info!("Detected snapshot recovery; fee address migration is skipped as no-op"); - return Ok(()); - } - let last_miniblock = storage - .blocks_dal() - .get_sealed_miniblock_number() - .await? - .context("storage is empty, but there's no snapshot recovery data")?; - drop(storage); - - let MigrationOutput { - miniblocks_affected, - } = migrate_miniblocks_inner( - pool, - last_miniblock, - 100_000, - Duration::from_secs(1), - stop_receiver, - ) - .await?; - - tracing::info!("Finished fee address migration with {miniblocks_affected} affected miniblocks"); - Ok(()) -} - -#[derive(Debug, Default)] -struct MigrationOutput { - miniblocks_affected: u64, -} - -/// It's important for the `chunk_size` to be a constant; this ensures that each chunk is migrated atomically. -async fn migrate_miniblocks_inner( - pool: ConnectionPool, - last_miniblock: MiniblockNumber, - chunk_size: u32, - sleep_interval: Duration, - stop_receiver: watch::Receiver, -) -> anyhow::Result { - anyhow::ensure!(chunk_size > 0, "Chunk size must be positive"); - - let mut storage = pool.connection_tagged("state_keeper").await?; - #[allow(deprecated)] - let l1_batches_have_fee_account_address = storage - .blocks_dal() - .check_l1_batches_have_fee_account_address() - .await?; - drop(storage); - if !l1_batches_have_fee_account_address { - tracing::info!("`l1_batches.fee_account_address` column is removed; assuming that the migration is complete"); - return Ok(MigrationOutput::default()); - } - - let mut chunk_start = MiniblockNumber(0); - let mut miniblocks_affected = 0; - - tracing::info!( - "Migrating `fee_account_address` for miniblocks {chunk_start}..={last_miniblock} \ - in chunks of {chunk_size} miniblocks" - ); - while chunk_start <= last_miniblock { - let chunk_end = last_miniblock.min(chunk_start + chunk_size - 1); - let chunk = chunk_start..=chunk_end; - - let mut storage = pool.connection_tagged("state_keeper").await?; - let is_chunk_migrated = is_fee_address_migrated(&mut storage, chunk_start).await?; - - if is_chunk_migrated { - tracing::debug!("`fee_account_address` is migrated for chunk {chunk:?}"); - } else { - tracing::debug!("Migrating `fee_account_address` for miniblocks chunk {chunk:?}"); - - #[allow(deprecated)] - let rows_affected = storage - .blocks_dal() - .copy_fee_account_address_for_miniblocks(chunk.clone()) - .await?; - tracing::debug!("Migrated {rows_affected} miniblocks in chunk {chunk:?}"); - miniblocks_affected += rows_affected; - } - drop(storage); - - if *stop_receiver.borrow() { - tracing::info!("Stop signal received; fee address migration shutting down"); - return Ok(MigrationOutput { - miniblocks_affected, - }); - } - chunk_start = chunk_end + 1; - - if !is_chunk_migrated { - tokio::time::sleep(sleep_interval).await; - } - } - - Ok(MigrationOutput { - miniblocks_affected, - }) -} - -#[allow(deprecated)] -async fn is_fee_address_migrated( - storage: &mut Connection<'_, Core>, - miniblock: MiniblockNumber, -) -> anyhow::Result { - storage - .blocks_dal() - .is_fee_address_migrated(miniblock) - .await? 
- .with_context(|| format!("Miniblock #{miniblock} disappeared")) -} - -#[cfg(test)] -mod tests { - use test_casing::test_casing; - use zksync_contracts::BaseSystemContractsHashes; - use zksync_types::{ - block::L1BatchHeader, Address, L1BatchNumber, ProtocolVersion, ProtocolVersionId, - }; - - use super::*; - use crate::utils::testonly::create_miniblock; - - async fn prepare_storage(storage: &mut Connection<'_, Core>) { - storage - .protocol_versions_dal() - .save_protocol_version_with_tx(&ProtocolVersion::default()) - .await - .unwrap(); - for number in 0..5 { - let miniblock = create_miniblock(number); - storage - .blocks_dal() - .insert_miniblock(&miniblock) - .await - .unwrap(); - - let l1_batch = L1BatchHeader::new( - L1BatchNumber(number), - number.into(), - BaseSystemContractsHashes::default(), - ProtocolVersionId::latest(), - ); - storage - .blocks_dal() - .insert_mock_l1_batch(&l1_batch) - .await - .unwrap(); - #[allow(deprecated)] - storage - .blocks_dal() - .set_l1_batch_fee_address( - l1_batch.number, - Address::from_low_u64_be(u64::from(number) + 1), - ) - .await - .unwrap(); - storage - .blocks_dal() - .mark_miniblocks_as_executed_in_l1_batch(l1_batch.number) - .await - .unwrap(); - } - } - - async fn assert_migration(storage: &mut Connection<'_, Core>) { - for number in 0..5 { - assert!(is_fee_address_migrated(storage, MiniblockNumber(number)) - .await - .unwrap()); - - let fee_address = storage - .blocks_dal() - .get_fee_address_for_miniblock(MiniblockNumber(number)) - .await - .unwrap() - .expect("no fee address"); - let expected_address = Address::from_low_u64_be(u64::from(number) + 1); - assert_eq!(fee_address, expected_address); - } - } - - #[test_casing(3, [1, 2, 3])] - #[tokio::test] - async fn migration_basics(chunk_size: u32) { - // Replicate providing a pool with a single connection. - let pool = ConnectionPool::::constrained_test_pool(1).await; - let mut storage = pool.connection().await.unwrap(); - prepare_storage(&mut storage).await; - drop(storage); - - let (_stop_sender, stop_receiver) = watch::channel(false); - let result = migrate_miniblocks_inner( - pool.clone(), - MiniblockNumber(4), - chunk_size, - Duration::ZERO, - stop_receiver.clone(), - ) - .await - .unwrap(); - - assert_eq!(result.miniblocks_affected, 5); - - // Check that all blocks are migrated. - let mut storage = pool.connection().await.unwrap(); - assert_migration(&mut storage).await; - drop(storage); - - // Check that migration can run again w/o returning an error, hanging up etc. - let result = migrate_miniblocks_inner( - pool.clone(), - MiniblockNumber(4), - chunk_size, - Duration::ZERO, - stop_receiver, - ) - .await - .unwrap(); - - assert_eq!(result.miniblocks_affected, 0); - } - - #[test_casing(3, [1, 2, 3])] - #[tokio::test] - async fn stopping_and_resuming_migration(chunk_size: u32) { - let pool = ConnectionPool::::constrained_test_pool(1).await; - let mut storage = pool.connection().await.unwrap(); - prepare_storage(&mut storage).await; - drop(storage); - - let (_stop_sender, stop_receiver) = watch::channel(true); // signal stop right away - let result = migrate_miniblocks_inner( - pool.clone(), - MiniblockNumber(4), - chunk_size, - Duration::from_secs(1_000), - stop_receiver, - ) - .await - .unwrap(); - - // Migration should stop after a single chunk. - assert_eq!(result.miniblocks_affected, u64::from(chunk_size)); - - // Check that migration resumes from the same point. 
- let (_stop_sender, stop_receiver) = watch::channel(false); - let result = migrate_miniblocks_inner( - pool.clone(), - MiniblockNumber(4), - chunk_size, - Duration::ZERO, - stop_receiver, - ) - .await - .unwrap(); - - assert_eq!(result.miniblocks_affected, 5 - u64::from(chunk_size)); - let mut storage = pool.connection().await.unwrap(); - assert_migration(&mut storage).await; - } - - #[test_casing(3, [1, 2, 3])] - #[tokio::test] - async fn new_blocks_added_during_migration(chunk_size: u32) { - let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.connection().await.unwrap(); - prepare_storage(&mut storage).await; - - let (_stop_sender, stop_receiver) = watch::channel(true); // signal stop right away - let result = migrate_miniblocks_inner( - pool.clone(), - MiniblockNumber(4), - chunk_size, - Duration::from_secs(1_000), - stop_receiver, - ) - .await - .unwrap(); - - // Migration should stop after a single chunk. - assert_eq!(result.miniblocks_affected, u64::from(chunk_size)); - - // Insert a new miniblock to the storage with a defined fee account address. - let mut miniblock = create_miniblock(5); - miniblock.fee_account_address = Address::repeat_byte(1); - storage - .blocks_dal() - .insert_miniblock(&miniblock) - .await - .unwrap(); - - // Resume the migration. - let (_stop_sender, stop_receiver) = watch::channel(false); - let result = migrate_miniblocks_inner( - pool.clone(), - MiniblockNumber(5), - chunk_size, - Duration::ZERO, - stop_receiver, - ) - .await - .unwrap(); - - // The new miniblock should not be affected. - assert_eq!(result.miniblocks_affected, 5 - u64::from(chunk_size)); - assert_migration(&mut storage).await; - } -} diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index 9d2197afb3d..fe61d2eb595 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -26,7 +26,7 @@ use crate::{ extractors, io::{ common::{load_pending_batch, poll_iters, IoCursor}, - fee_address_migration, L1BatchParams, MiniblockParams, PendingBatchData, StateKeeperIO, + L1BatchParams, MiniblockParams, PendingBatchData, StateKeeperIO, }, mempool_actor::l2_tx_filter, metrics::KEEPER_METRICS, @@ -406,7 +406,6 @@ impl MempoolIO { let l1_batch_params_provider = L1BatchParamsProvider::new(&mut storage) .await .context("failed initializing L1 batch params provider")?; - fee_address_migration::migrate_pending_miniblocks(&mut storage).await?; drop(storage); Ok(Self { diff --git a/core/lib/zksync_core/src/state_keeper/io/mod.rs b/core/lib/zksync_core/src/state_keeper/io/mod.rs index d52f301868d..5ad5bd4495c 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mod.rs @@ -18,7 +18,6 @@ pub use self::{ use super::seal_criteria::IoSealCriteria; pub(crate) mod common; -pub(crate) mod fee_address_migration; pub(crate) mod mempool; mod output_handler; mod persistence; diff --git a/core/lib/zksync_core/src/state_keeper/keeper.rs b/core/lib/zksync_core/src/state_keeper/keeper.rs index 2e5e2f4f5f2..39b6f2527da 100644 --- a/core/lib/zksync_core/src/state_keeper/keeper.rs +++ b/core/lib/zksync_core/src/state_keeper/keeper.rs @@ -1,6 +1,5 @@ use std::{ convert::Infallible, - future::Future, sync::Arc, time::{Duration, Instant}, }; @@ -8,7 +7,6 @@ use std::{ use anyhow::Context as _; use multivm::interface::{Halt, L1BatchEnv, SystemEnv}; use tokio::sync::watch; -use zksync_dal::{ConnectionPool, Core}; use 
zksync_types::{ block::MiniblockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, protocol_version::ProtocolVersionId, storage_writes_deduplicator::StorageWritesDeduplicator, @@ -18,10 +16,7 @@ use zksync_types::{ use super::{ batch_executor::{BatchExecutor, BatchExecutorHandle, TxExecutionResult}, extractors, - io::{ - fee_address_migration, IoCursor, MiniblockParams, OutputHandler, PendingBatchData, - StateKeeperIO, - }, + io::{IoCursor, MiniblockParams, OutputHandler, PendingBatchData, StateKeeperIO}, metrics::{AGGREGATION_METRICS, KEEPER_METRICS, L1_BATCH_METRICS}, seal_criteria::{ConditionalSealer, SealData, SealResolution}, types::ExecutionMetricsForCriteria, @@ -86,21 +81,6 @@ impl ZkSyncStateKeeper { } } - /// Temporary method to migrate fee addresses from L1 batches to miniblocks. - pub fn run_fee_address_migration( - &self, - pool: ConnectionPool, - ) -> impl Future> { - let mut stop_receiver = self.stop_receiver.clone(); - async move { - fee_address_migration::migrate_miniblocks(pool, stop_receiver.clone()).await?; - // Since this is run as a task, we don't want it to exit on success (this would shut down the node). - // We still want for the task to be cancellation-aware, so we just wait until a stop signal is sent. - stop_receiver.changed().await.ok(); - Ok(()) - } - } - pub async fn run(mut self) -> anyhow::Result<()> { match self.run_inner().await { Ok(_) => unreachable!(), diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index 3f200e2f6a6..3eec7a18612 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -18,7 +18,7 @@ use super::{ use crate::state_keeper::{ io::{ common::{load_pending_batch, poll_iters, IoCursor}, - fee_address_migration, L1BatchParams, MiniblockParams, PendingBatchData, StateKeeperIO, + L1BatchParams, MiniblockParams, PendingBatchData, StateKeeperIO, }, metrics::KEEPER_METRICS, seal_criteria::IoSealCriteria, @@ -54,9 +54,6 @@ impl ExternalIO { let l1_batch_params_provider = L1BatchParamsProvider::new(&mut storage) .await .context("failed initializing L1 batch params provider")?; - // We must run the migration for pending miniblocks synchronously, since we use `fee_account_address` - // from a pending miniblock in `load_pending_batch()` implementation. - fee_address_migration::migrate_pending_miniblocks(&mut storage).await?; drop(storage); Ok(Self { diff --git a/infrastructure/zk/src/database.ts b/infrastructure/zk/src/database.ts index 9b67baaebae..4f897486146 100644 --- a/infrastructure/zk/src/database.ts +++ b/infrastructure/zk/src/database.ts @@ -197,7 +197,7 @@ command .action((opts: DbGenerateMigrationOpts) => { if ((!opts.prover && !opts.core) || (opts.prover && opts.core)) { throw new Error( - '[aborted] please specify a single database to generate migration for (i.e. to generate a migration for server `zk db new-migration --server name_of_migration`' + '[aborted] please specify a single database to generate migration for (i.e. 
to generate a migration for server `zk db new-migration --core name_of_migration`' ); } if (opts.prover) { From 7c8ae40357a6ceeeb097c019588a8be18326bed1 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 10 Apr 2024 13:45:57 +0300 Subject: [PATCH 08/29] feat: Expose component configs as info metrics (#1584) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Exposes configs of various components as info metrics [defined](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#info) in the Open Metrics standard. - Reworks mempool cache to be shared between API servers. ## Why ❔ - More idiomatic and observable than printing these configs to logs (or not observing at all). - Allows to use configs in Grafana dashboards, alerts etc. - The mempool cache makes sense to share logically, and if multiple instances are used, reported metrics can be bogus. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. --- .github/release-please/config.json | 8 +- Cargo.lock | 3 +- Cargo.toml | 1 + core/bin/external_node/Cargo.toml | 5 +- core/bin/external_node/build.rs | 46 ++++ core/bin/external_node/src/config/mod.rs | 2 - core/bin/external_node/src/main.rs | 215 ++++++++---------- core/bin/external_node/src/metadata.rs | 22 ++ core/bin/external_node/src/metrics.rs | 92 +++++++- core/lib/health_check/src/lib.rs | 16 +- core/lib/health_check/src/metrics.rs | 15 +- core/lib/state/src/cache/lru_cache.rs | 12 +- core/lib/state/src/cache/metrics.rs | 25 +- core/lib/state/src/cache/sequential_cache.rs | 15 +- core/lib/state/src/postgres/mod.rs | 4 +- core/lib/web3_decl/src/client/metrics.rs | 30 ++- core/lib/web3_decl/src/client/mod.rs | 5 +- .../src/api_server/web3/mempool_cache.rs | 96 ++++---- .../src/api_server/web3/metrics.rs | 52 ++++- .../zksync_core/src/api_server/web3/mod.rs | 36 ++- .../src/api_server/web3/namespaces/eth.rs | 39 ++-- .../zksync_core/src/api_server/web3/state.rs | 6 +- core/lib/zksync_core/src/lib.rs | 19 +- .../src/metadata_calculator/metrics.rs | 54 ++++- .../src/metadata_calculator/mod.rs | 9 + .../node/node_framework/examples/main_node.rs | 11 + .../implementations/layers/web3_api/caches.rs | 56 +++++ .../implementations/layers/web3_api/mod.rs | 1 + .../implementations/layers/web3_api/server.rs | 6 +- .../src/implementations/resources/web3_api.rs | 10 + 30 files changed, 669 insertions(+), 242 deletions(-) create mode 100644 core/bin/external_node/build.rs create mode 100644 core/bin/external_node/src/metadata.rs create mode 100644 core/node/node_framework/src/implementations/layers/web3_api/caches.rs diff --git a/.github/release-please/config.json b/.github/release-please/config.json index 300ce9ec0a0..4f69aa4f937 100644 --- a/.github/release-please/config.json +++ b/.github/release-please/config.json @@ -7,7 +7,13 @@ "packages": { "core": { "release-type": "simple", - "component": "core" + "component": "core", + "extra-files": [ + { + "type": "generic", + "path": "bin/external_node/Cargo.toml" + } + ] }, "prover": { "release-type": "simple", diff --git a/Cargo.lock b/Cargo.lock index 12dca9f4bc4..f000dc83cc7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8526,13 +8526,14 @@ dependencies = 
[ [[package]] name = "zksync_external_node" -version = "0.1.0" +version = "22.1.0" dependencies = [ "anyhow", "clap 4.4.6", "envy", "futures 0.3.28", "prometheus_exporter", + "rustc_version", "semver", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index 5284954d01d..cefe6294361 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,6 +120,7 @@ regex = "1" reqwest = "0.11" rlp = "0.5" rocksdb = "0.21.0" +rustc_version = "0.4.0" secp256k1 = "0.27.0" semver = "1" sentry = "0.31" diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index fb3517dbd21..ebdc0ed13a3 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_external_node" -version = "0.1.0" +version = "22.1.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true @@ -44,3 +44,6 @@ clap = { workspace = true, features = ["derive"] } serde_json.workspace = true semver.workspace = true tracing.workspace = true + +[build-dependencies] +rustc_version.workspace = true diff --git a/core/bin/external_node/build.rs b/core/bin/external_node/build.rs new file mode 100644 index 00000000000..d37fef0b1b0 --- /dev/null +++ b/core/bin/external_node/build.rs @@ -0,0 +1,46 @@ +//! Build script for the external node binary. + +use std::{ + env, fs, + io::{self, Write}, + path::Path, +}; + +use rustc_version::{Channel, LlvmVersion}; + +fn print_rust_meta(out: &mut impl Write, meta: &rustc_version::VersionMeta) -> io::Result<()> { + writeln!( + out, + "pub(crate) const RUSTC_METADATA: RustcMetadata = RustcMetadata {{ \ + version: {semver:?}, \ + commit_hash: {commit_hash:?}, \ + commit_date: {commit_date:?}, \ + channel: {channel:?}, \ + host: {host:?}, \ + llvm: {llvm:?} \ + }};", + semver = meta.semver.to_string(), + commit_hash = meta.commit_hash, + commit_date = meta.commit_date, + channel = match meta.channel { + Channel::Dev => "dev", + Channel::Beta => "beta", + Channel::Nightly => "nightly", + Channel::Stable => "stable", + }, + host = meta.host, + llvm = meta.llvm_version.as_ref().map(LlvmVersion::to_string), + ) +} + +fn main() { + let out_dir = env::var("OUT_DIR").expect("`OUT_DIR` env var not set for build script"); + let rustc_meta = rustc_version::version_meta().expect("Failed obtaining rustc metadata"); + + let metadata_module_path = Path::new(&out_dir).join("metadata_values.rs"); + let metadata_module = + fs::File::create(metadata_module_path).expect("cannot create metadata module"); + let mut metadata_module = io::BufWriter::new(metadata_module); + + print_rust_meta(&mut metadata_module, &rustc_meta).expect("failed printing rustc metadata"); +} diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 9cddad5e767..bfa59a000bd 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -721,8 +721,6 @@ impl From for InternalApiConfig { req_entities_limit: config.optional.req_entities_limit, fee_history_limit: config.optional.fee_history_limit, filters_disabled: config.optional.filters_disabled, - mempool_cache_update_interval: config.optional.mempool_cache_update_interval(), - mempool_cache_size: config.optional.mempool_cache_size, dummy_verifier: config.remote.dummy_verifier, l1_batch_commit_data_generator_mode: config.remote.l1_batch_commit_data_generator_mode, } diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 68f507429cc..dfac57f0168 100644 --- 
a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -18,7 +18,7 @@ use zksync_core::{ healthcheck::HealthCheckHandle, tree::{TreeApiClient, TreeApiHttpClient}, tx_sender::{proxy::TxProxy, ApiContracts, TxSenderBuilder}, - web3::{ApiBuilder, Namespace}, + web3::{mempool_cache::MempoolCache, ApiBuilder, Namespace}, }, block_reverter::{BlockReverter, BlockReverterFlags, L1ExecutedBatchesRevert, NodeRole}, commitment_generator::CommitmentGenerator, @@ -57,16 +57,16 @@ use crate::{ config::{observability::observability_config_from_env, ExternalNodeConfig}, helpers::MainNodeHealthCheck, init::ensure_storage_initialized, + metrics::RUST_METRICS, }; mod config; mod helpers; mod init; +mod metadata; mod metrics; mod version_sync_task; -const RELEASE_MANIFEST: &str = include_str!("../../../../.github/release-please/manifest.json"); - /// Creates the state keeper configured to work in the external node mode. #[allow(clippy::too_many_arguments)] async fn build_state_keeper( @@ -356,13 +356,13 @@ async fn run_core( #[allow(clippy::too_many_arguments)] async fn run_api( + task_handles: &mut Vec>>, config: &ExternalNodeConfig, app_health: &AppHealthCheck, connection_pool: ConnectionPool, stop_receiver: watch::Receiver, sync_state: SyncState, tree_reader: Option>, - task_futures: &mut Vec>>, main_node_client: L2Client, singleton_pool_builder: ConnectionPoolBuilder, fee_params_fetcher: Arc, @@ -388,96 +388,87 @@ async fn run_api( } }; - let ( - tx_sender, - vm_barrier, - cache_update_handle, - proxy_cache_updater_handle, - whitelisted_tokens_update_handle, - ) = { - let tx_proxy = TxProxy::new(main_node_client.clone()); - let proxy_cache_updater_pool = singleton_pool_builder - .build() - .await - .context("failed to build a tree_pool")?; - let proxy_cache_updater_handle = tokio::spawn( - tx_proxy - .run_account_nonce_sweeper(proxy_cache_updater_pool.clone(), stop_receiver.clone()), - ); + let tx_proxy = TxProxy::new(main_node_client.clone()); + let proxy_cache_updater_pool = singleton_pool_builder + .build() + .await + .context("failed to build a tree_pool")?; + task_handles.push(tokio::spawn(tx_proxy.run_account_nonce_sweeper( + proxy_cache_updater_pool.clone(), + stop_receiver.clone(), + ))); - let tx_sender_builder = TxSenderBuilder::new( - config.clone().into(), - connection_pool.clone(), - Arc::new(tx_proxy), - ); + let tx_sender_builder = TxSenderBuilder::new( + config.clone().into(), + connection_pool.clone(), + Arc::new(tx_proxy), + ); - if config.optional.transactions_per_sec_limit.is_some() { - tracing::warn!("`transactions_per_sec_limit` option is deprecated and ignored"); - }; + if config.optional.transactions_per_sec_limit.is_some() { + tracing::warn!("`transactions_per_sec_limit` option is deprecated and ignored"); + }; - let max_concurrency = config.optional.vm_concurrency_limit; - let (vm_concurrency_limiter, vm_barrier) = VmConcurrencyLimiter::new(max_concurrency); - let mut storage_caches = PostgresStorageCaches::new( - config.optional.factory_deps_cache_size() as u64, - config.optional.initial_writes_cache_size() as u64, - ); - let latest_values_cache_size = config.optional.latest_values_cache_size() as u64; - let cache_update_handle = (latest_values_cache_size > 0).then(|| { - task::spawn( - storage_caches - .configure_storage_values_cache( - latest_values_cache_size, - connection_pool.clone(), - ) - .run(stop_receiver.clone()), - ) - }); - - let whitelisted_tokens_for_aa_cache = Arc::new(RwLock::new(Vec::new())); - let whitelisted_tokens_for_aa_cache_clone 
= whitelisted_tokens_for_aa_cache.clone(); - let mut stop_receiver_for_task = stop_receiver.clone(); - let whitelisted_tokens_update_task = task::spawn(async move { - loop { - match main_node_client.whitelisted_tokens_for_aa().await { - Ok(tokens) => { - *whitelisted_tokens_for_aa_cache_clone.write().await = tokens; - } - Err(jsonrpsee::core::client::Error::Call(error)) - if error.code() == jsonrpsee::types::error::METHOD_NOT_FOUND_CODE => - { - // Method is not supported by the main node, do nothing. - } - Err(err) => { - tracing::error!( - "Failed to query `whitelisted_tokens_for_aa`, error: {err:?}" - ); - } - } + let max_concurrency = config.optional.vm_concurrency_limit; + let (vm_concurrency_limiter, vm_barrier) = VmConcurrencyLimiter::new(max_concurrency); + let mut storage_caches = PostgresStorageCaches::new( + config.optional.factory_deps_cache_size() as u64, + config.optional.initial_writes_cache_size() as u64, + ); + let latest_values_cache_size = config.optional.latest_values_cache_size() as u64; + let cache_update_handle = (latest_values_cache_size > 0).then(|| { + task::spawn( + storage_caches + .configure_storage_values_cache(latest_values_cache_size, connection_pool.clone()) + .run(stop_receiver.clone()), + ) + }); + task_handles.extend(cache_update_handle); - // Error here corresponds to a timeout w/o `stop_receiver` changed; we're OK with this. - tokio::time::timeout(Duration::from_secs(60), stop_receiver_for_task.changed()) - .await - .ok(); + let whitelisted_tokens_for_aa_cache = Arc::new(RwLock::new(Vec::new())); + let whitelisted_tokens_for_aa_cache_clone = whitelisted_tokens_for_aa_cache.clone(); + let mut stop_receiver_for_task = stop_receiver.clone(); + task_handles.push(task::spawn(async move { + while !*stop_receiver_for_task.borrow_and_update() { + match main_node_client.whitelisted_tokens_for_aa().await { + Ok(tokens) => { + *whitelisted_tokens_for_aa_cache_clone.write().await = tokens; + } + Err(jsonrpsee::core::client::Error::Call(error)) + if error.code() == jsonrpsee::types::error::METHOD_NOT_FOUND_CODE => + { + // Method is not supported by the main node, do nothing. + } + Err(err) => { + tracing::error!("Failed to query `whitelisted_tokens_for_aa`, error: {err:?}"); + } } - }); - - let tx_sender = tx_sender_builder - .with_whitelisted_tokens_for_aa(whitelisted_tokens_for_aa_cache) - .build( - fee_params_fetcher, - Arc::new(vm_concurrency_limiter), - ApiContracts::load_from_disk(), // TODO (BFT-138): Allow to dynamically reload API contracts - storage_caches, - ) - .await; - ( - tx_sender, - vm_barrier, - cache_update_handle, - proxy_cache_updater_handle, - whitelisted_tokens_update_task, + + // Error here corresponds to a timeout w/o `stop_receiver` changed; we're OK with this. 
+ tokio::time::timeout(Duration::from_secs(60), stop_receiver_for_task.changed()) + .await + .ok(); + } + Ok(()) + })); + + let tx_sender = tx_sender_builder + .with_whitelisted_tokens_for_aa(whitelisted_tokens_for_aa_cache) + .build( + fee_params_fetcher, + Arc::new(vm_concurrency_limiter), + ApiContracts::load_from_disk(), // TODO (BFT-138): Allow to dynamically reload API contracts + storage_caches, ) - }; + .await; + + let mempool_cache = MempoolCache::new(config.optional.mempool_cache_size); + let mempool_cache_update_task = mempool_cache.update_task( + connection_pool.clone(), + config.optional.mempool_cache_update_interval(), + ); + task_handles.push(tokio::spawn( + mempool_cache_update_task.run(stop_receiver.clone()), + )); if components.contains(&Component::HttpApi) { let builder = ApiBuilder::jsonrpsee_backend(config.clone().into(), connection_pool.clone()) @@ -489,6 +480,7 @@ async fn run_api( .with_vm_barrier(vm_barrier.clone()) .with_tree_api(tree_reader.clone()) .with_sync_state(sync_state.clone()) + .with_mempool_cache(mempool_cache.clone()) .enable_api_namespaces(config.optional.api_namespaces()); let http_server_handles = builder @@ -498,7 +490,7 @@ async fn run_api( .await .context("Failed initializing HTTP JSON-RPC server")?; app_health.insert_component(http_server_handles.health_check); - task_futures.extend(http_server_handles.tasks); + task_handles.extend(http_server_handles.tasks); } if components.contains(&Component::WsApi) { @@ -513,6 +505,7 @@ async fn run_api( .with_vm_barrier(vm_barrier) .with_tree_api(tree_reader) .with_sync_state(sync_state) + .with_mempool_cache(mempool_cache) .enable_api_namespaces(config.optional.api_namespaces()); let ws_server_handles = builder @@ -522,13 +515,9 @@ async fn run_api( .await .context("Failed initializing WS JSON-RPC server")?; app_health.insert_component(ws_server_handles.health_check); - task_futures.extend(ws_server_handles.tasks); + task_handles.extend(ws_server_handles.tasks); } - task_futures.extend(cache_update_handle); - task_futures.push(proxy_cache_updater_handle); - task_futures.push(whitelisted_tokens_update_handle); - Ok(()) } @@ -541,36 +530,9 @@ async fn init_tasks( stop_receiver: watch::Receiver, components: &HashSet, ) -> anyhow::Result<()> { - let release_manifest: serde_json::Value = serde_json::from_str(RELEASE_MANIFEST) - .context("releuse manifest is a valid json document")?; - let release_manifest_version = release_manifest["core"].as_str().context( - "a release-please manifest with \"core\" version field was specified at build time", - )?; - - let version = semver::Version::parse(release_manifest_version) - .context("version in manifest is a correct semver format")?; - let pool = connection_pool.clone(); - let mut stop_receiver_for_task = stop_receiver.clone(); - task_handles.push(tokio::spawn(async move { - while !*stop_receiver_for_task.borrow_and_update() { - let protocol_version = pool - .connection() - .await? - .protocol_versions_dal() - .last_used_version_id() - .await - .map(|version| version as u16); - - EN_METRICS.version[&(version.to_string(), protocol_version)].set(1); - - // Error here corresponds to a timeout w/o `stop_receiver` changed; we're OK with this. 
- tokio::time::timeout(Duration::from_secs(10), stop_receiver_for_task.changed()) - .await - .ok(); - } - Ok(()) - })); - + let protocol_version_update_task = + EN_METRICS.run_protocol_version_updates(connection_pool.clone(), stop_receiver.clone()); + task_handles.push(tokio::spawn(protocol_version_update_task)); let singleton_pool_builder = ConnectionPool::singleton(&config.postgres.database_url); // Run the components. @@ -643,13 +605,13 @@ async fn init_tasks( if components.contains(&Component::HttpApi) || components.contains(&Component::WsApi) { run_api( + task_handles, config, app_health, connection_pool, stop_receiver.clone(), sync_state, tree_reader, - task_handles, main_node_client, singleton_pool_builder, fee_params_fetcher.clone(), @@ -801,6 +763,9 @@ async fn main() -> anyhow::Result<()> { ConnectionPool::::global_config().set_long_connection_threshold(threshold)?; } + RUST_METRICS.initialize(); + EN_METRICS.observe_config(&config); + let connection_pool = ConnectionPool::::builder( &config.postgres.database_url, config.postgres.max_connections, diff --git a/core/bin/external_node/src/metadata.rs b/core/bin/external_node/src/metadata.rs new file mode 100644 index 00000000000..ce454711a97 --- /dev/null +++ b/core/bin/external_node/src/metadata.rs @@ -0,0 +1,22 @@ +//! Metadata information about the external node. + +use vise::EncodeLabelSet; + +pub(crate) use self::values::RUSTC_METADATA; + +mod values { + use super::RustcMetadata; + include!(concat!(env!("OUT_DIR"), "/metadata_values.rs")); +} + +#[derive(Debug, EncodeLabelSet)] +pub(crate) struct RustcMetadata { + pub version: &'static str, + pub commit_hash: Option<&'static str>, + pub commit_date: Option<&'static str>, + pub channel: &'static str, + pub host: &'static str, + pub llvm: Option<&'static str>, +} + +pub(crate) const SERVER_VERSION: &str = env!("CARGO_PKG_VERSION"); diff --git a/core/bin/external_node/src/metrics.rs b/core/bin/external_node/src/metrics.rs index 1d493dd0087..a95b5af700c 100644 --- a/core/bin/external_node/src/metrics.rs +++ b/core/bin/external_node/src/metrics.rs @@ -1,11 +1,93 @@ -use vise::{Gauge, LabeledFamily, Metrics}; +use std::time::Duration; + +use tokio::sync::watch; +use vise::{EncodeLabelSet, Gauge, Info, Metrics}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; + +use crate::{ + config::ExternalNodeConfig, + metadata::{RustcMetadata, RUSTC_METADATA, SERVER_VERSION}, +}; + +/// Immutable EN parameters that affect multiple components. +#[derive(Debug, Clone, Copy, EncodeLabelSet)] +struct ExternalNodeInfo { + server_version: &'static str, + l1_chain_id: u64, + l2_chain_id: u64, + /// Size of the main Postgres connection pool. + postgres_pool_size: u32, +} #[derive(Debug, Metrics)] #[metrics(prefix = "external_node")] -pub(crate) struct EnMetrics { - #[metrics(labels = ["server_version", "protocol_version"])] - pub version: LabeledFamily<(String, Option), Gauge, 2>, +pub(crate) struct ExternalNodeMetrics { + /// General information about the external node. + info: Info, + /// Current protocol version. 
+ protocol_version: Gauge, +} + +impl ExternalNodeMetrics { + pub(crate) fn observe_config(&self, config: &ExternalNodeConfig) { + let info = ExternalNodeInfo { + server_version: SERVER_VERSION, + l1_chain_id: config.remote.l1_chain_id.0, + l2_chain_id: config.remote.l2_chain_id.as_u64(), + postgres_pool_size: config.postgres.max_connections, + }; + tracing::info!("Setting general node information: {info:?}"); + + if self.info.set(info).is_err() { + tracing::warn!( + "General information is already set for the external node: {:?}, was attempting to set {info:?}", + self.info.get() + ); + } + } + + pub(crate) async fn run_protocol_version_updates( + &self, + pool: ConnectionPool, + mut stop_receiver: watch::Receiver, + ) -> anyhow::Result<()> { + const QUERY_INTERVAL: Duration = Duration::from_secs(10); + + while !*stop_receiver.borrow_and_update() { + let maybe_protocol_version = pool + .connection() + .await? + .protocol_versions_dal() + .last_used_version_id() + .await; + if let Some(version) = maybe_protocol_version { + self.protocol_version.set(version as u64); + } + + tokio::time::timeout(QUERY_INTERVAL, stop_receiver.changed()) + .await + .ok(); + } + Ok(()) + } +} + +#[vise::register] +pub(crate) static EN_METRICS: vise::Global = vise::Global::new(); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "rust")] +pub(crate) struct RustMetrics { + /// General information about the Rust compiler. + info: Info, +} + +impl RustMetrics { + pub fn initialize(&self) { + tracing::info!("Metadata for rustc that this EN was compiled with: {RUSTC_METADATA:?}"); + self.info.set(RUSTC_METADATA).ok(); + } } #[vise::register] -pub(crate) static EN_METRICS: vise::Global = vise::Global::new(); +pub(crate) static RUST_METRICS: vise::Global = vise::Global::new(); diff --git a/core/lib/health_check/src/lib.rs b/core/lib/health_check/src/lib.rs index 228d40ab995..3794e741ebd 100644 --- a/core/lib/health_check/src/lib.rs +++ b/core/lib/health_check/src/lib.rs @@ -12,8 +12,8 @@ use futures::future; use serde::Serialize; use tokio::sync::watch; -use self::metrics::METRICS; -use crate::metrics::CheckResult; +use self::metrics::{CheckResult, METRICS}; +use crate::metrics::AppHealthCheckConfig; mod metrics; #[cfg(test)] @@ -112,6 +112,18 @@ impl AppHealthCheck { let slow_time_limit = slow_time_limit.unwrap_or(DEFAULT_SLOW_TIME_LIMIT); let hard_time_limit = hard_time_limit.unwrap_or(DEFAULT_HARD_TIME_LIMIT); tracing::debug!("Created app health with time limits: slow={slow_time_limit:?}, hard={hard_time_limit:?}"); + + let config = AppHealthCheckConfig { + slow_time_limit: slow_time_limit.into(), + hard_time_limit: hard_time_limit.into(), + }; + if METRICS.info.set(config).is_err() { + tracing::warn!( + "App health redefined; previous config: {:?}", + METRICS.info.get() + ); + } + Self { components: Mutex::default(), slow_time_limit, diff --git a/core/lib/health_check/src/metrics.rs b/core/lib/health_check/src/metrics.rs index bb90e6c499f..69f6265be51 100644 --- a/core/lib/health_check/src/metrics.rs +++ b/core/lib/health_check/src/metrics.rs @@ -2,7 +2,10 @@ use std::time::Duration; -use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics, Unit}; +use vise::{ + Buckets, DurationAsSecs, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Info, Metrics, + Unit, +}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue)] #[metrics(rename_all = "snake_case")] @@ -18,9 +21,19 @@ struct AbnormalCheckLabels { result: CheckResult, } +#[derive(Debug, EncodeLabelSet)] +pub(crate) struct 
AppHealthCheckConfig { + #[metrics(unit = Unit::Seconds)] + pub slow_time_limit: DurationAsSecs, + #[metrics(unit = Unit::Seconds)] + pub hard_time_limit: DurationAsSecs, +} + #[derive(Debug, Metrics)] #[metrics(prefix = "healthcheck")] pub(crate) struct HealthMetrics { + /// Immutable configuration for application health checks. + pub info: Info, /// Latency for abnormal checks. Includes slow, dropped and timed out checks (distinguished by the "result" label); /// skips normal checks. #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] diff --git a/core/lib/state/src/cache/lru_cache.rs b/core/lib/state/src/cache/lru_cache.rs index 0e0f3541117..fa37bdb3e22 100644 --- a/core/lib/state/src/cache/lru_cache.rs +++ b/core/lib/state/src/cache/lru_cache.rs @@ -1,9 +1,10 @@ use std::hash::Hash; use crate::cache::{ - metrics::{Method, RequestOutcome, METRICS}, + metrics::{LruCacheConfig, Method, RequestOutcome, METRICS}, CacheValue, MokaBase, }; + /// Cache implementation that uses LRU eviction policy. #[derive(Debug, Clone)] pub struct LruCache { @@ -22,6 +23,15 @@ where /// /// Panics if an invalid cache capacity is provided. pub fn new(name: &'static str, capacity: u64) -> Self { + tracing::info!("Configured LRU cache `{name}` with capacity {capacity}B"); + if let Err(err) = METRICS.lru_info[&name].set(LruCacheConfig { capacity }) { + tracing::warn!( + "LRU cache `{name}` was already created with config {:?}; new config: {:?}", + METRICS.lru_info[&name].get(), + err.into_inner() + ); + } + let cache = if capacity == 0 { None } else { diff --git a/core/lib/state/src/cache/metrics.rs b/core/lib/state/src/cache/metrics.rs index c0a805a5b33..715d0d342f3 100644 --- a/core/lib/state/src/cache/metrics.rs +++ b/core/lib/state/src/cache/metrics.rs @@ -2,7 +2,10 @@ use std::time::Duration; -use vise::{Buckets, Counter, EncodeLabelValue, Gauge, Histogram, LabeledFamily, Metrics}; +use vise::{ + Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Gauge, Histogram, Info, LabeledFamily, + Metrics, Unit, +}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue)] #[metrics(rename_all = "snake_case")] @@ -33,9 +36,29 @@ const SMALL_LATENCIES: Buckets = Buckets::values(&[ 1e-8, 2.5e-8, 5e-8, 1e-7, 2.5e-7, 5e-7, 1e-6, 2.5e-6, 5e-6, 1e-5, 2.5e-5, 5e-5, 1e-4, 1e-3, ]); +#[derive(Debug, EncodeLabelSet)] +pub(super) struct LruCacheConfig { + /// Cache capacity in bytes. + #[metrics(unit = Unit::Bytes)] + pub capacity: u64, +} + +#[derive(Debug, EncodeLabelSet)] +pub(super) struct SequentialCacheConfig { + /// Cache capacity in number of items. + pub capacity: u64, +} + #[derive(Debug, Metrics)] #[metrics(prefix = "server_state_cache")] pub(super) struct CacheMetrics { + /// Configuration of LRU caches. + #[metrics(labels = ["name"])] + pub lru_info: LabeledFamily<&'static str, Info>, + /// Configuration of sequential caches. + #[metrics(labels = ["name"])] + pub sequential_info: LabeledFamily<&'static str, Info>, + /// Latency of calling a cache method. 
#[metrics(buckets = SMALL_LATENCIES, labels = ["name", "method"])] pub latency: LabeledFamily<(&'static str, Method), Histogram, 2>, diff --git a/core/lib/state/src/cache/sequential_cache.rs b/core/lib/state/src/cache/sequential_cache.rs index 5ca86e35d73..0f36785f666 100644 --- a/core/lib/state/src/cache/sequential_cache.rs +++ b/core/lib/state/src/cache/sequential_cache.rs @@ -1,6 +1,6 @@ use std::collections::VecDeque; -use crate::cache::metrics::{Method, RequestOutcome, METRICS}; +use crate::cache::metrics::{Method, RequestOutcome, SequentialCacheConfig, METRICS}; /// A generic cache structure for storing key-value pairs in sequential order. /// It allows for non-unique keys and supports efficient retrieval of values based on a key @@ -27,6 +27,19 @@ impl SequentialCache { /// Panics if `capacity` is 0. pub fn new(name: &'static str, capacity: usize) -> Self { assert!(capacity > 0, "Cache capacity must be greater than 0"); + + let config = SequentialCacheConfig { + capacity: capacity as u64, + }; + tracing::info!("Configured sequential cache `{name}` with capacity {capacity} items"); + if let Err(err) = METRICS.sequential_info[&name].set(config) { + tracing::warn!( + "Sequential cache `{name}` was already created with config {:?}; new config: {:?}", + METRICS.sequential_info[&name].get(), + err.into_inner() + ); + } + SequentialCache { name, data: VecDeque::with_capacity(capacity), diff --git a/core/lib/state/src/postgres/mod.rs b/core/lib/state/src/postgres/mod.rs index de58a860630..1fc832b194a 100644 --- a/core/lib/state/src/postgres/mod.rs +++ b/core/lib/state/src/postgres/mod.rs @@ -266,8 +266,6 @@ pub struct PostgresStorageCaches { } impl PostgresStorageCaches { - const NEG_INITIAL_WRITES_NAME: &'static str = "negative_initial_writes_cache"; - /// Creates caches with the specified capacities measured in bytes. pub fn new(factory_deps_capacity: u64, initial_writes_capacity: u64) -> Self { tracing::debug!( @@ -282,7 +280,7 @@ impl PostgresStorageCaches { initial_writes_capacity / 2, ), negative_initial_writes: InitialWritesCache::new( - Self::NEG_INITIAL_WRITES_NAME, + "negative_initial_writes_cache", initial_writes_capacity / 2, ), values: None, diff --git a/core/lib/web3_decl/src/client/metrics.rs b/core/lib/web3_decl/src/client/metrics.rs index c7c254c9969..5bc2a2153b3 100644 --- a/core/lib/web3_decl/src/client/metrics.rs +++ b/core/lib/web3_decl/src/client/metrics.rs @@ -3,9 +3,12 @@ use std::time::Duration; use jsonrpsee::{core::client, http_client::transport}; -use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics, Unit}; +use vise::{ + Buckets, Counter, DurationAsSecs, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Info, + Metrics, Unit, +}; -use super::{AcquireStats, CallOrigin}; +use super::{AcquireStats, CallOrigin, SharedRateLimit}; #[derive(Debug, Clone, PartialEq, Eq, Hash, EncodeLabelSet)] pub(super) struct RequestLabels { @@ -42,9 +45,18 @@ pub(super) struct GenericErrorLabels { kind: CallErrorKind, } +#[derive(Debug, EncodeLabelSet)] +struct L2ClientConfigLabels { + rate_limit: usize, + #[metrics(unit = Unit::Seconds)] + rate_limit_window: DurationAsSecs, +} + #[derive(Debug, Metrics)] #[metrics(prefix = "l2_client")] pub(super) struct L2ClientMetrics { + /// Client configuration. + info: Info, /// Number of requests timed out in the rate-limiting logic. pub rate_limit_timeout: Family, /// Latency of rate-limiting logic for rate-limited requests. 
@@ -59,6 +71,20 @@ pub(super) struct L2ClientMetrics { } impl L2ClientMetrics { + pub fn observe_config(&self, rate_limit: &SharedRateLimit) { + let config_labels = L2ClientConfigLabels { + rate_limit: rate_limit.rate_limit, + rate_limit_window: rate_limit.rate_limit_window.into(), + }; + if let Err(err) = self.info.set(config_labels) { + tracing::warn!( + "Error setting configuration info {:?} for L2 client; already set to {:?}", + err.into_inner(), + self.info.get() + ); + } + } + pub fn observe_rate_limit_latency( &self, component: &'static str, diff --git a/core/lib/web3_decl/src/client/mod.rs b/core/lib/web3_decl/src/client/mod.rs index c6c19aa9960..80ddb337a27 100644 --- a/core/lib/web3_decl/src/client/mod.rs +++ b/core/lib/web3_decl/src/client/mod.rs @@ -266,9 +266,12 @@ impl L2ClientBuilder { self.client, self.rate_limit ); + let rate_limit = SharedRateLimit::new(self.rate_limit.0, self.rate_limit.1); + METRICS.observe_config(&rate_limit); + L2Client { inner: self.client, - rate_limit: SharedRateLimit::new(self.rate_limit.0, self.rate_limit.1), + rate_limit, component_name: "", metrics: &METRICS, } diff --git a/core/lib/zksync_core/src/api_server/web3/mempool_cache.rs b/core/lib/zksync_core/src/api_server/web3/mempool_cache.rs index 16eff2e5a66..2c6872a048e 100644 --- a/core/lib/zksync_core/src/api_server/web3/mempool_cache.rs +++ b/core/lib/zksync_core/src/api_server/web3/mempool_cache.rs @@ -1,4 +1,4 @@ -use std::{future::Future, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use chrono::NaiveDateTime; use tokio::sync::{watch, RwLock}; @@ -12,53 +12,30 @@ use super::metrics::MEMPOOL_CACHE_METRICS; /// Stores all transactions accepted by the mempool and provides a way to query all that are newer than a given timestamp. /// Updates the cache based on interval passed in the constructor #[derive(Debug, Clone)] -pub(crate) struct MempoolCache(Arc>>); +pub struct MempoolCache(Arc>>); /// `INITIAL_LOOKBEHIND` is the period of time for which the cache is initially populated. const INITIAL_LOOKBEHIND: Duration = Duration::from_secs(120); impl MempoolCache { /// Initializes the mempool cache with the parameters provided. 
- pub fn new( - connection_pool: ConnectionPool, - update_interval: Duration, - capacity: usize, - stop_receiver: watch::Receiver<bool>, - ) -> (Self, impl Future<Output = anyhow::Result<()>>) { + pub fn new(capacity: usize) -> Self { let cache = SequentialCache::new("mempool", capacity); let cache = Arc::new(RwLock::new(cache)); - let cache_for_task = cache.clone(); - let update_task = async move { - loop { - if *stop_receiver.borrow() { - tracing::debug!("Stopping mempool cache updates"); - return Ok(()); - } - - // Get the timestamp that will be used as the lower bound for the next update - // If cache is non-empty - this is the last tx time, otherwise it's `INITIAL_LOOKBEHIND` seconds ago - let last_timestamp = cache_for_task - .read() - .await - .get_last_key() - .unwrap_or_else(|| chrono::Utc::now().naive_utc() - INITIAL_LOOKBEHIND); - - let latency = MEMPOOL_CACHE_METRICS.db_poll_latency.start(); - let mut connection = connection_pool.connection_tagged("api").await?; - let txs = connection - .transactions_web3_dal() - .get_pending_txs_hashes_after(last_timestamp, None) - .await?; - drop(connection); - latency.observe(); - MEMPOOL_CACHE_METRICS.tx_batch_size.observe(txs.len()); - - cache_for_task.write().await.insert(txs)?; - tokio::time::sleep(update_interval).await; - } - }; + Self(cache) + } - (Self(cache), update_task) + /// Returns a task that will update this cache in the background. + pub fn update_task( + &self, + connection_pool: ConnectionPool, + update_interval: Duration, + ) -> MempoolCacheUpdateTask { + MempoolCacheUpdateTask { + cache: self.0.clone(), + connection_pool, + update_interval, + } } /// Returns all transaction hashes that are newer than the given timestamp. @@ -70,3 +47,44 @@ impl MempoolCache { self.0.read().await.query(after) } } + +/// Task updating [`MempoolCache`]. Should be spawned as a Tokio task (exactly one task for the cache). 
+#[derive(Debug)] +pub struct MempoolCacheUpdateTask { + cache: Arc>>, + connection_pool: ConnectionPool, + update_interval: Duration, +} + +impl MempoolCacheUpdateTask { + pub async fn run(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { + loop { + if *stop_receiver.borrow() { + tracing::debug!("Stopping mempool cache updates"); + return Ok(()); + } + + // Get the timestamp that will be used as the lower bound for the next update + // If cache is non-empty - this is the last tx time, otherwise it's `INITIAL_LOOKBEHIND` seconds ago + let last_timestamp = self + .cache + .read() + .await + .get_last_key() + .unwrap_or_else(|| chrono::Utc::now().naive_utc() - INITIAL_LOOKBEHIND); + + let latency = MEMPOOL_CACHE_METRICS.db_poll_latency.start(); + let mut connection = self.connection_pool.connection_tagged("api").await?; + let txs = connection + .transactions_web3_dal() + .get_pending_txs_hashes_after(last_timestamp, None) + .await?; + drop(connection); + latency.observe(); + MEMPOOL_CACHE_METRICS.tx_batch_size.observe(txs.len()); + + self.cache.write().await.insert(txs)?; + tokio::time::sleep(self.update_interval).await; + } + } +} diff --git a/core/lib/zksync_core/src/api_server/web3/metrics.rs b/core/lib/zksync_core/src/api_server/web3/metrics.rs index c5f8cd9f1bc..02c42f4589d 100644 --- a/core/lib/zksync_core/src/api_server/web3/metrics.rs +++ b/core/lib/zksync_core/src/api_server/web3/metrics.rs @@ -3,13 +3,16 @@ use std::{fmt, time::Duration}; use vise::{ - Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LabeledFamily, - Metrics, Unit, + Buckets, Counter, DurationAsSecs, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, + Info, LabeledFamily, Metrics, Unit, }; use zksync_types::api; use zksync_web3_decl::error::Web3Error; -use super::{backend_jsonrpsee::MethodMetadata, ApiTransport, TypedFilter}; +use super::{ + backend_jsonrpsee::MethodMetadata, ApiTransport, InternalApiConfig, OptionalApiParams, + TypedFilter, +}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "scheme", rename_all = "UPPERCASE")] @@ -142,6 +145,21 @@ struct Web3ErrorLabels { kind: Web3ErrorKind, } +#[derive(Debug, EncodeLabelSet)] +struct Web3ConfigLabels { + #[metrics(unit = Unit::Seconds)] + polling_interval: DurationAsSecs, + req_entities_limit: usize, + fee_history_limit: u64, + filters_limit: Option, + subscriptions_limit: Option, + #[metrics(unit = Unit::Bytes)] + batch_request_size_limit: Option, + #[metrics(unit = Unit::Bytes)] + response_body_size_limit: Option, + websocket_requests_per_minute_limit: Option, +} + /// Roughly exponential buckets for the `web3_call_block_diff` metric. The distribution should be skewed towards lower values. const BLOCK_DIFF_BUCKETS: Buckets = Buckets::values(&[ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1_000.0, @@ -153,6 +171,9 @@ const RESPONSE_SIZE_BUCKETS: Buckets = Buckets::exponential(1.0..=1_048_576.0, 4 #[derive(Debug, Metrics)] #[metrics(prefix = "api")] pub(in crate::api_server) struct ApiMetrics { + /// Web3 server configuration. + web3_info: Family>, + /// Latency of a Web3 call. Calls that take block ID as an input have block ID and block diff /// labels (the latter is the difference between the latest sealed miniblock and the resolved miniblock). 
#[metrics(buckets = Buckets::LATENCIES)] @@ -184,6 +205,31 @@ pub(in crate::api_server) struct ApiMetrics { } impl ApiMetrics { + pub(super) fn observe_config( + &self, + transport: ApiTransportLabel, + polling_interval: Duration, + config: &InternalApiConfig, + optional: &OptionalApiParams, + ) { + let config_labels = Web3ConfigLabels { + polling_interval: polling_interval.into(), + req_entities_limit: config.req_entities_limit, + fee_history_limit: config.fee_history_limit, + filters_limit: optional.filters_limit, + subscriptions_limit: optional.subscriptions_limit, + batch_request_size_limit: optional.batch_request_size_limit, + response_body_size_limit: optional.response_body_size_limit, + websocket_requests_per_minute_limit: optional + .websocket_requests_per_minute_limit + .map(Into::into), + }; + tracing::info!("{transport:?} Web3 server is configured with options: {config_labels:?}"); + if self.web3_info[&transport].set(config_labels).is_err() { + tracing::warn!("Cannot set config labels for {transport:?} Web3 server"); + } + } + /// Observes latency of a finished RPC call. pub fn observe_latency(&self, meta: &MethodMetadata) { let latency = meta.started_at.elapsed(); diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs index 97cd5a5ffcc..92a00161586 100644 --- a/core/lib/zksync_core/src/api_server/web3/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/mod.rs @@ -48,7 +48,7 @@ use crate::{ }; pub mod backend_jsonrpsee; -mod mempool_cache; +pub mod mempool_cache; pub(super) mod metrics; pub mod namespaces; mod pubsub; @@ -128,6 +128,7 @@ struct OptionalApiParams { response_body_size_limit: Option, websocket_requests_per_minute_limit: Option, tree_api: Option>, + mempool_cache: Option, pub_sub_events_sender: Option>, } @@ -259,6 +260,11 @@ impl ApiBuilder { self } + pub fn with_mempool_cache(mut self, cache: MempoolCache) -> Self { + self.optional.mempool_cache = Some(cache); + self + } + #[cfg(test)] fn with_pub_sub_events(mut self, sender: mpsc::UnboundedSender) -> Self { self.optional.pub_sub_events_sender = Some(sender); @@ -309,7 +315,6 @@ impl ApiServer { async fn build_rpc_state( self, last_sealed_miniblock: SealedMiniblockNumber, - mempool_cache: MempoolCache, ) -> anyhow::Result { let mut storage = self.updaters_pool.connection_tagged("api").await?; let start_info = BlockStartInfo::new(&mut storage).await?; @@ -333,7 +338,7 @@ impl ApiServer { sync_state: self.optional.sync_state, api_config: self.config, start_info, - mempool_cache, + mempool_cache: self.optional.mempool_cache, last_sealed_miniblock, tree_api: self.optional.tree_api, }) @@ -343,13 +348,10 @@ impl ApiServer { self, pub_sub: Option, last_sealed_miniblock: SealedMiniblockNumber, - mempool_cache: MempoolCache, ) -> anyhow::Result> { let namespaces = self.namespaces.clone(); let zksync_network_id = self.config.l2_chain_id; - let rpc_state = self - .build_rpc_state(last_sealed_miniblock, mempool_cache) - .await?; + let rpc_state = self.build_rpc_state(last_sealed_miniblock).await?; // Collect all the methods into a single RPC module. 
let mut rpc = RpcModule::new(()); @@ -458,16 +460,6 @@ impl ApiServer { ); let mut tasks = vec![tokio::spawn(sealed_miniblock_update_task)]; - - let (mempool_cache, mempool_cache_update_task) = MempoolCache::new( - self.updaters_pool.clone(), - self.config.mempool_cache_update_interval, - self.config.mempool_cache_size, - stop_receiver.clone(), - ); - - tasks.push(tokio::spawn(mempool_cache_update_task)); - let pub_sub = if matches!(transport, ApiTransport::WebSocket(_)) && self.namespaces.contains(&Namespace::Pubsub) { @@ -493,7 +485,6 @@ impl ApiServer { let server_task = tokio::spawn(self.run_jsonrpsee_server( stop_receiver, pub_sub, - mempool_cache, last_sealed_miniblock, local_addr_sender, )); @@ -510,7 +501,6 @@ impl ApiServer { self, mut stop_receiver: watch::Receiver, pub_sub: Option, - mempool_cache: MempoolCache, last_sealed_miniblock: SealedMiniblockNumber, local_addr_sender: oneshot::Sender, ) -> anyhow::Result<()> { @@ -520,6 +510,12 @@ impl ApiServer { ApiTransport::WebSocket(addr) => ("WS", false, addr), }; let transport_label = (&transport).into(); + API_METRICS.observe_config( + transport_label, + self.polling_interval, + &self.config, + &self.optional, + ); tracing::info!( "Waiting for at least one L1 batch in Postgres to start {transport_str} API server" @@ -555,7 +551,7 @@ impl ApiServer { let method_tracer = self.method_tracer.clone(); let rpc = self - .build_rpc_module(pub_sub, last_sealed_miniblock, mempool_cache) + .build_rpc_module(pub_sub, last_sealed_miniblock) .await?; let registered_method_names = Arc::new(rpc.method_names().collect::>()); tracing::debug!( diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs index bc50abac48b..6d4d5c73e25 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -698,27 +698,24 @@ impl EthNamespace { TypedFilter::PendingTransactions(from_timestamp_excluded) => { // Attempt to get pending transactions from cache. - let tx_hashes_from_cache = self - .state - .mempool_cache - .get_tx_hashes_after(*from_timestamp_excluded) - .await; - let tx_hashes = match tx_hashes_from_cache { - Some(mut result) => { - result.truncate(self.state.api_config.req_entities_limit); - result - } - None => { - // On cache miss, query the database. - let mut conn = self.state.acquire_connection().await?; - conn.transactions_web3_dal() - .get_pending_txs_hashes_after( - *from_timestamp_excluded, - Some(self.state.api_config.req_entities_limit), - ) - .await - .map_err(DalError::generalize)? - } + let tx_hashes_from_cache = if let Some(cache) = &self.state.mempool_cache { + cache.get_tx_hashes_after(*from_timestamp_excluded).await + } else { + None + }; + let tx_hashes = if let Some(mut result) = tx_hashes_from_cache { + result.truncate(self.state.api_config.req_entities_limit); + result + } else { + // On cache miss, query the database. + let mut conn = self.state.acquire_connection().await?; + conn.transactions_web3_dal() + .get_pending_txs_hashes_after( + *from_timestamp_excluded, + Some(self.state.api_config.req_entities_limit), + ) + .await + .map_err(DalError::generalize)? 
}; // It's possible the `tx_hashes` vector is empty, diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs index 45839ae2798..98a35d7fec5 100644 --- a/core/lib/zksync_core/src/api_server/web3/state.rs +++ b/core/lib/zksync_core/src/api_server/web3/state.rs @@ -98,8 +98,6 @@ pub struct InternalApiConfig { pub req_entities_limit: usize, pub fee_history_limit: u64, pub filters_disabled: bool, - pub mempool_cache_update_interval: Duration, - pub mempool_cache_size: usize, pub dummy_verifier: bool, pub l1_batch_commit_data_generator_mode: L1BatchCommitDataGeneratorMode, } @@ -140,8 +138,6 @@ impl InternalApiConfig { req_entities_limit: web3_config.req_entities_limit(), fee_history_limit: web3_config.fee_history_limit(), filters_disabled: web3_config.filters_disabled, - mempool_cache_update_interval: web3_config.mempool_cache_update_interval(), - mempool_cache_size: web3_config.mempool_cache_size(), dummy_verifier: genesis_config.dummy_verifier, l1_batch_commit_data_generator_mode: genesis_config.l1_batch_commit_data_generator_mode, } @@ -236,7 +232,7 @@ pub(crate) struct RpcState { /// Number of the first locally available miniblock / L1 batch. May differ from 0 if the node state was recovered /// from a snapshot. pub(super) start_info: BlockStartInfo, - pub(super) mempool_cache: MempoolCache, + pub(super) mempool_cache: Option, pub(super) last_sealed_miniblock: SealedMiniblockNumber, } diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index 5a9956237f3..dd2cf89b0a4 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -58,7 +58,7 @@ use crate::{ healthcheck::HealthCheckHandle, tree::TreeApiHttpClient, tx_sender::{ApiContracts, TxSender, TxSenderBuilder, TxSenderConfig}, - web3::{self, state::InternalApiConfig, Namespace}, + web3::{self, mempool_cache::MempoolCache, state::InternalApiConfig, Namespace}, }, basic_witness_input_producer::BasicWitnessInputProducer, commitment_generator::CommitmentGenerator, @@ -377,10 +377,19 @@ pub async fn initialize_components( // program termination. 
let mut storage_caches = None; + let mempool_cache = MempoolCache::new(api_config.web3_json_rpc.mempool_cache_size()); + let mempool_cache_update_task = mempool_cache.update_task( + connection_pool.clone(), + api_config.web3_json_rpc.mempool_cache_update_interval(), + ); + task_futures.push(tokio::spawn( + mempool_cache_update_task.run(stop_receiver.clone()), + )); + if components.contains(&Component::HttpApi) { storage_caches = Some( build_storage_caches( - &configs.api_config.clone().context("api")?.web3_json_rpc, + &api_config.web3_json_rpc, &replica_connection_pool, &mut task_futures, stop_receiver.clone(), @@ -412,6 +421,7 @@ pub async fn initialize_components( batch_fee_input_provider, state_keeper_config.save_call_traces, storage_caches.clone().unwrap(), + mempool_cache.clone(), ) .await .context("run_http_api")?; @@ -459,6 +469,7 @@ pub async fn initialize_components( replica_connection_pool.clone(), stop_receiver.clone(), storage_caches, + mempool_cache, ) .await .context("run_ws_api")?; @@ -1229,6 +1240,7 @@ async fn run_http_api( batch_fee_model_input_provider: Arc, with_debug_namespace: bool, storage_caches: PostgresStorageCaches, + mempool_cache: MempoolCache, ) -> anyhow::Result<()> { let (tx_sender, vm_barrier) = build_tx_sender( tx_sender_config, @@ -1261,6 +1273,7 @@ async fn run_http_api( .with_response_body_size_limit(api_config.web3_json_rpc.max_response_body_size()) .with_tx_sender(tx_sender) .with_vm_barrier(vm_barrier) + .with_mempool_cache(mempool_cache) .enable_api_namespaces(namespaces); if let Some(tree_api_url) = api_config.web3_json_rpc.tree_api_url() { let tree_api = Arc::new(TreeApiHttpClient::new(tree_api_url)); @@ -1292,6 +1305,7 @@ async fn run_ws_api( replica_connection_pool: ConnectionPool, stop_receiver: watch::Receiver, storage_caches: PostgresStorageCaches, + mempool_cache: MempoolCache, ) -> anyhow::Result<()> { let (tx_sender, vm_barrier) = build_tx_sender( tx_sender_config, @@ -1327,6 +1341,7 @@ async fn run_ws_api( .with_polling_interval(api_config.web3_json_rpc.pubsub_interval()) .with_tx_sender(tx_sender) .with_vm_barrier(vm_barrier) + .with_mempool_cache(mempool_cache) .enable_api_namespaces(namespaces); if let Some(tree_api_url) = api_config.web3_json_rpc.tree_api_url() { let tree_api = Arc::new(TreeApiHttpClient::new(tree_api_url)); diff --git a/core/lib/zksync_core/src/metadata_calculator/metrics.rs b/core/lib/zksync_core/src/metadata_calculator/metrics.rs index 7b5189a6b38..074f444dea6 100644 --- a/core/lib/zksync_core/src/metadata_calculator/metrics.rs +++ b/core/lib/zksync_core/src/metadata_calculator/metrics.rs @@ -3,14 +3,60 @@ use std::time::{Duration, Instant}; use vise::{ - Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LatencyObserver, Metrics, - Unit, + Buckets, DurationAsSecs, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Info, + LatencyObserver, Metrics, Unit, }; +use zksync_config::configs::database::MerkleTreeMode; use zksync_shared_metrics::{BlockStage, APP_METRICS}; use zksync_types::block::L1BatchHeader; use zksync_utils::time::seconds_since_epoch; -use super::MetadataCalculator; +use super::{MetadataCalculator, MetadataCalculatorConfig}; + +#[derive(Debug, EncodeLabelValue)] +#[metrics(rename_all = "snake_case")] +enum ModeLabel { + Full, + Lightweight, +} + +impl From for ModeLabel { + fn from(mode: MerkleTreeMode) -> Self { + match mode { + MerkleTreeMode::Full => Self::Full, + MerkleTreeMode::Lightweight => Self::Lightweight, + } + } +} + +#[derive(Debug, EncodeLabelSet)] +pub(super) 
struct ConfigLabels { + mode: ModeLabel, + #[metrics(unit = Unit::Seconds)] + delay_interval: DurationAsSecs, + max_l1_batches_per_iter: usize, + multi_get_chunk_size: usize, + #[metrics(unit = Unit::Bytes)] + block_cache_capacity: usize, + #[metrics(unit = Unit::Bytes)] + memtable_capacity: usize, + #[metrics(unit = Unit::Seconds)] + stalled_writes_timeout: DurationAsSecs, +} + +impl ConfigLabels { + pub fn new(config: &MetadataCalculatorConfig) -> Self { + Self { + mode: config.mode.into(), + delay_interval: config.delay_interval.into(), + max_l1_batches_per_iter: config.max_l1_batches_per_iter, + multi_get_chunk_size: config.multi_get_chunk_size, + block_cache_capacity: config.block_cache_capacity, + memtable_capacity: config.memtable_capacity, + stalled_writes_timeout: config.stalled_writes_timeout.into(), + } + } +} #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "stage", rename_all = "snake_case")] @@ -80,6 +126,8 @@ const LATENCIES_PER_LOG: Buckets = Buckets::values(&[ #[derive(Debug, Metrics)] #[metrics(prefix = "server_metadata_calculator")] pub(super) struct MetadataCalculatorMetrics { + /// Merkle tree configuration. + pub info: Info, /// Lag between the number of L1 batches processed in the Merkle tree and stored in Postgres. /// The lag can only be positive if Postgres was restored from a backup truncating some /// of the batches already processed by the tree. diff --git a/core/lib/zksync_core/src/metadata_calculator/mod.rs b/core/lib/zksync_core/src/metadata_calculator/mod.rs index 6abea0c6a88..7e541a94e85 100644 --- a/core/lib/zksync_core/src/metadata_calculator/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/mod.rs @@ -21,6 +21,7 @@ pub use self::helpers::LazyAsyncTreeReader; pub(crate) use self::helpers::{AsyncTreeReader, L1BatchWithLogs, MerkleTreeInfo}; use self::{ helpers::{create_db, Delayer, GenericAsyncTree, MerkleTreeHealth}, + metrics::{ConfigLabels, METRICS}, updater::TreeUpdater, }; @@ -97,6 +98,14 @@ impl MetadataCalculator { config: MetadataCalculatorConfig, object_store: Option>, ) -> anyhow::Result { + if let Err(err) = METRICS.info.set(ConfigLabels::new(&config)) { + tracing::warn!( + "Cannot set config {:?}; it's already set to {:?}", + err.into_inner(), + METRICS.info.get() + ); + } + anyhow::ensure!( config.max_l1_batches_per_iter > 0, "Maximum L1 batches per iteration is misconfigured to be 0; please update it to positive value" diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs index 5d51c294aad..93a2856eb43 100644 --- a/core/node/node_framework/examples/main_node.rs +++ b/core/node/node_framework/examples/main_node.rs @@ -51,6 +51,7 @@ use zksync_node_framework::{ StateKeeperLayer, }, web3_api::{ + caches::MempoolCacheLayer, server::{Web3ServerLayer, Web3ServerOptionalConfig}, tree_api_client::TreeApiClientLayer, tx_sender::{PostgresStorageCachesConfig, TxSenderLayer}, @@ -215,6 +216,15 @@ impl MainNodeBuilder { Ok(self) } + fn add_api_caches_layer(mut self) -> anyhow::Result { + let rpc_config = ApiConfig::from_env()?.web3_json_rpc; + self.node.add_layer(MempoolCacheLayer::new( + rpc_config.mempool_cache_size(), + rpc_config.mempool_cache_update_interval(), + )); + Ok(self) + } + fn add_tree_api_client_layer(mut self) -> anyhow::Result { let rpc_config = ApiConfig::from_env()?.web3_json_rpc; self.node @@ -407,6 +417,7 @@ fn main() -> anyhow::Result<()> { .add_healthcheck_layer()? .add_tx_sender_layer()? 
.add_tree_api_client_layer()? + .add_api_caches_layer()? .add_http_web3_api_layer()? .add_ws_web3_api_layer()? .add_house_keeper_layer()? diff --git a/core/node/node_framework/src/implementations/layers/web3_api/caches.rs b/core/node/node_framework/src/implementations/layers/web3_api/caches.rs new file mode 100644 index 00000000000..5ca4d2b9c69 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/caches.rs @@ -0,0 +1,56 @@ +use std::time::Duration; + +use zksync_core::api_server::web3::mempool_cache::{self, MempoolCache}; + +use crate::{ + implementations::resources::{pools::ReplicaPoolResource, web3_api::MempoolCacheResource}, + service::{ServiceContext, StopReceiver}, + task::Task, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct MempoolCacheLayer { + capacity: usize, + update_interval: Duration, +} + +impl MempoolCacheLayer { + pub fn new(capacity: usize, update_interval: Duration) -> Self { + Self { + capacity, + update_interval, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for MempoolCacheLayer { + fn layer_name(&self) -> &'static str { + "mempool_cache_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let pool_resource = context.get_resource::().await?; + let replica_pool = pool_resource.get().await?; + let mempool_cache = MempoolCache::new(self.capacity); + let update_task = mempool_cache.update_task(replica_pool, self.update_interval); + context.add_task(Box::new(MempoolCacheUpdateTask(update_task))); + context.insert_resource(MempoolCacheResource(mempool_cache))?; + Ok(()) + } +} + +#[derive(Debug)] +pub struct MempoolCacheUpdateTask(mempool_cache::MempoolCacheUpdateTask); + +#[async_trait::async_trait] +impl Task for MempoolCacheUpdateTask { + fn name(&self) -> &'static str { + "mempool_cache_update_task" + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + self.0.run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/mod.rs b/core/node/node_framework/src/implementations/layers/web3_api/mod.rs index 2f872d8e298..48c9eb744ab 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/mod.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/mod.rs @@ -1,3 +1,4 @@ +pub mod caches; pub mod server; pub mod tree_api_client; pub mod tx_sender; diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server.rs index 8a5739e2662..a68fa6bee60 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/server.rs @@ -10,7 +10,7 @@ use crate::{ healthcheck::AppHealthCheckResource, pools::ReplicaPoolResource, sync_state::SyncStateResource, - web3_api::{TreeApiClientResource, TxSenderResource}, + web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource}, }, service::{ServiceContext, StopReceiver}, task::Task, @@ -124,12 +124,14 @@ impl WiringLayer for Web3ServerLayer { Err(WiringError::ResourceLacking { .. }) => None, Err(err) => return Err(err), }; + let MempoolCacheResource(mempool_cache) = context.get_resource().await?; // Build server. 
let mut api_builder = ApiBuilder::jsonrpsee_backend(self.internal_api_config, replica_pool.clone()) .with_updaters_pool(updaters_pool) - .with_tx_sender(tx_sender); + .with_tx_sender(tx_sender) + .with_mempool_cache(mempool_cache); if let Some(client) = tree_api_client { api_builder = api_builder.with_tree_api(client); } diff --git a/core/node/node_framework/src/implementations/resources/web3_api.rs b/core/node/node_framework/src/implementations/resources/web3_api.rs index 7f7dedc2500..68d343b9b0c 100644 --- a/core/node/node_framework/src/implementations/resources/web3_api.rs +++ b/core/node/node_framework/src/implementations/resources/web3_api.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use zksync_core::api_server::{ tree::TreeApiClient, tx_sender::{tx_sink::TxSink, TxSender}, + web3::mempool_cache::MempoolCache, }; use crate::resource::Resource; @@ -33,3 +34,12 @@ impl Resource for TreeApiClientResource { "api/tree_api_client".into() } } + +#[derive(Debug, Clone)] +pub struct MempoolCacheResource(pub MempoolCache); + +impl Resource for MempoolCacheResource { + fn name() -> String { + "api/mempool_cache".into() + } +} From 96939ce38da87b9be2cf0ba98a729b1b57b5847e Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Wed, 10 Apr 2024 14:27:28 +0300 Subject: [PATCH 09/29] test: Add secp256r1, transient storage, code oracle integration tests (#1630) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add secp256r1, transient storage, code oracle integration tests ## Why ❔ Better integration test coverage ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. 
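A quick illustration for reviewers of the calldata layout the new secp256r1 test exercises. This sketch is not part of the diff: `provider`, the generated key, and the sample message are placeholders (the test pins a fixed key and message instead), and the scalar padding via `toString('hex', 64)` is an extra safeguard assumed here; the precompile address, the raw `digest || r || s || pubkey` layout, and the 32-byte `1` success value are the ones exercised by the test in this PR.

```ts
import * as ethers from 'ethers';
import * as elliptic from 'elliptic';

// P-256 signature verification precompile, as called by the test below.
const P256_VERIFY_ADDRESS = '0x0000000000000000000000000000000000000100';

async function p256Verify(provider: ethers.providers.Provider): Promise<boolean> {
    const ec = new elliptic.ec('p256');
    const key = ec.genKeyPair(); // placeholder key pair for illustration

    // The precompile takes a 32-byte digest; here, keccak256 of an arbitrary message.
    const digest = ethers.utils.arrayify(ethers.utils.keccak256('0x1234'));
    const signature = key.sign(digest);

    // Calldata is the raw concatenation digest || r || s || pubkey (no ABI encoding).
    // `toString('hex', 64)` zero-pads each scalar to 32 bytes.
    const data = ethers.utils.concat([
        digest,
        '0x' + signature.r.toString('hex', 64),
        '0x' + signature.s.toString('hex', 64),
        // Uncompressed public key without the leading 0x04 byte: x || y, 64 bytes.
        '0x' + key.getPublic().encode('hex', false).slice(2)
    ]);

    const result = await provider.call({ to: P256_VERIFY_ADDRESS, data });
    // A 32-byte big-endian 1 signals a valid signature; anything else means failure.
    return result === '0x0000000000000000000000000000000000000000000000000000000000000001';
}
```

Note the design choice the sketch mirrors: the precompile consumes raw concatenated bytes rather than ABI-encoded arguments, which is why the test assembles calldata with `ethers.utils.concat` instead of an ABI coder.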
--- core/tests/ts-integration/package.json | 3 +- .../ts-integration/tests/contracts.test.ts | 66 +++++++ yarn.lock | 171 ++++++++++++++++-- 3 files changed, 228 insertions(+), 12 deletions(-) diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json index 19055585643..0cbb7612e39 100644 --- a/core/tests/ts-integration/package.json +++ b/core/tests/ts-integration/package.json @@ -30,6 +30,7 @@ "ts-jest": "^29.0.1", "ts-node": "^10.1.0", "typescript": "^4.3.5", - "zksync-web3": "^0.15.5" + "zksync-web3": "^0.15.5", + "elliptic": "^6.5.5" } } diff --git a/core/tests/ts-integration/tests/contracts.test.ts b/core/tests/ts-integration/tests/contracts.test.ts index e7d5fcf3a23..3a45d4b1117 100644 --- a/core/tests/ts-integration/tests/contracts.test.ts +++ b/core/tests/ts-integration/tests/contracts.test.ts @@ -12,6 +12,7 @@ import { shouldOnlyTakeFee } from '../src/modifiers/balance-checker'; import * as ethers from 'ethers'; import * as zksync from 'zksync-web3'; +import * as elliptic from 'elliptic'; import { Provider } from 'zksync-web3'; import { RetryProvider } from '../src/retry-provider'; @@ -330,6 +331,71 @@ describe('Smart contract behavior checks', () => { ).toBeRejected('not enough gas to publish compressed bytecodes'); }); + test('Should check secp256r1 precompile', async () => { + const ec = new elliptic.ec('p256'); + + const privateKeyHex = '519b423d715f8b581f4fa8ee59f4771a5b44c8130b4e3eacca54a56dda72b464'; + const privateKey = Buffer.from(privateKeyHex, 'hex'); + + const message = + '0x5905238877c77421f73e43ee3da6f2d9e2ccad5fc942dcec0cbd25482935faaf416983fe165b1a045ee2bcd2e6dca3bdf46c4310a7461f9a37960ca672d3feb5473e253605fb1ddfd28065b53cb5858a8ad28175bf9bd386a5e471ea7a65c17cc934a9d791e91491eb3754d03799790fe2d308d16146d5c9b0d0debd97d79ce8'; + const digest = ethers.utils.arrayify(ethers.utils.keccak256(message)); + const signature = ec.sign(digest, privateKey); + + const publicKeyHex = + '0x1ccbe91c075fc7f4f033bfa248db8fccd3565de94bbfb12f3c59ff46c271bf83ce4014c68811f9a21a1fdb2c0e6113e06db7ca93b7404e78dc7ccd5ca89a4ca9'; + + // Check that verification succeeds. + const res = await alice.provider.call({ + to: '0x0000000000000000000000000000000000000100', + data: ethers.utils.concat([ + digest, + '0x' + signature.r.toString('hex'), + '0x' + signature.s.toString('hex'), + publicKeyHex + ]) + }); + expect(res).toEqual('0x0000000000000000000000000000000000000000000000000000000000000001'); + + // Send the transaction. + const tx = await alice.sendTransaction({ + to: '0x0000000000000000000000000000000000000100', + data: ethers.utils.concat([ + digest, + '0x' + signature.r.toString('hex'), + '0x' + signature.s.toString('hex'), + publicKeyHex + ]) + }); + const receipt = await tx.wait(); + expect(receipt.status).toEqual(1); + }); + + test('Should check transient storage', async () => { + const artifact = require(`${process.env.ZKSYNC_HOME}/etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json`); + const contractFactory = new zksync.ContractFactory(artifact.abi, artifact.bytecode, alice); + const storageContract = await contractFactory.deploy(); + await storageContract.deployed(); + // Tests transient storage, see contract code for details. + await expect(storageContract.testTransientStore()).toBeAccepted([]); + // Checks that transient storage is cleaned up after each tx. 
+ await expect(storageContract.assertTValue(0)).toBeAccepted([]); + }); + + test('Should check code oracle works', async () => { + // Deploy contract that calls CodeOracle. + const artifact = require(`${process.env.ZKSYNC_HOME}/etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json`); + const contractFactory = new zksync.ContractFactory(artifact.abi, artifact.bytecode, alice); + const contract = await contractFactory.deploy(); + await contract.deployed(); + + // Check that CodeOracle can decommit code of just deployed contract. + const versionedHash = zksync.utils.hashBytecode(artifact.bytecode); + const expectedBytecodeHash = ethers.utils.keccak256(artifact.bytecode); + + await expect(contract.callCodeOracle(versionedHash, expectedBytecodeHash)).toBeAccepted([]); + }); + afterAll(async () => { await testMaster.deinitialize(); }); diff --git a/yarn.lock b/yarn.lock index cd3aad20142..c8b4466e20c 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2001,7 +2001,7 @@ fs-extra "^11.1.1" semver "^7.5.1" -"@matterlabs/hardhat-zksync-solc@0.4.2", "@matterlabs/hardhat-zksync-solc@^0.4.2": +"@matterlabs/hardhat-zksync-solc@0.4.2": version "0.4.2" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-0.4.2.tgz#64121082e88c5ab22eb4e9594d120e504f6af499" integrity sha512-6NFWPSZiOAoo7wNuhMg4ztj7mMEH+tLrx09WuCbcURrHPijj/KxYNsJD6Uw5lapKr7G8H7SQISGid1/MTXVmXQ== @@ -2034,6 +2034,23 @@ proper-lockfile "^4.1.2" semver "^7.5.1" +"@matterlabs/hardhat-zksync-solc@^1.1.4": + version "1.1.4" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.1.4.tgz#04a2fad6fb6b6944c64ad969080ee65b9af3f617" + integrity sha512-4/usbogh9neewR2/v8Dn2OzqVblZMUuT/iH2MyPZgPRZYQlL4SlZtMvokU9UQjZT6iSoaKCbbdWESHDHSzfUjA== + dependencies: + "@nomiclabs/hardhat-docker" "^2.0.0" + chai "^4.3.6" + chalk "4.1.2" + debug "^4.3.4" + dockerode "^4.0.2" + fs-extra "^11.1.1" + proper-lockfile "^4.1.2" + semver "^7.5.1" + sinon "^17.0.1" + sinon-chai "^3.7.0" + undici "^5.14.0" + "@matterlabs/hardhat-zksync-verify@^0.2.0": version "0.2.2" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-verify/-/hardhat-zksync-verify-0.2.2.tgz#daa34bc4404096ed0f44461ee366c1cb0e5a4f2f" @@ -2776,6 +2793,13 @@ resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.27.8.tgz#6667fac16c436b5434a387a34dedb013198f6e6e" integrity sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA== +"@sinonjs/commons@^2.0.0": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-2.0.0.tgz#fd4ca5b063554307e8327b4564bd56d3b73924a3" + integrity sha512-uLa0j859mMrg2slwQYdO/AkrOfmH+X6LTVmNTS9CqexuE2IvVORIkSpJLqePAbEnKJ77aMmCwr1NUZ57120Xcg== + dependencies: + type-detect "4.0.8" + "@sinonjs/commons@^3.0.0": version "3.0.0" resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-3.0.0.tgz#beb434fe875d965265e04722ccfc21df7f755d72" @@ -2790,6 +2814,27 @@ dependencies: "@sinonjs/commons" "^3.0.0" +"@sinonjs/fake-timers@^11.2.2": + version "11.2.2" + resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-11.2.2.tgz#50063cc3574f4a27bd8453180a04171c85cc9699" + integrity sha512-G2piCSxQ7oWOxwGSAyFHfPIsyeJGXYtc6mFbnFA+kRXkiEnTl8c/8jul2S329iFBnDI9HGoeWWAZvuvOkZccgw== + dependencies: + "@sinonjs/commons" "^3.0.0" + +"@sinonjs/samsam@^8.0.0": + version "8.0.0" + resolved 
"https://registry.yarnpkg.com/@sinonjs/samsam/-/samsam-8.0.0.tgz#0d488c91efb3fa1442e26abea81759dfc8b5ac60" + integrity sha512-Bp8KUVlLp8ibJZrnvq2foVhP0IVX2CIprMJPK0vqGqgrDa0OHVKeZyBykqskkrdxV6yKBPmGasO8LVjAKR3Gew== + dependencies: + "@sinonjs/commons" "^2.0.0" + lodash.get "^4.4.2" + type-detect "^4.0.8" + +"@sinonjs/text-encoding@^0.7.2": + version "0.7.2" + resolved "https://registry.yarnpkg.com/@sinonjs/text-encoding/-/text-encoding-0.7.2.tgz#5981a8db18b56ba38ef0efb7d995b12aa7b51918" + integrity sha512-sXXKG+uL9IrKqViTtao2Ws6dy0znu9sOaP1di/jKGW1M6VssO8vlpXCQcpZ+jisQ1tTFAC5Jo/EOzFbggBagFQ== + "@solidity-parser/parser@^0.14.0", "@solidity-parser/parser@^0.14.2": version "0.14.5" resolved "https://registry.yarnpkg.com/@solidity-parser/parser/-/parser-0.14.5.tgz#87bc3cc7b068e08195c219c91cd8ddff5ef1a804" @@ -2913,6 +2958,13 @@ dependencies: "@babel/types" "^7.20.7" +"@types/bn.js@*", "@types/bn.js@^5.1.0": + version "5.1.5" + resolved "https://registry.yarnpkg.com/@types/bn.js/-/bn.js-5.1.5.tgz#2e0dacdcce2c0f16b905d20ff87aedbc6f7b4bf0" + integrity sha512-V46N0zwKRF5Q00AZ6hWtN0T8gGmDUaUzLWQvHFo5yThtVwK/VCenFY3wXVbOvNfajEpsTfQM4IN9k/d6gUVX3A== + dependencies: + "@types/node" "*" + "@types/bn.js@^4.11.3": version "4.11.6" resolved "https://registry.yarnpkg.com/@types/bn.js/-/bn.js-4.11.6.tgz#c306c70d9358aaea33cd4eda092a742b9505967c" @@ -2920,13 +2972,6 @@ dependencies: "@types/node" "*" -"@types/bn.js@^5.1.0": - version "5.1.5" - resolved "https://registry.yarnpkg.com/@types/bn.js/-/bn.js-5.1.5.tgz#2e0dacdcce2c0f16b905d20ff87aedbc6f7b4bf0" - integrity sha512-V46N0zwKRF5Q00AZ6hWtN0T8gGmDUaUzLWQvHFo5yThtVwK/VCenFY3wXVbOvNfajEpsTfQM4IN9k/d6gUVX3A== - dependencies: - "@types/node" "*" - "@types/chai-as-promised@^7.1.3", "@types/chai-as-promised@^7.1.4": version "7.1.8" resolved "https://registry.yarnpkg.com/@types/chai-as-promised/-/chai-as-promised-7.1.8.tgz#f2b3d82d53c59626b5d6bbc087667ccb4b677fe9" @@ -2956,6 +3001,13 @@ resolved "https://registry.yarnpkg.com/@types/deep-extend/-/deep-extend-0.4.32.tgz#0af51fffde55cb168e8d68f8236908c2cdfe7419" integrity sha512-7/pcMJr5I5OnpWTTfv0o3fJ9+f36EqoQa27/oJlbfvfZAMMrPyU5/+AUC+5OOtTEKdyoW4lAeIBYHtodtEdNUA== +"@types/elliptic@^6.4.18": + version "6.4.18" + resolved "https://registry.yarnpkg.com/@types/elliptic/-/elliptic-6.4.18.tgz#bc96e26e1ccccbabe8b6f0e409c85898635482e1" + integrity sha512-UseG6H5vjRiNpQvrhy4VF/JXdA3V/Fp5amvveaL+fs28BZ6xIKJBPnUPRlEaZpysD9MbpfaLi8lbl7PGUAkpWw== + dependencies: + "@types/bn.js" "*" + "@types/form-data@0.0.33": version "0.0.33" resolved "https://registry.yarnpkg.com/@types/form-data/-/form-data-0.0.33.tgz#c9ac85b2a5fd18435b8c85d9ecb50e6d6c893ff8" @@ -4270,7 +4322,7 @@ chai-as-promised@^7.1.1: dependencies: check-error "^1.0.2" -chai@^4.3.10: +chai@^4.3.10, chai@^4.3.6: version "4.4.1" resolved "https://registry.yarnpkg.com/chai/-/chai-4.4.1.tgz#3603fa6eba35425b0f2ac91a009fe924106e50d1" integrity sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g== @@ -4704,7 +4756,7 @@ cosmiconfig@^8.0.0: parse-json "^5.2.0" path-type "^4.0.0" -cpu-features@~0.0.8: +cpu-features@~0.0.8, cpu-features@~0.0.9: version "0.0.9" resolved "https://registry.yarnpkg.com/cpu-features/-/cpu-features-0.0.9.tgz#5226b92f0f1c63122b0a3eb84cb8335a4de499fc" integrity sha512-AKjgn2rP2yJyfbepsmLfiYcmtNn/2eUvocUyM/09yB0YDiz39HteK/5/T4Onf0pmdYDMgkBoGvRLvEguzyL7wQ== @@ -5090,6 +5142,11 @@ diff@^4.0.1: resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" integrity 
sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== +diff@^5.1.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/diff/-/diff-5.2.0.tgz#26ded047cd1179b78b9537d5ef725503ce1ae531" + integrity sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A== + difflib@^0.2.4: version "0.2.4" resolved "https://registry.yarnpkg.com/difflib/-/difflib-0.2.4.tgz#b5e30361a6db023176d562892db85940a718f47e" @@ -5139,6 +5196,16 @@ docker-modem@^5.0.2: split-ca "^1.0.1" ssh2 "^1.11.0" +docker-modem@^5.0.3: + version "5.0.3" + resolved "https://registry.yarnpkg.com/docker-modem/-/docker-modem-5.0.3.tgz#50c06f11285289f58112b5c4c4d89824541c41d0" + integrity sha512-89zhop5YVhcPEt5FpUFGr3cDyceGhq/F9J+ZndQ4KfqNvfbJpPMfgeixFgUj5OjCYAboElqODxY5Z1EBsSa6sg== + dependencies: + debug "^4.1.1" + readable-stream "^3.5.0" + split-ca "^1.0.1" + ssh2 "^1.15.0" + dockerode@^2.5.8: version "2.5.8" resolved "https://registry.yarnpkg.com/dockerode/-/dockerode-2.5.8.tgz#1b661e36e1e4f860e25f56e0deabe9f87f1d0acc" @@ -5166,6 +5233,15 @@ dockerode@^4.0.0: docker-modem "^5.0.2" tar-fs "~2.0.1" +dockerode@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/dockerode/-/dockerode-4.0.2.tgz#dedc8529a1db3ac46d186f5912389899bc309f7d" + integrity sha512-9wM1BVpVMFr2Pw3eJNXrYYt6DT9k0xMcsSCjtPvyQ+xa1iPg/Mo3T/gUcwI0B2cczqCeCYRPF8yFYDwtFXT0+w== + dependencies: + "@balena/dockerignore" "^1.0.2" + docker-modem "^5.0.3" + tar-fs "~2.0.1" + doctrine@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-2.1.0.tgz#5cd01fc101621b42c4cd7f5d1a66243716d3f39d" @@ -5265,6 +5341,19 @@ elliptic@6.5.4, elliptic@^6.5.2, elliptic@^6.5.4: minimalistic-assert "^1.0.1" minimalistic-crypto-utils "^1.0.1" +elliptic@^6.5.5: + version "6.5.5" + resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.5.tgz#c715e09f78b6923977610d4c2346d6ce22e6dded" + integrity sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw== + dependencies: + bn.js "^4.11.9" + brorand "^1.1.0" + hash.js "^1.0.0" + hmac-drbg "^1.0.1" + inherits "^2.0.4" + minimalistic-assert "^1.0.1" + minimalistic-crypto-utils "^1.0.1" + emittery@0.10.0: version "0.10.0" resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.10.0.tgz#bb373c660a9d421bb44706ec4967ed50c02a8026" @@ -8079,6 +8168,11 @@ jsprim@^1.2.2: json-schema "0.4.0" verror "1.10.0" +just-extend@^6.2.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/just-extend/-/just-extend-6.2.0.tgz#b816abfb3d67ee860482e7401564672558163947" + integrity sha512-cYofQu2Xpom82S6qD778jBDpwvvy39s1l/hrYij2u9AMdQcGRpaBu6kY4mVhuno5kJVi1DAz4aiphA2WI1/OAw== + jwa@^1.4.1: version "1.4.1" resolved "https://registry.yarnpkg.com/jwa/-/jwa-1.4.1.tgz#743c32985cb9e98655530d53641b66c8645b039a" @@ -8373,6 +8467,11 @@ lodash.flatten@~4.4.0: resolved "https://registry.yarnpkg.com/lodash.flatten/-/lodash.flatten-4.4.0.tgz#f31c22225a9632d2bbf8e4addbef240aa765a61f" integrity sha512-C5N2Z3DgnnKr0LOpv/hKCgKdb7ZZwafIrsesve6lmzvZIRZRGaZ/l6Q8+2W7NaT+ZwO3fFlSCzCzrDCFdJfZ4g== +lodash.get@^4.4.2: + version "4.4.2" + resolved "https://registry.yarnpkg.com/lodash.get/-/lodash.get-4.4.2.tgz#2d177f652fa31e939b4438d5341499dfa3825e99" + integrity sha512-z+Uw/vLuy6gQe8cfaFWD7p0wVv8fJl3mbzXh33RS+0oW2wvUqiRXiQ69gLWSLpgB5/6sU+r6BlQR0MBILadqTQ== + lodash.includes@^4.3.0: version "4.3.0" resolved "https://registry.yarnpkg.com/lodash.includes/-/lodash.includes-4.3.0.tgz#60bb98a87cb923c68ca1e51325483314849f553f" @@ 
-8939,6 +9038,11 @@ nan@^2.17.0: resolved "https://registry.yarnpkg.com/nan/-/nan-2.18.0.tgz#26a6faae7ffbeb293a39660e88a76b82e30b7554" integrity sha512-W7tfG7vMOGtD30sHoZSSc/JVYiyDPEyQVso/Zz+/uQd0B0L46gtC+pHha5FFMRpil6fm/AoEcRWyOVi4+E/f8w== +nan@^2.18.0: + version "2.19.0" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.19.0.tgz#bb58122ad55a6c5bc973303908d5b16cfdd5a8c0" + integrity sha512-nO1xXxfh/RWNxfd/XPfbIfFk5vgLsAxUR9y5O0cHMJu/AW9U95JLXqthYHjEp+8gQ5p96K9jUp8nbVOxCdRbtw== + nanoid@3.3.1: version "3.3.1" resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.1.tgz#6347a18cac88af88f58af0b3594b723d5e99bb35" @@ -8992,6 +9096,17 @@ nice-try@^1.0.4: resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366" integrity sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ== +nise@^5.1.5: + version "5.1.9" + resolved "https://registry.yarnpkg.com/nise/-/nise-5.1.9.tgz#0cb73b5e4499d738231a473cd89bd8afbb618139" + integrity sha512-qOnoujW4SV6e40dYxJOb3uvuoPHtmLzIk4TFo+j0jPJoC+5Z9xja5qH5JZobEPsa8+YYphMrOSwnrshEhG2qww== + dependencies: + "@sinonjs/commons" "^3.0.0" + "@sinonjs/fake-timers" "^11.2.2" + "@sinonjs/text-encoding" "^0.7.2" + just-extend "^6.2.0" + path-to-regexp "^6.2.1" + node-addon-api@^2.0.0: version "2.0.2" resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-2.0.2.tgz#432cfa82962ce494b132e9d72a15b29f71ff5d32" @@ -9418,6 +9533,11 @@ path-scurry@^1.10.1: lru-cache "^9.1.1 || ^10.0.0" minipass "^5.0.0 || ^6.0.2 || ^7.0.0" +path-to-regexp@^6.2.1: + version "6.2.2" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-6.2.2.tgz#324377a83e5049cbecadc5554d6a63a9a4866b36" + integrity sha512-GQX3SSMokngb36+whdpRXE+3f9V8UzyAorlYvOGx87ufGHehNTn5lCxrKtLyZ4Yl/wEKnNnr98ZzOwwDZV5ogw== + path-type@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/path-type/-/path-type-3.0.0.tgz#cef31dc8e0a1a3bb0d105c0cd97cf3bf47f4e36f" @@ -10400,6 +10520,23 @@ signal-exit@^4.0.1: resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-4.1.0.tgz#952188c1cbd546070e2dd20d0f41c0ae0530cb04" integrity sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw== +sinon-chai@^3.7.0: + version "3.7.0" + resolved "https://registry.yarnpkg.com/sinon-chai/-/sinon-chai-3.7.0.tgz#cfb7dec1c50990ed18c153f1840721cf13139783" + integrity sha512-mf5NURdUaSdnatJx3uhoBOrY9dtL19fiOtAdT1Azxg3+lNJFiuN0uzaU3xX1LeAfL17kHQhTAJgpsfhbMJMY2g== + +sinon@^17.0.1: + version "17.0.1" + resolved "https://registry.yarnpkg.com/sinon/-/sinon-17.0.1.tgz#26b8ef719261bf8df43f925924cccc96748e407a" + integrity sha512-wmwE19Lie0MLT+ZYNpDymasPHUKTaZHUH/pKEubRXIzySv9Atnlw+BUMGCzWgV7b7wO+Hw6f1TEOr0IUnmU8/g== + dependencies: + "@sinonjs/commons" "^3.0.0" + "@sinonjs/fake-timers" "^11.2.2" + "@sinonjs/samsam" "^8.0.0" + diff "^5.1.0" + nise "^5.1.5" + supports-color "^7.2.0" + sisteransi@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed" @@ -10620,6 +10757,17 @@ ssh2@^1.11.0: cpu-features "~0.0.8" nan "^2.17.0" +ssh2@^1.15.0: + version "1.15.0" + resolved "https://registry.yarnpkg.com/ssh2/-/ssh2-1.15.0.tgz#2f998455036a7f89e0df5847efb5421748d9871b" + integrity sha512-C0PHgX4h6lBxYx7hcXwu3QWdh4tg6tZZsTfXcdvc5caW/EMxaB4H9dWsl7qk+F7LAW762hp8VbXOX7x4xUYvEw== + dependencies: + asn1 "^0.2.6" + bcrypt-pbkdf "^1.0.2" + optionalDependencies: + cpu-features "~0.0.9" + nan "^2.18.0" + 
sshpk@^1.7.0: version "1.18.0" resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.18.0.tgz#1663e55cddf4d688b86a46b77f0d5fe363aba028" @@ -10834,7 +10982,7 @@ supports-color@^5.3.0: dependencies: has-flag "^3.0.0" -supports-color@^7.1.0: +supports-color@^7.1.0, supports-color@^7.2.0: version "7.2.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== @@ -10874,6 +11022,7 @@ synckit@^0.8.5: version "0.1.0" dependencies: "@matterlabs/hardhat-zksync-deploy" "^0.6.5" + "@matterlabs/hardhat-zksync-solc" "^1.1.4" "@nomiclabs/hardhat-solpp" "^2.0.1" commander "^9.4.1" ethers "^5.7.0" From 42635b7aeca584565589ab9c6ab8055cf4299d76 Mon Sep 17 00:00:00 2001 From: Tomasz Grześkiewicz Date: Wed, 10 Apr 2024 17:40:43 +0200 Subject: [PATCH 10/29] feat(en): docker compose better postgres settings (#1621) I've updated the EN docker compose settings, taking inspiration from the settings in the ansible-en repository. I've also updated the Postgres port so that it doesn't collide with the Postgres instance set up by `zk init`. --------- Signed-off-by: tomg10 --- .../mainnet-external-node-docker-compose.yml | 39 ++++++++++++------- .../testnet-external-node-docker-compose.yml | 39 ++++++++++++------- 2 files changed, 50 insertions(+), 28 deletions(-) diff --git a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml index 04f665bb2a7..0b72277c7c7 100644 --- a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml @@ -5,8 +5,8 @@ services: volumes: - mainnet-prometheus-data:/prometheus - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml - ports: - - "9090:9090" + expose: + - 9090 grafana: image: grafana/grafana:9.3.6 volumes: @@ -17,14 +17,25 @@ services: GF_AUTH_ANONYMOUS_ENABLED: "true" GF_AUTH_DISABLE_LOGIN_FORM: "true" ports: - - "3000:3000" - expose: - - 3000 + - "127.0.0.1:3000:3000" postgres: image: "postgres:14" - command: postgres -c 'max_connections=200' - ports: - - "5432:5432" + command: > + postgres + -c max_connections=200 + -c log_error_verbosity=terse + -c shared_buffers=2GB + -c effective_cache_size=4GB + -c maintenance_work_mem=1GB + -c checkpoint_completion_target=0.9 + -c random_page_cost=1.1 + -c effective_io_concurrency=200 + -c min_wal_size=4GB + -c max_wal_size=16GB + -c max_worker_processes=16 + -c checkpoint_timeout=1800 + expose: + - 5430 volumes: - mainnet-postgres:/var/lib/postgresql/data healthcheck: @@ -32,23 +43,23 @@ services: timeout: 3s test: psql -U postgres -c "select exists (select * from pg_stat_activity where datname = '{{ database_name }}' and application_name = 'pg_restore')" | grep -e ".f$" environment: - # We bind only to localhost, so setting insecure password is acceptable here - POSTGRES_PASSWORD=notsecurepassword + - PGPORT=5430 external-node: image: "matterlabs/external-node:e03e201-1712331604214-linux-amd64" depends_on: postgres: condition: service_healthy ports: - - "3322:3322" + - "127.0.0.1:3060:3060" + - "127.0.0.1:3061:3061" + - "127.0.0.1:3081:3081" volumes: - mainnet-rocksdb:/db expose: - - 3060 - - 3061 - - 3081 + - 3322 environment: - DATABASE_URL: "postgres://postgres:notsecurepassword@postgres/zksync_local_ext_node" + 
DATABASE_URL: "postgres://postgres:notsecurepassword@postgres:5430/zksync_local_ext_node" DATABASE_POOL_SIZE: 10 EN_HTTP_PORT: 3060 diff --git a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml index 2314c9b6ad5..657a0f95be7 100644 --- a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml @@ -5,8 +5,8 @@ services: volumes: - testnet-prometheus-data:/prometheus - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml - ports: - - "9090:9090" + expose: + - 9090 grafana: image: grafana/grafana:9.3.6 volumes: @@ -17,14 +17,25 @@ services: GF_AUTH_ANONYMOUS_ENABLED: "true" GF_AUTH_DISABLE_LOGIN_FORM: "true" ports: - - "3000:3000" - expose: - - 3000 + - "127.0.0.1:3000:3000" postgres: image: "postgres:14" - command: postgres -c 'max_connections=200' - ports: - - "5432:5432" + command: > + postgres + -c max_connections=200 + -c log_error_verbosity=terse + -c shared_buffers=2GB + -c effective_cache_size=4GB + -c maintenance_work_mem=1GB + -c checkpoint_completion_target=0.9 + -c random_page_cost=1.1 + -c effective_io_concurrency=200 + -c min_wal_size=4GB + -c max_wal_size=16GB + -c max_worker_processes=16 + -c checkpoint_timeout=1800 + expose: + - 5430 volumes: - testnet-postgres:/var/lib/postgresql/data healthcheck: @@ -32,23 +43,23 @@ services: timeout: 3s test: psql -U postgres -c "select exists (select * from pg_stat_activity where datname = '{{ database_name }}' and application_name = 'pg_restore')" | grep -e ".f$" environment: - # We bind only to localhost, so setting insecure password is acceptable here - POSTGRES_PASSWORD=notsecurepassword + - PGPORT=5430 external-node: image: "matterlabs/external-node:e03e201-1712331604214-linux-amd64" depends_on: postgres: condition: service_healthy ports: - - "3322:3322" + - "127.0.0.1:3060:3060" + - "127.0.0.1:3061:3061" + - "127.0.0.1:3081:3081" volumes: - testnet-rocksdb:/db expose: - - 3060 - - 3061 - - 3081 + - 3322 environment: - DATABASE_URL: "postgres://postgres:notsecurepassword@postgres/zksync_local_ext_node" + DATABASE_URL: "postgres://postgres:notsecurepassword@postgres:5430/zksync_local_ext_node" DATABASE_POOL_SIZE: 10 EN_HTTP_PORT: 3060 From 26ca22ad46248af2143730d0df74dd2dfc0e0981 Mon Sep 17 00:00:00 2001 From: pompon0 Date: Thu, 11 Apr 2024 09:58:03 +0200 Subject: [PATCH 11/29] refactor: moved consensus config to config crate (#1594) moved consensus config to config crate --- Cargo.lock | 3 + Cargo.toml | 2 +- core/bin/external_node/Cargo.toml | 1 + core/bin/external_node/src/config/mod.rs | 26 ++- core/bin/zksync_server/src/config.rs | 16 +- core/lib/config/src/configs/consensus.rs | 69 +++++++ core/lib/config/src/configs/mod.rs | 1 + core/lib/config/src/testonly.rs | 34 ++++ core/lib/protobuf_config/build.rs | 2 +- core/lib/protobuf_config/src/consensus.rs | 92 +++++++++ core/lib/protobuf_config/src/lib.rs | 3 +- .../src/proto/{ => config}/api.proto | 0 .../src/proto/{ => config}/chain.proto | 0 .../proto/{ => config}/circuit_breaker.proto | 0 .../{ => config}/contract_verifier.proto | 0 .../src/proto/{ => config}/contracts.proto | 0 .../src/proto/{ => config}/database.proto | 0 .../src/proto/{ => config}/eth_sender.proto | 0 .../src/proto/{ => config}/general.proto | 0 .../src/proto/{ => config}/genesis.proto | 0 .../src/proto/{ => config}/house_keeper.proto | 0 
.../src/proto/{ => config}/object_store.proto | 0 .../proto/{ => config}/observability.proto | 0 .../src/proto/{ => config}/prover.proto | 0 .../{ => config}/snapshots_creator.proto | 0 .../src/proto/{ => config}/utils.proto | 0 .../src/proto/{ => config}/wallets.proto | 0 .../src/proto/core}/consensus.proto | 6 +- core/lib/protobuf_config/src/proto/mod.rs | 2 + core/lib/protobuf_config/src/tests.rs | 2 + core/lib/types/Cargo.toml | 2 +- core/lib/zksync_core/Cargo.toml | 1 + core/lib/zksync_core/build.rs | 2 +- core/lib/zksync_core/src/consensus/config.rs | 176 +++++------------- core/lib/zksync_core/src/consensus/era.rs | 15 +- core/lib/zksync_core/src/consensus/mod.rs | 4 +- core/lib/zksync_core/src/consensus/tests.rs | 37 +--- core/lib/zksync_core/src/lib.rs | 13 +- .../zksync_core/src/temp_config_store/mod.rs | 12 +- core/node/node_framework/Cargo.toml | 1 + .../node/node_framework/examples/main_node.rs | 19 +- .../src/implementations/layers/consensus.rs | 9 +- 42 files changed, 337 insertions(+), 213 deletions(-) create mode 100644 core/lib/config/src/configs/consensus.rs create mode 100644 core/lib/protobuf_config/src/consensus.rs rename core/lib/protobuf_config/src/proto/{ => config}/api.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/chain.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/circuit_breaker.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/contract_verifier.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/contracts.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/database.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/eth_sender.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/general.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/genesis.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/house_keeper.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/object_store.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/observability.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/prover.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/snapshots_creator.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/utils.proto (100%) rename core/lib/protobuf_config/src/proto/{ => config}/wallets.proto (100%) rename core/lib/{zksync_core/src/proto => protobuf_config/src/proto/core}/consensus.proto (96%) diff --git a/Cargo.lock b/Cargo.lock index f000dc83cc7..59f014065fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8360,6 +8360,7 @@ dependencies = [ "zksync_object_store", "zksync_protobuf", "zksync_protobuf_build", + "zksync_protobuf_config", "zksync_prover_interface", "zksync_queued_job_processor", "zksync_shared_metrics", @@ -8554,6 +8555,7 @@ dependencies = [ "zksync_health_check", "zksync_l1_contract_interface", "zksync_object_store", + "zksync_protobuf_config", "zksync_snapshots_applier", "zksync_state", "zksync_storage", @@ -8667,6 +8669,7 @@ dependencies = [ "zksync_eth_watch", "zksync_health_check", "zksync_object_store", + "zksync_protobuf_config", "zksync_state", "zksync_storage", "zksync_types", diff --git a/Cargo.toml b/Cargo.toml index cefe6294361..d3894ee0dd3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -121,7 +121,7 @@ reqwest = "0.11" rlp = "0.5" rocksdb = "0.21.0" rustc_version = "0.4.0" -secp256k1 = "0.27.0" +secp256k1 = { version = "0.27.0", features = ["recovery", "global-context"] } semver = "1" sentry = "0.31" 
serde = "1" diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index ebdc0ed13a3..b447d649190 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -15,6 +15,7 @@ zksync_core.workspace = true zksync_dal.workspace = true zksync_db_connection.workspace = true zksync_config.workspace = true +zksync_protobuf_config.workspace = true zksync_eth_client.workspace = true zksync_storage.workspace = true zksync_utils.workspace = true diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index bfa59a000bd..22425cc1d06 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -8,15 +8,21 @@ use anyhow::Context; use serde::Deserialize; use url::Url; use zksync_basic_types::{Address, L1ChainId, L2ChainId}; -use zksync_config::{configs::chain::L1BatchCommitDataGeneratorMode, ObjectStoreConfig}; +use zksync_config::{ + configs::{ + chain::L1BatchCommitDataGeneratorMode, + consensus::{ConsensusConfig, ConsensusSecrets}, + }, + ObjectStoreConfig, +}; use zksync_core::{ api_server::{ tx_sender::TxSenderConfig, web3::{state::InternalApiConfig, Namespace}, }, - consensus, - temp_config_store::decode_yaml, + temp_config_store::decode_yaml_repr, }; +use zksync_protobuf_config::proto; use zksync_types::{api::BridgeAddresses, fee_model::FeeParams}; use zksync_web3_decl::{ client::L2Client, @@ -564,20 +570,24 @@ impl PostgresConfig { } } -pub(crate) fn read_consensus_secrets() -> anyhow::Result> { +pub(crate) fn read_consensus_secrets() -> anyhow::Result> { let Ok(path) = std::env::var("EN_CONSENSUS_SECRETS_PATH") else { return Ok(None); }; let cfg = std::fs::read_to_string(&path).context(path)?; - Ok(Some(decode_yaml(&cfg).context("failed decoding YAML")?)) + Ok(Some( + decode_yaml_repr::(&cfg).context("failed decoding YAML")?, + )) } -pub(crate) fn read_consensus_config() -> anyhow::Result> { +pub(crate) fn read_consensus_config() -> anyhow::Result> { let Ok(path) = std::env::var("EN_CONSENSUS_CONFIG_PATH") else { return Ok(None); }; let cfg = std::fs::read_to_string(&path).context(path)?; - Ok(Some(decode_yaml(&cfg).context("failed decoding YAML")?)) + Ok(Some( + decode_yaml_repr::(&cfg).context("failed decoding YAML")?, + )) } /// Configuration for snapshot recovery. Loaded optionally, only if the corresponding command-line argument @@ -604,7 +614,7 @@ pub(crate) struct ExternalNodeConfig { pub postgres: PostgresConfig, pub optional: OptionalENConfig, pub remote: RemoteENConfig, - pub consensus: Option, + pub consensus: Option, pub api_component: ApiComponentConfig, pub tree_component: TreeComponentConfig, } diff --git a/core/bin/zksync_server/src/config.rs b/core/bin/zksync_server/src/config.rs index 10b611a85f3..fadcced7942 100644 --- a/core/bin/zksync_server/src/config.rs +++ b/core/bin/zksync_server/src/config.rs @@ -1,20 +1,26 @@ use anyhow::Context as _; -use zksync_core::{consensus, temp_config_store::decode_yaml}; +use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; +use zksync_core::temp_config_store::decode_yaml_repr; +use zksync_protobuf_config::proto; -pub(crate) fn read_consensus_secrets() -> anyhow::Result> { +pub(crate) fn read_consensus_secrets() -> anyhow::Result> { // Read public config. 
    let Ok(path) = std::env::var("CONSENSUS_SECRETS_PATH") else {
         return Ok(None);
     };
     let secrets = std::fs::read_to_string(&path).context(path)?;
-    Ok(Some(decode_yaml(&secrets).context("failed decoding YAML")?))
+    Ok(Some(
+        decode_yaml_repr::<proto::consensus::Secrets>(&secrets).context("failed decoding YAML")?,
+    ))
 }
 
-pub(crate) fn read_consensus_config() -> anyhow::Result<Option<consensus::Config>> {
+pub(crate) fn read_consensus_config() -> anyhow::Result<Option<ConsensusConfig>> {
     // Read public config.
     let Ok(path) = std::env::var("CONSENSUS_CONFIG_PATH") else {
         return Ok(None);
     };
     let cfg = std::fs::read_to_string(&path).context(path)?;
-    Ok(Some(decode_yaml(&cfg).context("failed decoding YAML")?))
+    Ok(Some(
+        decode_yaml_repr::<proto::consensus::Config>(&cfg).context("failed decoding YAML")?,
+    ))
 }
diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs
new file mode 100644
index 00000000000..d7bcb5503cc
--- /dev/null
+++ b/core/lib/config/src/configs/consensus.rs
@@ -0,0 +1,69 @@
+use std::{
+    collections::{BTreeMap, BTreeSet},
+    fmt,
+};
+
+/// Public key of the validator (consensus participant) of the form "validator:public:<signature scheme>:<hex encoded key>"
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct ValidatorPublicKey(pub String);
+
+// Secret key of the validator (consensus participant) of the form "validator:secret:<signature scheme>:<hex encoded key>"
+#[derive(PartialEq)]
+pub struct ValidatorSecretKey(pub String);
+
+/// Public key of the node (gossip network participant) of the form "node:public:<signature scheme>:<hex encoded key>"
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct NodePublicKey(pub String);
+
+// Secret key of the node (gossip network participant) of the form "node:secret:<signature scheme>:<hex encoded key>"
+#[derive(PartialEq)]
+pub struct NodeSecretKey(pub String);
+
+impl fmt::Debug for ValidatorSecretKey {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.write_str("")
+    }
+}
+
+impl fmt::Debug for NodeSecretKey {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.write_str("")
+    }
+}
+
+/// Network address in the `<domain/ip>:port` format.
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Host(pub String);
+
+/// Config (shared between main node and external node).
+#[derive(Clone, Debug, PartialEq)]
+pub struct ConsensusConfig {
+    /// Local socket address to listen for the incoming connections.
+    pub server_addr: std::net::SocketAddr,
+    /// Public address of this node (should forward to `server_addr`)
+    /// that will be advertised to peers, so that they can connect to this
+    /// node.
+    pub public_addr: Host,
+
+    /// Validators participating in consensus.
+    pub validators: BTreeSet<ValidatorPublicKey>,
+
+    /// Maximal allowed size of the payload in bytes.
+    pub max_payload_size: usize,
+
+    /// Limit on the number of inbound connections outside
+    /// of the `static_inbound` set.
+    pub gossip_dynamic_inbound_limit: usize,
+    /// Inbound gossip connections that should be unconditionally accepted.
+    pub gossip_static_inbound: BTreeSet<NodePublicKey>,
+    /// Outbound gossip connections that the node should actively try to
+    /// establish and maintain.
+    pub gossip_static_outbound: BTreeMap<NodePublicKey, Host>,
+}
+
+/// Secrets needed for consensus.
+#[derive(Debug, PartialEq)]
+pub struct ConsensusSecrets {
+    pub validator_key: Option<ValidatorSecretKey>,
+    pub node_key: Option<NodeSecretKey>,
+}
diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs
index 94fe69a441d..a78283ac2d0 100644
--- a/core/lib/config/src/configs/mod.rs
+++ b/core/lib/config/src/configs/mod.rs
@@ -22,6 +22,7 @@ pub use self::{
 
 pub mod api;
 pub mod chain;
+pub mod consensus;
 pub mod contract_verifier;
 pub mod contracts;
 pub mod database;
diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs
index 39c1af11810..72fdfe07f08 100644
--- a/core/lib/config/src/testonly.rs
+++ b/core/lib/config/src/testonly.rs
@@ -673,3 +673,37 @@ impl Distribution for EncodeDist {
         }
     }
 }
+
+impl Distribution<configs::consensus::ConsensusConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::consensus::ConsensusConfig {
+        use configs::consensus::{ConsensusConfig, Host, NodePublicKey, ValidatorPublicKey};
+        ConsensusConfig {
+            server_addr: self.sample(rng),
+            public_addr: Host(self.sample(rng)),
+            validators: self
+                .sample_range(rng)
+                .map(|_| ValidatorPublicKey(self.sample(rng)))
+                .collect(),
+            max_payload_size: self.sample(rng),
+            gossip_dynamic_inbound_limit: self.sample(rng),
+            gossip_static_inbound: self
+                .sample_range(rng)
+                .map(|_| NodePublicKey(self.sample(rng)))
+                .collect(),
+            gossip_static_outbound: self
+                .sample_range(rng)
+                .map(|_| (NodePublicKey(self.sample(rng)), Host(self.sample(rng))))
+                .collect(),
+        }
+    }
+}
+
+impl Distribution<configs::consensus::ConsensusSecrets> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::consensus::ConsensusSecrets {
+        use configs::consensus::{ConsensusSecrets, NodeSecretKey, ValidatorSecretKey};
+        ConsensusSecrets {
+            validator_key: self.sample_opt(|| ValidatorSecretKey(self.sample(rng))),
+            node_key: self.sample_opt(|| NodeSecretKey(self.sample(rng))),
+        }
+    }
+}
diff --git a/core/lib/protobuf_config/build.rs b/core/lib/protobuf_config/build.rs
index 66afd8fea6d..9a23d015239 100644
@@ -2,7 +2,7 @@ fn main() {
     zksync_protobuf_build::Config {
         input_root: "src/proto".into(),
-        proto_root: "zksync/config".into(),
+        proto_root: "zksync".into(),
         dependencies: vec![],
         protobuf_crate: "::zksync_protobuf".parse().unwrap(),
         is_public: true,
diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs
new file mode 100644
index 00000000000..5d290f9c5b1
--- /dev/null
+++ b/core/lib/protobuf_config/src/consensus.rs
@@ -0,0 +1,92 @@
+use anyhow::Context as _;
+use zksync_config::configs::consensus::{
+    ConsensusConfig, ConsensusSecrets, Host, NodePublicKey, NodeSecretKey, ValidatorPublicKey,
+    ValidatorSecretKey,
+};
+use zksync_protobuf::{repr::ProtoRepr, required};
+
+use crate::proto::consensus as proto;
+
+impl ProtoRepr for proto::Config {
+    type Type = ConsensusConfig;
+    fn read(&self) -> anyhow::Result<Self::Type> {
+        let read_addr = |e: &proto::NodeAddr| {
+            let key = NodePublicKey(required(&e.key).context("key")?.clone());
+            let addr = Host(required(&e.addr).context("addr")?.clone());
+            anyhow::Ok((key, addr))
+        };
+        Ok(Self::Type {
+            server_addr: required(&self.server_addr)
+                .and_then(|x| Ok(x.parse()?))
+                .context("server_addr")?,
+            public_addr: Host(required(&self.public_addr).context("public_addr")?.clone()),
+            validators: self
+                .validators
+                .iter()
+                .map(|x| ValidatorPublicKey(x.clone()))
+                .collect(),
+            max_payload_size: required(&self.max_payload_size)
+                .and_then(|x| Ok((*x).try_into()?))
+                .context("max_payload_size")?,
+            gossip_dynamic_inbound_limit: required(&self.gossip_dynamic_inbound_limit)
+                .and_then(|x| Ok((*x).try_into()?))
+                .context("gossip_dynamic_inbound_limit")?,
+            gossip_static_inbound: self
+                .gossip_static_inbound
+                .iter()
+                .map(|x| NodePublicKey(x.clone()))
+                .collect(),
+            gossip_static_outbound: self
+                .gossip_static_outbound
+                .iter()
+                .enumerate()
+                .map(|(i, e)| read_addr(e).context(i))
+                .collect::<Result<_, _>>()?,
+        })
+    }
+
+    fn build(this: &Self::Type) -> Self {
+        Self {
+            server_addr: Some(this.server_addr.to_string()),
+            public_addr: Some(this.public_addr.0.clone()),
+            validators: this.validators.iter().map(|x| x.0.clone()).collect(),
+            max_payload_size: Some(this.max_payload_size.try_into().unwrap()),
+            gossip_dynamic_inbound_limit: Some(
+                this.gossip_dynamic_inbound_limit.try_into().unwrap(),
+            ),
+            gossip_static_inbound: this
+                .gossip_static_inbound
+                .iter()
+                .map(|x| x.0.clone())
+                .collect(),
+            gossip_static_outbound: this
+                .gossip_static_outbound
+                .iter()
+                .map(|x| proto::NodeAddr {
+                    key: Some(x.0 .0.clone()),
+                    addr: Some(x.1 .0.clone()),
+                })
+                .collect(),
+        }
+    }
+}
+
+impl ProtoRepr for proto::Secrets {
+    type Type = ConsensusSecrets;
+    fn read(&self) -> anyhow::Result<Self::Type> {
+        Ok(Self::Type {
+            validator_key: self
+                .validator_key
+                .as_ref()
+                .map(|x| ValidatorSecretKey(x.clone())),
+            node_key: self.node_key.as_ref().map(|x| NodeSecretKey(x.clone())),
+        })
+    }
+
+    fn build(this: &Self::Type) -> Self {
+        Self {
+            validator_key: this.validator_key.as_ref().map(|x| x.0.clone()),
+            node_key: this.node_key.as_ref().map(|x| x.0.clone()),
+        }
+    }
+}
diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs
index 94b9317e961..21cbcba283e 100644
--- a/core/lib/protobuf_config/src/lib.rs
+++ b/core/lib/protobuf_config/src/lib.rs
@@ -7,6 +7,7 @@ mod api;
 mod chain;
 mod circuit_breaker;
+mod consensus;
 mod contract_verifier;
 mod contracts;
 mod database;
@@ -39,6 +40,6 @@ fn parse_h160(bytes: &str) -> anyhow::Result<H160> {
     Ok(H160::from_str(bytes)?)
 }
 
-fn read_optional_repr<P: ProtoRepr>(field: &Option<P>) -> anyhow::Result<Option<P::Type>> {
+pub fn read_optional_repr<P: ProtoRepr>(field: &Option<P>) -> anyhow::Result<Option<P::Type>> {
     field.as_ref().map(|x| x.read()).transpose()
 }
diff --git a/core/lib/protobuf_config/src/proto/api.proto b/core/lib/protobuf_config/src/proto/config/api.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/api.proto
rename to core/lib/protobuf_config/src/proto/config/api.proto
diff --git a/core/lib/protobuf_config/src/proto/chain.proto b/core/lib/protobuf_config/src/proto/config/chain.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/chain.proto
rename to core/lib/protobuf_config/src/proto/config/chain.proto
diff --git a/core/lib/protobuf_config/src/proto/circuit_breaker.proto b/core/lib/protobuf_config/src/proto/config/circuit_breaker.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/circuit_breaker.proto
rename to core/lib/protobuf_config/src/proto/config/circuit_breaker.proto
diff --git a/core/lib/protobuf_config/src/proto/contract_verifier.proto b/core/lib/protobuf_config/src/proto/config/contract_verifier.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/contract_verifier.proto
rename to core/lib/protobuf_config/src/proto/config/contract_verifier.proto
diff --git a/core/lib/protobuf_config/src/proto/contracts.proto b/core/lib/protobuf_config/src/proto/config/contracts.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/contracts.proto
rename to core/lib/protobuf_config/src/proto/config/contracts.proto
diff --git a/core/lib/protobuf_config/src/proto/database.proto b/core/lib/protobuf_config/src/proto/config/database.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/database.proto
rename to core/lib/protobuf_config/src/proto/config/database.proto
diff --git a/core/lib/protobuf_config/src/proto/eth_sender.proto b/core/lib/protobuf_config/src/proto/config/eth_sender.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/eth_sender.proto
rename to core/lib/protobuf_config/src/proto/config/eth_sender.proto
diff --git a/core/lib/protobuf_config/src/proto/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/general.proto
rename to core/lib/protobuf_config/src/proto/config/general.proto
diff --git a/core/lib/protobuf_config/src/proto/genesis.proto b/core/lib/protobuf_config/src/proto/config/genesis.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/genesis.proto
rename to core/lib/protobuf_config/src/proto/config/genesis.proto
diff --git a/core/lib/protobuf_config/src/proto/house_keeper.proto b/core/lib/protobuf_config/src/proto/config/house_keeper.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/house_keeper.proto
rename to core/lib/protobuf_config/src/proto/config/house_keeper.proto
diff --git a/core/lib/protobuf_config/src/proto/object_store.proto b/core/lib/protobuf_config/src/proto/config/object_store.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/object_store.proto
rename to core/lib/protobuf_config/src/proto/config/object_store.proto
diff --git a/core/lib/protobuf_config/src/proto/observability.proto b/core/lib/protobuf_config/src/proto/config/observability.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/observability.proto
rename to core/lib/protobuf_config/src/proto/config/observability.proto
diff --git a/core/lib/protobuf_config/src/proto/prover.proto b/core/lib/protobuf_config/src/proto/config/prover.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/prover.proto
rename to core/lib/protobuf_config/src/proto/config/prover.proto
diff --git a/core/lib/protobuf_config/src/proto/snapshots_creator.proto b/core/lib/protobuf_config/src/proto/config/snapshots_creator.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/snapshots_creator.proto
rename to core/lib/protobuf_config/src/proto/config/snapshots_creator.proto
diff --git a/core/lib/protobuf_config/src/proto/utils.proto b/core/lib/protobuf_config/src/proto/config/utils.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/utils.proto
rename to core/lib/protobuf_config/src/proto/config/utils.proto
diff --git a/core/lib/protobuf_config/src/proto/wallets.proto b/core/lib/protobuf_config/src/proto/config/wallets.proto
similarity index 100%
rename from core/lib/protobuf_config/src/proto/wallets.proto
rename to core/lib/protobuf_config/src/proto/config/wallets.proto
diff --git a/core/lib/zksync_core/src/proto/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto
similarity index 96%
rename from core/lib/zksync_core/src/proto/consensus.proto
rename to core/lib/protobuf_config/src/proto/core/consensus.proto
index 65ffb078904..9390445b3a0 100644
--- a/core/lib/zksync_core/src/proto/consensus.proto
+++ b/core/lib/protobuf_config/src/proto/core/consensus.proto
@@ -7,6 +7,8 @@
 // (note that opening IPv6 ports may not work depending on the VM capabilities).
 // examples: "203.0.113.7:3456", "[2001:DB8::1]:4567"
 //
+// Host - network address in the `<domain/ip>:port` format.
+//
 // ValidatorPublicKey - public key of the validator (consensus participant) of the form "validator:public:<signature scheme>:<hex encoded key>"
 // Currently only bn254 signature scheme is supported for validators.
 // example: "validator:public:bn254:4b0c4697f0a35eab30f63684ae4611f3c1d631eecfd97237e2345a9b3d0c472dbb16c49b793beceaab0cdd89cda6ff1099bd1aaf1ad6cabde9a15793cc09b407"
@@ -40,7 +42,7 @@ message Config {
 
   // Public IP:port to advertise, should forward to server_addr.
   // Can be `127.0.0.1:<port>` for local tests.
-  optional string public_addr = 2; // required; IpAddr
+  optional string public_addr = 2; // required; Host
 
   // Public keys of all validators.
   // Currently it has to be a singleton with a public key corresponding to secret key in CONSENSUS_VALIDATOR_KEY env var.
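For orientation, a minimal sketch (not part of the patch) of a config file matching this schema and of how the new `decode_yaml_repr` loader parses it. The field names follow the proto definition above and the validator key is the example value quoted in the comments; the concrete values (ports, limits) are made-up placeholders:

use zksync_config::configs::consensus::ConsensusConfig;
use zksync_core::temp_config_store::decode_yaml_repr;
use zksync_protobuf_config::proto;

fn example_consensus_config() -> anyhow::Result<ConsensusConfig> {
    // Hypothetical file contents; gossip_static_inbound/outbound are
    // repeated fields, so omitting them yields empty collections.
    let yaml = r#"
server_addr: "127.0.0.1:3054"
public_addr: "127.0.0.1:3054"
validators:
  - "validator:public:bn254:4b0c4697f0a35eab30f63684ae4611f3c1d631eecfd97237e2345a9b3d0c472dbb16c49b793beceaab0cdd89cda6ff1099bd1aaf1ad6cabde9a15793cc09b407"
max_payload_size: 2500000
gossip_dynamic_inbound_limit: 100
"#;
    // proto::consensus::Config::read() converts the proto repr into the
    // zksync_config ConsensusConfig introduced by this patch.
    decode_yaml_repr::<proto::consensus::Config>(yaml)
}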
@@ -65,5 +67,3 @@ message Secrets { optional string validator_key = 1; // required for validator nodes; ValidatorSecretKey optional string node_key = 2; // required for any node; NodeSecretKey } - - diff --git a/core/lib/protobuf_config/src/proto/mod.rs b/core/lib/protobuf_config/src/proto/mod.rs index 9f44835b29c..73e081736c9 100644 --- a/core/lib/protobuf_config/src/proto/mod.rs +++ b/core/lib/protobuf_config/src/proto/mod.rs @@ -1,3 +1,5 @@ #![allow(warnings)] +pub use self::{config::*, core::*}; + include!(concat!(env!("OUT_DIR"), "/src/proto/gen.rs")); diff --git a/core/lib/protobuf_config/src/tests.rs b/core/lib/protobuf_config/src/tests.rs index 41f69477756..0db119e718b 100644 --- a/core/lib/protobuf_config/src/tests.rs +++ b/core/lib/protobuf_config/src/tests.rs @@ -20,6 +20,8 @@ fn test_encoding() { test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index 8ffa5bf678f..b6628481f35 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -34,7 +34,7 @@ prost.workspace = true itertools.workspace = true # Crypto stuff -secp256k1 = { workspace = true, features = ["recovery", "global-context"] } +secp256k1.workspace = true blake2.workspace = true [dev-dependencies] diff --git a/core/lib/zksync_core/Cargo.toml b/core/lib/zksync_core/Cargo.toml index a70c63178a3..8a2f2b97d5d 100644 --- a/core/lib/zksync_core/Cargo.toml +++ b/core/lib/zksync_core/Cargo.toml @@ -20,6 +20,7 @@ zksync_dal.workspace = true prover_dal.workspace = true zksync_db_connection.workspace = true zksync_config.workspace = true +zksync_protobuf_config.workspace = true zksync_utils.workspace = true zksync_contracts.workspace = true zksync_system_constants.workspace = true diff --git a/core/lib/zksync_core/build.rs b/core/lib/zksync_core/build.rs index cda803150a6..78ad3018e33 100644 --- a/core/lib/zksync_core/build.rs +++ b/core/lib/zksync_core/build.rs @@ -3,7 +3,7 @@ fn main() { zksync_protobuf_build::Config { input_root: "src/proto".into(), proto_root: "zksync/core".into(), - dependencies: vec![], + dependencies: vec!["::zksync_protobuf_config::proto".parse().unwrap()], protobuf_crate: "::zksync_protobuf".parse().unwrap(), is_public: true, } diff --git a/core/lib/zksync_core/src/consensus/config.rs b/core/lib/zksync_core/src/consensus/config.rs index 2a6d8e85f8e..31532cbd14e 100644 --- a/core/lib/zksync_core/src/consensus/config.rs +++ b/core/lib/zksync_core/src/consensus/config.rs @@ -1,22 +1,18 @@ //! Configuration utilities for the consensus component. 
-use std::collections::{BTreeMap, BTreeSet};
+use std::collections::{BTreeMap, BTreeSet, HashMap};
 
 use anyhow::Context as _;
 use zksync_concurrency::net;
-use zksync_consensus_crypto::{read_required_text, Text, TextFmt};
+use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets, Host, NodePublicKey};
+use zksync_consensus_crypto::{Text, TextFmt};
 use zksync_consensus_executor as executor;
 use zksync_consensus_roles::{node, validator};
-use zksync_protobuf::{required, ProtoFmt};
 
-use crate::{
-    consensus::{fetcher::P2PConfig, MainNodeConfig},
-    proto::consensus as proto,
-};
+use crate::consensus::{fetcher::P2PConfig, MainNodeConfig};
 
-fn read_optional_secret_text<T: TextFmt>(text: &Option<String>) -> anyhow::Result<Option<T>> {
-    text.as_ref()
-        .map(|t| Text::new(t).decode())
-        .transpose()
+fn read_secret_text<T: TextFmt>(text: Option<&String>) -> anyhow::Result<T> {
+    Text::new(text.context("missing")?)
+        .decode()
         .map_err(|_| anyhow::format_err!("invalid format"))
 }
 
@@ -46,128 +42,56 @@ pub struct Config {
     pub gossip_static_outbound: BTreeMap<node::PublicKey, net::Host>,
 }
 
-impl Config {
-    pub fn main_node(&self, secrets: &Secrets) -> anyhow::Result<MainNodeConfig> {
-        Ok(MainNodeConfig {
-            executor: self.executor_config(secrets.node_key.clone().context("missing node_key")?),
-            validator_key: secrets
-                .validator_key
-                .clone()
-                .context("missing validator_key")?,
-        })
-    }
-
-    pub fn p2p(&self, secrets: &Secrets) -> anyhow::Result<P2PConfig> {
-        Ok(self.executor_config(secrets.node_key.clone().context("missing node_key")?))
-    }
-
-    fn executor_config(&self, node_key: node::SecretKey) -> executor::Config {
-        executor::Config {
-            server_addr: self.server_addr,
-            public_addr: self.public_addr.clone(),
-            max_payload_size: self.max_payload_size,
-            node_key,
-            gossip_dynamic_inbound_limit: self.gossip_dynamic_inbound_limit,
-            gossip_static_inbound: self.gossip_static_inbound.clone().into_iter().collect(),
-            gossip_static_outbound: self.gossip_static_outbound.clone().into_iter().collect(),
-        }
-    }
+fn validator_key(secrets: &ConsensusSecrets) -> anyhow::Result<validator::SecretKey> {
+    read_secret_text(secrets.validator_key.as_ref().map(|x| &x.0))
 }
 
-impl ProtoFmt for Config {
-    type Proto = proto::Config;
-    fn read(r: &Self::Proto) -> anyhow::Result<Self> {
-        let validators = r
-            .validators
-            .iter()
-            .enumerate()
-            .map(|(i, v)| {
-                Text::new(v)
-                    .decode()
-                    .with_context(|| format!("validators[{i}]"))
-            })
-            .collect::<Result<Vec<_>, _>>()?;
-        let validators = validator::ValidatorSet::new(validators).context("validators")?;
-
-        let mut gossip_static_inbound = BTreeSet::new();
-        for (i, v) in r.gossip_static_inbound.iter().enumerate() {
-            gossip_static_inbound.insert(
-                Text::new(v)
-                    .decode()
-                    .with_context(|| format!("gossip_static_inbound[{i}]"))?,
-            );
-        }
-        let mut gossip_static_outbound = BTreeMap::new();
-        for (i, e) in r.gossip_static_outbound.iter().enumerate() {
-            let key = read_required_text(&e.key)
-                .with_context(|| format!("gossip_static_outbound[{i}].key"))?;
-            let addr = net::Host(
-                required(&e.addr)
-                    .with_context(|| format!("gossip_static_outbound[{i}].addr"))?
-                    .clone(),
-            );
-            gossip_static_outbound.insert(key, addr);
-        }
-        Ok(Self {
-            server_addr: read_required_text(&r.server_addr).context("server_addr")?,
-            public_addr: net::Host(required(&r.public_addr).context("public_addr")?.clone()),
-            validators,
-            max_payload_size: required(&r.max_payload_size)
-                .and_then(|x| Ok((*x).try_into()?))
-                .context("max_payload_size")?,
-            gossip_dynamic_inbound_limit: required(&r.gossip_dynamic_inbound_limit)
-                .and_then(|x| Ok((*x).try_into()?))
-                .context("gossip_dynamic_inbound_limit")?,
-            gossip_static_inbound,
-            gossip_static_outbound,
-        })
-    }
-
-    fn build(&self) -> Self::Proto {
-        Self::Proto {
-            server_addr: Some(self.server_addr.encode()),
-            public_addr: Some(self.public_addr.0.clone()),
-            validators: self.validators.iter().map(TextFmt::encode).collect(),
-            max_payload_size: Some(self.max_payload_size.try_into().unwrap()),
-            gossip_static_inbound: self
-                .gossip_static_inbound
-                .iter()
-                .map(TextFmt::encode)
-                .collect(),
-            gossip_static_outbound: self
-                .gossip_static_outbound
-                .iter()
-                .map(|(key, addr)| proto::NodeAddr {
-                    key: Some(TextFmt::encode(key)),
-                    addr: Some(addr.0.clone()),
-                })
-                .collect(),
-            gossip_dynamic_inbound_limit: Some(
-                self.gossip_dynamic_inbound_limit.try_into().unwrap(),
-            ),
-        }
-    }
+fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result<node::SecretKey> {
+    read_secret_text(secrets.node_key.as_ref().map(|x| &x.0))
 }
 
-#[derive(Debug)]
-pub struct Secrets {
-    pub validator_key: Option<validator::SecretKey>,
-    pub node_key: Option<node::SecretKey>,
+/// Constructs a main node config from raw config.
+pub fn main_node(
+    cfg: &ConsensusConfig,
+    secrets: &ConsensusSecrets,
+) -> anyhow::Result<MainNodeConfig> {
+    Ok(MainNodeConfig {
+        executor: executor(cfg, secrets)?,
+        validator_key: validator_key(secrets).context("validator_key")?,
+    })
 }
 
-impl ProtoFmt for Secrets {
-    type Proto = proto::Secrets;
-    fn read(r: &Self::Proto) -> anyhow::Result<Self> {
-        Ok(Self {
-            validator_key: read_optional_secret_text(&r.validator_key).context("validator_key")?,
-            node_key: read_optional_secret_text(&r.node_key).context("node_key")?,
-        })
-    }
+pub(super) fn p2p(cfg: &ConsensusConfig, secrets: &ConsensusSecrets) -> anyhow::Result<P2PConfig> {
+    executor(cfg, secrets)
+}
 
-    fn build(&self) -> Self::Proto {
-        Self::Proto {
-            validator_key: self.validator_key.as_ref().map(TextFmt::encode),
-            node_key: self.node_key.as_ref().map(TextFmt::encode),
+fn executor(cfg: &ConsensusConfig, secrets: &ConsensusSecrets) -> anyhow::Result<executor::Config> {
+    let mut gossip_static_outbound = HashMap::new();
+    {
+        let mut append = |key: &NodePublicKey, addr: &Host| {
+            gossip_static_outbound.insert(
+                Text::new(&key.0).decode().context("key")?,
+                net::Host(addr.0.clone()),
+            );
+            anyhow::Ok(())
+        };
+        for (i, (k, v)) in cfg.gossip_static_outbound.iter().enumerate() {
+            append(k, v).with_context(|| format!("gossip_static_outbound[{i}]"))?;
         }
     }
+    Ok(executor::Config {
+        server_addr: cfg.server_addr,
+        public_addr: net::Host(cfg.public_addr.0.clone()),
+        max_payload_size: cfg.max_payload_size,
+        node_key: node_key(secrets).context("node_key")?,
+        gossip_dynamic_inbound_limit: cfg.gossip_dynamic_inbound_limit,
+        gossip_static_inbound: cfg
+            .gossip_static_inbound
+            .iter()
+            .enumerate()
+            .map(|(i, x)| Text::new(&x.0).decode().context(i))
+            .collect::<Result<_, _>>()
+            .context("gossip_static_inbound")?,
+        gossip_static_outbound,
+    })
 }
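Before the `era.rs` changes below, a minimal sketch of how the relocated helpers are now meant to be called, replacing the old `cfg.main_node(&secrets)` method; the paths and function names come from this patch, while the wrapper function itself is hypothetical:

use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets};
use zksync_core::consensus;

// Sketch only: `cfg` and `secrets` would come from the
// read_consensus_config()/read_consensus_secrets() loaders shown earlier.
fn build_main_node_config(
    cfg: &ConsensusConfig,
    secrets: &ConsensusSecrets,
) -> anyhow::Result<consensus::MainNodeConfig> {
    // Fails with "validator_key"/"node_key" context if the corresponding
    // secret is missing from the secrets file.
    consensus::config::main_node(cfg, secrets)
}

diff --git a/core/lib/zksync_core/src/consensus/era.rs b/core/lib/zksync_core/src/consensus/era.rs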
@@ -7,13 +7,10 @@ use std::sync::Arc; use zksync_concurrency::ctx; +use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; use zksync_dal::{ConnectionPool, Core}; -use super::{ - config::{Config, Secrets}, - fetcher::Fetcher, - storage::Store, -}; +use super::{config, fetcher::Fetcher, storage::Store}; use crate::sync_layer::{sync_action::ActionQueueSender, MainNodeClient, SyncState}; /// Runs the consensus task in the main node mode. @@ -37,7 +34,7 @@ pub async fn run_main_node( /// The fetcher implementation may either be p2p or centralized. pub async fn run_fetcher( ctx: &ctx::Ctx, - cfg: Option<(Config, Secrets)>, + cfg: Option<(ConsensusConfig, ConsensusSecrets)>, pool: ConnectionPool, sync_state: SyncState, main_node_client: Arc, @@ -49,7 +46,11 @@ pub async fn run_fetcher( client: main_node_client, }; let res = match cfg { - Some((cfg, secrets)) => fetcher.run_p2p(ctx, actions, cfg.p2p(&secrets)?).await, + Some((cfg, secrets)) => { + fetcher + .run_p2p(ctx, actions, config::p2p(&cfg, &secrets)?) + .await + } None => fetcher.run_centralized(ctx, actions).await, }; tracing::info!("Consensus actor stopped"); diff --git a/core/lib/zksync_core/src/consensus/mod.rs b/core/lib/zksync_core/src/consensus/mod.rs index 282319240c2..433d79a53b8 100644 --- a/core/lib/zksync_core/src/consensus/mod.rs +++ b/core/lib/zksync_core/src/consensus/mod.rs @@ -10,7 +10,7 @@ use zksync_consensus_storage::BlockStore; pub use self::{fetcher::*, storage::Store}; -mod config; +pub mod config; pub mod era; mod fetcher; mod storage; @@ -19,8 +19,6 @@ pub(crate) mod testonly; #[cfg(test)] mod tests; -pub use config::{Config, Secrets}; - /// Main node consensus config. #[derive(Debug, Clone)] pub struct MainNodeConfig { diff --git a/core/lib/zksync_core/src/consensus/tests.rs b/core/lib/zksync_core/src/consensus/tests.rs index a80e450646f..dc0a5e80b0d 100644 --- a/core/lib/zksync_core/src/consensus/tests.rs +++ b/core/lib/zksync_core/src/consensus/tests.rs @@ -1,5 +1,5 @@ use anyhow::Context as _; -use rand::{distributions::Distribution, Rng}; +use rand::Rng; use test_casing::test_casing; use tracing::Instrument as _; use zksync_concurrency::{ctx, scope}; @@ -9,8 +9,6 @@ use zksync_consensus_network::testonly::{new_configs, new_fullnode}; use zksync_consensus_roles::validator::testonly::Setup; use zksync_consensus_storage as storage; use zksync_consensus_storage::PersistentBlockStore as _; -use zksync_consensus_utils::EncodeDist; -use zksync_protobuf::testonly::{test_encode_all_formats, FmtConv}; use zksync_types::{L1BatchNumber, MiniblockNumber}; use super::*; @@ -438,36 +436,3 @@ async fn test_centralized_fetcher(from_snapshot: bool) { .await .unwrap(); } - -impl Distribution for EncodeDist { - fn sample(&self, rng: &mut R) -> Config { - Config { - server_addr: self.sample(rng), - public_addr: self.sample(rng), - validators: rng.gen(), - max_payload_size: self.sample(rng), - gossip_dynamic_inbound_limit: self.sample(rng), - gossip_static_inbound: self.sample_range(rng).map(|_| rng.gen()).collect(), - gossip_static_outbound: self - .sample_range(rng) - .map(|_| (rng.gen(), self.sample(rng))) - .collect(), - } - } -} - -impl Distribution for EncodeDist { - fn sample(&self, rng: &mut R) -> Secrets { - Secrets { - validator_key: self.sample_opt(|| rng.gen()), - node_key: self.sample_opt(|| rng.gen()), - } - } -} - -#[test] -fn test_schema_encoding() { - let ctx = ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - test_encode_all_formats::>(rng); -} diff --git 
a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index dd2cf89b0a4..273924577da 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -29,6 +29,7 @@ use zksync_config::{ CircuitBreakerConfig, L1BatchCommitDataGeneratorMode, MempoolConfig, OperationsManagerConfig, StateKeeperConfig, }, + consensus::ConsensusConfig, database::{MerkleTreeConfig, MerkleTreeMode}, wallets, wallets::Wallets, @@ -240,7 +241,7 @@ pub async fn initialize_components( contracts_config: &ContractsConfig, components: &[Component], secrets: &Secrets, - consensus_config: Option, + consensus_config: Option, ) -> anyhow::Result<( Vec>>, watch::Sender, @@ -549,10 +550,12 @@ pub async fn initialize_components( if components.contains(&Component::Consensus) { let secrets = secrets.consensus.as_ref().context("Secrets are missing")?; - let cfg = consensus_config - .clone() - .context("consensus component's config is missing")? - .main_node(secrets)?; + let cfg = consensus::config::main_node( + consensus_config + .as_ref() + .context("consensus component's config is missing")?, + secrets, + )?; let started_at = Instant::now(); tracing::info!("initializing Consensus"); let pool = connection_pool.clone(); diff --git a/core/lib/zksync_core/src/temp_config_store/mod.rs b/core/lib/zksync_core/src/temp_config_store/mod.rs index f471a7b251c..55fb9e11d94 100644 --- a/core/lib/zksync_core/src/temp_config_store/mod.rs +++ b/core/lib/zksync_core/src/temp_config_store/mod.rs @@ -6,6 +6,7 @@ use zksync_config::{ CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig, StateKeeperConfig, }, + consensus::ConsensusSecrets, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, wallets::{AddressWallet, EthSender, StateKeeper, Wallet, Wallets}, @@ -16,9 +17,10 @@ use zksync_config::{ ApiConfig, ContractVerifierConfig, DBConfig, ETHConfig, ETHWatchConfig, GasAdjusterConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; -use zksync_protobuf::{read_optional, repr::ProtoRepr, ProtoFmt}; +use zksync_protobuf::{repr::ProtoRepr, ProtoFmt}; +use zksync_protobuf_config::read_optional_repr; -use crate::{consensus, proto}; +use crate::proto; pub fn decode_yaml(yaml: &str) -> anyhow::Result { let d = serde_yaml::Deserializer::from_str(yaml); @@ -68,20 +70,20 @@ pub struct TempConfigStore { #[derive(Debug)] pub struct Secrets { - pub consensus: Option, + pub consensus: Option, } impl ProtoFmt for Secrets { type Proto = proto::Secrets; fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self { - consensus: read_optional(&r.consensus).context("consensus")?, + consensus: read_optional_repr(&r.consensus).context("consensus")?, }) } fn build(&self) -> Self::Proto { Self::Proto { - consensus: self.consensus.as_ref().map(|x| x.build()), + consensus: self.consensus.as_ref().map(ProtoRepr::build), } } } diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 0c83e9d0f06..1b8341bf1ae 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -17,6 +17,7 @@ zksync_dal.workspace = true prover_dal.workspace = true zksync_db_connection.workspace = true zksync_config.workspace = true +zksync_protobuf_config.workspace = true zksync_state.workspace = true zksync_object_store.workspace = true zksync_core.workspace = true diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs index 93a2856eb43..32ed33c644a 100644 --- 
a/core/node/node_framework/examples/main_node.rs
+++ b/core/node/node_framework/examples/main_node.rs
@@ -9,6 +9,7 @@ use zksync_config::{
         CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig,
         StateKeeperConfig,
     },
+    consensus::{ConsensusConfig, ConsensusSecrets},
     fri_prover_group::FriProverGroupConfig,
     house_keeper::HouseKeeperConfig,
     wallets::Wallets,
@@ -23,9 +24,8 @@ use zksync_core::{
         tx_sender::{ApiContracts, TxSenderConfig},
         web3::{state::InternalApiConfig, Namespace},
     },
-    consensus,
     metadata_calculator::MetadataCalculatorConfig,
-    temp_config_store::decode_yaml,
+    temp_config_store::decode_yaml_repr,
 };
 use zksync_env_config::FromEnv;
 use zksync_node_framework::{
@@ -60,6 +60,7 @@ use zksync_node_framework::{
     },
     service::{ZkStackService, ZkStackServiceBuilder, ZkStackServiceError},
 };
+use zksync_protobuf_config::proto;
 
 struct MainNodeBuilder {
     node: ZkStackServiceBuilder,
@@ -355,22 +356,28 @@ impl MainNodeBuilder {
 
     fn add_consensus_layer(mut self) -> anyhow::Result<Self> {
         // Copy-pasted from the zksync_server codebase.
-        fn read_consensus_secrets() -> anyhow::Result<Option<consensus::Secrets>> {
+        fn read_consensus_secrets() -> anyhow::Result<Option<ConsensusSecrets>> {
             // Read public config.
             let Ok(path) = std::env::var("CONSENSUS_SECRETS_PATH") else {
                 return Ok(None);
             };
             let secrets = std::fs::read_to_string(&path).context(path)?;
-            Ok(Some(decode_yaml(&secrets).context("failed decoding YAML")?))
+            Ok(Some(
+                decode_yaml_repr::<proto::consensus::Secrets>(&secrets)
+                    .context("failed decoding YAML")?,
+            ))
         }
 
-        fn read_consensus_config() -> anyhow::Result<Option<consensus::Config>> {
+        fn read_consensus_config() -> anyhow::Result<Option<ConsensusConfig>> {
             // Read public config.
             let Ok(path) = std::env::var("CONSENSUS_CONFIG_PATH") else {
                 return Ok(None);
             };
             let cfg = std::fs::read_to_string(&path).context(path)?;
-            Ok(Some(decode_yaml(&cfg).context("failed decoding YAML")?))
+            Ok(Some(
+                decode_yaml_repr::<proto::consensus::Config>(&cfg)
+                    .context("failed decoding YAML")?,
+            ))
         }
 
         let config = read_consensus_config().context("read_consensus_config()")?;
diff --git a/core/node/node_framework/src/implementations/layers/consensus.rs b/core/node/node_framework/src/implementations/layers/consensus.rs
index b5fd528416f..48f26bb1b8b 100644
--- a/core/node/node_framework/src/implementations/layers/consensus.rs
+++ b/core/node/node_framework/src/implementations/layers/consensus.rs
@@ -2,6 +2,7 @@ use std::sync::Arc;
 
 use anyhow::Context as _;
 use zksync_concurrency::{ctx, scope};
+use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets};
 use zksync_core::{
     consensus::{self, MainNodeConfig},
     sync_layer::{ActionQueueSender, MainNodeClient, SyncState},
@@ -27,8 +28,8 @@ pub enum Mode {
 
 #[derive(Debug)]
 pub struct ConsensusLayer {
     pub mode: Mode,
-    pub config: Option<consensus::Config>,
-    pub secrets: Option<consensus::Secrets>,
+    pub config: Option<ConsensusConfig>,
+    pub secrets: Option<ConsensusSecrets>,
 }
 
 #[async_trait::async_trait]
@@ -53,7 +54,7 @@ impl WiringLayer for ConsensusLayer {
                     WiringError::Configuration("Missing private consensus config".to_string())
                 })?;
 
-                let main_node_config = config.main_node(&secrets)?;
+                let main_node_config = consensus::config::main_node(&config, &secrets)?;
 
                 let task = MainNodeConsensusTask {
                     config: main_node_config,
@@ -134,7 +135,7 @@ impl Task for MainNodeConsensusTask {
 
 #[derive(Debug)]
 pub struct FetcherTask {
-    config: Option<(consensus::Config, consensus::Secrets)>,
+    config: Option<(ConsensusConfig, ConsensusSecrets)>,
     pool: ConnectionPool<Core>,
     main_node_client: Arc<dyn MainNodeClient>,
     sync_state: SyncState,

From 1016e082417d61661eac3c7f58a04433d5317a02 Mon Sep 17 00:00:00 2001
From: koloz193
Date: Thu, 11 Apr 2024 04:56:16 -0400
Subject: [PATCH 12/29]
chore(genesis): remove gas bound caller (#1635) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Remove GasBoundCaller from genesis ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. --- core/lib/constants/src/contracts.rs | 7 --- core/lib/types/src/system_contracts.rs | 10 +--- .../tests/ts-integration/tests/system.test.ts | 47 ------------------- etc/env/base/contracts.toml | 6 +-- 4 files changed, 5 insertions(+), 65 deletions(-) diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs index 10137a774ee..8a33c1bfc70 100644 --- a/core/lib/constants/src/contracts.rs +++ b/core/lib/constants/src/contracts.rs @@ -125,13 +125,6 @@ pub const ERC20_TRANSFER_TOPIC: H256 = H256([ 99, 196, 161, 22, 40, 245, 90, 77, 245, 35, 179, 239, ]); -/// This contract is specifically outside of the system contract address space as it can relay any call so it breaks -/// the trust of system contracts. -pub const GAS_BOUND_CALLER_ADDRESS: Address = H160([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x00, 0x00, -]); - // TODO (SMA-240): Research whether using zero address is ok pub const MINT_AND_BURN_ADDRESS: H160 = H160::zero(); diff --git a/core/lib/types/src/system_contracts.rs b/core/lib/types/src/system_contracts.rs index 936a4a19279..4f358e76976 100644 --- a/core/lib/types/src/system_contracts.rs +++ b/core/lib/types/src/system_contracts.rs @@ -5,7 +5,7 @@ use zksync_basic_types::{AccountTreeId, Address, U256}; use zksync_contracts::{read_sys_contract_bytecode, ContractLanguage, SystemContractsRepo}; use zksync_system_constants::{ BOOTLOADER_UTILITIES_ADDRESS, CODE_ORACLE_ADDRESS, COMPRESSOR_ADDRESS, EVENT_WRITER_ADDRESS, - GAS_BOUND_CALLER_ADDRESS, P256VERIFY_PRECOMPILE_ADDRESS, PUBDATA_CHUNK_PUBLISHER_ADDRESS, + P256VERIFY_PRECOMPILE_ADDRESS, PUBDATA_CHUNK_PUBLISHER_ADDRESS, }; use crate::{ @@ -25,7 +25,7 @@ use crate::{ pub const TX_NONCE_INCREMENT: U256 = U256([1, 0, 0, 0]); // 1 pub const DEPLOYMENT_NONCE_INCREMENT: U256 = U256([0, 0, 1, 0]); // 2^128 -static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 24] = [ +static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 23] = [ ( "", "AccountCodeStorage", @@ -156,12 +156,6 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 24] = [ PUBDATA_CHUNK_PUBLISHER_ADDRESS, ContractLanguage::Sol, ), - ( - "", - "GasBoundCaller", - GAS_BOUND_CALLER_ADDRESS, - ContractLanguage::Sol, - ), ]; static SYSTEM_CONTRACTS: Lazy> = Lazy::new(|| { diff --git a/core/tests/ts-integration/tests/system.test.ts b/core/tests/ts-integration/tests/system.test.ts index ffcc5fb4bf7..5e6ef82b25e 100644 --- a/core/tests/ts-integration/tests/system.test.ts +++ b/core/tests/ts-integration/tests/system.test.ts @@ -16,15 +16,6 @@ import { serialize, hashBytecode } from 'zksync-web3/build/src/utils'; import { deployOnAnyLocalAddress, ForceDeployment } from '../src/system'; import { getTestContract } from '../src/helpers'; -import { - GasBoundCaller, - GasBoundCallerFactory, - L1Messenger, - L1MessengerFactory, - SystemContext, - SystemContextFactory -} from 
'system-contracts/typechain';
-
 const contracts = {
     counter: getTestContract('Counter'),
     events: getTestContract('Emitter')
@@ -59,44 +50,6 @@ describe('System behavior checks', () => {
         expect(result_b).toEqual('0x');
     });
 
-    test('GasBoundCaller should be deployed and works correctly', async () => {
-        const gasBoundCallerAddress = '0x0000000000000000000000000000000000010000';
-        const l1MessengerAddress = '0x0000000000000000000000000000000000008008';
-        const systemContextAddress = '0x000000000000000000000000000000000000800b';
-        const systemContext: SystemContext = SystemContextFactory.connect(systemContextAddress, alice._signerL2());
-        const l1Messenger: L1Messenger = L1MessengerFactory.connect(l1MessengerAddress, alice._signerL2());
-        const gasBoundCaller: GasBoundCaller = GasBoundCallerFactory.connect(gasBoundCallerAddress, alice._signerL2());
-
-        const pubdataToSend = 5000;
-        const gasSpentOnPubdata = (await systemContext.gasPerPubdataByte()).mul(pubdataToSend);
-
-        const pubdata = ethers.utils.hexlify(ethers.utils.randomBytes(pubdataToSend));
-
-        await expect(
-            (
-                await gasBoundCaller.gasBoundCall(
-                    l1MessengerAddress,
-                    gasSpentOnPubdata,
-                    l1Messenger.interface.encodeFunctionData('sendToL1', [pubdata]),
-                    {
-                        gasLimit: 80_000_000
-                    }
-                )
-            ).wait()
-        ).toBeRejected();
-
-        await (
-            await gasBoundCaller.gasBoundCall(
-                l1MessengerAddress,
-                80_000_000,
-                l1Messenger.interface.encodeFunctionData('sendToL1', [pubdata]),
-                {
-                    gasLimit: 80_000_000
-                }
-            )
-        ).wait();
-    });
-
     test('Should check that system contracts and SDK create same CREATE/CREATE2 addresses', async () => {
         const deployerContract = new zksync.Contract(
             zksync.utils.CONTRACT_DEPLOYER_ADDRESS,
diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml
index 700b6017af9..e0a7a81642c 100644
--- a/etc/env/base/contracts.toml
+++ b/etc/env/base/contracts.toml
@@ -26,11 +26,11 @@ RECURSION_NODE_LEVEL_VK_HASH = "0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a2
 RECURSION_LEAF_LEVEL_VK_HASH = "0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210"
 RECURSION_CIRCUITS_SET_VKS_HASH = "0x18c1639094f58177409186e8c48d9f577c9410901d2f1d486b3e7d6cf553ae4c"
 GENESIS_TX_HASH = "0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e"
-GENESIS_ROOT = "0x04dac60f5d2ed063336847f360cbc3f332d1688d2a39c1f87c0b3180b2200174"
+GENESIS_ROOT = "0x1920e0154aa7649a645e7931b84796bfec22b58250778b828d9a5b8c7d32f661"
 PRIORITY_TX_MAX_GAS_LIMIT = 72000000
 DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT = 10000000
-GENESIS_BATCH_COMMITMENT = "0x1b5c65b84ba99d11b2ae25fa9ec0302d15bc81805363934c782eccf1f9690fba"
-GENESIS_ROLLUP_LEAF_INDEX = "52"
+GENESIS_BATCH_COMMITMENT = "0xc64914ac5697bf6a73b9cca890ef013a83f8415573f2829b29111598410852a6"
+GENESIS_ROLLUP_LEAF_INDEX = "50"
 GENESIS_PROTOCOL_VERSION = "23"
 L1_WETH_BRIDGE_IMPL_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9"
 L1_WETH_BRIDGE_PROXY_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9"

From 3f05e837ca80dca4f8f4636c11690f22f6a317a2 Mon Sep 17 00:00:00 2001
From: perekopskiy <53865202+perekopskiy@users.noreply.github.com>
Date: Thu, 11 Apr 2024 12:33:26 +0300
Subject: [PATCH 13/29] chore(api): Deprecate `l1_to_l2_transactions_compatibility_mode` config (#1616)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Deprecates the `l1_to_l2_transactions_compatibility_mode` config variable.

## Why ❔

The 1.4.1 upgrade has finished, so this functionality is no longer needed.

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog
entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. --- core/bin/external_node/src/config/mod.rs | 16 ----- core/lib/config/src/configs/api.rs | 9 --- core/lib/config/src/testonly.rs | 1 - core/lib/env_config/src/api.rs | 2 - core/lib/protobuf_config/src/api.rs | 7 -- .../src/proto/config/api.proto | 2 +- .../src/api_server/tx_sender/mod.rs | 72 +++---------------- etc/env/base/api.toml | 1 - etc/env/file_based/general.yaml | 1 - 9 files changed, 9 insertions(+), 102 deletions(-) diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 22425cc1d06..2eb739ccfd4 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -218,15 +218,6 @@ pub(crate) struct OptionalENConfig { /// The max possible number of gas that `eth_estimateGas` is allowed to overestimate. #[serde(default = "OptionalENConfig::default_estimate_gas_acceptable_overestimation")] pub estimate_gas_acceptable_overestimation: u32, - /// Whether to use the compatibility mode for gas estimation for L1->L2 transactions. - /// During the migration to the 1.4.1 fee model, there will be a period, when the server - /// will already have the 1.4.1 fee model, while the L1 contracts will still expect the transactions - /// to use the previous fee model with much higher overhead. - /// - /// When set to `true`, the API will ensure to return gasLimit is high enough overhead for both the old - /// and the new fee model when estimating L1->L2 transactions. - #[serde(default = "OptionalENConfig::default_l1_to_l2_transactions_compatibility_mode")] - pub l1_to_l2_transactions_compatibility_mode: bool, /// The multiplier to use when suggesting gas price. Should be higher than one, /// otherwise if the L1 prices soar, the suggested gas price won't be sufficient to be included in block #[serde(default = "OptionalENConfig::default_gas_price_scale_factor")] @@ -343,10 +334,6 @@ impl OptionalENConfig { 1_000 } - const fn default_l1_to_l2_transactions_compatibility_mode() -> bool { - true - } - const fn default_gas_price_scale_factor() -> f64 { 1.2 } @@ -753,9 +740,6 @@ impl From for TxSenderConfig { max_allowed_l2_tx_gas_limit: u64::MAX, validation_computational_gas_limit: u32::MAX, chain_id: config.remote.l2_chain_id, - l1_to_l2_transactions_compatibility_mode: config - .optional - .l1_to_l2_transactions_compatibility_mode, max_pubdata_per_batch: config.remote.max_pubdata_per_batch, // Does not matter for EN. whitelisted_tokens_for_aa: Default::default(), diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs index 2739849444b..bac342094e4 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -59,14 +59,6 @@ pub struct Web3JsonRpcConfig { pub estimate_gas_scale_factor: f64, /// The max possible number of gas that `eth_estimateGas` is allowed to overestimate. pub estimate_gas_acceptable_overestimation: u32, - /// Whether to use the compatibility mode for gas estimation for L1->L2 transactions. - /// During the migration to the 1.4.1 fee model, there will be a period, when the server - /// will already have the 1.4.1 fee model, while the L1 contracts will still expect the transactions - /// to use the previous fee model with much higher overhead. 
- /// - /// When set to `true`, the API will ensure to return gasLimit is high enough overhead for both the old - /// and the new fee model when estimating L1->L2 transactions. - pub l1_to_l2_transactions_compatibility_mode: bool, /// Max possible size of an ABI encoded tx (in bytes). pub max_tx_size: usize, /// Max number of cache misses during one VM execution. If the number of cache misses exceeds this value, the API server panics. @@ -127,7 +119,6 @@ impl Web3JsonRpcConfig { account_pks: Default::default(), estimate_gas_scale_factor: 1.2, estimate_gas_acceptable_overestimation: 1000, - l1_to_l2_transactions_compatibility_mode: true, max_tx_size: 1000000, vm_execution_cache_misses_limit: Default::default(), vm_concurrency_limit: Default::default(), diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 72fdfe07f08..1986e1a85fa 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -65,7 +65,6 @@ impl Distribution for EncodeDist { account_pks: self.sample_opt(|| self.sample_range(rng).map(|_| rng.gen()).collect()), estimate_gas_scale_factor: self.sample(rng), estimate_gas_acceptable_overestimation: self.sample(rng), - l1_to_l2_transactions_compatibility_mode: self.sample(rng), max_tx_size: self.sample(rng), vm_execution_cache_misses_limit: self.sample(rng), vm_concurrency_limit: self.sample(rng), diff --git a/core/lib/env_config/src/api.rs b/core/lib/env_config/src/api.rs index 999b4457076..f6f2995aac7 100644 --- a/core/lib/env_config/src/api.rs +++ b/core/lib/env_config/src/api.rs @@ -74,7 +74,6 @@ mod tests { estimate_gas_scale_factor: 1.0f64, gas_price_scale_factor: 1.2, estimate_gas_acceptable_overestimation: 1000, - l1_to_l2_transactions_compatibility_mode: true, max_tx_size: 1000000, vm_execution_cache_misses_limit: None, vm_concurrency_limit: Some(512), @@ -127,7 +126,6 @@ mod tests { API_WEB3_JSON_RPC_WHITELISTED_TOKENS_FOR_AA="0x0000000000000000000000000000000000000001,0x0000000000000000000000000000000000000002" API_WEB3_JSON_RPC_ESTIMATE_GAS_SCALE_FACTOR=1.0 API_WEB3_JSON_RPC_ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION=1000 - API_WEB3_JSON_RPC_L1_TO_L2_TRANSACTIONS_COMPATIBILITY_MODE=true API_WEB3_JSON_RPC_MAX_TX_SIZE=1000000 API_WEB3_JSON_RPC_VM_CONCURRENCY_LIMIT=512 API_WEB3_JSON_RPC_FACTORY_DEPS_CACHE_SIZE_MB=128 diff --git a/core/lib/protobuf_config/src/api.rs b/core/lib/protobuf_config/src/api.rs index 1832a2eb2a7..9aa061672b1 100644 --- a/core/lib/protobuf_config/src/api.rs +++ b/core/lib/protobuf_config/src/api.rs @@ -69,10 +69,6 @@ impl ProtoRepr for proto::Web3JsonRpc { &self.estimate_gas_acceptable_overestimation, ) .context("acceptable_overestimation")?, - l1_to_l2_transactions_compatibility_mode: *required( - &self.l1_to_l2_transactions_compatibility_mode, - ) - .context("l1_to_l2_transactions_compatibility_mode")?, max_tx_size: required(&self.max_tx_size) .and_then(|x| Ok((*x).try_into()?)) .context("max_tx_size")?, @@ -158,9 +154,6 @@ impl ProtoRepr for proto::Web3JsonRpc { estimate_gas_acceptable_overestimation: Some( this.estimate_gas_acceptable_overestimation, ), - l1_to_l2_transactions_compatibility_mode: Some( - this.l1_to_l2_transactions_compatibility_mode, - ), max_tx_size: Some(this.max_tx_size.try_into().unwrap()), vm_execution_cache_misses_limit: this .vm_execution_cache_misses_limit diff --git a/core/lib/protobuf_config/src/proto/config/api.proto b/core/lib/protobuf_config/src/proto/config/api.proto index 9199cac2b5c..46f98a93a79 100644 --- a/core/lib/protobuf_config/src/proto/config/api.proto +++ 
b/core/lib/protobuf_config/src/proto/config/api.proto
@@ -19,7 +19,6 @@ message Web3JsonRpc {
     repeated string account_pks = 12; // optional
     optional double estimate_gas_scale_factor = 13; // required
     optional uint32 estimate_gas_acceptable_overestimation = 14; // required
-    optional bool l1_to_l2_transactions_compatibility_mode = 15; // required
     optional uint64 max_tx_size = 16; // required; B
     optional uint64 vm_execution_cache_misses_limit = 17; // optional
     optional uint64 vm_concurrency_limit = 18; // optional
@@ -35,6 +34,7 @@
     optional uint64 mempool_cache_update_interval = 28; // optional
     optional uint64 mempool_cache_size = 29; // optional
     repeated string whitelisted_tokens_for_aa = 30; // optional
+    reserved 15; reserved "l1_to_l2_transactions_compatibility_mode";
 }
diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs
index b354a73c95f..b2781468e49 100644
--- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs
+++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs
@@ -22,7 +22,6 @@ use zksync_types::{
     fee::{Fee, TransactionExecutionMetrics},
     fee_model::BatchFeeInput,
     get_code_key, get_intrinsic_constants,
-    l1::is_l1_tx_type,
     l2::{error::TxCheckError::TxDuplication, L2Tx},
     utils::storage_key_for_eth_balance,
     AccountTreeId, Address, ExecuteTransactionCommon, L2ChainId, MiniblockNumber, Nonce,
@@ -230,7 +229,6 @@ pub struct TxSenderConfig {
     pub max_allowed_l2_tx_gas_limit: u64,
     pub vm_execution_cache_misses_limit: Option<usize>,
     pub validation_computational_gas_limit: u32,
-    pub l1_to_l2_transactions_compatibility_mode: bool,
     pub chain_id: L2ChainId,
     pub max_pubdata_per_batch: u64,
     pub whitelisted_tokens_for_aa: Vec<Address>,
@@ -251,8 +249,6 @@ impl TxSenderConfig {
             vm_execution_cache_misses_limit: web3_json_config.vm_execution_cache_misses_limit,
             validation_computational_gas_limit: state_keeper_config
                 .validation_computational_gas_limit,
-            l1_to_l2_transactions_compatibility_mode: web3_json_config
-                .l1_to_l2_transactions_compatibility_mode,
             chain_id,
             max_pubdata_per_batch: state_keeper_config.max_pubdata_per_batch,
             whitelisted_tokens_for_aa: web3_json_config.whitelisted_tokens_for_aa.clone(),
@@ -873,29 +869,14 @@
             result.into_api_call_result()?;
         self.ensure_tx_executable(&tx, &tx_metrics, false)?;
 
-        // Now, we need to calculate the final overhead for the transaction. We need to take into account the fact
-        // that the migration of 1.4.1 may be still going on.
-        let overhead = if self
-            .0
-            .sender_config
-            .l1_to_l2_transactions_compatibility_mode
-        {
-            derive_pessimistic_overhead(
-                suggested_gas_limit,
-                gas_per_pubdata_byte as u32,
-                tx.encoding_len(),
-                tx.tx_format() as u8,
-                protocol_version.into(),
-            )
-        } else {
-            derive_overhead(
-                suggested_gas_limit,
-                gas_per_pubdata_byte as u32,
-                tx.encoding_len(),
-                tx.tx_format() as u8,
-                protocol_version.into(),
-            )
-        } as u64;
+        // Now, we need to calculate the final overhead for the transaction.
+        let overhead = derive_overhead(
+            suggested_gas_limit,
+            gas_per_pubdata_byte as u32,
+            tx.encoding_len(),
+            tx.tx_format() as u8,
+            protocol_version.into(),
+        ) as u64;
 
         let full_gas_limit = match
             tx_body_gas_limit.overflowing_add(additional_gas_for_pubdata + overhead)
         {
@@ -1013,40 +994,3 @@
         Ok(())
     }
 }
-
-/// During switch to the 1.4.1 protocol version, there will be a moment of discrepancy, when while
-/// the L2 has already upgraded to 1.4.1 (and thus suggests smaller overhead), the L1 is still on the previous version.
-///
-/// This might lead to situations when L1->L2 transactions estimated with the new versions would work on the state keeper side,
-/// but they won't even make it there, but the protection mechanisms for L1->L2 transactions will reject them on L1.
-/// TODO(X): remove this function after the upgrade is complete
-fn derive_pessimistic_overhead(
-    gas_limit: u64,
-    gas_price_per_pubdata: u32,
-    encoded_len: usize,
-    tx_type: u8,
-    vm_version: VmVersion,
-) -> u32 {
-    let current_overhead = derive_overhead(
-        gas_limit,
-        gas_price_per_pubdata,
-        encoded_len,
-        tx_type,
-        vm_version,
-    );
-
-    if is_l1_tx_type(tx_type) {
-        // We are in the L1->L2 transaction, so we need to account for the fact that the L1 is still on the previous version.
-        // We assume that the overhead will be the same as for the previous version.
- let previous_overhead = derive_overhead( - gas_limit, - gas_price_per_pubdata, - encoded_len, - tx_type, - VmVersion::VmBoojumIntegration, - ); - current_overhead.max(previous_overhead) - } else { - current_overhead - } -} diff --git a/etc/env/base/api.toml b/etc/env/base/api.toml index 3cbc2180dd8..c8e0050ac31 100644 --- a/etc/env/base/api.toml +++ b/etc/env/base/api.toml @@ -18,7 +18,6 @@ pubsub_polling_interval = 200 threads_per_server = 128 max_nonce_ahead = 50 gas_price_scale_factor = 1.2 -l1_to_l2_transactions_compatibility_mode = true request_timeout = 10 account_pks = [ "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index c251c667d33..8479a3022cb 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -38,7 +38,6 @@ api: pubsub_polling_interval: 200 max_nonce_ahead: 50 gas_price_scale_factor: 1.2 - l1_to_l2_transactions_compatibility_mode: true request_timeout: 10 account_pks: - 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 From f4a439d397beeea8cdea5c0f1a698ba1ad614ab9 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Thu, 11 Apr 2024 12:40:55 +0300 Subject: [PATCH 14/29] chore(db): Clean up `l1_batches` table (#1631) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Removes columns: gas_per_pubdata_byte_in_block, gas_per_pubdata_limit, compressed_write_logs, compressed_contracts, parent_hash. Soft-removes merkle_root_hash. ## Why ❔ Clean up. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. 
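A note on why the `.sqlx` files below are renamed rather than edited in place: sqlx's offline mode names each metadata file `query-<hash>.json` after a hash of the query text, so dropping `merkle_root_hash` from a `SELECT` list produces a new hash and shifts every later column ordinal down by one, which is exactly what the renames and ordinal churn below show. A minimal sketch of the pattern (the function name and the trimmed-down query are illustrative only, not a query from this PR; compiling `sqlx::query!` also assumes a reachable database or prepared offline metadata):

```rust
use sqlx::PgPool;

// Removing a column from the SELECT changes the query text, so
// `cargo sqlx prepare` emits a new `query-<hash>.json`, and the ordinals of
// all columns after the removed one shift down by one.
async fn get_batch_hash(pool: &PgPool, number: i64) -> sqlx::Result<Option<Vec<u8>>> {
    let row = sqlx::query!(
        r#"
        SELECT
            hash -- `merkle_root_hash` was previously selected here as well
        FROM
            l1_batches
        WHERE
            number = $1
        "#,
        number
    )
    .fetch_optional(pool)
    .await?;
    // `hash` is a nullable BYTEA column, so the generated field is `Option<Vec<u8>>`.
    Ok(row.and_then(|row| row.hash))
}
```

This is also why each renamed file's `"hash"` field matches its new filename.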
--- ...fda36a55972bde029fb97bbd3ddbcf2046cd.json} | 46 ++++++++----------- ...dfbf962636347864fc365fafa46c7a2da5f30.json | 22 --------- ...8eeab159533211d2ddbef41e6ff0ba937d04a.json | 14 ------ ...07997b7e24682da56f84d812da2b9aeb14ca2.json | 40 ---------------- ...8c166c71630d71cdc1b8d005a7261c0b53c6.json} | 46 ++++++++----------- ...09cb7fcee648e35b6d66b44944b2a9a82cef.json} | 46 ++++++++----------- ...1fbe5f9e838b0f1fd6ff906c0130a15f9509.json} | 46 ++++++++----------- ...709ec0333e6673ce43fedd00638b9252cbd8.json} | 46 ++++++++----------- ...b0a2e83e864d997aba4ca92c1d27e947b716.json} | 46 ++++++++----------- ...490e9dd1108a34b97b0efd54d19b678fb598.json} | 46 ++++++++----------- ...d7d59a34f97285e855d5bd67f28f6f4ff1d4e.json | 16 ------- ...4b8026e0e8dde20d98750f874415f14faefd.json} | 46 ++++++++----------- ...0e202da24ac3ca205154417e98ccfa7437379.json | 16 +++++++ ...0240410100536_clean_up_l1_batches.down.sql | 5 ++ .../20240410100536_clean_up_l1_batches.up.sql | 5 ++ core/lib/dal/src/blocks_dal.rs | 9 ---- core/lib/dal/src/models/storage_block.rs | 6 --- .../structures/commit_batch_info.rs | 8 ++-- core/lib/types/src/commitment/mod.rs | 1 - core/lib/zksync_core/src/eth_sender/tests.rs | 1 - core/lib/zksync_core/src/utils/testonly.rs | 1 - 21 files changed, 190 insertions(+), 322 deletions(-) rename core/lib/dal/.sqlx/{query-da72486421d60d3d633e19632a8c66fc73c2d4f35a5dc40e117903dba4e7edb2.json => query-04e63146ce0d7c5c7343b2881debfda36a55972bde029fb97bbd3ddbcf2046cd.json} (70%) delete mode 100644 core/lib/dal/.sqlx/query-0c899c68886f76a232ffac0454cdfbf962636347864fc365fafa46c7a2da5f30.json delete mode 100644 core/lib/dal/.sqlx/query-20f84f9ec21459d8c7ad53241758eeab159533211d2ddbef41e6ff0ba937d04a.json delete mode 100644 core/lib/dal/.sqlx/query-26cb272c2a46a267c47681e0f1f07997b7e24682da56f84d812da2b9aeb14ca2.json rename core/lib/dal/.sqlx/{query-e7d47990db585af7cbf096155babe152ead3d4a7f22bc3584803288e8d639c13.json => query-2cc640434b8bd8b848bb285d060a8c166c71630d71cdc1b8d005a7261c0b53c6.json} (69%) rename core/lib/dal/.sqlx/{query-268d27c427a69c5e35e6004380da45f005141b01eac26946251bba68e9276acf.json => query-4b8c99469e2ed69d0d6859ef1fa609cb7fcee648e35b6d66b44944b2a9a82cef.json} (75%) rename core/lib/dal/.sqlx/{query-ba1b6c4588ea3d3b138e03a63ef1731a10b332fec68f4a1e56edec4df71ccaff.json => query-4bdda8d8956ac5dedc3bd389e3721fbe5f9e838b0f1fd6ff906c0130a15f9509.json} (68%) rename core/lib/dal/.sqlx/{query-4aef05b8864484c6979c93d7ce3159cf509cf27adf9afdbe61bd867e536b7e47.json => query-50f1f9ababe67af63fab9b82294f709ec0333e6673ce43fedd00638b9252cbd8.json} (72%) rename core/lib/dal/.sqlx/{query-b63ee98d4708c8121a287becb7e1973d18d45c4a200a60d318cdb1222d63ccaa.json => query-66ea8037bb114e0cfccc51b9db41b0a2e83e864d997aba4ca92c1d27e947b716.json} (74%) rename core/lib/dal/.sqlx/{query-db41e2480bdef66e1c89347402418128cae1b40ea2ee34b9d5269c12f8b5738a.json => query-70137d0b3cdcbbf6d85c9d0a5408490e9dd1108a34b97b0efd54d19b678fb598.json} (78%) delete mode 100644 core/lib/dal/.sqlx/query-7b908340613dadbbef46e2160d2d7d59a34f97285e855d5bd67f28f6f4ff1d4e.json rename core/lib/dal/.sqlx/{query-bab1857df66bbef57705ae7796161f3a71f5c6737e08745a37b41b22f4dfd030.json => query-c6b799f7afc1c310a16aeaa9819c4b8026e0e8dde20d98750f874415f14faefd.json} (75%) create mode 100644 core/lib/dal/.sqlx/query-c98f19505792a4085bb5ddddb6d0e202da24ac3ca205154417e98ccfa7437379.json create mode 100644 core/lib/dal/migrations/20240410100536_clean_up_l1_batches.down.sql create mode 100644 
core/lib/dal/migrations/20240410100536_clean_up_l1_batches.up.sql diff --git a/core/lib/dal/.sqlx/query-da72486421d60d3d633e19632a8c66fc73c2d4f35a5dc40e117903dba4e7edb2.json b/core/lib/dal/.sqlx/query-04e63146ce0d7c5c7343b2881debfda36a55972bde029fb97bbd3ddbcf2046cd.json similarity index 70% rename from core/lib/dal/.sqlx/query-da72486421d60d3d633e19632a8c66fc73c2d4f35a5dc40e117903dba4e7edb2.json rename to core/lib/dal/.sqlx/query-04e63146ce0d7c5c7343b2881debfda36a55972bde029fb97bbd3ddbcf2046cd.json index 2481edec2e5..08aa73e9a79 100644 --- a/core/lib/dal/.sqlx/query-da72486421d60d3d633e19632a8c66fc73c2d4f35a5dc40e117903dba4e7edb2.json +++ b/core/lib/dal/.sqlx/query-04e63146ce0d7c5c7343b2881debfda36a55972bde029fb97bbd3ddbcf2046cd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -60,101 +60,96 @@ }, { "ordinal": 11, - "name": "merkle_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 12, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 14, + "ordinal": 13, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 15, + 
"ordinal": 14, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 16, + "ordinal": 15, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 19, + "ordinal": 18, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 20, + "ordinal": 19, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 21, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 23, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 26, + "ordinal": 25, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 26, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 28, + "ordinal": 27, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 28, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 30, + "ordinal": 29, "name": "pubdata_input", "type_info": "Bytea" } @@ -179,7 +174,6 @@ true, true, true, - true, false, false, false, @@ -201,5 +195,5 @@ true ] }, - "hash": "da72486421d60d3d633e19632a8c66fc73c2d4f35a5dc40e117903dba4e7edb2" + "hash": "04e63146ce0d7c5c7343b2881debfda36a55972bde029fb97bbd3ddbcf2046cd" } diff --git a/core/lib/dal/.sqlx/query-0c899c68886f76a232ffac0454cdfbf962636347864fc365fafa46c7a2da5f30.json b/core/lib/dal/.sqlx/query-0c899c68886f76a232ffac0454cdfbf962636347864fc365fafa46c7a2da5f30.json deleted file mode 100644 index 35c1633fc55..00000000000 --- a/core/lib/dal/.sqlx/query-0c899c68886f76a232ffac0454cdfbf962636347864fc365fafa46c7a2da5f30.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n virtual_blocks\n FROM\n miniblocks\n WHERE\n number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "virtual_blocks", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "0c899c68886f76a232ffac0454cdfbf962636347864fc365fafa46c7a2da5f30" -} diff --git a/core/lib/dal/.sqlx/query-20f84f9ec21459d8c7ad53241758eeab159533211d2ddbef41e6ff0ba937d04a.json b/core/lib/dal/.sqlx/query-20f84f9ec21459d8c7ad53241758eeab159533211d2ddbef41e6ff0ba937d04a.json deleted file mode 100644 index 5f7048a8a20..00000000000 --- a/core/lib/dal/.sqlx/query-20f84f9ec21459d8c7ad53241758eeab159533211d2ddbef41e6ff0ba937d04a.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE l1_batches\n SET\n skip_proof = TRUE\n WHERE\n number = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "20f84f9ec21459d8c7ad53241758eeab159533211d2ddbef41e6ff0ba937d04a" -} diff --git a/core/lib/dal/.sqlx/query-26cb272c2a46a267c47681e0f1f07997b7e24682da56f84d812da2b9aeb14ca2.json b/core/lib/dal/.sqlx/query-26cb272c2a46a267c47681e0f1f07997b7e24682da56f84d812da2b9aeb14ca2.json deleted file mode 100644 index 58ba7c33f2b..00000000000 --- a/core/lib/dal/.sqlx/query-26cb272c2a46a267c47681e0f1f07997b7e24682da56f84d812da2b9aeb14ca2.json +++ 
/dev/null @@ -1,40 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblock_number AS \"miniblock_number!\",\n hash,\n index_in_block AS \"index_in_block!\",\n l1_batch_tx_index AS \"l1_batch_tx_index!\"\n FROM\n transactions\n WHERE\n l1_batch_number = $1\n ORDER BY\n miniblock_number,\n index_in_block\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "miniblock_number!", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "hash", - "type_info": "Bytea" - }, - { - "ordinal": 2, - "name": "index_in_block!", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "l1_batch_tx_index!", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - true, - false, - true, - true - ] - }, - "hash": "26cb272c2a46a267c47681e0f1f07997b7e24682da56f84d812da2b9aeb14ca2" -} diff --git a/core/lib/dal/.sqlx/query-e7d47990db585af7cbf096155babe152ead3d4a7f22bc3584803288e8d639c13.json b/core/lib/dal/.sqlx/query-2cc640434b8bd8b848bb285d060a8c166c71630d71cdc1b8d005a7261c0b53c6.json similarity index 69% rename from core/lib/dal/.sqlx/query-e7d47990db585af7cbf096155babe152ead3d4a7f22bc3584803288e8d639c13.json rename to core/lib/dal/.sqlx/query-2cc640434b8bd8b848bb285d060a8c166c71630d71cdc1b8d005a7261c0b53c6.json index d749fe4facd..fc11c7d6565 100644 --- a/core/lib/dal/.sqlx/query-e7d47990db585af7cbf096155babe152ead3d4a7f22bc3584803288e8d639c13.json +++ b/core/lib/dal/.sqlx/query-2cc640434b8bd8b848bb285d060a8c166c71630d71cdc1b8d005a7261c0b53c6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS ROW_NUMBER\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - ROW_NUMBER = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS ROW_NUMBER\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON 
commitments.l1_batch_number = inn.number\n WHERE\n number - ROW_NUMBER = $1\n ", "describe": { "columns": [ { @@ -60,101 +60,96 @@ }, { "ordinal": 11, - "name": "merkle_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 12, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 14, + "ordinal": 13, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 15, + "ordinal": 14, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 16, + "ordinal": 15, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 19, + "ordinal": 18, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 20, + "ordinal": 19, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 21, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 23, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 26, + "ordinal": 25, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 26, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 28, + "ordinal": 27, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 28, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 30, + "ordinal": 29, "name": "pubdata_input", "type_info": "Bytea" } @@ -177,7 +172,6 @@ true, true, true, - true, false, false, false, @@ -199,5 +193,5 @@ true ] }, - "hash": "e7d47990db585af7cbf096155babe152ead3d4a7f22bc3584803288e8d639c13" + "hash": "2cc640434b8bd8b848bb285d060a8c166c71630d71cdc1b8d005a7261c0b53c6" } diff --git a/core/lib/dal/.sqlx/query-268d27c427a69c5e35e6004380da45f005141b01eac26946251bba68e9276acf.json b/core/lib/dal/.sqlx/query-4b8c99469e2ed69d0d6859ef1fa609cb7fcee648e35b6d66b44944b2a9a82cef.json similarity index 75% rename from core/lib/dal/.sqlx/query-268d27c427a69c5e35e6004380da45f005141b01eac26946251bba68e9276acf.json rename to core/lib/dal/.sqlx/query-4b8c99469e2ed69d0d6859ef1fa609cb7fcee648e35b6d66b44944b2a9a82cef.json index aaa09322306..3c4cda19362 100644 --- a/core/lib/dal/.sqlx/query-268d27c427a69c5e35e6004380da45f005141b01eac26946251bba68e9276acf.json +++ b/core/lib/dal/.sqlx/query-4b8c99469e2ed69d0d6859ef1fa609cb7fcee648e35b6d66b44944b2a9a82cef.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n 
bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -60,101 +60,96 @@ }, { "ordinal": 11, - "name": "merkle_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 12, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 14, + "ordinal": 13, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 15, + "ordinal": 14, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 16, + "ordinal": 15, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 19, + "ordinal": 18, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 20, + "ordinal": 19, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 21, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 23, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 26, + "ordinal": 25, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 26, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 28, + "ordinal": 27, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 28, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 30, + "ordinal": 29, "name": "pubdata_input", "type_info": "Bytea" } @@ -174,7 +169,6 @@ true, true, true, - true, false, false, false, @@ -196,5 +190,5 @@ true ] }, - "hash": "268d27c427a69c5e35e6004380da45f005141b01eac26946251bba68e9276acf" + "hash": "4b8c99469e2ed69d0d6859ef1fa609cb7fcee648e35b6d66b44944b2a9a82cef" } diff --git a/core/lib/dal/.sqlx/query-ba1b6c4588ea3d3b138e03a63ef1731a10b332fec68f4a1e56edec4df71ccaff.json b/core/lib/dal/.sqlx/query-4bdda8d8956ac5dedc3bd389e3721fbe5f9e838b0f1fd6ff906c0130a15f9509.json similarity index 68% rename from 
core/lib/dal/.sqlx/query-ba1b6c4588ea3d3b138e03a63ef1731a10b332fec68f4a1e56edec4df71ccaff.json rename to core/lib/dal/.sqlx/query-4bdda8d8956ac5dedc3bd389e3721fbe5f9e838b0f1fd6ff906c0130a15f9509.json index 2bd61878880..cdf143094c2 100644 --- a/core/lib/dal/.sqlx/query-ba1b6c4588ea3d3b138e03a63ef1731a10b332fec68f4a1e56edec4df71ccaff.json +++ b/core/lib/dal/.sqlx/query-4bdda8d8956ac5dedc3bd389e3721fbe5f9e838b0f1fd6ff906c0130a15f9509.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -60,101 +60,96 @@ }, { "ordinal": 11, - "name": "merkle_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 12, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 14, + "ordinal": 13, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 15, + "ordinal": 14, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 16, + "ordinal": 15, 
"name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 19, + "ordinal": 18, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 20, + "ordinal": 19, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 21, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 23, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 26, + "ordinal": 25, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 26, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 28, + "ordinal": 27, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 28, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 30, + "ordinal": 29, "name": "pubdata_input", "type_info": "Bytea" } @@ -179,7 +174,6 @@ true, true, true, - true, false, false, false, @@ -201,5 +195,5 @@ true ] }, - "hash": "ba1b6c4588ea3d3b138e03a63ef1731a10b332fec68f4a1e56edec4df71ccaff" + "hash": "4bdda8d8956ac5dedc3bd389e3721fbe5f9e838b0f1fd6ff906c0130a15f9509" } diff --git a/core/lib/dal/.sqlx/query-4aef05b8864484c6979c93d7ce3159cf509cf27adf9afdbe61bd867e536b7e47.json b/core/lib/dal/.sqlx/query-50f1f9ababe67af63fab9b82294f709ec0333e6673ce43fedd00638b9252cbd8.json similarity index 72% rename from core/lib/dal/.sqlx/query-4aef05b8864484c6979c93d7ce3159cf509cf27adf9afdbe61bd867e536b7e47.json rename to core/lib/dal/.sqlx/query-50f1f9ababe67af63fab9b82294f709ec0333e6673ce43fedd00638b9252cbd8.json index d24b6eb7583..78f4430fda1 100644 --- a/core/lib/dal/.sqlx/query-4aef05b8864484c6979c93d7ce3159cf509cf27adf9afdbe61bd867e536b7e47.json +++ b/core/lib/dal/.sqlx/query-50f1f9ababe67af63fab9b82294f709ec0333e6673ce43fedd00638b9252cbd8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n 
rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -60,101 +60,96 @@ }, { "ordinal": 11, - "name": "merkle_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 12, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 14, + "ordinal": 13, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 15, + "ordinal": 14, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 16, + "ordinal": 15, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 19, + "ordinal": 18, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 20, + "ordinal": 19, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 21, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 23, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 26, + "ordinal": 25, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 26, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 28, + "ordinal": 27, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 28, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 30, + "ordinal": 29, "name": "pubdata_input", "type_info": "Bytea" } @@ -176,7 +171,6 @@ true, true, true, - true, false, false, false, @@ -198,5 +192,5 @@ true ] }, - "hash": "4aef05b8864484c6979c93d7ce3159cf509cf27adf9afdbe61bd867e536b7e47" + "hash": "50f1f9ababe67af63fab9b82294f709ec0333e6673ce43fedd00638b9252cbd8" } diff --git a/core/lib/dal/.sqlx/query-b63ee98d4708c8121a287becb7e1973d18d45c4a200a60d318cdb1222d63ccaa.json b/core/lib/dal/.sqlx/query-66ea8037bb114e0cfccc51b9db41b0a2e83e864d997aba4ca92c1d27e947b716.json similarity index 74% rename from core/lib/dal/.sqlx/query-b63ee98d4708c8121a287becb7e1973d18d45c4a200a60d318cdb1222d63ccaa.json rename to core/lib/dal/.sqlx/query-66ea8037bb114e0cfccc51b9db41b0a2e83e864d997aba4ca92c1d27e947b716.json index 8fe75d74396..82befeb8a93 100644 --- a/core/lib/dal/.sqlx/query-b63ee98d4708c8121a287becb7e1973d18d45c4a200a60d318cdb1222d63ccaa.json +++ b/core/lib/dal/.sqlx/query-66ea8037bb114e0cfccc51b9db41b0a2e83e864d997aba4ca92c1d27e947b716.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n 
merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -60,101 +60,96 @@ }, { "ordinal": 11, - "name": "merkle_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 12, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 14, + "ordinal": 13, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 15, + "ordinal": 14, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 16, + "ordinal": 15, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 19, + "ordinal": 18, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 20, + "ordinal": 19, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 21, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 23, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 26, + "ordinal": 25, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 26, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 28, + "ordinal": 27, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 28, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 30, + "ordinal": 29, "name": "pubdata_input", "type_info": "Bytea" } @@ -178,7 +173,6 @@ true, true, true, - true, false, false, false, @@ -200,5 +194,5 @@ true ] }, - "hash": "b63ee98d4708c8121a287becb7e1973d18d45c4a200a60d318cdb1222d63ccaa" + "hash": 
"66ea8037bb114e0cfccc51b9db41b0a2e83e864d997aba4ca92c1d27e947b716" } diff --git a/core/lib/dal/.sqlx/query-db41e2480bdef66e1c89347402418128cae1b40ea2ee34b9d5269c12f8b5738a.json b/core/lib/dal/.sqlx/query-70137d0b3cdcbbf6d85c9d0a5408490e9dd1108a34b97b0efd54d19b678fb598.json similarity index 78% rename from core/lib/dal/.sqlx/query-db41e2480bdef66e1c89347402418128cae1b40ea2ee34b9d5269c12f8b5738a.json rename to core/lib/dal/.sqlx/query-70137d0b3cdcbbf6d85c9d0a5408490e9dd1108a34b97b0efd54d19b678fb598.json index 11404965d99..ccc3f333e02 100644 --- a/core/lib/dal/.sqlx/query-db41e2480bdef66e1c89347402418128cae1b40ea2ee34b9d5269c12f8b5738a.json +++ b/core/lib/dal/.sqlx/query-70137d0b3cdcbbf6d85c9d0a5408490e9dd1108a34b97b0efd54d19b678fb598.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -60,101 +60,96 @@ }, { "ordinal": 11, - "name": "merkle_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 12, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 14, + "ordinal": 13, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 15, + "ordinal": 14, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 16, + "ordinal": 15, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 19, + "ordinal": 18, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 20, + "ordinal": 19, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 21, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "pass_through_data_hash", 
"type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 23, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 26, + "ordinal": 25, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 27, + "ordinal": 26, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 27, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 28, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 30, + "ordinal": 29, "name": "pubdata_input", "type_info": "Bytea" } @@ -176,7 +171,6 @@ true, true, true, - true, false, false, false, @@ -198,5 +192,5 @@ true ] }, - "hash": "db41e2480bdef66e1c89347402418128cae1b40ea2ee34b9d5269c12f8b5738a" + "hash": "70137d0b3cdcbbf6d85c9d0a5408490e9dd1108a34b97b0efd54d19b678fb598" } diff --git a/core/lib/dal/.sqlx/query-7b908340613dadbbef46e2160d2d7d59a34f97285e855d5bd67f28f6f4ff1d4e.json b/core/lib/dal/.sqlx/query-7b908340613dadbbef46e2160d2d7d59a34f97285e855d5bd67f28f6f4ff1d4e.json deleted file mode 100644 index 9616aee1755..00000000000 --- a/core/lib/dal/.sqlx/query-7b908340613dadbbef46e2160d2d7d59a34f97285e855d5bd67f28f6f4ff1d4e.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE l1_batches\n SET\n hash = $1,\n merkle_root_hash = $1,\n rollup_last_leaf_index = $2,\n updated_at = NOW()\n WHERE\n number = $3\n AND hash IS NULL\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Bytea", - "Int8", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "7b908340613dadbbef46e2160d2d7d59a34f97285e855d5bd67f28f6f4ff1d4e" -} diff --git a/core/lib/dal/.sqlx/query-bab1857df66bbef57705ae7796161f3a71f5c6737e08745a37b41b22f4dfd030.json b/core/lib/dal/.sqlx/query-c6b799f7afc1c310a16aeaa9819c4b8026e0e8dde20d98750f874415f14faefd.json similarity index 75% rename from core/lib/dal/.sqlx/query-bab1857df66bbef57705ae7796161f3a71f5c6737e08745a37b41b22f4dfd030.json rename to core/lib/dal/.sqlx/query-c6b799f7afc1c310a16aeaa9819c4b8026e0e8dde20d98750f874415f14faefd.json index 4510b6084d2..db146fd7acb 100644 --- a/core/lib/dal/.sqlx/query-bab1857df66bbef57705ae7796161f3a71f5c6737e08745a37b41b22f4dfd030.json +++ b/core/lib/dal/.sqlx/query-c6b799f7afc1c310a16aeaa9819c4b8026e0e8dde20d98750f874415f14faefd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n l2_to_l1_logs,\n l2_to_l1_messages,\n 
used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -60,101 +60,96 @@ }, { "ordinal": 11, - "name": "merkle_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 12, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 13, + "ordinal": 12, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 14, + "ordinal": 13, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 15, + "ordinal": 14, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 16, + "ordinal": 15, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 17, + "ordinal": 16, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 18, + "ordinal": 17, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 19, + "ordinal": 18, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 20, + "ordinal": 19, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 20, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 21, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 23, + "ordinal": 22, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 23, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 24, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 26, + "ordinal": 25, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 26, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 28, + "ordinal": 27, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 28, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 30, + "ordinal": 29, "name": "pubdata_input", "type_info": "Bytea" } @@ -176,7 +171,6 @@ true, true, true, - true, false, false, false, @@ -198,5 +192,5 @@ true ] }, - "hash": "bab1857df66bbef57705ae7796161f3a71f5c6737e08745a37b41b22f4dfd030" + "hash": "c6b799f7afc1c310a16aeaa9819c4b8026e0e8dde20d98750f874415f14faefd" } diff --git a/core/lib/dal/.sqlx/query-c98f19505792a4085bb5ddddb6d0e202da24ac3ca205154417e98ccfa7437379.json b/core/lib/dal/.sqlx/query-c98f19505792a4085bb5ddddb6d0e202da24ac3ca205154417e98ccfa7437379.json new file mode 100644 index 00000000000..cfb5c9aae14 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c98f19505792a4085bb5ddddb6d0e202da24ac3ca205154417e98ccfa7437379.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE l1_batches\n SET\n hash = $1,\n rollup_last_leaf_index = $2,\n updated_at = NOW()\n WHERE\n number = $3\n AND hash IS NULL\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Bytea", + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "c98f19505792a4085bb5ddddb6d0e202da24ac3ca205154417e98ccfa7437379" +} diff --git 
a/core/lib/dal/migrations/20240410100536_clean_up_l1_batches.down.sql b/core/lib/dal/migrations/20240410100536_clean_up_l1_batches.down.sql new file mode 100644 index 00000000000..a978b29c6a4 --- /dev/null +++ b/core/lib/dal/migrations/20240410100536_clean_up_l1_batches.down.sql @@ -0,0 +1,5 @@ +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS gas_per_pubdata_byte_in_block INT; +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS gas_per_pubdata_limit BIGINT NOT NULL DEFAULT 0; +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS compressed_write_logs BYTEA; +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS compressed_contracts BYTEA; +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS parent_hash BYTEA; diff --git a/core/lib/dal/migrations/20240410100536_clean_up_l1_batches.up.sql b/core/lib/dal/migrations/20240410100536_clean_up_l1_batches.up.sql new file mode 100644 index 00000000000..ffe5fb2d23b --- /dev/null +++ b/core/lib/dal/migrations/20240410100536_clean_up_l1_batches.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE l1_batches DROP COLUMN IF EXISTS gas_per_pubdata_byte_in_block; +ALTER TABLE l1_batches DROP COLUMN IF EXISTS gas_per_pubdata_limit; +ALTER TABLE l1_batches DROP COLUMN IF EXISTS compressed_write_logs; +ALTER TABLE l1_batches DROP COLUMN IF EXISTS compressed_contracts; +ALTER TABLE l1_batches DROP COLUMN IF EXISTS parent_hash; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 418bdad3b22..2b3f0df3fa6 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -271,7 +271,6 @@ impl BlocksDal<'_, '_> { eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, - merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, @@ -786,7 +785,6 @@ impl BlocksDal<'_, '_> { UPDATE l1_batches SET hash = $1, - merkle_root_hash = $1, rollup_last_leaf_index = $2, updated_at = NOW() WHERE @@ -962,7 +960,6 @@ impl BlocksDal<'_, '_> { eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, - merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, @@ -1147,7 +1144,6 @@ impl BlocksDal<'_, '_> { eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, - merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, @@ -1232,7 +1228,6 @@ impl BlocksDal<'_, '_> { eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, - merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, @@ -1310,7 +1305,6 @@ impl BlocksDal<'_, '_> { eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, - merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, @@ -1440,7 +1434,6 @@ impl BlocksDal<'_, '_> { eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, - merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, @@ -1509,7 +1502,6 @@ impl BlocksDal<'_, '_> { eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, - merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, @@ -1588,7 +1580,6 @@ impl BlocksDal<'_, '_> { eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, - merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index b96ff2bbe3a..42b5f6187cd 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -119,7 +119,6 @@ pub struct StorageL1Batch { pub priority_ops_onchain_data: Vec<Vec<u8>>, pub hash: Option<Vec<u8>>, - pub merkle_root_hash: Option<Vec<u8>>, pub commitment: Option<Vec<u8>>, pub meta_parameters_hash: Option<Vec<u8>>, pub
pass_through_data_hash: Option<Vec<u8>>, @@ -197,11 +196,6 @@ impl TryInto<L1BatchMetadata> for StorageL1Batch { .rollup_last_leaf_index .ok_or(StorageL1BatchConvertError::Incomplete)? as u64, - merkle_root_hash: H256::from_slice( - &self - .merkle_root_hash - .ok_or(StorageL1BatchConvertError::Incomplete)?, - ), initial_writes_compressed: self.compressed_initial_writes, repeated_writes_compressed: self.compressed_repeated_writes, l2_l1_merkle_root: H256::from_slice( diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index d7be7e7c168..723e8f4cbf1 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -47,7 +47,7 @@ impl<'a> CommitBatchInfoRollup<'a> { Token::FixedBytes( self.l1_batch_with_metadata .metadata - .merkle_root_hash + .root_hash .as_bytes() .to_vec(), ), @@ -113,7 +113,7 @@ impl<'a> CommitBatchInfoRollup<'a> { Token::FixedBytes( self.l1_batch_with_metadata .metadata - .merkle_root_hash + .root_hash .as_bytes() .to_vec(), ), @@ -259,7 +259,7 @@ impl<'a> CommitBatchInfoValidium<'a> { Token::FixedBytes( self.l1_batch_with_metadata .metadata - .merkle_root_hash + .root_hash .as_bytes() .to_vec(), ), @@ -325,7 +325,7 @@ impl<'a> CommitBatchInfoValidium<'a> { Token::FixedBytes( self.l1_batch_with_metadata .metadata - .merkle_root_hash + .root_hash .as_bytes() .to_vec(), ), diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index e03d91b4cbf..ecb60fa1e17 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -76,7 +76,6 @@ pub fn serialize_commitments(values: &[I]) -> Vec<u8> { pub struct L1BatchMetadata { pub root_hash: H256, pub rollup_last_leaf_index: u64, - pub merkle_root_hash: H256, pub initial_writes_compressed: Option<Vec<u8>>, pub repeated_writes_compressed: Option<Vec<u8>>, pub commitment: H256, diff --git a/core/lib/zksync_core/src/eth_sender/tests.rs b/core/lib/zksync_core/src/eth_sender/tests.rs index 1e23386aea2..d471d9ef105 100644 --- a/core/lib/zksync_core/src/eth_sender/tests.rs +++ b/core/lib/zksync_core/src/eth_sender/tests.rs @@ -203,7 +203,6 @@ fn default_l1_batch_metadata() -> L1BatchMetadata { L1BatchMetadata { root_hash: Default::default(), rollup_last_leaf_index: 0, - merkle_root_hash: Default::default(), initial_writes_compressed: Some(vec![]), repeated_writes_compressed: Some(vec![]), commitment: Default::default(), diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs index 38b297ff22b..713bc115f80 100644 --- a/core/lib/zksync_core/src/utils/testonly.rs +++ b/core/lib/zksync_core/src/utils/testonly.rs @@ -58,7 +58,6 @@ pub(crate) fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { L1BatchMetadata { root_hash: H256::from_low_u64_be(number.into()), rollup_last_leaf_index: u64::from(number) + 20, - merkle_root_hash: H256::from_low_u64_be(number.into()), initial_writes_compressed: Some(vec![]), repeated_writes_compressed: Some(vec![]), commitment: H256::from_low_u64_be(number.into()), From cea6578ffb037a2ad8476b6d3fb03416c1e55593 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Grze=C5=9Bkiewicz?= Date: Thu, 11 Apr 2024 11:42:18 +0200 Subject: [PATCH 15/29] feat: EN Pruning (#1418) Signed-off-by: tomg10 --- Cargo.lock | 45 +- Cargo.toml | 1 + checks-config/era.dic | 2 + core/bin/external_node/src/config/mod.rs | 17 +
core/bin/external_node/src/main.rs | 50 +- ...fb39721a54d7c763b6f3fde410595e8fbf85d.json | 22 + ...b989aabf87887d2d1c3db01b43b442241fca7.json | 23 + ...d0651174fd63cf6a8950fa6e7c4838ac5abbf.json | 26 + ...9fe7faa058a350d9de1970e5d5c341857a412.json | 23 + ...703791fdf19fd14abad08a037bbef4da12517.json | 22 + ...b7c1834960bffb558cfbdbfb014ea929c815f.json | 38 ++ ...18a8505c7544ff0e167731b867ff2abbe9879.json | 23 + ...70531d0b0a90e3c8c478c86c70e3d7e324579.json | 34 ++ ...6a498a0ab762c499dcc3cfbbfaacb3f0b17b8.json | 22 + ...a4b3bcf5d7ecae5e7f7abf8315faca6bc4917.json | 22 + ...b7d884827ff78a6dfd2085a41588d6d0a7523.json | 22 + ...aef121de4e55af741dccae40ccfd0bffc84e4.json | 23 + .../20240313171738_pruning_log.down.sql | 9 + .../20240313171738_pruning_log.up.sql | 16 + core/lib/dal/src/lib.rs | 12 +- core/lib/dal/src/pruning_dal.rs | 417 +++++++++++++++ core/lib/dal/src/pruning_dal_tests.rs | 477 +++++++++++++++++ core/lib/dal/src/storage_logs_dal.rs | 35 ++ core/lib/dal/src/tests/mod.rs | 23 +- core/lib/db_connection/src/connection_pool.rs | 2 +- core/lib/snapshots_applier/src/lib.rs | 29 + core/lib/zksync_core/Cargo.toml | 1 + .../src/api_server/execution_sandbox/mod.rs | 123 ++++- .../src/api_server/execution_sandbox/tests.rs | 34 +- .../src/api_server/tx_sender/mod.rs | 2 +- .../src/api_server/web3/namespaces/eth.rs | 24 +- .../src/api_server/web3/namespaces/zks.rs | 35 +- .../zksync_core/src/api_server/web3/pubsub.rs | 3 +- .../zksync_core/src/api_server/web3/state.rs | 31 +- core/lib/zksync_core/src/db_pruner/README.md | 17 + core/lib/zksync_core/src/db_pruner/metrics.rs | 24 + core/lib/zksync_core/src/db_pruner/mod.rs | 497 ++++++++++++++++++ .../src/db_pruner/prune_conditions.rs | 107 ++++ core/lib/zksync_core/src/lib.rs | 1 + core/lib/zksync_core/src/utils/mod.rs | 2 +- core/lib/zksync_core/src/utils/testonly.rs | 13 + .../tests/snapshot-recovery.test.ts | 5 +- infrastructure/zk/package.json | 3 +- infrastructure/zk/src/fmt.ts | 1 + infrastructure/zk/src/index.ts | 2 + infrastructure/zk/src/setup_en.ts | 202 +++++++ 46 files changed, 2484 insertions(+), 78 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-4b4b010b6a9e0e1a01aa61546e2fb39721a54d7c763b6f3fde410595e8fbf85d.json create mode 100644 core/lib/dal/.sqlx/query-550ddf6034c382f35b8cb7a6c93b989aabf87887d2d1c3db01b43b442241fca7.json create mode 100644 core/lib/dal/.sqlx/query-6179c3c1a0b2aeb01c0527f6ca4d0651174fd63cf6a8950fa6e7c4838ac5abbf.json create mode 100644 core/lib/dal/.sqlx/query-7511667f6b5b13c4062a29bff499fe7faa058a350d9de1970e5d5c341857a412.json create mode 100644 core/lib/dal/.sqlx/query-7ce9b4d426570d9bef3fddbcbd0703791fdf19fd14abad08a037bbef4da12517.json create mode 100644 core/lib/dal/.sqlx/query-9a31c42d03be1bc564e7aaa8574b7c1834960bffb558cfbdbfb014ea929c815f.json create mode 100644 core/lib/dal/.sqlx/query-ab13d2be03610a8372fdf03c9fa18a8505c7544ff0e167731b867ff2abbe9879.json create mode 100644 core/lib/dal/.sqlx/query-c0817ac0b9385cdc563d05d57d270531d0b0a90e3c8c478c86c70e3d7e324579.json create mode 100644 core/lib/dal/.sqlx/query-e30ea0d3a0b08761abff0de74516a498a0ab762c499dcc3cfbbfaacb3f0b17b8.json create mode 100644 core/lib/dal/.sqlx/query-e68bc95257fbb9d6c968c5557c0a4b3bcf5d7ecae5e7f7abf8315faca6bc4917.json create mode 100644 core/lib/dal/.sqlx/query-e6d86f3a4c87e2f70978971f9b5b7d884827ff78a6dfd2085a41588d6d0a7523.json create mode 100644 core/lib/dal/.sqlx/query-f79e5c76293efda1bcd2b07c4dbaef121de4e55af741dccae40ccfd0bffc84e4.json create mode 100644 core/lib/dal/migrations/20240313171738_pruning_log.down.sql 
create mode 100644 core/lib/dal/migrations/20240313171738_pruning_log.up.sql create mode 100644 core/lib/dal/src/pruning_dal.rs create mode 100644 core/lib/dal/src/pruning_dal_tests.rs create mode 100644 core/lib/zksync_core/src/db_pruner/README.md create mode 100644 core/lib/zksync_core/src/db_pruner/metrics.rs create mode 100644 core/lib/zksync_core/src/db_pruner/mod.rs create mode 100644 core/lib/zksync_core/src/db_pruner/prune_conditions.rs create mode 100644 infrastructure/zk/src/setup_en.ts diff --git a/Cargo.lock b/Cargo.lock index 59f014065fd..f5431601d65 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1865,6 +1865,25 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "env_filter" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" +dependencies = [ + "log", +] + +[[package]] +name = "env_logger" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" +dependencies = [ + "env_filter", + "log", +] + [[package]] name = "envy" version = "0.4.2" @@ -3399,9 +3418,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "logos" @@ -6550,6 +6569,27 @@ dependencies = [ "syn 2.0.38", ] +[[package]] +name = "test-log" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b319995299c65d522680decf80f2c108d85b861d81dfe340a10d16cee29d9e6" +dependencies = [ + "env_logger", + "test-log-macros", +] + +[[package]] +name = "test-log-macros" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8f546451eaa38373f549093fe9fd05e7d2bade739e2ddf834b9968621d60107" +dependencies = [ + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 2.0.38", +] + [[package]] name = "textwrap" version = "0.11.0" @@ -8326,6 +8366,7 @@ dependencies = [ "serde_yaml", "tempfile", "test-casing", + "test-log", "thiserror", "thread_local", "tokio", diff --git a/Cargo.toml b/Cargo.toml index d3894ee0dd3..eda4e152758 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -138,6 +138,7 @@ strum = "0.24" tempdir = "0.3.7" tempfile = "3.0.2" test-casing = "0.1.2" +test-log = "0.2.15" thiserror = "1" thread_local = "1.1" tikv-jemallocator = "0.5" diff --git a/checks-config/era.dic b/checks-config/era.dic index ccd80148636..c28423dd21b 100644 --- a/checks-config/era.dic +++ b/checks-config/era.dic @@ -425,6 +425,8 @@ SetChainId setChainId SetChainIdUpgrade state_transition_manager_contract +prunable +bytea // Names Vyper diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 2eb739ccfd4..34f435e2b25 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -290,6 +290,15 @@ pub(crate) struct OptionalENConfig { #[serde(default = "OptionalENConfig::default_l1_batch_commit_data_generator_mode")] pub l1_batch_commit_data_generator_mode: L1BatchCommitDataGeneratorMode, + + #[serde(default = "OptionalENConfig::default_snapshots_recovery_enabled")] + pub snapshots_recovery_enabled: bool, + + #[serde(default = "OptionalENConfig::default_pruning_chunk_size")] + pub pruning_chunk_size: u32, 
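+    // Illustrative sketch, assuming the `EN_`-prefixed env deserialization used for
+    // this config (variable names derived from the serde field names above): a node
+    // keeping one week of history could be configured roughly as
+    //
+    //   EN_SNAPSHOTS_RECOVERY_ENABLED=true
+    //   EN_PRUNING_CHUNK_SIZE=10
+    //   EN_PRUNING_DATA_RETENTION_HOURS=168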
+
+    /// If set, L1 batches will be pruned once they become older than this number of hours.
+    pub pruning_data_retention_hours: Option<u64>,
 }
 
 #[derive(Debug, Clone, PartialEq, Deserialize)]
@@ -425,6 +434,14 @@ impl OptionalENConfig {
         L1BatchCommitDataGeneratorMode::Rollup
     }
 
+    const fn default_snapshots_recovery_enabled() -> bool {
+        false
+    }
+
+    const fn default_pruning_chunk_size() -> u32 {
+        10
+    }
+
     pub fn polling_interval(&self) -> Duration {
         Duration::from_millis(self.polling_interval)
     }
diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs
index dfac57f0168..ff95b3a1704 100644
--- a/core/bin/external_node/src/main.rs
+++ b/core/bin/external_node/src/main.rs
@@ -24,6 +24,13 @@ use zksync_core::{
     commitment_generator::CommitmentGenerator,
     consensus,
     consistency_checker::ConsistencyChecker,
+    db_pruner::{
+        prune_conditions::{
+            L1BatchExistsCondition, L1BatchOlderThanPruneCondition,
+            NextL1BatchHasMetadataCondition, NextL1BatchWasExecutedCondition,
+        },
+        DbPruner, DbPrunerConfig,
+    },
     eth_sender::l1_batch_commit_data_generator::{
         L1BatchCommitDataGenerator, RollupModeL1BatchCommitDataGenerator,
         ValidiumModeL1BatchCommitDataGenerator,
     },
@@ -255,6 +262,39 @@ async fn run_core(
         }
     }));
 
+    if let Some(data_retention_hours) = config.optional.pruning_data_retention_hours {
+        let l1_batch_age_to_prune = Duration::from_secs(3600 * data_retention_hours);
+        tracing::info!(
+            "Configured pruning of batches after they become {l1_batch_age_to_prune:?} old"
+        );
+        let db_pruner = DbPruner::new(
+            DbPrunerConfig {
+                // Don't change this value without adjusting the API server pruning info cache max age.
+                soft_and_hard_pruning_time_delta: Duration::from_secs(60),
+                pruned_batch_chunk_size: config.optional.pruning_chunk_size,
+                next_iterations_delay: Duration::from_secs(30),
+            },
+            vec![
+                Arc::new(L1BatchExistsCondition {
+                    conn: connection_pool.clone(),
+                }),
+                Arc::new(NextL1BatchHasMetadataCondition {
+                    conn: connection_pool.clone(),
+                }),
+                Arc::new(NextL1BatchWasExecutedCondition {
+                    conn: connection_pool.clone(),
+                }),
+                Arc::new(L1BatchOlderThanPruneCondition {
+                    minimal_age: l1_batch_age_to_prune,
+                    conn: connection_pool.clone(),
+                }),
+            ],
+        )?;
+        task_handles.push(tokio::spawn(
+            db_pruner.run(connection_pool.clone(), stop_receiver.clone()),
+        ));
+    }
+
     let reorg_detector = ReorgDetector::new(main_node_client.clone(), connection_pool.clone());
     app_health.insert_component(reorg_detector.health_check().clone());
     task_handles.push(tokio::spawn({
@@ -663,13 +703,7 @@ struct Cli {
     /// do not use unless you know what you're doing.
     #[arg(long)]
     enable_consensus: bool,
-    /// Enables application-level snapshot recovery. Required to start a node that was recovered from a snapshot,
-    /// or to initialize a node from a snapshot. Has no effect if a node that was initialized from a Postgres dump
-    /// or was synced from genesis.
-    ///
-    /// This is an experimental and incomplete feature; do not use unless you know what you're doing.
-    #[arg(long)]
-    enable_snapshots_recovery: bool,
+    /// Comma-separated list of components to launch.
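+    // The removed `--enable-snapshots-recovery` CLI flag is superseded by the
+    // `snapshots_recovery_enabled` config option introduced above; `main()` now
+    // reads `config.optional.snapshots_recovery_enabled` instead (see below).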
#[arg(long, default_value = "all")] components: ComponentsToRun, @@ -840,7 +874,7 @@ async fn main() -> anyhow::Result<()> { main_node_client.clone(), &app_health, config.remote.l2_chain_id, - opt.enable_snapshots_recovery, + config.optional.snapshots_recovery_enabled, ) .await?; let sigint_receiver = setup_sigint_handler(); diff --git a/core/lib/dal/.sqlx/query-4b4b010b6a9e0e1a01aa61546e2fb39721a54d7c763b6f3fde410595e8fbf85d.json b/core/lib/dal/.sqlx/query-4b4b010b6a9e0e1a01aa61546e2fb39721a54d7c763b6f3fde410595e8fbf85d.json new file mode 100644 index 00000000000..a7f076acaa8 --- /dev/null +++ b/core/lib/dal/.sqlx/query-4b4b010b6a9e0e1a01aa61546e2fb39721a54d7c763b6f3fde410595e8fbf85d.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n deleted AS (\n DELETE FROM l2_to_l1_logs\n WHERE\n miniblock_number <= $1\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "4b4b010b6a9e0e1a01aa61546e2fb39721a54d7c763b6f3fde410595e8fbf85d" +} diff --git a/core/lib/dal/.sqlx/query-550ddf6034c382f35b8cb7a6c93b989aabf87887d2d1c3db01b43b442241fca7.json b/core/lib/dal/.sqlx/query-550ddf6034c382f35b8cb7a6c93b989aabf87887d2d1c3db01b43b442241fca7.json new file mode 100644 index 00000000000..686366fa06e --- /dev/null +++ b/core/lib/dal/.sqlx/query-550ddf6034c382f35b8cb7a6c93b989aabf87887d2d1c3db01b43b442241fca7.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n deleted AS (\n DELETE FROM storage_logs USING (\n SELECT\n hashed_key,\n MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n GROUP BY\n hashed_key\n ) AS last_storage_logs\n WHERE\n storage_logs.miniblock_number BETWEEN $1 AND $2\n AND last_storage_logs.hashed_key = storage_logs.hashed_key\n AND (\n storage_logs.miniblock_number != last_storage_logs.op[1]\n OR storage_logs.operation_number != last_storage_logs.op[2]\n )\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "550ddf6034c382f35b8cb7a6c93b989aabf87887d2d1c3db01b43b442241fca7" +} diff --git a/core/lib/dal/.sqlx/query-6179c3c1a0b2aeb01c0527f6ca4d0651174fd63cf6a8950fa6e7c4838ac5abbf.json b/core/lib/dal/.sqlx/query-6179c3c1a0b2aeb01c0527f6ca4d0651174fd63cf6a8950fa6e7c4838ac5abbf.json new file mode 100644 index 00000000000..443dbf36ea1 --- /dev/null +++ b/core/lib/dal/.sqlx/query-6179c3c1a0b2aeb01c0527f6ca4d0651174fd63cf6a8950fa6e7c4838ac5abbf.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n pruning_log (\n pruned_l1_batch,\n pruned_miniblock,\n TYPE,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, $3, NOW(), NOW())\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + { + "Custom": { + "name": "prune_type", + "kind": { + "Enum": [ + "Soft", + "Hard" + ] + } + } + } + ] + }, + "nullable": [] + }, + "hash": "6179c3c1a0b2aeb01c0527f6ca4d0651174fd63cf6a8950fa6e7c4838ac5abbf" +} diff --git a/core/lib/dal/.sqlx/query-7511667f6b5b13c4062a29bff499fe7faa058a350d9de1970e5d5c341857a412.json b/core/lib/dal/.sqlx/query-7511667f6b5b13c4062a29bff499fe7faa058a350d9de1970e5d5c341857a412.json 
new file mode 100644 index 00000000000..1fe545668e9 --- /dev/null +++ b/core/lib/dal/.sqlx/query-7511667f6b5b13c4062a29bff499fe7faa058a350d9de1970e5d5c341857a412.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n deleted AS (\n DELETE FROM storage_logs USING (\n SELECT\n *\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ) AS batches_to_prune\n WHERE\n storage_logs.miniblock_number < $1\n AND batches_to_prune.hashed_key = storage_logs.hashed_key\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "7511667f6b5b13c4062a29bff499fe7faa058a350d9de1970e5d5c341857a412" +} diff --git a/core/lib/dal/.sqlx/query-7ce9b4d426570d9bef3fddbcbd0703791fdf19fd14abad08a037bbef4da12517.json b/core/lib/dal/.sqlx/query-7ce9b4d426570d9bef3fddbcbd0703791fdf19fd14abad08a037bbef4da12517.json new file mode 100644 index 00000000000..b04987dbb5d --- /dev/null +++ b/core/lib/dal/.sqlx/query-7ce9b4d426570d9bef3fddbcbd0703791fdf19fd14abad08a037bbef4da12517.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n deleted AS (\n DELETE FROM miniblocks\n WHERE\n number <= $1\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "7ce9b4d426570d9bef3fddbcbd0703791fdf19fd14abad08a037bbef4da12517" +} diff --git a/core/lib/dal/.sqlx/query-9a31c42d03be1bc564e7aaa8574b7c1834960bffb558cfbdbfb014ea929c815f.json b/core/lib/dal/.sqlx/query-9a31c42d03be1bc564e7aaa8574b7c1834960bffb558cfbdbfb014ea929c815f.json new file mode 100644 index 00000000000..fd7c140dda5 --- /dev/null +++ b/core/lib/dal/.sqlx/query-9a31c42d03be1bc564e7aaa8574b7c1834960bffb558cfbdbfb014ea929c815f.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n soft.pruned_l1_batch AS last_soft_pruned_l1_batch,\n soft.pruned_miniblock AS last_soft_pruned_miniblock,\n hard.pruned_l1_batch AS last_hard_pruned_l1_batch,\n hard.pruned_miniblock AS last_hard_pruned_miniblock\n FROM\n (\n SELECT\n 1\n ) AS dummy\n LEFT JOIN (\n SELECT\n pruned_l1_batch,\n pruned_miniblock\n FROM\n pruning_log\n WHERE\n TYPE = 'Soft'\n ORDER BY\n pruned_l1_batch DESC\n LIMIT\n 1\n ) AS soft ON TRUE\n LEFT JOIN (\n SELECT\n pruned_l1_batch,\n pruned_miniblock\n FROM\n pruning_log\n WHERE\n TYPE = 'Hard'\n ORDER BY\n pruned_l1_batch DESC\n LIMIT\n 1\n ) AS hard ON TRUE;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_soft_pruned_l1_batch", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "last_soft_pruned_miniblock", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "last_hard_pruned_l1_batch", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "last_hard_pruned_miniblock", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "9a31c42d03be1bc564e7aaa8574b7c1834960bffb558cfbdbfb014ea929c815f" +} diff --git a/core/lib/dal/.sqlx/query-ab13d2be03610a8372fdf03c9fa18a8505c7544ff0e167731b867ff2abbe9879.json b/core/lib/dal/.sqlx/query-ab13d2be03610a8372fdf03c9fa18a8505c7544ff0e167731b867ff2abbe9879.json new file mode 100644 index 00000000000..013b881eab5 --- /dev/null +++ 
b/core/lib/dal/.sqlx/query-ab13d2be03610a8372fdf03c9fa18a8505c7544ff0e167731b867ff2abbe9879.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n deleted AS (\n DELETE FROM call_traces USING (\n SELECT\n *\n FROM\n transactions\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ) AS matching_transactions\n WHERE\n matching_transactions.hash = call_traces.tx_hash\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "ab13d2be03610a8372fdf03c9fa18a8505c7544ff0e167731b867ff2abbe9879" +} diff --git a/core/lib/dal/.sqlx/query-c0817ac0b9385cdc563d05d57d270531d0b0a90e3c8c478c86c70e3d7e324579.json b/core/lib/dal/.sqlx/query-c0817ac0b9385cdc563d05d57d270531d0b0a90e3c8c478c86c70e3d7e324579.json new file mode 100644 index 00000000000..136b40c5471 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c0817ac0b9385cdc563d05d57d270531d0b0a90e3c8c478c86c70e3d7e324579.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n hashed_key,\n value,\n operation_number\n FROM\n storage_logs\n WHERE\n miniblock_number = $1\n ORDER BY\n operation_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hashed_key", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "value", + "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "operation_number", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "c0817ac0b9385cdc563d05d57d270531d0b0a90e3c8c478c86c70e3d7e324579" +} diff --git a/core/lib/dal/.sqlx/query-e30ea0d3a0b08761abff0de74516a498a0ab762c499dcc3cfbbfaacb3f0b17b8.json b/core/lib/dal/.sqlx/query-e30ea0d3a0b08761abff0de74516a498a0ab762c499dcc3cfbbfaacb3f0b17b8.json new file mode 100644 index 00000000000..8bed11c40a0 --- /dev/null +++ b/core/lib/dal/.sqlx/query-e30ea0d3a0b08761abff0de74516a498a0ab762c499dcc3cfbbfaacb3f0b17b8.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n MIN(number) AS first_miniblock_to_prune\n FROM\n miniblocks\n WHERE\n l1_batch_number <= $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "first_miniblock_to_prune", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "e30ea0d3a0b08761abff0de74516a498a0ab762c499dcc3cfbbfaacb3f0b17b8" +} diff --git a/core/lib/dal/.sqlx/query-e68bc95257fbb9d6c968c5557c0a4b3bcf5d7ecae5e7f7abf8315faca6bc4917.json b/core/lib/dal/.sqlx/query-e68bc95257fbb9d6c968c5557c0a4b3bcf5d7ecae5e7f7abf8315faca6bc4917.json new file mode 100644 index 00000000000..289045d5555 --- /dev/null +++ b/core/lib/dal/.sqlx/query-e68bc95257fbb9d6c968c5557c0a4b3bcf5d7ecae5e7f7abf8315faca6bc4917.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n deleted AS (\n DELETE FROM l1_batches\n WHERE\n number <= $1\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "e68bc95257fbb9d6c968c5557c0a4b3bcf5d7ecae5e7f7abf8315faca6bc4917" +} diff --git a/core/lib/dal/.sqlx/query-e6d86f3a4c87e2f70978971f9b5b7d884827ff78a6dfd2085a41588d6d0a7523.json 
b/core/lib/dal/.sqlx/query-e6d86f3a4c87e2f70978971f9b5b7d884827ff78a6dfd2085a41588d6d0a7523.json new file mode 100644 index 00000000000..c7a59137dfe --- /dev/null +++ b/core/lib/dal/.sqlx/query-e6d86f3a4c87e2f70978971f9b5b7d884827ff78a6dfd2085a41588d6d0a7523.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n deleted AS (\n DELETE FROM events\n WHERE\n miniblock_number <= $1\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "e6d86f3a4c87e2f70978971f9b5b7d884827ff78a6dfd2085a41588d6d0a7523" +} diff --git a/core/lib/dal/.sqlx/query-f79e5c76293efda1bcd2b07c4dbaef121de4e55af741dccae40ccfd0bffc84e4.json b/core/lib/dal/.sqlx/query-f79e5c76293efda1bcd2b07c4dbaef121de4e55af741dccae40ccfd0bffc84e4.json new file mode 100644 index 00000000000..991193a5f15 --- /dev/null +++ b/core/lib/dal/.sqlx/query-f79e5c76293efda1bcd2b07c4dbaef121de4e55af741dccae40ccfd0bffc84e4.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n updated AS (\n UPDATE transactions\n SET\n input = NULL,\n data = '{}',\n execution_info = '{}',\n updated_at = NOW()\n WHERE\n miniblock_number BETWEEN $1 AND $2\n AND upgrade_id IS NULL\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n updated\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "f79e5c76293efda1bcd2b07c4dbaef121de4e55af741dccae40ccfd0bffc84e4" +} diff --git a/core/lib/dal/migrations/20240313171738_pruning_log.down.sql b/core/lib/dal/migrations/20240313171738_pruning_log.down.sql new file mode 100644 index 00000000000..70d3342a505 --- /dev/null +++ b/core/lib/dal/migrations/20240313171738_pruning_log.down.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS pruning_log; + +DROP TYPE IF EXISTS prune_type; + +ALTER TABLE transactions + ADD FOREIGN KEY (miniblock_number) REFERENCES miniblocks; + +ALTER TABLE transactions + ADD FOREIGN KEY (l1_batch_number) REFERENCES l1_batches; diff --git a/core/lib/dal/migrations/20240313171738_pruning_log.up.sql b/core/lib/dal/migrations/20240313171738_pruning_log.up.sql new file mode 100644 index 00000000000..3589ac4171b --- /dev/null +++ b/core/lib/dal/migrations/20240313171738_pruning_log.up.sql @@ -0,0 +1,16 @@ +CREATE TYPE prune_type AS ENUM ('Soft', 'Hard'); + +CREATE TABLE pruning_log +( + pruned_l1_batch BIGINT NOT NULL, + pruned_miniblock BIGINT NOT NULL, + type prune_type NOT NULL, + + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + PRIMARY KEY (type, pruned_l1_batch) +); + +ALTER TABLE transactions DROP CONSTRAINT IF EXISTS transactions_miniblock_number_fkey; + +ALTER TABLE transactions DROP CONSTRAINT IF EXISTS transactions_l1_batch_number_fkey; diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index 58a5c301a49..33f27d9b9bd 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -17,7 +17,7 @@ use crate::{ contract_verification_dal::ContractVerificationDal, eth_sender_dal::EthSenderDal, events_dal::EventsDal, events_web3_dal::EventsWeb3Dal, factory_deps_dal::FactoryDepsDal, proof_generation_dal::ProofGenerationDal, protocol_versions_dal::ProtocolVersionsDal, - protocol_versions_web3_dal::ProtocolVersionsWeb3Dal, + protocol_versions_web3_dal::ProtocolVersionsWeb3Dal, 
pruning_dal::PruningDal, snapshot_recovery_dal::SnapshotRecoveryDal, snapshots_creator_dal::SnapshotsCreatorDal, snapshots_dal::SnapshotsDal, storage_logs_dal::StorageLogsDal, storage_logs_dedup_dal::StorageLogsDedupDal, storage_web3_dal::StorageWeb3Dal, @@ -39,6 +39,7 @@ mod models; pub mod proof_generation_dal; pub mod protocol_versions_dal; pub mod protocol_versions_web3_dal; +pub mod pruning_dal; pub mod snapshot_recovery_dal; pub mod snapshots_creator_dal; pub mod snapshots_dal; @@ -58,6 +59,9 @@ pub mod metrics; #[cfg(test)] mod tests; +#[cfg(test)] +mod pruning_dal_tests; + // This module is private and serves as a way to seal the trait. mod private { pub trait Sealed {} @@ -120,6 +124,8 @@ where fn snapshots_creator_dal(&mut self) -> SnapshotsCreatorDal<'_, 'a>; fn snapshot_recovery_dal(&mut self) -> SnapshotRecoveryDal<'_, 'a>; + + fn pruning_dal(&mut self) -> PruningDal<'_, 'a>; } #[derive(Clone, Debug)] @@ -230,4 +236,8 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { fn snapshot_recovery_dal(&mut self) -> SnapshotRecoveryDal<'_, 'a> { SnapshotRecoveryDal { storage: self } } + + fn pruning_dal(&mut self) -> PruningDal<'_, 'a> { + PruningDal { storage: self } + } } diff --git a/core/lib/dal/src/pruning_dal.rs b/core/lib/dal/src/pruning_dal.rs new file mode 100644 index 00000000000..e52e856c862 --- /dev/null +++ b/core/lib/dal/src/pruning_dal.rs @@ -0,0 +1,417 @@ +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; +use zksync_types::{L1BatchNumber, MiniblockNumber}; + +use crate::Core; + +#[derive(Debug)] +pub struct PruningDal<'a, 'c> { + pub(crate) storage: &'a mut Connection<'c, Core>, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +pub struct PruningInfo { + pub last_soft_pruned_l1_batch: Option, + pub last_soft_pruned_miniblock: Option, + pub last_hard_pruned_l1_batch: Option, + pub last_hard_pruned_miniblock: Option, +} + +#[derive(Debug, sqlx::Type)] +#[sqlx(type_name = "prune_type")] +pub enum PruneType { + Soft, + Hard, +} + +impl PruningDal<'_, '_> { + pub async fn get_pruning_info(&mut self) -> DalResult { + let row = sqlx::query!( + r#" + SELECT + soft.pruned_l1_batch AS last_soft_pruned_l1_batch, + soft.pruned_miniblock AS last_soft_pruned_miniblock, + hard.pruned_l1_batch AS last_hard_pruned_l1_batch, + hard.pruned_miniblock AS last_hard_pruned_miniblock + FROM + ( + SELECT + 1 + ) AS dummy + LEFT JOIN ( + SELECT + pruned_l1_batch, + pruned_miniblock + FROM + pruning_log + WHERE + TYPE = 'Soft' + ORDER BY + pruned_l1_batch DESC + LIMIT + 1 + ) AS soft ON TRUE + LEFT JOIN ( + SELECT + pruned_l1_batch, + pruned_miniblock + FROM + pruning_log + WHERE + TYPE = 'Hard' + ORDER BY + pruned_l1_batch DESC + LIMIT + 1 + ) AS hard ON TRUE; + "# + ) + .instrument("get_last_soft_pruned_batch") + .report_latency() + .fetch_one(self.storage) + .await?; + Ok(PruningInfo { + last_soft_pruned_l1_batch: row + .last_soft_pruned_l1_batch + .map(|x| L1BatchNumber(x as u32)), + last_soft_pruned_miniblock: row + .last_soft_pruned_miniblock + .map(|x| MiniblockNumber(x as u32)), + last_hard_pruned_l1_batch: row + .last_hard_pruned_l1_batch + .map(|x| L1BatchNumber(x as u32)), + last_hard_pruned_miniblock: row + .last_hard_pruned_miniblock + .map(|x| MiniblockNumber(x as u32)), + }) + } + + pub async fn soft_prune_batches_range( + &mut self, + last_l1_batch_to_prune: L1BatchNumber, + last_miniblock_to_prune: MiniblockNumber, + ) -> DalResult<()> { + sqlx::query!( + r#" + INSERT INTO + pruning_log ( + pruned_l1_batch, + pruned_miniblock, + TYPE, 
+ created_at, + updated_at + ) + VALUES + ($1, $2, $3, NOW(), NOW()) + "#, + i64::from(last_l1_batch_to_prune.0), + i64::from(last_miniblock_to_prune.0), + PruneType::Soft as PruneType, + ) + .instrument("soft_prune_batches_range#insert_pruning_log") + .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) + .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) + .with_arg("prune_type", &PruneType::Soft) + .report_latency() + .execute(self.storage) + .await?; + + Ok(()) + } + + pub async fn hard_prune_batches_range( + &mut self, + last_l1_batch_to_prune: L1BatchNumber, + last_miniblock_to_prune: MiniblockNumber, + ) -> DalResult<()> { + let row = sqlx::query!( + r#" + SELECT + MIN(number) AS first_miniblock_to_prune + FROM + miniblocks + WHERE + l1_batch_number <= $1 + "#, + i64::from(last_l1_batch_to_prune.0), + ) + .instrument("hard_prune_batches_range#get_miniblocks_range") + .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) + .report_latency() + .fetch_one(self.storage) + .await?; + + // we don't have any miniblocks available when recovering from a snapshot + if row.first_miniblock_to_prune.is_some() { + let first_miniblock_to_prune = + MiniblockNumber(row.first_miniblock_to_prune.unwrap() as u32); + + let deleted_events = sqlx::query!( + r#" + WITH + deleted AS ( + DELETE FROM events + WHERE + miniblock_number <= $1 + RETURNING + * + ) + SELECT + COUNT(*) AS "count!" + FROM + deleted + "#, + i64::from(last_miniblock_to_prune.0), + ) + .instrument("hard_prune_batches_range#delete_events") + .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) + .report_latency() + .fetch_one(self.storage) + .await?; + + let deleted_l2_to_l1_logs = sqlx::query!( + r#" + WITH + deleted AS ( + DELETE FROM l2_to_l1_logs + WHERE + miniblock_number <= $1 + RETURNING + * + ) + SELECT + COUNT(*) AS "count!" + FROM + deleted + "#, + i64::from(last_miniblock_to_prune.0), + ) + .instrument("hard_prune_batches_range#delete_l2_to_l1_logs") + .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) + .report_latency() + .fetch_one(self.storage) + .await?; + + let deleted_call_traces = sqlx::query!( + r#" + WITH + deleted AS ( + DELETE FROM call_traces USING ( + SELECT + * + FROM + transactions + WHERE + miniblock_number BETWEEN $1 AND $2 + ) AS matching_transactions + WHERE + matching_transactions.hash = call_traces.tx_hash + RETURNING + * + ) + SELECT + COUNT(*) AS "count!" + FROM + deleted + "#, + i64::from(first_miniblock_to_prune.0), + i64::from(last_miniblock_to_prune.0), + ) + .instrument("hard_prune_batches_range#delete_call_traces") + .with_arg("first_miniblock_to_prune", &first_miniblock_to_prune) + .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) + .report_latency() + .fetch_one(self.storage) + .await?; + + sqlx::query!( + r#" + WITH + updated AS ( + UPDATE transactions + SET + input = NULL, + data = '{}', + execution_info = '{}', + updated_at = NOW() + WHERE + miniblock_number BETWEEN $1 AND $2 + AND upgrade_id IS NULL + RETURNING + * + ) + SELECT + COUNT(*) AS "count!" 
+ FROM + updated + "#, + i64::from(first_miniblock_to_prune.0), + i64::from(last_miniblock_to_prune.0), + ) + .instrument("hard_prune_batches_range#clear_transactions_references") + .with_arg("first_miniblock_to_prune", &first_miniblock_to_prune) + .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) + .report_latency() + .fetch_one(self.storage) + .await?; + + //The deleting of logs is split into two queries to make it faster, + // only the first query has to go through all previous logs + // and the query optimizer should be happy with it + let deleted_storage_logs_from_past_batches = sqlx::query!( + r#" + WITH + deleted AS ( + DELETE FROM storage_logs USING ( + SELECT + * + FROM + storage_logs + WHERE + miniblock_number BETWEEN $1 AND $2 + ) AS batches_to_prune + WHERE + storage_logs.miniblock_number < $1 + AND batches_to_prune.hashed_key = storage_logs.hashed_key + RETURNING + * + ) + SELECT + COUNT(*) AS "count!" + FROM + deleted + "#, + i64::from(first_miniblock_to_prune.0), + i64::from(last_miniblock_to_prune.0), + ) + .instrument("hard_prune_batches_range#delete_overriden_storage_logs_from_past_batches") + .with_arg("first_miniblock_to_prune", &first_miniblock_to_prune) + .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) + .report_latency() + .fetch_one(self.storage) + .await?; + + let deleted_storage_logs_from_pruned_batches = sqlx::query!( + r#" + WITH + deleted AS ( + DELETE FROM storage_logs USING ( + SELECT + hashed_key, + MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op + FROM + storage_logs + WHERE + miniblock_number BETWEEN $1 AND $2 + GROUP BY + hashed_key + ) AS last_storage_logs + WHERE + storage_logs.miniblock_number BETWEEN $1 AND $2 + AND last_storage_logs.hashed_key = storage_logs.hashed_key + AND ( + storage_logs.miniblock_number != last_storage_logs.op[1] + OR storage_logs.operation_number != last_storage_logs.op[2] + ) + RETURNING + * + ) + SELECT + COUNT(*) AS "count!" + FROM + deleted + "#, + i64::from(first_miniblock_to_prune.0), + i64::from(last_miniblock_to_prune.0), + ) + .instrument( + "hard_prune_batches_range#delete_overriden_storage_logs_from_pruned_batches", + ) + .with_arg("first_miniblock_to_prune", &first_miniblock_to_prune) + .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) + .report_latency() + .fetch_one(self.storage) + .await?; + + let deleted_l1_batches = sqlx::query!( + r#" + WITH + deleted AS ( + DELETE FROM l1_batches + WHERE + number <= $1 + RETURNING + * + ) + SELECT + COUNT(*) AS "count!" + FROM + deleted + "#, + i64::from(last_l1_batch_to_prune.0), + ) + .instrument("hard_prune_batches_range#delete_l1_batches") + .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) + .report_latency() + .fetch_one(self.storage) + .await?; + + let deleted_miniblocks = sqlx::query!( + r#" + WITH + deleted AS ( + DELETE FROM miniblocks + WHERE + number <= $1 + RETURNING + * + ) + SELECT + COUNT(*) AS "count!" 
+ FROM + deleted + "#, + i64::from(last_miniblock_to_prune.0), + ) + .instrument("hard_prune_batches_range#delete_miniblocks") + .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) + .report_latency() + .fetch_one(self.storage) + .await?; + + tracing::info!("Performed pruning of database, deleted {} l1_batches, {} miniblocks, {} storage_logs, {} events, {} call traces, {} l2_to_l1_logs", + deleted_l1_batches.count, + deleted_miniblocks.count, + deleted_storage_logs_from_past_batches.count + deleted_storage_logs_from_pruned_batches.count, + deleted_events.count, + deleted_call_traces.count, + deleted_l2_to_l1_logs.count) + } + + sqlx::query!( + r#" + INSERT INTO + pruning_log ( + pruned_l1_batch, + pruned_miniblock, + TYPE, + created_at, + updated_at + ) + VALUES + ($1, $2, $3, NOW(), NOW()) + "#, + i64::from(last_l1_batch_to_prune.0), + i64::from(last_miniblock_to_prune.0), + PruneType::Hard as PruneType + ) + .instrument("soft_prune_batches_range#insert_pruning_log") + .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) + .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) + .with_arg("prune_type", &PruneType::Hard) + .report_latency() + .execute(self.storage) + .await?; + + Ok(()) + } +} diff --git a/core/lib/dal/src/pruning_dal_tests.rs b/core/lib/dal/src/pruning_dal_tests.rs new file mode 100644 index 00000000000..636bf294bde --- /dev/null +++ b/core/lib/dal/src/pruning_dal_tests.rs @@ -0,0 +1,477 @@ +use crate::pruning_dal::PruningInfo; + +#[cfg(test)] +mod tests { + use std::ops; + + use zksync_contracts::BaseSystemContractsHashes; + use zksync_db_connection::connection::Connection; + use zksync_types::{ + block::L1BatchHeader, + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + tx::IncludedTxLocation, + AccountTreeId, Address, L1BatchNumber, MiniblockNumber, ProtocolVersion, ProtocolVersionId, + StorageKey, StorageLog, H256, + }; + + use super::*; + use crate::{ + tests::{create_miniblock_header, mock_l2_to_l1_log, mock_vm_event}, + ConnectionPool, Core, CoreDal, + }; + + async fn insert_miniblock( + conn: &mut Connection<'_, Core>, + miniblock_number: MiniblockNumber, + l1_batch_number: L1BatchNumber, + ) { + let miniblock1 = create_miniblock_header(miniblock_number.0); + conn.blocks_dal() + .insert_miniblock(&miniblock1) + .await + .unwrap(); + + conn.blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(l1_batch_number) + .await + .unwrap(); + + insert_events(conn, miniblock_number).await; + insert_l2_to_l1_logs(conn, miniblock_number).await; + } + + async fn insert_l2_to_l1_logs( + conn: &mut Connection<'_, Core>, + miniblock_number: MiniblockNumber, + ) { + let first_location = IncludedTxLocation { + tx_hash: H256([1; 32]), + tx_index_in_miniblock: 0, + tx_initiator_address: Address::default(), + }; + let first_logs = vec![mock_l2_to_l1_log(), mock_l2_to_l1_log()]; + let second_location = IncludedTxLocation { + tx_hash: H256([2; 32]), + tx_index_in_miniblock: 1, + tx_initiator_address: Address::default(), + }; + let second_logs = vec![ + mock_l2_to_l1_log(), + mock_l2_to_l1_log(), + mock_l2_to_l1_log(), + ]; + let all_logs = vec![ + (first_location, first_logs.iter().collect()), + (second_location, second_logs.iter().collect()), + ]; + conn.events_dal() + .save_user_l2_to_l1_logs(miniblock_number, &all_logs) + .await + .unwrap(); + } + + async fn insert_events(conn: &mut Connection<'_, Core>, miniblock_number: MiniblockNumber) { + let first_location = IncludedTxLocation { + tx_hash: H256([1; 32]), + tx_index_in_miniblock: 0, + tx_initiator_address: 
Address::default(), + }; + let first_events = vec![mock_vm_event(0), mock_vm_event(1)]; + let second_location = IncludedTxLocation { + tx_hash: H256([2; 32]), + tx_index_in_miniblock: 1, + tx_initiator_address: Address::default(), + }; + let second_events = vec![mock_vm_event(2), mock_vm_event(3), mock_vm_event(4)]; + let all_events = vec![ + (first_location, first_events.iter().collect()), + (second_location, second_events.iter().collect()), + ]; + conn.events_dal() + .save_events(miniblock_number, &all_events) + .await + .unwrap(); + } + + async fn insert_l1_batch(conn: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber) { + let mut header = L1BatchHeader::new( + l1_batch_number, + 100, + BaseSystemContractsHashes { + bootloader: H256::repeat_byte(1), + default_aa: H256::repeat_byte(42), + }, + ProtocolVersionId::latest(), + ); + header.l1_tx_count = 3; + header.l2_tx_count = 5; + header.l2_to_l1_logs.push(UserL2ToL1Log(L2ToL1Log { + shard_id: 0, + is_service: false, + tx_number_in_block: 2, + sender: Address::repeat_byte(2), + key: H256::repeat_byte(3), + value: H256::zero(), + })); + header.l2_to_l1_messages.push(vec![22; 22]); + header.l2_to_l1_messages.push(vec![33; 33]); + + conn.blocks_dal() + .insert_mock_l1_batch(&header) + .await + .unwrap(); + } + + async fn insert_realistic_l1_batches(conn: &mut Connection<'_, Core>, l1_batches_count: u32) { + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + for l1_batch_number in 0..l1_batches_count { + insert_l1_batch(conn, L1BatchNumber(l1_batch_number)).await; + insert_miniblock( + conn, + MiniblockNumber(l1_batch_number * 2), + L1BatchNumber(l1_batch_number), + ) + .await; + insert_miniblock( + conn, + MiniblockNumber(l1_batch_number * 2 + 1), + L1BatchNumber(l1_batch_number), + ) + .await; + } + } + + async fn assert_l1_batch_objects_exists( + conn: &mut Connection<'_, Core>, + l1_batches_range: ops::RangeInclusive, + ) { + for l1_batch_number in l1_batches_range.start().0..l1_batches_range.end().0 { + let l1_batch_number = L1BatchNumber(l1_batch_number); + assert!(conn + .blocks_dal() + .get_miniblock_header(MiniblockNumber(l1_batch_number.0 * 2)) + .await + .unwrap() + .is_some()); + + assert!(conn + .blocks_dal() + .get_miniblock_header(MiniblockNumber(l1_batch_number.0 * 2 + 1)) + .await + .unwrap() + .is_some()); + + assert!(conn + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await + .unwrap() + .is_some()); + } + } + + async fn assert_l1_batch_objects_dont_exist( + conn: &mut Connection<'_, Core>, + l1_batches_range: ops::RangeInclusive, + ) { + for l1_batch_number in l1_batches_range.start().0..l1_batches_range.end().0 { + let l1_batch_number = L1BatchNumber(l1_batch_number); + assert!(conn + .blocks_dal() + .get_miniblock_header(MiniblockNumber(l1_batch_number.0 * 2)) + .await + .unwrap() + .is_none()); + assert_eq!( + 0, + conn.storage_logs_dal() + .get_miniblock_storage_logs(MiniblockNumber(l1_batch_number.0 * 2)) + .await + .len() + ); + + assert!(conn + .blocks_dal() + .get_miniblock_header(MiniblockNumber(l1_batch_number.0 * 2 + 1)) + .await + .unwrap() + .is_none()); + assert_eq!( + 0, + conn.storage_logs_dal() + .get_miniblock_storage_logs(MiniblockNumber(l1_batch_number.0 * 2 + 1)) + .await + .len() + ); + + assert!(conn + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await + .unwrap() + .is_none()); + } + } + + #[tokio::test] + async fn soft_pruning_works() { + let pool = ConnectionPool::::test_pool().await; + let mut conn = 
pool.connection().await.unwrap(); + let mut transaction = conn.start_transaction().await.unwrap(); + + assert_eq!( + PruningInfo { + last_soft_pruned_miniblock: None, + last_soft_pruned_l1_batch: None, + last_hard_pruned_miniblock: None, + last_hard_pruned_l1_batch: None + }, + transaction.pruning_dal().get_pruning_info().await.unwrap() + ); + + transaction + .pruning_dal() + .soft_prune_batches_range(L1BatchNumber(5), MiniblockNumber(11)) + .await + .unwrap(); + assert_eq!( + PruningInfo { + last_soft_pruned_miniblock: Some(MiniblockNumber(11)), + last_soft_pruned_l1_batch: Some(L1BatchNumber(5)), + last_hard_pruned_miniblock: None, + last_hard_pruned_l1_batch: None + }, + transaction.pruning_dal().get_pruning_info().await.unwrap() + ); + + transaction + .pruning_dal() + .soft_prune_batches_range(L1BatchNumber(10), MiniblockNumber(21)) + .await + .unwrap(); + assert_eq!( + PruningInfo { + last_soft_pruned_miniblock: Some(MiniblockNumber(21)), + last_soft_pruned_l1_batch: Some(L1BatchNumber(10)), + last_hard_pruned_miniblock: None, + last_hard_pruned_l1_batch: None + }, + transaction.pruning_dal().get_pruning_info().await.unwrap() + ); + + transaction + .pruning_dal() + .hard_prune_batches_range(L1BatchNumber(10), MiniblockNumber(21)) + .await + .unwrap(); + assert_eq!( + PruningInfo { + last_soft_pruned_miniblock: Some(MiniblockNumber(21)), + last_soft_pruned_l1_batch: Some(L1BatchNumber(10)), + last_hard_pruned_miniblock: Some(MiniblockNumber(21)), + last_hard_pruned_l1_batch: Some(L1BatchNumber(10)) + }, + transaction.pruning_dal().get_pruning_info().await.unwrap() + ); + } + + fn random_storage_log(hashed_key_seed: u8, value_seed: u8) -> StorageLog { + let key = StorageKey::new( + AccountTreeId::from_fixed_bytes([hashed_key_seed; 20]), + H256([hashed_key_seed; 32]), + ); + StorageLog::new_write_log(key, H256([value_seed; 32])) + } + async fn insert_miniblock_storage_logs( + conn: &mut Connection<'_, Core>, + miniblock_number: MiniblockNumber, + storage_logs: Vec, + ) { + conn.storage_logs_dal() + .insert_storage_logs(miniblock_number, &[(H256::zero(), storage_logs)]) + .await + .unwrap(); + } + + async fn assert_miniblock_storage_logs_equal( + conn: &mut Connection<'_, Core>, + miniblock_number: MiniblockNumber, + expected_logs: Vec, + ) { + let actual_logs: Vec<(H256, H256)> = conn + .storage_logs_dal() + .get_miniblock_storage_logs(miniblock_number) + .await + .iter() + .map(|log| (log.0, log.1)) + .collect(); + let expected_logs: Vec<(H256, H256)> = expected_logs + .iter() + .enumerate() + .map(|(_enumeration_number, log)| (log.key.hashed_key(), log.value)) + .collect(); + assert_eq!( + expected_logs, actual_logs, + "logs don't match at miniblock {miniblock_number}" + ) + } + + #[tokio::test] + async fn storage_logs_pruning_works_correctly() { + let pool = ConnectionPool::::test_pool().await; + + let mut conn = pool.connection().await.unwrap(); + let mut transaction = conn.start_transaction().await.unwrap(); + insert_realistic_l1_batches(&mut transaction, 10).await; + insert_miniblock_storage_logs( + &mut transaction, + MiniblockNumber(1), + vec![random_storage_log(1, 1)], + ) + .await; + + insert_miniblock_storage_logs( + &mut transaction, + MiniblockNumber(0), + // first storage will be overwritten in 1st miniblock, + // the second one should be kept throughout the pruning + // the third one will be overwritten in 10th miniblock + vec![ + random_storage_log(1, 2), + random_storage_log(2, 3), + random_storage_log(3, 4), + ], + ) + .await; + + insert_miniblock_storage_logs( + &mut 
transaction, + MiniblockNumber(15), + // this storage log overrides log from block 0 + vec![random_storage_log(3, 5)], + ) + .await; + + insert_miniblock_storage_logs( + &mut transaction, + MiniblockNumber(17), + // there are two logs with the same hashed key, the second one should be overwritten + vec![random_storage_log(5, 5), random_storage_log(5, 7)], + ) + .await; + + transaction + .pruning_dal() + .hard_prune_batches_range(L1BatchNumber(4), MiniblockNumber(9)) + .await + .unwrap(); + + assert_miniblock_storage_logs_equal( + &mut transaction, + MiniblockNumber(0), + vec![random_storage_log(2, 3), random_storage_log(3, 4)], + ) + .await; + assert_miniblock_storage_logs_equal( + &mut transaction, + MiniblockNumber(1), + vec![random_storage_log(1, 1)], + ) + .await; + + transaction + .pruning_dal() + .hard_prune_batches_range(L1BatchNumber(10), MiniblockNumber(21)) + .await + .unwrap(); + + assert_miniblock_storage_logs_equal( + &mut transaction, + MiniblockNumber(0), + vec![random_storage_log(2, 3)], + ) + .await; + + assert_miniblock_storage_logs_equal( + &mut transaction, + MiniblockNumber(1), + vec![random_storage_log(1, 1)], + ) + .await; + + assert_miniblock_storage_logs_equal( + &mut transaction, + MiniblockNumber(15), + vec![random_storage_log(3, 5)], + ) + .await; + + assert_miniblock_storage_logs_equal( + &mut transaction, + MiniblockNumber(17), + vec![random_storage_log(5, 7)], + ) + .await; + } + + #[tokio::test] + async fn l1_batches_can_be_hard_pruned() { + let pool = ConnectionPool::::test_pool().await; + + let mut conn = pool.connection().await.unwrap(); + let mut transaction = conn.start_transaction().await.unwrap(); + insert_realistic_l1_batches(&mut transaction, 10).await; + + assert_l1_batch_objects_exists(&mut transaction, L1BatchNumber(1)..=L1BatchNumber(10)) + .await; + assert!(transaction + .pruning_dal() + .get_pruning_info() + .await + .unwrap() + .last_hard_pruned_l1_batch + .is_none()); + + transaction + .pruning_dal() + .hard_prune_batches_range(L1BatchNumber(5), MiniblockNumber(11)) + .await + .unwrap(); + + assert_l1_batch_objects_dont_exist(&mut transaction, L1BatchNumber(1)..=L1BatchNumber(5)) + .await; + assert_l1_batch_objects_exists(&mut transaction, L1BatchNumber(6)..=L1BatchNumber(10)) + .await; + assert_eq!( + Some(L1BatchNumber(5)), + transaction + .pruning_dal() + .get_pruning_info() + .await + .unwrap() + .last_hard_pruned_l1_batch + ); + + transaction + .pruning_dal() + .hard_prune_batches_range(L1BatchNumber(10), MiniblockNumber(21)) + .await + .unwrap(); + + assert_l1_batch_objects_dont_exist(&mut transaction, L1BatchNumber(1)..=L1BatchNumber(10)) + .await; + assert_eq!( + Some(L1BatchNumber(10)), + transaction + .pruning_dal() + .get_pruning_info() + .await + .unwrap() + .last_hard_pruned_l1_batch + ); + } +} diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index 369a005ddd9..0ca06c320c0 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -276,6 +276,41 @@ impl StorageLogsDal<'_, '_> { Ok(()) } + /// Loads (hashed_key, value, operation_number) tuples for given miniblock_number. + /// Shouldn't be used in production. 
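+    // Within a miniblock, `operation_number` totally orders writes: for a given
+    // `hashed_key`, the live value is the one with the greatest
+    // (miniblock_number, operation_number) pair. This is the same ordering the
+    // pruning queries compare via MAX(ARRAY[miniblock_number, operation_number]::INT[]).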
+ #[cfg(test)] + pub async fn get_miniblock_storage_logs( + &mut self, + miniblock_number: MiniblockNumber, + ) -> Vec<(H256, H256, u32)> { + sqlx::query!( + r#" + SELECT + hashed_key, + value, + operation_number + FROM + storage_logs + WHERE + miniblock_number = $1 + ORDER BY + operation_number + "#, + i64::from(miniblock_number.0) + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| { + let hashed_key = H256::from_slice(&row.hashed_key); + let value = H256::from_slice(&row.value); + let operation_number: u32 = row.operation_number as u32; + (hashed_key, value, operation_number) + }) + .collect() + } + pub async fn is_contract_deployed_at_address(&mut self, address: Address) -> bool { let hashed_key = get_code_key(&address).hashed_key(); let row = sqlx::query!( diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 7f6e38ae971..52e8d58d438 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -9,11 +9,12 @@ use zksync_types::{ helpers::unix_timestamp_ms, l1::{L1Tx, OpProcessingType, PriorityQueueType}, l2::L2Tx, + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, protocol_upgrade::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}, snapshots::SnapshotRecoveryStatus, tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, Address, Execute, L1BatchNumber, L1BlockNumber, L1TxCommonData, L2ChainId, MiniblockNumber, - PriorityOpId, ProtocolVersion, ProtocolVersionId, H160, H256, U256, + PriorityOpId, ProtocolVersion, ProtocolVersionId, VmEvent, H160, H256, U256, }; use crate::{ @@ -164,6 +165,26 @@ pub(crate) fn create_snapshot_recovery() -> SnapshotRecoveryStatus { } } +pub(crate) fn mock_vm_event(index: u8) -> VmEvent { + VmEvent { + location: (L1BatchNumber(1), u32::from(index)), + address: Address::repeat_byte(index), + indexed_topics: (0..4).map(H256::repeat_byte).collect(), + value: vec![index], + } +} + +pub(crate) fn mock_l2_to_l1_log() -> UserL2ToL1Log { + UserL2ToL1Log(L2ToL1Log { + shard_id: 0, + is_service: false, + tx_number_in_block: 0, + sender: Address::repeat_byte(0), + key: H256::from_low_u64_be(0), + value: H256::repeat_byte(0), + }) +} + #[tokio::test] async fn workflow_with_submit_tx_equal_hashes() { let connection_pool = ConnectionPool::::test_pool().await; diff --git a/core/lib/db_connection/src/connection_pool.rs b/core/lib/db_connection/src/connection_pool.rs index 0aeeba6cad5..a642a5844d4 100644 --- a/core/lib/db_connection/src/connection_pool.rs +++ b/core/lib/db_connection/src/connection_pool.rs @@ -280,7 +280,7 @@ impl ConnectionPool { /// Test pools trace their active connections. If acquiring a connection fails (e.g., with a timeout), /// the returned error will contain information on all active connections. pub async fn test_pool() -> ConnectionPool { - const DEFAULT_CONNECTIONS: u32 = 50; // Expected to be enough for any unit test. + const DEFAULT_CONNECTIONS: u32 = 100; // Expected to be enough for any unit test. Self::constrained_test_pool(DEFAULT_CONNECTIONS).await } diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs index b619016da25..47e45ae9f83 100644 --- a/core/lib/snapshots_applier/src/lib.rs +++ b/core/lib/snapshots_applier/src/lib.rs @@ -321,6 +321,20 @@ impl<'a> SnapshotsApplier<'a> { .await?; let mut storage_transaction = storage.start_transaction().await?; + if storage_transaction + .snapshot_recovery_dal() + .get_applied_snapshot_status() + .await? 
+ .is_some() + && storage_transaction + .blocks_dal() + .get_sealed_miniblock_number() + .await? + .is_some() + { + return Ok(()); + } + let (applied_snapshot_status, created_from_scratch) = Self::prepare_applied_snapshot_status(&mut storage_transaction, main_node_client) .await?; @@ -352,6 +366,21 @@ impl<'a> SnapshotsApplier<'a> { .snapshot_recovery_dal() .insert_initial_recovery_status(&this.applied_snapshot_status) .await?; + storage_transaction + .pruning_dal() + .soft_prune_batches_range( + this.applied_snapshot_status.l1_batch_number, + this.applied_snapshot_status.miniblock_number, + ) + .await?; + + storage_transaction + .pruning_dal() + .hard_prune_batches_range( + this.applied_snapshot_status.l1_batch_number, + this.applied_snapshot_status.miniblock_number, + ) + .await?; } storage_transaction.commit().await?; drop(storage); diff --git a/core/lib/zksync_core/Cargo.toml b/core/lib/zksync_core/Cargo.toml index 8a2f2b97d5d..0ed908204b4 100644 --- a/core/lib/zksync_core/Cargo.toml +++ b/core/lib/zksync_core/Cargo.toml @@ -100,6 +100,7 @@ assert_matches.workspace = true jsonrpsee.workspace = true tempfile.workspace = true test-casing.workspace = true +test-log.workspace = true [build-dependencies] zksync_protobuf_build.workspace = true diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs index 1e858f630f7..7743f71e665 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs @@ -1,8 +1,13 @@ -use std::{sync::Arc, time::Duration}; +use std::{ + sync::{Arc, RwLock}, + time::Duration, +}; use anyhow::Context as _; +use chrono::{DateTime, Utc}; +use rand::random; use tokio::runtime::Handle; -use zksync_dal::{Connection, Core, CoreDal, DalError}; +use zksync_dal::{pruning_dal::PruningInfo, Connection, Core, CoreDal, DalError}; use zksync_state::PostgresStorageCaches; use zksync_types::{ api, fee_model::BatchFeeInput, AccountTreeId, Address, L1BatchNumber, L2ChainId, @@ -188,42 +193,112 @@ impl TxSharedArgs { } /// Information about first L1 batch / miniblock in the node storage. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone)] pub(crate) struct BlockStartInfo { - /// Number of the first locally available miniblock. - pub first_miniblock: MiniblockNumber, - /// Number of the first locally available L1 batch. 
- pub first_l1_batch: L1BatchNumber, + cached_pruning_info: Arc)>>, } impl BlockStartInfo { pub async fn new(storage: &mut Connection<'_, Core>) -> anyhow::Result { - let snapshot_recovery = storage - .snapshot_recovery_dal() - .get_applied_snapshot_status() - .await?; - let snapshot_recovery = snapshot_recovery.as_ref(); Ok(Self { - first_miniblock: snapshot_recovery - .map_or(MiniblockNumber(0), |recovery| recovery.miniblock_number + 1), - first_l1_batch: snapshot_recovery - .map_or(L1BatchNumber(0), |recovery| recovery.l1_batch_number + 1), + cached_pruning_info: Arc::from(RwLock::from(( + storage.pruning_dal().get_pruning_info().await?, + Utc::now(), + ))), }) } + fn get_cache_state_copy(&self) -> (PruningInfo, DateTime) { + let current_cache = self + .cached_pruning_info + .read() + .expect("BlockStartInfo is poisoned"); + *current_cache + } + + fn is_cache_expired(&self, now: DateTime, last_cache_date: DateTime) -> bool { + const CACHE_MAX_AGE_MS: i64 = 20000; + // we make max age a bit random so that all threads don't start refreshing cache at the same time + let random_delay = + chrono::Duration::milliseconds(i64::from(random::()) % CACHE_MAX_AGE_MS / 2); + now - last_cache_date > chrono::Duration::milliseconds(CACHE_MAX_AGE_MS) + random_delay + } + + async fn update_cache( + &self, + storage: &mut Connection<'_, Core>, + now: DateTime, + ) -> anyhow::Result { + let new_pruning_info = storage.pruning_dal().get_pruning_info().await?; + + let mut new_cached_pruning_info = self + .cached_pruning_info + .write() + .expect("BlockStartInfo is poisoned"); + new_cached_pruning_info.0 = new_pruning_info; + new_cached_pruning_info.1 = now; + Ok(new_pruning_info) + } + + async fn get_pruning_info( + &self, + storage: &mut Connection<'_, Core>, + ) -> anyhow::Result { + let (last_cached_pruning_info, last_cache_date) = self.get_cache_state_copy(); + let now = Utc::now(); + if self.is_cache_expired(now, last_cache_date) { + //multiple threads may execute this query if we're very unlucky + self.update_cache(storage, now).await + } else { + Ok(last_cached_pruning_info) + } + } + + pub async fn first_miniblock( + &self, + storage: &mut Connection<'_, Core>, + ) -> anyhow::Result { + let cached_pruning_info = self.get_pruning_info(storage).await?; + let last_block = cached_pruning_info.last_soft_pruned_miniblock; + if let Some(MiniblockNumber(last_block)) = last_block { + return Ok(MiniblockNumber(last_block + 1)); + } + Ok(MiniblockNumber(0)) + } + + pub async fn first_l1_batch( + &self, + storage: &mut Connection<'_, Core>, + ) -> anyhow::Result { + let cached_pruning_info = self.get_pruning_info(storage).await?; + let last_batch = cached_pruning_info.last_soft_pruned_l1_batch; + if let Some(L1BatchNumber(last_block)) = last_batch { + return Ok(L1BatchNumber(last_block + 1)); + } + Ok(L1BatchNumber(0)) + } + /// Checks whether a block with the specified ID is pruned and returns an error if it is. /// The `Err` variant wraps the first non-pruned miniblock. 
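+    // Usage sketch: call sites now pass a connection so that stale cached pruning
+    // info can be refreshed on demand, e.g.
+    //
+    //     start_info.ensure_not_pruned_block(block_id, &mut storage).await?;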
- pub fn ensure_not_pruned_block(&self, block: api::BlockId) -> Result<(), MiniblockNumber> { + pub async fn ensure_not_pruned_block( + &self, + block: api::BlockId, + storage: &mut Connection<'_, Core>, + ) -> Result<(), BlockArgsError> { + let first_miniblock = self + .first_miniblock(storage) + .await + .map_err(BlockArgsError::Database)?; match block { api::BlockId::Number(api::BlockNumber::Number(number)) - if number < self.first_miniblock.0.into() => + if number < first_miniblock.0.into() => { - Err(self.first_miniblock) + Err(BlockArgsError::Pruned(first_miniblock)) } api::BlockId::Number(api::BlockNumber::Earliest) - if self.first_miniblock > MiniblockNumber(0) => + if first_miniblock > MiniblockNumber(0) => { - Err(self.first_miniblock) + Err(BlockArgsError::Pruned(first_miniblock)) } _ => Ok(()), } @@ -262,14 +337,14 @@ impl BlockArgs { pub async fn new( connection: &mut Connection<'_, Core>, block_id: api::BlockId, - start_info: BlockStartInfo, + start_info: &BlockStartInfo, ) -> Result { // We need to check that `block_id` is present in Postgres or can be present in the future // (i.e., it does not refer to a pruned block). If called for a pruned block, the returned value // (specifically, `l1_batch_timestamp_s`) will be nonsensical. start_info - .ensure_not_pruned_block(block_id) - .map_err(BlockArgsError::Pruned)?; + .ensure_not_pruned_block(block_id, connection) + .await?; if block_id == api::BlockId::Number(api::BlockNumber::Pending) { return Ok(BlockArgs::pending(connection).await?); diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs index c08c318d7f2..58f52e3badf 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs @@ -33,11 +33,17 @@ async fn creating_block_args() { assert_eq!(pending_block_args.l1_batch_timestamp_s, None); let start_info = BlockStartInfo::new(&mut storage).await.unwrap(); - assert_eq!(start_info.first_miniblock, MiniblockNumber(0)); - assert_eq!(start_info.first_l1_batch, L1BatchNumber(0)); + assert_eq!( + start_info.first_miniblock(&mut storage).await.unwrap(), + MiniblockNumber(0) + ); + assert_eq!( + start_info.first_l1_batch(&mut storage).await.unwrap(), + L1BatchNumber(0) + ); let latest_block = api::BlockId::Number(api::BlockNumber::Latest); - let latest_block_args = BlockArgs::new(&mut storage, latest_block, start_info) + let latest_block_args = BlockArgs::new(&mut storage, latest_block, &start_info) .await .unwrap(); assert_eq!(latest_block_args.block_id, latest_block); @@ -48,7 +54,7 @@ async fn creating_block_args() { ); let earliest_block = api::BlockId::Number(api::BlockNumber::Earliest); - let earliest_block_args = BlockArgs::new(&mut storage, earliest_block, start_info) + let earliest_block_args = BlockArgs::new(&mut storage, earliest_block, &start_info) .await .unwrap(); assert_eq!(earliest_block_args.block_id, earliest_block); @@ -59,7 +65,7 @@ async fn creating_block_args() { assert_eq!(earliest_block_args.l1_batch_timestamp_s, Some(0)); let missing_block = api::BlockId::Number(100.into()); - let err = BlockArgs::new(&mut storage, missing_block, start_info) + let err = BlockArgs::new(&mut storage, missing_block, &start_info) .await .unwrap_err(); assert_matches!(err, BlockArgsError::Missing); @@ -85,16 +91,16 @@ async fn creating_block_args_after_snapshot_recovery() { let start_info = BlockStartInfo::new(&mut storage).await.unwrap(); assert_eq!( - 
start_info.first_miniblock, + start_info.first_miniblock(&mut storage).await.unwrap(), snapshot_recovery.miniblock_number + 1 ); assert_eq!( - start_info.first_l1_batch, + start_info.first_l1_batch(&mut storage).await.unwrap(), snapshot_recovery.l1_batch_number + 1 ); let latest_block = api::BlockId::Number(api::BlockNumber::Latest); - let err = BlockArgs::new(&mut storage, latest_block, start_info) + let err = BlockArgs::new(&mut storage, latest_block, &start_info) .await .unwrap_err(); assert_matches!(err, BlockArgsError::Missing); @@ -106,7 +112,7 @@ async fn creating_block_args_after_snapshot_recovery() { ]; for pruned_block in pruned_blocks { let pruned_block = api::BlockId::Number(pruned_block); - let err = BlockArgs::new(&mut storage, pruned_block, start_info) + let err = BlockArgs::new(&mut storage, pruned_block, &start_info) .await .unwrap_err(); assert_matches!(err, BlockArgsError::Pruned(_)); @@ -118,7 +124,7 @@ async fn creating_block_args_after_snapshot_recovery() { ]; for missing_block in missing_blocks { let missing_block = api::BlockId::Number(missing_block); - let err = BlockArgs::new(&mut storage, missing_block, start_info) + let err = BlockArgs::new(&mut storage, missing_block, &start_info) .await .unwrap_err(); assert_matches!(err, BlockArgsError::Missing); @@ -131,7 +137,7 @@ async fn creating_block_args_after_snapshot_recovery() { .await .unwrap(); - let latest_block_args = BlockArgs::new(&mut storage, latest_block, start_info) + let latest_block_args = BlockArgs::new(&mut storage, latest_block, &start_info) .await .unwrap(); assert_eq!(latest_block_args.block_id, latest_block); @@ -143,14 +149,14 @@ async fn creating_block_args_after_snapshot_recovery() { for pruned_block in pruned_blocks { let pruned_block = api::BlockId::Number(pruned_block); - let err = BlockArgs::new(&mut storage, pruned_block, start_info) + let err = BlockArgs::new(&mut storage, pruned_block, &start_info) .await .unwrap_err(); assert_matches!(err, BlockArgsError::Pruned(_)); } for missing_block in missing_blocks { let missing_block = api::BlockId::Number(missing_block); - let err = BlockArgs::new(&mut storage, missing_block, start_info) + let err = BlockArgs::new(&mut storage, missing_block, &start_info) .await .unwrap_err(); assert_matches!(err, BlockArgsError::Missing); @@ -168,7 +174,7 @@ async fn instantiating_vm() { let block_args = BlockArgs::pending(&mut storage).await.unwrap(); test_instantiating_vm(pool.clone(), block_args).await; let start_info = BlockStartInfo::new(&mut storage).await.unwrap(); - let block_args = BlockArgs::new(&mut storage, api::BlockId::Number(0.into()), start_info) + let block_args = BlockArgs::new(&mut storage, api::BlockId::Number(0.into()), &start_info) .await .unwrap(); test_instantiating_vm(pool.clone(), block_args).await; diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs index b2781468e49..171bd665196 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs @@ -532,7 +532,7 @@ impl TxSender { None => { // We don't have miniblocks in the storage yet. Use the snapshot miniblock number instead. 
let start = BlockStartInfo::new(&mut storage).await?; - MiniblockNumber(start.first_miniblock.saturating_sub(1)) + MiniblockNumber(start.first_miniblock(&mut storage).await?.saturating_sub(1)) } }; diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs index 6d4d5c73e25..18c4af7582b 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -216,9 +216,13 @@ impl EthNamespace { full_transactions: bool, ) -> Result>, Web3Error> { self.current_method().set_block_id(block_id); - self.state.start_info.ensure_not_pruned(block_id)?; - let mut storage = self.state.acquire_connection().await?; + + self.state + .start_info + .ensure_not_pruned(block_id, &mut storage) + .await?; + let Some(block_number) = self .state .resolve_block_unchecked(&mut storage, block_id) @@ -278,9 +282,13 @@ impl EthNamespace { block_id: BlockId, ) -> Result, Web3Error> { self.current_method().set_block_id(block_id); - self.state.start_info.ensure_not_pruned(block_id)?; - let mut storage = self.state.acquire_connection().await?; + + self.state + .start_info + .ensure_not_pruned(block_id, &mut storage) + .await?; + let Some(block_number) = self .state .resolve_block_unchecked(&mut storage, block_id) @@ -306,9 +314,13 @@ impl EthNamespace { block_id: BlockId, ) -> Result>, Web3Error> { self.current_method().set_block_id(block_id); - self.state.start_info.ensure_not_pruned(block_id)?; - let mut storage = self.state.acquire_connection().await?; + + self.state + .start_info + .ensure_not_pruned(block_id, &mut storage) + .await?; + let Some(block_number) = self .state .resolve_block_unchecked(&mut storage, block_id) diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index 06f96ee046c..e859e97680f 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -206,8 +206,12 @@ impl ZksNamespace { msg: H256, l2_log_position: Option, ) -> Result, Web3Error> { - self.state.start_info.ensure_not_pruned(block_number)?; let mut storage = self.state.acquire_connection().await?; + self.state + .start_info + .ensure_not_pruned(block_number, &mut storage) + .await?; + let Some(l1_batch_number) = storage .blocks_web3_dal() .get_l1_batch_number_of_miniblock(block_number) @@ -356,8 +360,11 @@ impl ZksNamespace { &self, batch: L1BatchNumber, ) -> Result, Web3Error> { - self.state.start_info.ensure_not_pruned(batch)?; let mut storage = self.state.acquire_connection().await?; + self.state + .start_info + .ensure_not_pruned(batch, &mut storage) + .await?; let range = storage .blocks_web3_dal() .get_miniblock_range_of_l1_batch(batch) @@ -371,8 +378,12 @@ impl ZksNamespace { &self, block_number: MiniblockNumber, ) -> Result, Web3Error> { - self.state.start_info.ensure_not_pruned(block_number)?; let mut storage = self.state.acquire_connection().await?; + self.state + .start_info + .ensure_not_pruned(block_number, &mut storage) + .await?; + Ok(storage .blocks_web3_dal() .get_block_details(block_number) @@ -385,8 +396,12 @@ impl ZksNamespace { &self, block_number: MiniblockNumber, ) -> Result, Web3Error> { - self.state.start_info.ensure_not_pruned(block_number)?; let mut storage = self.state.acquire_connection().await?; + self.state + .start_info + .ensure_not_pruned(block_number, &mut storage) + .await?; + Ok(storage 
.transactions_web3_dal() .get_raw_miniblock_transactions(block_number) @@ -418,8 +433,12 @@ impl ZksNamespace { &self, batch_number: L1BatchNumber, ) -> Result, Web3Error> { - self.state.start_info.ensure_not_pruned(batch_number)?; let mut storage = self.state.acquire_connection().await?; + self.state + .start_info + .ensure_not_pruned(batch_number, &mut storage) + .await?; + Ok(storage .blocks_web3_dal() .get_l1_batch_details(batch_number) @@ -493,7 +512,11 @@ impl ZksNamespace { keys: Vec, l1_batch_number: L1BatchNumber, ) -> Result, Web3Error> { - self.state.start_info.ensure_not_pruned(l1_batch_number)?; + let mut storage = self.state.acquire_connection().await?; + self.state + .start_info + .ensure_not_pruned(l1_batch_number, &mut storage) + .await?; let hashed_keys = keys .iter() .map(|key| StorageKey::new(AccountTreeId::new(address), *key).hashed_key_u256()) diff --git a/core/lib/zksync_core/src/api_server/web3/pubsub.rs b/core/lib/zksync_core/src/api_server/web3/pubsub.rs index 5530a6cbe26..a8e14e59780 100644 --- a/core/lib/zksync_core/src/api_server/web3/pubsub.rs +++ b/core/lib/zksync_core/src/api_server/web3/pubsub.rs @@ -70,7 +70,8 @@ impl PubSubNotifier { None => { // We don't have miniblocks in the storage yet. Use the snapshot miniblock number instead. let start_info = BlockStartInfo::new(&mut storage).await?; - MiniblockNumber(start_info.first_miniblock.saturating_sub(1)) + let first_miniblock = start_info.first_miniblock(&mut storage).await?; + MiniblockNumber(first_miniblock.saturating_sub(1)) } }) } diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs index 98a35d7fec5..06013e40d95 100644 --- a/core/lib/zksync_core/src/api_server/web3/state.rs +++ b/core/lib/zksync_core/src/api_server/web3/state.rs @@ -62,15 +62,28 @@ impl From for PruneQuery { } } +impl From for Web3Error { + fn from(value: BlockArgsError) -> Self { + match value { + BlockArgsError::Pruned(miniblock) => Web3Error::PrunedBlock(miniblock), + BlockArgsError::Missing => Web3Error::NoBlock, + BlockArgsError::Database(error) => Web3Error::InternalError(error), + } + } +} + impl BlockStartInfo { - pub(super) fn ensure_not_pruned(&self, query: impl Into) -> Result<(), Web3Error> { + pub(super) async fn ensure_not_pruned( + &self, + query: impl Into, + storage: &mut Connection<'_, Core>, + ) -> Result<(), Web3Error> { match query.into() { - PruneQuery::BlockId(id) => self - .ensure_not_pruned_block(id) - .map_err(Web3Error::PrunedBlock), + PruneQuery::BlockId(id) => Ok(self.ensure_not_pruned_block(id, storage).await?), PruneQuery::L1Batch(number) => { - if number < self.first_l1_batch { - return Err(Web3Error::PrunedL1Batch(self.first_l1_batch)); + let first_l1_batch = self.first_l1_batch(storage).await?; + if number < first_l1_batch { + return Err(Web3Error::PrunedL1Batch(first_l1_batch)); } Ok(()) } @@ -277,7 +290,7 @@ impl RpcState { connection: &mut Connection<'_, Core>, block: api::BlockId, ) -> Result { - self.start_info.ensure_not_pruned(block)?; + self.start_info.ensure_not_pruned(block, connection).await?; connection .blocks_web3_dal() .resolve_block_id(block) @@ -298,7 +311,7 @@ impl RpcState { connection: &mut Connection<'_, Core>, block: api::BlockId, ) -> Result, Web3Error> { - self.start_info.ensure_not_pruned(block)?; + self.start_info.ensure_not_pruned(block, connection).await?; match block { api::BlockId::Number(api::BlockNumber::Number(number)) => { Ok(u32::try_from(number).ok().map(MiniblockNumber)) @@ -317,7 +330,7 @@ impl RpcState 
{
     connection: &mut Connection<'_, Core>,
     block: api::BlockId,
 ) -> Result<BlockArgs, Web3Error> {
-    BlockArgs::new(connection, block, self.start_info)
+    BlockArgs::new(connection, block, &self.start_info)
         .await
         .map_err(|err| match err {
             BlockArgsError::Pruned(number) => Web3Error::PrunedBlock(number),
diff --git a/core/lib/zksync_core/src/db_pruner/README.md b/core/lib/zksync_core/src/db_pruner/README.md
new file mode 100644
index 00000000000..8ffcc8ce367
--- /dev/null
+++ b/core/lib/zksync_core/src/db_pruner/README.md
@@ -0,0 +1,17 @@
+# DB pruner
+
+The database pruner is a component that regularly removes the oldest L1 batches from the database, together with the
+corresponding miniblocks, events, etc.
+
+**There are two types of objects that are not fully cleaned:**
+
+**Transactions** - Transactions only have their BYTEA fields cleared, as many other components rely on the existence
+of transactions.
+
+**Storage logs** - We only remove storage logs that have been overwritten.
+
+### Soft and Hard pruning
+
+There are two 'phases' of pruning an L1 batch: soft pruning and hard pruning. Every batch that is about to have its
+records removed is first soft pruned; soft-pruned batches can no longer be used safely. One minute after soft pruning
+(this delay is configurable), hard pruning is performed, which physically removes those batches from the database.
diff --git a/core/lib/zksync_core/src/db_pruner/metrics.rs b/core/lib/zksync_core/src/db_pruner/metrics.rs
new file mode 100644
index 00000000000..1011d820517
--- /dev/null
+++ b/core/lib/zksync_core/src/db_pruner/metrics.rs
@@ -0,0 +1,24 @@
+use std::time::Duration;
+
+use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics, Unit};
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
+#[metrics(label = "prune_type", rename_all = "snake_case")]
+pub(crate) enum MetricPruneType {
+    Soft,
+    Hard,
+}
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "db_pruner")]
+pub(crate) struct DbPrunerMetrics {
+    /// Total latency of pruning a chunk of L1 batches.
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    pub pruning_chunk_duration: Family<MetricPruneType, Histogram<Duration>>,
+
+    /// Number of not-pruned L1 batches.
+    pub not_pruned_l1_batches_count: Gauge<u64>,
+}
+
+#[vise::register]
+pub(crate) static METRICS: vise::Global<DbPrunerMetrics> = vise::Global::new();
diff --git a/core/lib/zksync_core/src/db_pruner/mod.rs b/core/lib/zksync_core/src/db_pruner/mod.rs
new file mode 100644
index 00000000000..e54fc11ae8e
--- /dev/null
+++ b/core/lib/zksync_core/src/db_pruner/mod.rs
@@ -0,0 +1,497 @@
+mod metrics;
+pub mod prune_conditions;
+
+use std::{fmt::Debug, sync::Arc, time::Duration};
+
+use async_trait::async_trait;
+use tokio::sync::watch;
+use zksync_dal::{ConnectionPool, Core, CoreDal};
+use zksync_types::L1BatchNumber;
+
+use crate::db_pruner::metrics::{MetricPruneType, METRICS};
+
+#[derive(Debug)]
+pub struct DbPrunerConfig {
+    pub soft_and_hard_pruning_time_delta: Duration,
+    pub next_iterations_delay: Duration,
+    pub pruned_batch_chunk_size: u32,
+}
+
+#[derive(Debug)]
+pub struct DbPruner {
+    config: DbPrunerConfig,
+    prune_conditions: Vec<Arc<dyn PruneCondition>>,
+}
+
+/// A condition that must be satisfied for an L1 batch to be prunable.
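With `DbPrunerConfig` and `DbPruner` declared above, and the `PruneCondition` trait declared next, wiring a pruner together is just composing trait objects. A hedged construction sketch (the concrete condition types come from `prune_conditions.rs` later in this patch; the durations and chunk size here are arbitrary illustration values, not recommended settings):

```rust
use std::{sync::Arc, time::Duration};

// Assumes the items declared in this patch: `DbPruner`, `DbPrunerConfig`,
// `PruneCondition`, and the conditions defined in `prune_conditions.rs` below.
fn build_pruner(pool: ConnectionPool<Core>) -> anyhow::Result<DbPruner> {
    let conditions: Vec<Arc<dyn PruneCondition>> = vec![
        Arc::new(prune_conditions::L1BatchExistsCondition { conn: pool.clone() }),
        Arc::new(prune_conditions::NextL1BatchWasExecutedCondition { conn: pool }),
    ];
    DbPruner::new(
        DbPrunerConfig {
            // Grace period between marking batches soft-pruned and deleting their rows.
            soft_and_hard_pruning_time_delta: Duration::from_secs(60),
            // Back-off before retrying when nothing is prunable or an error occurred.
            next_iterations_delay: Duration::from_secs(30),
            // How many L1 batches each pruning cycle advances by.
            pruned_batch_chunk_size: 10,
        },
        conditions,
    )
}
```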
+#[async_trait]
+pub trait PruneCondition: Debug + Send + Sync + 'static {
+    async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result<bool>;
+}
+
+impl DbPruner {
+    pub fn new(
+        config: DbPrunerConfig,
+        prune_conditions: Vec<Arc<dyn PruneCondition>>,
+    ) -> anyhow::Result<Self> {
+        Ok(Self {
+            config,
+            prune_conditions,
+        })
+    }
+
+    pub async fn is_l1_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> bool {
+        let mut successful_conditions: Vec<String> = vec![];
+        let mut failed_conditions: Vec<String> = vec![];
+        let mut errored_conditions: Vec<String> = vec![];
+
+        for condition in &self.prune_conditions {
+            match condition.is_batch_prunable(l1_batch_number).await {
+                Ok(true) => successful_conditions.push(format!("{condition:?}")),
+                Ok(false) => failed_conditions.push(format!("{condition:?}")),
+                Err(error) => {
+                    errored_conditions.push(format!("{condition:?}"));
+                    tracing::warn!(
+                        "Pruning condition for component {condition:?} resulted in an error: {error}"
+                    )
+                }
+            }
+        }
+        let result = failed_conditions.is_empty() && errored_conditions.is_empty();
+        if !result {
+            tracing::info!(
+                "Pruning l1 batch {l1_batch_number} is not possible, \
+                successful conditions: {successful_conditions:?}, \
+                failed conditions: {failed_conditions:?}, \
+                errored_conditions: {errored_conditions:?}"
+            );
+        }
+        result
+    }
+
+    async fn update_l1_batches_metric(&self, pool: &ConnectionPool<Core>) -> anyhow::Result<()> {
+        let mut storage = pool.connection_tagged("db_pruner").await?;
+        let first_l1_batch = storage.blocks_dal().get_earliest_l1_batch_number().await?;
+        let last_l1_batch = storage.blocks_dal().get_sealed_l1_batch_number().await?;
+        if first_l1_batch.is_none() {
+            METRICS.not_pruned_l1_batches_count.set(0);
+            return Ok(());
+        }
+
+        METRICS
+            .not_pruned_l1_batches_count
+            .set((last_l1_batch.unwrap().0 - first_l1_batch.unwrap().0) as u64);
+        Ok(())
+    }
+
+    async fn soft_prune(&self, pool: &ConnectionPool<Core>) -> anyhow::Result<bool> {
+        let latency = METRICS.pruning_chunk_duration[&MetricPruneType::Soft].start();
+
+        let mut storage = pool.connection_tagged("db_pruner").await?;
+        let mut transaction = storage.start_transaction().await?;
+
+        let current_pruning_info = transaction.pruning_dal().get_pruning_info().await?;
+        let next_l1_batch_to_prune = L1BatchNumber(
+            current_pruning_info
+                .last_soft_pruned_l1_batch
+                .unwrap_or(L1BatchNumber(0))
+                .0
+                + self.config.pruned_batch_chunk_size,
+        );
+        if !self.is_l1_batch_prunable(next_l1_batch_to_prune).await {
+            latency.observe();
+            return Ok(false);
+        }
+
+        let next_miniblock_to_prune = transaction
+            .blocks_dal()
+            .get_miniblock_range_of_l1_batch(next_l1_batch_to_prune)
+            .await?
+            .unwrap()
+            .1;
+        transaction
+            .pruning_dal()
+            .soft_prune_batches_range(next_l1_batch_to_prune, next_miniblock_to_prune)
+            .await?;
+
+        transaction.commit().await?;
+
+        let latency = latency.observe();
+        tracing::info!(
+            "Soft pruned db l1_batches up to {next_l1_batch_to_prune} and miniblocks up to {next_miniblock_to_prune}, operation took {latency:?}",
+        );
+
+        Ok(true)
+    }
+
+    async fn hard_prune(&self, pool: &ConnectionPool<Core>) -> anyhow::Result<()> {
+        let latency = METRICS.pruning_chunk_duration[&MetricPruneType::Hard].start();
+
+        let mut storage = pool.connection_tagged("db_pruner").await?;
+        let mut transaction = storage.start_transaction().await?;
+
+        let current_pruning_info = transaction.pruning_dal().get_pruning_info().await?;
+        transaction
+            .pruning_dal()
+            .hard_prune_batches_range(
+                current_pruning_info.last_soft_pruned_l1_batch.unwrap(),
+                current_pruning_info.last_soft_pruned_miniblock.unwrap(),
+            )
+            .await?;
+
+        transaction.commit().await?;
+
+        let latency = latency.observe();
+        tracing::info!(
+            "Hard pruned db l1_batches up to {} and miniblocks up to {}, operation took {:?}",
+            current_pruning_info.last_soft_pruned_l1_batch.unwrap(),
+            current_pruning_info.last_soft_pruned_miniblock.unwrap(),
+            latency
+        );
+
+        Ok(())
+    }
+
+    pub async fn run_single_iteration(&self, pool: &ConnectionPool<Core>) -> anyhow::Result<bool> {
+        let mut storage = pool.connection_tagged("db_pruner").await?;
+        let current_pruning_info = storage.pruning_dal().get_pruning_info().await?;
+
+        // If this branch is not entered, it means that the node has restarted after soft pruning
+        // but before hard pruning, so only the hard-pruning step needs to be performed.
+        if current_pruning_info.last_soft_pruned_l1_batch
+            == current_pruning_info.last_hard_pruned_l1_batch
+        {
+            let pruning_done = self.soft_prune(pool).await?;
+            if !pruning_done {
+                return Ok(false);
+            }
+        }
+
+        tokio::time::sleep(self.config.soft_and_hard_pruning_time_delta).await;
+        self.hard_prune(pool).await?;
+
+        Ok(true)
+    }
+
+    pub async fn run(
+        self,
+        pool: ConnectionPool<Core>,
+        stop_receiver: watch::Receiver<bool>,
+    ) -> anyhow::Result<()> {
+        loop {
+            if *stop_receiver.borrow() {
+                tracing::info!("Stop signal received, shutting down DbPruner");
+                return Ok(());
+            }
+            let _ = self.update_l1_batches_metric(&pool).await;
+            // As this component is not really mission-critical, all errors are generally ignored.
+            let pruning_done = self.run_single_iteration(&pool).await;
+            if let Err(e) = &pruning_done {
+                tracing::warn!(
+                    "Pruning error, retrying in {:?}, error was: {e}",
+                    self.config.next_iterations_delay
+                );
+                tokio::time::sleep(self.config.next_iterations_delay).await;
+            } else if !pruning_done.unwrap() {
+                tokio::time::sleep(self.config.next_iterations_delay).await;
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::{collections::HashMap, fmt, fmt::Formatter};
+
+    use anyhow::anyhow;
+    use multivm::zk_evm_latest::ethereum_types::H256;
+    use test_log::test;
+    use zksync_dal::pruning_dal::PruningInfo;
+    use zksync_db_connection::connection::Connection;
+    use zksync_types::{block::MiniblockHeader, Address, MiniblockNumber, ProtocolVersion};
+
+    use super::*;
+
+    struct ConditionMock {
+        pub name: &'static str,
+        pub is_batch_prunable_responses: HashMap<L1BatchNumber, bool>,
+    }
+
+    impl ConditionMock {
+        fn name(name: &'static str) -> ConditionMock {
+            Self {
+                name,
+                is_batch_prunable_responses: HashMap::default(),
+            }
+        }
+
+        fn with_response(mut self, l1_batch_number: L1BatchNumber, value: bool) -> Self {
+            self.is_batch_prunable_responses
+                .insert(l1_batch_number, value);
+            self
+        }
+    }
+
+    impl Debug for ConditionMock {
+        fn fmt(&self, f: &mut Formatter<'_>)
-> fmt::Result { + write!(f, "{}", self.name) + } + } + + #[async_trait] + impl PruneCondition for ConditionMock { + async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result { + if !self + .is_batch_prunable_responses + .contains_key(&l1_batch_number) + { + return Err(anyhow!("Error!")); + } + Ok(self + .is_batch_prunable_responses + .get(&l1_batch_number) + .cloned() + .unwrap()) + } + } + + #[test(tokio::test)] + async fn is_l1_batch_prunable_works() { + let failing_check = Arc::new( + ConditionMock::name("some failing some passing1") + .with_response(L1BatchNumber(1), true) + .with_response(L1BatchNumber(2), true) + .with_response(L1BatchNumber(3), false) + .with_response(L1BatchNumber(4), true), + ); + let other_failing_check = Arc::new( + ConditionMock::name("some failing some passing2") + .with_response(L1BatchNumber(2), false) + .with_response(L1BatchNumber(3), true) + .with_response(L1BatchNumber(4), true), + ); + let pruner = DbPruner::new( + DbPrunerConfig { + soft_and_hard_pruning_time_delta: Duration::from_secs(0), + pruned_batch_chunk_size: 1, + next_iterations_delay: Duration::from_secs(0), + }, + vec![failing_check, other_failing_check], + ) + .unwrap(); + // first check succeeds, but second returns an error + assert!(!pruner.is_l1_batch_prunable(L1BatchNumber(1)).await); + //second check fails + assert!(!pruner.is_l1_batch_prunable(L1BatchNumber(2)).await); + //first check fails + assert!(!pruner.is_l1_batch_prunable(L1BatchNumber(3)).await); + + assert!(pruner.is_l1_batch_prunable(L1BatchNumber(4)).await); + } + + async fn insert_miniblocks( + conn: &mut Connection<'_, Core>, + l1_batches_count: u64, + miniblocks_per_batch: u64, + ) { + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + for l1_batch_number in 0..l1_batches_count { + for miniblock_index in 0..miniblocks_per_batch { + let miniblock_number = MiniblockNumber( + (l1_batch_number * miniblocks_per_batch + miniblock_index) as u32, + ); + let miniblock_header = MiniblockHeader { + number: miniblock_number, + timestamp: 0, + hash: H256::from_low_u64_be(u64::from(miniblock_number.0)), + l1_tx_count: 0, + l2_tx_count: 0, + fee_account_address: Address::repeat_byte(1), + base_fee_per_gas: 0, + gas_per_pubdata_limit: 0, + batch_fee_input: Default::default(), + base_system_contracts_hashes: Default::default(), + protocol_version: Some(Default::default()), + virtual_blocks: 0, + gas_limit: 0, + }; + + conn.blocks_dal() + .insert_miniblock(&miniblock_header) + .await + .unwrap(); + conn.blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(L1BatchNumber(l1_batch_number as u32)) + .await + .unwrap(); + } + } + } + + #[test(tokio::test)] + async fn hard_pruning_ignores_conditions_checks() { + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + + insert_miniblocks(&mut conn, 10, 2).await; + conn.pruning_dal() + .soft_prune_batches_range(L1BatchNumber(2), MiniblockNumber(5)) + .await + .unwrap(); + + let nothing_prunable_check = Arc::new(ConditionMock::name("nothing prunable")); + let pruner = DbPruner::new( + DbPrunerConfig { + soft_and_hard_pruning_time_delta: Duration::from_secs(0), + pruned_batch_chunk_size: 5, + next_iterations_delay: Duration::from_secs(0), + }, + vec![nothing_prunable_check], + ) + .unwrap(); + + pruner.run_single_iteration(&pool).await.unwrap(); + + assert_eq!( + PruningInfo { + last_soft_pruned_l1_batch: Some(L1BatchNumber(2)), + last_soft_pruned_miniblock: 
Some(MiniblockNumber(5)), + last_hard_pruned_l1_batch: Some(L1BatchNumber(2)), + last_hard_pruned_miniblock: Some(MiniblockNumber(5)), + }, + conn.pruning_dal().get_pruning_info().await.unwrap() + ); + } + #[test(tokio::test)] + async fn pruner_should_catch_up_with_hard_pruning_up_to_soft_pruning_boundary_ignoring_chunk_size( + ) { + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + + insert_miniblocks(&mut conn, 10, 2).await; + conn.pruning_dal() + .soft_prune_batches_range(L1BatchNumber(2), MiniblockNumber(5)) + .await + .unwrap(); + let pruner = DbPruner::new( + DbPrunerConfig { + soft_and_hard_pruning_time_delta: Duration::from_secs(0), + pruned_batch_chunk_size: 5, + next_iterations_delay: Duration::from_secs(0), + }, + vec![], //No checks, so every batch is prunable + ) + .unwrap(); + + pruner.run_single_iteration(&pool).await.unwrap(); + + assert_eq!( + PruningInfo { + last_soft_pruned_l1_batch: Some(L1BatchNumber(2)), + last_soft_pruned_miniblock: Some(MiniblockNumber(5)), + last_hard_pruned_l1_batch: Some(L1BatchNumber(2)), + last_hard_pruned_miniblock: Some(MiniblockNumber(5)), + }, + conn.pruning_dal().get_pruning_info().await.unwrap() + ); + + pruner.run_single_iteration(&pool).await.unwrap(); + assert_eq!( + PruningInfo { + last_soft_pruned_l1_batch: Some(L1BatchNumber(7)), + last_soft_pruned_miniblock: Some(MiniblockNumber(15)), + last_hard_pruned_l1_batch: Some(L1BatchNumber(7)), + last_hard_pruned_miniblock: Some(MiniblockNumber(15)), + }, + conn.pruning_dal().get_pruning_info().await.unwrap() + ); + } + + #[test(tokio::test)] + async fn unconstrained_pruner_with_fresh_database() { + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + + insert_miniblocks(&mut conn, 10, 2).await; + + let pruner = DbPruner::new( + DbPrunerConfig { + soft_and_hard_pruning_time_delta: Duration::from_secs(0), + pruned_batch_chunk_size: 3, + next_iterations_delay: Duration::from_secs(0), + }, + vec![], //No checks, so every batch is prunable + ) + .unwrap(); + + pruner.run_single_iteration(&pool).await.unwrap(); + + assert_eq!( + PruningInfo { + last_soft_pruned_l1_batch: Some(L1BatchNumber(3)), + last_soft_pruned_miniblock: Some(MiniblockNumber(7)), + last_hard_pruned_l1_batch: Some(L1BatchNumber(3)), + last_hard_pruned_miniblock: Some(MiniblockNumber(7)), + }, + conn.pruning_dal().get_pruning_info().await.unwrap() + ); + + pruner.run_single_iteration(&pool).await.unwrap(); + assert_eq!( + PruningInfo { + last_soft_pruned_l1_batch: Some(L1BatchNumber(6)), + last_soft_pruned_miniblock: Some(MiniblockNumber(13)), + last_hard_pruned_l1_batch: Some(L1BatchNumber(6)), + last_hard_pruned_miniblock: Some(MiniblockNumber(13)), + }, + conn.pruning_dal().get_pruning_info().await.unwrap() + ); + } + + #[test(tokio::test)] + async fn pruning_blocked_after_first_chunk() { + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + + insert_miniblocks(&mut conn, 10, 2).await; + + let first_chunk_prunable_check = Arc::new( + ConditionMock::name("first chunk prunable").with_response(L1BatchNumber(3), true), + ); + + let pruner = DbPruner::new( + DbPrunerConfig { + soft_and_hard_pruning_time_delta: Duration::from_secs(0), + pruned_batch_chunk_size: 3, + next_iterations_delay: Duration::from_secs(0), + }, + vec![first_chunk_prunable_check], + ) + .unwrap(); + + pruner.run_single_iteration(&pool).await.unwrap(); + + assert_eq!( + PruningInfo { + last_soft_pruned_l1_batch: 
Some(L1BatchNumber(3)), + last_soft_pruned_miniblock: Some(MiniblockNumber(7)), + last_hard_pruned_l1_batch: Some(L1BatchNumber(3)), + last_hard_pruned_miniblock: Some(MiniblockNumber(7)), + }, + conn.pruning_dal().get_pruning_info().await.unwrap() + ); + + pruner.run_single_iteration(&pool).await.unwrap(); + //pruning shouldn't have progressed as chunk 6 cannot be pruned + assert_eq!( + PruningInfo { + last_soft_pruned_l1_batch: Some(L1BatchNumber(3)), + last_soft_pruned_miniblock: Some(MiniblockNumber(7)), + last_hard_pruned_l1_batch: Some(L1BatchNumber(3)), + last_hard_pruned_miniblock: Some(MiniblockNumber(7)), + }, + conn.pruning_dal().get_pruning_info().await.unwrap() + ); + } +} diff --git a/core/lib/zksync_core/src/db_pruner/prune_conditions.rs b/core/lib/zksync_core/src/db_pruner/prune_conditions.rs new file mode 100644 index 00000000000..8d27c5fdb23 --- /dev/null +++ b/core/lib/zksync_core/src/db_pruner/prune_conditions.rs @@ -0,0 +1,107 @@ +use std::{ + fmt::{Debug, Formatter}, + time::Duration, +}; + +use async_trait::async_trait; +use chrono::Utc; +use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_types::L1BatchNumber; + +use crate::db_pruner::PruneCondition; + +pub struct L1BatchOlderThanPruneCondition { + pub minimal_age: Duration, + pub conn: ConnectionPool, +} + +impl Debug for L1BatchOlderThanPruneCondition { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "l1 Batch is older than {:?}", self.minimal_age) + } +} + +#[async_trait] +impl PruneCondition for L1BatchOlderThanPruneCondition { + async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result { + let mut storage = self.conn.connection().await?; + let l1_batch_header = storage + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await?; + let is_old_enough = l1_batch_header.is_some() + && (Utc::now().timestamp() as u64 - l1_batch_header.unwrap().timestamp + > self.minimal_age.as_secs()); + Ok(is_old_enough) + } +} + +pub struct NextL1BatchWasExecutedCondition { + pub conn: ConnectionPool, +} + +impl Debug for NextL1BatchWasExecutedCondition { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "next l1 batch was executed") + } +} + +#[async_trait] +impl PruneCondition for NextL1BatchWasExecutedCondition { + async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result { + let mut storage = self.conn.connection().await?; + let next_l1_batch_number = L1BatchNumber(l1_batch_number.0 + 1); + let last_executed_batch = storage + .blocks_dal() + .get_number_of_last_l1_batch_executed_on_eth() + .await?; + let was_next_batch_executed = + last_executed_batch.is_some() && last_executed_batch.unwrap() >= next_l1_batch_number; + Ok(was_next_batch_executed) + } +} + +pub struct NextL1BatchHasMetadataCondition { + pub conn: ConnectionPool, +} + +impl Debug for NextL1BatchHasMetadataCondition { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "next l1 batch has metadata") + } +} + +#[async_trait] +impl PruneCondition for NextL1BatchHasMetadataCondition { + async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result { + let mut storage = self.conn.connection().await?; + let next_l1_batch_number = L1BatchNumber(l1_batch_number.0 + 1); + let l1_batch_metadata = storage + .blocks_dal() + .get_l1_batch_metadata(next_l1_batch_number) + .await?; + Ok(l1_batch_metadata.is_some()) + } +} + +pub struct L1BatchExistsCondition { + pub conn: ConnectionPool, +} + +impl Debug for 
L1BatchExistsCondition {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "l1 batch exists")
+    }
+}
+
+#[async_trait]
+impl PruneCondition for L1BatchExistsCondition {
+    async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result<bool> {
+        let mut storage = self.conn.connection().await?;
+        let l1_batch_header = storage
+            .blocks_dal()
+            .get_l1_batch_header(l1_batch_number)
+            .await?;
+        Ok(l1_batch_header.is_some())
+    }
+}
diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs
index 273924577da..d1594d6a910 100644
--- a/core/lib/zksync_core/src/lib.rs
+++ b/core/lib/zksync_core/src/lib.rs
@@ -102,6 +102,7 @@ pub mod block_reverter;
 pub mod commitment_generator;
 pub mod consensus;
 pub mod consistency_checker;
+pub mod db_pruner;
 pub mod eth_sender;
 pub mod fee_model;
 pub mod gas_tracker;
diff --git a/core/lib/zksync_core/src/utils/mod.rs b/core/lib/zksync_core/src/utils/mod.rs
index 0a158f4437d..de8ce6217fa 100644
--- a/core/lib/zksync_core/src/utils/mod.rs
+++ b/core/lib/zksync_core/src/utils/mod.rs
@@ -68,6 +68,7 @@ pub(crate) async fn wait_for_l1_batch(
     poll_interval: Duration,
     stop_receiver: &mut watch::Receiver<bool>,
 ) -> anyhow::Result<Option<L1BatchNumber>> {
+    tracing::debug!("Waiting for at least one L1 batch in DB");
     loop {
         if *stop_receiver.borrow() {
             return Ok(None);
@@ -80,7 +81,6 @@ pub(crate) async fn wait_for_l1_batch(
         if let Some(number) = sealed_l1_batch_number {
             return Ok(Some(number));
         }
-        tracing::debug!("No L1 batches are present in DB; trying again in {poll_interval:?}");

         // We don't check the result: if a stop signal is received, we'll return at the start
         // of the next iteration.
diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs
index 713bc115f80..f45237a9bcd 100644
--- a/core/lib/zksync_core/src/utils/testonly.rs
+++ b/core/lib/zksync_core/src/utils/testonly.rs
@@ -329,6 +329,19 @@ pub(crate) async fn recover(
         .insert_initial_recovery_status(&snapshot_recovery)
         .await
         .unwrap();
+
+    storage
+        .pruning_dal()
+        .soft_prune_batches_range(snapshot.l1_batch.number, snapshot.miniblock.number)
+        .await
+        .unwrap();
+
+    storage
+        .pruning_dal()
+        .hard_prune_batches_range(snapshot.l1_batch.number, snapshot.miniblock.number)
+        .await
+        .unwrap();
+
     storage.commit().await.unwrap();
     snapshot_recovery
 }
diff --git a/core/tests/snapshot-recovery-test/tests/snapshot-recovery.test.ts b/core/tests/snapshot-recovery-test/tests/snapshot-recovery.test.ts
index c2fc53be3bf..f63f1b63691 100644
--- a/core/tests/snapshot-recovery-test/tests/snapshot-recovery.test.ts
+++ b/core/tests/snapshot-recovery-test/tests/snapshot-recovery.test.ts
@@ -89,7 +89,8 @@ describe('snapshot recovery', () => {
     console.log('Using external node env profile', externalNodeEnvProfile);
     const externalNodeEnv = {
         ...process.env,
-        ZKSYNC_ENV: externalNodeEnvProfile
+        ZKSYNC_ENV: externalNodeEnvProfile,
+        EN_SNAPSHOTS_RECOVERY_ENABLED: 'true'
     };

     let snapshotMetadata: GetSnapshotResponse;
@@ -222,7 +223,7 @@ describe('snapshot recovery', () => {
         externalNodeLogs = await fs.open('snapshot-recovery.log', 'w');

         const enableConsensus = process.env.ENABLE_CONSENSUS === 'true';
-        let args = ['external-node', '--', '--enable-snapshots-recovery'];
+        let args = ['external-node', '--'];
         if (enableConsensus) {
             args.push('--enable-consensus');
         }
diff --git a/infrastructure/zk/package.json b/infrastructure/zk/package.json
index a30538e6a08..b0408df3716 100644
--- a/infrastructure/zk/package.json
+++ b/infrastructure/zk/package.json
@@ -19,7 +19,8 @@ "handlebars": "^4.7.8", "node-fetch": "^2.6.1", "pg": "^8.11.3", - "tabtab": "^3.0.2" + "tabtab": "^3.0.2", + "enquirer": "^2.4.1" }, "devDependencies": { "@matterlabs/hardhat-zksync-solc": "^0.3.15", diff --git a/infrastructure/zk/src/fmt.ts b/infrastructure/zk/src/fmt.ts index 3f4d73c1c6c..30a04271c1d 100644 --- a/infrastructure/zk/src/fmt.ts +++ b/infrastructure/zk/src/fmt.ts @@ -52,6 +52,7 @@ export async function rustfmt(check: boolean = false) { export async function runAllRustFormatters(check: boolean = false) { // we need to run those two steps one by one as they operate on the same set of files + await rustfmt(check); await formatSqlxQueries(check); await rustfmt(check); } diff --git a/infrastructure/zk/src/index.ts b/infrastructure/zk/src/index.ts index a3af380e929..8d27d27b801 100644 --- a/infrastructure/zk/src/index.ts +++ b/infrastructure/zk/src/index.ts @@ -24,6 +24,7 @@ import { proverCommand } from './prover_setup'; import { command as status } from './status'; import { command as spellcheck } from './spellcheck'; import { command as linkcheck } from './linkcheck'; +import { command as setupEn } from './setup_en'; import * as env from './env'; const COMMANDS = [ @@ -52,6 +53,7 @@ const COMMANDS = [ status, spellcheck, linkcheck, + setupEn, completion(program as Command) ]; diff --git a/infrastructure/zk/src/setup_en.ts b/infrastructure/zk/src/setup_en.ts new file mode 100644 index 00000000000..81185ad0cc6 --- /dev/null +++ b/infrastructure/zk/src/setup_en.ts @@ -0,0 +1,202 @@ +import { Command } from 'commander'; +import { prompt } from 'enquirer'; +import chalk from 'chalk'; +import { compileConfig } from './config'; +import fs from 'fs'; +import path from 'path'; +import { set as setEnv } from './env'; +import { setup as setupDb } from './database'; +import * as utils from './utils'; + +enum Environment { + Mainnet = 'mainnet', + Testnet = 'testnet' +} + +enum DataRetentionDuration { + Hour = 'hour', + Day = 'day', + Week = 'week', + Month = 'month', + Year = 'year', + Forever = 'forever' +} + +async function selectDataRetentionDurationHours(): Promise { + const question = { + type: 'select', + name: 'retention', + message: 'Select how long do you want to keep newest transactions data', + choices: [ + { name: DataRetentionDuration.Hour, message: 'Hour', value: 1 }, + { name: DataRetentionDuration.Day, message: 'Day', value: 24 }, + { name: DataRetentionDuration.Week, message: 'Week', value: 24 * 7 }, + { name: DataRetentionDuration.Month, message: 'Month', value: 24 * 31 }, + { name: DataRetentionDuration.Year, message: 'Year', value: 24 * 366 }, + { name: DataRetentionDuration.Forever, message: 'Forever', value: null } + ] + }; + + const answer: { retention: DataRetentionDuration } = await prompt(question); + const choice = question.choices.find((choice) => choice.name === answer.retention); + return choice ? 
choice.value : null; +} + +async function selectEnvironment(): Promise { + const question = { + type: 'select', + name: 'environment', + message: 'Select the environment:', + choices: [ + { name: Environment.Testnet, message: 'Testnet (Sepolia)' }, + { name: Environment.Mainnet, message: 'Mainnet' } + ] + }; + + const answer: { environment: Environment } = await prompt(question); + return answer.environment; +} + +async function removeConfigKey(env: string, key: string) { + const filePath = path.join(path.join(process.env.ZKSYNC_HOME as string, `etc/env/${env}.toml`)); + const contents = await fs.promises.readFile(filePath, { encoding: 'utf-8' }); + + const modifiedContents = contents + .split('\n') + .filter((line) => !line.startsWith(`${key} =`) && !line.startsWith(`${key}=`)) + .join('\n'); + await fs.promises.writeFile(filePath, modifiedContents); +} + +async function changeConfigKey(env: string, key: string, newValue: string | number | boolean, section: string) { + const filePath = path.join(path.join(process.env.ZKSYNC_HOME as string, `etc/env/${env}.toml`)); + let contents = await fs.promises.readFile(filePath, { encoding: 'utf-8' }); + + const keyExists = + contents.split('\n').find((line) => line.startsWith(`${key} =`) || line.startsWith(`${key}=`)) !== undefined; + + if (!keyExists) { + contents = contents.replace(`\n[${section}]\n`, `\n[${section}]\n${key} =\n`); + } + + const modifiedContents = contents + .split('\n') + .map((line) => (line.startsWith(`${key} =`) ? `${key} = ${JSON.stringify(newValue)}` : line)) + .map((line) => (line.startsWith(`${key}=`) ? `${key}=${JSON.stringify(newValue)}` : line)) + .join('\n'); + await fs.promises.writeFile(filePath, modifiedContents); +} + +async function clearIfNeeded() { + const filePath = path.join(path.join(process.env.ZKSYNC_HOME as string, `etc/env/ext-node.env`)); + if (!fs.existsSync(filePath)) { + return true; + } + + const question = { + type: 'confirm', + name: 'cleanup', + message: + 'The external node files need to be cleared first, this will clear all its databases, do you want to continue?' + }; + + const answer: { cleanup: boolean } = await prompt(question); + if (!answer.cleanup) { + return false; + } + const cmd = chalk.yellow; + console.log(`cleaning up database (${cmd('zk clean --config ext-node --database')})`); + await utils.exec('zk clean --config ext-node --database'); + console.log(`cleaning up db (${cmd('zk db drop')})`); + await utils.exec('zk db drop'); + return true; +} + +async function runEnIfAskedTo() { + const question = { + type: 'confirm', + name: 'runRequested', + message: 'Do you want to run external-node now?' + }; + const answer: { runRequested: boolean } = await prompt(question); + if (!answer.runRequested) { + return false; + } + await utils.spawn('zk external-node'); +} + +async function commentOutConfigKey(env: string, key: string) { + const filePath = path.join(path.join(process.env.ZKSYNC_HOME as string, `etc/env/${env}.toml`)); + const contents = await fs.promises.readFile(filePath, { encoding: 'utf-8' }); + const modifiedContents = contents + .split('\n') + .map((line) => (line.startsWith(`${key} =`) || line.startsWith(`${key}=`) ? 
`#${line}` : line)) + .join('\n'); + await fs.promises.writeFile(filePath, modifiedContents); +} + +async function configExternalNode() { + const cmd = chalk.yellow; + const success = chalk.green; + const failure = chalk.red; + + console.log(`Changing active env to ext-node (${cmd('zk env ext-node')})`); + setEnv('ext-node'); + + const cleaningSucceeded = await clearIfNeeded(); + if (!cleaningSucceeded) { + console.log(failure('Cleanup not allowed, but needed to proceed, exiting!')); + return; + } + const env = await selectEnvironment(); + + const retention = await selectDataRetentionDurationHours(); + await commentOutConfigKey('ext-node', 'template_database_url'); + await changeConfigKey('ext-node', 'mode', 'GCSAnonymousReadOnly', 'en.snapshots.object_store'); + await changeConfigKey('ext-node', 'snapshots_recovery_enabled', true, 'en'); + if (retention !== null) { + await changeConfigKey('ext-node', 'pruning_data_retention_hours', retention, 'en'); + } else { + await removeConfigKey('ext-node', 'pruning_data_retention_hours'); + } + + switch (env) { + case Environment.Mainnet: + await changeConfigKey('ext-node', 'l1_chain_id', 1, 'en'); + await changeConfigKey('ext-node', 'l2_chain_id', 324, 'en'); + await changeConfigKey('ext-node', 'main_node_url', 'https://mainnet.era.zksync.io', 'en'); + await changeConfigKey('ext-node', 'eth_client_url', 'https://ethereum-rpc.publicnode.com', 'en'); + await changeConfigKey( + 'ext-node', + 'bucket_base_url', + 'zksync-era-mainnet-external-node-snapshots', + 'en.snapshots.object_store' + ); + break; + case Environment.Testnet: + await changeConfigKey('ext-node', 'l1_chain_id', 11155111, 'en'); + await changeConfigKey('ext-node', 'l2_chain_id', 300, 'en'); + await changeConfigKey('ext-node', 'main_node_url', 'https://sepolia.era.zksync.dev', 'en'); + await changeConfigKey('ext-node', 'eth_client_url', 'https://ethereum-sepolia-rpc.publicnode.com', 'en'); + await changeConfigKey( + 'ext-node', + 'bucket_base_url', + 'zksync-era-boojnet-external-node-snapshots', + 'en.snapshots.object_store' + ); + break; + } + await compileConfig('ext-node'); + setEnv('ext-node'); + console.log(`Setting up postgres (${cmd('zk db setup')})`); + await setupDb({ prover: false, core: true }); + + console.log(`${success('Everything done!')} You can now run your external node using ${cmd('zk external-node')}`); + await runEnIfAskedTo(); +} + +export const command = new Command('setup-external-node') + .description('prepare local setup for running external-node on mainnet/testnet') + .action(async (_: Command) => { + await configExternalNode(); + }); From 932b14b6ddee35375fbc302523da2d4d37f1d46b Mon Sep 17 00:00:00 2001 From: Stanislav Bezkorovainyi Date: Thu, 11 Apr 2024 13:59:49 +0200 Subject: [PATCH 16/29] fix(vm): Fix storage oracle and estimation (#1634) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Makes "free" slots also free with regard to I/O costs. These are context / operator balance slots which are global and warm for everyone. - Fixes a bug with setting the initial value inside storage oracle. This bug only affects 1.5.0. - The gas used for pubdata is also multiplied by the `estimated_fee_scale_factor`. For "small" transactions, e.g. transfers this gives us the same estimation overhead as in the previous (v1.4.1) system. But this will produce a bit larger overhead for big transactions that involve a lot of pubdata, but this is the correct consistent behavior and it should not affect most of the users. 
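To see the third point with concrete numbers, here is a small sketch comparing where the scale factor is applied before and after this change; the values are made up purely for illustration, and the old path's capping at `MAX_L2_TX_GAS_LIMIT` is omitted for brevity:

```rust
fn main() {
    // Hypothetical values, chosen only for illustration.
    let upper_bound: u64 = 100_000; // computational gas found by the binary search
    let additional_gas_for_pubdata: u64 = 20_000;
    let estimated_fee_scale_factor = 1.5; // exactly representable, to keep the arithmetic clean

    // Before: only the tx body gas was scaled; pubdata gas was added unscaled.
    let old = (upper_bound as f64 * estimated_fee_scale_factor) as u64 + additional_gas_for_pubdata;
    // After: pubdata gas is multiplied by the scale factor as well.
    let new = ((upper_bound + additional_gas_for_pubdata) as f64 * estimated_fee_scale_factor) as u64;

    assert_eq!(old, 170_000);
    assert_eq!(new, 180_000); // the estimate now grows with pubdata usage, as described above
}
```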
## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `zk spellcheck`. - [ ] Linkcheck has been run via `zk linkcheck`. --- .../src/versions/vm_latest/oracles/storage.rs | 14 ++++-- .../src/api_server/tx_sender/mod.rs | 48 +++++++++---------- 2 files changed, 33 insertions(+), 29 deletions(-) diff --git a/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs b/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs index 2f2d3891340..b2d8b3ccd72 100644 --- a/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs @@ -238,7 +238,9 @@ impl StorageOracle { if query.rw_flag { // It is a write - if self.written_storage_keys.inner().contains_key(&key) { + if self.written_storage_keys.inner().contains_key(&key) + || self.is_storage_key_free(&key) + { // It is a warm write StorageAccessRefund::Warm { ergs: WARM_WRITE_REFUND, @@ -253,7 +255,9 @@ impl StorageOracle { // It is a cold write StorageAccessRefund::Cold } - } else if self.read_storage_keys.inner().contains_key(&key) { + } else if self.read_storage_keys.inner().contains_key(&key) + || self.is_storage_key_free(&key) + { // It is a warm read StorageAccessRefund::Warm { ergs: WARM_READ_REFUND, @@ -362,9 +366,9 @@ impl VmStorageOracle for StorageOracle { .insert(storage_key, (), query.timestamp); } - self.set_initial_value(&storage_key, query.read_value, query.timestamp); - - self.storage.read_from_storage(&storage_key) + let read_value = self.storage.read_from_storage(&storage_key); + self.set_initial_value(&storage_key, read_value, query.timestamp); + read_value } else { // Just in case unreachable!(); diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs index 171bd665196..84c76e84d56 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs @@ -1,6 +1,6 @@ //! Helper module to submit transactions into the zkSync Network. 
-use std::{cmp, sync::Arc, time::Instant};
+use std::{sync::Arc, time::Instant};

 use anyhow::Context as _;
 use multivm::{
@@ -846,12 +846,8 @@ impl TxSender {
             .estimate_gas_binary_search_iterations
             .observe(number_of_iterations);

-        let tx_body_gas_limit = cmp::min(
-            MAX_L2_TX_GAS_LIMIT,
-            ((upper_bound as f64) * estimated_fee_scale_factor) as u64,
-        );
-
-        let suggested_gas_limit = tx_body_gas_limit + additional_gas_for_pubdata;
+        let suggested_gas_limit =
+            ((upper_bound + additional_gas_for_pubdata) as f64 * estimated_fee_scale_factor) as u64;
         let (result, tx_metrics) = self
             .estimate_gas_step(
                 vm_permit,
@@ -878,32 +874,36 @@ impl TxSender {
             protocol_version.into(),
         ) as u64;

-        let full_gas_limit =
-            match tx_body_gas_limit.overflowing_add(additional_gas_for_pubdata + overhead) {
-                (value, false) => {
-                    if value > max_gas_limit {
-                        return Err(SubmitTxError::ExecutionReverted(
-                            "exceeds block gas limit".to_string(),
-                            vec![],
-                        ));
-                    }
-
-                    value
-                }
-                (_, true) => {
+        let full_gas_limit = match suggested_gas_limit.overflowing_add(overhead) {
+            (value, false) => {
+                if value > max_gas_limit {
                     return Err(SubmitTxError::ExecutionReverted(
                         "exceeds block gas limit".to_string(),
                         vec![],
                     ));
                 }
-            };
+
+                value
+            }
+            (_, true) => {
+                return Err(SubmitTxError::ExecutionReverted(
+                    "exceeds block gas limit".to_string(),
+                    vec![],
+                ));
+            }
+        };
+
+        let gas_for_pubdata = (tx_metrics.pubdata_published as u64) * gas_per_pubdata_byte;
+        let estimated_gas_for_pubdata =
+            (gas_for_pubdata as f64 * estimated_fee_scale_factor) as u64;

         tracing::info!(
             initiator = ?tx.initiator_account(),
             nonce = %tx.nonce().unwrap_or(Nonce(0)),
-            "fee estimation: gas for pubdata: {}, tx body gas: {tx_body_gas_limit}, overhead gas: {overhead} \
-            (with params base_fee: {base_fee}, gas_per_pubdata_byte: {gas_per_pubdata_byte})",
-            (tx_metrics.pubdata_published as u64) * gas_per_pubdata_byte,
+            "fee estimation: gas for pubdata: {estimated_gas_for_pubdata}, computational gas: {}, overhead gas: {overhead} \
+            (with params base_fee: {base_fee}, gas_per_pubdata_byte: {gas_per_pubdata_byte}) \
+            estimated_fee_scale_factor: {estimated_fee_scale_factor}",
+            suggested_gas_limit - estimated_gas_for_pubdata,
         );

         Ok(Fee {

From 9a22fa07b0c577dbcdc48450ae209c83772a1fc0 Mon Sep 17 00:00:00 2001
From: Joaquin Carletti <56092489+ColoCarletti@users.noreply.github.com>
Date: Thu, 11 Apr 2024 17:05:32 +0300
Subject: [PATCH 17/29] feat: Prover CLI Scaffoldings (#1609)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

This PR implements the scaffolding for a CLI to interact with the prover. Additionally, it integrates the
functionality of the `tools` crate.

## Why ❔

The purpose of this CLI is to simplify the process of interacting with the prover and enhance its usability. Users can
easily interact with the prover without requiring specialized insider knowledge, ensuring that it can be managed
without direct database access and complex manual queries.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
- [ ] Spellcheck has been run via `zk spellcheck`.
- [ ] Linkcheck has been run via `zk linkcheck`.
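The scaffolding's shape is visible in `prover_cli/src/cli.rs` in the diff below: one `Subcommand` enum variant plus one dispatch arm per command. As a quick illustration of how a future command would slot in (the `Status` command and its `--batch` flag are invented here, not part of this PR):

```rust
use clap::{command, Args, Parser, Subcommand};

#[derive(Args)]
struct StatusArgs {
    /// L1 batch number to inspect (hypothetical flag).
    #[clap(short, long)]
    batch: u64,
}

#[derive(Subcommand)]
enum ProverCommand {
    // FileInfo(get_file_info::Args), // the command added by this PR
    Status(StatusArgs), // a future command is just another variant...
}

#[derive(Parser)]
#[command(name = "prover-cli", about, long_about = None)]
struct ProverCli {
    #[command(subcommand)]
    command: ProverCommand,
}

fn main() -> anyhow::Result<()> {
    // ...plus one dispatch arm in the match.
    match ProverCli::parse().command {
        ProverCommand::Status(args) => println!("would query status of batch {}", args.batch),
    }
    Ok(())
}
```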
--------- Co-authored-by: Lech <88630083+Artemka374@users.noreply.github.com> Co-authored-by: Ivan Litteri <67517699+ilitteri@users.noreply.github.com> --- prover/Cargo.lock | 53 ++++++++++++++----- prover/Cargo.toml | 2 +- prover/{tools => prover_cli}/Cargo.toml | 18 +++++-- prover/{tools => prover_cli}/README.md | 4 +- prover/prover_cli/src/cli.rs | 26 +++++++++ .../src/commands/get_file_info.rs} | 47 ++++------------ prover/prover_cli/src/commands/mod.rs | 1 + prover/prover_cli/src/lib.rs | 2 + prover/prover_cli/src/main.rs | 10 ++++ 9 files changed, 106 insertions(+), 57 deletions(-) rename prover/{tools => prover_cli}/Cargo.toml (58%) rename prover/{tools => prover_cli}/README.md (93%) create mode 100644 prover/prover_cli/src/cli.rs rename prover/{tools/src/main.rs => prover_cli/src/commands/get_file_info.rs} (90%) create mode 100644 prover/prover_cli/src/commands/mod.rs create mode 100644 prover/prover_cli/src/lib.rs create mode 100644 prover/prover_cli/src/main.rs diff --git a/prover/Cargo.lock b/prover/Cargo.lock index a0f470ce63e..45f591dbfe6 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -1756,7 +1756,11 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" dependencies = [ + "humantime", + "is-terminal", "log", + "regex", + "termcolor", ] [[package]] @@ -2748,6 +2752,17 @@ dependencies = [ "serde", ] +[[package]] +name = "is-terminal" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +dependencies = [ + "hermit-abi 0.3.6", + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "itertools" version = "0.10.5" @@ -4315,6 +4330,30 @@ dependencies = [ "thiserror", ] +[[package]] +name = "prover_cli" +version = "0.1.0" +dependencies = [ + "anyhow", + "bincode", + "clap 4.4.6", + "colored", + "env_logger 0.10.2", + "hex", + "log", + "prover_dal", + "tokio", + "tracing", + "tracing-subscriber", + "zksync_basic_types", + "zksync_config", + "zksync_db_connection", + "zksync_env_config", + "zksync_prover_fri_types", + "zksync_prover_interface", + "zksync_types", +] + [[package]] name = "prover_dal" version = "0.1.0" @@ -6246,20 +6285,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tools" -version = "0.1.0" -dependencies = [ - "bincode", - "clap 4.4.6", - "colored", - "hex", - "tracing", - "tracing-subscriber", - "zksync_prover_fri_types", - "zksync_prover_interface", -] - [[package]] name = "tower" version = "0.4.13" diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 5968bdccf65..11412ec5289 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -12,7 +12,7 @@ members = [ "witness_vector_generator", "prover_fri_gateway", "proof_fri_compressor", - "tools", + "prover_cli", ] resolver = "2" diff --git a/prover/tools/Cargo.toml b/prover/prover_cli/Cargo.toml similarity index 58% rename from prover/tools/Cargo.toml rename to prover/prover_cli/Cargo.toml index 66df1e99db4..994a8f93a84 100644 --- a/prover/tools/Cargo.toml +++ b/prover/prover_cli/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "tools" +name = "prover_cli" version.workspace = true edition.workspace = true authors.workspace = true @@ -10,11 +10,23 @@ keywords.workspace = true categories.workspace = true [dependencies] +tokio = { version = "1", features = ["rt-multi-thread", "macros"] } +env_logger = "0.10" +log = "0.4" + clap = { workspace = true, features = ["derive"] } 
tracing.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter"] } -zksync_prover_fri_types.workspace = true bincode.workspace = true colored.workspace = true +hex.workspace = true +anyhow.workspace = true +zksync_config.workspace = true +zksync_env_config.workspace = true +zksync_db_connection.workspace = true +zksync_basic_types.workspace = true +zksync_types.workspace = true +zksync_prover_fri_types.workspace = true zksync_prover_interface.workspace = true -hex.workspace = true \ No newline at end of file +prover_dal.workspace = true + diff --git a/prover/tools/README.md b/prover/prover_cli/README.md similarity index 93% rename from prover/tools/README.md rename to prover/prover_cli/README.md index 35778faa687..25bbe95fc4d 100644 --- a/prover/tools/README.md +++ b/prover/prover_cli/README.md @@ -1,9 +1,9 @@ -# Tool to better understand and debug provers +# CLI to better understand and debug provers For now, it has only one command 'file-info' ``` -cargo run --release file-info /zksync-era/prover/artifacts/proofs_fri/l1_batch_proof_1.bin +cargo run -- file-info --file-path /zksync-era/prover/artifacts/proofs_fri/l1_batch_proof_1.bin ``` Example outputs: diff --git a/prover/prover_cli/src/cli.rs b/prover/prover_cli/src/cli.rs new file mode 100644 index 00000000000..844387f983e --- /dev/null +++ b/prover/prover_cli/src/cli.rs @@ -0,0 +1,26 @@ +use clap::{command, Parser, Subcommand}; + +use crate::commands::get_file_info; + +pub const VERSION_STRING: &str = env!("CARGO_PKG_VERSION"); + +#[derive(Parser)] +#[command(name="prover-cli", version=VERSION_STRING, about, long_about = None)] +struct ProverCLI { + #[command(subcommand)] + command: ProverCommand, +} + +#[derive(Subcommand)] +enum ProverCommand { + FileInfo(get_file_info::Args), +} + +pub async fn start() -> anyhow::Result<()> { + let ProverCLI { command } = ProverCLI::parse(); + match command { + ProverCommand::FileInfo(args) => get_file_info::run(args).await?, + }; + + Ok(()) +} diff --git a/prover/tools/src/main.rs b/prover/prover_cli/src/commands/get_file_info.rs similarity index 90% rename from prover/tools/src/main.rs rename to prover/prover_cli/src/commands/get_file_info.rs index f7df2147fac..ff32f56a22c 100644 --- a/prover/tools/src/main.rs +++ b/prover/prover_cli/src/commands/get_file_info.rs @@ -1,8 +1,7 @@ use std::fs; -use clap::{Parser, Subcommand}; +use clap::Args as ClapArgs; use colored::Colorize; -use tracing::level_filters::LevelFilter; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::{ @@ -18,23 +17,10 @@ use zksync_prover_fri_types::{ }; use zksync_prover_interface::outputs::L1BatchProofForL1; -#[derive(Debug, Parser)] -#[command( - author = "Matter Labs", - version, - about = "Debugging tools for prover related things", - long_about = None -)] - -struct Cli { - #[command(subcommand)] - command: Command, -} - -#[derive(Debug, Subcommand)] -enum Command { - #[command(name = "file-info")] - FileInfo { file_path: String }, +#[derive(ClapArgs)] +pub(crate) struct Args { + #[clap(short, long)] + file_path: String, } fn pretty_print_size_hint(size_hint: (Option, Option)) { @@ -204,7 +190,8 @@ fn pretty_print_l1_proof(result: &L1BatchProofForL1) { println!(" This proof will pass on L1, if L1 executor computes the block commitment that is matching exactly the Inputs value above"); } -fn file_info(path: String) { +pub(crate) async fn run(args: Args) -> anyhow::Result<()> { + let path = args.file_path; println!("Reading file {} and guessing the type.", path.bold()); let bytes = 
fs::read(path).unwrap(); @@ -214,14 +201,14 @@ fn file_info(path: String) { if let Some(circuit) = maybe_circuit { println!(" Parsing file as CircuitWrapper."); pretty_print_circuit_wrapper(&circuit); - return; + return Ok(()); } println!(" NOT a CircuitWrapper."); let maybe_fri_proof: Option = bincode::deserialize(&bytes).ok(); if let Some(fri_proof) = maybe_fri_proof { println!(" Parsing file as FriProofWrapper."); pretty_print_proof(&fri_proof); - return; + return Ok(()); } println!(" NOT a FriProofWrapper."); @@ -232,19 +219,5 @@ fn file_info(path: String) { } else { println!(" NOT a L1BatchProof."); } -} - -fn main() { - tracing_subscriber::fmt() - .with_env_filter( - tracing_subscriber::EnvFilter::builder() - .with_default_directive(LevelFilter::INFO.into()) - .from_env_lossy(), - ) - .init(); - - let opt = Cli::parse(); - match opt.command { - Command::FileInfo { file_path } => file_info(file_path), - } + Ok(()) } diff --git a/prover/prover_cli/src/commands/mod.rs b/prover/prover_cli/src/commands/mod.rs new file mode 100644 index 00000000000..3e9a45cb72a --- /dev/null +++ b/prover/prover_cli/src/commands/mod.rs @@ -0,0 +1 @@ +pub(crate) mod get_file_info; diff --git a/prover/prover_cli/src/lib.rs b/prover/prover_cli/src/lib.rs new file mode 100644 index 00000000000..3ef8b313f0c --- /dev/null +++ b/prover/prover_cli/src/lib.rs @@ -0,0 +1,2 @@ +pub mod cli; +mod commands; diff --git a/prover/prover_cli/src/main.rs b/prover/prover_cli/src/main.rs new file mode 100644 index 00000000000..f2a7dd71026 --- /dev/null +++ b/prover/prover_cli/src/main.rs @@ -0,0 +1,10 @@ +use prover_cli::cli; + +#[tokio::main] +async fn main() { + env_logger::builder() + .filter_level(log::LevelFilter::Debug) + .init(); + + cli::start().await.unwrap(); +} From 7dbabacfb171cc2122f2ac63d97539d4879ae311 Mon Sep 17 00:00:00 2001 From: pompon0 Date: Thu, 11 Apr 2024 16:48:38 +0200 Subject: [PATCH 18/29] chore: bumped consensus deps (#1648) includes https://github.com/matter-labs/era-consensus/pull/92 --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 20 ++++++++++---------- prover/Cargo.lock | 14 +++++++------- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f5431601d65..32a1e4bcc17 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8117,7 +8117,7 @@ dependencies = [ [[package]] name = "zksync_concurrency" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "once_cell", @@ -8147,7 +8147,7 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "async-trait", @@ -8168,7 +8168,7 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = 
"git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "blst", @@ -8186,7 +8186,7 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "rand 0.8.5", @@ -8206,7 +8206,7 @@ dependencies = [ [[package]] name = "zksync_consensus_network" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "async-trait", @@ -8231,7 +8231,7 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "bit-vec", @@ -8251,7 +8251,7 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "async-trait", @@ -8269,7 +8269,7 @@ dependencies = [ [[package]] name = "zksync_consensus_sync_blocks" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "thiserror", @@ -8284,7 +8284,7 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "rand 0.8.5", "thiserror", @@ -8743,7 +8743,7 @@ dependencies = [ [[package]] name = "zksync_protobuf" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "bit-vec", @@ -8763,7 +8763,7 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = 
"git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "heck 0.5.0", diff --git a/Cargo.toml b/Cargo.toml index eda4e152758..2482c5711fa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -169,16 +169,16 @@ zk_evm_1_3_3 = { package = "zk_evm", git = "https://github.com/matter-labs/era-z zk_evm_1_4_0 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.0" } zk_evm_1_4_1 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.1" } zk_evm_1_5_0 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.5.0" } -zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } -zksync_consensus_bft = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } -zksync_consensus_crypto = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } -zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } -zksync_consensus_network = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } -zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } -zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } -zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } -zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "d17c018b94f8d186e7fe701e947af8ef1953fa3b" } +zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } +zksync_consensus_bft = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } +zksync_consensus_crypto = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } +zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } +zksync_consensus_network = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } +zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } +zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } +zksync_protobuf = { 
version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } # "Local" dependencies multivm = { path = "core/lib/multivm" } diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 45f591dbfe6..d20418a2d3f 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7501,7 +7501,7 @@ dependencies = [ [[package]] name = "zksync_concurrency" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "once_cell", @@ -7531,7 +7531,7 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "blst", @@ -7549,7 +7549,7 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "bit-vec", @@ -7569,7 +7569,7 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "async-trait", @@ -7587,7 +7587,7 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "rand 0.8.5", "thiserror", @@ -7773,7 +7773,7 @@ dependencies = [ [[package]] name = "zksync_protobuf" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ "anyhow", "bit-vec", @@ -7793,7 +7793,7 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=d17c018b94f8d186e7fe701e947af8ef1953fa3b#d17c018b94f8d186e7fe701e947af8ef1953fa3b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" dependencies = [ 
"anyhow", "heck 0.5.0", From 5ed92b9810cdb0dc0ceea594a8828cfbbf067006 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Grze=C5=9Bkiewicz?= Date: Thu, 11 Apr 2024 17:20:31 +0200 Subject: [PATCH 19/29] feat(en): add consistency checker condition in db pruner (#1653) Signed-off-by: tomg10 --- core/bin/external_node/src/main.rs | 8 +++++-- .../src/db_pruner/prune_conditions.rs | 22 +++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index ff95b3a1704..adc5f116557 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -26,8 +26,9 @@ use zksync_core::{ consistency_checker::ConsistencyChecker, db_pruner::{ prune_conditions::{ - L1BatchExistsCondition, L1BatchOlderThanPruneCondition, - NextL1BatchHasMetadataCondition, NextL1BatchWasExecutedCondition, + ConsistencyCheckerProcessedBatch, L1BatchExistsCondition, + L1BatchOlderThanPruneCondition, NextL1BatchHasMetadataCondition, + NextL1BatchWasExecutedCondition, }, DbPruner, DbPrunerConfig, }, @@ -288,6 +289,9 @@ async fn run_core( minimal_age: l1_batch_age_to_prune, conn: connection_pool.clone(), }), + Arc::new(ConsistencyCheckerProcessedBatch { + conn: connection_pool.clone(), + }), ], )?; task_handles.push(tokio::spawn( diff --git a/core/lib/zksync_core/src/db_pruner/prune_conditions.rs b/core/lib/zksync_core/src/db_pruner/prune_conditions.rs index 8d27c5fdb23..38cd9844835 100644 --- a/core/lib/zksync_core/src/db_pruner/prune_conditions.rs +++ b/core/lib/zksync_core/src/db_pruner/prune_conditions.rs @@ -105,3 +105,25 @@ impl PruneCondition for L1BatchExistsCondition { Ok(l1_batch_header.is_some()) } } + +pub struct ConsistencyCheckerProcessedBatch { + pub conn: ConnectionPool, +} + +impl Debug for ConsistencyCheckerProcessedBatch { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "l1 batch was processed by consistency checker") + } +} + +#[async_trait] +impl PruneCondition for ConsistencyCheckerProcessedBatch { + async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result { + let mut storage = self.conn.connection().await?; + let last_processed_l1_batch = storage + .blocks_dal() + .get_consistency_checker_last_processed_l1_batch() + .await?; + Ok(l1_batch_number <= last_processed_l1_batch) + } +} From c818be362aef9244bb644a2c50a620ac1ef077b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Grze=C5=9Bkiewicz?= Date: Fri, 12 Apr 2024 15:13:21 +0200 Subject: [PATCH 20/29] feat(en): add manual vacuum step in db pruning (#1652) Signed-off-by: tomg10 --- ...dd70e04bd6e714c8b763efdfe0259a5dd4128.json | 12 ++++++++++ core/lib/dal/src/pruning_dal.rs | 23 ++++++++++++++++++- core/lib/zksync_core/src/db_pruner/mod.rs | 6 +++++ 3 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 core/lib/dal/.sqlx/query-b66045a20fd41f44627e5f1360cdd70e04bd6e714c8b763efdfe0259a5dd4128.json diff --git a/core/lib/dal/.sqlx/query-b66045a20fd41f44627e5f1360cdd70e04bd6e714c8b763efdfe0259a5dd4128.json b/core/lib/dal/.sqlx/query-b66045a20fd41f44627e5f1360cdd70e04bd6e714c8b763efdfe0259a5dd4128.json new file mode 100644 index 00000000000..93f485f255f --- /dev/null +++ b/core/lib/dal/.sqlx/query-b66045a20fd41f44627e5f1360cdd70e04bd6e714c8b763efdfe0259a5dd4128.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "\n VACUUM l1_batches,\n miniblocks,\n storage_logs,\n events,\n call_traces,\n l2_to_l1_logs,\n transactions\n ", + "describe": { + "columns": [], + "parameters": { 
+ "Left": [] + }, + "nullable": [] + }, + "hash": "b66045a20fd41f44627e5f1360cdd70e04bd6e714c8b763efdfe0259a5dd4128" +} diff --git a/core/lib/dal/src/pruning_dal.rs b/core/lib/dal/src/pruning_dal.rs index e52e856c862..7f0c54f2901 100644 --- a/core/lib/dal/src/pruning_dal.rs +++ b/core/lib/dal/src/pruning_dal.rs @@ -404,7 +404,7 @@ impl PruningDal<'_, '_> { i64::from(last_miniblock_to_prune.0), PruneType::Hard as PruneType ) - .instrument("soft_prune_batches_range#insert_pruning_log") + .instrument("hard_prune_batches_range#insert_pruning_log") .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) .with_arg("prune_type", &PruneType::Hard) @@ -414,4 +414,25 @@ impl PruningDal<'_, '_> { Ok(()) } + + // This method must be separate as VACUUM is not supported inside a transaction + pub async fn run_vacuum_after_hard_pruning(&mut self) -> DalResult<()> { + sqlx::query!( + r#" + VACUUM l1_batches, + miniblocks, + storage_logs, + events, + call_traces, + l2_to_l1_logs, + transactions + "#, + ) + .instrument("hard_prune_batches_range#vacuum") + .report_latency() + .execute(self.storage) + .await?; + + Ok(()) + } } diff --git a/core/lib/zksync_core/src/db_pruner/mod.rs b/core/lib/zksync_core/src/db_pruner/mod.rs index e54fc11ae8e..17057ced881 100644 --- a/core/lib/zksync_core/src/db_pruner/mod.rs +++ b/core/lib/zksync_core/src/db_pruner/mod.rs @@ -141,6 +141,12 @@ impl DbPruner { transaction.commit().await?; + let mut storage = pool.connection_tagged("db_pruner").await?; + storage + .pruning_dal() + .run_vacuum_after_hard_pruning() + .await?; + let latency = latency.observe(); tracing::info!( "Hard pruned db l1_batches up to {} and miniblocks up to {}, operation took {:?}", From 45164fa6cb174c04c8542246cdb79c4f393339af Mon Sep 17 00:00:00 2001 From: Marcin M <128217157+mm-zk@users.noreply.github.com> Date: Fri, 12 Apr 2024 15:57:56 +0200 Subject: [PATCH 21/29] feat(genesis): Add genesis config generator (#1671) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ * Tool to create new genesis.yaml file * Update the genesis.yaml to v23 ## Why ❔ * To automate the process, before we introduce hyperchains. 
---------

Signed-off-by: Danil
Co-authored-by: Danil
---
 Cargo.lock                              |  23 +++++
 Cargo.toml                              |   1 +
 core/bin/genesis_generator/Cargo.toml   |  34 +++++++
 core/bin/genesis_generator/src/main.rs  | 113 ++++++++++++++++++++
 core/lib/config/src/configs/genesis.rs  |   4 +-
 core/lib/protobuf_config/src/genesis.rs |   2 +-
 etc/env/file_based/genesis.yaml         |  27 +++---
 infrastructure/zk/src/run.ts            |  13 +++
 8 files changed, 201 insertions(+), 16 deletions(-)
 create mode 100644 core/bin/genesis_generator/Cargo.toml
 create mode 100644 core/bin/genesis_generator/src/main.rs

diff --git a/Cargo.lock b/Cargo.lock
index 32a1e4bcc17..f0ee6540d8a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2324,6 +2324,29 @@ dependencies = [
  "zeroize",
 ]

+[[package]]
+name = "genesis_generator"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "clap 4.4.6",
+ "futures 0.3.28",
+ "serde",
+ "serde_json",
+ "serde_yaml",
+ "tokio",
+ "tracing",
+ "zksync_config",
+ "zksync_contracts",
+ "zksync_core",
+ "zksync_dal",
+ "zksync_env_config",
+ "zksync_protobuf",
+ "zksync_protobuf_config",
+ "zksync_types",
+ "zksync_utils",
+]
+
 [[package]]
 name = "getrandom"
 version = "0.2.10"
diff --git a/Cargo.toml b/Cargo.toml
index 2482c5711fa..1216fc810a2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -9,6 +9,7 @@ members = [
     "core/bin/system-constants-generator",
     "core/bin/verified_sources_fetcher",
     "core/bin/zksync_server",
+    "core/bin/genesis_generator",
     # Node services
     "core/node/node_framework",
     # Libraries
diff --git a/core/bin/genesis_generator/Cargo.toml b/core/bin/genesis_generator/Cargo.toml
new file mode 100644
index 00000000000..55b0cc24c2a
--- /dev/null
+++ b/core/bin/genesis_generator/Cargo.toml
@@ -0,0 +1,34 @@
+[package]
+name = "genesis_generator"
+version.workspace = true
+edition.workspace = true
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+license.workspace = true
+keywords.workspace = true
+categories.workspace = true
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+zksync_config.workspace = true
+zksync_env_config.workspace = true
+zksync_protobuf_config.workspace = true
+zksync_utils.workspace = true
+zksync_types.workspace = true
+zksync_core.workspace = true
+zksync_dal.workspace = true
+zksync_contracts.workspace = true
+zksync_protobuf.workspace = true
+
+
+anyhow.workspace = true
+serde_json.workspace = true
+serde_yaml.workspace = true
+serde.workspace = true
+tokio = { workspace = true, features = ["full"] }
+tracing.workspace = true
+futures.workspace = true
+clap = { workspace = true, features = ["derive"] }
+
diff --git a/core/bin/genesis_generator/src/main.rs b/core/bin/genesis_generator/src/main.rs
new file mode 100644
index 00000000000..f3cedd90c48
--- /dev/null
+++ b/core/bin/genesis_generator/src/main.rs
@@ -0,0 +1,113 @@
+/// Each protocol upgrade requires updating the genesis config values.
+/// This tool generates a new, correct genesis file that can be used for the new chain.
+/// Please note that this tool only updates the YAML file; if you still use env-based
+/// configuration, update the env values correspondingly.
+use std::fs;
+
+use anyhow::Context as _;
+use clap::Parser;
+use serde_yaml::Serializer;
+use zksync_config::{GenesisConfig, PostgresConfig};
+use zksync_contracts::BaseSystemContracts;
+use zksync_core::{
+    genesis::{insert_genesis_batch, GenesisParams},
+    temp_config_store::decode_yaml_repr,
+};
+use zksync_dal::{ConnectionPool, Core, CoreDal};
+use zksync_env_config::FromEnv;
+use zksync_protobuf::{
+    build::{prost_reflect, prost_reflect::ReflectMessage},
+    ProtoRepr,
+};
+use zksync_protobuf_config::proto::genesis::Genesis;
+use zksync_types::ProtocolVersionId;
+
+const DEFAULT_GENESIS_FILE_PATH: &str = "./etc/env/file_based//genesis.yaml";
+
+#[derive(Debug, Parser)]
+#[command(author = "Matter Labs", version, about = "zkSync operator node", long_about = None)]
+struct Cli {
+    #[arg(long)]
+    config_path: Option<std::path::PathBuf>,
+    #[arg(long, default_value = "false")]
+    check: bool,
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    let opt = Cli::parse();
+
+    let postgres_config = match opt.config_path {
+        None => PostgresConfig::from_env()?,
+        Some(path) => {
+            let yaml =
+                std::fs::read_to_string(&path).with_context(|| path.display().to_string())?;
+            let config =
+                decode_yaml_repr::<zksync_protobuf_config::proto::general::GeneralConfig>(&yaml)
+                    .context("failed decoding general YAML config")?;
+            config
+                .postgres_config
+                .context("Postgres config must exist")?
+        }
+    };
+
+    let yaml = std::fs::read_to_string(DEFAULT_GENESIS_FILE_PATH)
+        .with_context(|| DEFAULT_GENESIS_FILE_PATH.to_string())?;
+    let original_genesis = decode_yaml_repr::<Genesis>(&yaml)?;
+    let db_url = postgres_config.master_url()?;
+    let new_genesis = generate_new_config(db_url, original_genesis.clone()).await?;
+    if opt.check {
+        assert_eq!(&original_genesis, &new_genesis);
+        println!("Genesis config is up to date");
+        return Ok(());
+    }
+    let data = encode_yaml(&Genesis::build(&new_genesis))?;
+    fs::write(DEFAULT_GENESIS_FILE_PATH, data)?;
+    Ok(())
+}
+
+async fn generate_new_config(
+    db_url: &str,
+    genesis_config: GenesisConfig,
+) -> anyhow::Result<GenesisConfig> {
+    let pool = ConnectionPool::<Core>::singleton(db_url)
+        .build()
+        .await
+        .context("failed to build connection_pool")?;
+    let mut storage = pool.connection().await.context("connection()")?;
+    let mut transaction = storage.start_transaction().await?;
+
+    if !transaction.blocks_dal().is_genesis_needed().await? {
+        anyhow::bail!("Please cleanup database for regenerating genesis")
+    }
+
+    let base_system_contracts = BaseSystemContracts::load_from_disk().hashes();
+    let mut updated_genesis = GenesisConfig {
+        protocol_version: Some(ProtocolVersionId::latest() as u16),
+        genesis_root_hash: None,
+        rollup_last_leaf_index: None,
+        genesis_commitment: None,
+        bootloader_hash: Some(base_system_contracts.bootloader),
+        default_aa_hash: Some(base_system_contracts.default_aa),
+        ..genesis_config
+    };
+
+    let params = GenesisParams::load_genesis_params(updated_genesis.clone())?;
+    let batch_params = insert_genesis_batch(&mut transaction, &params).await?;
+
+    updated_genesis.genesis_commitment = Some(batch_params.commitment);
+    updated_genesis.genesis_root_hash = Some(batch_params.root_hash);
+    updated_genesis.rollup_last_leaf_index = Some(batch_params.rollup_last_leaf_index);
+    Ok(updated_genesis)
+}
+
+/// Encodes a generated proto message to YAML, for an arbitrary proto type.
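+/// Serialization goes through `prost_reflect` with proto field names kept as-is and
+/// 64-bit integers stringified; that is why the regenerated `genesis.yaml` below quotes
+/// values such as `genesis_rollup_leaf_index: '50'` and `l1_chain_id: '9'`.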
+pub(crate) fn encode_yaml<T: ReflectMessage>(x: &T) -> anyhow::Result<String> {
+    let mut serializer = Serializer::new(vec![]);
+    let opts = prost_reflect::SerializeOptions::new()
+        .use_proto_field_name(true)
+        .stringify_64_bit_integers(true);
+    x.transcode_to_dynamic()
+        .serialize_with_options(&mut serializer, &opts)?;
+    Ok(String::from_utf8_lossy(&serializer.into_inner()?).to_string())
+}
diff --git a/core/lib/config/src/configs/genesis.rs b/core/lib/config/src/configs/genesis.rs
index e4252706c9c..ca1b64e6c03 100644
--- a/core/lib/config/src/configs/genesis.rs
+++ b/core/lib/config/src/configs/genesis.rs
@@ -3,7 +3,7 @@ use zksync_basic_types::{Address, L1ChainId, L2ChainId, H256};

 use crate::configs::chain::L1BatchCommitDataGeneratorMode;

-#[derive(Debug, Serialize, Deserialize, Clone)]
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
 pub struct SharedBridge {
     pub bridgehub_proxy_addr: Address,
     pub state_transition_proxy_addr: Address,
@@ -12,7 +12,7 @@ pub struct SharedBridge {

 /// This config represents the genesis state of the chain.
 /// Each chain has this config immutable and we update it only during the protocol upgrade
-#[derive(Debug, Serialize, Deserialize, Clone)]
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
 pub struct GenesisConfig {
     // TODO make fields non optional, once we fully moved to file based configs.
     // Now for backward compatibility we keep it optional
diff --git a/core/lib/protobuf_config/src/genesis.rs b/core/lib/protobuf_config/src/genesis.rs
index e058ebba369..af33ef226ef 100644
--- a/core/lib/protobuf_config/src/genesis.rs
+++ b/core/lib/protobuf_config/src/genesis.rs
@@ -125,7 +125,7 @@ impl ProtoRepr for proto::Genesis {
         Self {
             genesis_root: this.genesis_root_hash.map(|x| format!("{:?}", x)),
             genesis_rollup_leaf_index: this.rollup_last_leaf_index,
-            genesis_batch_commitment: this.genesis_root_hash.map(|x| format!("{:?}", x)),
+            genesis_batch_commitment: this.genesis_commitment.map(|x| format!("{:?}", x)),
             genesis_protocol_version: this.protocol_version.map(|x| x as u32),
             default_aa_hash: this.default_aa_hash.map(|x| format!("{:?}", x)),
             bootloader_hash: this.bootloader_hash.map(|x| format!("{:?}", x)),
diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml
index f0ccc2d3445..c9b79aa4f21 100644
--- a/etc/env/file_based/genesis.yaml
+++ b/etc/env/file_based/genesis.yaml
@@ -1,19 +1,20 @@
-bootloader_hash: 0x010007ede999d096c84553fb514d3d6ca76fbf39789dda76bfeda9f3ae06236e
-default_aa_hash: 0x0100055b041eb28aff6e3a6e0f37c31fd053fc9ef142683b05e5f0aee6934066
-genesis_root: 0x1f50e4eda06a68d96a0272ba4581a342df2227ad12c23759ab7d78157950e69a
-genesis_batch_commitment: 0x84d7b576b9374729e0b38439c97aaf5074335e2a8d1c7a2e4581c1c1ec611631
-genesis_rollup_leaf_index: 46
-genesis_protocol_version: 22
-l1_chain_id: 9
-l2_chain_id: 270
-fee_account: 0x0000000000000000000000000000000000000001
-l1_batch_commit_data_generator_mode: Rollup
+genesis_root: 0x1920e0154aa7649a645e7931b84796bfec22b58250778b828d9a5b8c7d32f661
+genesis_rollup_leaf_index: '50'
+genesis_batch_commitment: 0xc64914ac5697bf6a73b9cca890ef013a83f8415573f2829b29111598410852a6
+genesis_protocol_version: 23
+default_aa_hash: 0x01000563ee5d0abdf9fc65f748a700d2c377aadebc16d5bf21105f8a94eff028
+bootloader_hash: 0x0100089d6cf001d52b9e64572fa9b3555211f56a2ad66784f495c4192a88b477
+l1_chain_id: '9'
+l2_chain_id: '270'
+fee_account: '0x0000000000000000000000000000000000000001'
 prover:
-  recursion_leaf_level_vk_hash: 0x400a4b532c6f072c00d1806ef299300d4c104f4ac55bd8698ade78894fcadc0a
-  recursion_node_level_vk_hash: 0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080
   recursion_scheduler_level_vk_hash: 0x063c6fb5c70404c2867f413a8e35563ad3d040b1ad8c11786231bfdba7b472c7
+  recursion_node_level_vk_hash: 0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080
+  recursion_leaf_level_vk_hash: 0x400a4b532c6f072c00d1806ef299300d4c104f4ac55bd8698ade78894fcadc0a
+  recursion_circuits_set_vks_hash: '0x0000000000000000000000000000000000000000000000000000000000000000'
   dummy_verifier: true
 shared_bridge:
-  state_transition_proxy_addr: 0x87d456da9ed212eb49d80d96afb44afddf36adf8
   bridgehub_proxy_addr: 0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347e5
+  state_transition_proxy_addr: 0x87d456da9ed212eb49d80d96afb44afddf36adf8
   transparent_proxy_admin_addr: 0xc957c0e82d3bafb5ad46ffbcc66900648784eb05
+l1_batch_commit_data_generator_mode: Rollup
diff --git a/infrastructure/zk/src/run.ts b/infrastructure/zk/src/run.ts
index 50313df5fd0..8098e9b6142 100644
--- a/infrastructure/zk/src/run.ts
+++ b/infrastructure/zk/src/run.ts
@@ -93,6 +93,11 @@ export async function loadtest(...args: string[]) {
     await utils.spawn(`cargo run --release --bin loadnext -- ${args.join(' ')}`);
 }

+export async function genesisConfigGenerator(...args: string[]) {
+    console.log(args);
+    await utils.spawn(`cargo run --release --bin genesis_generator -- ${args.join(' ')}`);
+}
+
 export async function readVariable(address: string, contractName: string, variableName: string, file?: string) {
     if (file === undefined)
         await utils.spawn(
@@ -137,6 +142,14 @@ command
     .description('get the revert reason for ethereum transaction')
     .action(revertReason);

+command
+    .command('genesis-config-generator [options...]')
+    .description('run the genesis-config-generator')
+    .allowUnknownOption()
+    .action(async (options: string[]) => {
+        await genesisConfigGenerator(...options);
+    });
+
 command
     .command('loadtest [options...]')
     .description('run the loadtest')

From f763d1f193bac0dcdc367c8566b31d8384fe0651 Mon Sep 17 00:00:00 2001
From: AnastasiiaVashchuk <72273339+AnastasiiaVashchuk@users.noreply.github.com>
Date: Fri, 12 Apr 2024 17:31:28 +0300
Subject: [PATCH 22/29] feat: Extract commitment generator into a separate crate (#1636)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Extract `zksync_core::commitment_generator` into a separate crate in the node folder.

Move `zksync_commitment_utils` into `zksync_commitment_generator` (reason: the utils are used only by the generator).

## Why ❔

Part of the "modularize the codebase" process, required for legolizer and publishing on crates.io.

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
- [ ] Spellcheck has been run via `zk spellcheck`.
- [ ] Linkcheck has been run via `zk linkcheck`.
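A minimal sketch of how a node binary wires the relocated component, mirroring the external-node changes in the diffs below; the `new`, `health_check`, and `run` signatures here are assumptions inferred from that usage, not verified against the crate itself:

```rust
// Sketch only, under the assumptions stated above; not the authoritative API.
use tokio::sync::watch;
use zksync_commitment_generator::CommitmentGenerator;
use zksync_dal::{ConnectionPool, Core};

fn spawn_commitment_generator(
    pool: ConnectionPool<Core>,
    stop_receiver: watch::Receiver<bool>,
) -> tokio::task::JoinHandle<anyhow::Result<()>> {
    let commitment_generator = CommitmentGenerator::new(pool);
    // The component exposes a reactive health check (see the `zksync_health_check`
    // imports in lib.rs below) that a node can register before spawning the task.
    let _health = commitment_generator.health_check();
    tokio::spawn(commitment_generator.run(stop_receiver))
}
```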
--- Cargo.lock | 15 +++++++++++++-- Cargo.toml | 4 ++-- core/bin/external_node/Cargo.toml | 1 + core/bin/external_node/src/main.rs | 2 +- core/lib/zksync_core/Cargo.toml | 2 +- core/lib/zksync_core/src/lib.rs | 3 +-- .../commitment_generator}/Cargo.toml | 15 +++++++++++++-- core/node/commitment_generator/README.md | 4 ++++ .../commitment_generator/src/lib.rs} | 8 ++++++-- .../commitment_generator/src}/metrics.rs | 0 .../commitment_generator/src/utils.rs} | 0 core/node/node_framework/Cargo.toml | 1 + .../layers/commitment_generator.rs | 2 +- 13 files changed, 44 insertions(+), 13 deletions(-) rename core/{lib/commitment_utils => node/commitment_generator}/Cargo.toml (62%) create mode 100644 core/node/commitment_generator/README.md rename core/{lib/zksync_core/src/commitment_generator/mod.rs => node/commitment_generator/src/lib.rs} (98%) rename core/{lib/zksync_core/src/commitment_generator => node/commitment_generator/src}/metrics.rs (100%) rename core/{lib/commitment_utils/src/lib.rs => node/commitment_generator/src/utils.rs} (100%) diff --git a/Cargo.lock b/Cargo.lock index f0ee6540d8a..8c7b736c578 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8123,16 +8123,25 @@ dependencies = [ ] [[package]] -name = "zksync_commitment_utils" +name = "zksync_commitment_generator" version = "0.1.0" dependencies = [ + "anyhow", "circuit_sequencer_api 0.1.40", "circuit_sequencer_api 0.1.41", "circuit_sequencer_api 0.1.50", + "itertools 0.10.5", "multivm", + "serde_json", + "tokio", + "tracing", + "vise", "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", "zk_evm 1.4.1", "zk_evm 1.5.0", + "zksync_dal", + "zksync_health_check", + "zksync_l1_contract_interface", "zksync_types", "zksync_utils", ] @@ -8400,7 +8409,7 @@ dependencies = [ "vlog", "vm_utils", "zksync_circuit_breaker", - "zksync_commitment_utils", + "zksync_commitment_generator", "zksync_concurrency", "zksync_config", "zksync_consensus_bft", @@ -8608,6 +8617,7 @@ dependencies = [ "vise", "vlog", "zksync_basic_types", + "zksync_commitment_generator", "zksync_concurrency", "zksync_config", "zksync_consensus_roles", @@ -8722,6 +8732,7 @@ dependencies = [ "tracing", "vlog", "zksync_circuit_breaker", + "zksync_commitment_generator", "zksync_concurrency", "zksync_config", "zksync_contracts", diff --git a/Cargo.toml b/Cargo.toml index 1216fc810a2..8aa37038e30 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ members = [ "core/bin/genesis_generator", # Node services "core/node/node_framework", + "core/node/commitment_generator", # Libraries "core/lib/db_connection", "core/lib/zksync_core", @@ -21,7 +22,6 @@ members = [ "core/lib/contracts", "core/lib/crypto", "core/lib/circuit_breaker", - "core/lib/commitment_utils", "core/lib/dal", "core/lib/env_config", "core/lib/eth_client", @@ -191,7 +191,6 @@ vm-benchmark-harness = { path = "core/tests/vm-benchmark/harness" } zksync = { path = "sdk/zksync-rs" } zksync_basic_types = { path = "core/lib/basic_types" } zksync_circuit_breaker = { path = "core/lib/circuit_breaker" } -zksync_commitment_utils = { path = "core/lib/commitment_utils" } zksync_config = { path = "core/lib/config" } zksync_contracts = { path = "core/lib/contracts" } zksync_core = { path = "core/lib/zksync_core" } @@ -224,3 +223,4 @@ zksync_crypto_primitives = { path = "core/lib/crypto_primitives" } zksync_node_framework = { path = "core/node/node_framework" } zksync_eth_watch = { path = "core/node/eth_watch" } zksync_shared_metrics = { path = "core/node/shared_metrics" } +zksync_commitment_generator = { path = 
"core/node/commitment_generator" } diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index b447d649190..d0f653abdb3 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -12,6 +12,7 @@ publish = false [dependencies] zksync_core.workspace = true +zksync_commitment_generator.workspace = true zksync_dal.workspace = true zksync_db_connection.workspace = true zksync_config.workspace = true diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index adc5f116557..074ae97388b 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -8,6 +8,7 @@ use tokio::{ sync::{watch, RwLock}, task::{self, JoinHandle}, }; +use zksync_commitment_generator::CommitmentGenerator; use zksync_concurrency::{ctx, scope}; use zksync_config::configs::{ api::MerkleTreeApiConfig, chain::L1BatchCommitDataGeneratorMode, database::MerkleTreeMode, @@ -21,7 +22,6 @@ use zksync_core::{ web3::{mempool_cache::MempoolCache, ApiBuilder, Namespace}, }, block_reverter::{BlockReverter, BlockReverterFlags, L1ExecutedBatchesRevert, NodeRole}, - commitment_generator::CommitmentGenerator, consensus, consistency_checker::ConsistencyChecker, db_pruner::{ diff --git a/core/lib/zksync_core/Cargo.toml b/core/lib/zksync_core/Cargo.toml index 0ed908204b4..62c75ef3e8f 100644 --- a/core/lib/zksync_core/Cargo.toml +++ b/core/lib/zksync_core/Cargo.toml @@ -24,7 +24,6 @@ zksync_protobuf_config.workspace = true zksync_utils.workspace = true zksync_contracts.workspace = true zksync_system_constants.workspace = true -zksync_commitment_utils.workspace = true zksync_eth_client.workspace = true zksync_eth_signer.workspace = true zksync_l1_contract_interface.workspace = true @@ -45,6 +44,7 @@ zksync_health_check.workspace = true vlog.workspace = true zksync_eth_watch.workspace = true zksync_shared_metrics.workspace = true +zksync_commitment_generator.workspace = true multivm.workspace = true diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index d1594d6a910..609346f40cf 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -21,6 +21,7 @@ use zksync_circuit_breaker::{ l1_txs::FailedL1TransactionChecker, replication_lag::ReplicationLagChecker, CircuitBreakerChecker, CircuitBreakers, }; +use zksync_commitment_generator::CommitmentGenerator; use zksync_concurrency::{ctx, scope}; use zksync_config::{ configs::{ @@ -62,7 +63,6 @@ use crate::{ web3::{self, mempool_cache::MempoolCache, state::InternalApiConfig, Namespace}, }, basic_witness_input_producer::BasicWitnessInputProducer, - commitment_generator::CommitmentGenerator, eth_sender::{ l1_batch_commit_data_generator::{ L1BatchCommitDataGenerator, RollupModeL1BatchCommitDataGenerator, @@ -99,7 +99,6 @@ use crate::{ pub mod api_server; pub mod basic_witness_input_producer; pub mod block_reverter; -pub mod commitment_generator; pub mod consensus; pub mod consistency_checker; pub mod db_pruner; diff --git a/core/lib/commitment_utils/Cargo.toml b/core/node/commitment_generator/Cargo.toml similarity index 62% rename from core/lib/commitment_utils/Cargo.toml rename to core/node/commitment_generator/Cargo.toml index b3d8e74b2fa..a15266c7a67 100644 --- a/core/lib/commitment_utils/Cargo.toml +++ b/core/node/commitment_generator/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "zksync_commitment_utils" +name = "zksync_commitment_generator" version = "0.1.0" edition.workspace = true authors.workspace = true @@ -10,8 +10,13 @@ 
 keywords.workspace = true
 categories.workspace = true

 [dependencies]
+vise.workspace = true
 zksync_types.workspace = true
+zksync_dal.workspace = true
+zksync_health_check.workspace = true
+zksync_l1_contract_interface.workspace = true
 zksync_utils.workspace = true
+multivm.workspace = true
 circuit_sequencer_api_1_4_0.workspace = true
 circuit_sequencer_api_1_4_1.workspace = true
 circuit_sequencer_api_1_5_0.workspace = true
@@ -19,4 +24,10 @@ circuit_sequencer_api_1_5_0.workspace = true
 zk_evm_1_5_0.workspace = true
 zk_evm_1_4_1.workspace = true
 zk_evm_1_3_3.workspace = true
-multivm.workspace = true
+
+tokio = { workspace = true, features = ["time"] }
+anyhow.workspace = true
+tracing.workspace = true
+itertools.workspace = true
+serde_json.workspace = true
+
diff --git a/core/node/commitment_generator/README.md b/core/node/commitment_generator/README.md
new file mode 100644
index 00000000000..da99ca9403a
--- /dev/null
+++ b/core/node/commitment_generator/README.md
@@ -0,0 +1,4 @@
+# zkSync Era commitment generator
+
+This crate contains an implementation of the zkSync Era commitment generator component, which is responsible for
+calculating commitment info for L1 batches.
diff --git a/core/lib/zksync_core/src/commitment_generator/mod.rs b/core/node/commitment_generator/src/lib.rs
similarity index 98%
rename from core/lib/zksync_core/src/commitment_generator/mod.rs
rename to core/node/commitment_generator/src/lib.rs
index 266b9a5cde1..5b1ccbb2bc1 100644
--- a/core/lib/zksync_core/src/commitment_generator/mod.rs
+++ b/core/node/commitment_generator/src/lib.rs
@@ -2,10 +2,8 @@ use std::time::Duration;

 use anyhow::Context;
 use itertools::Itertools;
-use metrics::{CommitmentStage, METRICS};
 use multivm::zk_evm_latest::ethereum_types::U256;
 use tokio::{sync::watch, task::JoinHandle};
-use zksync_commitment_utils::{bootloader_initial_content_commitment, events_queue_commitment};
 use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck};
 use zksync_l1_contract_interface::i_executor::commit::kzg::pubdata_to_blob_commitments;
@@ -18,7 +16,13 @@ use zksync_types::{
 };
 use zksync_utils::h256_to_u256;

+use crate::{
+    metrics::{CommitmentStage, METRICS},
+    utils::{bootloader_initial_content_commitment, events_queue_commitment},
+};
+
 mod metrics;
+mod utils;

 const SLEEP_INTERVAL: Duration = Duration::from_millis(100);
diff --git a/core/lib/zksync_core/src/commitment_generator/metrics.rs b/core/node/commitment_generator/src/metrics.rs
similarity index 100%
rename from core/lib/zksync_core/src/commitment_generator/metrics.rs
rename to core/node/commitment_generator/src/metrics.rs
diff --git a/core/lib/commitment_utils/src/lib.rs b/core/node/commitment_generator/src/utils.rs
similarity index 100%
rename from core/lib/commitment_utils/src/lib.rs
rename to core/node/commitment_generator/src/utils.rs
diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml
index 1b8341bf1ae..10011443fdb 100644
--- a/core/node/node_framework/Cargo.toml
+++ b/core/node/node_framework/Cargo.toml
@@ -29,6 +29,7 @@ zksync_utils.workspace = true
 zksync_circuit_breaker.workspace = true
 zksync_concurrency.workspace = true
 zksync_eth_watch.workspace = true
+zksync_commitment_generator.workspace = true

 tracing.workspace = true
 thiserror.workspace = true
diff --git a/core/node/node_framework/src/implementations/layers/commitment_generator.rs b/core/node/node_framework/src/implementations/layers/commitment_generator.rs
index
583996a1bcd..9dbb7b7a4c6 100644 --- a/core/node/node_framework/src/implementations/layers/commitment_generator.rs +++ b/core/node/node_framework/src/implementations/layers/commitment_generator.rs @@ -1,4 +1,4 @@ -use zksync_core::commitment_generator::CommitmentGenerator; +use zksync_commitment_generator::CommitmentGenerator; use crate::{ implementations::resources::{healthcheck::AppHealthCheckResource, pools::MasterPoolResource}, From b0b8f8932b03d98780d504f9acf394547dac7724 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 12 Apr 2024 17:31:52 +0300 Subject: [PATCH 23/29] feat(eth-watch): Brush up Ethereum watcher component (#1596) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Brushes up the Ethereum Watcher component that was recently moved to its own crate. Wraps `sqlx` errors in the relevant DB queries. ## Why ❔ Simplifies maintenance. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. - [x] Linkcheck has been run via `zk linkcheck`. --- Cargo.lock | 2 - core/bin/block_reverter/src/main.rs | 4 +- core/bin/zksync_server/src/main.rs | 6 +- core/lib/config/src/configs/eth_sender.rs | 10 +- core/lib/config/src/configs/eth_watch.rs | 4 +- core/lib/config/src/configs/general.rs | 4 +- core/lib/config/src/configs/mod.rs | 4 +- core/lib/config/src/lib.rs | 2 +- core/lib/config/src/testonly.rs | 12 +- ...0a01a6c7cbe9297cbb55d42533fddc18719b6.json | 20 -- ...cc43d486a5539b5a8c20780a8546cd5cec87a.json | 20 ++ ...02c690c33686c889d318b1d64bdd7fa6374db.json | 20 -- ...c71e450e868077f1f9bc70443308ea112ce17.json | 20 ++ core/lib/dal/src/protocol_versions_dal.rs | 122 ++++++----- core/lib/dal/src/transactions_dal.rs | 88 ++++---- core/lib/env_config/src/eth_sender.rs | 14 +- core/lib/env_config/src/eth_watch.rs | 10 +- .../eth_client/src/clients/http/signing.rs | 6 +- core/lib/protobuf_config/src/eth.rs | 6 +- .../src/proto/config/general.proto | 2 - .../lib/zksync_core/src/block_reverter/mod.rs | 4 +- core/lib/zksync_core/src/eth_sender/tests.rs | 4 +- core/lib/zksync_core/src/lib.rs | 35 +++- .../src/state_keeper/io/mempool.rs | 5 +- core/lib/zksync_core/src/sync_layer/tests.rs | 1 + .../zksync_core/src/temp_config_store/mod.rs | 6 +- core/lib/zksync_core/src/utils/testonly.rs | 3 +- core/node/eth_watch/Cargo.toml | 2 - core/node/eth_watch/README.md | 15 +- core/node/eth_watch/src/client.rs | 96 +++------ .../event_processors/governance_upgrades.rs | 54 +++-- .../eth_watch/src/event_processors/mod.rs | 43 +++- .../src/event_processors/priority_ops.rs | 40 ++-- .../src/event_processors/upgrades.rs | 90 -------- core/node/eth_watch/src/lib.rs | 164 +++++++-------- core/node/eth_watch/src/metrics.rs | 4 +- core/node/eth_watch/src/tests.rs | 198 ++++-------------- .../node/node_framework/examples/main_node.rs | 12 +- .../src/implementations/layers/eth_sender.rs | 6 +- .../src/implementations/layers/eth_watch.rs | 18 +- .../layers/pk_signing_eth_client.rs | 6 +- docs/guides/advanced/02_deposits.md | 5 +- docs/guides/advanced/how_transaction_works.md | 8 +- etc/env/base/rust.toml | 1 + 44 files changed, 513 insertions(+), 683 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-1b4ebbfc96b4fd66ecbe64a6be80a01a6c7cbe9297cbb55d42533fddc18719b6.json create mode 100644 
core/lib/dal/.sqlx/query-86a712fac0cf6823ac0c64ad1cacc43d486a5539b5a8c20780a8546cd5cec87a.json delete mode 100644 core/lib/dal/.sqlx/query-8a7a57ca3d4d65da3e0877c003902c690c33686c889d318b1d64bdd7fa6374db.json create mode 100644 core/lib/dal/.sqlx/query-9f3b61d0a408b509a1cbd3d2efbc71e450e868077f1f9bc70443308ea112ce17.json delete mode 100644 core/node/eth_watch/src/event_processors/upgrades.rs diff --git a/Cargo.lock b/Cargo.lock index 8c7b736c578..8ffce1cb701 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8584,12 +8584,10 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "metrics", "thiserror", "tokio", "tracing", "vise", - "zksync_config", "zksync_contracts", "zksync_dal", "zksync_eth_client", diff --git a/core/bin/block_reverter/src/main.rs b/core/bin/block_reverter/src/main.rs index c9d0bb756b2..7459dc1b431 100644 --- a/core/bin/block_reverter/src/main.rs +++ b/core/bin/block_reverter/src/main.rs @@ -2,7 +2,7 @@ use anyhow::Context as _; use clap::{Parser, Subcommand}; use tokio::io::{self, AsyncReadExt}; use zksync_config::{ - configs::ObservabilityConfig, ContractsConfig, DBConfig, ETHConfig, PostgresConfig, + configs::ObservabilityConfig, ContractsConfig, DBConfig, EthConfig, PostgresConfig, }; use zksync_core::block_reverter::{ BlockReverter, BlockReverterEthConfig, BlockReverterFlags, L1ExecutedBatchesRevert, NodeRole, @@ -90,7 +90,7 @@ async fn main() -> anyhow::Result<()> { } let _guard = builder.build(); - let eth_sender = ETHConfig::from_env().context("ETHSenderConfig::from_env()")?; + let eth_sender = EthConfig::from_env().context("EthConfig::from_env()")?; let db_config = DBConfig::from_env().context("DBConfig::from_env()")?; let default_priority_fee_per_gas = U256::from( eth_sender diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 1d8daf1b857..6ba9772d696 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -15,7 +15,7 @@ use zksync_config::{ FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, }, - ApiConfig, ContractVerifierConfig, DBConfig, ETHConfig, ETHWatchConfig, GasAdjusterConfig, + ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; use zksync_core::{ @@ -271,8 +271,8 @@ fn load_env_config() -> anyhow::Result { proof_data_handler_config: ProofDataHandlerConfig::from_env().ok(), api_config: ApiConfig::from_env().ok(), db_config: DBConfig::from_env().ok(), - eth_sender_config: ETHConfig::from_env().ok(), - eth_watch_config: ETHWatchConfig::from_env().ok(), + eth_sender_config: EthConfig::from_env().ok(), + eth_watch_config: EthWatchConfig::from_env().ok(), gas_adjuster_config: GasAdjusterConfig::from_env().ok(), object_store_config: ObjectStoreConfig::from_env().ok(), observability: ObservabilityConfig::from_env().ok(), diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index 1d9669ad41f..818cd7f51cf 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -3,20 +3,20 @@ use std::time::Duration; use serde::Deserialize; use zksync_basic_types::H256; -use crate::ETHWatchConfig; +use crate::EthWatchConfig; /// Configuration for the Ethereum related components. 
 #[derive(Debug, Deserialize, Clone, PartialEq)]
-pub struct ETHConfig {
+pub struct EthConfig {
     /// Options related to the Ethereum sender directly.
     pub sender: Option<SenderConfig>,
     /// Options related to the `GasAdjuster` submodule.
     pub gas_adjuster: Option<GasAdjusterConfig>,
-    pub watcher: Option<ETHWatchConfig>,
+    pub watcher: Option<EthWatchConfig>,
     pub web3_url: String,
 }

-impl ETHConfig {
+impl EthConfig {
     /// Creates a mock configuration object suitable for unit tests.
     /// Values inside match the config used for localhost development.
     pub fn for_tests() -> Self {
@@ -55,7 +55,7 @@ impl ETHConfig {
                 internal_pubdata_pricing_multiplier: 1.0,
                 max_blob_base_fee: None,
             }),
-            watcher: Some(ETHWatchConfig {
+            watcher: Some(EthWatchConfig {
                 confirmations_for_eth_event: None,
                 eth_node_poll_interval: 0,
             }),
diff --git a/core/lib/config/src/configs/eth_watch.rs b/core/lib/config/src/configs/eth_watch.rs
index cb162b46f3b..2cb9bf18675 100644
--- a/core/lib/config/src/configs/eth_watch.rs
+++ b/core/lib/config/src/configs/eth_watch.rs
@@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize};

 /// Configuration for the Ethereum watch crate.
 #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
-pub struct ETHWatchConfig {
+pub struct EthWatchConfig {
     /// Amount of confirmations for the priority operation to be processed.
     /// If not specified operation will be processed once its block is finalized.
     pub confirmations_for_eth_event: Option<u64>,
     /// How often we want to poll the Ethereum node.
     pub eth_node_poll_interval: u64,
 }

-impl ETHWatchConfig {
+impl EthWatchConfig {
     /// Converts `self.eth_node_poll_interval` into `Duration`.
     pub fn poll_interval(&self) -> Duration {
         Duration::from_millis(self.eth_node_poll_interval)
diff --git a/core/lib/config/src/configs/general.rs b/core/lib/config/src/configs/general.rs
index e3097bdf5f5..0cd55ed9222 100644
--- a/core/lib/config/src/configs/general.rs
+++ b/core/lib/config/src/configs/general.rs
@@ -7,7 +7,7 @@ use crate::{
         FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, ObservabilityConfig,
         PrometheusConfig, ProofDataHandlerConfig,
     },
-    ApiConfig, ContractVerifierConfig, DBConfig, ETHConfig, PostgresConfig, SnapshotsCreatorConfig,
+    ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, PostgresConfig, SnapshotsCreatorConfig,
 };

 #[derive(Debug, PartialEq)]
@@ -29,7 +29,7 @@ pub struct GeneralConfig {
     pub prometheus_config: Option<PrometheusConfig>,
     pub proof_data_handler_config: Option<ProofDataHandlerConfig>,
     pub db_config: Option<DBConfig>,
-    pub eth: Option<ETHConfig>,
+    pub eth: Option<EthConfig>,
     pub snapshot_creator: Option<SnapshotsCreatorConfig>,
     pub observability: Option<ObservabilityConfig>,
 }
diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs
index a78283ac2d0..4289e6625c2 100644
--- a/core/lib/config/src/configs/mod.rs
+++ b/core/lib/config/src/configs/mod.rs
@@ -4,8 +4,8 @@ pub use self::{
     contract_verifier::ContractVerifierConfig,
     contracts::ContractsConfig,
     database::{DBConfig, PostgresConfig},
-    eth_sender::{ETHConfig, GasAdjusterConfig},
-    eth_watch::ETHWatchConfig,
+    eth_sender::{EthConfig, GasAdjusterConfig},
+    eth_watch::EthWatchConfig,
     fri_proof_compressor::FriProofCompressorConfig,
     fri_prover::FriProverConfig,
     fri_prover_gateway::FriProverGatewayConfig,
diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs
index 1a5c56ab091..66656e60b70 100644
--- a/core/lib/config/src/lib.rs
+++ b/core/lib/config/src/lib.rs
@@ -1,7 +1,7 @@
 #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)]

 pub use crate::configs::{
-    ApiConfig, ContractVerifierConfig, ContractsConfig, DBConfig, ETHConfig, ETHWatchConfig,
+    ApiConfig, ContractVerifierConfig, ContractsConfig, DBConfig, EthConfig, EthWatchConfig,
     GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig,
 };
diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs
index 1986e1a85fa..c095ffe8ef8 100644
--- a/core/lib/config/src/testonly.rs
+++ b/core/lib/config/src/testonly.rs
@@ -293,9 +293,9 @@ impl Distribution for EncodeDist {
     }
 }

-impl Distribution<configs::ETHConfig> for EncodeDist {
-    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::ETHConfig {
-        configs::ETHConfig {
+impl Distribution<configs::EthConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::EthConfig {
+        configs::EthConfig {
             sender: self.sample(rng),
             gas_adjuster: self.sample(rng),
             watcher: self.sample(rng),
@@ -379,9 +379,9 @@ impl Distribution for EncodeDist {
     }
 }

-impl Distribution<configs::ETHWatchConfig> for EncodeDist {
-    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::ETHWatchConfig {
-        configs::ETHWatchConfig {
+impl Distribution<configs::EthWatchConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::EthWatchConfig {
+        configs::EthWatchConfig {
             confirmations_for_eth_event: self.sample(rng),
             eth_node_poll_interval: self.sample(rng),
         }
diff --git a/core/lib/dal/.sqlx/query-1b4ebbfc96b4fd66ecbe64a6be80a01a6c7cbe9297cbb55d42533fddc18719b6.json b/core/lib/dal/.sqlx/query-1b4ebbfc96b4fd66ecbe64a6be80a01a6c7cbe9297cbb55d42533fddc18719b6.json
deleted file mode 100644
index 8b9995b3b0f..00000000000
--- a/core/lib/dal/.sqlx/query-1b4ebbfc96b4fd66ecbe64a6be80a01a6c7cbe9297cbb55d42533fddc18719b6.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n SELECT\n MAX(priority_op_id) AS \"op_id\"\n FROM\n transactions\n WHERE\n is_priority = TRUE\n ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "op_id",
-        "type_info": "Int8"
-      }
-    ],
-    "parameters": {
-      "Left": []
-    },
-    "nullable": [
-      null
-    ]
-  },
-  "hash": "1b4ebbfc96b4fd66ecbe64a6be80a01a6c7cbe9297cbb55d42533fddc18719b6"
-}
diff --git a/core/lib/dal/.sqlx/query-86a712fac0cf6823ac0c64ad1cacc43d486a5539b5a8c20780a8546cd5cec87a.json b/core/lib/dal/.sqlx/query-86a712fac0cf6823ac0c64ad1cacc43d486a5539b5a8c20780a8546cd5cec87a.json
new file mode 100644
index 00000000000..214257670b2
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-86a712fac0cf6823ac0c64ad1cacc43d486a5539b5a8c20780a8546cd5cec87a.json
@@ -0,0 +1,20 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT\n l1_block_number\n FROM\n transactions\n WHERE\n priority_op_id IS NOT NULL\n ORDER BY\n priority_op_id DESC\n LIMIT\n 1\n ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "l1_block_number",
+        "type_info": "Int4"
+      }
+    ],
+    "parameters": {
+      "Left": []
+    },
+    "nullable": [
+      true
+    ]
+  },
+  "hash": "86a712fac0cf6823ac0c64ad1cacc43d486a5539b5a8c20780a8546cd5cec87a"
+}
diff --git a/core/lib/dal/.sqlx/query-8a7a57ca3d4d65da3e0877c003902c690c33686c889d318b1d64bdd7fa6374db.json b/core/lib/dal/.sqlx/query-8a7a57ca3d4d65da3e0877c003902c690c33686c889d318b1d64bdd7fa6374db.json
deleted file mode 100644
index ea6562d1a67..00000000000
--- a/core/lib/dal/.sqlx/query-8a7a57ca3d4d65da3e0877c003902c690c33686c889d318b1d64bdd7fa6374db.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n SELECT\n l1_block_number\n FROM\n transactions\n WHERE\n priority_op_id IS NOT NULL\n ORDER BY\n priority_op_id DESC\n LIMIT\n 1\n ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "l1_block_number",
-        "type_info": "Int4"
-      }
-    ],
-    "parameters": {
-      "Left": []
-    },
-    "nullable": [
-      true
-    ]
-  },
-  "hash": "8a7a57ca3d4d65da3e0877c003902c690c33686c889d318b1d64bdd7fa6374db"
"8a7a57ca3d4d65da3e0877c003902c690c33686c889d318b1d64bdd7fa6374db" -} diff --git a/core/lib/dal/.sqlx/query-9f3b61d0a408b509a1cbd3d2efbc71e450e868077f1f9bc70443308ea112ce17.json b/core/lib/dal/.sqlx/query-9f3b61d0a408b509a1cbd3d2efbc71e450e868077f1f9bc70443308ea112ce17.json new file mode 100644 index 00000000000..59f82434317 --- /dev/null +++ b/core/lib/dal/.sqlx/query-9f3b61d0a408b509a1cbd3d2efbc71e450e868077f1f9bc70443308ea112ce17.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n MAX(priority_op_id) AS \"op_id\"\n FROM\n transactions\n WHERE\n is_priority = TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "op_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "9f3b61d0a408b509a1cbd3d2efbc71e450e868077f1f9bc70443308ea112ce17" +} diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index f8daf8ded3b..6e10205884b 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -2,7 +2,11 @@ use std::convert::TryInto; use anyhow::Context as _; use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; -use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; +use zksync_db_connection::{ + connection::Connection, + error::DalResult, + instrument::{InstrumentExt, Instrumented}, +}; use zksync_types::{ protocol_upgrade::{ProtocolUpgradeTx, ProtocolVersion}, protocol_version::{L1VerifierConfig, VerifierParams}, @@ -10,7 +14,10 @@ use zksync_types::{ }; use crate::{ - models::storage_protocol_version::{protocol_version_from_storage, StorageProtocolVersion}, + models::{ + parse_protocol_version, + storage_protocol_version::{protocol_version_from_storage, StorageProtocolVersion}, + }, Core, CoreDal, }; @@ -212,8 +219,8 @@ impl ProtocolVersionsDal<'_, '_> { pub async fn load_previous_version( &mut self, version_id: ProtocolVersionId, - ) -> Option { - let storage_protocol_version: StorageProtocolVersion = sqlx::query_as!( + ) -> DalResult> { + let maybe_version = sqlx::query_as!( StorageProtocolVersion, r#" SELECT @@ -229,21 +236,24 @@ impl ProtocolVersionsDal<'_, '_> { "#, version_id as i32 ) - .fetch_optional(self.storage.conn()) - .await - .unwrap()?; - let tx = self - .get_protocol_upgrade_tx((storage_protocol_version.id as u16).try_into().unwrap()) - .await; + .try_map(|row| Ok((parse_protocol_version(row.id)?, row))) + .instrument("load_previous_version") + .with_arg("version_id", &version_id) + .fetch_optional(self.storage) + .await?; - Some(protocol_version_from_storage(storage_protocol_version, tx)) + let Some((version_id, row)) = maybe_version else { + return Ok(None); + }; + let tx = self.get_protocol_upgrade_tx(version_id).await?; + Ok(Some(protocol_version_from_storage(row, tx))) } pub async fn get_protocol_version( &mut self, version_id: ProtocolVersionId, - ) -> Option { - let storage_protocol_version: StorageProtocolVersion = sqlx::query_as!( + ) -> DalResult> { + let maybe_row = sqlx::query_as!( StorageProtocolVersion, r#" SELECT @@ -255,12 +265,17 @@ impl ProtocolVersionsDal<'_, '_> { "#, version_id as i32 ) - .fetch_optional(self.storage.conn()) - .await - .unwrap()?; - let tx = self.get_protocol_upgrade_tx(version_id).await; + .instrument("get_protocol_version") + .with_arg("version_id", &version_id) + .fetch_optional(self.storage) + .await?; - Some(protocol_version_from_storage(storage_protocol_version, tx)) + let Some(row) = 
maybe_row else { + return Ok(None); + }; + let tx = self.get_protocol_upgrade_tx(version_id).await?; + + Ok(Some(protocol_version_from_storage(row, tx))) } pub async fn l1_verifier_config_for_version( @@ -298,8 +313,8 @@ impl ProtocolVersionsDal<'_, '_> { }) } - pub async fn last_version_id(&mut self) -> Option { - let id = sqlx::query!( + pub async fn last_version_id(&mut self) -> DalResult> { + Ok(sqlx::query!( r#" SELECT MAX(id) AS "max?" @@ -307,11 +322,11 @@ impl ProtocolVersionsDal<'_, '_> { protocol_versions "# ) - .fetch_optional(self.storage.conn()) - .await - .unwrap()? - .max?; - Some((id as u16).try_into().unwrap()) + .try_map(|row| row.max.map(parse_protocol_version).transpose()) + .instrument("last_version_id") + .fetch_optional(self.storage) + .await? + .flatten()) } pub async fn last_used_version_id(&mut self) -> Option { @@ -355,8 +370,10 @@ impl ProtocolVersionsDal<'_, '_> { pub async fn get_protocol_upgrade_tx( &mut self, protocol_version_id: ProtocolVersionId, - ) -> Option { - let row = sqlx::query!( + ) -> DalResult> { + let instrumentation = Instrumented::new("get_protocol_upgrade_tx") + .with_arg("protocol_version_id", &protocol_version_id); + let query = sqlx::query!( r#" SELECT upgrade_tx_hash @@ -366,27 +383,34 @@ impl ProtocolVersionsDal<'_, '_> { id = $1 "#, protocol_version_id as i32 - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap()?; - if let Some(hash) = row.upgrade_tx_hash { - Some( - self.storage - .transactions_dal() - .get_tx_by_hash(H256::from_slice(&hash)) - .await - .unwrap_or_else(|| { - panic!( - "Missing upgrade tx for protocol version {}", - protocol_version_id as u16 - ); - }) - .try_into() - .unwrap(), - ) - } else { - None - } + ); + + let maybe_row = instrumentation + .with(query) + .fetch_optional(self.storage) + .await?; + let Some(upgrade_tx_hash) = maybe_row.and_then(|row| row.upgrade_tx_hash) else { + return Ok(None); + }; + let upgrade_tx_hash = H256::from_slice(&upgrade_tx_hash); + + let instrumentation = Instrumented::new("get_protocol_upgrade_tx#get_tx") + .with_arg("protocol_version_id", &protocol_version_id) + .with_arg("upgrade_tx_hash", &upgrade_tx_hash); + let tx = self + .storage + .transactions_dal() + .get_tx_by_hash(upgrade_tx_hash) + .await? 
+ .ok_or_else(|| { + instrumentation.arg_error( + "upgrade_tx_hash", + anyhow::anyhow!("upgrade transaction is not present in storage"), + ) + })?; + let tx = tx + .try_into() + .map_err(|err| instrumentation.arg_error("tx", anyhow::Error::msg(err)))?; + Ok(Some(tx)) } } diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 00b82d8475e..a28ab45543e 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -1162,47 +1162,48 @@ impl TransactionsDal<'_, '_> { Ok(()) } - pub async fn get_last_processed_l1_block(&mut self) -> Option { - { - sqlx::query!( - r#" - SELECT - l1_block_number - FROM - transactions - WHERE - priority_op_id IS NOT NULL - ORDER BY - priority_op_id DESC - LIMIT - 1 - "# - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .and_then(|x| x.l1_block_number.map(|block| L1BlockNumber(block as u32))) - } + pub async fn get_last_processed_l1_block(&mut self) -> DalResult> { + let maybe_row = sqlx::query!( + r#" + SELECT + l1_block_number + FROM + transactions + WHERE + priority_op_id IS NOT NULL + ORDER BY + priority_op_id DESC + LIMIT + 1 + "# + ) + .instrument("get_last_processed_l1_block") + .fetch_optional(self.storage) + .await?; + + Ok(maybe_row + .and_then(|row| row.l1_block_number) + .map(|number| L1BlockNumber(number as u32))) } - pub async fn last_priority_id(&mut self) -> Option { - { - let op_id = sqlx::query!( - r#" - SELECT - MAX(priority_op_id) AS "op_id" - FROM - transactions - WHERE - is_priority = TRUE - "# - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap()? - .op_id?; - Some(PriorityOpId(op_id as u64)) - } + pub async fn last_priority_id(&mut self) -> DalResult> { + let maybe_row = sqlx::query!( + r#" + SELECT + MAX(priority_op_id) AS "op_id" + FROM + transactions + WHERE + is_priority = TRUE + "# + ) + .instrument("last_priority_id") + .fetch_optional(self.storage) + .await?; + + Ok(maybe_row + .and_then(|row| row.op_id) + .map(|op_id| PriorityOpId(op_id as u64))) } pub async fn next_priority_id(&mut self) -> PriorityOpId { @@ -1444,7 +1445,7 @@ impl TransactionsDal<'_, '_> { .map(|call_trace| call_trace.into_call(protocol_version))) } - pub(crate) async fn get_tx_by_hash(&mut self, hash: H256) -> Option { + pub(crate) async fn get_tx_by_hash(&mut self, hash: H256) -> DalResult> { sqlx::query_as!( StorageTransaction, r#" @@ -1457,10 +1458,11 @@ impl TransactionsDal<'_, '_> { "#, hash.as_bytes() ) - .fetch_optional(self.storage.conn()) + .map(Into::into) + .instrument("get_tx_by_hash") + .with_arg("hash", &hash) + .fetch_optional(self.storage) .await - .unwrap() - .map(|tx| tx.into()) } } diff --git a/core/lib/env_config/src/eth_sender.rs b/core/lib/env_config/src/eth_sender.rs index 779dda99164..7a8c170d080 100644 --- a/core/lib/env_config/src/eth_sender.rs +++ b/core/lib/env_config/src/eth_sender.rs @@ -1,16 +1,16 @@ use anyhow::Context as _; use zksync_config::{ - configs::eth_sender::SenderConfig, ETHConfig, ETHWatchConfig, GasAdjusterConfig, + configs::eth_sender::SenderConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, }; use crate::{envy_load, FromEnv}; -impl FromEnv for ETHConfig { +impl FromEnv for EthConfig { fn from_env() -> anyhow::Result { Ok(Self { sender: SenderConfig::from_env().ok(), gas_adjuster: GasAdjusterConfig::from_env().ok(), - watcher: ETHWatchConfig::from_env().ok(), + watcher: EthWatchConfig::from_env().ok(), web3_url: std::env::var("ETH_CLIENT_WEB3_URL").context("ETH_CLIENT_WEB3_URL")?, }) } @@ -39,8 +39,8 @@ mod tests { 
static MUTEX: EnvMutex = EnvMutex::new(); - fn expected_config() -> ETHConfig { - ETHConfig { + fn expected_config() -> EthConfig { + EthConfig { sender: Some(SenderConfig { aggregated_proof_sizes: vec![1, 5], aggregated_block_commit_deadline: 30, @@ -76,7 +76,7 @@ mod tests { internal_pubdata_pricing_multiplier: 1.0, max_blob_base_fee: None, }), - watcher: Some(ETHWatchConfig { + watcher: Some(EthWatchConfig { confirmations_for_eth_event: Some(0), eth_node_poll_interval: 300, }), @@ -125,7 +125,7 @@ mod tests { "#; lock.set_env(config); - let actual = ETHConfig::from_env().unwrap(); + let actual = EthConfig::from_env().unwrap(); assert_eq!(actual, expected_config()); assert_eq!( actual.sender.unwrap().private_key().unwrap(), diff --git a/core/lib/env_config/src/eth_watch.rs b/core/lib/env_config/src/eth_watch.rs index ef90b7e9c46..9d5177d4027 100644 --- a/core/lib/env_config/src/eth_watch.rs +++ b/core/lib/env_config/src/eth_watch.rs @@ -1,8 +1,8 @@ -use zksync_config::ETHWatchConfig; +use zksync_config::EthWatchConfig; use crate::{envy_load, FromEnv}; -impl FromEnv for ETHWatchConfig { +impl FromEnv for EthWatchConfig { fn from_env() -> anyhow::Result { envy_load("eth_watch", "ETH_WATCH_") } @@ -15,8 +15,8 @@ mod tests { static MUTEX: EnvMutex = EnvMutex::new(); - fn expected_config() -> ETHWatchConfig { - ETHWatchConfig { + fn expected_config() -> EthWatchConfig { + EthWatchConfig { confirmations_for_eth_event: Some(0), eth_node_poll_interval: 300, } @@ -31,7 +31,7 @@ mod tests { "#; lock.set_env(config); - let actual = ETHWatchConfig::from_env().unwrap(); + let actual = EthWatchConfig::from_env().unwrap(); assert_eq!(actual, expected_config()); } } diff --git a/core/lib/eth_client/src/clients/http/signing.rs b/core/lib/eth_client/src/clients/http/signing.rs index 8c36f41a6e0..ff78043b1f9 100644 --- a/core/lib/eth_client/src/clients/http/signing.rs +++ b/core/lib/eth_client/src/clients/http/signing.rs @@ -1,7 +1,7 @@ use std::{fmt, sync::Arc}; use async_trait::async_trait; -use zksync_config::{configs::ContractsConfig, ETHConfig}; +use zksync_config::{configs::ContractsConfig, EthConfig}; use zksync_contracts::zksync_contract; use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner, PrivateKeySigner}; use zksync_types::{ @@ -30,7 +30,7 @@ pub type PKSigningClient = SigningClient; impl PKSigningClient { pub fn from_config( - eth_sender: ÐConfig, + eth_sender: &EthConfig, contracts_config: &ContractsConfig, l1_chain_id: L1ChainId, operator_private_key: H256, @@ -68,7 +68,7 @@ impl PKSigningClient { } fn from_config_inner( - eth_sender: ÐConfig, + eth_sender: &EthConfig, contracts_config: &ContractsConfig, l1_chain_id: L1ChainId, operator_private_key: H256, diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs index 5fac5aa1638..7b1540e9461 100644 --- a/core/lib/protobuf_config/src/eth.rs +++ b/core/lib/protobuf_config/src/eth.rs @@ -61,7 +61,8 @@ impl proto::PubdataSendingMode { } impl ProtoRepr for proto::Eth { - type Type = configs::eth_sender::ETHConfig; + type Type = configs::eth_sender::EthConfig; + fn read(&self) -> anyhow::Result { Ok(Self::Type { sender: read_optional_repr(&self.sender).context("sender")?, @@ -222,7 +223,8 @@ impl ProtoRepr for proto::GasAdjuster { } impl ProtoRepr for proto::EthWatch { - type Type = configs::ETHWatchConfig; + type Type = configs::EthWatchConfig; + fn read(&self) -> anyhow::Result { Ok(Self::Type { confirmations_for_eth_event: self.confirmations_for_eth_event, diff --git 
a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto index 179f3c7ed90..fdc60c57cfd 100644 --- a/core/lib/protobuf_config/src/proto/config/general.proto +++ b/core/lib/protobuf_config/src/proto/config/general.proto @@ -35,6 +35,4 @@ message GeneralConfig { optional config.prover.ProverGateway prover_gateway = 30; optional config.snapshot_creator.SnapshotsCreator snapshot_creator = 31; optional config.observability.Observability observability = 32; - } - diff --git a/core/lib/zksync_core/src/block_reverter/mod.rs b/core/lib/zksync_core/src/block_reverter/mod.rs index aadd57c98a9..009d5b1b4bb 100644 --- a/core/lib/zksync_core/src/block_reverter/mod.rs +++ b/core/lib/zksync_core/src/block_reverter/mod.rs @@ -3,7 +3,7 @@ use std::{path::Path, time::Duration}; use bitflags::bitflags; use serde::Serialize; use tokio::time::sleep; -use zksync_config::{ContractsConfig, ETHConfig}; +use zksync_config::{ContractsConfig, EthConfig}; use zksync_contracts::zksync_contract; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; @@ -54,7 +54,7 @@ pub struct BlockReverterEthConfig { impl BlockReverterEthConfig { pub fn new( - eth_config: ETHConfig, + eth_config: EthConfig, contract: ContractsConfig, reverter_address: Option<Address>
, ) -> Self { diff --git a/core/lib/zksync_core/src/eth_sender/tests.rs b/core/lib/zksync_core/src/eth_sender/tests.rs index d471d9ef105..6cfa56a8e78 100644 --- a/core/lib/zksync_core/src/eth_sender/tests.rs +++ b/core/lib/zksync_core/src/eth_sender/tests.rs @@ -5,7 +5,7 @@ use once_cell::sync::Lazy; use test_casing::{test_casing, Product}; use zksync_config::{ configs::eth_sender::{ProofSendingMode, PubdataSendingMode, SenderConfig}, - ContractsConfig, ETHConfig, GasAdjusterConfig, + ContractsConfig, EthConfig, GasAdjusterConfig, }; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_eth_client::{clients::MockEthereum, EthInterface}; @@ -84,7 +84,7 @@ impl EthSenderTester { aggregator_operate_4844_mode: bool, deployment_mode: &DeploymentMode, ) -> Self { - let eth_sender_config = ETHConfig::for_tests(); + let eth_sender_config = EthConfig::for_tests(); let contracts_config = ContractsConfig::for_tests(); let aggregator_config = SenderConfig { aggregated_proof_sizes: vec![1], diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index 609346f40cf..6a47f6070e2 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -36,22 +36,22 @@ use zksync_config::{ wallets::Wallets, ContractsConfig, GeneralConfig, }, - ApiConfig, DBConfig, GenesisConfig, PostgresConfig, + ApiConfig, DBConfig, EthWatchConfig, GenesisConfig, PostgresConfig, }; use zksync_contracts::governance_contract; use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core, CoreDal}; use zksync_db_connection::healthcheck::ConnectionPoolHealthCheck; use zksync_eth_client::{ clients::{PKSigningClient, QueryClient}, - BoundEthInterface, + BoundEthInterface, EthInterface, }; -use zksync_eth_watch::start_eth_watch; +use zksync_eth_watch::{EthHttpQueryClient, EthWatch}; use zksync_health_check::{AppHealthCheck, HealthStatus, ReactiveHealthCheck}; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_queued_job_processor::JobProcessor; use zksync_shared_metrics::{InitStage, APP_METRICS}; use zksync_state::PostgresStorageCaches; -use zksync_types::{fee_model::FeeModelConfig, L2ChainId}; +use zksync_types::{ethabi::Contract, fee_model::FeeModelConfig, Address, L2ChainId}; use crate::{ api_server::{ @@ -894,6 +894,33 @@ async fn add_state_keeper_to_task_futures( Ok(()) } +async fn start_eth_watch( + config: EthWatchConfig, + pool: ConnectionPool, + eth_gateway: Arc, + diamond_proxy_addr: Address, + governance: (Contract, Address), + stop_receiver: watch::Receiver, +) -> anyhow::Result>> { + let eth_client = EthHttpQueryClient::new( + eth_gateway, + diamond_proxy_addr, + governance.1, + config.confirmations_for_eth_event, + ); + + let eth_watch = EthWatch::new( + diamond_proxy_addr, + &governance.0, + Box::new(eth_client), + pool, + config.poll_interval(), + ) + .await?; + + Ok(tokio::spawn(eth_watch.run(stop_receiver))) +} + async fn add_trees_to_task_futures( configs: &GeneralConfig, task_futures: &mut Vec>>, diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index fe61d2eb595..23915f64294 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -317,10 +317,11 @@ impl StateKeeperIO for MempoolIO { version_id: ProtocolVersionId, ) -> anyhow::Result> { let mut storage = self.pool.connection_tagged("state_keeper").await?; - Ok(storage + storage .protocol_versions_dal() .get_protocol_upgrade_tx(version_id) - .await) 
+ .await + .map_err(Into::into) } async fn load_batch_state_hash( diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 7fe4da79188..532abb5915f 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -289,6 +289,7 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo .protocol_versions_dal() .get_protocol_version(ProtocolVersionId::next()) .await + .unwrap() .expect("next protocol version not persisted"); assert_eq!( persisted_protocol_version.timestamp, diff --git a/core/lib/zksync_core/src/temp_config_store/mod.rs b/core/lib/zksync_core/src/temp_config_store/mod.rs index 55fb9e11d94..2bda8de9a3e 100644 --- a/core/lib/zksync_core/src/temp_config_store/mod.rs +++ b/core/lib/zksync_core/src/temp_config_store/mod.rs @@ -14,7 +14,7 @@ use zksync_config::{ FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, GeneralConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, }, - ApiConfig, ContractVerifierConfig, DBConfig, ETHConfig, ETHWatchConfig, GasAdjusterConfig, + ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; use zksync_protobuf::{repr::ProtoRepr, ProtoFmt}; @@ -60,8 +60,8 @@ pub struct TempConfigStore { pub proof_data_handler_config: Option, pub api_config: Option, pub db_config: Option, - pub eth_sender_config: Option, - pub eth_watch_config: Option, + pub eth_sender_config: Option, + pub eth_watch_config: Option, pub gas_adjuster_config: Option, pub object_store_config: Option, pub observability: Option, diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs index f45237a9bcd..311e3461179 100644 --- a/core/lib/zksync_core/src/utils/testonly.rs +++ b/core/lib/zksync_core/src/utils/testonly.rs @@ -277,7 +277,8 @@ pub(crate) async fn recover( let protocol_version = storage .protocol_versions_dal() .get_protocol_version(snapshot.l1_batch.protocol_version.unwrap()) - .await; + .await + .unwrap(); if let Some(protocol_version) = protocol_version { assert_eq!( protocol_version.base_system_contracts_hashes, diff --git a/core/node/eth_watch/Cargo.toml b/core/node/eth_watch/Cargo.toml index 43d4d98f57a..e3a2e33d8ff 100644 --- a/core/node/eth_watch/Cargo.toml +++ b/core/node/eth_watch/Cargo.toml @@ -13,7 +13,6 @@ categories.workspace = true vise.workspace = true zksync_types.workspace = true zksync_dal.workspace = true -zksync_config.workspace = true zksync_contracts.workspace = true zksync_system_constants.workspace = true zksync_eth_client.workspace = true @@ -24,4 +23,3 @@ anyhow.workspace = true thiserror.workspace = true async-trait.workspace = true tracing.workspace = true -metrics.workspace = true diff --git a/core/node/eth_watch/README.md b/core/node/eth_watch/README.md index 06b1b620941..f805f3e4c38 100644 --- a/core/node/eth_watch/README.md +++ b/core/node/eth_watch/README.md @@ -1,4 +1,17 @@ # zkSync Era Eth Watcher -This crate contains implementation of zkSync Era Eth Watcher component, responsible for fetching the changes from the +This crate contains an implementation of the zkSync Era Eth Watcher component, which fetches the changes from the corresponding L1 contract. 
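Taken together, the DAL changes above follow one pattern: queries stop `unwrap()`-ing and instead surface a `DalResult<Option<T>>`, which callers either propagate with `?` or convert at a component boundary via `map_err(Into::into)`. A minimal, self-contained sketch of that pattern (names are illustrative, not the real `zksync_dal` API; `anyhow` and `thiserror` assumed as dependencies):

```rust
use anyhow::Context as _;

#[derive(Debug, thiserror::Error)]
#[error("database error: {0}")]
struct DalError(String);

type DalResult<T> = Result<T, DalError>;

// Stand-in for a DAL getter; before the refactor it would have returned
// `Option<u16>` and `.unwrap()`-ed the query error internally.
fn last_version_id() -> DalResult<Option<u16>> {
    Ok(Some(24))
}

// Absence stays `Ok(None)`; infrastructure errors convert into `anyhow::Error`
// through the blanket `From<E: std::error::Error + Send + Sync + 'static>` impl.
fn maybe_version() -> anyhow::Result<Option<u16>> {
    last_version_id().map_err(Into::into)
}

// Absence becomes a hard error only at a call site that requires the value
// (cf. `initialize_state` later in this patch).
fn required_version() -> anyhow::Result<u16> {
    last_version_id()?
        .context("expected at least one (genesis) version to be present in DB")
}

fn main() -> anyhow::Result<()> {
    assert_eq!(maybe_version()?, Some(24));
    assert_eq!(required_version()?, 24);
    Ok(())
}
```

The design choice here is that "not found" and "query failed" are distinct outcomes, so the caller, not the DAL, decides which of them is fatal.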
+ +## Overview + +Internally, Eth Watcher contains _event processors_, each of which provides a relevant topic (i.e., a `bytes32` Solidity +event selector) and is responsible for processing the corresponding events. Besides events, processors have access to +the L1 client (to query more info) and to the node Postgres (to persist processing results). Examples of processors are: + +- [Priority operations processor](src/event_processors/priority_ops.rs): persists priority operations (aka L1 + transactions) +- [Upgrades processor](src/event_processors/governance_upgrades.rs): persists protocol upgrades. + +Eth Watcher combines topics from the processors into a single filter and periodically queries L1 for the corresponding +events. The fetched events are partitioned per processor and fed to them in succession. diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index 785d20bb44b..fc54e0c89d5 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -1,7 +1,8 @@ use std::{fmt, sync::Arc}; use zksync_contracts::verifier_contract; -use zksync_eth_client::{CallFunctionArgs, Error as EthClientError, EthInterface}; +pub(super) use zksync_eth_client::Error as EthClientError; +use zksync_eth_client::{CallFunctionArgs, EthInterface}; use zksync_types::{ ethabi::Contract, web3::{ @@ -12,24 +13,7 @@ use zksync_types::{ Address, H256, }; -use super::metrics::METRICS; - -#[derive(Debug, thiserror::Error)] -pub enum Error { - #[error("Log parsing failed: {0}")] - LogParse(String), - #[error("Eth client error: {0}")] - EthClient(#[from] EthClientError), - #[error("Infinite recursion caused by too many responses")] - InfiniteRecursion, -} - -impl From for Error { - fn from(err: web3::contract::Error) -> Self { - Self::EthClient(err.into()) - } -} - +/// L1 client functionality used by [`EthWatch`](crate::EthWatch) and constituent event processors. #[async_trait::async_trait] pub trait EthClient: 'static + fmt::Debug + Send + Sync { /// Returns events in a given block range. @@ -38,11 +22,11 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { from: BlockNumber, to: BlockNumber, retries_left: usize, - ) -> Result, Error>; + ) -> Result, EthClientError>; /// Returns finalized L1 block number. - async fn finalized_block_number(&self) -> Result; + async fn finalized_block_number(&self) -> Result; /// Returns scheduler verification key hash by verifier address. - async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result; + async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result; /// Sets list of topics to return events for. fn set_topics(&mut self, topics: Vec); } @@ -51,14 +35,13 @@ pub const RETRY_LIMIT: usize = 5; const TOO_MANY_RESULTS_INFURA: &str = "query returned more than"; const TOO_MANY_RESULTS_ALCHEMY: &str = "response size exceeded"; +/// Implementation of [`EthClient`] based on HTTP JSON-RPC (encapsulated via [`EthInterface`]). #[derive(Debug)] pub struct EthHttpQueryClient { client: Arc, topics: Vec, zksync_contract_addr: Address, - /// Address of the `Governance` contract. It's optional because it is present only for post-boojum chains. - /// If address is some then client will listen to events coming from it. - governance_address: Option
, + governance_address: Address, verifier_contract_abi: Contract, confirmations_for_eth_event: Option<u64>, } @@ -67,13 +50,11 @@ impl EthHttpQueryClient { pub fn new( client: Arc<dyn EthInterface>, zksync_contract_addr: Address, - governance_address: Option<Address>
, + governance_address: Address, confirmations_for_eth_event: Option, ) -> Self { tracing::debug!( - "New eth client, zkSync addr: {:x}, governance addr: {:?}", - zksync_contract_addr, - governance_address + "New eth client, zkSync addr: {zksync_contract_addr:x}, governance addr: {governance_address:?}" ); Self { client, @@ -90,33 +71,24 @@ impl EthHttpQueryClient { from: BlockNumber, to: BlockNumber, topics: Vec, - ) -> Result, Error> { + ) -> Result, EthClientError> { let filter = FilterBuilder::default() - .address( - [Some(self.zksync_contract_addr), self.governance_address] - .iter() - .flatten() - .copied() - .collect(), - ) + .address(vec![self.zksync_contract_addr, self.governance_address]) .from_block(from) .to_block(to) .topics(Some(topics), None, None, None) .build(); - - self.client.logs(filter, "watch").await.map_err(Into::into) + self.client.logs(filter, "watch").await } } #[async_trait::async_trait] impl EthClient for EthHttpQueryClient { - async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result { + async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result { // New verifier returns the hash of the verification key. - let args = CallFunctionArgs::new("verificationKeyHash", ()) .for_contract(verifier_address, self.verifier_contract_abi.clone()); let vk_hash_tokens = self.client.call_contract_function(args).await?; - Ok(H256::from_tokens(vk_hash_tokens)?) } @@ -125,13 +97,12 @@ impl EthClient for EthHttpQueryClient { from: BlockNumber, to: BlockNumber, retries_left: usize, - ) -> Result, Error> { - let latency = METRICS.get_priority_op_events.start(); + ) -> Result, EthClientError> { let mut result = self.get_filter_logs(from, to, self.topics.clone()).await; // This code is compatible with both Infura and Alchemy API providers. // Note: we don't handle rate-limits here - assumption is that we're never going to hit them. - if let Err(Error::EthClient(EthClientError::EthereumGateway(err))) = &result { + if let Err(EthClientError::EthereumGateway(err)) = &result { tracing::warn!("Provider returned error message: {:?}", err); let err_message = err.to_string(); let err_code = if let web3::Error::Rpc(err) = err { @@ -173,14 +144,11 @@ impl EthClient for EthHttpQueryClient { // safety check to prevent infinite recursion (quite unlikely) if from_number >= mid { - return Err(Error::InfiniteRecursion); + tracing::warn!("Infinite recursion detected while getting events: from_number={from_number:?}, mid={mid:?}"); + return result; } - tracing::warn!( - "Splitting block range in half: {:?} - {:?} - {:?}", - from, - mid, - to - ); + + tracing::warn!("Splitting block range in half: {from:?} - {mid:?} - {to:?}"); let mut first_half = self .get_events(from, BlockNumber::Number(mid), RETRY_LIMIT) .await?; @@ -191,30 +159,30 @@ impl EthClient for EthHttpQueryClient { first_half.append(&mut second_half); result = Ok(first_half); } else if should_retry(err_code, err_message) && retries_left > 0 { - tracing::warn!("Retrying. Retries left: {:?}", retries_left); + tracing::warn!("Retrying. 
Retries left: {retries_left}"); result = self.get_events(from, to, retries_left - 1).await; } } - latency.observe(); result } - async fn finalized_block_number(&self) -> Result { + async fn finalized_block_number(&self) -> Result { if let Some(confirmations) = self.confirmations_for_eth_event { let latest_block_number = self.client.block_number("watch").await?.as_u64(); Ok(latest_block_number.saturating_sub(confirmations)) } else { - self.client + let block = self + .client .block(BlockId::Number(BlockNumber::Finalized), "watch") - .await - .map_err(Into::into) - .map(|res| { - res.expect("Finalized block must be present on L1") - .number - .expect("Finalized block must contain number") - .as_u64() - }) + .await? + .ok_or_else(|| { + web3::Error::InvalidResponse("Finalized block must be present on L1".into()) + })?; + let block_number = block.number.ok_or_else(|| { + web3::Error::InvalidResponse("Finalized block must contain number".into()) + })?; + Ok(block_number.as_u64()) } } diff --git a/core/node/eth_watch/src/event_processors/governance_upgrades.rs b/core/node/eth_watch/src/event_processors/governance_upgrades.rs index f1a318e0def..c7b92e68c55 100644 --- a/core/node/eth_watch/src/event_processors/governance_upgrades.rs +++ b/core/node/eth_watch/src/event_processors/governance_upgrades.rs @@ -1,14 +1,14 @@ -use std::{convert::TryFrom, time::Instant}; - -use zksync_dal::{Connection, Core, CoreDal}; +use anyhow::Context as _; +use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_types::{ ethabi::Contract, protocol_upgrade::GovernanceOperation, web3::types::Log, Address, ProtocolUpgrade, ProtocolVersionId, H256, }; use crate::{ - client::{Error, EthClient}, - event_processors::EventProcessor, + client::EthClient, + event_processors::{EventProcessor, EventProcessorError}, + metrics::{PollStage, METRICS}, }; /// Listens to operation events coming from the governance contract and saves new protocol upgrade proposals to the database. @@ -25,15 +25,15 @@ impl GovernanceUpgradesEventProcessor { diamond_proxy_address: Address, last_seen_version_id: ProtocolVersionId, governance_contract: &Contract, - ) -> Self { - Self { + ) -> anyhow::Result { + Ok(Self { diamond_proxy_address, last_seen_version_id, upgrade_proposal_signature: governance_contract .event("TransparentOperationScheduled") - .expect("TransparentOperationScheduled event is missing in abi") + .context("TransparentOperationScheduled event is missing in ABI")? .signature(), - } + }) } } @@ -44,14 +44,13 @@ impl EventProcessor for GovernanceUpgradesEventProcessor { storage: &mut Connection<'_, Core>, client: &dyn EthClient, events: Vec, - ) -> Result<(), Error> { + ) -> Result<(), EventProcessorError> { let mut upgrades = Vec::new(); - for event in events - .into_iter() - .filter(|event| event.topics[0] == self.upgrade_proposal_signature) - { + for event in events { + assert_eq!(event.topics[0], self.upgrade_proposal_signature); // guaranteed by the watcher + let governance_operation = GovernanceOperation::try_from(event) - .map_err(|err| Error::LogParse(format!("{:?}", err)))?; + .map_err(|err| EventProcessorError::log_parse(err, "governance operation"))?; // Some calls can target other contracts than Diamond proxy, skip them. 
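The reworked `get_events` above keeps the pre-existing strategy of bisecting the block range when the provider rejects a query for returning too many results. A simplified, self-contained sketch of that strategy, with the RPC call stubbed out (error detection reduced to a string match; the real code also distinguishes providers and caps retries):

```rust
/// Stand-in for `eth_getLogs`: providers reject over-large responses with
/// messages like "query returned more than 10000 results".
fn get_logs(from: u64, to: u64) -> Result<Vec<u64>, String> {
    if to - from > 4 {
        Err("query returned more than 10000 results".to_string())
    } else {
        Ok((from..=to).collect())
    }
}

/// Splits the range in half on "too many results" errors and recurses.
fn get_logs_split(from: u64, to: u64) -> Result<Vec<u64>, String> {
    match get_logs(from, to) {
        Err(err) if err.contains("more than") => {
            let mid = from + (to - from) / 2;
            if from >= mid {
                // Safety check to prevent infinite recursion (quite unlikely).
                return Err(err);
            }
            let mut logs = get_logs_split(from, mid)?;
            logs.extend(get_logs_split(mid + 1, to)?);
            Ok(logs)
        }
        result => result,
    }
}

fn main() {
    // 21 blocks in the range 100..=120, reassembled from smaller queries.
    assert_eq!(get_logs_split(100, 120).unwrap().len(), 21);
}
```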
for call in governance_operation .calls @@ -81,37 +80,36 @@ impl EventProcessor for GovernanceUpgradesEventProcessor { .skip_while(|(v, _)| v.id as u16 <= self.last_seen_version_id as u16) .collect(); - if new_upgrades.is_empty() { + let Some((last_upgrade, _)) = new_upgrades.last() else { return Ok(()); - } - + }; let ids: Vec<_> = new_upgrades.iter().map(|(u, _)| u.id as u16).collect(); - tracing::debug!("Received upgrades with ids: {:?}", ids); + tracing::debug!("Received upgrades with ids: {ids:?}"); - let last_id = new_upgrades.last().unwrap().0.id; - let stage_start = Instant::now(); + let last_id = last_upgrade.id; + let stage_latency = METRICS.poll_eth_node[&PollStage::PersistUpgrades].start(); for (upgrade, scheduler_vk_hash) in new_upgrades { let previous_version = storage .protocol_versions_dal() .load_previous_version(upgrade.id) .await - .unwrap_or_else(|| { - panic!( - "Expected some version preceding {:?} be present in DB", + .map_err(DalError::generalize)? + .with_context(|| { + format!( + "expected some version preceding {:?} to be present in DB", upgrade.id ) - }); + })?; let new_version = previous_version.apply_upgrade(upgrade, scheduler_vk_hash); storage .protocol_versions_dal() .save_protocol_version_with_tx(&new_version) .await - .unwrap(); + .map_err(DalError::generalize)?; } - metrics::histogram!("eth_watcher.poll_eth_node", stage_start.elapsed(), "stage" => "persist_upgrades"); + stage_latency.observe(); self.last_seen_version_id = last_id; - Ok(()) } diff --git a/core/node/eth_watch/src/event_processors/mod.rs b/core/node/eth_watch/src/event_processors/mod.rs index 84aa4336cb0..f67436ddf53 100644 --- a/core/node/eth_watch/src/event_processors/mod.rs +++ b/core/node/eth_watch/src/event_processors/mod.rs @@ -3,21 +3,50 @@ use std::fmt; use zksync_dal::{Connection, Core}; use zksync_types::{web3::types::Log, H256}; -use crate::client::{Error, EthClient}; +pub(crate) use self::{ + governance_upgrades::GovernanceUpgradesEventProcessor, priority_ops::PriorityOpsEventProcessor, +}; +use crate::client::{EthClient, EthClientError}; -pub mod governance_upgrades; -pub mod priority_ops; -pub mod upgrades; +mod governance_upgrades; +mod priority_ops; +/// Errors issued by an [`EventProcessor`]. +#[derive(Debug, thiserror::Error)] +pub(super) enum EventProcessorError { + #[error("failed parsing a log into {log_kind}: {source:?}")] + LogParse { + log_kind: &'static str, + #[source] + source: anyhow::Error, + }, + #[error("Eth client error: {0}")] + Client(#[from] EthClientError), + /// Internal errors are considered fatal (i.e., they bubble up and lead to the watcher termination). + #[error("internal processing error: {0:?}")] + Internal(#[from] anyhow::Error), +} + +impl EventProcessorError { + pub fn log_parse(source: impl Into, log_kind: &'static str) -> Self { + Self::LogParse { + log_kind, + source: source.into(), + } + } +} + +/// Processor for a single type of events emitted by the L1 contract. [`EthWatch`](crate::EthWatch) +/// feeds events to all processors one-by-one. #[async_trait::async_trait] -pub trait EventProcessor: 'static + fmt::Debug + Send + Sync { - /// Processes given events +pub(super) trait EventProcessor: 'static + fmt::Debug + Send + Sync { + /// Processes given events. All events are guaranteed to match [`Self::relevant_topic()`]. 
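The new `EventProcessorError` in `event_processors/mod.rs` deliberately separates transient failures from fatal ones, and `EthWatch::run` (later in this patch) treats them differently: `Internal` aborts the component, anything else only resets its state. A compact synchronous sketch of that policy (the `Client` variant is omitted to keep the example self-contained; `thiserror`, `anyhow`, and `tracing` assumed as dependencies):

```rust
#[derive(Debug, thiserror::Error)]
enum EventProcessorError {
    #[error("failed parsing a log into {log_kind}: {source:?}")]
    LogParse {
        log_kind: &'static str,
        #[source]
        source: anyhow::Error,
    },
    #[error("internal processing error: {0:?}")]
    Internal(#[from] anyhow::Error),
}

/// Returns `Ok(true)` to keep polling, `Ok(false)` to force re-initialization,
/// and `Err(_)` to terminate the watcher.
fn on_iteration_result(result: Result<(), EventProcessorError>) -> anyhow::Result<bool> {
    match result {
        Ok(()) => Ok(true),
        // Fatal: bubbles up and leads to watcher termination.
        Err(EventProcessorError::Internal(err)) => Err(err),
        // Transient: log and let the caller reset `last_processed_ethereum_block`.
        Err(err) => {
            tracing::error!("Failed to process new blocks: {err}");
            Ok(false)
        }
    }
}
```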
async fn process_events( &mut self, storage: &mut Connection<'_, Core>, client: &dyn EthClient, events: Vec, - ) -> Result<(), Error>; + ) -> Result<(), EventProcessorError>; /// Relevant topic which defines what events to be processed fn relevant_topic(&self) -> H256; diff --git a/core/node/eth_watch/src/event_processors/priority_ops.rs b/core/node/eth_watch/src/event_processors/priority_ops.rs index 0c1dc40f303..856e9c0b27e 100644 --- a/core/node/eth_watch/src/event_processors/priority_ops.rs +++ b/core/node/eth_watch/src/event_processors/priority_ops.rs @@ -1,13 +1,12 @@ -use std::convert::TryFrom; - +use anyhow::Context as _; use zksync_contracts::zksync_contract; -use zksync_dal::{Connection, Core, CoreDal}; +use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_types::{l1::L1Tx, web3::types::Log, PriorityOpId, H256}; use crate::{ - client::{Error, EthClient}, - event_processors::EventProcessor, + client::EthClient, + event_processors::{EventProcessor, EventProcessorError}, metrics::{PollStage, METRICS}, }; @@ -19,14 +18,14 @@ pub struct PriorityOpsEventProcessor { } impl PriorityOpsEventProcessor { - pub fn new(next_expected_priority_id: PriorityOpId) -> Self { - Self { + pub fn new(next_expected_priority_id: PriorityOpId) -> anyhow::Result { + Ok(Self { next_expected_priority_id, new_priority_request_signature: zksync_contract() .event("NewPriorityRequest") - .expect("NewPriorityRequest event is missing in abi") + .context("NewPriorityRequest event is missing in ABI")? .signature(), - } + }) } } @@ -37,13 +36,12 @@ impl EventProcessor for PriorityOpsEventProcessor { storage: &mut Connection<'_, Core>, _client: &dyn EthClient, events: Vec, - ) -> Result<(), Error> { + ) -> Result<(), EventProcessorError> { let mut priority_ops = Vec::new(); - for event in events - .into_iter() - .filter(|event| event.topics[0] == self.new_priority_request_signature) - { - let tx = L1Tx::try_from(event).map_err(|err| Error::LogParse(format!("{}", err)))?; + for event in events { + assert_eq!(event.topics[0], self.new_priority_request_signature); // guaranteed by the watcher + let tx = L1Tx::try_from(event) + .map_err(|err| EventProcessorError::log_parse(err, "priority op"))?; priority_ops.push(tx); } @@ -70,17 +68,15 @@ impl EventProcessor for PriorityOpsEventProcessor { .into_iter() .skip_while(|tx| tx.serial_id() < self.next_expected_priority_id) .collect(); - if new_ops.is_empty() { + let (Some(first_new), Some(last_new)) = (new_ops.first(), new_ops.last()) else { return Ok(()); - } - - let first_new = &new_ops[0]; - let last_new = new_ops[new_ops.len() - 1].clone(); + }; assert_eq!( first_new.serial_id(), self.next_expected_priority_id, "priority transaction serial id mismatch" ); + let next_expected_priority_id = last_new.serial_id().next(); let stage_latency = METRICS.poll_eth_node[&PollStage::PersistL1Txs].start(); APP_METRICS.processed_txs[&TxStage::added_to_mempool()].inc(); @@ -91,10 +87,10 @@ impl EventProcessor for PriorityOpsEventProcessor { .transactions_dal() .insert_transaction_l1(&new_op, eth_block) .await - .unwrap(); + .map_err(DalError::generalize)?; } stage_latency.observe(); - self.next_expected_priority_id = last_new.serial_id().next(); + self.next_expected_priority_id = next_expected_priority_id; Ok(()) } diff --git a/core/node/eth_watch/src/event_processors/upgrades.rs b/core/node/eth_watch/src/event_processors/upgrades.rs deleted file mode 100644 index 346b61fcc5c..00000000000 --- 
a/core/node/eth_watch/src/event_processors/upgrades.rs +++ /dev/null @@ -1,90 +0,0 @@ -use std::convert::TryFrom; - -use zksync_dal::{Connection, Core, CoreDal}; -use zksync_types::{web3::types::Log, ProtocolUpgrade, ProtocolVersionId, H256}; - -use crate::{ - client::{Error, EthClient}, - event_processors::EventProcessor, - metrics::{PollStage, METRICS}, -}; - -pub(crate) const UPGRADE_PROPOSAL_SIGNATURE: H256 = H256([ - 105, 17, 91, 73, 175, 231, 166, 16, 26, 46, 122, 241, 125, 66, 30, 218, 29, 193, 83, 189, 38, - 214, 153, 240, 19, 196, 255, 240, 64, 70, 70, 166, -]); - -/// Responsible for saving new protocol upgrade proposals to the database. -#[derive(Debug)] -pub struct UpgradesEventProcessor { - last_seen_version_id: ProtocolVersionId, -} - -impl UpgradesEventProcessor { - pub fn new(last_seen_version_id: ProtocolVersionId) -> Self { - Self { - last_seen_version_id, - } - } -} - -#[async_trait::async_trait] -impl EventProcessor for UpgradesEventProcessor { - async fn process_events( - &mut self, - storage: &mut Connection<'_, Core>, - client: &dyn EthClient, - events: Vec, - ) -> Result<(), Error> { - let mut upgrades = Vec::new(); - for event in events - .into_iter() - .filter(|event| event.topics[0] == UPGRADE_PROPOSAL_SIGNATURE) - { - let upgrade = ProtocolUpgrade::try_from(event) - .map_err(|err| Error::LogParse(format!("{:?}", err)))?; - // Scheduler VK is not present in proposal event. It is hard coded in verifier contract. - let scheduler_vk_hash = if let Some(address) = upgrade.verifier_address { - Some(client.scheduler_vk_hash(address).await?) - } else { - None - }; - upgrades.push((upgrade, scheduler_vk_hash)); - } - - let new_upgrades: Vec<_> = upgrades - .into_iter() - .skip_while(|(v, _)| v.id as u16 <= self.last_seen_version_id as u16) - .collect(); - - if new_upgrades.is_empty() { - return Ok(()); - } - - let ids: Vec<_> = new_upgrades.iter().map(|(u, _)| u.id as u16).collect(); - tracing::debug!("Received upgrades with ids: {:?}", ids); - - let last_id = new_upgrades.last().unwrap().0.id; - let stage_latency = METRICS.poll_eth_node[&PollStage::PersistUpgrades].start(); - for (upgrade, scheduler_vk_hash) in new_upgrades { - let previous_version = storage - .protocol_versions_dal() - .load_previous_version(upgrade.id) - .await - .expect("Expected previous version to be present in DB"); - let new_version = previous_version.apply_upgrade(upgrade, scheduler_vk_hash); - storage - .protocol_versions_dal() - .save_protocol_version_with_tx(&new_version) - .await - .unwrap(); - } - stage_latency.observe(); - self.last_seen_version_id = last_id; - Ok(()) - } - - fn relevant_topic(&self) -> H256 { - UPGRADE_PROPOSAL_SIGNATURE - } -} diff --git a/core/node/eth_watch/src/lib.rs b/core/node/eth_watch/src/lib.rs index c0328953326..399c0ff31e4 100644 --- a/core/node/eth_watch/src/lib.rs +++ b/core/node/eth_watch/src/lib.rs @@ -1,31 +1,29 @@ -//! Ethereum watcher polls the Ethereum node for PriorityQueue events. +//! Ethereum watcher polls the Ethereum node for the relevant events, such as priority operations (aka L1 transactions), +//! protocol upgrades etc. //! New events are accepted to the zkSync network once they have the sufficient amount of L1 confirmations. -//! -//! Poll interval is configured using the `ETH_POLL_INTERVAL` constant. -//! Number of confirmations is configured using the `CONFIRMATIONS_FOR_ETH_EVENT` environment variable. 
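With the legacy diamond-proxy upgrades processor deleted above, every remaining processor follows the same contract: expose one topic, receive only logs matching it. A toy, self-contained rendition of that contract (the real trait is async and also takes a DB connection and an `EthClient`):

```rust
type H256 = [u8; 32];

#[derive(Debug, Clone)]
struct Log {
    topics: Vec<H256>,
}

trait EventProcessor {
    /// Topic which defines the events to be processed.
    fn relevant_topic(&self) -> H256;
    /// All `events` are guaranteed to match `relevant_topic()`.
    fn process_events(&mut self, events: Vec<Log>) -> Result<(), String>;
}

#[derive(Default)]
struct CountingProcessor {
    seen: usize,
}

impl EventProcessor for CountingProcessor {
    fn relevant_topic(&self) -> H256 {
        [0x11; 32]
    }
    fn process_events(&mut self, events: Vec<Log>) -> Result<(), String> {
        // The watcher pre-filters, so processors no longer filter themselves.
        debug_assert!(events
            .iter()
            .all(|e| e.topics.first() == Some(&self.relevant_topic())));
        self.seen += events.len();
        Ok(())
    }
}

/// The watcher subscribes to the union of all processors' topics.
fn collect_topics(processors: &[Box<dyn EventProcessor>]) -> Vec<H256> {
    processors.iter().map(|p| p.relevant_topic()).collect()
}

fn main() {
    let mut processors: Vec<Box<dyn EventProcessor>> =
        vec![Box::<CountingProcessor>::default()];
    let topics = collect_topics(&processors);
    let log = Log { topics: vec![topics[0]] };
    processors[0].process_events(vec![log]).unwrap();
}
```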
-use std::{sync::Arc, time::Duration}; +use std::time::Duration; -use tokio::{sync::watch, task::JoinHandle}; -use zksync_config::ETHWatchConfig; +use anyhow::Context as _; +use tokio::sync::watch; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_eth_client::EthInterface; use zksync_system_constants::PRIORITY_EXPIRATION; use zksync_types::{ ethabi::Contract, web3::types::BlockNumber as Web3BlockNumber, Address, PriorityOpId, ProtocolVersionId, }; +pub use self::client::EthHttpQueryClient; use self::{ - client::{Error, EthClient, EthHttpQueryClient, RETRY_LIMIT}, + client::{EthClient, RETRY_LIMIT}, event_processors::{ - governance_upgrades::GovernanceUpgradesEventProcessor, - priority_ops::PriorityOpsEventProcessor, upgrades::UpgradesEventProcessor, EventProcessor, + EventProcessor, EventProcessorError, GovernanceUpgradesEventProcessor, + PriorityOpsEventProcessor, }, metrics::{PollStage, METRICS}, }; -pub mod client; +mod client; mod event_processors; mod metrics; #[cfg(test)] @@ -38,12 +36,12 @@ struct EthWatchState { last_processed_ethereum_block: u64, } +/// Ethereum watcher component. #[derive(Debug)] pub struct EthWatch { client: Box, poll_interval: Duration, event_processors: Vec>, - last_processed_ethereum_block: u64, pool: ConnectionPool, } @@ -51,71 +49,63 @@ pub struct EthWatch { impl EthWatch { pub async fn new( diamond_proxy_address: Address, - governance_contract: Option, + governance_contract: &Contract, mut client: Box, pool: ConnectionPool, poll_interval: Duration, - ) -> Self { - let mut storage = pool.connection_tagged("eth_watch").await.unwrap(); - - let state = Self::initialize_state(&*client, &mut storage).await; - - tracing::info!("initialized state: {:?}", state); - + ) -> anyhow::Result { + let mut storage = pool.connection_tagged("eth_watch").await?; + let state = Self::initialize_state(&*client, &mut storage).await?; + tracing::info!("initialized state: {state:?}"); drop(storage); let priority_ops_processor = - PriorityOpsEventProcessor::new(state.next_expected_priority_id); - let upgrades_processor = UpgradesEventProcessor::new(state.last_seen_version_id); - let mut event_processors: Vec> = vec![ + PriorityOpsEventProcessor::new(state.next_expected_priority_id)?; + let governance_upgrades_processor = GovernanceUpgradesEventProcessor::new( + diamond_proxy_address, + state.last_seen_version_id, + governance_contract, + )?; + let event_processors: Vec> = vec![ Box::new(priority_ops_processor), - Box::new(upgrades_processor), + Box::new(governance_upgrades_processor), ]; - if let Some(governance_contract) = governance_contract { - let governance_upgrades_processor = GovernanceUpgradesEventProcessor::new( - diamond_proxy_address, - state.last_seen_version_id, - &governance_contract, - ); - event_processors.push(Box::new(governance_upgrades_processor)) - } - let topics = event_processors .iter() - .map(|p| p.relevant_topic()) + .map(|processor| processor.relevant_topic()) .collect(); client.set_topics(topics); - Self { + Ok(Self { client, poll_interval, event_processors, last_processed_ethereum_block: state.last_processed_ethereum_block, pool, - } + }) } async fn initialize_state( client: &dyn EthClient, storage: &mut Connection<'_, Core>, - ) -> EthWatchState { + ) -> anyhow::Result { let next_expected_priority_id: PriorityOpId = storage .transactions_dal() .last_priority_id() - .await + .await? 
.map_or(PriorityOpId(0), |e| e + 1); let last_seen_version_id = storage .protocol_versions_dal() .last_version_id() - .await - .expect("Expected at least one (genesis) version to be present in DB"); + .await? + .context("expected at least one (genesis) version to be present in DB")?; let last_processed_ethereum_block = match storage .transactions_dal() .get_last_processed_l1_block() - .await + .await? { // There are some priority ops processed - start from the last processed eth block // but subtract 1 in case the server stopped mid-block. @@ -124,45 +114,56 @@ impl EthWatch { None => client .finalized_block_number() .await - .expect("cannot initialize eth watch: cannot get current ETH block") + .context("cannot get current Ethereum block")? .saturating_sub(PRIORITY_EXPIRATION), }; - EthWatchState { + Ok(EthWatchState { next_expected_priority_id, last_seen_version_id, last_processed_ethereum_block, - } + }) } - pub async fn run(mut self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { + pub async fn run(mut self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { let mut timer = tokio::time::interval(self.poll_interval); let pool = self.pool.clone(); - loop { - if *stop_receiver.borrow() { - tracing::info!("Stop signal received, eth_watch is shutting down"); - break; - } - timer.tick().await; + while !*stop_receiver.borrow_and_update() { + tokio::select! { + _ = timer.tick() => { /* continue iterations */ } + _ = stop_receiver.changed() => break, + } METRICS.eth_poll.inc(); - let mut storage = pool.connection_tagged("eth_watch").await.unwrap(); - if let Err(error) = self.loop_iteration(&mut storage).await { - // This is an error because otherwise we could potentially miss a priority operation - // thus entering priority mode, which is not desired. - tracing::error!("Failed to process new blocks {}", error); - self.last_processed_ethereum_block = - Self::initialize_state(&*self.client, &mut storage) - .await - .last_processed_ethereum_block; + let mut storage = pool.connection_tagged("eth_watch").await?; + match self.loop_iteration(&mut storage).await { + Ok(()) => { /* everything went fine */ } + Err(EventProcessorError::Internal(err)) => { + tracing::error!("Internal error processing new blocks: {err:?}"); + return Err(err); + } + Err(err) => { + // This is an error because otherwise we could potentially miss a priority operation + // thus entering priority mode, which is not desired. + tracing::error!("Failed to process new blocks: {err}"); + self.last_processed_ethereum_block = + Self::initialize_state(&*self.client, &mut storage) + .await? 
+ .last_processed_ethereum_block; + } } } + + tracing::info!("Stop signal received, eth_watch is shutting down"); Ok(()) } - #[tracing::instrument(skip(self, storage))] - async fn loop_iteration(&mut self, storage: &mut Connection<'_, Core>) -> Result<(), Error> { + #[tracing::instrument(skip_all)] + async fn loop_iteration( + &mut self, + storage: &mut Connection<'_, Core>, + ) -> Result<(), EventProcessorError> { let stage_latency = METRICS.poll_eth_node[&PollStage::Request].start(); let to_block = self.client.finalized_block_number().await?; if to_block <= self.last_processed_ethereum_block { @@ -179,39 +180,18 @@ impl EthWatch { .await?; stage_latency.observe(); - for processor in self.event_processors.iter_mut() { + for processor in &mut self.event_processors { + let relevant_topic = processor.relevant_topic(); + let processor_events = events + .iter() + .filter(|event| event.topics.get(0) == Some(&relevant_topic)) + .cloned() + .collect(); processor - .process_events(storage, &*self.client, events.clone()) + .process_events(storage, &*self.client, processor_events) .await?; } self.last_processed_ethereum_block = to_block; Ok(()) } } - -pub async fn start_eth_watch( - config: ETHWatchConfig, - pool: ConnectionPool, - eth_gateway: Arc, - diamond_proxy_addr: Address, - governance: (Contract, Address), - stop_receiver: watch::Receiver, -) -> anyhow::Result>> { - let eth_client = EthHttpQueryClient::new( - eth_gateway, - diamond_proxy_addr, - Some(governance.1), - config.confirmations_for_eth_event, - ); - - let eth_watch = EthWatch::new( - diamond_proxy_addr, - Some(governance.0), - Box::new(eth_client), - pool, - config.poll_interval(), - ) - .await; - - Ok(tokio::spawn(eth_watch.run(stop_receiver))) -} diff --git a/core/node/eth_watch/src/metrics.rs b/core/node/eth_watch/src/metrics.rs index c96b8c08483..a3684cc6e72 100644 --- a/core/node/eth_watch/src/metrics.rs +++ b/core/node/eth_watch/src/metrics.rs @@ -15,11 +15,11 @@ pub(super) enum PollStage { #[derive(Debug, Metrics)] #[metrics(prefix = "server_eth_watch")] pub(super) struct EthWatcherMetrics { + /// Number of times Ethereum was polled. pub eth_poll: Counter, + /// Latency of polling and processing events split by stage. 
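The reworked `run` loop above replaces a plain `borrow` check followed by an unconditional `tick` with `borrow_and_update` plus `tokio::select!`, so a stop signal interrupts a pending tick instead of waiting it out. A self-contained sketch of that shape (tokio with timer and sync features assumed):

```rust
use std::time::Duration;
use tokio::sync::watch;

async fn run(mut stop_receiver: watch::Receiver<bool>) {
    let mut timer = tokio::time::interval(Duration::from_millis(100));
    // `borrow_and_update` reads the current flag and marks it as seen, so a
    // signal sent before the loop starts is not missed.
    while !*stop_receiver.borrow_and_update() {
        tokio::select! {
            _ = timer.tick() => { /* poll L1 here */ }
            _ = stop_receiver.changed() => break,
        }
    }
    println!("stop signal received, shutting down");
}

#[tokio::main]
async fn main() {
    let (stop_sender, stop_receiver) = watch::channel(false);
    let handle = tokio::spawn(run(stop_receiver));
    stop_sender.send(true).unwrap();
    handle.await.unwrap();
}
```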
#[metrics(buckets = Buckets::LATENCIES)] pub poll_eth_node: Family>, - #[metrics(buckets = Buckets::LATENCIES)] - pub get_priority_op_events: Histogram, } #[vise::register] diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index 6c4a86b276a..281a036f410 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -12,8 +12,10 @@ use zksync_types::{ Transaction, H256, U256, }; -use super::client::Error; -use crate::{client::EthClient, event_processors::upgrades::UPGRADE_PROPOSAL_SIGNATURE, EthWatch}; +use crate::{ + client::{EthClient, EthClientError}, + EthWatch, +}; #[derive(Debug)] struct FakeEthClientData { @@ -43,15 +45,6 @@ impl FakeEthClientData { } } - fn add_diamond_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { - for (upgrade, eth_block) in upgrades { - self.diamond_upgrades - .entry(*eth_block) - .or_default() - .push(upgrade_into_diamond_proxy_log(upgrade.clone(), *eth_block)); - } - } - fn add_governance_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { for (upgrade, eth_block) in upgrades { self.governance_upgrades @@ -67,11 +60,11 @@ impl FakeEthClientData { } #[derive(Debug, Clone)] -struct FakeEthClient { +struct MockEthClient { inner: Arc>, } -impl FakeEthClient { +impl MockEthClient { fn new() -> Self { Self { inner: Arc::new(RwLock::new(FakeEthClientData::new())), @@ -82,10 +75,6 @@ impl FakeEthClient { self.inner.write().await.add_transactions(transactions); } - async fn add_diamond_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { - self.inner.write().await.add_diamond_upgrades(upgrades); - } - async fn add_governance_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { self.inner.write().await.add_governance_upgrades(upgrades); } @@ -110,13 +99,13 @@ impl FakeEthClient { } #[async_trait::async_trait] -impl EthClient for FakeEthClient { +impl EthClient for MockEthClient { async fn get_events( &self, from: BlockNumber, to: BlockNumber, _retries_left: usize, - ) -> Result, Error> { + ) -> Result, EthClientError> { let from = self.block_to_number(from).await; let to = self.block_to_number(to).await; let mut logs = vec![]; @@ -136,11 +125,11 @@ impl EthClient for FakeEthClient { fn set_topics(&mut self, _topics: Vec) {} - async fn scheduler_vk_hash(&self, _verifier_address: Address) -> Result { + async fn scheduler_vk_hash(&self, _verifier_address: Address) -> Result { Ok(H256::zero()) } - async fn finalized_block_number(&self) -> Result { + async fn finalized_block_number(&self) -> Result { Ok(self.inner.read().await.last_finalized_block_number) } } @@ -198,20 +187,26 @@ fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx } } -#[tokio::test] -async fn test_normal_operation_l1_txs() { - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - - let mut client = FakeEthClient::new(); - let mut watcher = EthWatch::new( +async fn create_test_watcher(connection_pool: ConnectionPool) -> (EthWatch, MockEthClient) { + let client = MockEthClient::new(); + let watcher = EthWatch::new( Address::default(), - None, + &governance_contract(), Box::new(client.clone()), - connection_pool.clone(), + connection_pool, std::time::Duration::from_nanos(1), ) - .await; + .await + .unwrap(); + + (watcher, client) +} + +#[tokio::test] +async fn test_normal_operation_l1_txs() { + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut client) = 
create_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client @@ -247,84 +242,14 @@ async fn test_normal_operation_l1_txs() { } #[tokio::test] -async fn test_normal_operation_upgrades() { - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - - let mut client = FakeEthClient::new(); - let mut watcher = EthWatch::new( - Address::default(), - None, - Box::new(client.clone()), - connection_pool.clone(), - std::time::Duration::from_nanos(1), - ) - .await; - - let mut storage = connection_pool.connection().await.unwrap(); - client - .add_diamond_upgrades(&[ - ( - ProtocolUpgrade { - id: ProtocolVersionId::latest(), - tx: None, - ..Default::default() - }, - 10, - ), - ( - ProtocolUpgrade { - id: ProtocolVersionId::next(), - tx: Some(build_upgrade_tx(ProtocolVersionId::next(), 18)), - ..Default::default() - }, - 18, - ), - ]) - .await; - client.set_last_finalized_block_number(15).await; - // second upgrade will not be processed, as it has less than 5 confirmations - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_ids = storage.protocol_versions_dal().all_version_ids().await; - // there should be genesis version and just added version - assert_eq!(db_ids.len(), 2); - assert_eq!(db_ids[1], ProtocolVersionId::latest()); - - client.set_last_finalized_block_number(20).await; - // now the second upgrade will be processed - watcher.loop_iteration(&mut storage).await.unwrap(); - let db_ids = storage.protocol_versions_dal().all_version_ids().await; - assert_eq!(db_ids.len(), 3); - assert_eq!(db_ids[2], ProtocolVersionId::next()); - - // check that tx was saved with the last upgrade - let tx = storage - .protocol_versions_dal() - .get_protocol_upgrade_tx(ProtocolVersionId::next()) - .await - .unwrap(); - assert_eq!(tx.common_data.upgrade_id, ProtocolVersionId::next()); -} - -#[tokio::test] -async fn test_gap_in_upgrades() { +async fn test_gap_in_governance_upgrades() { let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - - let mut client = FakeEthClient::new(); - let mut watcher = EthWatch::new( - Address::default(), - None, - Box::new(client.clone()), - connection_pool.clone(), - std::time::Duration::from_nanos(1), - ) - .await; + let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client - .add_diamond_upgrades(&[( + .add_governance_upgrades(&[( ProtocolUpgrade { id: ProtocolVersionId::next(), tx: None, @@ -351,15 +276,16 @@ async fn test_normal_operation_governance_upgrades() { let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - let mut client = FakeEthClient::new(); + let mut client = MockEthClient::new(); let mut watcher = EthWatch::new( Address::default(), - Some(governance_contract()), + &governance_contract(), Box::new(client.clone()), connection_pool.clone(), std::time::Duration::from_nanos(1), ) - .await; + .await + .unwrap(); let mut storage = connection_pool.connection().await.unwrap(); client @@ -403,7 +329,8 @@ async fn test_normal_operation_governance_upgrades() { .protocol_versions_dal() .get_protocol_upgrade_tx(ProtocolVersionId::next()) .await - .unwrap(); + .unwrap() + .expect("no protocol upgrade transaction"); assert_eq!(tx.common_data.upgrade_id, ProtocolVersionId::next()); } @@ -412,16 +339,7 @@ async fn test_normal_operation_governance_upgrades() { async fn test_gap_in_single_batch() { 
let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - - let mut client = FakeEthClient::new(); - let mut watcher = EthWatch::new( - Address::default(), - None, - Box::new(client.clone()), - connection_pool.clone(), - std::time::Duration::from_nanos(1), - ) - .await; + let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client @@ -442,16 +360,7 @@ async fn test_gap_in_single_batch() { async fn test_gap_between_batches() { let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - - let mut client = FakeEthClient::new(); - let mut watcher = EthWatch::new( - Address::default(), - None, - Box::new(client.clone()), - connection_pool.clone(), - std::time::Duration::from_nanos(1), - ) - .await; + let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client @@ -467,6 +376,7 @@ async fn test_gap_between_batches() { .await; client.set_last_finalized_block_number(15).await; watcher.loop_iteration(&mut storage).await.unwrap(); + let db_txs = get_all_db_txs(&mut storage).await; assert_eq!(db_txs.len(), 3); client.set_last_finalized_block_number(25).await; @@ -477,16 +387,7 @@ async fn test_gap_between_batches() { async fn test_overlapping_batches() { let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - - let mut client = FakeEthClient::new(); - let mut watcher = EthWatch::new( - Address::default(), - None, - Box::new(client.clone()), - connection_pool.clone(), - std::time::Duration::from_nanos(1), - ) - .await; + let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client @@ -504,10 +405,13 @@ async fn test_overlapping_batches() { .await; client.set_last_finalized_block_number(15).await; watcher.loop_iteration(&mut storage).await.unwrap(); + let db_txs = get_all_db_txs(&mut storage).await; assert_eq!(db_txs.len(), 3); + client.set_last_finalized_block_number(25).await; watcher.loop_iteration(&mut storage).await.unwrap(); + let db_txs = get_all_db_txs(&mut storage).await; assert_eq!(db_txs.len(), 5); let mut db_txs: Vec = db_txs @@ -583,24 +487,6 @@ fn tx_into_log(tx: L1Tx) -> Log { } } -fn upgrade_into_diamond_proxy_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { - let diamond_cut = upgrade_into_diamond_cut(upgrade); - let data = encode(&[diamond_cut, Token::FixedBytes(vec![0u8; 32])]); - Log { - address: Address::repeat_byte(0x1), - topics: vec![UPGRADE_PROPOSAL_SIGNATURE], - data: data.into(), - block_hash: Some(H256::repeat_byte(0x11)), - block_number: Some(eth_block.into()), - transaction_hash: Some(H256::random()), - transaction_index: Some(0u64.into()), - log_index: Some(0u64.into()), - transaction_log_index: Some(0u64.into()), - log_type: None, - removed: None, - } -} - fn upgrade_into_governor_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { let diamond_cut = upgrade_into_diamond_cut(upgrade); let execute_upgrade_selector = zksync_contract() diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs index 32ed33c644a..4b9a7e6af6c 100644 --- a/core/node/node_framework/examples/main_node.rs +++ b/core/node/node_framework/examples/main_node.rs @@ -16,7 +16,7 @@ use zksync_config::{ FriProofCompressorConfig, FriProverConfig, 
FriWitnessGeneratorConfig, ObservabilityConfig, ProofDataHandlerConfig, }, - ApiConfig, ContractVerifierConfig, ContractsConfig, DBConfig, ETHConfig, ETHWatchConfig, + ApiConfig, ContractVerifierConfig, ContractsConfig, DBConfig, EthConfig, EthWatchConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, }; use zksync_core::{ @@ -91,7 +91,7 @@ impl MainNodeBuilder { fn add_pk_signing_client_layer(mut self) -> anyhow::Result { let genesis = GenesisConfig::from_env()?; - let eth_config = ETHConfig::from_env()?; + let eth_config = EthConfig::from_env()?; let wallets = Wallets::from_env()?; self.node.add_layer(PKSigningEthClientLayer::new( eth_config, @@ -103,7 +103,7 @@ impl MainNodeBuilder { } fn add_query_eth_client_layer(mut self) -> anyhow::Result { - let eth_client_config = ETHConfig::from_env()?; + let eth_client_config = EthConfig::from_env()?; let query_eth_client_layer = QueryEthClientLayer::new(eth_client_config.web3_url); self.node.add_layer(query_eth_client_layer); Ok(self) @@ -113,7 +113,7 @@ impl MainNodeBuilder { let gas_adjuster_config = GasAdjusterConfig::from_env()?; let state_keeper_config = StateKeeperConfig::from_env()?; let genesis_config = GenesisConfig::from_env()?; - let eth_sender_config = ETHConfig::from_env()?; + let eth_sender_config = EthConfig::from_env()?; let sequencer_l1_gas_layer = SequencerL1GasLayer::new( gas_adjuster_config, genesis_config, @@ -167,7 +167,7 @@ impl MainNodeBuilder { fn add_eth_watch_layer(mut self) -> anyhow::Result { self.node.add_layer(EthWatchLayer::new( - ETHWatchConfig::from_env()?, + EthWatchConfig::from_env()?, ContractsConfig::from_env()?, )); Ok(self) @@ -297,7 +297,7 @@ impl MainNodeBuilder { Ok(self) } fn add_eth_sender_layer(mut self) -> anyhow::Result { - let eth_sender_config = ETHConfig::from_env()?; + let eth_sender_config = EthConfig::from_env()?; let contracts_config = ContractsConfig::from_env()?; let network_config = NetworkConfig::from_env()?; let genesis_config = GenesisConfig::from_env()?; diff --git a/core/node/node_framework/src/implementations/layers/eth_sender.rs b/core/node/node_framework/src/implementations/layers/eth_sender.rs index c5ab89f9a93..bd865a8a7db 100644 --- a/core/node/node_framework/src/implementations/layers/eth_sender.rs +++ b/core/node/node_framework/src/implementations/layers/eth_sender.rs @@ -4,7 +4,7 @@ use anyhow::Context; use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker; use zksync_config::configs::{ chain::{L1BatchCommitDataGeneratorMode, NetworkConfig}, - eth_sender::ETHConfig, + eth_sender::EthConfig, wallets, ContractsConfig, }; use zksync_core::eth_sender::{ @@ -32,7 +32,7 @@ use crate::{ #[derive(Debug)] pub struct EthSenderLayer { - eth_sender_config: ETHConfig, + eth_sender_config: EthConfig, contracts_config: ContractsConfig, network_config: NetworkConfig, l1chain_id: L1ChainId, @@ -42,7 +42,7 @@ pub struct EthSenderLayer { impl EthSenderLayer { pub fn new( - eth_sender_config: ETHConfig, + eth_sender_config: EthConfig, contracts_config: ContractsConfig, network_config: NetworkConfig, l1chain_id: L1ChainId, diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs index 6fee3f718ba..8abe37de11c 100644 --- a/core/node/node_framework/src/implementations/layers/eth_watch.rs +++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs @@ -1,9 +1,9 @@ use std::time::Duration; -use zksync_config::{ContractsConfig, ETHWatchConfig}; +use 
zksync_config::{ContractsConfig, EthWatchConfig}; use zksync_contracts::governance_contract; use zksync_dal::{ConnectionPool, Core}; -use zksync_eth_watch::{client::EthHttpQueryClient, EthWatch}; +use zksync_eth_watch::{EthHttpQueryClient, EthWatch}; use zksync_types::{ethabi::Contract, Address}; use crate::{ @@ -15,12 +15,12 @@ use crate::{ #[derive(Debug)] pub struct EthWatchLayer { - eth_watch_config: ETHWatchConfig, + eth_watch_config: EthWatchConfig, contracts_config: ContractsConfig, } impl EthWatchLayer { - pub fn new(eth_watch_config: ETHWatchConfig, contracts_config: ContractsConfig) -> Self { + pub fn new(eth_watch_config: EthWatchConfig, contracts_config: ContractsConfig) -> Self { Self { eth_watch_config, contracts_config, @@ -43,13 +43,13 @@ impl WiringLayer for EthWatchLayer { let eth_client = EthHttpQueryClient::new( client, self.contracts_config.diamond_proxy_addr, - Some(self.contracts_config.governance_addr), + self.contracts_config.governance_addr, self.eth_watch_config.confirmations_for_eth_event, ); context.add_task(Box::new(EthWatchTask { main_pool, client: eth_client, - governance_contract: Some(governance_contract()), + governance_contract: governance_contract(), diamond_proxy_address: self.contracts_config.diamond_proxy_addr, poll_interval: self.eth_watch_config.poll_interval(), })); @@ -62,7 +62,7 @@ impl WiringLayer for EthWatchLayer { struct EthWatchTask { main_pool: ConnectionPool, client: EthHttpQueryClient, - governance_contract: Option, + governance_contract: Contract, diamond_proxy_address: Address, poll_interval: Duration, } @@ -76,12 +76,12 @@ impl Task for EthWatchTask { async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { let eth_watch = EthWatch::new( self.diamond_proxy_address, - self.governance_contract, + &self.governance_contract, Box::new(self.client), self.main_pool, self.poll_interval, ) - .await; + .await?; eth_watch.run(stop_receiver.0).await } diff --git a/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs b/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs index 59a9a1c568e..5e27cf38bd6 100644 --- a/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs +++ b/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use zksync_config::{ configs::{wallets, ContractsConfig}, - ETHConfig, + EthConfig, }; use zksync_eth_client::clients::PKSigningClient; use zksync_types::L1ChainId; @@ -15,7 +15,7 @@ use crate::{ #[derive(Debug)] pub struct PKSigningEthClientLayer { - eth_sender_config: ETHConfig, + eth_sender_config: EthConfig, contracts_config: ContractsConfig, l1chain_id: L1ChainId, wallets: wallets::EthSender, @@ -23,7 +23,7 @@ pub struct PKSigningEthClientLayer { impl PKSigningEthClientLayer { pub fn new( - eth_sender_config: ETHConfig, + eth_sender_config: EthConfig, contracts_config: ContractsConfig, l1chain_id: L1ChainId, wallets: wallets::EthSender, diff --git a/docs/guides/advanced/02_deposits.md b/docs/guides/advanced/02_deposits.md index 64802460c25..7a40e33f91c 100644 --- a/docs/guides/advanced/02_deposits.md +++ b/docs/guides/advanced/02_deposits.md @@ -144,9 +144,8 @@ gas limit (641k) and set the gas per pubdata byte limit to 800. (TODO: explain w The call to requestL2Transaction, is adding the transaction to the priorityQueue and then emits the NewPriorityRequest. 
The zk server (that you started with `zk server` command) is listening on events that are emitted from this contract -(via eth_watcher module - -[`loop_iteration` function](https://github.com/matter-labs/zksync-era/blob/main/core/node/eth_watch/src/lib.rs#L165)) -and adds them to the postgres database (into `transactions` table). +(via the [`eth_watcher` component](https://github.com/matter-labs/zksync-era/blob/main/core/node/eth_watch/)) and adds +them to the postgres database (into `transactions` table). You can actually check it - by running the psql and looking at the contents of the table - then you'll notice that transaction was successfully inserted, and it was also marked as 'priority' (as it came from L1) - as regular diff --git a/docs/guides/advanced/how_transaction_works.md b/docs/guides/advanced/how_transaction_works.md index f3ee6fe08d1..800b2612d16 100644 --- a/docs/guides/advanced/how_transaction_works.md +++ b/docs/guides/advanced/how_transaction_works.md @@ -36,8 +36,8 @@ Here's the code that does the parsing: [TransactionRequest::from_bytes][transact ### Priority queue (L1 Tx only) L1 transactions are first 'packaged' and then sent to our Ethereum contract. After this, the L1 contract records this -transaction in L1 logs. Our 'eth_watcher' constantly monitors these logs through the -[`get_priority_op_events`][get_priority_op_events] method and then adds them to the database (mempool). +transaction in L1 logs. [The `eth_watcher` component][eth_watcher] constantly monitors these logs and then adds them to +the database (mempool). ### RPC & validation (L2 Tx only) @@ -79,9 +79,7 @@ The transaction can have three different results in state keeper: [transaction_request_from_bytes]: https://github.com/matter-labs/zksync-era/blob/main/core/lib/types/src/transaction_request.rs#L196 'transaction request from bytes' -[get_priority_op_events]: - https://github.com/matter-labs/zksync-era/blob/main/core/node/eth_watch/src/client.rs - 'get priority op events' +[eth_watcher]: https://github.com/matter-labs/zksync-era/blob/main/core/node/eth_watch 'Ethereum watcher component' [l1_tx]: https://github.com/matter-labs/zksync-era/blob/main/core/lib/types/src/l1/mod.rs#L183 'l1 tx' [l2_tx]: https://github.com/matter-labs/zksync-era/blob/main/core/lib/types/src/l2/mod.rs#L140 'l2 tx' [submit_tx]: diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index 5ff7ee3b2d0..0cf9ddb5b48 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -14,6 +14,7 @@ zksync_server=debug,\ zksync_contract_verifier=debug,\ zksync_dal=info,\ zksync_eth_client=info,\ +zksync_eth_watch=debug,\ zksync_storage=info,\ zksync_db_manager=info,\ zksync_merkle_tree=info,\ From eb432405fe74aac58e8c9bc716d10ef651112e7f Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 12 Apr 2024 17:41:29 +0300 Subject: [PATCH 24/29] fix(ci): Bump `release-please-action` to v4 (#1666) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fixes release-please config broken after #1584, by bumping the corresponding GitHub action. ## Why ❔ release-please shouldn't be broken. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). 
--- .github/release-please/config.json | 1 + .github/workflows/release-please.yml | 4 +--- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/release-please/config.json b/.github/release-please/config.json index 4f69aa4f937..ec6df305d0e 100644 --- a/.github/release-please/config.json +++ b/.github/release-please/config.json @@ -4,6 +4,7 @@ "bootstrap-sha": "691a7008f6d1f88fb9a5b6b8d92592e1199f37ea", "bump-minor-pre-major": true, "bump-patch-for-minor-pre-major": true, + "include-component-in-tag": true, "packages": { "core": { "release-type": "simple", diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index c6f1fa5454f..fb426a0edd6 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -17,13 +17,11 @@ jobs: steps: - name: Run release-please id: release - uses: google-github-actions/release-please-action@v3 + uses: google-github-actions/release-please-action@v4 with: token: ${{ secrets.RELEASE_TOKEN }} - command: manifest config-file: .github/release-please/config.json manifest-file: .github/release-please/manifest.json - monorepo-tags: true - name: Send Release Info if: ${{ steps.release.outputs.releases_created }} From 1c85271e6e2b00b049d3238ede07457e0cedb541 Mon Sep 17 00:00:00 2001 From: pompon0 Date: Fri, 12 Apr 2024 18:53:08 +0200 Subject: [PATCH 25/29] chore: bumped consensus deps (#1678) Includes a quickfix for the excessive throttling of p2p requests: https://github.com/matter-labs/era-consensus/pull/95 --- Cargo.lock | 23 ++++++++++++----------- Cargo.toml | 20 ++++++++++---------- prover/Cargo.lock | 14 +++++++------- 3 files changed, 29 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8ffce1cb701..5f7e8af3a57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8149,7 +8149,7 @@ dependencies = [ [[package]] name = "zksync_concurrency" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "once_cell", @@ -8179,7 +8179,7 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "async-trait", @@ -8200,7 +8200,7 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "blst", @@ -8218,7 +8218,7 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies 
= [ "anyhow", "rand 0.8.5", @@ -8238,7 +8238,7 @@ dependencies = [ [[package]] name = "zksync_consensus_network" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "async-trait", @@ -8263,7 +8263,7 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "bit-vec", @@ -8283,7 +8283,7 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "async-trait", @@ -8301,9 +8301,10 @@ dependencies = [ [[package]] name = "zksync_consensus_sync_blocks" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", + "rand 0.8.5", "thiserror", "tracing", "zksync_concurrency", @@ -8316,7 +8317,7 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "rand 0.8.5", "thiserror", @@ -8775,7 +8776,7 @@ dependencies = [ [[package]] name = "zksync_protobuf" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "bit-vec", @@ -8795,7 +8796,7 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "heck 0.5.0", diff --git a/Cargo.toml b/Cargo.toml index 8aa37038e30..526aad46af7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -170,16 +170,16 @@ zk_evm_1_3_3 = { package = "zk_evm", git = "https://github.com/matter-labs/era-z zk_evm_1_4_0 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.0" } zk_evm_1_4_1 = { package = "zk_evm", git = 
"https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.1" } zk_evm_1_5_0 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.5.0" } -zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } -zksync_consensus_bft = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } -zksync_consensus_crypto = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } -zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } -zksync_consensus_network = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } -zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } -zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } -zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } -zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "36be3daba58703c5639892c2f3a6b037f0654837" } +zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "c9935c0fa69cde357a3d6f5eca148962dd3313e1" } +zksync_consensus_bft = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "c9935c0fa69cde357a3d6f5eca148962dd3313e1" } +zksync_consensus_crypto = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "c9935c0fa69cde357a3d6f5eca148962dd3313e1" } +zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "c9935c0fa69cde357a3d6f5eca148962dd3313e1" } +zksync_consensus_network = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "c9935c0fa69cde357a3d6f5eca148962dd3313e1" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "c9935c0fa69cde357a3d6f5eca148962dd3313e1" } +zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "c9935c0fa69cde357a3d6f5eca148962dd3313e1" } +zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "c9935c0fa69cde357a3d6f5eca148962dd3313e1" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "c9935c0fa69cde357a3d6f5eca148962dd3313e1" } +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "c9935c0fa69cde357a3d6f5eca148962dd3313e1" } # "Local" dependencies multivm = { path = "core/lib/multivm" } diff --git a/prover/Cargo.lock b/prover/Cargo.lock index d20418a2d3f..7262bd7ecda 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7501,7 +7501,7 @@ dependencies = [ [[package]] name = 
"zksync_concurrency" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "once_cell", @@ -7531,7 +7531,7 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "blst", @@ -7549,7 +7549,7 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "bit-vec", @@ -7569,7 +7569,7 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "async-trait", @@ -7587,7 +7587,7 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "rand 0.8.5", "thiserror", @@ -7773,7 +7773,7 @@ dependencies = [ [[package]] name = "zksync_protobuf" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "bit-vec", @@ -7793,7 +7793,7 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=36be3daba58703c5639892c2f3a6b037f0654837#36be3daba58703c5639892c2f3a6b037f0654837" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=c9935c0fa69cde357a3d6f5eca148962dd3313e1#c9935c0fa69cde357a3d6f5eca148962dd3313e1" dependencies = [ "anyhow", "heck 0.5.0", From c51ca91f05e65bffd52c190115bdb39180880f2b Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 15 Apr 2024 14:54:14 +0300 Subject: [PATCH 26/29] fix(en): Fix DB pool for Postgres metrics on EN (#1675) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Uses a singleton DB pool for scraping Postgres metrics instead of sharing the main pool. 
## Why ❔ The main pool may be used at capacity (e.g., during snapshot recovery), which leads to timeouts collecting Postgres metrics. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. - [x] Linkcheck has been run via `zk linkcheck`. --- core/bin/external_node/src/main.rs | 15 +++++++------ core/lib/dal/src/metrics.rs | 2 +- .../implementations/layers/house_keeper.rs | 21 ++++++++++++------- 3 files changed, 24 insertions(+), 14 deletions(-) diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 074ae97388b..0497a331a31 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -408,7 +408,7 @@ async fn run_api( sync_state: SyncState, tree_reader: Option>, main_node_client: L2Client, - singleton_pool_builder: ConnectionPoolBuilder, + singleton_pool_builder: &ConnectionPoolBuilder, fee_params_fetcher: Arc, components: &HashSet, ) -> anyhow::Result<()> { @@ -565,9 +565,11 @@ async fn run_api( Ok(()) } +#[allow(clippy::too_many_arguments)] async fn init_tasks( config: &ExternalNodeConfig, connection_pool: ConnectionPool, + singleton_pool_builder: ConnectionPoolBuilder, main_node_client: L2Client, task_handles: &mut Vec>>, app_health: &AppHealthCheck, @@ -577,7 +579,6 @@ async fn init_tasks( let protocol_version_update_task = EN_METRICS.run_protocol_version_updates(connection_pool.clone(), stop_receiver.clone()); task_handles.push(tokio::spawn(protocol_version_update_task)); - let singleton_pool_builder = ConnectionPool::singleton(&config.postgres.database_url); // Run the components. let tree_pool = singleton_pool_builder @@ -657,7 +658,7 @@ async fn init_tasks( sync_state, tree_reader, main_node_client, - singleton_pool_builder, + &singleton_pool_builder, fee_params_fetcher.clone(), components, ) @@ -804,6 +805,7 @@ async fn main() -> anyhow::Result<()> { RUST_METRICS.initialize(); EN_METRICS.observe_config(&config); + let singleton_pool_builder = ConnectionPool::singleton(&config.postgres.database_url); let connection_pool = ConnectionPool::::builder( &config.postgres.database_url, config.postgres.max_connections, @@ -843,7 +845,7 @@ async fn main() -> anyhow::Result<()> { app_health.clone(), ); // Start scraping Postgres metrics before store initialization as well. - let pool_for_metrics = connection_pool.clone(); + let pool_for_metrics = singleton_pool_builder.build().await?; let mut stop_receiver_for_metrics = stop_receiver.clone(); let metrics_task = tokio::spawn(async move { tokio::select! 
{ @@ -930,8 +932,9 @@ async fn main() -> anyhow::Result<()> { init_tasks( &config, - connection_pool.clone(), - main_node_client.clone(), + connection_pool, + singleton_pool_builder, + main_node_client, &mut task_handles, &app_health, stop_receiver.clone(), diff --git a/core/lib/dal/src/metrics.rs b/core/lib/dal/src/metrics.rs index 2ec80af5db5..7f2ae59b4a5 100644 --- a/core/lib/dal/src/metrics.rs +++ b/core/lib/dal/src/metrics.rs @@ -29,7 +29,7 @@ static POSTGRES_METRICS: vise::Global = vise::Global::new(); impl PostgresMetrics { pub async fn run_scraping(pool: ConnectionPool, scrape_interval: Duration) { - let scrape_timeout = Duration::from_secs(1).min(scrape_interval / 2); + let scrape_timeout = Duration::from_secs(5).min(scrape_interval / 2); loop { match tokio::time::timeout(scrape_timeout, Self::scrape(&pool)).await { Err(_) => { diff --git a/core/node/node_framework/src/implementations/layers/house_keeper.rs b/core/node/node_framework/src/implementations/layers/house_keeper.rs index f6257808fc8..15cc10a22ad 100644 --- a/core/node/node_framework/src/implementations/layers/house_keeper.rs +++ b/core/node/node_framework/src/implementations/layers/house_keeper.rs @@ -70,8 +70,8 @@ impl WiringLayer for HouseKeeperLayer { let prover_pool = prover_pool_resource.get().await?; // initialize and add tasks - let pool_for_metrics = replica_pool.clone(); - context.add_task(Box::new(PoolForMetricsTask { pool_for_metrics })); + let pool_for_metrics = replica_pool_resource.get_singleton().await?; + context.add_task(Box::new(PostgresMetricsScrapingTask { pool_for_metrics })); let l1_batch_metrics_reporter = L1BatchMetricsReporter::new( self.house_keeper_config @@ -184,18 +184,25 @@ impl WiringLayer for HouseKeeperLayer { } #[derive(Debug)] -struct PoolForMetricsTask { +struct PostgresMetricsScrapingTask { pool_for_metrics: ConnectionPool, } #[async_trait::async_trait] -impl Task for PoolForMetricsTask { +impl Task for PostgresMetricsScrapingTask { fn name(&self) -> &'static str { - "pool_for_metrics" + "postgres_metrics_scraping" } - async fn run(self: Box, _stop_receiver: StopReceiver) -> anyhow::Result<()> { - PostgresMetrics::run_scraping(self.pool_for_metrics, SCRAPE_INTERVAL).await; + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + tokio::select! { + () = PostgresMetrics::run_scraping(self.pool_for_metrics, SCRAPE_INTERVAL) => { + tracing::warn!("Postgres metrics scraping unexpectedly stopped"); + } + _ = stop_receiver.0.changed() => { + tracing::info!("Stop signal received, Postgres metrics scraping is shutting down"); + } + } Ok(()) } } From addf887d07891b26413f846ffec7cf14bc7bbd3e Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 16 Apr 2024 10:49:11 +0300 Subject: [PATCH 27/29] refactor(en): Brush up pruning code (#1664) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Miscellaneous brush-up after #1418 is merged. ## Why ❔ - Improves code maintainability. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `zk spellcheck`. - [x] Linkcheck has been run via `zk linkcheck`. 
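To make the brush-up concrete, the external-node wiring after this refactor condenses to roughly the following. This is a sketch assembled from the `main.rs` diff below: the `DbPrunerConfig` field names are taken from this patch, while `DbPruner::run`'s exact signature and the `watch::Receiver<bool>` stop channel are assumptions.

```rust
use std::time::Duration;

use tokio::sync::watch;
use zksync_core::db_pruner::{DbPruner, DbPrunerConfig};
use zksync_dal::{ConnectionPool, Core};

/// Sketch of the post-refactor wiring: the prune conditions that previously
/// had to be passed in as a list of trait objects are now constructed inside
/// `DbPruner::new`, so callers only supply a config and a connection pool.
fn spawn_db_pruner(
    data_retention_hours: u64,
    pruning_chunk_size: u32,
    connection_pool: ConnectionPool<Core>,
    stop_receiver: watch::Receiver<bool>,
) -> tokio::task::JoinHandle<anyhow::Result<()>> {
    let minimum_l1_batch_age = Duration::from_secs(3_600 * data_retention_hours);
    let db_pruner = DbPruner::new(
        DbPrunerConfig {
            // Keep in sync with the API server's pruning info cache max age.
            soft_and_hard_pruning_time_delta: Duration::from_secs(60),
            next_iterations_delay: Duration::from_secs(30),
            pruned_batch_chunk_size: pruning_chunk_size,
            minimum_l1_batch_age,
        },
        connection_pool,
    );
    tokio::spawn(db_pruner.run(stop_receiver))
}
```

Compare with the removed code below, where each condition (`L1BatchExistsCondition`, `NextL1BatchHasMetadataCondition`, and so on) had to be constructed by the caller with its own pool clone.
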
--- core/bin/external_node/src/config/mod.rs | 12 +- core/bin/external_node/src/main.rs | 41 +- ...c3bdae18b88c4c223e374df9a997c271b091b.json | 38 ++ ...760b8368dd5532caa5fb8b4310a96d44d8530.json | 15 + ...9f3f0b5d629fdb5c36ea1bfb93ed246be968e.json | 88 --- ...618217af059a796e4a3cc0f5269709f7ffd49.json | 14 + ...ef5f6b03c144f5a35204c8c77b7098548b19.json} | 4 +- ...fb39721a54d7c763b6f3fde410595e8fbf85d.json | 22 - ...0e8a100140875f95cd8cf5de3c6202d59a19c.json | 15 + ...b989aabf87887d2d1c3db01b43b442241fca7.json | 23 - ...5d84dc38610a86642d359c10f709145a9b446.json | 15 + ...9fe7faa058a350d9de1970e5d5c341857a412.json | 23 - ...703791fdf19fd14abad08a037bbef4da12517.json | 22 - ...466f8a4efab1a52acc26f3a6c6b219a46390b.json | 15 + ...f6d8b037d7a5fade524a4faadb421399a00b.json} | 4 +- ...b7c1834960bffb558cfbdbfb014ea929c815f.json | 38 -- ...d10c23ab2924f6a3f0594f6990af8ea9146a6.json | 15 + ...18a8505c7544ff0e167731b867ff2abbe9879.json | 23 - ...5679d90947c0af172c9a199cf9280aa557e95.json | 88 +++ ...70531d0b0a90e3c8c478c86c70e3d7e324579.json | 34 -- ...5a9a38a8d8ff1fba9a834d37927793f532b7d.json | 14 + ...685582bb37ba816a5434ee546d86ef9a8d9e.json} | 4 +- ...f64c789d3a8a1d6c96c58213c23a055cde751.json | 15 + ...a4b3bcf5d7ecae5e7f7abf8315faca6bc4917.json | 22 - ...b7d884827ff78a6dfd2085a41588d6d0a7523.json | 22 - ...ac942fc88835d4bd925e4144cfbb7bc1cf2c.json} | 4 +- ...aef121de4e55af741dccae40ccfd0bffc84e4.json | 23 - core/lib/dal/src/lib.rs | 6 +- core/lib/dal/src/pruning_dal.rs | 438 -------------- core/lib/dal/src/pruning_dal/mod.rs | 470 +++++++++++++++ core/lib/dal/src/pruning_dal/tests.rs | 535 ++++++++++++++++++ core/lib/dal/src/pruning_dal_tests.rs | 477 ---------------- core/lib/dal/src/storage_logs_dal.rs | 38 +- core/lib/dal/src/transactions_web3_dal.rs | 99 ++-- core/lib/snapshots_applier/src/lib.rs | 218 +++---- .../src/api_server/execution_sandbox/mod.rs | 82 ++- .../src/api_server/tx_sender/mod.rs | 22 +- .../src/api_server/tx_sender/tests.rs | 11 +- .../zksync_core/src/api_server/web3/pubsub.rs | 69 ++- core/lib/zksync_core/src/db_pruner/mod.rs | 296 ++++++---- .../src/db_pruner/prune_conditions.rs | 54 +- 41 files changed, 1758 insertions(+), 1710 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-046590a310cc7bbfa421d0d8e20c3bdae18b88c4c223e374df9a997c271b091b.json create mode 100644 core/lib/dal/.sqlx/query-0d60eb7536b49716fa01a14b0be760b8368dd5532caa5fb8b4310a96d44d8530.json delete mode 100644 core/lib/dal/.sqlx/query-1862d3a78e4e9068df1b8ce3bbe9f3f0b5d629fdb5c36ea1bfb93ed246be968e.json create mode 100644 core/lib/dal/.sqlx/query-3a82d9a45f4a90b8baf82f3e281618217af059a796e4a3cc0f5269709f7ffd49.json rename core/lib/dal/.sqlx/{query-b6837d2deed935da748339538c2c332a122d0b88271ae0127c65c4612b41a619.json => query-3feb0cae2cd055bc2a02e5993db9ef5f6b03c144f5a35204c8c77b7098548b19.json} (95%) delete mode 100644 core/lib/dal/.sqlx/query-4b4b010b6a9e0e1a01aa61546e2fb39721a54d7c763b6f3fde410595e8fbf85d.json create mode 100644 core/lib/dal/.sqlx/query-4cff62fad4a7044a824a60656050e8a100140875f95cd8cf5de3c6202d59a19c.json delete mode 100644 core/lib/dal/.sqlx/query-550ddf6034c382f35b8cb7a6c93b989aabf87887d2d1c3db01b43b442241fca7.json create mode 100644 core/lib/dal/.sqlx/query-70fa8e4193c4e546227b328f25c5d84dc38610a86642d359c10f709145a9b446.json delete mode 100644 core/lib/dal/.sqlx/query-7511667f6b5b13c4062a29bff499fe7faa058a350d9de1970e5d5c341857a412.json delete mode 100644 core/lib/dal/.sqlx/query-7ce9b4d426570d9bef3fddbcbd0703791fdf19fd14abad08a037bbef4da12517.json create mode 100644 
core/lib/dal/.sqlx/query-8f662682747a24fbe122533f421466f8a4efab1a52acc26f3a6c6b219a46390b.json rename core/lib/dal/.sqlx/{query-3d7536cfe7d88dceebff2125a51bcae561c0eea2b1cd8beb245b0cc66ebffcaa.json => query-982a7566aebc4f94b37c9cbe32f0f6d8b037d7a5fade524a4faadb421399a00b.json} (95%) delete mode 100644 core/lib/dal/.sqlx/query-9a31c42d03be1bc564e7aaa8574b7c1834960bffb558cfbdbfb014ea929c815f.json create mode 100644 core/lib/dal/.sqlx/query-a51b8f1eeb6ef6800619e7a5a91d10c23ab2924f6a3f0594f6990af8ea9146a6.json delete mode 100644 core/lib/dal/.sqlx/query-ab13d2be03610a8372fdf03c9fa18a8505c7544ff0e167731b867ff2abbe9879.json create mode 100644 core/lib/dal/.sqlx/query-ac47b807af0441cd522a41879f25679d90947c0af172c9a199cf9280aa557e95.json delete mode 100644 core/lib/dal/.sqlx/query-c0817ac0b9385cdc563d05d57d270531d0b0a90e3c8c478c86c70e3d7e324579.json create mode 100644 core/lib/dal/.sqlx/query-c29909fda6f1e4116a9a28407805a9a38a8d8ff1fba9a834d37927793f532b7d.json rename core/lib/dal/.sqlx/{query-555f396946bdb8b84a5d77abbfc1397212b4767039a6c0e22697cf40969729af.json => query-c75cdc655cd843a474f857e80b30685582bb37ba816a5434ee546d86ef9a8d9e.json} (85%) create mode 100644 core/lib/dal/.sqlx/query-d3e4ee6677ce9de438abf7529aaf64c789d3a8a1d6c96c58213c23a055cde751.json delete mode 100644 core/lib/dal/.sqlx/query-e68bc95257fbb9d6c968c5557c0a4b3bcf5d7ecae5e7f7abf8315faca6bc4917.json delete mode 100644 core/lib/dal/.sqlx/query-e6d86f3a4c87e2f70978971f9b5b7d884827ff78a6dfd2085a41588d6d0a7523.json rename core/lib/dal/.sqlx/{query-aac861efb4acb81d5cefa598c822bef649a6db197a36aca098cd8054909d82e9.json => query-f406091f793e2eb09d9490f2f8f7ac942fc88835d4bd925e4144cfbb7bc1cf2c.json} (96%) delete mode 100644 core/lib/dal/.sqlx/query-f79e5c76293efda1bcd2b07c4dbaef121de4e55af741dccae40ccfd0bffc84e4.json delete mode 100644 core/lib/dal/src/pruning_dal.rs create mode 100644 core/lib/dal/src/pruning_dal/mod.rs create mode 100644 core/lib/dal/src/pruning_dal/tests.rs delete mode 100644 core/lib/dal/src/pruning_dal_tests.rs diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 34f435e2b25..704162f337c 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -290,8 +290,12 @@ pub(crate) struct OptionalENConfig { #[serde(default = "OptionalENConfig::default_l1_batch_commit_data_generator_mode")] pub l1_batch_commit_data_generator_mode: L1BatchCommitDataGeneratorMode, - - #[serde(default = "OptionalENConfig::default_snapshots_recovery_enabled")] + /// Enables application-level snapshot recovery. Required to start a node that was recovered from a snapshot, + /// or to initialize a node from a snapshot. Has no effect if a node that was initialized from a Postgres dump + /// or was synced from genesis. + /// + /// This is an experimental and incomplete feature; do not use unless you know what you're doing. 
+ #[serde(default)] pub snapshots_recovery_enabled: bool, #[serde(default = "OptionalENConfig::default_pruning_chunk_size")] @@ -434,10 +438,6 @@ impl OptionalENConfig { L1BatchCommitDataGeneratorMode::Rollup } - const fn default_snapshots_recovery_enabled() -> bool { - false - } - const fn default_pruning_chunk_size() -> u32 { 10 } diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 0497a331a31..e9d9055b2de 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -24,14 +24,7 @@ use zksync_core::{ block_reverter::{BlockReverter, BlockReverterFlags, L1ExecutedBatchesRevert, NodeRole}, consensus, consistency_checker::ConsistencyChecker, - db_pruner::{ - prune_conditions::{ - ConsistencyCheckerProcessedBatch, L1BatchExistsCondition, - L1BatchOlderThanPruneCondition, NextL1BatchHasMetadataCondition, - NextL1BatchWasExecutedCondition, - }, - DbPruner, DbPrunerConfig, - }, + db_pruner::{DbPruner, DbPrunerConfig}, eth_sender::l1_batch_commit_data_generator::{ L1BatchCommitDataGenerator, RollupModeL1BatchCommitDataGenerator, ValidiumModeL1BatchCommitDataGenerator, @@ -264,39 +257,21 @@ async fn run_core( })); if let Some(data_retention_hours) = config.optional.pruning_data_retention_hours { - let l1_batch_age_to_prune = Duration::from_secs(3600 * data_retention_hours); + let minimum_l1_batch_age = Duration::from_secs(3600 * data_retention_hours); tracing::info!( - "Configured pruning of batches after they become {l1_batch_age_to_prune:?} old" + "Configured pruning of batches after they become {minimum_l1_batch_age:?} old" ); let db_pruner = DbPruner::new( DbPrunerConfig { // don't change this value without adjusting API server pruning info cache max age soft_and_hard_pruning_time_delta: Duration::from_secs(60), - pruned_batch_chunk_size: config.optional.pruning_chunk_size, next_iterations_delay: Duration::from_secs(30), + pruned_batch_chunk_size: config.optional.pruning_chunk_size, + minimum_l1_batch_age, }, - vec![ - Arc::new(L1BatchExistsCondition { - conn: connection_pool.clone(), - }), - Arc::new(NextL1BatchHasMetadataCondition { - conn: connection_pool.clone(), - }), - Arc::new(NextL1BatchWasExecutedCondition { - conn: connection_pool.clone(), - }), - Arc::new(L1BatchOlderThanPruneCondition { - minimal_age: l1_batch_age_to_prune, - conn: connection_pool.clone(), - }), - Arc::new(ConsistencyCheckerProcessedBatch { - conn: connection_pool.clone(), - }), - ], - )?; - task_handles.push(tokio::spawn( - db_pruner.run(connection_pool.clone(), stop_receiver.clone()), - )); + connection_pool.clone(), + ); + task_handles.push(tokio::spawn(db_pruner.run(stop_receiver.clone()))); } let reorg_detector = ReorgDetector::new(main_node_client.clone(), connection_pool.clone()); diff --git a/core/lib/dal/.sqlx/query-046590a310cc7bbfa421d0d8e20c3bdae18b88c4c223e374df9a997c271b091b.json b/core/lib/dal/.sqlx/query-046590a310cc7bbfa421d0d8e20c3bdae18b88c4c223e374df9a997c271b091b.json new file mode 100644 index 00000000000..950d72a3e23 --- /dev/null +++ b/core/lib/dal/.sqlx/query-046590a310cc7bbfa421d0d8e20c3bdae18b88c4c223e374df9a997c271b091b.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n soft AS (\n SELECT\n pruned_l1_batch,\n pruned_miniblock\n FROM\n pruning_log\n WHERE\n TYPE = 'Soft'\n ORDER BY\n pruned_l1_batch DESC\n LIMIT\n 1\n ),\n hard AS (\n SELECT\n pruned_l1_batch,\n pruned_miniblock\n FROM\n pruning_log\n WHERE\n TYPE = 'Hard'\n ORDER BY\n pruned_l1_batch DESC\n LIMIT\n 1\n )\n SELECT\n 
soft.pruned_l1_batch AS last_soft_pruned_l1_batch,\n soft.pruned_miniblock AS last_soft_pruned_miniblock,\n hard.pruned_l1_batch AS last_hard_pruned_l1_batch,\n hard.pruned_miniblock AS last_hard_pruned_miniblock\n FROM\n soft\n FULL JOIN hard ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_soft_pruned_l1_batch", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "last_soft_pruned_miniblock", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "last_hard_pruned_l1_batch", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "last_hard_pruned_miniblock", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "046590a310cc7bbfa421d0d8e20c3bdae18b88c4c223e374df9a997c271b091b" +} diff --git a/core/lib/dal/.sqlx/query-0d60eb7536b49716fa01a14b0be760b8368dd5532caa5fb8b4310a96d44d8530.json b/core/lib/dal/.sqlx/query-0d60eb7536b49716fa01a14b0be760b8368dd5532caa5fb8b4310a96d44d8530.json new file mode 100644 index 00000000000..fcb753088e9 --- /dev/null +++ b/core/lib/dal/.sqlx/query-0d60eb7536b49716fa01a14b0be760b8368dd5532caa5fb8b4310a96d44d8530.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE transactions\n SET\n input = NULL,\n data = '{}',\n execution_info = '{}',\n updated_at = NOW()\n WHERE\n miniblock_number BETWEEN $1 AND $2\n AND upgrade_id IS NULL\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "0d60eb7536b49716fa01a14b0be760b8368dd5532caa5fb8b4310a96d44d8530" +} diff --git a/core/lib/dal/.sqlx/query-1862d3a78e4e9068df1b8ce3bbe9f3f0b5d629fdb5c36ea1bfb93ed246be968e.json b/core/lib/dal/.sqlx/query-1862d3a78e4e9068df1b8ce3bbe9f3f0b5d629fdb5c36ea1bfb93ed246be968e.json deleted file mode 100644 index 1bb2d641bef..00000000000 --- a/core/lib/dal/.sqlx/query-1862d3a78e4e9068df1b8ce3bbe9f3f0b5d629fdb5c36ea1bfb93ed246be968e.json +++ /dev/null @@ -1,88 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n transactions.is_priority,\n transactions.initiator_address,\n transactions.gas_limit,\n transactions.gas_per_pubdata_limit,\n transactions.received_at,\n transactions.miniblock_number,\n transactions.error,\n transactions.effective_gas_price,\n transactions.refunded_gas,\n commit_tx.tx_hash AS \"eth_commit_tx_hash?\",\n prove_tx.tx_hash AS \"eth_prove_tx_hash?\",\n execute_tx.tx_hash AS \"eth_execute_tx_hash?\"\n FROM\n transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history AS commit_tx ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n transactions.hash = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "is_priority", - "type_info": "Bool" - }, - { - "ordinal": 1, - "name": "initiator_address", - "type_info": "Bytea" - }, - { - "ordinal": 2, - "name": "gas_limit", - "type_info": "Numeric" - }, - { - "ordinal": 3, - "name": "gas_per_pubdata_limit", - "type_info": "Numeric" - }, - { - "ordinal": 4, - "name": "received_at", - "type_info": "Timestamp" - }, - { - "ordinal": 5, - 
"name": "miniblock_number", - "type_info": "Int8" - }, - { - "ordinal": 6, - "name": "error", - "type_info": "Varchar" - }, - { - "ordinal": 7, - "name": "effective_gas_price", - "type_info": "Numeric" - }, - { - "ordinal": 8, - "name": "refunded_gas", - "type_info": "Int8" - }, - { - "ordinal": 9, - "name": "eth_commit_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 10, - "name": "eth_prove_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 11, - "name": "eth_execute_tx_hash?", - "type_info": "Text" - } - ], - "parameters": { - "Left": [ - "Bytea" - ] - }, - "nullable": [ - false, - false, - true, - true, - false, - true, - true, - true, - false, - false, - false, - false - ] - }, - "hash": "1862d3a78e4e9068df1b8ce3bbe9f3f0b5d629fdb5c36ea1bfb93ed246be968e" -} diff --git a/core/lib/dal/.sqlx/query-3a82d9a45f4a90b8baf82f3e281618217af059a796e4a3cc0f5269709f7ffd49.json b/core/lib/dal/.sqlx/query-3a82d9a45f4a90b8baf82f3e281618217af059a796e4a3cc0f5269709f7ffd49.json new file mode 100644 index 00000000000..cb33b4c6487 --- /dev/null +++ b/core/lib/dal/.sqlx/query-3a82d9a45f4a90b8baf82f3e281618217af059a796e4a3cc0f5269709f7ffd49.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM l1_batches\n WHERE\n number <= $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "3a82d9a45f4a90b8baf82f3e281618217af059a796e4a3cc0f5269709f7ffd49" +} diff --git a/core/lib/dal/.sqlx/query-b6837d2deed935da748339538c2c332a122d0b88271ae0127c65c4612b41a619.json b/core/lib/dal/.sqlx/query-3feb0cae2cd055bc2a02e5993db9ef5f6b03c144f5a35204c8c77b7098548b19.json similarity index 95% rename from core/lib/dal/.sqlx/query-b6837d2deed935da748339538c2c332a122d0b88271ae0127c65c4612b41a619.json rename to core/lib/dal/.sqlx/query-3feb0cae2cd055bc2a02e5993db9ef5f6b03c144f5a35204c8c77b7098548b19.json index acd2d51f6ea..0c7acd0125b 100644 --- a/core/lib/dal/.sqlx/query-b6837d2deed935da748339538c2c332a122d0b88271ae0127c65c4612b41a619.json +++ b/core/lib/dal/.sqlx/query-3feb0cae2cd055bc2a02e5993db9ef5f6b03c144f5a35204c8c77b7098548b19.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n sl AS (\n SELECT DISTINCT\n ON (storage_logs.tx_hash) *\n FROM\n storage_logs\n WHERE\n storage_logs.address = $1\n AND storage_logs.tx_hash = ANY ($3)\n ORDER BY\n storage_logs.tx_hash,\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n )\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.miniblock_number AS \"block_number!\",\n transactions.error AS error,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.initiator_address AS initiator_address,\n transactions.data -> 'to' AS \"transfer_to?\",\n transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n transactions.tx_format AS \"tx_format?\",\n transactions.refunded_gas AS refunded_gas,\n transactions.gas_limit AS gas_limit,\n miniblocks.hash AS \"block_hash\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n sl.key AS \"contract_address?\"\n FROM\n transactions\n JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN sl ON sl.value != $2\n AND sl.tx_hash = transactions.hash\n WHERE\n transactions.hash = ANY ($3)\n ", + "query": "\n WITH\n sl AS (\n SELECT DISTINCT\n ON (storage_logs.tx_hash) *\n FROM\n storage_logs\n WHERE\n storage_logs.address = $1\n AND storage_logs.tx_hash = 
ANY ($3)\n ORDER BY\n storage_logs.tx_hash,\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n )\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.miniblock_number AS \"block_number!\",\n transactions.error AS error,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.initiator_address AS initiator_address,\n transactions.data -> 'to' AS \"transfer_to?\",\n transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n transactions.tx_format AS \"tx_format?\",\n transactions.refunded_gas AS refunded_gas,\n transactions.gas_limit AS gas_limit,\n miniblocks.hash AS \"block_hash\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n sl.key AS \"contract_address?\"\n FROM\n transactions\n JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN sl ON sl.value != $2\n AND sl.tx_hash = transactions.hash\n WHERE\n transactions.hash = ANY ($3)\n AND transactions.data != '{}'::jsonb\n ", "describe": { "columns": [ { @@ -104,5 +104,5 @@ true ] }, - "hash": "b6837d2deed935da748339538c2c332a122d0b88271ae0127c65c4612b41a619" + "hash": "3feb0cae2cd055bc2a02e5993db9ef5f6b03c144f5a35204c8c77b7098548b19" } diff --git a/core/lib/dal/.sqlx/query-4b4b010b6a9e0e1a01aa61546e2fb39721a54d7c763b6f3fde410595e8fbf85d.json b/core/lib/dal/.sqlx/query-4b4b010b6a9e0e1a01aa61546e2fb39721a54d7c763b6f3fde410595e8fbf85d.json deleted file mode 100644 index a7f076acaa8..00000000000 --- a/core/lib/dal/.sqlx/query-4b4b010b6a9e0e1a01aa61546e2fb39721a54d7c763b6f3fde410595e8fbf85d.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n deleted AS (\n DELETE FROM l2_to_l1_logs\n WHERE\n miniblock_number <= $1\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "4b4b010b6a9e0e1a01aa61546e2fb39721a54d7c763b6f3fde410595e8fbf85d" -} diff --git a/core/lib/dal/.sqlx/query-4cff62fad4a7044a824a60656050e8a100140875f95cd8cf5de3c6202d59a19c.json b/core/lib/dal/.sqlx/query-4cff62fad4a7044a824a60656050e8a100140875f95cd8cf5de3c6202d59a19c.json new file mode 100644 index 00000000000..2c4d795f2f4 --- /dev/null +++ b/core/lib/dal/.sqlx/query-4cff62fad4a7044a824a60656050e8a100140875f95cd8cf5de3c6202d59a19c.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM storage_logs USING (\n SELECT\n hashed_key,\n MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n GROUP BY\n hashed_key\n ) AS last_storage_logs\n WHERE\n storage_logs.miniblock_number BETWEEN $1 AND $2\n AND last_storage_logs.hashed_key = storage_logs.hashed_key\n AND (\n storage_logs.miniblock_number != last_storage_logs.op[1]\n OR storage_logs.operation_number != last_storage_logs.op[2]\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "4cff62fad4a7044a824a60656050e8a100140875f95cd8cf5de3c6202d59a19c" +} diff --git a/core/lib/dal/.sqlx/query-550ddf6034c382f35b8cb7a6c93b989aabf87887d2d1c3db01b43b442241fca7.json b/core/lib/dal/.sqlx/query-550ddf6034c382f35b8cb7a6c93b989aabf87887d2d1c3db01b43b442241fca7.json deleted file mode 100644 index 686366fa06e..00000000000 --- 
a/core/lib/dal/.sqlx/query-550ddf6034c382f35b8cb7a6c93b989aabf87887d2d1c3db01b43b442241fca7.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n deleted AS (\n DELETE FROM storage_logs USING (\n SELECT\n hashed_key,\n MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n GROUP BY\n hashed_key\n ) AS last_storage_logs\n WHERE\n storage_logs.miniblock_number BETWEEN $1 AND $2\n AND last_storage_logs.hashed_key = storage_logs.hashed_key\n AND (\n storage_logs.miniblock_number != last_storage_logs.op[1]\n OR storage_logs.operation_number != last_storage_logs.op[2]\n )\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "550ddf6034c382f35b8cb7a6c93b989aabf87887d2d1c3db01b43b442241fca7" -} diff --git a/core/lib/dal/.sqlx/query-70fa8e4193c4e546227b328f25c5d84dc38610a86642d359c10f709145a9b446.json b/core/lib/dal/.sqlx/query-70fa8e4193c4e546227b328f25c5d84dc38610a86642d359c10f709145a9b446.json new file mode 100644 index 00000000000..4cfe93b3f81 --- /dev/null +++ b/core/lib/dal/.sqlx/query-70fa8e4193c4e546227b328f25c5d84dc38610a86642d359c10f709145a9b446.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM storage_logs USING (\n SELECT\n *\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ) AS batches_to_prune\n WHERE\n storage_logs.miniblock_number < $1\n AND batches_to_prune.hashed_key = storage_logs.hashed_key\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "70fa8e4193c4e546227b328f25c5d84dc38610a86642d359c10f709145a9b446" +} diff --git a/core/lib/dal/.sqlx/query-7511667f6b5b13c4062a29bff499fe7faa058a350d9de1970e5d5c341857a412.json b/core/lib/dal/.sqlx/query-7511667f6b5b13c4062a29bff499fe7faa058a350d9de1970e5d5c341857a412.json deleted file mode 100644 index 1fe545668e9..00000000000 --- a/core/lib/dal/.sqlx/query-7511667f6b5b13c4062a29bff499fe7faa058a350d9de1970e5d5c341857a412.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n deleted AS (\n DELETE FROM storage_logs USING (\n SELECT\n *\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ) AS batches_to_prune\n WHERE\n storage_logs.miniblock_number < $1\n AND batches_to_prune.hashed_key = storage_logs.hashed_key\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "7511667f6b5b13c4062a29bff499fe7faa058a350d9de1970e5d5c341857a412" -} diff --git a/core/lib/dal/.sqlx/query-7ce9b4d426570d9bef3fddbcbd0703791fdf19fd14abad08a037bbef4da12517.json b/core/lib/dal/.sqlx/query-7ce9b4d426570d9bef3fddbcbd0703791fdf19fd14abad08a037bbef4da12517.json deleted file mode 100644 index b04987dbb5d..00000000000 --- a/core/lib/dal/.sqlx/query-7ce9b4d426570d9bef3fddbcbd0703791fdf19fd14abad08a037bbef4da12517.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n deleted AS (\n DELETE FROM miniblocks\n WHERE\n number <= $1\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", - "describe": { - 
"columns": [ - { - "ordinal": 0, - "name": "count!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "7ce9b4d426570d9bef3fddbcbd0703791fdf19fd14abad08a037bbef4da12517" -} diff --git a/core/lib/dal/.sqlx/query-8f662682747a24fbe122533f421466f8a4efab1a52acc26f3a6c6b219a46390b.json b/core/lib/dal/.sqlx/query-8f662682747a24fbe122533f421466f8a4efab1a52acc26f3a6c6b219a46390b.json new file mode 100644 index 00000000000..18c1bbd8a1e --- /dev/null +++ b/core/lib/dal/.sqlx/query-8f662682747a24fbe122533f421466f8a4efab1a52acc26f3a6c6b219a46390b.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM l2_to_l1_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "8f662682747a24fbe122533f421466f8a4efab1a52acc26f3a6c6b219a46390b" +} diff --git a/core/lib/dal/.sqlx/query-3d7536cfe7d88dceebff2125a51bcae561c0eea2b1cd8beb245b0cc66ebffcaa.json b/core/lib/dal/.sqlx/query-982a7566aebc4f94b37c9cbe32f0f6d8b037d7a5fade524a4faadb421399a00b.json similarity index 95% rename from core/lib/dal/.sqlx/query-3d7536cfe7d88dceebff2125a51bcae561c0eea2b1cd8beb245b0cc66ebffcaa.json rename to core/lib/dal/.sqlx/query-982a7566aebc4f94b37c9cbe32f0f6d8b037d7a5fade524a4faadb421399a00b.json index 9a43cc9fd3a..a0567d0917d 100644 --- a/core/lib/dal/.sqlx/query-3d7536cfe7d88dceebff2125a51bcae561c0eea2b1cd8beb245b0cc66ebffcaa.json +++ b/core/lib/dal/.sqlx/query-982a7566aebc4f94b37c9cbe32f0f6d8b037d7a5fade524a4faadb421399a00b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.miniblock_number AS block_number,\n transactions.nonce AS nonce,\n transactions.signature AS signature,\n transactions.initiator_address AS initiator_address,\n transactions.tx_format AS tx_format,\n transactions.value AS value,\n transactions.gas_limit AS gas_limit,\n transactions.max_fee_per_gas AS max_fee_per_gas,\n transactions.max_priority_fee_per_gas AS max_priority_fee_per_gas,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.l1_batch_number AS l1_batch_number,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.data->'contractAddress' AS \"execute_contract_address\",\n transactions.data->'calldata' AS \"calldata\",\n miniblocks.hash AS \"block_hash\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n WHERE\n transactions.miniblock_number = $1 AND transactions.index_in_block = $2", + "query": "\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.miniblock_number AS block_number,\n transactions.nonce AS nonce,\n transactions.signature AS signature,\n transactions.initiator_address AS initiator_address,\n transactions.tx_format AS tx_format,\n transactions.value AS value,\n transactions.gas_limit AS gas_limit,\n transactions.max_fee_per_gas AS max_fee_per_gas,\n transactions.max_priority_fee_per_gas AS max_priority_fee_per_gas,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.l1_batch_number AS l1_batch_number,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.data->'contractAddress' AS \"execute_contract_address\",\n transactions.data->'calldata' AS \"calldata\",\n miniblocks.hash AS \"block_hash\"\n FROM transactions\n LEFT JOIN miniblocks ON 
miniblocks.number = transactions.miniblock_number\n WHERE\n transactions.miniblock_number = $1 AND transactions.index_in_block = $2 AND transactions.data != '{}'::jsonb", "describe": { "columns": [ { @@ -115,5 +115,5 @@ false ] }, - "hash": "3d7536cfe7d88dceebff2125a51bcae561c0eea2b1cd8beb245b0cc66ebffcaa" + "hash": "982a7566aebc4f94b37c9cbe32f0f6d8b037d7a5fade524a4faadb421399a00b" } diff --git a/core/lib/dal/.sqlx/query-9a31c42d03be1bc564e7aaa8574b7c1834960bffb558cfbdbfb014ea929c815f.json b/core/lib/dal/.sqlx/query-9a31c42d03be1bc564e7aaa8574b7c1834960bffb558cfbdbfb014ea929c815f.json deleted file mode 100644 index fd7c140dda5..00000000000 --- a/core/lib/dal/.sqlx/query-9a31c42d03be1bc564e7aaa8574b7c1834960bffb558cfbdbfb014ea929c815f.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n soft.pruned_l1_batch AS last_soft_pruned_l1_batch,\n soft.pruned_miniblock AS last_soft_pruned_miniblock,\n hard.pruned_l1_batch AS last_hard_pruned_l1_batch,\n hard.pruned_miniblock AS last_hard_pruned_miniblock\n FROM\n (\n SELECT\n 1\n ) AS dummy\n LEFT JOIN (\n SELECT\n pruned_l1_batch,\n pruned_miniblock\n FROM\n pruning_log\n WHERE\n TYPE = 'Soft'\n ORDER BY\n pruned_l1_batch DESC\n LIMIT\n 1\n ) AS soft ON TRUE\n LEFT JOIN (\n SELECT\n pruned_l1_batch,\n pruned_miniblock\n FROM\n pruning_log\n WHERE\n TYPE = 'Hard'\n ORDER BY\n pruned_l1_batch DESC\n LIMIT\n 1\n ) AS hard ON TRUE;\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_soft_pruned_l1_batch", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "last_soft_pruned_miniblock", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "last_hard_pruned_l1_batch", - "type_info": "Int8" - }, - { - "ordinal": 3, - "name": "last_hard_pruned_miniblock", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - true, - true, - true, - true - ] - }, - "hash": "9a31c42d03be1bc564e7aaa8574b7c1834960bffb558cfbdbfb014ea929c815f" -} diff --git a/core/lib/dal/.sqlx/query-a51b8f1eeb6ef6800619e7a5a91d10c23ab2924f6a3f0594f6990af8ea9146a6.json b/core/lib/dal/.sqlx/query-a51b8f1eeb6ef6800619e7a5a91d10c23ab2924f6a3f0594f6990af8ea9146a6.json new file mode 100644 index 00000000000..9800b470795 --- /dev/null +++ b/core/lib/dal/.sqlx/query-a51b8f1eeb6ef6800619e7a5a91d10c23ab2924f6a3f0594f6990af8ea9146a6.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM events\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "a51b8f1eeb6ef6800619e7a5a91d10c23ab2924f6a3f0594f6990af8ea9146a6" +} diff --git a/core/lib/dal/.sqlx/query-ab13d2be03610a8372fdf03c9fa18a8505c7544ff0e167731b867ff2abbe9879.json b/core/lib/dal/.sqlx/query-ab13d2be03610a8372fdf03c9fa18a8505c7544ff0e167731b867ff2abbe9879.json deleted file mode 100644 index 013b881eab5..00000000000 --- a/core/lib/dal/.sqlx/query-ab13d2be03610a8372fdf03c9fa18a8505c7544ff0e167731b867ff2abbe9879.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n deleted AS (\n DELETE FROM call_traces USING (\n SELECT\n *\n FROM\n transactions\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ) AS matching_transactions\n WHERE\n matching_transactions.hash = call_traces.tx_hash\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - 
"Int8", - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "ab13d2be03610a8372fdf03c9fa18a8505c7544ff0e167731b867ff2abbe9879" -} diff --git a/core/lib/dal/.sqlx/query-ac47b807af0441cd522a41879f25679d90947c0af172c9a199cf9280aa557e95.json b/core/lib/dal/.sqlx/query-ac47b807af0441cd522a41879f25679d90947c0af172c9a199cf9280aa557e95.json new file mode 100644 index 00000000000..be3f6ca3c77 --- /dev/null +++ b/core/lib/dal/.sqlx/query-ac47b807af0441cd522a41879f25679d90947c0af172c9a199cf9280aa557e95.json @@ -0,0 +1,88 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n transactions.is_priority,\n transactions.initiator_address,\n transactions.gas_limit,\n transactions.gas_per_pubdata_limit,\n transactions.received_at,\n transactions.miniblock_number,\n transactions.error,\n transactions.effective_gas_price,\n transactions.refunded_gas,\n commit_tx.tx_hash AS \"eth_commit_tx_hash?\",\n prove_tx.tx_hash AS \"eth_prove_tx_hash?\",\n execute_tx.tx_hash AS \"eth_execute_tx_hash?\"\n FROM\n transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history AS commit_tx ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n transactions.hash = $1\n AND transactions.data != '{}'::jsonb\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "is_priority", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "initiator_address", + "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "gas_limit", + "type_info": "Numeric" + }, + { + "ordinal": 3, + "name": "gas_per_pubdata_limit", + "type_info": "Numeric" + }, + { + "ordinal": 4, + "name": "received_at", + "type_info": "Timestamp" + }, + { + "ordinal": 5, + "name": "miniblock_number", + "type_info": "Int8" + }, + { + "ordinal": 6, + "name": "error", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "effective_gas_price", + "type_info": "Numeric" + }, + { + "ordinal": 8, + "name": "refunded_gas", + "type_info": "Int8" + }, + { + "ordinal": 9, + "name": "eth_commit_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 10, + "name": "eth_prove_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 11, + "name": "eth_execute_tx_hash?", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + false, + false, + true, + true, + false, + true, + true, + true, + false, + false, + false, + false + ] + }, + "hash": "ac47b807af0441cd522a41879f25679d90947c0af172c9a199cf9280aa557e95" +} diff --git a/core/lib/dal/.sqlx/query-c0817ac0b9385cdc563d05d57d270531d0b0a90e3c8c478c86c70e3d7e324579.json b/core/lib/dal/.sqlx/query-c0817ac0b9385cdc563d05d57d270531d0b0a90e3c8c478c86c70e3d7e324579.json deleted file mode 100644 index 136b40c5471..00000000000 --- a/core/lib/dal/.sqlx/query-c0817ac0b9385cdc563d05d57d270531d0b0a90e3c8c478c86c70e3d7e324579.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n hashed_key,\n value,\n operation_number\n FROM\n storage_logs\n WHERE\n miniblock_number = $1\n ORDER BY\n operation_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "hashed_key", - 
"type_info": "Bytea" - }, - { - "ordinal": 1, - "name": "value", - "type_info": "Bytea" - }, - { - "ordinal": 2, - "name": "operation_number", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - false, - false - ] - }, - "hash": "c0817ac0b9385cdc563d05d57d270531d0b0a90e3c8c478c86c70e3d7e324579" -} diff --git a/core/lib/dal/.sqlx/query-c29909fda6f1e4116a9a28407805a9a38a8d8ff1fba9a834d37927793f532b7d.json b/core/lib/dal/.sqlx/query-c29909fda6f1e4116a9a28407805a9a38a8d8ff1fba9a834d37927793f532b7d.json new file mode 100644 index 00000000000..f202744b8c0 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c29909fda6f1e4116a9a28407805a9a38a8d8ff1fba9a834d37927793f532b7d.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM miniblocks\n WHERE\n number <= $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "c29909fda6f1e4116a9a28407805a9a38a8d8ff1fba9a834d37927793f532b7d" +} diff --git a/core/lib/dal/.sqlx/query-555f396946bdb8b84a5d77abbfc1397212b4767039a6c0e22697cf40969729af.json b/core/lib/dal/.sqlx/query-c75cdc655cd843a474f857e80b30685582bb37ba816a5434ee546d86ef9a8d9e.json similarity index 85% rename from core/lib/dal/.sqlx/query-555f396946bdb8b84a5d77abbfc1397212b4767039a6c0e22697cf40969729af.json rename to core/lib/dal/.sqlx/query-c75cdc655cd843a474f857e80b30685582bb37ba816a5434ee546d86ef9a8d9e.json index 1cb61dc4460..0cf33a5559f 100644 --- a/core/lib/dal/.sqlx/query-555f396946bdb8b84a5d77abbfc1397212b4767039a6c0e22697cf40969729af.json +++ b/core/lib/dal/.sqlx/query-c75cdc655cd843a474f857e80b30685582bb37ba816a5434ee546d86ef9a8d9e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n hashed_key,\n address,\n key,\n value,\n operation_number,\n tx_hash,\n miniblock_number\n FROM\n storage_logs\n ", + "query": "\n SELECT\n hashed_key,\n address,\n key,\n value,\n operation_number,\n tx_hash,\n miniblock_number\n FROM\n storage_logs\n ORDER BY\n miniblock_number,\n operation_number\n ", "describe": { "columns": [ { @@ -52,5 +52,5 @@ false ] }, - "hash": "555f396946bdb8b84a5d77abbfc1397212b4767039a6c0e22697cf40969729af" + "hash": "c75cdc655cd843a474f857e80b30685582bb37ba816a5434ee546d86ef9a8d9e" } diff --git a/core/lib/dal/.sqlx/query-d3e4ee6677ce9de438abf7529aaf64c789d3a8a1d6c96c58213c23a055cde751.json b/core/lib/dal/.sqlx/query-d3e4ee6677ce9de438abf7529aaf64c789d3a8a1d6c96c58213c23a055cde751.json new file mode 100644 index 00000000000..bf1570deefc --- /dev/null +++ b/core/lib/dal/.sqlx/query-d3e4ee6677ce9de438abf7529aaf64c789d3a8a1d6c96c58213c23a055cde751.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM call_traces\n WHERE\n tx_hash IN (\n SELECT\n hash\n FROM\n transactions\n WHERE\n miniblock_number BETWEEN $1 AND $2\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "d3e4ee6677ce9de438abf7529aaf64c789d3a8a1d6c96c58213c23a055cde751" +} diff --git a/core/lib/dal/.sqlx/query-e68bc95257fbb9d6c968c5557c0a4b3bcf5d7ecae5e7f7abf8315faca6bc4917.json b/core/lib/dal/.sqlx/query-e68bc95257fbb9d6c968c5557c0a4b3bcf5d7ecae5e7f7abf8315faca6bc4917.json deleted file mode 100644 index 289045d5555..00000000000 --- a/core/lib/dal/.sqlx/query-e68bc95257fbb9d6c968c5557c0a4b3bcf5d7ecae5e7f7abf8315faca6bc4917.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n deleted AS (\n DELETE FROM l1_batches\n WHERE\n number <= 
$1\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "e68bc95257fbb9d6c968c5557c0a4b3bcf5d7ecae5e7f7abf8315faca6bc4917" -} diff --git a/core/lib/dal/.sqlx/query-e6d86f3a4c87e2f70978971f9b5b7d884827ff78a6dfd2085a41588d6d0a7523.json b/core/lib/dal/.sqlx/query-e6d86f3a4c87e2f70978971f9b5b7d884827ff78a6dfd2085a41588d6d0a7523.json deleted file mode 100644 index c7a59137dfe..00000000000 --- a/core/lib/dal/.sqlx/query-e6d86f3a4c87e2f70978971f9b5b7d884827ff78a6dfd2085a41588d6d0a7523.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n deleted AS (\n DELETE FROM events\n WHERE\n miniblock_number <= $1\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n deleted\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "e6d86f3a4c87e2f70978971f9b5b7d884827ff78a6dfd2085a41588d6d0a7523" -} diff --git a/core/lib/dal/.sqlx/query-aac861efb4acb81d5cefa598c822bef649a6db197a36aca098cd8054909d82e9.json b/core/lib/dal/.sqlx/query-f406091f793e2eb09d9490f2f8f7ac942fc88835d4bd925e4144cfbb7bc1cf2c.json similarity index 96% rename from core/lib/dal/.sqlx/query-aac861efb4acb81d5cefa598c822bef649a6db197a36aca098cd8054909d82e9.json rename to core/lib/dal/.sqlx/query-f406091f793e2eb09d9490f2f8f7ac942fc88835d4bd925e4144cfbb7bc1cf2c.json index 1122b9c27eb..eb09b438c68 100644 --- a/core/lib/dal/.sqlx/query-aac861efb4acb81d5cefa598c822bef649a6db197a36aca098cd8054909d82e9.json +++ b/core/lib/dal/.sqlx/query-f406091f793e2eb09d9490f2f8f7ac942fc88835d4bd925e4144cfbb7bc1cf2c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.miniblock_number AS block_number,\n transactions.nonce AS nonce,\n transactions.signature AS signature,\n transactions.initiator_address AS initiator_address,\n transactions.tx_format AS tx_format,\n transactions.value AS value,\n transactions.gas_limit AS gas_limit,\n transactions.max_fee_per_gas AS max_fee_per_gas,\n transactions.max_priority_fee_per_gas AS max_priority_fee_per_gas,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.l1_batch_number AS l1_batch_number,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.data->'contractAddress' AS \"execute_contract_address\",\n transactions.data->'calldata' AS \"calldata\",\n miniblocks.hash AS \"block_hash\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n WHERE\n transactions.hash = ANY($1)", + "query": "\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.miniblock_number AS block_number,\n transactions.nonce AS nonce,\n transactions.signature AS signature,\n transactions.initiator_address AS initiator_address,\n transactions.tx_format AS tx_format,\n transactions.value AS value,\n transactions.gas_limit AS gas_limit,\n transactions.max_fee_per_gas AS max_fee_per_gas,\n transactions.max_priority_fee_per_gas AS max_priority_fee_per_gas,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.l1_batch_number AS l1_batch_number,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n 
transactions.data->'contractAddress' AS \"execute_contract_address\",\n transactions.data->'calldata' AS \"calldata\",\n miniblocks.hash AS \"block_hash\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n WHERE\n transactions.hash = ANY($1) AND transactions.data != '{}'::jsonb", "describe": { "columns": [ { @@ -114,5 +114,5 @@ true ] }, - "hash": "aac861efb4acb81d5cefa598c822bef649a6db197a36aca098cd8054909d82e9" + "hash": "f406091f793e2eb09d9490f2f8f7ac942fc88835d4bd925e4144cfbb7bc1cf2c" } diff --git a/core/lib/dal/.sqlx/query-f79e5c76293efda1bcd2b07c4dbaef121de4e55af741dccae40ccfd0bffc84e4.json b/core/lib/dal/.sqlx/query-f79e5c76293efda1bcd2b07c4dbaef121de4e55af741dccae40ccfd0bffc84e4.json deleted file mode 100644 index 991193a5f15..00000000000 --- a/core/lib/dal/.sqlx/query-f79e5c76293efda1bcd2b07c4dbaef121de4e55af741dccae40ccfd0bffc84e4.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n updated AS (\n UPDATE transactions\n SET\n input = NULL,\n data = '{}',\n execution_info = '{}',\n updated_at = NOW()\n WHERE\n miniblock_number BETWEEN $1 AND $2\n AND upgrade_id IS NULL\n RETURNING\n *\n )\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n updated\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "f79e5c76293efda1bcd2b07c4dbaef121de4e55af741dccae40ccfd0bffc84e4" -} diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index 33f27d9b9bd..8598a3472ee 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -35,6 +35,7 @@ pub mod eth_sender_dal; pub mod events_dal; pub mod events_web3_dal; pub mod factory_deps_dal; +pub mod metrics; mod models; pub mod proof_generation_dal; pub mod protocol_versions_dal; @@ -54,14 +55,9 @@ pub mod tokens_web3_dal; pub mod transactions_dal; pub mod transactions_web3_dal; -pub mod metrics; - #[cfg(test)] mod tests; -#[cfg(test)] -mod pruning_dal_tests; - // This module is private and serves as a way to seal the trait. 
mod private { pub trait Sealed {} diff --git a/core/lib/dal/src/pruning_dal.rs b/core/lib/dal/src/pruning_dal.rs deleted file mode 100644 index 7f0c54f2901..00000000000 --- a/core/lib/dal/src/pruning_dal.rs +++ /dev/null @@ -1,438 +0,0 @@ -use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; -use zksync_types::{L1BatchNumber, MiniblockNumber}; - -use crate::Core; - -#[derive(Debug)] -pub struct PruningDal<'a, 'c> { - pub(crate) storage: &'a mut Connection<'c, Core>, -} - -#[derive(Debug, Clone, Copy, PartialEq)] -pub struct PruningInfo { - pub last_soft_pruned_l1_batch: Option<L1BatchNumber>, - pub last_soft_pruned_miniblock: Option<MiniblockNumber>, - pub last_hard_pruned_l1_batch: Option<L1BatchNumber>, - pub last_hard_pruned_miniblock: Option<MiniblockNumber>, -} - -#[derive(Debug, sqlx::Type)] -#[sqlx(type_name = "prune_type")] -pub enum PruneType { - Soft, - Hard, -} - -impl PruningDal<'_, '_> { - pub async fn get_pruning_info(&mut self) -> DalResult<PruningInfo> { - let row = sqlx::query!( - r#" - SELECT - soft.pruned_l1_batch AS last_soft_pruned_l1_batch, - soft.pruned_miniblock AS last_soft_pruned_miniblock, - hard.pruned_l1_batch AS last_hard_pruned_l1_batch, - hard.pruned_miniblock AS last_hard_pruned_miniblock - FROM - ( - SELECT - 1 - ) AS dummy - LEFT JOIN ( - SELECT - pruned_l1_batch, - pruned_miniblock - FROM - pruning_log - WHERE - TYPE = 'Soft' - ORDER BY - pruned_l1_batch DESC - LIMIT - 1 - ) AS soft ON TRUE - LEFT JOIN ( - SELECT - pruned_l1_batch, - pruned_miniblock - FROM - pruning_log - WHERE - TYPE = 'Hard' - ORDER BY - pruned_l1_batch DESC - LIMIT - 1 - ) AS hard ON TRUE; - "# - ) - .instrument("get_last_soft_pruned_batch") - .report_latency() - .fetch_one(self.storage) - .await?; - Ok(PruningInfo { - last_soft_pruned_l1_batch: row - .last_soft_pruned_l1_batch - .map(|x| L1BatchNumber(x as u32)), - last_soft_pruned_miniblock: row - .last_soft_pruned_miniblock - .map(|x| MiniblockNumber(x as u32)), - last_hard_pruned_l1_batch: row - .last_hard_pruned_l1_batch - .map(|x| L1BatchNumber(x as u32)), - last_hard_pruned_miniblock: row - .last_hard_pruned_miniblock - .map(|x| MiniblockNumber(x as u32)), - }) - } - - pub async fn soft_prune_batches_range( - &mut self, - last_l1_batch_to_prune: L1BatchNumber, - last_miniblock_to_prune: MiniblockNumber, - ) -> DalResult<()> { - sqlx::query!( - r#" - INSERT INTO - pruning_log ( - pruned_l1_batch, - pruned_miniblock, - TYPE, - created_at, - updated_at - ) - VALUES - ($1, $2, $3, NOW(), NOW()) - "#, - i64::from(last_l1_batch_to_prune.0), - i64::from(last_miniblock_to_prune.0), - PruneType::Soft as PruneType, - ) - .instrument("soft_prune_batches_range#insert_pruning_log") - .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) - .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) - .with_arg("prune_type", &PruneType::Soft) - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } - - pub async fn hard_prune_batches_range( - &mut self, - last_l1_batch_to_prune: L1BatchNumber, - last_miniblock_to_prune: MiniblockNumber, - ) -> DalResult<()> { - let row = sqlx::query!( - r#" - SELECT - MIN(number) AS first_miniblock_to_prune - FROM - miniblocks - WHERE - l1_batch_number <= $1 - "#, - i64::from(last_l1_batch_to_prune.0), - ) - .instrument("hard_prune_batches_range#get_miniblocks_range") - .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) - .report_latency() - .fetch_one(self.storage) - .await?; - - // we don't have any miniblocks available when recovering from a snapshot - if row.first_miniblock_to_prune.is_some() { - let 
first_miniblock_to_prune = - MiniblockNumber(row.first_miniblock_to_prune.unwrap() as u32); - - let deleted_events = sqlx::query!( - r#" - WITH - deleted AS ( - DELETE FROM events - WHERE - miniblock_number <= $1 - RETURNING - * - ) - SELECT - COUNT(*) AS "count!" - FROM - deleted - "#, - i64::from(last_miniblock_to_prune.0), - ) - .instrument("hard_prune_batches_range#delete_events") - .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) - .report_latency() - .fetch_one(self.storage) - .await?; - - let deleted_l2_to_l1_logs = sqlx::query!( - r#" - WITH - deleted AS ( - DELETE FROM l2_to_l1_logs - WHERE - miniblock_number <= $1 - RETURNING - * - ) - SELECT - COUNT(*) AS "count!" - FROM - deleted - "#, - i64::from(last_miniblock_to_prune.0), - ) - .instrument("hard_prune_batches_range#delete_l2_to_l1_logs") - .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) - .report_latency() - .fetch_one(self.storage) - .await?; - - let deleted_call_traces = sqlx::query!( - r#" - WITH - deleted AS ( - DELETE FROM call_traces USING ( - SELECT - * - FROM - transactions - WHERE - miniblock_number BETWEEN $1 AND $2 - ) AS matching_transactions - WHERE - matching_transactions.hash = call_traces.tx_hash - RETURNING - * - ) - SELECT - COUNT(*) AS "count!" - FROM - deleted - "#, - i64::from(first_miniblock_to_prune.0), - i64::from(last_miniblock_to_prune.0), - ) - .instrument("hard_prune_batches_range#delete_call_traces") - .with_arg("first_miniblock_to_prune", &first_miniblock_to_prune) - .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) - .report_latency() - .fetch_one(self.storage) - .await?; - - sqlx::query!( - r#" - WITH - updated AS ( - UPDATE transactions - SET - input = NULL, - data = '{}', - execution_info = '{}', - updated_at = NOW() - WHERE - miniblock_number BETWEEN $1 AND $2 - AND upgrade_id IS NULL - RETURNING - * - ) - SELECT - COUNT(*) AS "count!" - FROM - updated - "#, - i64::from(first_miniblock_to_prune.0), - i64::from(last_miniblock_to_prune.0), - ) - .instrument("hard_prune_batches_range#clear_transactions_references") - .with_arg("first_miniblock_to_prune", &first_miniblock_to_prune) - .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) - .report_latency() - .fetch_one(self.storage) - .await?; - - //The deleting of logs is split into two queries to make it faster, - // only the first query has to go through all previous logs - // and the query optimizer should be happy with it - let deleted_storage_logs_from_past_batches = sqlx::query!( - r#" - WITH - deleted AS ( - DELETE FROM storage_logs USING ( - SELECT - * - FROM - storage_logs - WHERE - miniblock_number BETWEEN $1 AND $2 - ) AS batches_to_prune - WHERE - storage_logs.miniblock_number < $1 - AND batches_to_prune.hashed_key = storage_logs.hashed_key - RETURNING - * - ) - SELECT - COUNT(*) AS "count!" 
- FROM - deleted - "#, - i64::from(first_miniblock_to_prune.0), - i64::from(last_miniblock_to_prune.0), - ) - .instrument("hard_prune_batches_range#delete_overriden_storage_logs_from_past_batches") - .with_arg("first_miniblock_to_prune", &first_miniblock_to_prune) - .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) - .report_latency() - .fetch_one(self.storage) - .await?; - - let deleted_storage_logs_from_pruned_batches = sqlx::query!( - r#" - WITH - deleted AS ( - DELETE FROM storage_logs USING ( - SELECT - hashed_key, - MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op - FROM - storage_logs - WHERE - miniblock_number BETWEEN $1 AND $2 - GROUP BY - hashed_key - ) AS last_storage_logs - WHERE - storage_logs.miniblock_number BETWEEN $1 AND $2 - AND last_storage_logs.hashed_key = storage_logs.hashed_key - AND ( - storage_logs.miniblock_number != last_storage_logs.op[1] - OR storage_logs.operation_number != last_storage_logs.op[2] - ) - RETURNING - * - ) - SELECT - COUNT(*) AS "count!" - FROM - deleted - "#, - i64::from(first_miniblock_to_prune.0), - i64::from(last_miniblock_to_prune.0), - ) - .instrument( - "hard_prune_batches_range#delete_overriden_storage_logs_from_pruned_batches", - ) - .with_arg("first_miniblock_to_prune", &first_miniblock_to_prune) - .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) - .report_latency() - .fetch_one(self.storage) - .await?; - - let deleted_l1_batches = sqlx::query!( - r#" - WITH - deleted AS ( - DELETE FROM l1_batches - WHERE - number <= $1 - RETURNING - * - ) - SELECT - COUNT(*) AS "count!" - FROM - deleted - "#, - i64::from(last_l1_batch_to_prune.0), - ) - .instrument("hard_prune_batches_range#delete_l1_batches") - .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) - .report_latency() - .fetch_one(self.storage) - .await?; - - let deleted_miniblocks = sqlx::query!( - r#" - WITH - deleted AS ( - DELETE FROM miniblocks - WHERE - number <= $1 - RETURNING - * - ) - SELECT - COUNT(*) AS "count!" 
- FROM - deleted - "#, - i64::from(last_miniblock_to_prune.0), - ) - .instrument("hard_prune_batches_range#delete_miniblocks") - .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) - .report_latency() - .fetch_one(self.storage) - .await?; - - tracing::info!("Performed pruning of database, deleted {} l1_batches, {} miniblocks, {} storage_logs, {} events, {} call traces, {} l2_to_l1_logs", - deleted_l1_batches.count, - deleted_miniblocks.count, - deleted_storage_logs_from_past_batches.count + deleted_storage_logs_from_pruned_batches.count, - deleted_events.count, - deleted_call_traces.count, - deleted_l2_to_l1_logs.count) - } - - sqlx::query!( - r#" - INSERT INTO - pruning_log ( - pruned_l1_batch, - pruned_miniblock, - TYPE, - created_at, - updated_at - ) - VALUES - ($1, $2, $3, NOW(), NOW()) - "#, - i64::from(last_l1_batch_to_prune.0), - i64::from(last_miniblock_to_prune.0), - PruneType::Hard as PruneType - ) - .instrument("hard_prune_batches_range#insert_pruning_log") - .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) - .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) - .with_arg("prune_type", &PruneType::Hard) - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } - - // This method must be separate as VACUUM is not supported inside a transaction - pub async fn run_vacuum_after_hard_pruning(&mut self) -> DalResult<()> { - sqlx::query!( - r#" - VACUUM l1_batches, - miniblocks, - storage_logs, - events, - call_traces, - l2_to_l1_logs, - transactions - "#, - ) - .instrument("hard_prune_batches_range#vacuum") - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } -} diff --git a/core/lib/dal/src/pruning_dal/mod.rs b/core/lib/dal/src/pruning_dal/mod.rs new file mode 100644 index 00000000000..6183e552a2d --- /dev/null +++ b/core/lib/dal/src/pruning_dal/mod.rs @@ -0,0 +1,470 @@ +use std::ops; + +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; +use zksync_types::{L1BatchNumber, MiniblockNumber}; + +use crate::Core; + +#[cfg(test)] +mod tests; + +#[derive(Debug)] +pub struct PruningDal<'a, 'c> { + pub(crate) storage: &'a mut Connection<'c, Core>, +} + +/// Information about Postgres pruning. +#[derive(Debug, Clone, Copy, Default, PartialEq)] +pub struct PruningInfo { + pub last_soft_pruned_l1_batch: Option<L1BatchNumber>, + pub last_soft_pruned_miniblock: Option<MiniblockNumber>, + pub last_hard_pruned_l1_batch: Option<L1BatchNumber>, + pub last_hard_pruned_miniblock: Option<MiniblockNumber>, +} + +/// Statistics about a single hard pruning iteration. 
+#[derive(Debug, Default)] +pub struct HardPruningStats { + pub deleted_l1_batches: u64, + pub deleted_miniblocks: u64, + pub deleted_storage_logs_from_past_batches: u64, + pub deleted_storage_logs_from_pruned_batches: u64, + pub deleted_events: u64, + pub deleted_call_traces: u64, + pub deleted_l2_to_l1_logs: u64, +} + +#[derive(Debug, sqlx::Type)] +#[sqlx(type_name = "prune_type")] +enum PruneType { + Soft, + Hard, +} + +impl PruningDal<'_, '_> { + pub async fn get_pruning_info(&mut self) -> DalResult<PruningInfo> { + let pruning_info = sqlx::query!( + r#" + WITH + soft AS ( + SELECT + pruned_l1_batch, + pruned_miniblock + FROM + pruning_log + WHERE + TYPE = 'Soft' + ORDER BY + pruned_l1_batch DESC + LIMIT + 1 + ), + hard AS ( + SELECT + pruned_l1_batch, + pruned_miniblock + FROM + pruning_log + WHERE + TYPE = 'Hard' + ORDER BY + pruned_l1_batch DESC + LIMIT + 1 + ) + SELECT + soft.pruned_l1_batch AS last_soft_pruned_l1_batch, + soft.pruned_miniblock AS last_soft_pruned_miniblock, + hard.pruned_l1_batch AS last_hard_pruned_l1_batch, + hard.pruned_miniblock AS last_hard_pruned_miniblock + FROM + soft + FULL JOIN hard ON TRUE + "# + ) + .map(|row| PruningInfo { + last_soft_pruned_l1_batch: row + .last_soft_pruned_l1_batch + .map(|num| L1BatchNumber(num as u32)), + last_soft_pruned_miniblock: row + .last_soft_pruned_miniblock + .map(|num| MiniblockNumber(num as u32)), + last_hard_pruned_l1_batch: row + .last_hard_pruned_l1_batch + .map(|num| L1BatchNumber(num as u32)), + last_hard_pruned_miniblock: row + .last_hard_pruned_miniblock + .map(|num| MiniblockNumber(num as u32)), + }) + .instrument("get_last_soft_pruned_batch") + .report_latency() + .fetch_optional(self.storage) + .await?; + Ok(pruning_info.unwrap_or_default()) + } + + pub async fn soft_prune_batches_range( + &mut self, + last_l1_batch_to_prune: L1BatchNumber, + last_miniblock_to_prune: MiniblockNumber, + ) -> DalResult<()> { + sqlx::query!( + r#" + INSERT INTO + pruning_log ( + pruned_l1_batch, + pruned_miniblock, + TYPE, + created_at, + updated_at + ) + VALUES + ($1, $2, $3, NOW(), NOW()) + "#, + i64::from(last_l1_batch_to_prune.0), + i64::from(last_miniblock_to_prune.0), + PruneType::Soft as PruneType, + ) + .instrument("soft_prune_batches_range#insert_pruning_log") + .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) + .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) + .with_arg("prune_type", &PruneType::Soft) + .report_latency() + .execute(self.storage) + .await?; + + Ok(()) + } + + pub async fn hard_prune_batches_range( + &mut self, + last_l1_batch_to_prune: L1BatchNumber, + last_miniblock_to_prune: MiniblockNumber, + ) -> DalResult<HardPruningStats> { + let row = sqlx::query!( + r#" + SELECT + MIN(number) AS first_miniblock_to_prune + FROM + miniblocks + WHERE + l1_batch_number <= $1 + "#, + i64::from(last_l1_batch_to_prune.0), + ) + .instrument("hard_prune_batches_range#get_miniblocks_range") + .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) + .report_latency() + .fetch_one(self.storage) + .await?; + + // we don't have any miniblocks available when recovering from a snapshot + let stats = if let Some(first_miniblock_to_prune) = row.first_miniblock_to_prune { + let first_miniblock_to_prune = MiniblockNumber(first_miniblock_to_prune as u32); + + let deleted_events = self + .delete_events(first_miniblock_to_prune..=last_miniblock_to_prune) + .await?; + let deleted_l2_to_l1_logs = self + .delete_l2_to_l1_logs(first_miniblock_to_prune..=last_miniblock_to_prune) + .await?; + let deleted_call_traces = self + 
.delete_call_traces(first_miniblock_to_prune..=last_miniblock_to_prune) + .await?; + self.clear_transaction_fields(first_miniblock_to_prune..=last_miniblock_to_prune) + .await?; + + // Deleting logs is split into two queries to make it faster: + // only the first query has to go through all previous logs, + // and the query optimizer should be happy with it + let deleted_storage_logs_from_past_batches = self + .prune_storage_logs_from_past_miniblocks( + first_miniblock_to_prune..=last_miniblock_to_prune, + ) + .await?; + let deleted_storage_logs_from_pruned_batches = self + .prune_storage_logs_in_range(first_miniblock_to_prune..=last_miniblock_to_prune) + .await?; + let deleted_l1_batches = self.delete_l1_batches(last_l1_batch_to_prune).await?; + let deleted_miniblocks = self.delete_miniblocks(last_miniblock_to_prune).await?; + + HardPruningStats { + deleted_l1_batches, + deleted_miniblocks, + deleted_events, + deleted_l2_to_l1_logs, + deleted_call_traces, + deleted_storage_logs_from_past_batches, + deleted_storage_logs_from_pruned_batches, + } + } else { + HardPruningStats::default() + }; + + self.insert_hard_pruning_log(last_l1_batch_to_prune, last_miniblock_to_prune) + .await?; + Ok(stats) + } + + async fn delete_events( + &mut self, + miniblocks_to_prune: ops::RangeInclusive<MiniblockNumber>, + ) -> DalResult<u64> { + let execution_result = sqlx::query!( + r#" + DELETE FROM events + WHERE + miniblock_number BETWEEN $1 AND $2 + "#, + i64::from(miniblocks_to_prune.start().0), + i64::from(miniblocks_to_prune.end().0) + ) + .instrument("hard_prune_batches_range#delete_events") + .with_arg("miniblocks_to_prune", &miniblocks_to_prune) + .report_latency() + .execute(self.storage) + .await?; + Ok(execution_result.rows_affected()) + } + + async fn delete_l2_to_l1_logs( + &mut self, + miniblocks_to_prune: ops::RangeInclusive<MiniblockNumber>, + ) -> DalResult<u64> { + let execution_result = sqlx::query!( + r#" + DELETE FROM l2_to_l1_logs + WHERE + miniblock_number BETWEEN $1 AND $2 + "#, + i64::from(miniblocks_to_prune.start().0), + i64::from(miniblocks_to_prune.end().0) + ) + .instrument("hard_prune_batches_range#delete_l2_to_l1_logs") + .with_arg("miniblocks_to_prune", &miniblocks_to_prune) + .report_latency() + .execute(self.storage) + .await?; + Ok(execution_result.rows_affected()) + } + + // Call traces are returned via `TransactionsDal::get_call_trace()`, which is used by the `debug_traceTransaction` RPC method. + // It should be acceptable to return `None` for transactions in pruned miniblocks; this would make them indistinguishable + // from traces for non-existing transactions. + async fn delete_call_traces( + &mut self, + miniblocks_to_prune: ops::RangeInclusive<MiniblockNumber>, + ) -> DalResult<u64> { + let execution_result = sqlx::query!( + r#" + DELETE FROM call_traces + WHERE + tx_hash IN ( + SELECT + hash + FROM + transactions + WHERE + miniblock_number BETWEEN $1 AND $2 + ) + "#, + i64::from(miniblocks_to_prune.start().0), + i64::from(miniblocks_to_prune.end().0) + ) + .instrument("hard_prune_batches_range#delete_call_traces") + .with_arg("miniblocks_to_prune", &miniblocks_to_prune) + .report_latency() + .execute(self.storage) + .await?; + Ok(execution_result.rows_affected()) + } + + // The pruned fields are accessed as follows: + // + // - `input`: a part of `StorageTransaction`, read via `TransactionsDal` (`get_miniblocks_to_reexecute`, + //   `get_miniblocks_to_execute_for_l1_batch`, and `get_tx_by_hash`) and `TransactionsWeb3Dal::get_raw_miniblock_transactions()`. 
+ // `get_tx_by_hash()` is only called on upgrade transactions, which are not pruned. The remaining methods tie transactions + // to a certain L1 batch / miniblock, and thus do naturally check pruning. + // - `data`: used by `TransactionsWeb3Dal` queries, which explicitly check whether it was pruned. + // - `execution_info`: not used in queries. + async fn clear_transaction_fields( + &mut self, + miniblocks_to_prune: ops::RangeInclusive<MiniblockNumber>, + ) -> DalResult<u64> { + let execution_result = sqlx::query!( + r#" + UPDATE transactions + SET + input = NULL, + data = '{}', + execution_info = '{}', + updated_at = NOW() + WHERE + miniblock_number BETWEEN $1 AND $2 + AND upgrade_id IS NULL + "#, + i64::from(miniblocks_to_prune.start().0), + i64::from(miniblocks_to_prune.end().0) + ) + .instrument("hard_prune_batches_range#clear_transaction_fields") + .with_arg("miniblocks_to_prune", &miniblocks_to_prune) + .report_latency() + .execute(self.storage) + .await?; + Ok(execution_result.rows_affected()) + } + + async fn prune_storage_logs_from_past_miniblocks( + &mut self, + miniblocks_to_prune: ops::RangeInclusive<MiniblockNumber>, + ) -> DalResult<u64> { + let execution_result = sqlx::query!( + r#" + DELETE FROM storage_logs USING ( + SELECT + * + FROM + storage_logs + WHERE + miniblock_number BETWEEN $1 AND $2 + ) AS batches_to_prune + WHERE + storage_logs.miniblock_number < $1 + AND batches_to_prune.hashed_key = storage_logs.hashed_key + "#, + i64::from(miniblocks_to_prune.start().0), + i64::from(miniblocks_to_prune.end().0) + ) + .instrument("hard_prune_batches_range#prune_storage_logs_from_past_miniblocks") + .with_arg("miniblocks_to_prune", &miniblocks_to_prune) + .report_latency() + .execute(self.storage) + .await?; + Ok(execution_result.rows_affected()) + } + + async fn prune_storage_logs_in_range( + &mut self, + miniblocks_to_prune: ops::RangeInclusive<MiniblockNumber>, + ) -> DalResult<u64> { + let execution_result = sqlx::query!( + r#" + DELETE FROM storage_logs USING ( + SELECT + hashed_key, + MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op + FROM + storage_logs + WHERE + miniblock_number BETWEEN $1 AND $2 + GROUP BY + hashed_key + ) AS last_storage_logs + WHERE + storage_logs.miniblock_number BETWEEN $1 AND $2 + AND last_storage_logs.hashed_key = storage_logs.hashed_key + AND ( + storage_logs.miniblock_number != last_storage_logs.op[1] + OR storage_logs.operation_number != last_storage_logs.op[2] + ) + "#, + i64::from(miniblocks_to_prune.start().0), + i64::from(miniblocks_to_prune.end().0) + ) + .instrument("hard_prune_batches_range#prune_storage_logs_in_range") + .with_arg("miniblocks_to_prune", &miniblocks_to_prune) + .report_latency() + .execute(self.storage) + .await?; + Ok(execution_result.rows_affected()) + } + + async fn delete_l1_batches(&mut self, last_l1_batch_to_prune: L1BatchNumber) -> DalResult<u64> { + let execution_result = sqlx::query!( + r#" + DELETE FROM l1_batches + WHERE + number <= $1 + "#, + i64::from(last_l1_batch_to_prune.0), + ) + .instrument("hard_prune_batches_range#delete_l1_batches") + .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) + .report_latency() + .execute(self.storage) + .await?; + Ok(execution_result.rows_affected()) + } + + async fn delete_miniblocks( + &mut self, + last_miniblock_to_prune: MiniblockNumber, + ) -> DalResult<u64> { + let execution_result = sqlx::query!( + r#" + DELETE FROM miniblocks + WHERE + number <= $1 + "#, + i64::from(last_miniblock_to_prune.0), + ) + .instrument("hard_prune_batches_range#delete_miniblocks") + .with_arg("last_miniblock_to_prune", 
&last_miniblock_to_prune) + .report_latency() + .execute(self.storage) + .await?; + Ok(execution_result.rows_affected()) + } + + async fn insert_hard_pruning_log( + &mut self, + last_l1_batch_to_prune: L1BatchNumber, + last_miniblock_to_prune: MiniblockNumber, + ) -> DalResult<()> { + sqlx::query!( + r#" + INSERT INTO + pruning_log ( + pruned_l1_batch, + pruned_miniblock, + TYPE, + created_at, + updated_at + ) + VALUES + ($1, $2, $3, NOW(), NOW()) + "#, + i64::from(last_l1_batch_to_prune.0), + i64::from(last_miniblock_to_prune.0), + PruneType::Hard as PruneType + ) + .instrument("hard_prune_batches_range#insert_pruning_log") + .with_arg("last_l1_batch_to_prune", &last_l1_batch_to_prune) + .with_arg("last_miniblock_to_prune", &last_miniblock_to_prune) + .report_latency() + .execute(self.storage) + .await?; + Ok(()) + } + + // This method must be separate as VACUUM is not supported inside a transaction + pub async fn run_vacuum_after_hard_pruning(&mut self) -> DalResult<()> { + sqlx::query!( + r#" + VACUUM l1_batches, + miniblocks, + storage_logs, + events, + call_traces, + l2_to_l1_logs, + transactions + "#, + ) + .instrument("hard_prune_batches_range#vacuum") + .report_latency() + .execute(self.storage) + .await?; + + Ok(()) + } +} diff --git a/core/lib/dal/src/pruning_dal/tests.rs b/core/lib/dal/src/pruning_dal/tests.rs new file mode 100644 index 00000000000..dbd96311ac3 --- /dev/null +++ b/core/lib/dal/src/pruning_dal/tests.rs @@ -0,0 +1,535 @@ +use std::ops; + +use zksync_contracts::BaseSystemContractsHashes; +use zksync_db_connection::connection::Connection; +use zksync_types::{ + block::L1BatchHeader, + fee::TransactionExecutionMetrics, + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + tx::IncludedTxLocation, + AccountTreeId, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersion, + ProtocolVersionId, StorageKey, StorageLog, H256, +}; + +use super::*; +use crate::{ + storage_logs_dal::DbStorageLog, + tests::{ + create_miniblock_header, mock_execution_result, mock_l2_to_l1_log, mock_l2_transaction, + mock_vm_event, + }, + ConnectionPool, Core, CoreDal, +}; + +async fn insert_miniblock( + conn: &mut Connection<'_, Core>, + miniblock_number: MiniblockNumber, + l1_batch_number: L1BatchNumber, +) { + let miniblock1 = create_miniblock_header(miniblock_number.0); + conn.blocks_dal() + .insert_miniblock(&miniblock1) + .await + .unwrap(); + + conn.blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(l1_batch_number) + .await + .unwrap(); + + insert_events(conn, miniblock_number).await; + insert_l2_to_l1_logs(conn, miniblock_number).await; +} + +async fn insert_l2_to_l1_logs(conn: &mut Connection<'_, Core>, miniblock_number: MiniblockNumber) { + let first_location = IncludedTxLocation { + tx_hash: H256([1; 32]), + tx_index_in_miniblock: 0, + tx_initiator_address: Address::default(), + }; + let first_logs = vec![mock_l2_to_l1_log(), mock_l2_to_l1_log()]; + let second_location = IncludedTxLocation { + tx_hash: H256([2; 32]), + tx_index_in_miniblock: 1, + tx_initiator_address: Address::default(), + }; + let second_logs = vec![ + mock_l2_to_l1_log(), + mock_l2_to_l1_log(), + mock_l2_to_l1_log(), + ]; + let all_logs = vec![ + (first_location, first_logs.iter().collect()), + (second_location, second_logs.iter().collect()), + ]; + conn.events_dal() + .save_user_l2_to_l1_logs(miniblock_number, &all_logs) + .await + .unwrap(); +} + +async fn insert_events(conn: &mut Connection<'_, Core>, miniblock_number: MiniblockNumber) { + let first_location = IncludedTxLocation { + tx_hash: H256([1; 
32]), + tx_index_in_miniblock: 0, + tx_initiator_address: Address::default(), + }; + let first_events = vec![mock_vm_event(0), mock_vm_event(1)]; + let second_location = IncludedTxLocation { + tx_hash: H256([2; 32]), + tx_index_in_miniblock: 1, + tx_initiator_address: Address::default(), + }; + let second_events = vec![mock_vm_event(2), mock_vm_event(3), mock_vm_event(4)]; + let all_events = vec![ + (first_location, first_events.iter().collect()), + (second_location, second_events.iter().collect()), + ]; + conn.events_dal() + .save_events(miniblock_number, &all_events) + .await + .unwrap(); +} + +async fn insert_l1_batch(conn: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber) { + let mut header = L1BatchHeader::new( + l1_batch_number, + 100, + BaseSystemContractsHashes { + bootloader: H256::repeat_byte(1), + default_aa: H256::repeat_byte(42), + }, + ProtocolVersionId::latest(), + ); + header.l1_tx_count = 3; + header.l2_tx_count = 5; + header.l2_to_l1_logs.push(UserL2ToL1Log(L2ToL1Log { + shard_id: 0, + is_service: false, + tx_number_in_block: 2, + sender: Address::repeat_byte(2), + key: H256::repeat_byte(3), + value: H256::zero(), + })); + header.l2_to_l1_messages.push(vec![22; 22]); + header.l2_to_l1_messages.push(vec![33; 33]); + + conn.blocks_dal() + .insert_mock_l1_batch(&header) + .await + .unwrap(); +} + +async fn insert_realistic_l1_batches(conn: &mut Connection<'_, Core>, l1_batches_count: u32) { + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + for l1_batch_number in 0..l1_batches_count { + insert_l1_batch(conn, L1BatchNumber(l1_batch_number)).await; + insert_miniblock( + conn, + MiniblockNumber(l1_batch_number * 2), + L1BatchNumber(l1_batch_number), + ) + .await; + insert_miniblock( + conn, + MiniblockNumber(l1_batch_number * 2 + 1), + L1BatchNumber(l1_batch_number), + ) + .await; + } +} + +async fn assert_l1_batch_objects_exists( + conn: &mut Connection<'_, Core>, + l1_batches_range: ops::RangeInclusive<L1BatchNumber>, +) { + for l1_batch_number in l1_batches_range.start().0..l1_batches_range.end().0 { + let l1_batch_number = L1BatchNumber(l1_batch_number); + assert!(conn + .blocks_dal() + .get_miniblock_header(MiniblockNumber(l1_batch_number.0 * 2)) + .await + .unwrap() + .is_some()); + + assert!(conn + .blocks_dal() + .get_miniblock_header(MiniblockNumber(l1_batch_number.0 * 2 + 1)) + .await + .unwrap() + .is_some()); + + assert!(conn + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await + .unwrap() + .is_some()); + } +} + +async fn assert_l1_batch_objects_dont_exist( + conn: &mut Connection<'_, Core>, + l1_batches_range: ops::RangeInclusive<L1BatchNumber>, +) { + let all_logs = conn + .storage_logs_dal() + .dump_all_storage_logs_for_tests() + .await; + + for l1_batch_number in l1_batches_range.start().0..l1_batches_range.end().0 { + let l1_batch_number = L1BatchNumber(l1_batch_number); + let mut miniblock_number = MiniblockNumber(l1_batch_number.0 * 2); + assert!(conn + .blocks_dal() + .get_miniblock_header(miniblock_number) + .await + .unwrap() + .is_none()); + let miniblock_logs: Vec<_> = all_logs + .iter() + .filter(|log| log.miniblock_number == miniblock_number) + .collect(); + assert!(miniblock_logs.is_empty(), "{miniblock_logs:?}"); + + miniblock_number += 1; + assert!(conn + .blocks_dal() + .get_miniblock_header(miniblock_number) + .await + .unwrap() + .is_none()); + let miniblock_logs: Vec<_> = all_logs + .iter() + .filter(|log| log.miniblock_number == miniblock_number) + .collect(); + 
assert!(miniblock_logs.is_empty(), "{miniblock_logs:?}"); + + assert!(conn + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await + .unwrap() + .is_none()); + } +} + +#[tokio::test] +async fn soft_pruning_works() { + let pool = ConnectionPool::<Core>::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + let mut transaction = conn.start_transaction().await.unwrap(); + + assert_eq!( + PruningInfo { + last_soft_pruned_miniblock: None, + last_soft_pruned_l1_batch: None, + last_hard_pruned_miniblock: None, + last_hard_pruned_l1_batch: None + }, + transaction.pruning_dal().get_pruning_info().await.unwrap() + ); + + transaction + .pruning_dal() + .soft_prune_batches_range(L1BatchNumber(5), MiniblockNumber(11)) + .await + .unwrap(); + assert_eq!( + PruningInfo { + last_soft_pruned_miniblock: Some(MiniblockNumber(11)), + last_soft_pruned_l1_batch: Some(L1BatchNumber(5)), + last_hard_pruned_miniblock: None, + last_hard_pruned_l1_batch: None + }, + transaction.pruning_dal().get_pruning_info().await.unwrap() + ); + + transaction + .pruning_dal() + .soft_prune_batches_range(L1BatchNumber(10), MiniblockNumber(21)) + .await + .unwrap(); + assert_eq!( + PruningInfo { + last_soft_pruned_miniblock: Some(MiniblockNumber(21)), + last_soft_pruned_l1_batch: Some(L1BatchNumber(10)), + last_hard_pruned_miniblock: None, + last_hard_pruned_l1_batch: None + }, + transaction.pruning_dal().get_pruning_info().await.unwrap() + ); + + transaction + .pruning_dal() + .hard_prune_batches_range(L1BatchNumber(10), MiniblockNumber(21)) + .await + .unwrap(); + assert_eq!( + PruningInfo { + last_soft_pruned_miniblock: Some(MiniblockNumber(21)), + last_soft_pruned_l1_batch: Some(L1BatchNumber(10)), + last_hard_pruned_miniblock: Some(MiniblockNumber(21)), + last_hard_pruned_l1_batch: Some(L1BatchNumber(10)) + }, + transaction.pruning_dal().get_pruning_info().await.unwrap() + ); +} + +fn random_storage_log(hashed_key_seed: u8, value_seed: u8) -> StorageLog { + let key = StorageKey::new( + AccountTreeId::from_fixed_bytes([hashed_key_seed; 20]), + H256([hashed_key_seed; 32]), + ); + StorageLog::new_write_log(key, H256([value_seed; 32])) +} + +async fn insert_miniblock_storage_logs( + conn: &mut Connection<'_, Core>, + miniblock_number: MiniblockNumber, + storage_logs: Vec<StorageLog>, +) { + conn.storage_logs_dal() + .insert_storage_logs(miniblock_number, &[(H256::zero(), storage_logs)]) + .await + .unwrap(); +} + +fn assert_miniblock_storage_logs_equal( + miniblock_number: MiniblockNumber, + actual_logs: &[DbStorageLog], + expected_logs: &[StorageLog], +) { + let actual_logs_for_miniblock: Vec<(H256, H256)> = actual_logs + .iter() + .filter_map(|log| { + (log.miniblock_number == miniblock_number).then_some((log.hashed_key, log.value)) + }) + .collect(); + let expected_logs: Vec<(H256, H256)> = expected_logs + .iter() + .map(|log| (log.key.hashed_key(), log.value)) + .collect(); + assert_eq!( + expected_logs, actual_logs_for_miniblock, + "logs don't match at miniblock {miniblock_number}" + ) +} + +#[tokio::test] +async fn storage_logs_pruning_works_correctly() { + let pool = ConnectionPool::<Core>::test_pool().await; + + let mut conn = pool.connection().await.unwrap(); + let mut transaction = conn.start_transaction().await.unwrap(); + insert_realistic_l1_batches(&mut transaction, 10).await; + insert_miniblock_storage_logs( + &mut transaction, + MiniblockNumber(1), + vec![random_storage_log(1, 1)], + ) + .await; + + insert_miniblock_storage_logs( + &mut transaction, + MiniblockNumber(0), + // first storage will be overwritten in 
the 1st miniblock, + // the second one should be kept throughout the pruning, + // and the third one will be overwritten in the 15th miniblock + vec![ + random_storage_log(1, 2), + random_storage_log(2, 3), + random_storage_log(3, 4), + ], + ) + .await; + + insert_miniblock_storage_logs( + &mut transaction, + MiniblockNumber(15), + // this storage log overrides the log from miniblock 0 + vec![random_storage_log(3, 5)], + ) + .await; + + insert_miniblock_storage_logs( + &mut transaction, + MiniblockNumber(17), + // there are two logs with the same hashed key; the first one should be overwritten by the second + vec![random_storage_log(5, 5), random_storage_log(5, 7)], + ) + .await; + + transaction + .pruning_dal() + .hard_prune_batches_range(L1BatchNumber(4), MiniblockNumber(9)) + .await + .unwrap(); + let actual_logs = transaction + .storage_logs_dal() + .dump_all_storage_logs_for_tests() + .await; + + assert_miniblock_storage_logs_equal( + MiniblockNumber(0), + &actual_logs, + &[random_storage_log(2, 3), random_storage_log(3, 4)], + ); + assert_miniblock_storage_logs_equal( + MiniblockNumber(1), + &actual_logs, + &[random_storage_log(1, 1)], + ); + + transaction + .pruning_dal() + .hard_prune_batches_range(L1BatchNumber(10), MiniblockNumber(21)) + .await + .unwrap(); + let actual_logs = transaction + .storage_logs_dal() + .dump_all_storage_logs_for_tests() + .await; + + assert_miniblock_storage_logs_equal( + MiniblockNumber(0), + &actual_logs, + &[random_storage_log(2, 3)], + ); + assert_miniblock_storage_logs_equal( + MiniblockNumber(1), + &actual_logs, + &[random_storage_log(1, 1)], + ); + assert_miniblock_storage_logs_equal( + MiniblockNumber(15), + &actual_logs, + &[random_storage_log(3, 5)], + ); + assert_miniblock_storage_logs_equal( + MiniblockNumber(17), + &actual_logs, + &[random_storage_log(5, 7)], + ); +} + +#[tokio::test] +async fn l1_batches_can_be_hard_pruned() { + let pool = ConnectionPool::<Core>::test_pool().await; + + let mut conn = pool.connection().await.unwrap(); + let mut transaction = conn.start_transaction().await.unwrap(); + insert_realistic_l1_batches(&mut transaction, 10).await; + + assert_l1_batch_objects_exists(&mut transaction, L1BatchNumber(1)..=L1BatchNumber(10)).await; + assert!(transaction + .pruning_dal() + .get_pruning_info() + .await + .unwrap() + .last_hard_pruned_l1_batch + .is_none()); + + transaction + .pruning_dal() + .hard_prune_batches_range(L1BatchNumber(5), MiniblockNumber(11)) + .await + .unwrap(); + + assert_l1_batch_objects_dont_exist(&mut transaction, L1BatchNumber(1)..=L1BatchNumber(5)).await; + assert_l1_batch_objects_exists(&mut transaction, L1BatchNumber(6)..=L1BatchNumber(10)).await; + assert_eq!( + Some(L1BatchNumber(5)), + transaction + .pruning_dal() + .get_pruning_info() + .await + .unwrap() + .last_hard_pruned_l1_batch + ); + + let stats = transaction + .pruning_dal() + .hard_prune_batches_range(L1BatchNumber(10), MiniblockNumber(21)) + .await + .unwrap(); + assert_eq!(stats.deleted_l1_batches, 4); + assert_eq!(stats.deleted_miniblocks, 8); + assert_eq!(stats.deleted_events, 40); + assert_eq!(stats.deleted_l2_to_l1_logs, 40); + + assert_l1_batch_objects_dont_exist(&mut transaction, L1BatchNumber(1)..=L1BatchNumber(10)) + .await; + assert_eq!( + Some(L1BatchNumber(10)), + transaction + .pruning_dal() + .get_pruning_info() + .await + .unwrap() + .last_hard_pruned_l1_batch + ); +} + +#[tokio::test] +async fn transactions_are_handled_correctly_after_pruning() { + let pool = ConnectionPool::<Core>::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + 
conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + // Add a miniblock with a transaction and emulate its pruning. + let miniblock_header = create_miniblock_header(1); + let tx = mock_l2_transaction(); + let tx_hash = tx.hash(); + conn.transactions_dal() + .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .await + .unwrap(); + conn.blocks_dal() + .insert_miniblock(&miniblock_header) + .await + .unwrap(); + conn.transactions_dal() + .mark_txs_as_executed_in_miniblock( + MiniblockNumber(1), + &[mock_execution_result(tx.clone())], + 1.into(), + ) + .await + .unwrap(); + + let affected_count = conn + .pruning_dal() + .clear_transaction_fields(MiniblockNumber(1)..=MiniblockNumber(1)) + .await + .unwrap(); + assert_eq!(affected_count, 1); + + let api_transactions = conn + .transactions_web3_dal() + .get_transactions(&[tx_hash], L2ChainId::default()) + .await + .unwrap(); + assert!(api_transactions.is_empty(), "{api_transactions:?}"); + + let transaction_receipts = conn + .transactions_web3_dal() + .get_transaction_receipts(&[tx_hash]) + .await + .unwrap(); + assert!(transaction_receipts.is_empty(), "{transaction_receipts:?}"); + + let transaction_details = conn + .transactions_web3_dal() + .get_transaction_details(tx_hash) + .await + .unwrap(); + assert!(transaction_details.is_none(), "{transaction_details:?}"); +} diff --git a/core/lib/dal/src/pruning_dal_tests.rs b/core/lib/dal/src/pruning_dal_tests.rs deleted file mode 100644 index 636bf294bde..00000000000 --- a/core/lib/dal/src/pruning_dal_tests.rs +++ /dev/null @@ -1,477 +0,0 @@ -use crate::pruning_dal::PruningInfo; - -#[cfg(test)] -mod tests { - use std::ops; - - use zksync_contracts::BaseSystemContractsHashes; - use zksync_db_connection::connection::Connection; - use zksync_types::{ - block::L1BatchHeader, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - tx::IncludedTxLocation, - AccountTreeId, Address, L1BatchNumber, MiniblockNumber, ProtocolVersion, ProtocolVersionId, - StorageKey, StorageLog, H256, - }; - - use super::*; - use crate::{ - tests::{create_miniblock_header, mock_l2_to_l1_log, mock_vm_event}, - ConnectionPool, Core, CoreDal, - }; - - async fn insert_miniblock( - conn: &mut Connection<'_, Core>, - miniblock_number: MiniblockNumber, - l1_batch_number: L1BatchNumber, - ) { - let miniblock1 = create_miniblock_header(miniblock_number.0); - conn.blocks_dal() - .insert_miniblock(&miniblock1) - .await - .unwrap(); - - conn.blocks_dal() - .mark_miniblocks_as_executed_in_l1_batch(l1_batch_number) - .await - .unwrap(); - - insert_events(conn, miniblock_number).await; - insert_l2_to_l1_logs(conn, miniblock_number).await; - } - - async fn insert_l2_to_l1_logs( - conn: &mut Connection<'_, Core>, - miniblock_number: MiniblockNumber, - ) { - let first_location = IncludedTxLocation { - tx_hash: H256([1; 32]), - tx_index_in_miniblock: 0, - tx_initiator_address: Address::default(), - }; - let first_logs = vec![mock_l2_to_l1_log(), mock_l2_to_l1_log()]; - let second_location = IncludedTxLocation { - tx_hash: H256([2; 32]), - tx_index_in_miniblock: 1, - tx_initiator_address: Address::default(), - }; - let second_logs = vec![ - mock_l2_to_l1_log(), - mock_l2_to_l1_log(), - mock_l2_to_l1_log(), - ]; - let all_logs = vec![ - (first_location, first_logs.iter().collect()), - (second_location, second_logs.iter().collect()), - ]; - conn.events_dal() - .save_user_l2_to_l1_logs(miniblock_number, &all_logs) - .await - .unwrap(); - } - - async fn insert_events(conn: &mut 
Connection<'_, Core>, miniblock_number: MiniblockNumber) { - let first_location = IncludedTxLocation { - tx_hash: H256([1; 32]), - tx_index_in_miniblock: 0, - tx_initiator_address: Address::default(), - }; - let first_events = vec![mock_vm_event(0), mock_vm_event(1)]; - let second_location = IncludedTxLocation { - tx_hash: H256([2; 32]), - tx_index_in_miniblock: 1, - tx_initiator_address: Address::default(), - }; - let second_events = vec![mock_vm_event(2), mock_vm_event(3), mock_vm_event(4)]; - let all_events = vec![ - (first_location, first_events.iter().collect()), - (second_location, second_events.iter().collect()), - ]; - conn.events_dal() - .save_events(miniblock_number, &all_events) - .await - .unwrap(); - } - - async fn insert_l1_batch(conn: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber) { - let mut header = L1BatchHeader::new( - l1_batch_number, - 100, - BaseSystemContractsHashes { - bootloader: H256::repeat_byte(1), - default_aa: H256::repeat_byte(42), - }, - ProtocolVersionId::latest(), - ); - header.l1_tx_count = 3; - header.l2_tx_count = 5; - header.l2_to_l1_logs.push(UserL2ToL1Log(L2ToL1Log { - shard_id: 0, - is_service: false, - tx_number_in_block: 2, - sender: Address::repeat_byte(2), - key: H256::repeat_byte(3), - value: H256::zero(), - })); - header.l2_to_l1_messages.push(vec![22; 22]); - header.l2_to_l1_messages.push(vec![33; 33]); - - conn.blocks_dal() - .insert_mock_l1_batch(&header) - .await - .unwrap(); - } - - async fn insert_realistic_l1_batches(conn: &mut Connection<'_, Core>, l1_batches_count: u32) { - conn.protocol_versions_dal() - .save_protocol_version_with_tx(&ProtocolVersion::default()) - .await - .unwrap(); - - for l1_batch_number in 0..l1_batches_count { - insert_l1_batch(conn, L1BatchNumber(l1_batch_number)).await; - insert_miniblock( - conn, - MiniblockNumber(l1_batch_number * 2), - L1BatchNumber(l1_batch_number), - ) - .await; - insert_miniblock( - conn, - MiniblockNumber(l1_batch_number * 2 + 1), - L1BatchNumber(l1_batch_number), - ) - .await; - } - } - - async fn assert_l1_batch_objects_exists( - conn: &mut Connection<'_, Core>, - l1_batches_range: ops::RangeInclusive<L1BatchNumber>, - ) { - for l1_batch_number in l1_batches_range.start().0..l1_batches_range.end().0 { - let l1_batch_number = L1BatchNumber(l1_batch_number); - assert!(conn - .blocks_dal() - .get_miniblock_header(MiniblockNumber(l1_batch_number.0 * 2)) - .await - .unwrap() - .is_some()); - - assert!(conn - .blocks_dal() - .get_miniblock_header(MiniblockNumber(l1_batch_number.0 * 2 + 1)) - .await - .unwrap() - .is_some()); - - assert!(conn - .blocks_dal() - .get_l1_batch_header(l1_batch_number) - .await - .unwrap() - .is_some()); - } - } - - async fn assert_l1_batch_objects_dont_exist( - conn: &mut Connection<'_, Core>, - l1_batches_range: ops::RangeInclusive<L1BatchNumber>, - ) { - for l1_batch_number in l1_batches_range.start().0..l1_batches_range.end().0 { - let l1_batch_number = L1BatchNumber(l1_batch_number); - assert!(conn - .blocks_dal() - .get_miniblock_header(MiniblockNumber(l1_batch_number.0 * 2)) - .await - .unwrap() - .is_none()); - assert_eq!( - 0, - conn.storage_logs_dal() - .get_miniblock_storage_logs(MiniblockNumber(l1_batch_number.0 * 2)) - .await - .len() - ); - - assert!(conn - .blocks_dal() - .get_miniblock_header(MiniblockNumber(l1_batch_number.0 * 2 + 1)) - .await - .unwrap() - .is_none()); - assert_eq!( - 0, - conn.storage_logs_dal() - .get_miniblock_storage_logs(MiniblockNumber(l1_batch_number.0 * 2 + 1)) - .await - .len() - ); - - assert!(conn - .blocks_dal() - 
.get_l1_batch_header(l1_batch_number) - .await - .unwrap() - .is_none()); - } - } - - #[tokio::test] - async fn soft_pruning_works() { - let pool = ConnectionPool::<Core>::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - let mut transaction = conn.start_transaction().await.unwrap(); - - assert_eq!( - PruningInfo { - last_soft_pruned_miniblock: None, - last_soft_pruned_l1_batch: None, - last_hard_pruned_miniblock: None, - last_hard_pruned_l1_batch: None - }, - transaction.pruning_dal().get_pruning_info().await.unwrap() - ); - - transaction - .pruning_dal() - .soft_prune_batches_range(L1BatchNumber(5), MiniblockNumber(11)) - .await - .unwrap(); - assert_eq!( - PruningInfo { - last_soft_pruned_miniblock: Some(MiniblockNumber(11)), - last_soft_pruned_l1_batch: Some(L1BatchNumber(5)), - last_hard_pruned_miniblock: None, - last_hard_pruned_l1_batch: None - }, - transaction.pruning_dal().get_pruning_info().await.unwrap() - ); - - transaction - .pruning_dal() - .soft_prune_batches_range(L1BatchNumber(10), MiniblockNumber(21)) - .await - .unwrap(); - assert_eq!( - PruningInfo { - last_soft_pruned_miniblock: Some(MiniblockNumber(21)), - last_soft_pruned_l1_batch: Some(L1BatchNumber(10)), - last_hard_pruned_miniblock: None, - last_hard_pruned_l1_batch: None - }, - transaction.pruning_dal().get_pruning_info().await.unwrap() - ); - - transaction - .pruning_dal() - .hard_prune_batches_range(L1BatchNumber(10), MiniblockNumber(21)) - .await - .unwrap(); - assert_eq!( - PruningInfo { - last_soft_pruned_miniblock: Some(MiniblockNumber(21)), - last_soft_pruned_l1_batch: Some(L1BatchNumber(10)), - last_hard_pruned_miniblock: Some(MiniblockNumber(21)), - last_hard_pruned_l1_batch: Some(L1BatchNumber(10)) - }, - transaction.pruning_dal().get_pruning_info().await.unwrap() - ); - } - - fn random_storage_log(hashed_key_seed: u8, value_seed: u8) -> StorageLog { - let key = StorageKey::new( - AccountTreeId::from_fixed_bytes([hashed_key_seed; 20]), - H256([hashed_key_seed; 32]), - ); - StorageLog::new_write_log(key, H256([value_seed; 32])) - } - async fn insert_miniblock_storage_logs( - conn: &mut Connection<'_, Core>, - miniblock_number: MiniblockNumber, - storage_logs: Vec<StorageLog>, - ) { - conn.storage_logs_dal() - .insert_storage_logs(miniblock_number, &[(H256::zero(), storage_logs)]) - .await - .unwrap(); - } - - async fn assert_miniblock_storage_logs_equal( - conn: &mut Connection<'_, Core>, - miniblock_number: MiniblockNumber, - expected_logs: Vec<StorageLog>, - ) { - let actual_logs: Vec<(H256, H256)> = conn - .storage_logs_dal() - .get_miniblock_storage_logs(miniblock_number) - .await - .iter() - .map(|log| (log.0, log.1)) - .collect(); - let expected_logs: Vec<(H256, H256)> = expected_logs - .iter() - .enumerate() - .map(|(_enumeration_number, log)| (log.key.hashed_key(), log.value)) - .collect(); - assert_eq!( - expected_logs, actual_logs, - "logs don't match at miniblock {miniblock_number}" - ) - } - - #[tokio::test] - async fn storage_logs_pruning_works_correctly() { - let pool = ConnectionPool::<Core>::test_pool().await; - - let mut conn = pool.connection().await.unwrap(); - let mut transaction = conn.start_transaction().await.unwrap(); - insert_realistic_l1_batches(&mut transaction, 10).await; - insert_miniblock_storage_logs( - &mut transaction, - MiniblockNumber(1), - vec![random_storage_log(1, 1)], - ) - .await; - - insert_miniblock_storage_logs( - &mut transaction, - MiniblockNumber(0), - // first storage will be overwritten in 1st miniblock, - // the second one should be kept throughout the pruning - // the 
third one will be overwritten in 10th miniblock - vec![ - random_storage_log(1, 2), - random_storage_log(2, 3), - random_storage_log(3, 4), - ], - ) - .await; - - insert_miniblock_storage_logs( - &mut transaction, - MiniblockNumber(15), - // this storage log overrides log from block 0 - vec![random_storage_log(3, 5)], - ) - .await; - - insert_miniblock_storage_logs( - &mut transaction, - MiniblockNumber(17), - // there are two logs with the same hashed key, the second one should be overwritten - vec![random_storage_log(5, 5), random_storage_log(5, 7)], - ) - .await; - - transaction - .pruning_dal() - .hard_prune_batches_range(L1BatchNumber(4), MiniblockNumber(9)) - .await - .unwrap(); - - assert_miniblock_storage_logs_equal( - &mut transaction, - MiniblockNumber(0), - vec![random_storage_log(2, 3), random_storage_log(3, 4)], - ) - .await; - assert_miniblock_storage_logs_equal( - &mut transaction, - MiniblockNumber(1), - vec![random_storage_log(1, 1)], - ) - .await; - - transaction - .pruning_dal() - .hard_prune_batches_range(L1BatchNumber(10), MiniblockNumber(21)) - .await - .unwrap(); - - assert_miniblock_storage_logs_equal( - &mut transaction, - MiniblockNumber(0), - vec![random_storage_log(2, 3)], - ) - .await; - - assert_miniblock_storage_logs_equal( - &mut transaction, - MiniblockNumber(1), - vec![random_storage_log(1, 1)], - ) - .await; - - assert_miniblock_storage_logs_equal( - &mut transaction, - MiniblockNumber(15), - vec![random_storage_log(3, 5)], - ) - .await; - - assert_miniblock_storage_logs_equal( - &mut transaction, - MiniblockNumber(17), - vec![random_storage_log(5, 7)], - ) - .await; - } - - #[tokio::test] - async fn l1_batches_can_be_hard_pruned() { - let pool = ConnectionPool::::test_pool().await; - - let mut conn = pool.connection().await.unwrap(); - let mut transaction = conn.start_transaction().await.unwrap(); - insert_realistic_l1_batches(&mut transaction, 10).await; - - assert_l1_batch_objects_exists(&mut transaction, L1BatchNumber(1)..=L1BatchNumber(10)) - .await; - assert!(transaction - .pruning_dal() - .get_pruning_info() - .await - .unwrap() - .last_hard_pruned_l1_batch - .is_none()); - - transaction - .pruning_dal() - .hard_prune_batches_range(L1BatchNumber(5), MiniblockNumber(11)) - .await - .unwrap(); - - assert_l1_batch_objects_dont_exist(&mut transaction, L1BatchNumber(1)..=L1BatchNumber(5)) - .await; - assert_l1_batch_objects_exists(&mut transaction, L1BatchNumber(6)..=L1BatchNumber(10)) - .await; - assert_eq!( - Some(L1BatchNumber(5)), - transaction - .pruning_dal() - .get_pruning_info() - .await - .unwrap() - .last_hard_pruned_l1_batch - ); - - transaction - .pruning_dal() - .hard_prune_batches_range(L1BatchNumber(10), MiniblockNumber(21)) - .await - .unwrap(); - - assert_l1_batch_objects_dont_exist(&mut transaction, L1BatchNumber(1)..=L1BatchNumber(10)) - .await; - assert_eq!( - Some(L1BatchNumber(10)), - transaction - .pruning_dal() - .get_pruning_info() - .await - .unwrap() - .last_hard_pruned_l1_batch - ); - } -} diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index 0ca06c320c0..5dd32fd98f6 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -276,41 +276,6 @@ impl StorageLogsDal<'_, '_> { Ok(()) } - /// Loads (hashed_key, value, operation_number) tuples for given miniblock_number. - /// Shouldn't be used in production. 
- #[cfg(test)] - pub async fn get_miniblock_storage_logs( - &mut self, - miniblock_number: MiniblockNumber, - ) -> Vec<(H256, H256, u32)> { - sqlx::query!( - r#" - SELECT - hashed_key, - value, - operation_number - FROM - storage_logs - WHERE - miniblock_number = $1 - ORDER BY - operation_number - "#, - i64::from(miniblock_number.0) - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| { - let hashed_key = H256::from_slice(&row.hashed_key); - let value = H256::from_slice(&row.value); - let operation_number: u32 = row.operation_number as u32; - (hashed_key, value, operation_number) - }) - .collect() - } - pub async fn is_contract_deployed_at_address(&mut self, address: Address) -> bool { let hashed_key = get_code_key(&address).hashed_key(); let row = sqlx::query!( @@ -665,6 +630,9 @@ impl StorageLogsDal<'_, '_> { miniblock_number FROM storage_logs + ORDER BY + miniblock_number, + operation_number "# ) .fetch_all(self.storage.conn()) diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index f6d7bde8aa2..d026ec5973d 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -75,7 +75,10 @@ impl TransactionsWeb3Dal<'_, '_> { AND sl.tx_hash = transactions.hash WHERE transactions.hash = ANY ($3) + AND transactions.data != '{}'::jsonb "#, + // ^ Filter out transactions with pruned data, which would lead to potentially incomplete / bogus + // transaction info. ACCOUNT_CODE_STORAGE_ADDRESS.as_bytes(), FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes(), &hash_bytes as &[&[u8]] @@ -179,7 +182,10 @@ impl TransactionsWeb3Dal<'_, '_> { LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number WHERE "#, - _ // WHERE condition + _, // WHERE condition + " AND transactions.data != '{}'::jsonb" + // ^ Filter out transactions with pruned data, which would lead to potentially incomplete / bogus + // transaction info. ], match (selector) { TransactionSelector::Hashes(hashes) => ( @@ -234,53 +240,52 @@ impl TransactionsWeb3Dal<'_, '_> { &mut self, hash: H256, ) -> DalResult> { - { - let storage_tx_details: Option = sqlx::query_as!( - StorageTransactionDetails, - r#" - SELECT - transactions.is_priority, - transactions.initiator_address, - transactions.gas_limit, - transactions.gas_per_pubdata_limit, - transactions.received_at, - transactions.miniblock_number, - transactions.error, - transactions.effective_gas_price, - transactions.refunded_gas, - commit_tx.tx_hash AS "eth_commit_tx_hash?", - prove_tx.tx_hash AS "eth_prove_tx_hash?", - execute_tx.tx_hash AS "eth_execute_tx_hash?" 
- FROM - transactions - LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number - LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number - LEFT JOIN eth_txs_history AS commit_tx ON ( - l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id - AND commit_tx.confirmed_at IS NOT NULL - ) - LEFT JOIN eth_txs_history AS prove_tx ON ( - l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id - AND prove_tx.confirmed_at IS NOT NULL - ) - LEFT JOIN eth_txs_history AS execute_tx ON ( - l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id - AND execute_tx.confirmed_at IS NOT NULL - ) - WHERE - transactions.hash = $1 - "#, - hash.as_bytes() - ) - .instrument("get_transaction_details") - .with_arg("hash", &hash) - .fetch_optional(self.storage) - .await?; - - let tx = storage_tx_details.map(|tx_details| tx_details.into()); + let row = sqlx::query_as!( + StorageTransactionDetails, + r#" + SELECT + transactions.is_priority, + transactions.initiator_address, + transactions.gas_limit, + transactions.gas_per_pubdata_limit, + transactions.received_at, + transactions.miniblock_number, + transactions.error, + transactions.effective_gas_price, + transactions.refunded_gas, + commit_tx.tx_hash AS "eth_commit_tx_hash?", + prove_tx.tx_hash AS "eth_prove_tx_hash?", + execute_tx.tx_hash AS "eth_execute_tx_hash?" + FROM + transactions + LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number + LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number + LEFT JOIN eth_txs_history AS commit_tx ON ( + l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id + AND commit_tx.confirmed_at IS NOT NULL + ) + LEFT JOIN eth_txs_history AS prove_tx ON ( + l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id + AND prove_tx.confirmed_at IS NOT NULL + ) + LEFT JOIN eth_txs_history AS execute_tx ON ( + l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id + AND execute_tx.confirmed_at IS NOT NULL + ) + WHERE + transactions.hash = $1 + AND transactions.data != '{}'::jsonb + "#, + // ^ Filter out transactions with pruned data, which would lead to potentially incomplete / bogus + // transaction info. + hash.as_bytes() + ) + .instrument("get_transaction_details") + .with_arg("hash", &hash) + .fetch_optional(self.storage) + .await?; - Ok(tx) - } + Ok(row.map(Into::into)) } /// Returns hashes of txs which were received after `from_timestamp` and the time of receiving the last tx. diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs index 47e45ae9f83..9aaa2f34fe1 100644 --- a/core/lib/snapshots_applier/src/lib.rs +++ b/core/lib/snapshots_applier/src/lib.rs @@ -246,37 +246,39 @@ impl SnapshotsApplierConfig { } } -/// Applying application-level storage snapshots to the Postgres storage. +/// Strategy determining how snapshot recovery should proceed. #[derive(Debug)] -struct SnapshotsApplier<'a> { - connection_pool: &'a ConnectionPool, - main_node_client: &'a dyn SnapshotsApplierMainNodeClient, - blob_store: &'a dyn ObjectStore, - applied_snapshot_status: SnapshotRecoveryStatus, - health_updater: &'a HealthUpdater, - factory_deps_recovered: bool, - tokens_recovered: bool, +enum SnapshotRecoveryStrategy { + /// Snapshot recovery should proceed from scratch with the specified params. + New(SnapshotRecoveryStatus), + /// Snapshot recovery should continue with the specified params. + Resumed(SnapshotRecoveryStatus), + /// Snapshot recovery has already been completed. 
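+ /// In this case the applier has nothing to do, and `load_snapshot()` returns early.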
+ Completed, } -impl<'a> SnapshotsApplier<'a> { - /// Recovers [`SnapshotRecoveryStatus`] from the storage and the main node. - async fn prepare_applied_snapshot_status( +impl SnapshotRecoveryStrategy { + async fn new( storage: &mut Connection<'_, Core>, main_node_client: &dyn SnapshotsApplierMainNodeClient, - ) -> Result<(SnapshotRecoveryStatus, bool), SnapshotsApplierError> { + ) -> Result { let latency = METRICS.initial_stage_duration[&InitialStage::FetchMetadataFromMainNode].start(); - let applied_snapshot_status = storage .snapshot_recovery_dal() .get_applied_snapshot_status() .await?; if let Some(applied_snapshot_status) = applied_snapshot_status { + let sealed_miniblock_number = + storage.blocks_dal().get_sealed_miniblock_number().await?; + if sealed_miniblock_number.is_some() { + return Ok(Self::Completed); + } + let latency = latency.observe(); tracing::info!("Re-initialized snapshots applier after reset/failure in {latency:?}"); - - Ok((applied_snapshot_status, false)) + Ok(Self::Resumed(applied_snapshot_status)) } else { let is_genesis_needed = storage.blocks_dal().is_genesis_needed().await?; if !is_genesis_needed { @@ -286,8 +288,7 @@ impl<'a> SnapshotsApplier<'a> { return Err(SnapshotsApplierError::Fatal(err)); } - let recovery_status = - SnapshotsApplier::create_fresh_recovery_status(main_node_client).await?; + let recovery_status = Self::create_fresh_recovery_status(main_node_client).await?; let storage_logs_count = storage .storage_logs_dal() @@ -304,97 +305,10 @@ impl<'a> SnapshotsApplier<'a> { let latency = latency.observe(); tracing::info!("Initialized fresh snapshots applier in {latency:?}"); - Ok((recovery_status, true)) + Ok(Self::New(recovery_status)) } } - async fn load_snapshot( - connection_pool: &'a ConnectionPool, - main_node_client: &'a dyn SnapshotsApplierMainNodeClient, - blob_store: &'a dyn ObjectStore, - health_updater: &'a HealthUpdater, - ) -> Result<(), SnapshotsApplierError> { - health_updater.update(HealthStatus::Ready.into()); - - let mut storage = connection_pool - .connection_tagged("snapshots_applier") - .await?; - let mut storage_transaction = storage.start_transaction().await?; - - if storage_transaction - .snapshot_recovery_dal() - .get_applied_snapshot_status() - .await? - .is_some() - && storage_transaction - .blocks_dal() - .get_sealed_miniblock_number() - .await? 
- .is_some() - { - return Ok(()); - } - - let (applied_snapshot_status, created_from_scratch) = - Self::prepare_applied_snapshot_status(&mut storage_transaction, main_node_client) - .await?; - - let mut this = Self { - connection_pool, - main_node_client, - blob_store, - applied_snapshot_status, - health_updater, - factory_deps_recovered: !created_from_scratch, - tokens_recovered: false, - }; - - METRICS.storage_logs_chunks_count.set( - this.applied_snapshot_status - .storage_logs_chunks_processed - .len(), - ); - METRICS.storage_logs_chunks_left_to_process.set( - this.applied_snapshot_status - .storage_logs_chunks_left_to_process(), - ); - this.update_health(); - - if created_from_scratch { - this.recover_factory_deps(&mut storage_transaction).await?; - storage_transaction - .snapshot_recovery_dal() - .insert_initial_recovery_status(&this.applied_snapshot_status) - .await?; - storage_transaction - .pruning_dal() - .soft_prune_batches_range( - this.applied_snapshot_status.l1_batch_number, - this.applied_snapshot_status.miniblock_number, - ) - .await?; - - storage_transaction - .pruning_dal() - .hard_prune_batches_range( - this.applied_snapshot_status.l1_batch_number, - this.applied_snapshot_status.miniblock_number, - ) - .await?; - } - storage_transaction.commit().await?; - drop(storage); - this.factory_deps_recovered = true; - this.update_health(); - - this.recover_storage_logs().await?; - this.recover_tokens().await?; - this.tokens_recovered = true; - this.update_health(); - - Ok(()) - } - async fn create_fresh_recovery_status( main_node_client: &dyn SnapshotsApplierMainNodeClient, ) -> Result { @@ -462,6 +376,98 @@ impl<'a> SnapshotsApplier<'a> { ); Ok(()) } +} + +/// Applying application-level storage snapshots to the Postgres storage. +#[derive(Debug)] +struct SnapshotsApplier<'a> { + connection_pool: &'a ConnectionPool, + main_node_client: &'a dyn SnapshotsApplierMainNodeClient, + blob_store: &'a dyn ObjectStore, + applied_snapshot_status: SnapshotRecoveryStatus, + health_updater: &'a HealthUpdater, + factory_deps_recovered: bool, + tokens_recovered: bool, +} + +impl<'a> SnapshotsApplier<'a> { + async fn load_snapshot( + connection_pool: &'a ConnectionPool, + main_node_client: &'a dyn SnapshotsApplierMainNodeClient, + blob_store: &'a dyn ObjectStore, + health_updater: &'a HealthUpdater, + ) -> Result<(), SnapshotsApplierError> { + health_updater.update(HealthStatus::Ready.into()); + + let mut storage = connection_pool + .connection_tagged("snapshots_applier") + .await?; + let mut storage_transaction = storage.start_transaction().await?; + + let strategy = + SnapshotRecoveryStrategy::new(&mut storage_transaction, main_node_client).await?; + let (applied_snapshot_status, created_from_scratch) = match strategy { + SnapshotRecoveryStrategy::Completed => return Ok(()), + SnapshotRecoveryStrategy::New(status) => (status, true), + SnapshotRecoveryStrategy::Resumed(status) => (status, false), + }; + let mut this = Self { + connection_pool, + main_node_client, + blob_store, + applied_snapshot_status, + health_updater, + factory_deps_recovered: !created_from_scratch, + tokens_recovered: false, + }; + + METRICS.storage_logs_chunks_count.set( + this.applied_snapshot_status + .storage_logs_chunks_processed + .len(), + ); + METRICS.storage_logs_chunks_left_to_process.set( + this.applied_snapshot_status + .storage_logs_chunks_left_to_process(), + ); + this.update_health(); + + if created_from_scratch { + this.recover_factory_deps(&mut storage_transaction).await?; + storage_transaction + 
.snapshot_recovery_dal() + .insert_initial_recovery_status(&this.applied_snapshot_status) + .await?; + + // Insert artificial entries into the pruning log so that it's guaranteed to match the snapshot recovery metadata. + // This allows to not deal with the corner cases when a node was recovered from a snapshot, but its pruning log is empty. + storage_transaction + .pruning_dal() + .soft_prune_batches_range( + this.applied_snapshot_status.l1_batch_number, + this.applied_snapshot_status.miniblock_number, + ) + .await?; + storage_transaction + .pruning_dal() + .hard_prune_batches_range( + this.applied_snapshot_status.l1_batch_number, + this.applied_snapshot_status.miniblock_number, + ) + .await?; + } + storage_transaction.commit().await?; + drop(storage); + this.factory_deps_recovered = true; + this.update_health(); + + this.recover_storage_logs().await?; + this.recover_tokens().await?; + this.tokens_recovered = true; + this.update_health(); + + Ok(()) + } fn update_health(&self) { let details = SnapshotsApplierHealthDetails { diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs index 7743f71e665..2c4a6b5b57e 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs @@ -1,11 +1,10 @@ use std::{ sync::{Arc, RwLock}, - time::Duration, + time::{Duration, Instant}, }; use anyhow::Context as _; -use chrono::{DateTime, Utc}; -use rand::random; +use rand::{thread_rng, Rng}; use tokio::runtime::Handle; use zksync_dal::{pruning_dal::PruningInfo, Connection, Core, CoreDal, DalError}; use zksync_state::PostgresStorageCaches; @@ -192,65 +191,88 @@ impl TxSharedArgs { } } +#[derive(Debug, Clone, Copy)] +struct BlockStartInfoInner { + info: PruningInfo, + cached_at: Instant, +} + +impl BlockStartInfoInner { + const MAX_CACHE_AGE: Duration = Duration::from_secs(20); + // We make max age a bit random so that all threads don't start refreshing cache at the same time + const MAX_RANDOM_DELAY: Duration = Duration::from_millis(100); + + fn is_expired(&self, now: Instant) -> bool { + if let Some(expired_for) = (now - self.cached_at).checked_sub(Self::MAX_CACHE_AGE) { + if expired_for > Self::MAX_RANDOM_DELAY { + return true; // The cache is definitely expired, regardless of the randomness below + } + // Minimize access to RNG, which could be mildly costly + expired_for > thread_rng().gen_range(Duration::ZERO..=Self::MAX_RANDOM_DELAY) + } else { + false // `now` is close to `self.cached_at`; the cache isn't expired + } + } +} + /// Information about first L1 batch / miniblock in the node storage. 
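+/// A minimal usage sketch (illustrative only; `pool` stands in for a `ConnectionPool<Core>`,
+/// everything else is defined in this module):
+///
+/// ```ignore
+/// let mut storage = pool.connection().await?;
+/// let start_info = BlockStartInfo::new(&mut storage).await?;
+/// // Clones are cheap and share the same cached `PruningInfo` behind an `Arc<RwLock<_>>`.
+/// let first_miniblock = start_info.first_miniblock(&mut storage).await?;
+/// ```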
#[derive(Debug, Clone)] pub(crate) struct BlockStartInfo { - cached_pruning_info: Arc<RwLock<(PruningInfo, DateTime<Utc>)>>, + cached_pruning_info: Arc<RwLock<BlockStartInfoInner>>, } impl BlockStartInfo { pub async fn new(storage: &mut Connection<'_, Core>) -> anyhow::Result<Self> { + let info = storage.pruning_dal().get_pruning_info().await?; Ok(Self { - cached_pruning_info: Arc::from(RwLock::from(( - storage.pruning_dal().get_pruning_info().await?, - Utc::now(), - ))), + cached_pruning_info: Arc::new(RwLock::new(BlockStartInfoInner { + info, + cached_at: Instant::now(), + })), }) } - fn get_cache_state_copy(&self) -> (PruningInfo, DateTime<Utc>) { - let current_cache = self + fn copy_inner(&self) -> BlockStartInfoInner { + *self .cached_pruning_info .read() - .expect("BlockStartInfo is poisoned"); - *current_cache - } - - fn is_cache_expired(&self, now: DateTime<Utc>, last_cache_date: DateTime<Utc>) -> bool { - const CACHE_MAX_AGE_MS: i64 = 20000; - // we make max age a bit random so that all threads don't start refreshing cache at the same time - let random_delay = - chrono::Duration::milliseconds(i64::from(random::<u32>()) % CACHE_MAX_AGE_MS / 2); - now - last_cache_date > chrono::Duration::milliseconds(CACHE_MAX_AGE_MS) + random_delay + .expect("BlockStartInfo is poisoned") } async fn update_cache( &self, storage: &mut Connection<'_, Core>, - now: DateTime<Utc>, + now: Instant, ) -> anyhow::Result<PruningInfo> { - let new_pruning_info = storage.pruning_dal().get_pruning_info().await?; + let info = storage.pruning_dal().get_pruning_info().await?; let mut new_cached_pruning_info = self .cached_pruning_info .write() .expect("BlockStartInfo is poisoned"); - new_cached_pruning_info.0 = new_pruning_info; - new_cached_pruning_info.1 = now; - Ok(new_pruning_info) + Ok(if new_cached_pruning_info.cached_at < now { + *new_cached_pruning_info = BlockStartInfoInner { + info, + cached_at: now, + }; + info + } else { + // Got a newer cache already; no need to update it again.
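+ // (Comparing `Instant`s here means an older snapshot never overwrites a fresher one written by a concurrent task.)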
+ new_cached_pruning_info.info + }) } async fn get_pruning_info( &self, storage: &mut Connection<'_, Core>, ) -> anyhow::Result { - let (last_cached_pruning_info, last_cache_date) = self.get_cache_state_copy(); - let now = Utc::now(); - if self.is_cache_expired(now, last_cache_date) { - //multiple threads may execute this query if we're very unlucky + let inner = self.copy_inner(); + let now = Instant::now(); + if inner.is_expired(now) { + // Multiple threads may execute this query if we're very unlucky self.update_cache(storage, now).await } else { - Ok(last_cached_pruning_info) + Ok(inner.info) } } diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs index 84c76e84d56..f54de176ad9 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs @@ -24,8 +24,8 @@ use zksync_types::{ get_code_key, get_intrinsic_constants, l2::{error::TxCheckError::TxDuplication, L2Tx}, utils::storage_key_for_eth_balance, - AccountTreeId, Address, ExecuteTransactionCommon, L2ChainId, MiniblockNumber, Nonce, - PackedEthSignature, ProtocolVersionId, Transaction, VmVersion, H160, H256, MAX_L2_TX_GAS_LIMIT, + AccountTreeId, Address, ExecuteTransactionCommon, L2ChainId, Nonce, PackedEthSignature, + ProtocolVersionId, Transaction, VmVersion, H160, H256, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, U256, }; use zksync_utils::h256_to_u256; @@ -35,8 +35,8 @@ use self::tx_sink::TxSink; use crate::{ api_server::{ execution_sandbox::{ - BlockArgs, BlockStartInfo, SubmitTxStage, TransactionExecutor, TxExecutionArgs, - TxSharedArgs, VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS, + BlockArgs, SubmitTxStage, TransactionExecutor, TxExecutionArgs, TxSharedArgs, + VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS, }, tx_sender::result::ApiCallResult, }, @@ -526,15 +526,11 @@ impl TxSender { async fn get_expected_nonce(&self, initiator_account: Address) -> anyhow::Result { let mut storage = self.acquire_replica_connection().await?; - let latest_block_number = storage.blocks_dal().get_sealed_miniblock_number().await?; - let latest_block_number = match latest_block_number { - Some(number) => number, - None => { - // We don't have miniblocks in the storage yet. Use the snapshot miniblock number instead. - let start = BlockStartInfo::new(&mut storage).await?; - MiniblockNumber(start.first_miniblock(&mut storage).await?.saturating_sub(1)) - } - }; + let latest_block_number = storage + .blocks_dal() + .get_sealed_miniblock_number() + .await? + .context("no miniblocks in storage")?; let nonce = storage .storage_web3_dal() diff --git a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs index 85c90b88e71..05630b2af66 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs @@ -1,7 +1,7 @@ //! Tests for the transaction sender. 
use zksync_config::configs::wallets::Wallets; -use zksync_types::{get_nonce_key, L1BatchNumber, StorageLog}; +use zksync_types::{get_nonce_key, L1BatchNumber, MiniblockNumber, StorageLog}; use super::*; use crate::{ @@ -113,14 +113,6 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { let tx_executor = MockTransactionExecutor::default().into(); let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; - let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); - assert_eq!(nonce, Nonce(123)); - let nonce = tx_sender.get_expected_nonce(other_address).await.unwrap(); - assert_eq!(nonce, Nonce(25)); - let missing_address = Address::repeat_byte(0xff); - let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); - assert_eq!(nonce, Nonce(0)); - storage .blocks_dal() .insert_miniblock(&create_miniblock(SNAPSHOT_MINIBLOCK_NUMBER.0 + 1)) @@ -143,6 +135,7 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { assert_eq!(nonce, Nonce(321)); let nonce = tx_sender.get_expected_nonce(other_address).await.unwrap(); assert_eq!(nonce, Nonce(25)); + let missing_address = Address::repeat_byte(0xff); let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); assert_eq!(nonce, Nonce(0)); } diff --git a/core/lib/zksync_core/src/api_server/web3/pubsub.rs b/core/lib/zksync_core/src/api_server/web3/pubsub.rs index a8e14e59780..de006602923 100644 --- a/core/lib/zksync_core/src/api_server/web3/pubsub.rs +++ b/core/lib/zksync_core/src/api_server/web3/pubsub.rs @@ -1,6 +1,5 @@ //! (Largely) backend-agnostic logic for dealing with Web3 subscriptions. -use anyhow::Context as _; use chrono::NaiveDateTime; use futures::FutureExt; use tokio::{ @@ -25,7 +24,6 @@ use super::{ metrics::{SubscriptionType, PUB_SUB_METRICS}, namespaces::eth::EVENT_TOPIC_NUMBER_LIMIT, }; -use crate::api_server::execution_sandbox::BlockStartInfo; const BROADCAST_CHANNEL_CAPACITY: usize = 1024; const SUBSCRIPTION_SINK_SEND_TIMEOUT: Duration = Duration::from_secs(1); @@ -58,22 +56,29 @@ struct PubSubNotifier { } impl PubSubNotifier { - async fn get_starting_miniblock_number(&self) -> anyhow::Result { - let mut storage = self - .connection_pool - .connection_tagged("api") - .await - .context("connection_tagged")?; - let sealed_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number().await?; - Ok(match sealed_miniblock_number { - Some(number) => number, - None => { - // We don't have miniblocks in the storage yet. Use the snapshot miniblock number instead. - let start_info = BlockStartInfo::new(&mut storage).await?; - let first_miniblock = start_info.first_miniblock(&mut storage).await?; - MiniblockNumber(first_miniblock.saturating_sub(1)) + // Notifier tasks are spawned independently of the main server task, so we need to wait for + // Postgres to be non-empty separately. + async fn get_starting_miniblock_number( + &self, + stop_receiver: &mut watch::Receiver, + ) -> anyhow::Result> { + while !*stop_receiver.borrow_and_update() { + let mut storage = self.connection_pool.connection_tagged("api").await?; + if let Some(miniblock_number) = + storage.blocks_dal().get_sealed_miniblock_number().await? 
+ { + return Ok(Some(miniblock_number)); } - }) + drop(storage); + + if tokio::time::timeout(self.polling_interval, stop_receiver.changed()) + .await + .is_ok() + { + break; + } + } + Ok(None) // we can only break from the loop if we've received a stop signal } fn emit_event(&self, event: PubSubEvent) { @@ -84,8 +89,15 @@ impl PubSubNotifier { } impl PubSubNotifier { - async fn notify_blocks(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { - let mut last_block_number = self.get_starting_miniblock_number().await?; + async fn notify_blocks(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + let Some(mut last_block_number) = self + .get_starting_miniblock_number(&mut stop_receiver) + .await? + else { + tracing::info!("Stop signal received, pubsub_block_notifier is shutting down"); + return Ok(()); + }; + let mut timer = interval(self.polling_interval); loop { if *stop_receiver.borrow() { @@ -126,8 +138,7 @@ impl PubSubNotifier { ) -> anyhow::Result> { self.connection_pool .connection_tagged("api") - .await - .context("connection_tagged")? + .await? .blocks_web3_dal() .get_block_headers_after(last_block_number) .await @@ -167,16 +178,21 @@ impl PubSubNotifier { ) -> anyhow::Result> { self.connection_pool .connection_tagged("api") - .await - .context("connection_tagged")? + .await? .transactions_web3_dal() .get_pending_txs_hashes_after(last_time, None) .await .map_err(Into::into) } - async fn notify_logs(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { - let mut last_block_number = self.get_starting_miniblock_number().await?; + async fn notify_logs(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + let Some(mut last_block_number) = self + .get_starting_miniblock_number(&mut stop_receiver) + .await? + else { + tracing::info!("Stop signal received, pubsub_logs_notifier is shutting down"); + return Ok(()); + }; let mut timer = interval(self.polling_interval); loop { @@ -207,8 +223,7 @@ impl PubSubNotifier { async fn new_logs(&self, last_block_number: MiniblockNumber) -> anyhow::Result> { self.connection_pool .connection_tagged("api") - .await - .context("connection_tagged")? + .await? .events_web3_dal() .get_all_logs(last_block_number) .await diff --git a/core/lib/zksync_core/src/db_pruner/mod.rs b/core/lib/zksync_core/src/db_pruner/mod.rs index 17057ced881..896758764b1 100644 --- a/core/lib/zksync_core/src/db_pruner/mod.rs +++ b/core/lib/zksync_core/src/db_pruner/mod.rs @@ -1,59 +1,100 @@ -mod metrics; -pub mod prune_conditions; +//! Postgres pruning component. -use std::{fmt::Debug, sync::Arc, time::Duration}; +use std::{fmt, sync::Arc, time::Duration}; +use anyhow::Context as _; use async_trait::async_trait; use tokio::sync::watch; -use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_dal::{pruning_dal::HardPruningStats, Connection, ConnectionPool, Core, CoreDal}; use zksync_types::L1BatchNumber; -use crate::db_pruner::metrics::{MetricPruneType, METRICS}; +use self::{ + metrics::{MetricPruneType, METRICS}, + prune_conditions::{ + ConsistencyCheckerProcessedBatch, L1BatchExistsCondition, L1BatchOlderThanPruneCondition, + NextL1BatchHasMetadataCondition, NextL1BatchWasExecutedCondition, + }, +}; + +mod metrics; +mod prune_conditions; +/// Configuration #[derive(Debug)] pub struct DbPrunerConfig { + /// Delta between soft- and hard-removing data from Postgres. pub soft_and_hard_pruning_time_delta: Duration, + /// Sleep interval between pruning iterations. 
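+ /// This delay also serves as the retry backoff when a pruning iteration fails (see `run()`).
+ ///
+ /// For illustration, a pruner could be configured and started along these lines (values are
+ /// made up, not defaults; `connection_pool` and `stop_receiver` are assumed to exist):
+ ///
+ /// ```ignore
+ /// let pruner = DbPruner::new(
+ ///     DbPrunerConfig {
+ ///         soft_and_hard_pruning_time_delta: Duration::from_secs(60),
+ ///         next_iterations_delay: Duration::from_secs(30),
+ ///         pruned_batch_chunk_size: 10,
+ ///         minimum_l1_batch_age: Duration::from_secs(3600),
+ ///     },
+ ///     connection_pool,
+ /// );
+ /// tokio::spawn(pruner.run(stop_receiver));
+ /// ```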
pub next_iterations_delay: Duration, + /// Number of L1 batches pruned at a time. The pruner will do nothing if there are fewer than this number + /// of batches to prune. pub pruned_batch_chunk_size: u32, + /// Minimum age of an L1 batch in order for it to be eligible for pruning. Setting this to zero + /// will effectively disable this pruning criterion. + pub minimum_l1_batch_age: Duration, } +/// Postgres database pruning component. #[derive(Debug)] pub struct DbPruner { config: DbPrunerConfig, + connection_pool: ConnectionPool<Core>, prune_conditions: Vec<Arc<dyn PruneCondition>>, } /// A condition that must hold for an L1 batch to be eligible for pruning. #[async_trait] -pub trait PruneCondition: Debug + Send + Sync + 'static { +trait PruneCondition: fmt::Debug + fmt::Display + Send + Sync + 'static { async fn is_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> anyhow::Result<bool>; } impl DbPruner { - pub fn new( + pub fn new(config: DbPrunerConfig, connection_pool: ConnectionPool<Core>) -> Self { + let conditions: Vec<Arc<dyn PruneCondition>> = vec![ + Arc::new(L1BatchExistsCondition { + conn: connection_pool.clone(), + }), + Arc::new(NextL1BatchHasMetadataCondition { + conn: connection_pool.clone(), + }), + Arc::new(NextL1BatchWasExecutedCondition { + conn: connection_pool.clone(), + }), + Arc::new(L1BatchOlderThanPruneCondition { + minimum_age: config.minimum_l1_batch_age, + conn: connection_pool.clone(), + }), + Arc::new(ConsistencyCheckerProcessedBatch { + conn: connection_pool.clone(), + }), + ]; + Self::with_conditions(config, connection_pool, conditions) + } + + fn with_conditions( config: DbPrunerConfig, + connection_pool: ConnectionPool<Core>, prune_conditions: Vec<Arc<dyn PruneCondition>>, - ) -> anyhow::Result<Self> { - Ok(Self { + ) -> Self { + Self { config, + connection_pool, prune_conditions, - }) + } } - pub async fn is_l1_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> bool { - let mut successful_conditions: Vec<String> = vec![]; - let mut failed_conditions: Vec<String> = vec![]; - let mut errored_conditions: Vec<String> = vec![]; + async fn is_l1_batch_prunable(&self, l1_batch_number: L1BatchNumber) -> bool { + let mut successful_conditions = vec![]; + let mut failed_conditions = vec![]; + let mut errored_conditions = vec![]; for condition in &self.prune_conditions { match condition.is_batch_prunable(l1_batch_number).await { - Ok(true) => successful_conditions.push(format!("{condition:?}")), - Ok(false) => failed_conditions.push(format!("{condition:?}")), + Ok(true) => successful_conditions.push(condition.to_string()), + Ok(false) => failed_conditions.push(condition.to_string()), Err(error) => { - errored_conditions.push(format!("{condition:?}")); - tracing::warn!( - "Pruning condition for component {condition:?} resulted in an error: {error}" - ) + errored_conditions.push(condition.to_string()); + tracing::warn!("Pruning condition '{condition}' resulted in an error: {error}"); } } } @@ -69,25 +110,25 @@ impl DbPruner { result } - async fn update_l1_batches_metric(&self, pool: &ConnectionPool<Core>) -> anyhow::Result<()> { - let mut storage = pool.connection_tagged("db_pruner").await?; + async fn update_l1_batches_metric(&self) -> anyhow::Result<()> { + let mut storage = self.connection_pool.connection_tagged("db_pruner").await?; let first_l1_batch = storage.blocks_dal().get_earliest_l1_batch_number().await?; let last_l1_batch = storage.blocks_dal().get_sealed_l1_batch_number().await?; - if first_l1_batch.is_none() { + let Some(first_l1_batch) = first_l1_batch else { METRICS.not_pruned_l1_batches_count.set(0); return Ok(()); - } + }; + let last_l1_batch = last_l1_batch + .context("unreachable DB state:
there's an earliest L1 batch, but no latest one")?; METRICS .not_pruned_l1_batches_count - .set((last_l1_batch.unwrap().0 - first_l1_batch.unwrap().0) as u64); + .set((last_l1_batch.0 - first_l1_batch.0).into()); Ok(()) } - async fn soft_prune(&self, pool: &ConnectionPool) -> anyhow::Result { + async fn soft_prune(&self, storage: &mut Connection<'_, Core>) -> anyhow::Result { let latency = METRICS.pruning_chunk_duration[&MetricPruneType::Soft].start(); - - let mut storage = pool.connection_tagged("db_pruner").await?; let mut transaction = storage.start_transaction().await?; let current_pruning_info = transaction.pruning_dal().get_pruning_info().await?; @@ -103,12 +144,11 @@ impl DbPruner { return Ok(false); } - let next_miniblock_to_prune = transaction + let (_, next_miniblock_to_prune) = transaction .blocks_dal() .get_miniblock_range_of_l1_batch(next_l1_batch_to_prune) .await? - .unwrap() - .1; + .with_context(|| format!("L1 batch #{next_l1_batch_to_prune} is ready to be pruned, but has no miniblocks"))?; transaction .pruning_dal() .soft_prune_batches_range(next_l1_batch_to_prune, next_miniblock_to_prune) @@ -124,24 +164,28 @@ impl DbPruner { Ok(true) } - async fn hard_prune(&self, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn hard_prune(&self, storage: &mut Connection<'_, Core>) -> anyhow::Result<()> { let latency = METRICS.pruning_chunk_duration[&MetricPruneType::Hard].start(); - - let mut storage = pool.connection_tagged("db_pruner").await?; let mut transaction = storage.start_transaction().await?; let current_pruning_info = transaction.pruning_dal().get_pruning_info().await?; - transaction + let last_soft_pruned_l1_batch = + current_pruning_info.last_soft_pruned_l1_batch.with_context(|| { + format!("bogus pruning info {current_pruning_info:?}: trying to hard-prune data, but there is no soft-pruned L1 batch") + })?; + let last_soft_pruned_miniblock = + current_pruning_info.last_soft_pruned_miniblock.with_context(|| { + format!("bogus pruning info {current_pruning_info:?}: trying to hard-prune data, but there is no soft-pruned miniblock") + })?; + + let stats = transaction .pruning_dal() - .hard_prune_batches_range( - current_pruning_info.last_soft_pruned_l1_batch.unwrap(), - current_pruning_info.last_soft_pruned_miniblock.unwrap(), - ) + .hard_prune_batches_range(last_soft_pruned_l1_batch, last_soft_pruned_miniblock) .await?; - + Self::report_hard_pruning_stats(stats); transaction.commit().await?; - let mut storage = pool.connection_tagged("db_pruner").await?; + let mut storage = self.connection_pool.connection_tagged("db_pruner").await?; storage .pruning_dal() .run_vacuum_after_hard_pruning() @@ -149,62 +193,88 @@ impl DbPruner { let latency = latency.observe(); tracing::info!( - "Hard pruned db l1_batches up to {} and miniblocks up to {}, operation took {:?}", - current_pruning_info.last_soft_pruned_l1_batch.unwrap(), - current_pruning_info.last_soft_pruned_miniblock.unwrap(), - latency + "Hard pruned db l1_batches up to {last_soft_pruned_l1_batch} and miniblocks up to {last_soft_pruned_miniblock}, \ + operation took {latency:?}" ); - Ok(()) } - pub async fn run_single_iteration(&self, pool: &ConnectionPool) -> anyhow::Result { - let mut storage = pool.connection_tagged("db_pruner").await?; + fn report_hard_pruning_stats(stats: HardPruningStats) { + let HardPruningStats { + deleted_l1_batches, + deleted_miniblocks, + deleted_storage_logs_from_past_batches, + deleted_storage_logs_from_pruned_batches, + deleted_events, + deleted_call_traces, + deleted_l2_to_l1_logs, + } = 
stats; + let deleted_storage_logs = + deleted_storage_logs_from_past_batches + deleted_storage_logs_from_pruned_batches; + tracing::info!( + "Performed pruning of database, deleted {deleted_l1_batches} L1 batches, {deleted_miniblocks} miniblocks, \ + {deleted_storage_logs} storage logs ({deleted_storage_logs_from_pruned_batches} from pruned batches + \ + {deleted_storage_logs_from_past_batches} from past batches), \ + {deleted_events} events, {deleted_call_traces} call traces, {deleted_l2_to_l1_logs} L2-to-L1 logs" + ); + } + + async fn run_single_iteration(&self) -> anyhow::Result { + let mut storage = self.connection_pool.connection_tagged("db_pruner").await?; let current_pruning_info = storage.pruning_dal().get_pruning_info().await?; - // If this if is not entered, it means that the node has restarted after soft pruning + // If this `if` is not entered, it means that the node has restarted after soft pruning if current_pruning_info.last_soft_pruned_l1_batch == current_pruning_info.last_hard_pruned_l1_batch { - let pruning_done = self.soft_prune(pool).await?; + let pruning_done = self.soft_prune(&mut storage).await?; if !pruning_done { return Ok(false); } } + drop(storage); // Don't hold a connection across a timeout tokio::time::sleep(self.config.soft_and_hard_pruning_time_delta).await; - self.hard_prune(pool).await?; - + let mut storage = self.connection_pool.connection_tagged("db_pruner").await?; + self.hard_prune(&mut storage).await?; Ok(true) } - pub async fn run( - self, - pool: ConnectionPool, - stop_receiver: watch::Receiver, - ) -> anyhow::Result<()> { - loop { - if *stop_receiver.borrow() { - tracing::info!("Stop signal received, shutting down DbPruner"); + pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + while !*stop_receiver.borrow_and_update() { + if let Err(err) = self.update_l1_batches_metric().await { + tracing::warn!("Error updating DB pruning metrics: {err:?}"); } - let _ = self.update_l1_batches_metric(&pool).await; - // as this component is not really mission-critical, all errors are generally ignored - let pruning_done = self.run_single_iteration(&pool).await; - if let Err(e) = pruning_done { - tracing::warn!( - "Pruning error, retrying in {:?}, error was: {e}", - self.config.next_iterations_delay - ); - tokio::time::sleep(self.config.next_iterations_delay).await; - } else if !pruning_done.unwrap() { - tokio::time::sleep(self.config.next_iterations_delay).await; + + let should_sleep = match self.run_single_iteration().await { + Err(err) => { + // As this component is not really mission-critical, all errors are generally ignored + tracing::warn!( + "Pruning error, retrying in {:?}, error was: {err:?}", + self.config.next_iterations_delay + ); + true + } + Ok(pruning_done) => !pruning_done, + }; + + if should_sleep + && tokio::time::timeout(self.config.next_iterations_delay, stop_receiver.changed()) + .await + .is_ok() + { + // The pruner either received a stop signal, or the stop receiver was dropped. In any case, + // the pruner should exit. 
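+ // (`tokio::time::timeout` yields `Ok(_)` when `stop_receiver.changed()` finishes first,
+ // i.e. on a stop-signal change or a dropped sender, and `Err(Elapsed)` once the delay passes.)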
+ break; } } + tracing::info!("Stop signal received, shutting down DB pruning"); + Ok(()) } } #[cfg(test)] mod tests { - use std::{collections::HashMap, fmt, fmt::Formatter}; + use std::collections::HashMap; use anyhow::anyhow; use multivm::zk_evm_latest::ethereum_types::H256; @@ -215,6 +285,7 @@ mod tests { use super::*; + #[derive(Debug)] struct ConditionMock { pub name: &'static str, pub is_batch_prunable_responses: HashMap, @@ -235,9 +306,9 @@ mod tests { } } - impl Debug for ConditionMock { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.name) + impl fmt::Display for ConditionMock { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(formatter, "{}", self.name) } } @@ -273,20 +344,21 @@ mod tests { .with_response(L1BatchNumber(3), true) .with_response(L1BatchNumber(4), true), ); - let pruner = DbPruner::new( + let pruner = DbPruner::with_conditions( DbPrunerConfig { - soft_and_hard_pruning_time_delta: Duration::from_secs(0), + soft_and_hard_pruning_time_delta: Duration::ZERO, pruned_batch_chunk_size: 1, - next_iterations_delay: Duration::from_secs(0), + next_iterations_delay: Duration::ZERO, + minimum_l1_batch_age: Duration::ZERO, }, + ConnectionPool::test_pool().await, vec![failing_check, other_failing_check], - ) - .unwrap(); + ); // first check succeeds, but second returns an error assert!(!pruner.is_l1_batch_prunable(L1BatchNumber(1)).await); - //second check fails + // second check fails assert!(!pruner.is_l1_batch_prunable(L1BatchNumber(2)).await); - //first check fails + // first check fails assert!(!pruner.is_l1_batch_prunable(L1BatchNumber(3)).await); assert!(pruner.is_l1_batch_prunable(L1BatchNumber(4)).await); @@ -347,17 +419,18 @@ mod tests { .unwrap(); let nothing_prunable_check = Arc::new(ConditionMock::name("nothing prunable")); - let pruner = DbPruner::new( + let pruner = DbPruner::with_conditions( DbPrunerConfig { - soft_and_hard_pruning_time_delta: Duration::from_secs(0), + soft_and_hard_pruning_time_delta: Duration::ZERO, pruned_batch_chunk_size: 5, - next_iterations_delay: Duration::from_secs(0), + next_iterations_delay: Duration::ZERO, + minimum_l1_batch_age: Duration::ZERO, }, + pool.clone(), vec![nothing_prunable_check], - ) - .unwrap(); + ); - pruner.run_single_iteration(&pool).await.unwrap(); + pruner.run_single_iteration().await.unwrap(); assert_eq!( PruningInfo { @@ -380,17 +453,18 @@ mod tests { .soft_prune_batches_range(L1BatchNumber(2), MiniblockNumber(5)) .await .unwrap(); - let pruner = DbPruner::new( + let pruner = DbPruner::with_conditions( DbPrunerConfig { - soft_and_hard_pruning_time_delta: Duration::from_secs(0), + soft_and_hard_pruning_time_delta: Duration::ZERO, pruned_batch_chunk_size: 5, - next_iterations_delay: Duration::from_secs(0), + next_iterations_delay: Duration::ZERO, + minimum_l1_batch_age: Duration::ZERO, }, + pool.clone(), vec![], //No checks, so every batch is prunable - ) - .unwrap(); + ); - pruner.run_single_iteration(&pool).await.unwrap(); + pruner.run_single_iteration().await.unwrap(); assert_eq!( PruningInfo { @@ -402,7 +476,7 @@ mod tests { conn.pruning_dal().get_pruning_info().await.unwrap() ); - pruner.run_single_iteration(&pool).await.unwrap(); + pruner.run_single_iteration().await.unwrap(); assert_eq!( PruningInfo { last_soft_pruned_l1_batch: Some(L1BatchNumber(7)), @@ -421,17 +495,18 @@ mod tests { insert_miniblocks(&mut conn, 10, 2).await; - let pruner = DbPruner::new( + let pruner = DbPruner::with_conditions( DbPrunerConfig { - soft_and_hard_pruning_time_delta: 
Duration::from_secs(0), + soft_and_hard_pruning_time_delta: Duration::ZERO, pruned_batch_chunk_size: 3, - next_iterations_delay: Duration::from_secs(0), + next_iterations_delay: Duration::ZERO, + minimum_l1_batch_age: Duration::ZERO, }, + pool.clone(), vec![], //No checks, so every batch is prunable - ) - .unwrap(); + ); - pruner.run_single_iteration(&pool).await.unwrap(); + pruner.run_single_iteration().await.unwrap(); assert_eq!( PruningInfo { @@ -443,7 +518,7 @@ mod tests { conn.pruning_dal().get_pruning_info().await.unwrap() ); - pruner.run_single_iteration(&pool).await.unwrap(); + pruner.run_single_iteration().await.unwrap(); assert_eq!( PruningInfo { last_soft_pruned_l1_batch: Some(L1BatchNumber(6)), @@ -466,17 +541,18 @@ mod tests { ConditionMock::name("first chunk prunable").with_response(L1BatchNumber(3), true), ); - let pruner = DbPruner::new( + let pruner = DbPruner::with_conditions( DbPrunerConfig { - soft_and_hard_pruning_time_delta: Duration::from_secs(0), + soft_and_hard_pruning_time_delta: Duration::ZERO, pruned_batch_chunk_size: 3, - next_iterations_delay: Duration::from_secs(0), + next_iterations_delay: Duration::ZERO, + minimum_l1_batch_age: Duration::ZERO, }, + pool.clone(), vec![first_chunk_prunable_check], - ) - .unwrap(); + ); - pruner.run_single_iteration(&pool).await.unwrap(); + pruner.run_single_iteration().await.unwrap(); assert_eq!( PruningInfo { @@ -488,8 +564,8 @@ mod tests { conn.pruning_dal().get_pruning_info().await.unwrap() ); - pruner.run_single_iteration(&pool).await.unwrap(); - //pruning shouldn't have progressed as chunk 6 cannot be pruned + pruner.run_single_iteration().await.unwrap(); + // pruning shouldn't have progressed as chunk 6 cannot be pruned assert_eq!( PruningInfo { last_soft_pruned_l1_batch: Some(L1BatchNumber(3)), diff --git a/core/lib/zksync_core/src/db_pruner/prune_conditions.rs b/core/lib/zksync_core/src/db_pruner/prune_conditions.rs index 38cd9844835..ee1b608ec07 100644 --- a/core/lib/zksync_core/src/db_pruner/prune_conditions.rs +++ b/core/lib/zksync_core/src/db_pruner/prune_conditions.rs @@ -1,7 +1,4 @@ -use std::{ - fmt::{Debug, Formatter}, - time::Duration, -}; +use std::{fmt, time::Duration}; use async_trait::async_trait; use chrono::Utc; @@ -10,14 +7,15 @@ use zksync_types::L1BatchNumber; use crate::db_pruner::PruneCondition; -pub struct L1BatchOlderThanPruneCondition { - pub minimal_age: Duration, +#[derive(Debug)] +pub(super) struct L1BatchOlderThanPruneCondition { + pub minimum_age: Duration, pub conn: ConnectionPool, } -impl Debug for L1BatchOlderThanPruneCondition { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "l1 Batch is older than {:?}", self.minimal_age) +impl fmt::Display for L1BatchOlderThanPruneCondition { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(formatter, "L1 Batch is older than {:?}", self.minimum_age) } } @@ -31,18 +29,19 @@ impl PruneCondition for L1BatchOlderThanPruneCondition { .await?; let is_old_enough = l1_batch_header.is_some() && (Utc::now().timestamp() as u64 - l1_batch_header.unwrap().timestamp - > self.minimal_age.as_secs()); + > self.minimum_age.as_secs()); Ok(is_old_enough) } } -pub struct NextL1BatchWasExecutedCondition { +#[derive(Debug)] +pub(super) struct NextL1BatchWasExecutedCondition { pub conn: ConnectionPool, } -impl Debug for NextL1BatchWasExecutedCondition { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "next l1 batch was executed") +impl fmt::Display for NextL1BatchWasExecutedCondition { + fn 
fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(formatter, "next L1 batch was executed") } } @@ -61,13 +60,14 @@ impl PruneCondition for NextL1BatchWasExecutedCondition { } } -pub struct NextL1BatchHasMetadataCondition { +#[derive(Debug)] +pub(super) struct NextL1BatchHasMetadataCondition { pub conn: ConnectionPool, } -impl Debug for NextL1BatchHasMetadataCondition { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "next l1 batch has metadata") +impl fmt::Display for NextL1BatchHasMetadataCondition { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(formatter, "next L1 batch has metadata") } } @@ -84,13 +84,14 @@ impl PruneCondition for NextL1BatchHasMetadataCondition { } } -pub struct L1BatchExistsCondition { +#[derive(Debug)] +pub(super) struct L1BatchExistsCondition { pub conn: ConnectionPool, } -impl Debug for L1BatchExistsCondition { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "l1 batch exists") +impl fmt::Display for L1BatchExistsCondition { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(formatter, "L1 batch exists") } } @@ -106,13 +107,14 @@ impl PruneCondition for L1BatchExistsCondition { } } -pub struct ConsistencyCheckerProcessedBatch { +#[derive(Debug)] +pub(super) struct ConsistencyCheckerProcessedBatch { pub conn: ConnectionPool, } -impl Debug for ConsistencyCheckerProcessedBatch { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "l1 batch was processed by consistency checker") +impl fmt::Display for ConsistencyCheckerProcessedBatch { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(formatter, "L1 batch was processed by consistency checker") } } From ce10c2f421f35d6b481cda1d76290c68950d7c1a Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Tue, 16 Apr 2024 18:07:54 +0300 Subject: [PATCH 28/29] chore(main): release core 23.0.0 (#1519) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit :robot: I have created a release *beep* *boop* --- ## [23.0.0](https://github.com/matter-labs/zksync-era/compare/core-v22.1.0...core-v23.0.0) (2024-04-16) ### ⚠ BREAKING CHANGES * **vm:** 1 5 0 support ([#1508](https://github.com/matter-labs/zksync-era/issues/1508)) ### Features * **api:** Add `tokens_whitelisted_for_paymaster` ([#1545](https://github.com/matter-labs/zksync-era/issues/1545)) ([6da89cd](https://github.com/matter-labs/zksync-era/commit/6da89cd5222435aa9994fb5989af75ecbe69b6fd)) * **api:** Log info about estimated fee ([#1611](https://github.com/matter-labs/zksync-era/issues/1611)) ([daed58c](https://github.com/matter-labs/zksync-era/commit/daed58ced42546b7ff1e38f12b44a861a2f41eee)) * Archive old prover jobs ([#1516](https://github.com/matter-labs/zksync-era/issues/1516)) ([201476c](https://github.com/matter-labs/zksync-era/commit/201476c8c1869c30605eb2acd462ae1dfe026fd1)) * Archiving of prover in gpu_prover_queue ([#1537](https://github.com/matter-labs/zksync-era/issues/1537)) ([a970629](https://github.com/matter-labs/zksync-era/commit/a9706294fe740cbc9af37eef8d968584a3ec4859)) * **block-reverter:** only require private key for sending revert transactions ([#1579](https://github.com/matter-labs/zksync-era/issues/1579)) ([27de6b7](https://github.com/matter-labs/zksync-era/commit/27de6b79d065ec5a25b5205017158256b9b62d00)) * **config:** Initialize log config from files as well 
([#1566](https://github.com/matter-labs/zksync-era/issues/1566)) ([9e7db59](https://github.com/matter-labs/zksync-era/commit/9e7db5900c74019cf3368db99ed711e0f570852b))
* **configs:** Implement new format of configs and implement protobuf for it ([#1501](https://github.com/matter-labs/zksync-era/issues/1501)) ([086ba5b](https://github.com/matter-labs/zksync-era/commit/086ba5b40565db7c23697830af2b9910b8bd0e34))
* **db:** Wrap sqlx errors in DAL ([#1522](https://github.com/matter-labs/zksync-era/issues/1522)) ([6e9ed8c](https://github.com/matter-labs/zksync-era/commit/6e9ed8c0499830ba71a22b5e112d94aa7e91d517))
* EN Pruning ([#1418](https://github.com/matter-labs/zksync-era/issues/1418)) ([cea6578](https://github.com/matter-labs/zksync-era/commit/cea6578ffb037a2ad8476b6d3fb03416c1e55593))
* **en:** add consistency checker condition in db pruner ([#1653](https://github.com/matter-labs/zksync-era/issues/1653)) ([5ed92b9](https://github.com/matter-labs/zksync-era/commit/5ed92b9810cdb0dc0ceea594a8828cfbbf067006))
* **en:** add manual vacuum step in db pruning ([#1652](https://github.com/matter-labs/zksync-era/issues/1652)) ([c818be3](https://github.com/matter-labs/zksync-era/commit/c818be362aef9244bb644a2c50a620ac1ef077b4))
* **en:** Rate-limit L2 client requests ([#1500](https://github.com/matter-labs/zksync-era/issues/1500)) ([3f55f1e](https://github.com/matter-labs/zksync-era/commit/3f55f1e50e053c52cd9989581abf5848440920c1))
* **en:** Rework storing and using protective reads ([#1515](https://github.com/matter-labs/zksync-era/issues/1515)) ([13c0c45](https://github.com/matter-labs/zksync-era/commit/13c0c454b887f4dfad68cab9f2a5f421c1df5f8c))
* **en:** support for snapshots recovery in version_sync_task.rs ([#1585](https://github.com/matter-labs/zksync-era/issues/1585)) ([f911276](https://github.com/matter-labs/zksync-era/commit/f9112769c5885d44a6fb2d04fb2bf5abd4c0a2a2))
* **eth-watch:** Brush up Ethereum watcher component ([#1596](https://github.com/matter-labs/zksync-era/issues/1596)) ([b0b8f89](https://github.com/matter-labs/zksync-era/commit/b0b8f8932b03d98780d504f9acf394547dac7724))
* Expose component configs as info metrics ([#1584](https://github.com/matter-labs/zksync-era/issues/1584)) ([7c8ae40](https://github.com/matter-labs/zksync-era/commit/7c8ae40357a6ceeeb097c019588a8be18326bed1))
* **external-node:** external node distributed operation mode ([#1457](https://github.com/matter-labs/zksync-era/issues/1457)) ([777ffca](https://github.com/matter-labs/zksync-era/commit/777ffca152045c6a49298f714a44c8cfcde8a1d5))
* Extract commitment generator into a separate crate ([#1636](https://github.com/matter-labs/zksync-era/issues/1636)) ([f763d1f](https://github.com/matter-labs/zksync-era/commit/f763d1f193bac0dcdc367c8566b31d8384fe0651))
* Extract eth_watch and shared metrics into separate crates ([#1572](https://github.com/matter-labs/zksync-era/issues/1572)) ([4013771](https://github.com/matter-labs/zksync-era/commit/4013771e4a7a2a14828aa99153edb74a1861fa94))
* Finalize fee address migration ([#1617](https://github.com/matter-labs/zksync-era/issues/1617)) ([713f56b](https://github.com/matter-labs/zksync-era/commit/713f56b14433a39e8cc431be3150a9abe574984f))
* fix availability checker ([#1574](https://github.com/matter-labs/zksync-era/issues/1574)) ([b2f21fb](https://github.com/matter-labs/zksync-era/commit/b2f21fb1d72e65a738db9f7bc9f162a410d36c9b))
* **genesis:** Add genesis config generator ([#1671](https://github.com/matter-labs/zksync-era/issues/1671)) ([45164fa](https://github.com/matter-labs/zksync-era/commit/45164fa6cb174c04c8542246cdb79c4f393339af))
* **genesis:** mark system contracts bytecodes as known ([#1554](https://github.com/matter-labs/zksync-era/issues/1554)) ([5ffec51](https://github.com/matter-labs/zksync-era/commit/5ffec511736bdc74542280b391b253febbe517f1))
* Migrate gas limit to u64 ([#1538](https://github.com/matter-labs/zksync-era/issues/1538)) ([56dc049](https://github.com/matter-labs/zksync-era/commit/56dc049993d673635e55b545962bf4f6d4f32739))
* **node-framework:** Add consensus support ([#1546](https://github.com/matter-labs/zksync-era/issues/1546)) ([27fe475](https://github.com/matter-labs/zksync-era/commit/27fe475dcd22e33efdeaf0f8d24f32a595538e3f))
* **node-framework:** Add consistency checker ([#1527](https://github.com/matter-labs/zksync-era/issues/1527)) ([3c28c25](https://github.com/matter-labs/zksync-era/commit/3c28c2540f420602cf63748b5e6de3002bfe90fb))
* remove unused variables in prover configs ([#1564](https://github.com/matter-labs/zksync-era/issues/1564)) ([d32a019](https://github.com/matter-labs/zksync-era/commit/d32a01918b2e42b8187fb6740b510b9b8798cafe))
* Remove zksync-rs SDK ([#1559](https://github.com/matter-labs/zksync-era/issues/1559)) ([cc78e1d](https://github.com/matter-labs/zksync-era/commit/cc78e1d98cc51e86b3c1b70dbe3dc38deaa9f3c2))
* soft removal of `events_queue` table ([#1504](https://github.com/matter-labs/zksync-era/issues/1504)) ([5899bc6](https://github.com/matter-labs/zksync-era/commit/5899bc6d8f1b7ef03c2bb5d677e5d60cd6ed0fe5))
* **sqlx:** Use offline mode by default ([#1539](https://github.com/matter-labs/zksync-era/issues/1539)) ([af01edd](https://github.com/matter-labs/zksync-era/commit/af01edd6cedd96c0ce73b24e0d74452ec6c38d43))
* Use config for max number of circuits ([#1573](https://github.com/matter-labs/zksync-era/issues/1573)) ([9fcb87e](https://github.com/matter-labs/zksync-era/commit/9fcb87e9b126e2ad5465c6e2326d87cdc2f1a5cb))
* Validium ([#1461](https://github.com/matter-labs/zksync-era/issues/1461)) ([132a169](https://github.com/matter-labs/zksync-era/commit/132a1691de00eb3ca8fa7bd456a4591d84b24a5d))
* **vm:** 1 5 0 support ([#1508](https://github.com/matter-labs/zksync-era/issues/1508)) ([a6ccd25](https://github.com/matter-labs/zksync-era/commit/a6ccd2533b65a7464f097e2082b690bd426d7694))

### Bug Fixes

* **api:** Change error code for Web3Error::NotImplemented ([#1521](https://github.com/matter-labs/zksync-era/issues/1521)) ([0a13602](https://github.com/matter-labs/zksync-era/commit/0a13602e35fbfbb1abbdf7cefc33b2894754b199))
* **cache:** use factory deps cache correctly ([#1547](https://github.com/matter-labs/zksync-era/issues/1547)) ([a923e11](https://github.com/matter-labs/zksync-era/commit/a923e11ecfecc3de9b0b2cb578939a1f877a1e8a))
* **CI:** Less flaky CI ([#1536](https://github.com/matter-labs/zksync-era/issues/1536)) ([2444b53](https://github.com/matter-labs/zksync-era/commit/2444b5375fcd305bf2f0b7c2bb300316b99e37e2))
* **configs:** Make genesis fields optional ([#1555](https://github.com/matter-labs/zksync-era/issues/1555)) ([2d0ef46](https://github.com/matter-labs/zksync-era/commit/2d0ef46035142eafcb2974c323eab9fc04a4b6a7))
* contract verifier config test ([#1583](https://github.com/matter-labs/zksync-era/issues/1583)) ([030d447](https://github.com/matter-labs/zksync-era/commit/030d447cff0069cc5f218f767d9af7de5dba3a0f))
* **contract-verifier-api:** permissive cors for contract verifier api server ([#1525](https://github.com/matter-labs/zksync-era/issues/1525)) ([423f4a7](https://github.com/matter-labs/zksync-era/commit/423f4a7a906ad336a2853ebd6eb837bf7c0c0572))
* **db:** Fix "values cache update task failed" panics ([#1561](https://github.com/matter-labs/zksync-era/issues/1561)) ([f7c5c14](https://github.com/matter-labs/zksync-era/commit/f7c5c142e46de19abee540deb8a067934be17ca9))
* **en:** do not log error when whitelisted_tokens_for_aa is not supported ([#1600](https://github.com/matter-labs/zksync-era/issues/1600)) ([06c87f5](https://github.com/matter-labs/zksync-era/commit/06c87f5f58c325f6ca3652c729d85e56ab4e8ebe))
* **en:** Fix DB pool for Postgres metrics on EN ([#1675](https://github.com/matter-labs/zksync-era/issues/1675)) ([c51ca91](https://github.com/matter-labs/zksync-era/commit/c51ca91f05e65bffd52c190115bdb39180880f2b))
* **en:** improved tree recovery logs ([#1619](https://github.com/matter-labs/zksync-era/issues/1619)) ([ef12df7](https://github.com/matter-labs/zksync-era/commit/ef12df73a891579a87903055acae02a25da03ff6))
* **en:** Reduce amount of data in snapshot header ([#1528](https://github.com/matter-labs/zksync-era/issues/1528)) ([afa1cf1](https://github.com/matter-labs/zksync-era/commit/afa1cf1fd0359b27d6689b03bf76d3db2b3101b1))
* **eth-client:** Use local FeeHistory type ([#1552](https://github.com/matter-labs/zksync-era/issues/1552)) ([5a512e8](https://github.com/matter-labs/zksync-era/commit/5a512e8f853808332f9324d89be961a12b7fdbd0))
* instruction count diff always N/A in VM perf comparison ([#1608](https://github.com/matter-labs/zksync-era/issues/1608)) ([c0f3104](https://github.com/matter-labs/zksync-era/commit/c0f3104b63b32f681cb11233d3d41efd09b888a7))
* **vm:** Fix storage oracle and estimation ([#1634](https://github.com/matter-labs/zksync-era/issues/1634)) ([932b14b](https://github.com/matter-labs/zksync-era/commit/932b14b6ddee35375fbc302523da2d4d37f1d46b))
* **vm:** Increase log demuxer cycles on far calls ([#1575](https://github.com/matter-labs/zksync-era/issues/1575)) ([90eb9d8](https://github.com/matter-labs/zksync-era/commit/90eb9d8b2f3a50544e4020964dfea90b19a9894b))

### Performance Improvements

* **db:** rework "finalized" block SQL query ([#1524](https://github.com/matter-labs/zksync-era/issues/1524)) ([2b27290](https://github.com/matter-labs/zksync-era/commit/2b27290139cb3fa87412613e3b987d9d9c882275))
* **merkle tree:** Manage indices / filters in RocksDB ([#1550](https://github.com/matter-labs/zksync-era/issues/1550)) ([6bbfa06](https://github.com/matter-labs/zksync-era/commit/6bbfa064371089a31d6751de05682edda1dbfe2e))

---

This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please).
---------

Co-authored-by: romanbrodetskiy
---
 .github/release-please/manifest.json |  2 +-
 Cargo.lock                           |  2 +-
 core/CHANGELOG.md                    | 68 ++++++++++++++++++++++++++++
 core/bin/external_node/Cargo.toml    |  2 +-
 4 files changed, 71 insertions(+), 3 deletions(-)

diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json
index d9f05737997..46b9ceb25c2 100644
--- a/.github/release-please/manifest.json
+++ b/.github/release-please/manifest.json
@@ -1,4 +1,4 @@
{
-  "core": "22.1.0",
+  "core": "23.0.0",
  "prover": "12.2.0"
}

diff --git a/Cargo.lock b/Cargo.lock
index 5f7e8af3a57..67d3ff8c2f1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8599,7 +8599,7 @@ dependencies = [

[[package]]
name = "zksync_external_node"
-version = "22.1.0"
+version = "23.0.0"
dependencies = [
 "anyhow",
 "clap 4.4.6",

diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md
index 5a8b3ce9e42..1066d6c5b8d 100644
--- a/core/CHANGELOG.md
+++ b/core/CHANGELOG.md
@@ -1,5 +1,73 @@
# Changelog

+## [23.0.0](https://github.com/matter-labs/zksync-era/compare/core-v22.1.0...core-v23.0.0) (2024-04-16)
+
+
+### ⚠ BREAKING CHANGES
+
+* **vm:** 1 5 0 support ([#1508](https://github.com/matter-labs/zksync-era/issues/1508))
+
+### Features
+
+* **api:** Add `tokens_whitelisted_for_paymaster` ([#1545](https://github.com/matter-labs/zksync-era/issues/1545)) ([6da89cd](https://github.com/matter-labs/zksync-era/commit/6da89cd5222435aa9994fb5989af75ecbe69b6fd))
+* **api:** Log info about estimated fee ([#1611](https://github.com/matter-labs/zksync-era/issues/1611)) ([daed58c](https://github.com/matter-labs/zksync-era/commit/daed58ced42546b7ff1e38f12b44a861a2f41eee))
+* Archive old prover jobs ([#1516](https://github.com/matter-labs/zksync-era/issues/1516)) ([201476c](https://github.com/matter-labs/zksync-era/commit/201476c8c1869c30605eb2acd462ae1dfe026fd1))
+* Archiving of prover in gpu_prover_queue ([#1537](https://github.com/matter-labs/zksync-era/issues/1537)) ([a970629](https://github.com/matter-labs/zksync-era/commit/a9706294fe740cbc9af37eef8d968584a3ec4859))
+* **block-reverter:** only require private key for sending revert transactions ([#1579](https://github.com/matter-labs/zksync-era/issues/1579)) ([27de6b7](https://github.com/matter-labs/zksync-era/commit/27de6b79d065ec5a25b5205017158256b9b62d00))
+* **config:** Initialize log config from files as well ([#1566](https://github.com/matter-labs/zksync-era/issues/1566)) ([9e7db59](https://github.com/matter-labs/zksync-era/commit/9e7db5900c74019cf3368db99ed711e0f570852b))
+* **configs:** Implement new format of configs and implement protobuf for it ([#1501](https://github.com/matter-labs/zksync-era/issues/1501)) ([086ba5b](https://github.com/matter-labs/zksync-era/commit/086ba5b40565db7c23697830af2b9910b8bd0e34))
+* **db:** Wrap sqlx errors in DAL ([#1522](https://github.com/matter-labs/zksync-era/issues/1522)) ([6e9ed8c](https://github.com/matter-labs/zksync-era/commit/6e9ed8c0499830ba71a22b5e112d94aa7e91d517))
+* EN Pruning ([#1418](https://github.com/matter-labs/zksync-era/issues/1418)) ([cea6578](https://github.com/matter-labs/zksync-era/commit/cea6578ffb037a2ad8476b6d3fb03416c1e55593))
+* **en:** add consistency checker condition in db pruner ([#1653](https://github.com/matter-labs/zksync-era/issues/1653)) ([5ed92b9](https://github.com/matter-labs/zksync-era/commit/5ed92b9810cdb0dc0ceea594a8828cfbbf067006))
+* **en:** add manual vacuum step in db pruning ([#1652](https://github.com/matter-labs/zksync-era/issues/1652)) ([c818be3](https://github.com/matter-labs/zksync-era/commit/c818be362aef9244bb644a2c50a620ac1ef077b4))
+* **en:** Rate-limit L2 client requests ([#1500](https://github.com/matter-labs/zksync-era/issues/1500)) ([3f55f1e](https://github.com/matter-labs/zksync-era/commit/3f55f1e50e053c52cd9989581abf5848440920c1))
+* **en:** Rework storing and using protective reads ([#1515](https://github.com/matter-labs/zksync-era/issues/1515)) ([13c0c45](https://github.com/matter-labs/zksync-era/commit/13c0c454b887f4dfad68cab9f2a5f421c1df5f8c))
+* **en:** support for snapshots recovery in version_sync_task.rs ([#1585](https://github.com/matter-labs/zksync-era/issues/1585)) ([f911276](https://github.com/matter-labs/zksync-era/commit/f9112769c5885d44a6fb2d04fb2bf5abd4c0a2a2))
+* **eth-watch:** Brush up Ethereum watcher component ([#1596](https://github.com/matter-labs/zksync-era/issues/1596)) ([b0b8f89](https://github.com/matter-labs/zksync-era/commit/b0b8f8932b03d98780d504f9acf394547dac7724))
+* Expose component configs as info metrics ([#1584](https://github.com/matter-labs/zksync-era/issues/1584)) ([7c8ae40](https://github.com/matter-labs/zksync-era/commit/7c8ae40357a6ceeeb097c019588a8be18326bed1))
+* **external-node:** external node distributed operation mode ([#1457](https://github.com/matter-labs/zksync-era/issues/1457)) ([777ffca](https://github.com/matter-labs/zksync-era/commit/777ffca152045c6a49298f714a44c8cfcde8a1d5))
+* Extract commitment generator into a separate crate ([#1636](https://github.com/matter-labs/zksync-era/issues/1636)) ([f763d1f](https://github.com/matter-labs/zksync-era/commit/f763d1f193bac0dcdc367c8566b31d8384fe0651))
+* Extract eth_watch and shared metrics into separate crates ([#1572](https://github.com/matter-labs/zksync-era/issues/1572)) ([4013771](https://github.com/matter-labs/zksync-era/commit/4013771e4a7a2a14828aa99153edb74a1861fa94))
+* Finalize fee address migration ([#1617](https://github.com/matter-labs/zksync-era/issues/1617)) ([713f56b](https://github.com/matter-labs/zksync-era/commit/713f56b14433a39e8cc431be3150a9abe574984f))
+* fix availability checker ([#1574](https://github.com/matter-labs/zksync-era/issues/1574)) ([b2f21fb](https://github.com/matter-labs/zksync-era/commit/b2f21fb1d72e65a738db9f7bc9f162a410d36c9b))
+* **genesis:** Add genesis config generator ([#1671](https://github.com/matter-labs/zksync-era/issues/1671)) ([45164fa](https://github.com/matter-labs/zksync-era/commit/45164fa6cb174c04c8542246cdb79c4f393339af))
+* **genesis:** mark system contracts bytecodes as known ([#1554](https://github.com/matter-labs/zksync-era/issues/1554)) ([5ffec51](https://github.com/matter-labs/zksync-era/commit/5ffec511736bdc74542280b391b253febbe517f1))
+* Migrate gas limit to u64 ([#1538](https://github.com/matter-labs/zksync-era/issues/1538)) ([56dc049](https://github.com/matter-labs/zksync-era/commit/56dc049993d673635e55b545962bf4f6d4f32739))
+* **node-framework:** Add consensus support ([#1546](https://github.com/matter-labs/zksync-era/issues/1546)) ([27fe475](https://github.com/matter-labs/zksync-era/commit/27fe475dcd22e33efdeaf0f8d24f32a595538e3f))
+* **node-framework:** Add consistency checker ([#1527](https://github.com/matter-labs/zksync-era/issues/1527)) ([3c28c25](https://github.com/matter-labs/zksync-era/commit/3c28c2540f420602cf63748b5e6de3002bfe90fb))
+* remove unused variables in prover configs ([#1564](https://github.com/matter-labs/zksync-era/issues/1564)) ([d32a019](https://github.com/matter-labs/zksync-era/commit/d32a01918b2e42b8187fb6740b510b9b8798cafe))
+* Remove zksync-rs SDK ([#1559](https://github.com/matter-labs/zksync-era/issues/1559)) ([cc78e1d](https://github.com/matter-labs/zksync-era/commit/cc78e1d98cc51e86b3c1b70dbe3dc38deaa9f3c2))
+* soft removal of `events_queue` table ([#1504](https://github.com/matter-labs/zksync-era/issues/1504)) ([5899bc6](https://github.com/matter-labs/zksync-era/commit/5899bc6d8f1b7ef03c2bb5d677e5d60cd6ed0fe5))
+* **sqlx:** Use offline mode by default ([#1539](https://github.com/matter-labs/zksync-era/issues/1539)) ([af01edd](https://github.com/matter-labs/zksync-era/commit/af01edd6cedd96c0ce73b24e0d74452ec6c38d43))
+* Use config for max number of circuits ([#1573](https://github.com/matter-labs/zksync-era/issues/1573)) ([9fcb87e](https://github.com/matter-labs/zksync-era/commit/9fcb87e9b126e2ad5465c6e2326d87cdc2f1a5cb))
+* Validium ([#1461](https://github.com/matter-labs/zksync-era/issues/1461)) ([132a169](https://github.com/matter-labs/zksync-era/commit/132a1691de00eb3ca8fa7bd456a4591d84b24a5d))
+* **vm:** 1 5 0 support ([#1508](https://github.com/matter-labs/zksync-era/issues/1508)) ([a6ccd25](https://github.com/matter-labs/zksync-era/commit/a6ccd2533b65a7464f097e2082b690bd426d7694))
+
+
+### Bug Fixes
+
+* **api:** Change error code for Web3Error::NotImplemented ([#1521](https://github.com/matter-labs/zksync-era/issues/1521)) ([0a13602](https://github.com/matter-labs/zksync-era/commit/0a13602e35fbfbb1abbdf7cefc33b2894754b199))
+* **cache:** use factory deps cache correctly ([#1547](https://github.com/matter-labs/zksync-era/issues/1547)) ([a923e11](https://github.com/matter-labs/zksync-era/commit/a923e11ecfecc3de9b0b2cb578939a1f877a1e8a))
+* **CI:** Less flaky CI ([#1536](https://github.com/matter-labs/zksync-era/issues/1536)) ([2444b53](https://github.com/matter-labs/zksync-era/commit/2444b5375fcd305bf2f0b7c2bb300316b99e37e2))
+* **configs:** Make genesis fields optional ([#1555](https://github.com/matter-labs/zksync-era/issues/1555)) ([2d0ef46](https://github.com/matter-labs/zksync-era/commit/2d0ef46035142eafcb2974c323eab9fc04a4b6a7))
+* contract verifier config test ([#1583](https://github.com/matter-labs/zksync-era/issues/1583)) ([030d447](https://github.com/matter-labs/zksync-era/commit/030d447cff0069cc5f218f767d9af7de5dba3a0f))
+* **contract-verifier-api:** permissive cors for contract verifier api server ([#1525](https://github.com/matter-labs/zksync-era/issues/1525)) ([423f4a7](https://github.com/matter-labs/zksync-era/commit/423f4a7a906ad336a2853ebd6eb837bf7c0c0572))
+* **db:** Fix "values cache update task failed" panics ([#1561](https://github.com/matter-labs/zksync-era/issues/1561)) ([f7c5c14](https://github.com/matter-labs/zksync-era/commit/f7c5c142e46de19abee540deb8a067934be17ca9))
+* **en:** do not log error when whitelisted_tokens_for_aa is not supported ([#1600](https://github.com/matter-labs/zksync-era/issues/1600)) ([06c87f5](https://github.com/matter-labs/zksync-era/commit/06c87f5f58c325f6ca3652c729d85e56ab4e8ebe))
+* **en:** Fix DB pool for Postgres metrics on EN ([#1675](https://github.com/matter-labs/zksync-era/issues/1675)) ([c51ca91](https://github.com/matter-labs/zksync-era/commit/c51ca91f05e65bffd52c190115bdb39180880f2b))
+* **en:** improved tree recovery logs ([#1619](https://github.com/matter-labs/zksync-era/issues/1619)) ([ef12df7](https://github.com/matter-labs/zksync-era/commit/ef12df73a891579a87903055acae02a25da03ff6))
+* **en:** Reduce amount of data in snapshot header ([#1528](https://github.com/matter-labs/zksync-era/issues/1528)) ([afa1cf1](https://github.com/matter-labs/zksync-era/commit/afa1cf1fd0359b27d6689b03bf76d3db2b3101b1))
+* **eth-client:** Use local FeeHistory type ([#1552](https://github.com/matter-labs/zksync-era/issues/1552)) ([5a512e8](https://github.com/matter-labs/zksync-era/commit/5a512e8f853808332f9324d89be961a12b7fdbd0))
+* instruction count diff always N/A in VM perf comparison ([#1608](https://github.com/matter-labs/zksync-era/issues/1608)) ([c0f3104](https://github.com/matter-labs/zksync-era/commit/c0f3104b63b32f681cb11233d3d41efd09b888a7))
+* **vm:** Fix storage oracle and estimation ([#1634](https://github.com/matter-labs/zksync-era/issues/1634)) ([932b14b](https://github.com/matter-labs/zksync-era/commit/932b14b6ddee35375fbc302523da2d4d37f1d46b))
+* **vm:** Increase log demuxer cycles on far calls ([#1575](https://github.com/matter-labs/zksync-era/issues/1575)) ([90eb9d8](https://github.com/matter-labs/zksync-era/commit/90eb9d8b2f3a50544e4020964dfea90b19a9894b))
+
+
+### Performance Improvements
+
+* **db:** rework "finalized" block SQL query ([#1524](https://github.com/matter-labs/zksync-era/issues/1524)) ([2b27290](https://github.com/matter-labs/zksync-era/commit/2b27290139cb3fa87412613e3b987d9d9c882275))
+* **merkle tree:** Manage indices / filters in RocksDB ([#1550](https://github.com/matter-labs/zksync-era/issues/1550)) ([6bbfa06](https://github.com/matter-labs/zksync-era/commit/6bbfa064371089a31d6751de05682edda1dbfe2e))
+
## [22.1.0](https://github.com/matter-labs/zksync-era/compare/core-v22.0.0...core-v22.1.0) (2024-03-28)

diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml
index d0f653abdb3..f2c38e8b1f8 100644
--- a/core/bin/external_node/Cargo.toml
+++ b/core/bin/external_node/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "zksync_external_node"
-version = "22.1.0" # x-release-please-version
+version = "23.0.0" # x-release-please-version
edition.workspace = true
authors.workspace = true
homepage.workspace = true

From 02f7cd53542ff1a0d25bb9da38ee3c3fad8b0010 Mon Sep 17 00:00:00 2001
From: pompon0
Date: Tue, 16 Apr 2024 18:17:57 +0200
Subject: [PATCH 29/29] refactor: removed polling from external io (#1690)

ExternalIO contained unnecessary sleeps, which slowed down block
processing.
---
 .../zksync_core/src/sync_layer/external_io.rs | 130 ++++++++----------
 .../zksync_core/src/sync_layer/sync_action.rs |  35 ++++-
 2 files changed, 88 insertions(+), 77 deletions(-)

diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs
index 3eec7a18612..95722d69088 100644
--- a/core/lib/zksync_core/src/sync_layer/external_io.rs
+++ b/core/lib/zksync_core/src/sync_layer/external_io.rs
@@ -17,7 +17,7 @@ use super::{
};
use crate::state_keeper::{
    io::{
-        common::{load_pending_batch, poll_iters, IoCursor},
+        common::{load_pending_batch, IoCursor},
        L1BatchParams, MiniblockParams, PendingBatchData, StateKeeperIO,
    },
    metrics::KEEPER_METRICS,
@@ -25,9 +25,6 @@ use crate::state_keeper::{
    updates::UpdatesManager,
};

-/// The interval between the action queue polling attempts for the new actions.
-const POLL_INTERVAL: Duration = Duration::from_millis(100);
-
/// ExternalIO is the IO abstraction for the state keeper that is used in the external node.
/// It receives a sequence of actions from the fetcher via the action queue and propagates it
/// into the state keeper.
@@ -220,34 +217,31 @@ impl StateKeeperIO for ExternalIO {
        max_wait: Duration,
    ) -> anyhow::Result<Option<L1BatchParams>> {
        tracing::debug!("Waiting for the new batch params");
-        for _ in 0..poll_iters(POLL_INTERVAL, max_wait) {
-            match self.actions.pop_action() {
-                Some(SyncAction::OpenBatch {
-                    params,
-                    number,
-                    first_miniblock_number,
-                }) => {
-                    anyhow::ensure!(
-                        number == cursor.l1_batch,
-                        "Batch number mismatch: expected {}, got {number}",
-                        cursor.l1_batch
-                    );
-                    anyhow::ensure!(
-                        first_miniblock_number == cursor.next_miniblock,
-                        "Miniblock number mismatch: expected {}, got {first_miniblock_number}",
-                        cursor.next_miniblock
-                    );
-                    return Ok(Some(params));
-                }
-                Some(other) => {
-                    anyhow::bail!("unexpected action in the action queue: {other:?}");
-                }
-                None => {
-                    tokio::time::sleep(POLL_INTERVAL).await;
-                }
+        let Some(action) = self.actions.recv_action(max_wait).await else {
+            return Ok(None);
+        };
+        match action {
+            SyncAction::OpenBatch {
+                params,
+                number,
+                first_miniblock_number,
+            } => {
+                anyhow::ensure!(
+                    number == cursor.l1_batch,
+                    "Batch number mismatch: expected {}, got {number}",
+                    cursor.l1_batch
+                );
+                anyhow::ensure!(
+                    first_miniblock_number == cursor.next_miniblock,
+                    "Miniblock number mismatch: expected {}, got {first_miniblock_number}",
+                    cursor.next_miniblock
+                );
+                return Ok(Some(params));
+            }
+            other => {
+                anyhow::bail!("unexpected action in the action queue: {other:?}");
            }
        }
-        Ok(None)
    }

    async fn wait_for_new_miniblock_params(
@@ -256,62 +250,52 @@
        max_wait: Duration,
    ) -> anyhow::Result<Option<MiniblockParams>> {
        // Wait for the next miniblock to appear in the queue.
-        let actions = &mut self.actions;
-        for _ in 0..poll_iters(POLL_INTERVAL, max_wait) {
-            match actions.pop_action() {
-                Some(SyncAction::Miniblock { params, number }) => {
-                    anyhow::ensure!(
-                        number == cursor.next_miniblock,
-                        "Miniblock number mismatch: expected {}, got {number}",
-                        cursor.next_miniblock
-                    );
-                    return Ok(Some(params));
-                }
-                Some(other) => {
-                    anyhow::bail!(
-                        "Unexpected action in the queue while waiting for the next miniblock: {other:?}"
-                    );
-                }
-                None => {
-                    tokio::time::sleep(POLL_INTERVAL).await;
-                }
+        let Some(action) = self.actions.recv_action(max_wait).await else {
+            return Ok(None);
+        };
+        match action {
+            SyncAction::Miniblock { params, number } => {
+                anyhow::ensure!(
+                    number == cursor.next_miniblock,
+                    "Miniblock number mismatch: expected {}, got {number}",
+                    cursor.next_miniblock
+                );
+                return Ok(Some(params));
+            }
+            other => {
+                anyhow::bail!(
+                    "Unexpected action in the queue while waiting for the next miniblock: {other:?}"
+                );
            }
        }
-        Ok(None)
    }

    async fn wait_for_next_tx(
        &mut self,
        max_wait: Duration,
    ) -> anyhow::Result<Option<Transaction>> {
-        let actions = &mut self.actions;
        tracing::debug!(
            "Waiting for the new tx, next action is {:?}",
-            actions.peek_action()
+            self.actions.peek_action()
        );
-        for _ in 0..poll_iters(POLL_INTERVAL, max_wait) {
-            match actions.peek_action() {
-                Some(SyncAction::Tx(_)) => {
-                    let SyncAction::Tx(tx) = actions.pop_action().unwrap() else {
-                        unreachable!()
-                    };
-                    return Ok(Some(Transaction::from(*tx)));
-                }
-                Some(SyncAction::SealMiniblock | SyncAction::SealBatch) => {
-                    // No more transactions in the current miniblock; the state keeper should seal it.
-                    return Ok(None);
-                }
-                Some(other) => {
-                    anyhow::bail!(
-                        "Unexpected action in the queue while waiting for the next transaction: {other:?}"
-                    );
-                }
-                _ => {
-                    tokio::time::sleep(POLL_INTERVAL).await;
-                }
+        let Some(action) = self.actions.peek_action_async(max_wait).await else {
+            return Ok(None);
+        };
+        match action {
+            SyncAction::Tx(tx) => {
+                self.actions.pop_action().unwrap();
+                return Ok(Some(Transaction::from(*tx)));
+            }
+            SyncAction::SealMiniblock | SyncAction::SealBatch => {
+                // No more transactions in the current miniblock; the state keeper should seal it.
+                return Ok(None);
+            }
+            other => {
+                anyhow::bail!(
+                    "Unexpected action in the queue while waiting for the next transaction: {other:?}"
+                );
            }
        }
-        Ok(None)
    }

    async fn rollback(&mut self, tx: Transaction) -> anyhow::Result<()> {
diff --git a/core/lib/zksync_core/src/sync_layer/sync_action.rs b/core/lib/zksync_core/src/sync_layer/sync_action.rs
index 7b009872f7a..0292d9805bb 100644
--- a/core/lib/zksync_core/src/sync_layer/sync_action.rs
+++ b/core/lib/zksync_core/src/sync_layer/sync_action.rs
@@ -90,11 +90,24 @@ impl ActionQueue {
            QUEUE_METRICS.action_queue_size.dec_by(1);
            return Some(peeked);
        }
-        let action = self.receiver.try_recv().ok();
-        if action.is_some() {
-            QUEUE_METRICS.action_queue_size.dec_by(1);
+        let action = self.receiver.try_recv().ok()?;
+        QUEUE_METRICS.action_queue_size.dec_by(1);
+        Some(action)
+    }
+
+    /// Removes the first action from the queue, waiting up to `max_wait` for one to arrive if the queue is empty.
+    pub(super) async fn recv_action(
+        &mut self,
+        max_wait: tokio::time::Duration,
+    ) -> Option<SyncAction> {
+        if let Some(action) = self.pop_action() {
+            return Some(action);
        }
-        action
+        let action = tokio::time::timeout(max_wait, self.receiver.recv())
+            .await
+            .ok()??;
+        QUEUE_METRICS.action_queue_size.dec_by(1);
+        Some(action)
    }

    /// Returns the first action from the queue without removing it.
@@ -105,6 +118,20 @@
        self.peeked = self.receiver.try_recv().ok();
        self.peeked.clone()
    }
+
+    /// Returns the first action from the queue without removing it, waiting up to `max_wait` for one to arrive if the queue is empty.
+    pub(super) async fn peek_action_async(
+        &mut self,
+        max_wait: tokio::time::Duration,
+    ) -> Option<SyncAction> {
+        if let Some(action) = &self.peeked {
+            return Some(action.clone());
+        }
+        self.peeked = tokio::time::timeout(max_wait, self.receiver.recv())
+            .await
+            .ok()?;
+        self.peeked.clone()
+    }
}

/// An instruction for the ExternalIO to request a certain action from the state keeper.
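The new `recv_action` and `peek_action_async` above reduce to the same primitive: wrap the channel's async `recv()` in `tokio::time::timeout`, so the task parks until either an action arrives or the deadline passes, instead of sleeping in fixed 100 ms increments. A minimal, self-contained sketch of that pattern follows; the `Action` and `Queue` names are simplified stand-ins for `SyncAction` and `ActionQueue` (the real queue additionally maintains the size metric and a peeked slot), and it assumes a tokio unbounded mpsc channel as the transport:

```rust
use tokio::{
    sync::mpsc,
    time::{timeout, Duration},
};

/// Stand-in payload for `SyncAction` in this sketch.
#[derive(Debug)]
struct Action(u32);

struct Queue {
    receiver: mpsc::UnboundedReceiver<Action>,
}

impl Queue {
    /// Returns the next action, waiting up to `max_wait` for one to arrive.
    /// `None` means the deadline elapsed (or the sender was dropped); the
    /// caller treats that as "nothing to do yet" and tries again later.
    async fn recv_action(&mut self, max_wait: Duration) -> Option<Action> {
        // Fast path: an action is already buffered in the channel.
        if let Ok(action) = self.receiver.try_recv() {
            return Some(action);
        }
        // Slow path: park on `recv()` under a deadline. The task wakes the
        // moment the sender pushes an action, with no polling latency.
        timeout(max_wait, self.receiver.recv()).await.ok()?
    }
}

#[tokio::main]
async fn main() {
    let (sender, receiver) = mpsc::unbounded_channel();
    let mut queue = Queue { receiver };

    sender.send(Action(1)).unwrap();
    let action = queue.recv_action(Duration::from_millis(100)).await;
    assert!(matches!(action, Some(Action(1))));

    // Empty queue: returns `None` once the 100 ms deadline elapses,
    // rather than burning wall-clock time in a sleep/poll loop.
    assert!(queue.recv_action(Duration::from_millis(100)).await.is_none());
}
```

As in the patch itself, the `Option` layering does double duty: the outer `Result` from `timeout` is `Err` when the deadline elapses, and the inner `Option` from `recv()` is `None` when the channel is closed; both collapse to `None` for the caller, which is why the committed code chains `.ok()??`. The practical effect is that the old worst case of up to one `POLL_INTERVAL` of added latency per action disappears, which is what makes block processing faster.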