diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 80caab713a7..a4d23db44de 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -416,6 +416,10 @@ impl PostgresConfig { } } +fn read_operator_address() -> anyhow::Result
{ + Ok(std::env::var("EN_OPERATOR_ADDR")?.parse()?) +} + pub(crate) fn read_consensus_config() -> anyhow::Result { let path = std::env::var("EN_CONSENSUS_CONFIG_PATH") .context("EN_CONSENSUS_CONFIG_PATH env variable is not set")?; @@ -425,6 +429,7 @@ pub(crate) fn read_consensus_config() -> anyhow::Result anyhow::Result
{ + Ok(std::env::var("CHAIN_STATE_KEEPER_FEE_ACCOUNT_ADDR")?.parse()?) +} pub(crate) fn read_consensus_config() -> anyhow::Result { let path = std::env::var("CONSENSUS_CONFIG_PATH").context("CONSENSUS_CONFIG_PATH")?; @@ -13,5 +18,6 @@ pub(crate) fn read_consensus_config() -> anyhow::Result $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - ROW_NUMBER = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n is_finished,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n l1_gas_price,\n l2_fair_gas_price,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n base_fee_per_gas,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS ROW_NUMBER\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - ROW_NUMBER = $1\n ", "describe": { "columns": [ { @@ -15,166 +15,191 @@ }, { "ordinal": 2, + "name": "is_finished", + "type_info": "Bool" + }, + { + "ordinal": 3, "name": "l1_tx_count", "type_info": "Int4" }, { - "ordinal": 3, + "ordinal": 4, "name": "l2_tx_count", "type_info": "Int4" }, { - "ordinal": 4, + "ordinal": 5, + "name": 
"fee_account_address", + "type_info": "Bytea" + }, + { + "ordinal": 6, "name": "bloom", "type_info": "Bytea" }, { - "ordinal": 5, + "ordinal": 7, "name": "priority_ops_onchain_data", "type_info": "ByteaArray" }, { - "ordinal": 6, + "ordinal": 8, "name": "hash", "type_info": "Bytea" }, { - "ordinal": 7, + "ordinal": 9, "name": "parent_hash", "type_info": "Bytea" }, { - "ordinal": 8, + "ordinal": 10, "name": "commitment", "type_info": "Bytea" }, { - "ordinal": 9, + "ordinal": 11, "name": "compressed_write_logs", "type_info": "Bytea" }, { - "ordinal": 10, + "ordinal": 12, "name": "compressed_contracts", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 13, "name": "eth_prove_tx_id", "type_info": "Int4" }, { - "ordinal": 12, + "ordinal": 14, "name": "eth_commit_tx_id", "type_info": "Int4" }, { - "ordinal": 13, + "ordinal": 15, "name": "eth_execute_tx_id", "type_info": "Int4" }, { - "ordinal": 14, + "ordinal": 16, "name": "merkle_root_hash", "type_info": "Bytea" }, { - "ordinal": 15, + "ordinal": 17, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 16, + "ordinal": 18, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 17, + "ordinal": 19, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 18, + "ordinal": 20, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 19, + "ordinal": 21, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 20, + "ordinal": 22, "name": "l2_l1_compressed_messages", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 23, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 24, + "name": "l1_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 25, + "name": "l2_fair_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 26, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 23, + "ordinal": 27, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 24, + 
"ordinal": 28, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 29, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 30, + "name": "base_fee_per_gas", + "type_info": "Numeric" + }, + { + "ordinal": 31, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 32, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 33, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 34, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 30, + "ordinal": 35, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 31, + "ordinal": 36, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 32, + "ordinal": 37, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 33, + "ordinal": 38, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 34, + "ordinal": 39, "name": "pubdata_input", "type_info": "Bytea" } @@ -192,6 +217,8 @@ false, false, false, + false, + false, true, true, true, @@ -208,10 +235,13 @@ true, true, true, + false, + false, true, true, true, true, + false, true, true, true, @@ -223,5 +253,5 @@ true ] }, - "hash": "883ab3d601e2dfef03ad36e5987577821fc8ce2f81cb029d0f64801d5f743388" + "hash": "cddf48514aa2aa249d0530d44c741368993009bb4bd90c2ad177ce56317aa04c" } diff --git a/core/lib/dal/.sqlx/query-cea366a9d0da60bf03c71be26862929e051270056ebf113a657a464f89c7fd57.json b/core/lib/dal/.sqlx/query-cea366a9d0da60bf03c71be26862929e051270056ebf113a657a464f89c7fd57.json deleted file mode 100644 index da21c126347..00000000000 --- a/core/lib/dal/.sqlx/query-cea366a9d0da60bf03c71be26862929e051270056ebf113a657a464f89c7fd57.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_logs,\n 
l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Int4", - "Int8", - "ByteaArray", - "ByteaArray", - "Bytea", - "ByteaArray", - "Int8", - "Int8", - "Int8", - "Jsonb", - "Jsonb", - "Bytea", - "Bytea", - "Int4", - "ByteaArray", - "Int8Array", - "Bytea", - "Jsonb" - ] - }, - "nullable": [] - }, - "hash": "cea366a9d0da60bf03c71be26862929e051270056ebf113a657a464f89c7fd57" -} diff --git a/core/lib/dal/.sqlx/query-dd86833a1fa5240e2b225daf32fa594a00a78e400dc44fd3b2634529278ab38c.json b/core/lib/dal/.sqlx/query-d1b261f4057e4113b96eb87c9e20015eeb3ef2643ceda3024504a471b24d1283.json similarity index 64% rename from core/lib/dal/.sqlx/query-dd86833a1fa5240e2b225daf32fa594a00a78e400dc44fd3b2634529278ab38c.json rename to core/lib/dal/.sqlx/query-d1b261f4057e4113b96eb87c9e20015eeb3ef2643ceda3024504a471b24d1283.json index a98e8e0004c..fd6ed893c23 100644 --- a/core/lib/dal/.sqlx/query-dd86833a1fa5240e2b225daf32fa594a00a78e400dc44fd3b2634529278ab38c.json +++ b/core/lib/dal/.sqlx/query-d1b261f4057e4113b96eb87c9e20015eeb3ef2643ceda3024504a471b24d1283.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n 
l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n is_finished,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n l1_gas_price,\n l2_fair_gas_price,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n base_fee_per_gas,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -15,166 +15,191 @@ }, { "ordinal": 2, + "name": "is_finished", + "type_info": "Bool" + }, + { + "ordinal": 3, "name": "l1_tx_count", "type_info": "Int4" }, { - "ordinal": 3, + "ordinal": 4, "name": 
"l2_tx_count", "type_info": "Int4" }, { - "ordinal": 4, + "ordinal": 5, + "name": "fee_account_address", + "type_info": "Bytea" + }, + { + "ordinal": 6, "name": "bloom", "type_info": "Bytea" }, { - "ordinal": 5, + "ordinal": 7, "name": "priority_ops_onchain_data", "type_info": "ByteaArray" }, { - "ordinal": 6, + "ordinal": 8, "name": "hash", "type_info": "Bytea" }, { - "ordinal": 7, + "ordinal": 9, "name": "parent_hash", "type_info": "Bytea" }, { - "ordinal": 8, + "ordinal": 10, "name": "commitment", "type_info": "Bytea" }, { - "ordinal": 9, + "ordinal": 11, "name": "compressed_write_logs", "type_info": "Bytea" }, { - "ordinal": 10, + "ordinal": 12, "name": "compressed_contracts", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 13, "name": "eth_prove_tx_id", "type_info": "Int4" }, { - "ordinal": 12, + "ordinal": 14, "name": "eth_commit_tx_id", "type_info": "Int4" }, { - "ordinal": 13, + "ordinal": 15, "name": "eth_execute_tx_id", "type_info": "Int4" }, { - "ordinal": 14, + "ordinal": 16, "name": "merkle_root_hash", "type_info": "Bytea" }, { - "ordinal": 15, + "ordinal": 17, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 16, + "ordinal": 18, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 17, + "ordinal": 19, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 18, + "ordinal": 20, "name": "compressed_initial_writes", "type_info": "Bytea" }, { - "ordinal": 19, + "ordinal": 21, "name": "compressed_repeated_writes", "type_info": "Bytea" }, { - "ordinal": 20, + "ordinal": 22, "name": "l2_l1_compressed_messages", "type_info": "Bytea" }, { - "ordinal": 21, + "ordinal": 23, "name": "l2_l1_merkle_root", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 24, + "name": "l1_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 25, + "name": "l2_fair_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 26, "name": "rollup_last_leaf_index", "type_info": "Int8" }, { - "ordinal": 23, + "ordinal": 
27, "name": "zkporter_is_available", "type_info": "Bool" }, { - "ordinal": 24, + "ordinal": 28, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 29, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 30, + "name": "base_fee_per_gas", + "type_info": "Numeric" + }, + { + "ordinal": 31, "name": "aux_data_hash", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 32, "name": "pass_through_data_hash", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 33, "name": "meta_parameters_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 34, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 30, + "ordinal": 35, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 31, + "ordinal": 36, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 32, + "ordinal": 37, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 33, + "ordinal": 38, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 34, + "ordinal": 39, "name": "pubdata_input", "type_info": "Bytea" } @@ -189,6 +214,8 @@ false, false, false, + false, + false, true, true, true, @@ -205,10 +232,13 @@ true, true, true, + false, + false, true, true, true, true, + false, true, true, true, @@ -220,5 +250,5 @@ true ] }, - "hash": "dd86833a1fa5240e2b225daf32fa594a00a78e400dc44fd3b2634529278ab38c" + "hash": "d1b261f4057e4113b96eb87c9e20015eeb3ef2643ceda3024504a471b24d1283" } diff --git a/core/lib/dal/.sqlx/query-c195037dcf6031a90f407f652657956350786f3596c7302bdeb8d813f9fbf621.json b/core/lib/dal/.sqlx/query-d8e0f98a67ffb53a1caa6820f8475da2787332deca5708d1d08730cdbfc73541.json similarity index 58% rename from core/lib/dal/.sqlx/query-c195037dcf6031a90f407f652657956350786f3596c7302bdeb8d813f9fbf621.json rename to core/lib/dal/.sqlx/query-d8e0f98a67ffb53a1caa6820f8475da2787332deca5708d1d08730cdbfc73541.json index 0b8a91d7bc8..f0ea745821f 
100644 --- a/core/lib/dal/.sqlx/query-c195037dcf6031a90f407f652657956350786f3596c7302bdeb8d813f9fbf621.json +++ b/core/lib/dal/.sqlx/query-d8e0f98a67ffb53a1caa6820f8475da2787332deca5708d1d08730cdbfc73541.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_logs,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id = $1\n OR eth_prove_tx_id = $1\n OR eth_execute_tx_id = $1\n ", + "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n is_finished,\n fee_account_address,\n l2_to_l1_logs,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id = $1\n OR eth_prove_tx_id = $1\n OR eth_execute_tx_id = $1\n ", "describe": { "columns": [ { @@ -25,56 +25,81 @@ }, { "ordinal": 4, + "name": "is_finished", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "fee_account_address", + "type_info": "Bytea" + }, + { + "ordinal": 6, "name": "l2_to_l1_logs", "type_info": "ByteaArray" }, { - "ordinal": 5, + "ordinal": 7, "name": "l2_to_l1_messages", "type_info": "ByteaArray" }, { - "ordinal": 6, + "ordinal": 8, "name": "bloom", "type_info": "Bytea" }, { - "ordinal": 7, + "ordinal": 9, "name": "priority_ops_onchain_data", "type_info": "ByteaArray" }, { - "ordinal": 8, + "ordinal": 10, "name": "used_contract_hashes", "type_info": "Jsonb" }, { - "ordinal": 9, + "ordinal": 11, + "name": "base_fee_per_gas", + "type_info": "Numeric" + }, + { + "ordinal": 12, + "name": "l1_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 13, + 
"name": "l2_fair_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 14, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 10, + "ordinal": 15, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 16, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 12, + "ordinal": 17, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 13, + "ordinal": 18, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 19, "name": "pubdata_input", "type_info": "Bytea" } @@ -94,6 +119,11 @@ false, false, false, + false, + false, + false, + false, + false, true, true, true, @@ -102,5 +132,5 @@ true ] }, - "hash": "c195037dcf6031a90f407f652657956350786f3596c7302bdeb8d813f9fbf621" + "hash": "d8e0f98a67ffb53a1caa6820f8475da2787332deca5708d1d08730cdbfc73541" } diff --git a/core/lib/dal/.sqlx/query-eab03e888f20020462ede2cd59fc0d68195346daf5f38d102eab1c1b73b0f82f.json b/core/lib/dal/.sqlx/query-eab03e888f20020462ede2cd59fc0d68195346daf5f38d102eab1c1b73b0f82f.json deleted file mode 100644 index 5cd05036f98..00000000000 --- a/core/lib/dal/.sqlx/query-eab03e888f20020462ede2cd59fc0d68195346daf5f38d102eab1c1b73b0f82f.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT COUNT(*)\n FROM information_schema.columns\n WHERE table_name = 'l1_batches' AND column_name = 'fee_account_address'\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - null - ] - }, - "hash": "eab03e888f20020462ede2cd59fc0d68195346daf5f38d102eab1c1b73b0f82f" -} diff --git a/core/lib/dal/.sqlx/query-f76231781e5e267e9571c3f9daa902c4f720483abb5833ff15ecfa3a2602d4e5.json b/core/lib/dal/.sqlx/query-f76231781e5e267e9571c3f9daa902c4f720483abb5833ff15ecfa3a2602d4e5.json deleted file mode 100644 index 392d44ef63d..00000000000 --- 
a/core/lib/dal/.sqlx/query-f76231781e5e267e9571c3f9daa902c4f720483abb5833ff15ecfa3a2602d4e5.json +++ /dev/null @@ -1,226 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n parent_hash,\n commitment,\n compressed_write_logs,\n compressed_contracts,\n eth_prove_tx_id,\n eth_commit_tx_id,\n eth_execute_tx_id,\n merkle_root_hash,\n l2_to_l1_logs,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_compressed_messages,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "bloom", - "type_info": "Bytea" - }, - { - "ordinal": 5, - "name": "priority_ops_onchain_data", - "type_info": "ByteaArray" - }, - { - "ordinal": 6, - "name": "hash", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "parent_hash", - "type_info": "Bytea" - }, - { - "ordinal": 8, - "name": "commitment", - "type_info": "Bytea" - }, - { - "ordinal": 9, - "name": "compressed_write_logs", - "type_info": "Bytea" - }, - { - "ordinal": 10, - "name": "compressed_contracts", - "type_info": "Bytea" - }, - { - "ordinal": 11, - "name": "eth_prove_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 12, - "name": 
"eth_commit_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 13, - "name": "eth_execute_tx_id", - "type_info": "Int4" - }, - { - "ordinal": 14, - "name": "merkle_root_hash", - "type_info": "Bytea" - }, - { - "ordinal": 15, - "name": "l2_to_l1_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 16, - "name": "l2_to_l1_messages", - "type_info": "ByteaArray" - }, - { - "ordinal": 17, - "name": "used_contract_hashes", - "type_info": "Jsonb" - }, - { - "ordinal": 18, - "name": "compressed_initial_writes", - "type_info": "Bytea" - }, - { - "ordinal": 19, - "name": "compressed_repeated_writes", - "type_info": "Bytea" - }, - { - "ordinal": 20, - "name": "l2_l1_compressed_messages", - "type_info": "Bytea" - }, - { - "ordinal": 21, - "name": "l2_l1_merkle_root", - "type_info": "Bytea" - }, - { - "ordinal": 22, - "name": "rollup_last_leaf_index", - "type_info": "Int8" - }, - { - "ordinal": 23, - "name": "zkporter_is_available", - "type_info": "Bool" - }, - { - "ordinal": 24, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 25, - "name": "default_aa_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 26, - "name": "aux_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 27, - "name": "pass_through_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 28, - "name": "meta_parameters_hash", - "type_info": "Bytea" - }, - { - "ordinal": 29, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 30, - "name": "system_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 31, - "name": "compressed_state_diffs", - "type_info": "Bytea" - }, - { - "ordinal": 32, - "name": "events_queue_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 33, - "name": "bootloader_initial_content_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 34, - "name": "pubdata_input", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - 
false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - true, - true, - true, - true - ] - }, - "hash": "f76231781e5e267e9571c3f9daa902c4f720483abb5833ff15ecfa3a2602d4e5" -} diff --git a/core/lib/dal/migrations/20231212121822_add_miniblocks_fee_account_address.down.sql b/core/lib/dal/migrations/20231212121822_add_miniblocks_fee_account_address.down.sql deleted file mode 100644 index 8a0717ff839..00000000000 --- a/core/lib/dal/migrations/20231212121822_add_miniblocks_fee_account_address.down.sql +++ /dev/null @@ -1,5 +0,0 @@ -ALTER TABLE l1_batches - ALTER COLUMN fee_account_address DROP DEFAULT, - ALTER COLUMN is_finished DROP DEFAULT; -ALTER TABLE miniblocks - DROP COLUMN fee_account_address; diff --git a/core/lib/dal/migrations/20231212121822_add_miniblocks_fee_account_address.up.sql b/core/lib/dal/migrations/20231212121822_add_miniblocks_fee_account_address.up.sql deleted file mode 100644 index e6ec1f49278..00000000000 --- a/core/lib/dal/migrations/20231212121822_add_miniblocks_fee_account_address.up.sql +++ /dev/null @@ -1,9 +0,0 @@ -ALTER TABLE miniblocks - ADD COLUMN fee_account_address BYTEA NOT NULL DEFAULT '\x0000000000000000000000000000000000000000'::bytea; --- ^ Add a default value so that DB queries don't fail even if the DB migration is not completed. - --- Set default values for columns in `l1_batches` that will be removed, so that INSERTs can work --- w/o setting these columns. 
-ALTER TABLE l1_batches - ALTER COLUMN fee_account_address SET DEFAULT '\x0000000000000000000000000000000000000000'::bytea, - ALTER COLUMN is_finished SET DEFAULT true; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index ee7598d6ed3..b02068165ae 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -50,6 +50,8 @@ impl BlocksDal<'_, '_> { MAX(number) AS "number" FROM l1_batches + WHERE + is_finished = TRUE "# ) .instrument("get_sealed_block_number") @@ -151,11 +153,16 @@ impl BlocksDal<'_, '_> { l1_tx_count, l2_tx_count, timestamp, + is_finished, + fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, used_contract_hashes, + base_fee_per_gas, + l1_gas_price, + l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, @@ -189,8 +196,10 @@ impl BlocksDal<'_, '_> { SELECT number, timestamp, + is_finished, l1_tx_count, l2_tx_count, + fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -209,10 +218,13 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, + l1_gas_price, + l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -248,11 +260,16 @@ impl BlocksDal<'_, '_> { l1_tx_count, l2_tx_count, timestamp, + is_finished, + fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, used_contract_hashes, + base_fee_per_gas, + l1_gas_price, + l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, @@ -459,6 +476,8 @@ impl BlocksDal<'_, '_> { // Serialization should always succeed. 
let used_contract_hashes = serde_json::to_value(&header.used_contract_hashes) .expect("failed to serialize used_contract_hashes to JSON value"); + let base_fee_per_gas = BigDecimal::from_u64(header.base_fee_per_gas) + .context("block.base_fee_per_gas should fit in u64")?; let storage_refunds: Vec<_> = storage_refunds.iter().map(|n| *n as i64).collect(); let mut transaction = self.storage.start_transaction().await?; @@ -470,6 +489,8 @@ impl BlocksDal<'_, '_> { l1_tx_count, l2_tx_count, timestamp, + is_finished, + fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, @@ -479,6 +500,9 @@ impl BlocksDal<'_, '_> { predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, + base_fee_per_gas, + l1_gas_price, + l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, @@ -511,6 +535,11 @@ impl BlocksDal<'_, '_> { $18, $19, $20, + $21, + $22, + $23, + $24, + $25, NOW(), NOW() ) @@ -519,6 +548,8 @@ impl BlocksDal<'_, '_> { header.l1_tx_count as i32, header.l2_tx_count as i32, header.timestamp as i64, + header.is_finished, + header.fee_account_address.as_bytes(), &l2_to_l1_logs, &header.l2_to_l1_messages, header.bloom.as_bytes(), @@ -528,6 +559,9 @@ impl BlocksDal<'_, '_> { predicted_block_gas.execute as i64, initial_bootloader_contents, used_contract_hashes, + base_fee_per_gas, + header.l1_gas_price as i64, + header.l2_fair_gas_price as i64, header.base_system_contracts_hashes.bootloader.as_bytes(), header.base_system_contracts_hashes.default_aa.as_bytes(), header.protocol_version.map(|v| v as i32), @@ -572,7 +606,6 @@ impl BlocksDal<'_, '_> { hash, l1_tx_count, l2_tx_count, - fee_account_address, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, @@ -586,14 +619,13 @@ impl BlocksDal<'_, '_> { updated_at ) VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, NOW(), NOW()) + ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, NOW(), NOW()) "#, miniblock_header.number.0 as i64, 
miniblock_header.timestamp as i64, miniblock_header.hash.as_bytes(), miniblock_header.l1_tx_count as i32, miniblock_header.l2_tx_count as i32, - miniblock_header.fee_account_address.as_bytes(), base_fee_per_gas, miniblock_header.batch_fee_input.l1_gas_price() as i64, miniblock_header.batch_fee_input.fair_l2_gas_price() as i64, @@ -618,7 +650,7 @@ impl BlocksDal<'_, '_> { pub async fn get_last_sealed_miniblock_header( &mut self, ) -> sqlx::Result> { - let header = sqlx::query_as!( + Ok(sqlx::query_as!( StorageMiniblockHeader, r#" SELECT @@ -627,7 +659,6 @@ impl BlocksDal<'_, '_> { hash, l1_tx_count, l2_tx_count, - fee_account_address AS "fee_account_address!", base_fee_per_gas, l1_gas_price, l2_fair_gas_price, @@ -646,25 +677,15 @@ impl BlocksDal<'_, '_> { "#, ) .fetch_optional(self.storage.conn()) - .await?; - - let Some(header) = header else { - return Ok(None); - }; - let mut header = MiniblockHeader::from(header); - // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration - #[allow(deprecated)] - self.maybe_load_fee_address(&mut header.fee_account_address, header.number) - .await?; - - Ok(Some(header)) + .await? 
+ .map(Into::into)) } pub async fn get_miniblock_header( &mut self, miniblock_number: MiniblockNumber, ) -> sqlx::Result> { - let header = sqlx::query_as!( + Ok(sqlx::query_as!( StorageMiniblockHeader, r#" SELECT @@ -673,7 +694,6 @@ impl BlocksDal<'_, '_> { hash, l1_tx_count, l2_tx_count, - fee_account_address AS "fee_account_address!", base_fee_per_gas, l1_gas_price, l2_fair_gas_price, @@ -691,18 +711,8 @@ impl BlocksDal<'_, '_> { miniblock_number.0 as i64, ) .fetch_optional(self.storage.conn()) - .await?; - - let Some(header) = header else { - return Ok(None); - }; - let mut header = MiniblockHeader::from(header); - // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration - #[allow(deprecated)] - self.maybe_load_fee_address(&mut header.fee_account_address, header.number) - .await?; - - Ok(Some(header)) + .await? + .map(Into::into)) } pub async fn mark_miniblocks_as_executed_in_l1_batch( @@ -927,8 +937,10 @@ impl BlocksDal<'_, '_> { SELECT number, timestamp, + is_finished, l1_tx_count, l2_tx_count, + fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -947,10 +959,13 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, + l1_gas_price, + l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1108,8 +1123,10 @@ impl BlocksDal<'_, '_> { SELECT number, timestamp, + is_finished, l1_tx_count, l2_tx_count, + fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -1128,10 +1145,13 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, + l1_gas_price, + l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1216,8 +1236,10 @@ impl BlocksDal<'_, '_> { SELECT 
number, timestamp, + is_finished, l1_tx_count, l2_tx_count, + fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -1236,10 +1258,13 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, + l1_gas_price, + l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1298,8 +1323,10 @@ impl BlocksDal<'_, '_> { SELECT number, timestamp, + is_finished, l1_tx_count, l2_tx_count, + fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -1318,10 +1345,13 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, + l1_gas_price, + l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1432,8 +1462,10 @@ impl BlocksDal<'_, '_> { SELECT number, timestamp, + is_finished, l1_tx_count, l2_tx_count, + fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -1452,10 +1484,13 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, + l1_gas_price, + l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1505,8 +1540,10 @@ impl BlocksDal<'_, '_> { SELECT number, l1_batches.timestamp, + is_finished, l1_tx_count, l2_tx_count, + fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -1525,10 +1562,13 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, + l1_gas_price, + l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, + base_fee_per_gas, aux_data_hash, pass_through_data_hash, 
meta_parameters_hash, @@ -1588,8 +1628,10 @@ impl BlocksDal<'_, '_> { SELECT number, l1_batches.timestamp, + is_finished, l1_tx_count, l2_tx_count, + fee_account_address, bloom, priority_ops_onchain_data, hash, @@ -1608,10 +1650,13 @@ impl BlocksDal<'_, '_> { compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, + l1_gas_price, + l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, + base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -2089,44 +2134,24 @@ impl BlocksDal<'_, '_> { Ok(()) } - pub async fn get_fee_address_for_miniblock( - &mut self, - number: MiniblockNumber, - ) -> sqlx::Result> { - let Some(mut fee_account_address) = self.raw_fee_address_for_miniblock(number).await? - else { - return Ok(None); - }; - - // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration - #[allow(deprecated)] - self.maybe_load_fee_address(&mut fee_account_address, number) - .await?; - Ok(Some(fee_account_address)) - } - - async fn raw_fee_address_for_miniblock( + pub async fn get_fee_address_for_l1_batch( &mut self, - number: MiniblockNumber, + l1_batch_number: L1BatchNumber, ) -> sqlx::Result> { - let Some(row) = sqlx::query!( + Ok(sqlx::query!( r#" SELECT fee_account_address FROM - miniblocks + l1_batches WHERE number = $1 "#, - number.0 as i32 + l1_batch_number.0 as i32 ) .fetch_optional(self.storage.conn()) .await? - else { - return Ok(None); - }; - - Ok(Some(Address::from_slice(&row.fee_account_address))) + .map(|row| Address::from_slice(&row.fee_account_address))) } pub async fn get_virtual_blocks_for_miniblock( @@ -2150,149 +2175,7 @@ impl BlocksDal<'_, '_> { } } -/// Temporary methods for migrating `fee_account_address`. 
-#[deprecated(note = "will be removed after the fee address migration is complete")] -impl BlocksDal<'_, '_> { - pub(crate) async fn maybe_load_fee_address( - &mut self, - fee_address: &mut Address, - miniblock_number: MiniblockNumber, - ) -> sqlx::Result<()> { - if *fee_address != Address::default() { - return Ok(()); - } - - // This clause should be triggered only for non-migrated miniblock rows. After `fee_account_address` - // is filled for all miniblocks, it won't be called; thus, `fee_account_address` column could be removed - // from `l1_batches` even with this code present. - let Some(row) = sqlx::query!( - r#" - SELECT - l1_batches.fee_account_address - FROM - l1_batches - INNER JOIN miniblocks ON miniblocks.l1_batch_number = l1_batches.number - WHERE - miniblocks.number = $1 - "#, - miniblock_number.0 as i32 - ) - .fetch_optional(self.storage.conn()) - .await? - else { - return Ok(()); - }; - - *fee_address = Address::from_slice(&row.fee_account_address); - Ok(()) - } - - /// Checks whether `fee_account_address` is migrated for the specified miniblock. Returns - /// `Ok(None)` if the miniblock doesn't exist. - pub async fn is_fee_address_migrated( - &mut self, - number: MiniblockNumber, - ) -> sqlx::Result> { - Ok(self - .raw_fee_address_for_miniblock(number) - .await? - .map(|address| address != Address::default())) - } - - /// Copies `fee_account_address` for pending miniblocks (ones without an associated L1 batch) - /// from the last L1 batch. Returns the number of affected rows. 
- pub async fn copy_fee_account_address_for_pending_miniblocks(&mut self) -> sqlx::Result { - let execution_result = sqlx::query!( - r#" - UPDATE miniblocks - SET - fee_account_address = ( - SELECT - l1_batches.fee_account_address - FROM - l1_batches - ORDER BY - l1_batches.number DESC - LIMIT - 1 - ) - WHERE - l1_batch_number IS NULL - AND fee_account_address = '\x0000000000000000000000000000000000000000'::bytea - "# - ) - .execute(self.storage.conn()) - .await?; - - Ok(execution_result.rows_affected()) - } - - pub async fn check_l1_batches_have_fee_account_address(&mut self) -> sqlx::Result { - let count = sqlx::query_scalar!( - r#" - SELECT COUNT(*) - FROM information_schema.columns - WHERE table_name = 'l1_batches' AND column_name = 'fee_account_address' - "# - ) - .fetch_one(self.storage.conn()) - .await? - .unwrap_or(0); - - Ok(count > 0) - } - - /// Copies `fee_account_address` for miniblocks in the given range from the L1 batch they belong to. - /// Returns the number of affected rows. - pub async fn copy_fee_account_address_for_miniblocks( - &mut self, - numbers: ops::RangeInclusive, - ) -> sqlx::Result { - let execution_result = sqlx::query!( - r#" - UPDATE miniblocks - SET - fee_account_address = l1_batches.fee_account_address - FROM - l1_batches - WHERE - l1_batches.number = miniblocks.l1_batch_number - AND miniblocks.number BETWEEN $1 AND $2 - AND miniblocks.fee_account_address = '\x0000000000000000000000000000000000000000'::bytea - "#, - numbers.start().0 as i64, - numbers.end().0 as i64 - ) - .execute(self.storage.conn()) - .await?; - - Ok(execution_result.rows_affected()) - } - - /// Sets `fee_account_address` for an L1 batch. Should only be used in tests. 
- pub async fn set_l1_batch_fee_address( - &mut self, - l1_batch: L1BatchNumber, - fee_account_address: Address, - ) -> sqlx::Result<()> { - sqlx::query!( - r#" - UPDATE l1_batches - SET - fee_account_address = $1::bytea - WHERE - number = $2 - "#, - fee_account_address.as_bytes(), - l1_batch.0 as i64 - ) - .execute(self.storage.conn()) - .await?; - Ok(()) - } -} - -/// These methods should only be used for tests. +/// These functions should only be used for tests. impl BlocksDal<'_, '_> { // The actual l1 batch hash is only set by the metadata calculator. pub async fn set_l1_batch_hash( @@ -2352,12 +2235,20 @@ mod tests { }; use super::*; - use crate::{tests::create_miniblock_header, ConnectionPool}; + use crate::ConnectionPool; #[tokio::test] async fn loading_l1_batch_header() { let pool = ConnectionPool::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); + conn.blocks_dal() + .delete_l1_batches(L1BatchNumber(0)) + .await + .unwrap(); + conn.blocks_dal() + .delete_initial_writes(L1BatchNumber(0)) + .await + .unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) .await; @@ -2365,6 +2256,7 @@ mod tests { let mut header = L1BatchHeader::new( L1BatchNumber(1), 100, + Address::default(), BaseSystemContractsHashes { bootloader: H256::repeat_byte(1), default_aa: H256::repeat_byte(42), @@ -2414,12 +2306,21 @@ mod tests { async fn getting_predicted_gas() { let pool = ConnectionPool::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); + conn.blocks_dal() + .delete_l1_batches(L1BatchNumber(0)) + .await + .unwrap(); + conn.blocks_dal() + .delete_initial_writes(L1BatchNumber(0)) + .await + .unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) .await; let mut header = L1BatchHeader::new( L1BatchNumber(1), 100, + Address::default(), BaseSystemContractsHashes::default(), ProtocolVersionId::default(), ); @@ -2469,144 +2370,4 @@ mod tests { 
assert_eq!(gas, 3 * expected_gas); } } - - #[allow(deprecated)] // that's the whole point - #[tokio::test] - async fn checking_fee_account_address_in_l1_batches() { - let pool = ConnectionPool::test_pool().await; - let mut conn = pool.access_storage().await.unwrap(); - assert!(conn - .blocks_dal() - .check_l1_batches_have_fee_account_address() - .await - .unwrap()); - } - - #[allow(deprecated)] // that's the whole point - #[tokio::test] - async fn ensuring_fee_account_address_for_miniblocks() { - let pool = ConnectionPool::test_pool().await; - let mut conn = pool.access_storage().await.unwrap(); - conn.protocol_versions_dal() - .save_protocol_version_with_tx(ProtocolVersion::default()) - .await; - - for number in [1, 2] { - let l1_batch = L1BatchHeader::new( - L1BatchNumber(number), - 100, - BaseSystemContractsHashes { - bootloader: H256::repeat_byte(1), - default_aa: H256::repeat_byte(42), - }, - ProtocolVersionId::latest(), - ); - let miniblock = MiniblockHeader { - fee_account_address: Address::default(), - ..create_miniblock_header(number) - }; - conn.blocks_dal() - .insert_miniblock(&miniblock) - .await - .unwrap(); - conn.blocks_dal() - .insert_mock_l1_batch(&l1_batch) - .await - .unwrap(); - conn.blocks_dal() - .mark_miniblocks_as_executed_in_l1_batch(L1BatchNumber(number)) - .await - .unwrap(); - - assert_eq!( - conn.blocks_dal() - .is_fee_address_migrated(miniblock.number) - .await - .unwrap(), - Some(false) - ); - } - - // Manually set `fee_account_address` for the inserted L1 batches. - conn.blocks_dal() - .set_l1_batch_fee_address(L1BatchNumber(1), Address::repeat_byte(0x23)) - .await - .unwrap(); - conn.blocks_dal() - .set_l1_batch_fee_address(L1BatchNumber(2), Address::repeat_byte(0x42)) - .await - .unwrap(); - - // Add a pending miniblock. 
- let miniblock = MiniblockHeader { - fee_account_address: Address::default(), - ..create_miniblock_header(3) - }; - conn.blocks_dal() - .insert_miniblock(&miniblock) - .await - .unwrap(); - - let rows_affected = conn - .blocks_dal() - .copy_fee_account_address_for_miniblocks(MiniblockNumber(0)..=MiniblockNumber(100)) - .await - .unwrap(); - - assert_eq!(rows_affected, 2); - let first_miniblock_addr = conn - .blocks_dal() - .raw_fee_address_for_miniblock(MiniblockNumber(1)) - .await - .unwrap() - .expect("No fee address for block #1"); - assert_eq!(first_miniblock_addr, Address::repeat_byte(0x23)); - let second_miniblock_addr = conn - .blocks_dal() - .raw_fee_address_for_miniblock(MiniblockNumber(2)) - .await - .unwrap() - .expect("No fee address for block #1"); - assert_eq!(second_miniblock_addr, Address::repeat_byte(0x42)); - // The pending miniblock should not be affected. - let pending_miniblock_addr = conn - .blocks_dal() - .raw_fee_address_for_miniblock(MiniblockNumber(3)) - .await - .unwrap() - .expect("No fee address for block #3"); - assert_eq!(pending_miniblock_addr, Address::default()); - assert_eq!( - conn.blocks_dal() - .is_fee_address_migrated(MiniblockNumber(3)) - .await - .unwrap(), - Some(false) - ); - - let rows_affected = conn - .blocks_dal() - .copy_fee_account_address_for_pending_miniblocks() - .await - .unwrap(); - assert_eq!(rows_affected, 1); - - let pending_miniblock_addr = conn - .blocks_dal() - .raw_fee_address_for_miniblock(MiniblockNumber(3)) - .await - .unwrap() - .expect("No fee address for block #3"); - assert_eq!(pending_miniblock_addr, Address::repeat_byte(0x42)); - - for number in 1..=3 { - assert_eq!( - conn.blocks_dal() - .is_fee_address_migrated(MiniblockNumber(number)) - .await - .unwrap(), - Some(true) - ); - } - } } diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index eef22324dac..f9afb2b5828 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ 
-3,6 +3,7 @@ use sqlx::Row; use zksync_system_constants::EMPTY_UNCLES_HASH; use zksync_types::{ api, + ethabi::Address, l2_to_l1_log::L2ToL1Log, vm_trace::Call, web3::types::{BlockHeader, U64}, @@ -508,139 +509,122 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_block_details( &mut self, block_number: MiniblockNumber, + current_operator_address: Address, ) -> sqlx::Result> { - let storage_block_details = sqlx::query_as!( - StorageBlockDetails, - r#" - SELECT - miniblocks.number, - COALESCE( - miniblocks.l1_batch_number, - ( - SELECT - (MAX(number) + 1) - FROM - l1_batches + { + let storage_block_details = sqlx::query_as!( + StorageBlockDetails, + r#" + SELECT + miniblocks.number, + COALESCE( + miniblocks.l1_batch_number, + ( + SELECT + (MAX(number) + 1) + FROM + l1_batches + ) + ) AS "l1_batch_number!", + miniblocks.timestamp, + miniblocks.l1_tx_count, + miniblocks.l2_tx_count, + miniblocks.hash AS "root_hash?", + commit_tx.tx_hash AS "commit_tx_hash?", + commit_tx.confirmed_at AS "committed_at?", + prove_tx.tx_hash AS "prove_tx_hash?", + prove_tx.confirmed_at AS "proven_at?", + execute_tx.tx_hash AS "execute_tx_hash?", + execute_tx.confirmed_at AS "executed_at?", + miniblocks.l1_gas_price, + miniblocks.l2_fair_gas_price, + miniblocks.bootloader_code_hash, + miniblocks.default_aa_code_hash, + miniblocks.protocol_version, + l1_batches.fee_account_address AS "fee_account_address?" 
+ FROM + miniblocks + LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number + LEFT JOIN eth_txs_history AS commit_tx ON ( + l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id + AND commit_tx.confirmed_at IS NOT NULL ) - ) AS "l1_batch_number!", - miniblocks.timestamp, - miniblocks.l1_tx_count, - miniblocks.l2_tx_count, - miniblocks.hash AS "root_hash?", - commit_tx.tx_hash AS "commit_tx_hash?", - commit_tx.confirmed_at AS "committed_at?", - prove_tx.tx_hash AS "prove_tx_hash?", - prove_tx.confirmed_at AS "proven_at?", - execute_tx.tx_hash AS "execute_tx_hash?", - execute_tx.confirmed_at AS "executed_at?", - miniblocks.l1_gas_price, - miniblocks.l2_fair_gas_price, - miniblocks.bootloader_code_hash, - miniblocks.default_aa_code_hash, - miniblocks.protocol_version, - miniblocks.fee_account_address - FROM - miniblocks - LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number - LEFT JOIN eth_txs_history AS commit_tx ON ( - l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id - AND commit_tx.confirmed_at IS NOT NULL - ) - LEFT JOIN eth_txs_history AS prove_tx ON ( - l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id - AND prove_tx.confirmed_at IS NOT NULL - ) - LEFT JOIN eth_txs_history AS execute_tx ON ( - l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id - AND execute_tx.confirmed_at IS NOT NULL - ) - WHERE - miniblocks.number = $1 - "#, - block_number.0 as i64 - ) - .instrument("get_block_details") - .with_arg("block_number", &block_number) - .report_latency() - .fetch_optional(self.storage.conn()) - .await?; - - let Some(storage_block_details) = storage_block_details else { - return Ok(None); - }; - let mut details = api::BlockDetails::from(storage_block_details); - - // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration - #[allow(deprecated)] - self.storage - .blocks_dal() - .maybe_load_fee_address(&mut details.operator_address, details.number) + LEFT JOIN eth_txs_history AS prove_tx ON ( + 
l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id + AND prove_tx.confirmed_at IS NOT NULL + ) + LEFT JOIN eth_txs_history AS execute_tx ON ( + l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id + AND execute_tx.confirmed_at IS NOT NULL + ) + WHERE + miniblocks.number = $1 + "#, + block_number.0 as i64 + ) + .instrument("get_block_details") + .with_arg("block_number", &block_number) + .report_latency() + .fetch_optional(self.storage.conn()) .await?; - Ok(Some(details)) + + Ok(storage_block_details.map(|storage_block_details| { + storage_block_details.into_block_details(current_operator_address) + })) + } } pub async fn get_l1_batch_details( &mut self, l1_batch_number: L1BatchNumber, ) -> sqlx::Result> { - let l1_batch_details: Option = sqlx::query_as!( - StorageL1BatchDetails, - r#" - WITH - mb AS ( - SELECT - l1_gas_price, - l2_fair_gas_price - FROM - miniblocks - WHERE - l1_batch_number = $1 - LIMIT - 1 - ) - SELECT - l1_batches.number, - l1_batches.timestamp, - l1_batches.l1_tx_count, - l1_batches.l2_tx_count, - l1_batches.hash AS "root_hash?", - commit_tx.tx_hash AS "commit_tx_hash?", - commit_tx.confirmed_at AS "committed_at?", - prove_tx.tx_hash AS "prove_tx_hash?", - prove_tx.confirmed_at AS "proven_at?", - execute_tx.tx_hash AS "execute_tx_hash?", - execute_tx.confirmed_at AS "executed_at?", - mb.l1_gas_price, - mb.l2_fair_gas_price, - l1_batches.bootloader_code_hash, - l1_batches.default_aa_code_hash - FROM - l1_batches - INNER JOIN mb ON TRUE - LEFT JOIN eth_txs_history AS commit_tx ON ( - l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id - AND commit_tx.confirmed_at IS NOT NULL - ) - LEFT JOIN eth_txs_history AS prove_tx ON ( - l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id - AND prove_tx.confirmed_at IS NOT NULL - ) - LEFT JOIN eth_txs_history AS execute_tx ON ( - l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id - AND execute_tx.confirmed_at IS NOT NULL - ) - WHERE - l1_batches.number = $1 - "#, - l1_batch_number.0 as i64 - ) - 
.instrument("get_l1_batch_details") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .fetch_optional(self.storage.conn()) - .await?; + { + let l1_batch_details: Option = sqlx::query_as!( + StorageL1BatchDetails, + r#" + SELECT + l1_batches.number, + l1_batches.timestamp, + l1_batches.l1_tx_count, + l1_batches.l2_tx_count, + l1_batches.hash AS "root_hash?", + commit_tx.tx_hash AS "commit_tx_hash?", + commit_tx.confirmed_at AS "committed_at?", + prove_tx.tx_hash AS "prove_tx_hash?", + prove_tx.confirmed_at AS "proven_at?", + execute_tx.tx_hash AS "execute_tx_hash?", + execute_tx.confirmed_at AS "executed_at?", + l1_batches.l1_gas_price, + l1_batches.l2_fair_gas_price, + l1_batches.bootloader_code_hash, + l1_batches.default_aa_code_hash + FROM + l1_batches + LEFT JOIN eth_txs_history AS commit_tx ON ( + l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id + AND commit_tx.confirmed_at IS NOT NULL + ) + LEFT JOIN eth_txs_history AS prove_tx ON ( + l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id + AND prove_tx.confirmed_at IS NOT NULL + ) + LEFT JOIN eth_txs_history AS execute_tx ON ( + l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id + AND execute_tx.confirmed_at IS NOT NULL + ) + WHERE + l1_batches.number = $1 + "#, + l1_batch_number.0 as i64 + ) + .instrument("get_l1_batch_details") + .with_arg("l1_batch_number", &l1_batch_number) + .report_latency() + .fetch_optional(self.storage.conn()) + .await?; - Ok(l1_batch_details.map(Into::into)) + Ok(l1_batch_details.map(api::L1BatchDetails::from)) + } } } @@ -650,7 +634,7 @@ mod tests { block::{MiniblockHasher, MiniblockHeader}, fee::TransactionExecutionMetrics, snapshots::SnapshotRecoveryStatus, - Address, MiniblockNumber, ProtocolVersion, ProtocolVersionId, + MiniblockNumber, ProtocolVersion, ProtocolVersionId, }; use super::*; diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 854e0e0871a..c53541166d3 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ 
b/core/lib/dal/src/consensus_dal.rs @@ -1,7 +1,7 @@ use anyhow::Context as _; use zksync_consensus_roles::validator; use zksync_consensus_storage::ReplicaState; -use zksync_types::MiniblockNumber; +use zksync_types::{Address, MiniblockNumber}; pub use crate::models::storage_sync::Payload; use crate::StorageProcessor; @@ -134,6 +134,7 @@ impl ConsensusDal<'_, '_> { pub async fn block_payload( &mut self, block_number: validator::BlockNumber, + operator_address: Address, ) -> anyhow::Result> { let block_number = MiniblockNumber(block_number.0.try_into()?); let Some(block) = self @@ -149,7 +150,7 @@ impl ConsensusDal<'_, '_> { .transactions_web3_dal() .get_raw_miniblock_transactions(block_number) .await?; - Ok(Some(block.into_payload(transactions))) + Ok(Some(block.into_payload(operator_address, transactions))) } /// Inserts a certificate for the miniblock `cert.header().number`. @@ -161,7 +162,11 @@ impl ConsensusDal<'_, '_> { /// which will help us to detect bugs in the consensus implementation /// while it is "fresh". If it turns out to take too long, /// we can remove the verification checks later. - pub async fn insert_certificate(&mut self, cert: &validator::CommitQC) -> anyhow::Result<()> { + pub async fn insert_certificate( + &mut self, + cert: &validator::CommitQC, + operator_address: Address, + ) -> anyhow::Result<()> { let header = &cert.message.proposal; let mut txn = self.storage.start_transaction().await?; if let Some(last) = txn.consensus_dal().last_certificate().await? { @@ -179,7 +184,7 @@ impl ConsensusDal<'_, '_> { } let want_payload = txn .consensus_dal() - .block_payload(cert.message.proposal.number) + .block_payload(cert.message.proposal.number, operator_address) .await? 
.context("corresponding miniblock is missing")?; anyhow::ensure!( diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 7cd96e7f676..2aa5e4d30ef 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -28,13 +28,18 @@ pub enum StorageL1BatchConvertError { pub struct StorageL1BatchHeader { pub number: i64, pub timestamp: i64, + pub is_finished: bool, pub l1_tx_count: i32, pub l2_tx_count: i32, + pub fee_account_address: Vec, pub l2_to_l1_logs: Vec>, pub l2_to_l1_messages: Vec>, pub bloom: Vec, pub priority_ops_onchain_data: Vec>, pub used_contract_hashes: serde_json::Value, + pub base_fee_per_gas: BigDecimal, + pub l1_gas_price: i64, + pub l2_fair_gas_price: i64, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, pub protocol_version: Option, @@ -62,7 +67,9 @@ impl From for L1BatchHeader { L1BatchHeader { number: L1BatchNumber(l1_batch.number as u32), + is_finished: l1_batch.is_finished, timestamp: l1_batch.timestamp as u64, + fee_account_address: Address::from_slice(&l1_batch.fee_account_address), priority_ops_onchain_data, l1_tx_count: l1_batch.l1_tx_count as u16, l2_tx_count: l1_batch.l2_tx_count as u16, @@ -72,10 +79,16 @@ impl From for L1BatchHeader { bloom: H2048::from_slice(&l1_batch.bloom), used_contract_hashes: serde_json::from_value(l1_batch.used_contract_hashes) .expect("invalid value for used_contract_hashes in the DB"), + base_fee_per_gas: l1_batch + .base_fee_per_gas + .to_u64() + .expect("base_fee_per_gas should fit in u64"), base_system_contracts_hashes: convert_base_system_contracts_hashes( l1_batch.bootloader_code_hash, l1_batch.default_aa_code_hash, ), + l1_gas_price: l1_batch.l1_gas_price as u64, + l2_fair_gas_price: l1_batch.l2_fair_gas_price as u64, system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(), protocol_version: l1_batch .protocol_version @@ -113,8 +126,10 @@ fn convert_base_system_contracts_hashes( pub 
struct StorageL1Batch { pub number: i64, pub timestamp: i64, + pub is_finished: bool, pub l1_tx_count: i32, pub l2_tx_count: i32, + pub fee_account_address: Vec, pub bloom: Vec, pub l2_to_l1_logs: Vec>, pub priority_ops_onchain_data: Vec>, @@ -146,6 +161,10 @@ pub struct StorageL1Batch { pub used_contract_hashes: serde_json::Value, + pub base_fee_per_gas: BigDecimal, + pub l1_gas_price: i64, + pub l2_fair_gas_price: i64, + pub system_logs: Vec>, pub compressed_state_diffs: Option>, @@ -169,7 +188,9 @@ impl From for L1BatchHeader { L1BatchHeader { number: L1BatchNumber(l1_batch.number as u32), + is_finished: l1_batch.is_finished, timestamp: l1_batch.timestamp as u64, + fee_account_address: Address::from_slice(&l1_batch.fee_account_address), priority_ops_onchain_data, l1_tx_count: l1_batch.l1_tx_count as u16, l2_tx_count: l1_batch.l2_tx_count as u16, @@ -179,10 +200,16 @@ impl From for L1BatchHeader { bloom: H2048::from_slice(&l1_batch.bloom), used_contract_hashes: serde_json::from_value(l1_batch.used_contract_hashes) .expect("invalid value for used_contract_hashes in the DB"), + base_fee_per_gas: l1_batch + .base_fee_per_gas + .to_u64() + .expect("base_fee_per_gas should fit in u64"), base_system_contracts_hashes: convert_base_system_contracts_hashes( l1_batch.bootloader_code_hash, l1_batch.default_aa_code_hash, ), + l1_gas_price: l1_batch.l1_gas_price as u64, + l2_fair_gas_price: l1_batch.l2_fair_gas_price as u64, system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(), protocol_version: l1_batch .protocol_version @@ -348,58 +375,61 @@ pub struct StorageBlockDetails { pub l2_fair_gas_price: i64, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, - pub fee_account_address: Vec, + pub fee_account_address: Option>, // May be None if the block is not yet sealed pub protocol_version: Option, } -impl From for api::BlockDetails { - fn from(details: StorageBlockDetails) -> Self { - let status = if details.number == 0 || 
details.execute_tx_hash.is_some() { +impl StorageBlockDetails { + pub(crate) fn into_block_details(self, current_operator_address: Address) -> api::BlockDetails { + let status = if self.number == 0 || self.execute_tx_hash.is_some() { api::BlockStatus::Verified } else { api::BlockStatus::Sealed }; let base = api::BlockDetailsBase { - timestamp: details.timestamp as u64, - l1_tx_count: details.l1_tx_count as usize, - l2_tx_count: details.l2_tx_count as usize, + timestamp: self.timestamp as u64, + l1_tx_count: self.l1_tx_count as usize, + l2_tx_count: self.l2_tx_count as usize, status, - root_hash: details.root_hash.as_deref().map(H256::from_slice), - commit_tx_hash: details + root_hash: self.root_hash.as_deref().map(H256::from_slice), + commit_tx_hash: self .commit_tx_hash .as_deref() .map(|hash| H256::from_str(hash).expect("Incorrect commit_tx hash")), - committed_at: details + committed_at: self .committed_at .map(|committed_at| DateTime::from_naive_utc_and_offset(committed_at, Utc)), - prove_tx_hash: details + prove_tx_hash: self .prove_tx_hash .as_deref() .map(|hash| H256::from_str(hash).expect("Incorrect prove_tx hash")), - proven_at: details + proven_at: self .proven_at .map(|proven_at| DateTime::::from_naive_utc_and_offset(proven_at, Utc)), - execute_tx_hash: details + execute_tx_hash: self .execute_tx_hash .as_deref() .map(|hash| H256::from_str(hash).expect("Incorrect execute_tx hash")), - executed_at: details + executed_at: self .executed_at .map(|executed_at| DateTime::::from_naive_utc_and_offset(executed_at, Utc)), - l1_gas_price: details.l1_gas_price as u64, - l2_fair_gas_price: details.l2_fair_gas_price as u64, + l1_gas_price: self.l1_gas_price as u64, + l2_fair_gas_price: self.l2_fair_gas_price as u64, base_system_contracts_hashes: convert_base_system_contracts_hashes( - details.bootloader_code_hash, - details.default_aa_code_hash, + self.bootloader_code_hash, + self.default_aa_code_hash, ), }; api::BlockDetails { base, - number: 
MiniblockNumber(details.number as u32), - l1_batch_number: L1BatchNumber(details.l1_batch_number as u32), - operator_address: Address::from_slice(&details.fee_account_address), - protocol_version: details + number: MiniblockNumber(self.number as u32), + l1_batch_number: L1BatchNumber(self.l1_batch_number as u32), + operator_address: self + .fee_account_address + .map(|fee_account_address| Address::from_slice(&fee_account_address)) + .unwrap_or(current_operator_address), + protocol_version: self .protocol_version .map(|v| (v as u16).try_into().unwrap()), } @@ -480,7 +510,6 @@ pub struct StorageMiniblockHeader { pub hash: Vec, pub l1_tx_count: i32, pub l2_tx_count: i32, - pub fee_account_address: Vec, pub base_fee_per_gas: BigDecimal, pub l1_gas_price: i64, // L1 gas price assumed in the corresponding batch @@ -530,7 +559,6 @@ impl From for MiniblockHeader { hash: H256::from_slice(&row.hash), l1_tx_count: row.l1_tx_count as u16, l2_tx_count: row.l2_tx_count as u16, - fee_account_address: Address::from_slice(&row.fee_account_address), base_fee_per_gas: row.base_fee_per_gas.to_u64().unwrap(), batch_fee_input: fee_input, base_system_contracts_hashes: convert_base_system_contracts_hashes( diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index db1487a4548..2836d2820d8 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -19,7 +19,7 @@ pub(crate) struct StorageSyncBlock { pub fair_pubdata_price: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, - pub fee_account_address: Vec, + pub fee_account_address: Option>, // May be None if the block is not yet sealed pub protocol_version: i32, pub virtual_blocks: i64, pub hash: Vec, @@ -42,7 +42,7 @@ pub(crate) struct SyncBlock { pub l2_fair_gas_price: u64, pub fair_pubdata_price: Option, pub base_system_contracts_hashes: BaseSystemContractsHashes, - pub fee_account_address: Address, + pub fee_account_address: 
Option
, pub virtual_blocks: u32, pub hash: H256, pub protocol_version: ProtocolVersionId, @@ -85,7 +85,10 @@ impl TryFrom for SyncBlock { ) .context("default_aa_code_hash")?, }, - fee_account_address: parse_h160(&block.fee_account_address) + fee_account_address: block + .fee_account_address + .map(|a| parse_h160(&a)) + .transpose() .context("fee_account_address")?, virtual_blocks: block.virtual_blocks.try_into().context("virtual_blocks")?, hash: parse_h256(&block.hash).context("hash")?, @@ -98,7 +101,11 @@ impl TryFrom for SyncBlock { } impl SyncBlock { - pub(crate) fn into_api(self, transactions: Option>) -> en::SyncBlock { + pub(crate) fn into_api( + self, + current_operator_address: Address, + transactions: Option>, + ) -> en::SyncBlock { en::SyncBlock { number: self.number, l1_batch_number: self.l1_batch_number, @@ -108,7 +115,7 @@ impl SyncBlock { l2_fair_gas_price: self.l2_fair_gas_price, fair_pubdata_price: self.fair_pubdata_price, base_system_contracts_hashes: self.base_system_contracts_hashes, - operator_address: self.fee_account_address, + operator_address: self.fee_account_address.unwrap_or(current_operator_address), transactions, virtual_blocks: Some(self.virtual_blocks), hash: Some(self.hash), @@ -116,7 +123,11 @@ impl SyncBlock { } } - pub(crate) fn into_payload(self, transactions: Vec) -> Payload { + pub(crate) fn into_payload( + self, + current_operator_address: Address, + transactions: Vec, + ) -> Payload { Payload { protocol_version: self.protocol_version, hash: self.hash, @@ -126,7 +137,7 @@ impl SyncBlock { l2_fair_gas_price: self.l2_fair_gas_price, fair_pubdata_price: self.fair_pubdata_price, virtual_blocks: self.virtual_blocks, - operator_address: self.fee_account_address, + operator_address: self.fee_account_address.unwrap_or(current_operator_address), transactions, last_in_batch: self.last_in_batch, } diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index 066236eebe6..c287bfbde08 100644 --- 
a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -790,12 +790,14 @@ mod tests { use crate::{tests::create_miniblock_header, ConnectionPool}; async fn insert_miniblock(conn: &mut StorageProcessor<'_>, number: u32, logs: Vec) { - let header = L1BatchHeader::new( + let mut header = L1BatchHeader::new( L1BatchNumber(number), 0, + Address::default(), BaseSystemContractsHashes::default(), ProtocolVersionId::default(), ); + header.is_finished = true; conn.blocks_dal() .insert_mock_l1_batch(&header) .await diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index 312c46acba2..e2b48ebb5f6 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -278,6 +278,7 @@ mod tests { let l1_batch_header = L1BatchHeader::new( L1BatchNumber(0), 0, + Address::repeat_byte(0x42), Default::default(), ProtocolVersionId::latest(), ); @@ -381,6 +382,7 @@ mod tests { let l1_batch_header = L1BatchHeader::new( snapshot_recovery.l1_batch_number + 1, 100, + Address::repeat_byte(0x42), Default::default(), ProtocolVersionId::latest(), ); diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 284ce317555..3a95b02fa8a 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -1,4 +1,4 @@ -use zksync_types::{api::en, MiniblockNumber}; +use zksync_types::{api::en, Address, MiniblockNumber}; use crate::{ instrument::InstrumentExt, @@ -49,9 +49,10 @@ impl SyncDal<'_, '_> { miniblocks.virtual_blocks, miniblocks.hash, miniblocks.protocol_version AS "protocol_version!", - miniblocks.fee_account_address AS "fee_account_address!" + l1_batches.fee_account_address AS "fee_account_address?" 
FROM miniblocks + LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number WHERE miniblocks.number = $1 "#, @@ -64,20 +65,13 @@ impl SyncDal<'_, '_> { else { return Ok(None); }; - - let mut block = SyncBlock::try_from(block)?; - // FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration - #[allow(deprecated)] - self.storage - .blocks_dal() - .maybe_load_fee_address(&mut block.fee_account_address, block.number) - .await?; - Ok(Some(block)) + Ok(Some(block.try_into()?)) } pub async fn sync_block( &mut self, block_number: MiniblockNumber, + current_operator_address: Address, include_transactions: bool, ) -> anyhow::Result> { let _latency = MethodLatency::new("sync_dal_sync_block"); @@ -85,25 +79,24 @@ impl SyncDal<'_, '_> { return Ok(None); }; let transactions = if include_transactions { - let transactions = self - .storage - .transactions_web3_dal() - .get_raw_miniblock_transactions(block_number) - .await?; - Some(transactions) + Some( + self.storage + .transactions_web3_dal() + .get_raw_miniblock_transactions(block_number) + .await?, + ) } else { None }; - Ok(Some(block.into_api(transactions))) + Ok(Some(block.into_api(current_operator_address, transactions))) } } #[cfg(test)] mod tests { use zksync_types::{ - block::{L1BatchHeader, MiniblockHeader}, - fee::TransactionExecutionMetrics, - Address, L1BatchNumber, ProtocolVersion, ProtocolVersionId, Transaction, + block::L1BatchHeader, fee::TransactionExecutionMetrics, L1BatchNumber, ProtocolVersion, + ProtocolVersionId, Transaction, }; use super::*; @@ -128,6 +121,7 @@ mod tests { let mut l1_batch_header = L1BatchHeader::new( L1BatchNumber(0), 0, + Address::repeat_byte(0x42), Default::default(), ProtocolVersionId::latest(), ); @@ -140,18 +134,16 @@ mod tests { .await .unwrap(); + let operator_address = Address::repeat_byte(1); assert!(conn .sync_dal() - .sync_block(MiniblockNumber(1), false) + .sync_block(MiniblockNumber(1), operator_address, false) .await .unwrap() .is_none()); // 
Insert another block in the store. - let miniblock_header = MiniblockHeader { - fee_account_address: Address::repeat_byte(0x42), - ..create_miniblock_header(1) - }; + let miniblock_header = create_miniblock_header(1); let tx = mock_l2_transaction(); conn.transactions_dal() .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) @@ -170,7 +162,7 @@ mod tests { let block = conn .sync_dal() - .sync_block(MiniblockNumber(1), false) + .sync_block(MiniblockNumber(1), operator_address, false) .await .unwrap() .expect("no sync block"); @@ -194,12 +186,12 @@ mod tests { block.l2_fair_gas_price, miniblock_header.batch_fee_input.fair_l2_gas_price() ); - assert_eq!(block.operator_address, miniblock_header.fee_account_address); + assert_eq!(block.operator_address, operator_address); assert!(block.transactions.is_none()); let block = conn .sync_dal() - .sync_block(MiniblockNumber(1), true) + .sync_block(MiniblockNumber(1), operator_address, true) .await .unwrap() .expect("no sync block"); @@ -219,12 +211,12 @@ mod tests { let block = conn .sync_dal() - .sync_block(MiniblockNumber(1), true) + .sync_block(MiniblockNumber(1), operator_address, true) .await .unwrap() .expect("no sync block"); assert_eq!(block.l1_batch_number, L1BatchNumber(1)); assert!(block.last_in_batch); - assert_eq!(block.operator_address, miniblock_header.fee_account_address); + assert_eq!(block.operator_address, l1_batch_header.fee_account_address); } } diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 8094f37216c..378ba3435d3 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -36,7 +36,6 @@ pub(crate) fn create_miniblock_header(number: u32) -> MiniblockHeader { hash: MiniblockHasher::new(number, 0, H256::zero()).finalize(protocol_version), l1_tx_count: 0, l2_tx_count: 0, - fee_account_address: Address::default(), gas_per_pubdata_limit: 100, base_fee_per_gas: 100, batch_fee_input: BatchFeeInput::l1_pegged(100, 100), diff --git 
a/core/lib/multivm/src/interface/types/inputs/l1_batch_env.rs b/core/lib/multivm/src/interface/types/inputs/l1_batch_env.rs index 1258e6b472f..3af7bcd3e05 100644 --- a/core/lib/multivm/src/interface/types/inputs/l1_batch_env.rs +++ b/core/lib/multivm/src/interface/types/inputs/l1_batch_env.rs @@ -2,11 +2,7 @@ use zksync_types::{fee_model::BatchFeeInput, Address, L1BatchNumber, H256}; use super::L2BlockEnv; -/// Unique params for each L1 batch. -/// -/// Eventually, most of these parameters (`l1_gas_price`, `fair_l2_gas_price`, `fee_account`, -/// `enforced_base_fee`) will be moved to [`L2BlockEnv`]. For now, the VM doesn't support changing -/// them in the middle of execution; that's why these params are specified here. +/// Unique params for each batch #[derive(Debug, Clone)] pub struct L1BatchEnv { // If previous batch hash is None, then this is the first batch diff --git a/core/lib/snapshots_applier/src/tests.rs b/core/lib/snapshots_applier/src/tests.rs index 0859c9e59b6..12495a4a538 100644 --- a/core/lib/snapshots_applier/src/tests.rs +++ b/core/lib/snapshots_applier/src/tests.rs @@ -64,7 +64,13 @@ mod utils { root_hash: H256, ) -> L1BatchWithMetadata { L1BatchWithMetadata { - header: L1BatchHeader::new(l1_batch_number, 0, Default::default(), Default::default()), + header: L1BatchHeader::new( + l1_batch_number, + 0, + Default::default(), + Default::default(), + Default::default(), + ), metadata: L1BatchMetadata { root_hash, rollup_last_leaf_index: 0, diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs index 9c4fca8285e..632400d6811 100644 --- a/core/lib/state/src/test_utils.rs +++ b/core/lib/state/src/test_utils.rs @@ -77,7 +77,6 @@ pub(crate) async fn create_miniblock( hash: H256::from_low_u64_be(u64::from(miniblock_number.0)), l1_tx_count: 0, l2_tx_count: 0, - fee_account_address: Address::default(), base_fee_per_gas: 0, batch_fee_input: Default::default(), gas_per_pubdata_limit: 0, @@ -102,7 +101,14 @@ pub(crate) async fn 
create_l1_batch( l1_batch_number: L1BatchNumber, logs_for_initial_writes: &[StorageLog], ) { - let header = L1BatchHeader::new(l1_batch_number, 0, Default::default(), Default::default()); + let mut header = L1BatchHeader::new( + l1_batch_number, + 0, + Address::default(), + Default::default(), + Default::default(), + ); + header.is_finished = true; conn.blocks_dal() .insert_mock_l1_batch(&header) .await diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 950c6f4e268..48765e27e0f 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -1,7 +1,7 @@ use std::{fmt, ops}; use serde::{Deserialize, Serialize}; -use zksync_basic_types::{Address, H2048, H256, U256}; +use zksync_basic_types::{H2048, H256, U256}; use zksync_contracts::BaseSystemContractsHashes; use zksync_system_constants::SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER; use zksync_utils::concat_and_hash; @@ -11,7 +11,7 @@ use crate::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, priority_op_onchain_data::PriorityOpOnchainData, web3::signing::keccak256, - AccountTreeId, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction, + AccountTreeId, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction, }; /// Represents a successfully deployed smart contract. @@ -35,8 +35,12 @@ impl DeployedContract { pub struct L1BatchHeader { /// Numeric ID of the block. Starts from 1, 0 block is considered genesis block and has no transactions. pub number: L1BatchNumber, + /// Whether block is sealed or not (doesn't correspond to committing/verifying it on the L1). + pub is_finished: bool, /// Timestamp when block was first created. 
pub timestamp: u64, + /// Address of the fee account that was used when block was created + pub fee_account_address: Address, /// Total number of processed priority operations in the block pub l1_tx_count: u16, /// Total number of processed txs that was requested offchain @@ -51,6 +55,12 @@ pub struct L1BatchHeader { pub bloom: H2048, /// Hashes of contracts used this block pub used_contract_hashes: Vec, + /// The EIP1559 base_fee used in this block. + pub base_fee_per_gas: u64, + /// The assumed L1 gas price within the block. + pub l1_gas_price: u64, + /// The L2 gas price that the operator agrees on. + pub l2_fair_gas_price: u64, pub base_system_contracts_hashes: BaseSystemContractsHashes, /// System logs are those emitted as part of the Vm execution. pub system_logs: Vec, @@ -67,7 +77,6 @@ pub struct MiniblockHeader { pub hash: H256, pub l1_tx_count: u16, pub l2_tx_count: u16, - pub fee_account_address: Address, pub base_fee_per_gas: u64, // Min wei per gas that txs in this miniblock need to have. 
     pub batch_fee_input: BatchFeeInput,
@@ -92,12 +101,15 @@ impl L1BatchHeader {
     pub fn new(
         number: L1BatchNumber,
         timestamp: u64,
+        fee_account_address: Address,
         base_system_contracts_hashes: BaseSystemContractsHashes,
         protocol_version: ProtocolVersionId,
     ) -> L1BatchHeader {
         Self {
             number,
+            is_finished: false,
             timestamp,
+            fee_account_address,
             l1_tx_count: 0,
             l2_tx_count: 0,
             priority_ops_onchain_data: vec![],
@@ -105,6 +117,9 @@ impl L1BatchHeader {
             l2_to_l1_messages: vec![],
             bloom: H2048::default(),
             used_contract_hashes: vec![],
+            base_fee_per_gas: 0,
+            l1_gas_price: 0,
+            l2_fair_gas_price: 0,
             base_system_contracts_hashes,
             system_logs: vec![],
             protocol_version: Some(protocol_version),
diff --git a/core/lib/vm_utils/src/lib.rs b/core/lib/vm_utils/src/lib.rs
index a04ad45f748..62bb8c2b62e 100644
--- a/core/lib/vm_utils/src/lib.rs
+++ b/core/lib/vm_utils/src/lib.rs
@@ -41,10 +41,10 @@ pub fn create_vm(
         .block_on(
             connection
                 .blocks_dal()
-                .get_fee_address_for_miniblock(miniblock_number + 1),
+                .get_fee_address_for_l1_batch(l1_batch_number),
         )?
         .with_context(|| {
-            format!("l1_batch_number {l1_batch_number:?} must have fee_account_address")
+            format!("l1_batch_number {l1_batch_number:?} must have fee_account_address")
         })?;
 
     // In the state keeper, this value is used to reject execution.
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs index fdef4f4a36a..92781ae8f68 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs @@ -31,7 +31,11 @@ impl EnNamespace { .map_err(|err| internal_error(METHOD_NAME, err))?; storage .sync_dal() - .sync_block(block_number, include_transactions) + .sync_block( + block_number, + self.state.tx_sender.0.sender_config.fee_account_addr, + include_transactions, + ) .await .map_err(|err| internal_error(METHOD_NAME, err)) } diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index 269250c295d..86f879a8737 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -444,7 +444,10 @@ impl ZksNamespace { let mut storage = self.access_storage(METHOD_NAME).await?; let block_details = storage .blocks_web3_dal() - .get_block_details(block_number) + .get_block_details( + block_number, + self.state.tx_sender.0.sender_config.fee_account_addr, + ) .await .map_err(|err| internal_error(METHOD_NAME, err)); diff --git a/core/lib/zksync_core/src/consensus/mod.rs b/core/lib/zksync_core/src/consensus/mod.rs index e0681502503..db98b061547 100644 --- a/core/lib/zksync_core/src/consensus/mod.rs +++ b/core/lib/zksync_core/src/consensus/mod.rs @@ -1,12 +1,11 @@ //! Consensus-related functionality. 
- #![allow(clippy::redundant_locals)] - use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_consensus_executor as executor; use zksync_consensus_roles::validator; use zksync_consensus_storage::BlockStore; use zksync_dal::ConnectionPool; +use zksync_types::Address; use self::storage::Store; use crate::sync_layer::{sync_action::ActionQueueSender, MainNodeClient, SyncState}; @@ -24,6 +23,7 @@ mod tests; pub struct MainNodeConfig { pub executor: executor::Config, pub validator: executor::ValidatorConfig, + pub operator_address: Address, } impl MainNodeConfig { @@ -36,7 +36,7 @@ impl MainNodeConfig { "currently only consensus with just 1 validator is supported" ); scope::run!(&ctx, |ctx, s| async { - let store = Store::new(pool); + let store = Store::new(pool, self.operator_address); let mut block_store = store.clone().into_block_store(); block_store .try_init_genesis(ctx, &self.validator.key) @@ -88,6 +88,7 @@ pub async fn run_main_node_state_fetcher( #[derive(Debug, Clone)] pub struct FetcherConfig { pub executor: executor::Config, + pub operator_address: Address, } impl FetcherConfig { @@ -99,7 +100,7 @@ impl FetcherConfig { actions: ActionQueueSender, ) -> anyhow::Result<()> { scope::run!(ctx, |ctx, s| async { - let store = Store::new(pool); + let store = Store::new(pool, self.operator_address); let mut block_store = store.clone().into_block_store(); block_store .set_actions_queue(ctx, actions) diff --git a/core/lib/zksync_core/src/consensus/storage/mod.rs b/core/lib/zksync_core/src/consensus/storage/mod.rs index bac1c54f3e6..bafcf3d235c 100644 --- a/core/lib/zksync_core/src/consensus/storage/mod.rs +++ b/core/lib/zksync_core/src/consensus/storage/mod.rs @@ -1,12 +1,11 @@ //! Storage implementation based on DAL. 
- use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, sync, time}; use zksync_consensus_bft::PayloadManager; use zksync_consensus_roles::validator; use zksync_consensus_storage::{BlockStoreState, PersistentBlockStore, ReplicaState, ReplicaStore}; use zksync_dal::{consensus_dal::Payload, ConnectionPool}; -use zksync_types::MiniblockNumber; +use zksync_types::{Address, MiniblockNumber}; #[cfg(test)] mod testonly; @@ -62,9 +61,14 @@ impl<'a> CtxStorage<'a> { &mut self, ctx: &ctx::Ctx, number: validator::BlockNumber, + operator_address: Address, ) -> ctx::Result> { Ok(ctx - .wait(self.0.consensus_dal().block_payload(number)) + .wait( + self.0 + .consensus_dal() + .block_payload(number, operator_address), + ) .await??) } @@ -104,9 +108,14 @@ impl<'a> CtxStorage<'a> { &mut self, ctx: &ctx::Ctx, cert: &validator::CommitQC, + operator_address: Address, ) -> ctx::Result<()> { Ok(ctx - .wait(self.0.consensus_dal().insert_certificate(cert)) + .wait( + self.0 + .consensus_dal() + .insert_certificate(cert, operator_address), + ) .await??) } @@ -183,6 +192,7 @@ impl Cursor { #[derive(Clone, Debug)] pub(super) struct Store { pool: ConnectionPool, + operator_address: Address, } /// Wrapper of `ConnectionPool` implementing `PersistentBlockStore`. @@ -195,8 +205,11 @@ pub(super) struct BlockStore { impl Store { /// Creates a `Store`. `pool` should have multiple connections to work efficiently. - pub fn new(pool: ConnectionPool) -> Self { - Self { pool } + pub fn new(pool: ConnectionPool, operator_address: Address) -> Self { + Self { + pool, + operator_address, + } } /// Converts `Store` into a `BlockStore`. @@ -238,7 +251,7 @@ impl BlockStore { return Ok(()); } let payload = txn - .payload(ctx, number) + .payload(ctx, number, self.inner.operator_address) .await .wrap("payload()")? 
.context("miniblock disappeared")?; @@ -251,9 +264,13 @@ impl BlockStore { .block_number(number) .payload(payload.encode()) .push(); - txn.insert_certificate(ctx, &genesis.blocks[0].justification) - .await - .wrap("insert_certificate()")?; + txn.insert_certificate( + ctx, + &genesis.blocks[0].justification, + self.inner.operator_address, + ) + .await + .wrap("insert_certificate()")?; txn.commit(ctx).await.wrap("commit()") } @@ -308,7 +325,7 @@ impl PersistentBlockStore for BlockStore { .wrap("certificate()")? .context("not found")?; let payload = storage - .payload(ctx, number) + .payload(ctx, number, self.inner.operator_address) .await .wrap("payload()")? .context("miniblock disappeared from storage")?; @@ -347,7 +364,7 @@ impl PersistentBlockStore for BlockStore { .wrap("last_miniblock_number()")?; if number >= block.header().number { storage - .insert_certificate(ctx, &block.justification) + .insert_certificate(ctx, &block.justification, self.inner.operator_address) .await .wrap("insert_certificate()")?; return Ok(()); @@ -393,7 +410,11 @@ impl PayloadManager for Store { drop(storage); loop { let mut storage = CtxStorage::access(ctx, &self.pool).await.wrap("access()")?; - if let Some(payload) = storage.payload(ctx, block_number).await.wrap("payload()")? { + if let Some(payload) = storage + .payload(ctx, block_number, self.operator_address) + .await + .wrap("payload()")? 
+ { let encoded_payload = payload.encode(); if encoded_payload.0.len() > 1 << 20 { tracing::warn!( diff --git a/core/lib/zksync_core/src/consensus/testonly.rs b/core/lib/zksync_core/src/consensus/testonly.rs index 88a23d17ba3..e38bd5142a5 100644 --- a/core/lib/zksync_core/src/consensus/testonly.rs +++ b/core/lib/zksync_core/src/consensus/testonly.rs @@ -178,6 +178,7 @@ pub(super) struct StateKeeper { fee_per_gas: u64, gas_per_pubdata: u32, + operator_address: Address, pub(super) actions_sender: ActionQueueSender, pub(super) pool: ConnectionPool, @@ -186,13 +187,17 @@ pub(super) struct StateKeeper { /// Fake StateKeeper task to be executed in the background. pub(super) struct StateKeeperRunner { actions_queue: ActionQueue, + operator_address: Address, pool: ConnectionPool, } impl StateKeeper { /// Constructs and initializes a new `StateKeeper`. /// Caller has to run `StateKeeperRunner.run()` task in the background. - pub async fn new(pool: ConnectionPool) -> anyhow::Result<(Self, StateKeeperRunner)> { + pub async fn new( + pool: ConnectionPool, + operator_address: Address, + ) -> anyhow::Result<(Self, StateKeeperRunner)> { // ensure genesis let mut storage = pool.access_storage().await.context("access_storage()")?; if storage @@ -201,7 +206,9 @@ impl StateKeeper { .await .context("is_genesis_needed()")? 
{ - ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + let mut params = GenesisParams::mock(); + params.first_validator = operator_address; + ensure_genesis_state(&mut storage, L2ChainId::default(), ¶ms) .await .context("ensure_genesis_state()")?; } @@ -233,10 +240,12 @@ impl StateKeeper { batch_sealed: !pending_batch, fee_per_gas: 10, gas_per_pubdata: 100, + operator_address, actions_sender, pool: pool.clone(), }, StateKeeperRunner { + operator_address, actions_queue, pool: pool.clone(), }, @@ -255,7 +264,7 @@ impl StateKeeper { l1_gas_price: 2, l2_fair_gas_price: 3, fair_pubdata_price: Some(24), - operator_address: GenesisParams::mock().first_validator, + operator_address: self.operator_address, protocol_version: ProtocolVersionId::latest(), first_miniblock_info: (self.last_block, 1), } @@ -312,7 +321,7 @@ impl StateKeeper { /// Creates a new `BlockStore` for the underlying `ConnectionPool`. pub fn store(&self) -> BlockStore { - Store::new(self.pool.clone()).into_block_store() + Store::new(self.pool.clone(), self.operator_address).into_block_store() } // Wait for all pushed miniblocks to be produced. @@ -322,7 +331,7 @@ impl StateKeeper { loop { let mut storage = CtxStorage::access(ctx, &self.pool).await.wrap("access()")?; if storage - .payload(ctx, self.last_block()) + .payload(ctx, self.last_block(), self.operator_address) .await .wrap("storage.payload()")? 
.is_some() @@ -381,7 +390,7 @@ impl StateKeeperRunner { self.actions_queue, SyncState::new(), Box::::default(), - Address::repeat_byte(11), + self.operator_address, u32::MAX, L2ChainId::default(), ) diff --git a/core/lib/zksync_core/src/consensus/tests.rs b/core/lib/zksync_core/src/consensus/tests.rs index 7d3ed8f0df8..f4acfd83fc5 100644 --- a/core/lib/zksync_core/src/consensus/tests.rs +++ b/core/lib/zksync_core/src/consensus/tests.rs @@ -9,10 +9,13 @@ use zksync_consensus_storage::PersistentBlockStore as _; use zksync_consensus_utils::no_copy::NoCopy; use zksync_dal::{connection::TestTemplate, ConnectionPool}; use zksync_protobuf::testonly::test_encode_random; +use zksync_types::Address; use super::*; use crate::consensus::storage::CtxStorage; +const OPERATOR_ADDRESS: Address = Address::repeat_byte(17); + async fn make_blocks( ctx: &ctx::Ctx, pool: &ConnectionPool, @@ -23,7 +26,7 @@ async fn make_blocks( let mut blocks: Vec = vec![]; while !range.is_empty() { let payload = storage - .payload(ctx, range.start) + .payload(ctx, range.start, OPERATOR_ADDRESS) .await .wrap(range.start)? .context("payload not found")? @@ -56,7 +59,7 @@ async fn test_validator_block_store() { // Fetch a suffix of blocks that we will generate (fake) certs for. let want = scope::run!(ctx, |ctx, s| async { // Start state keeper. - let (mut sk, runner) = testonly::StateKeeper::new(pool.clone()).await?; + let (mut sk, runner) = testonly::StateKeeper::new(pool.clone(), OPERATOR_ADDRESS).await?; s.spawn_bg(runner.run(ctx)); sk.push_random_blocks(rng, 10).await; sk.wait_for_miniblocks(ctx).await?; @@ -73,7 +76,7 @@ async fn test_validator_block_store() { // Insert blocks one by one and check the storage state. 
for (i, block) in want.iter().enumerate() { - let store = Store::new(pool.clone()).into_block_store(); + let store = Store::new(pool.clone(), OPERATOR_ADDRESS).into_block_store(); store.store_next_block(ctx, block).await.unwrap(); assert_eq!(want[..i + 1], storage::testonly::dump(ctx, &store).await); } @@ -91,7 +94,7 @@ async fn test_validator() { scope::run!(ctx, |ctx, s| async { // Start state keeper. let pool = ConnectionPool::test_pool().await; - let (mut sk, runner) = testonly::StateKeeper::new(pool).await?; + let (mut sk, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; s.spawn_bg(runner.run(ctx)); // Populate storage with a bunch of blocks. @@ -111,6 +114,7 @@ async fn test_validator() { let cfg = MainNodeConfig { executor: cfg.node.clone(), validator: cfg.validator.clone(), + operator_address: OPERATOR_ADDRESS, }; s.spawn_bg(cfg.run(ctx, sk.pool.clone())); sk.store() @@ -167,6 +171,7 @@ async fn test_fetcher() { let mut cfg = MainNodeConfig { executor: cfg.node, validator: cfg.validator, + operator_address: OPERATOR_ADDRESS, }; let mut fetcher_cfgs = vec![connect_full_node(rng, &mut cfg.executor)]; while fetcher_cfgs.len() < FETCHERS { @@ -175,13 +180,16 @@ async fn test_fetcher() { } let fetcher_cfgs: Vec<_> = fetcher_cfgs .into_iter() - .map(|executor| FetcherConfig { executor }) + .map(|executor| FetcherConfig { + executor, + operator_address: OPERATOR_ADDRESS, + }) .collect(); // Create an initial database snapshot, which contains a cert for genesis block. let pool = scope::run!(ctx, |ctx, s| async { let pool = ConnectionPool::test_pool().await; - let (mut sk, runner) = testonly::StateKeeper::new(pool).await?; + let (mut sk, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(cfg.clone().run(ctx, sk.pool.clone())); sk.push_random_blocks(rng, 5).await; @@ -198,7 +206,7 @@ async fn test_fetcher() { scope::run!(ctx, |ctx, s| async { // Run validator. 
let pool = template.create_db().await?; - let (mut validator, runner) = testonly::StateKeeper::new(pool).await?; + let (mut validator, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; s.spawn_bg(async { runner .run(ctx) @@ -213,7 +221,7 @@ async fn test_fetcher() { for (i, cfg) in fetcher_cfgs.into_iter().enumerate() { let i = NoCopy::from(i); let pool = template.create_db().await?; - let (fetcher, runner) = testonly::StateKeeper::new(pool).await?; + let (fetcher, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; fetchers.push(fetcher.store()); s.spawn_bg(async { let i = i; @@ -258,16 +266,18 @@ async fn test_fetcher_backfill_certs() { let mut cfg = MainNodeConfig { executor: cfg.node, validator: cfg.validator, + operator_address: OPERATOR_ADDRESS, }; let fetcher_cfg = FetcherConfig { executor: connect_full_node(rng, &mut cfg.executor), + operator_address: OPERATOR_ADDRESS, }; // Create an initial database snapshot, which contains some blocks: some with certs, some // without. let pool = scope::run!(ctx, |ctx, s| async { let pool = ConnectionPool::test_pool().await; - let (mut sk, runner) = testonly::StateKeeper::new(pool).await?; + let (mut sk, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; s.spawn_bg(runner.run(ctx)); // Some blocks with certs. @@ -294,13 +304,13 @@ async fn test_fetcher_backfill_certs() { scope::run!(ctx, |ctx, s| async { // Run validator. let pool = template.create_db().await?; - let (mut validator, runner) = testonly::StateKeeper::new(pool).await?; + let (mut validator, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(cfg.run(ctx, validator.pool.clone())); // Run fetcher. 
let pool = template.create_db().await?; - let (fetcher, runner) = testonly::StateKeeper::new(pool).await?; + let (fetcher, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; let fetcher_store = fetcher.store(); s.spawn_bg(runner.run(ctx)); s.spawn_bg(fetcher_cfg.run(ctx, fetcher.pool, fetcher.actions_sender)); diff --git a/core/lib/zksync_core/src/genesis.rs b/core/lib/zksync_core/src/genesis.rs index 9d9021c7c15..b524225cb42 100644 --- a/core/lib/zksync_core/src/genesis.rs +++ b/core/lib/zksync_core/src/genesis.rs @@ -302,12 +302,14 @@ pub(crate) async fn create_genesis_l1_batch( tx: None, }; - let genesis_l1_batch_header = L1BatchHeader::new( + let mut genesis_l1_batch_header = L1BatchHeader::new( L1BatchNumber(0), 0, + first_validator_address, base_system_contracts.hashes(), protocol_version, ); + genesis_l1_batch_header.is_finished = true; let genesis_miniblock_header = MiniblockHeader { number: MiniblockNumber(0), @@ -315,7 +317,6 @@ pub(crate) async fn create_genesis_l1_batch( hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), l1_tx_count: 0, l2_tx_count: 0, - fee_account_address: first_validator_address, base_fee_per_gas: 0, gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(protocol_version.into()), batch_fee_input: BatchFeeInput::l1_pegged(0, 0), diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index cd1cb7b7f0c..89bbf7f818e 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -753,7 +753,7 @@ async fn add_state_keeper_to_task_futures(mut left: u32, mut right: u32, mut f: F) -> Result +where + F: FnMut(u32) -> Fut, + Fut: Future>, +{ + while left + 1 < right { + let middle = (left + right) / 2; + assert!(middle < right); // middle <= (right - 2 + right) / 2 = right - 1 + + if f(middle).await? 
{ + left = middle; + } else { + right = middle; + } + } + Ok(left) +} diff --git a/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs b/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs deleted file mode 100644 index aa1256a70ff..00000000000 --- a/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs +++ /dev/null @@ -1,340 +0,0 @@ -//! Temporary module for migrating fee addresses from L1 batches to miniblocks. - -// FIXME (PLA-728): remove after 2nd phase of `fee_account_address` migration - -use std::time::{Duration, Instant}; - -use anyhow::Context as _; -use tokio::sync::watch; -use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_types::MiniblockNumber; - -/// Runs the migration for pending miniblocks. -pub(crate) async fn migrate_pending_miniblocks(storage: &mut StorageProcessor<'_>) { - let started_at = Instant::now(); - tracing::info!("Started migrating `fee_account_address` for pending miniblocks"); - - #[allow(deprecated)] - let l1_batches_have_fee_account_address = storage - .blocks_dal() - .check_l1_batches_have_fee_account_address() - .await - .expect("Failed getting metadata for l1_batches table"); - if !l1_batches_have_fee_account_address { - tracing::info!("`l1_batches.fee_account_address` column is removed; assuming that the migration is complete"); - return; - } - - #[allow(deprecated)] - let rows_affected = storage - .blocks_dal() - .copy_fee_account_address_for_pending_miniblocks() - .await - .expect("Failed migrating `fee_account_address` for pending miniblocks"); - let elapsed = started_at.elapsed(); - tracing::info!("Migrated `fee_account_address` for {rows_affected} miniblocks in {elapsed:?}"); -} - -/// Runs the migration for non-pending miniblocks. Should be run as a background task. 
-pub(crate) async fn migrate_miniblocks( - pool: ConnectionPool, - last_miniblock: MiniblockNumber, - stop_receiver: watch::Receiver, -) -> anyhow::Result<()> { - let MigrationOutput { - miniblocks_affected, - } = migrate_miniblocks_inner( - pool, - last_miniblock, - 100_000, - Duration::from_secs(1), - stop_receiver, - ) - .await?; - - tracing::info!("Finished fee address migration with {miniblocks_affected} affected miniblocks"); - Ok(()) -} - -#[derive(Debug, Default)] -struct MigrationOutput { - miniblocks_affected: u64, -} - -/// It's important for the `chunk_size` to be a constant; this ensures that each chunk is migrated atomically. -async fn migrate_miniblocks_inner( - pool: ConnectionPool, - last_miniblock: MiniblockNumber, - chunk_size: u32, - sleep_interval: Duration, - stop_receiver: watch::Receiver, -) -> anyhow::Result { - anyhow::ensure!(chunk_size > 0, "Chunk size must be positive"); - - let mut storage = pool.access_storage().await?; - #[allow(deprecated)] - let l1_batches_have_fee_account_address = storage - .blocks_dal() - .check_l1_batches_have_fee_account_address() - .await - .expect("Failed getting metadata for l1_batches table"); - drop(storage); - if !l1_batches_have_fee_account_address { - tracing::info!("`l1_batches.fee_account_address` column is removed; assuming that the migration is complete"); - return Ok(MigrationOutput::default()); - } - - let mut chunk_start = MiniblockNumber(0); - let mut miniblocks_affected = 0; - - tracing::info!( - "Migrating `fee_account_address` for miniblocks {chunk_start}..={last_miniblock} \ - in chunks of {chunk_size} miniblocks" - ); - while chunk_start <= last_miniblock { - let chunk_end = last_miniblock.min(chunk_start + chunk_size - 1); - let chunk = chunk_start..=chunk_end; - - let mut storage = pool.access_storage().await?; - let is_chunk_migrated = is_fee_address_migrated(&mut storage, chunk_start).await?; - - if is_chunk_migrated { - tracing::debug!("`fee_account_address` is migrated for chunk 
{chunk:?}"); - } else { - tracing::debug!("Migrating `fee_account_address` for miniblocks chunk {chunk:?}"); - - #[allow(deprecated)] - let rows_affected = storage - .blocks_dal() - .copy_fee_account_address_for_miniblocks(chunk.clone()) - .await - .with_context(|| format!("Failed migrating miniblocks chunk {chunk:?}"))?; - tracing::debug!("Migrated {rows_affected} miniblocks in chunk {chunk:?}"); - miniblocks_affected += rows_affected; - } - drop(storage); - - if *stop_receiver.borrow() { - tracing::info!("Stop signal received; fee address migration shutting down"); - return Ok(MigrationOutput { - miniblocks_affected, - }); - } - chunk_start = chunk_end + 1; - - if !is_chunk_migrated { - tokio::time::sleep(sleep_interval).await; - } - } - - Ok(MigrationOutput { - miniblocks_affected, - }) -} - -#[allow(deprecated)] -async fn is_fee_address_migrated( - storage: &mut StorageProcessor<'_>, - miniblock: MiniblockNumber, -) -> anyhow::Result { - storage - .blocks_dal() - .is_fee_address_migrated(miniblock) - .await - .with_context(|| format!("Failed getting fee address for miniblock #{miniblock}"))? 
- .with_context(|| format!("Miniblock #{miniblock} disappeared")) -} - -#[cfg(test)] -mod tests { - use test_casing::test_casing; - use zksync_contracts::BaseSystemContractsHashes; - use zksync_types::{ - block::L1BatchHeader, Address, L1BatchNumber, ProtocolVersion, ProtocolVersionId, - }; - - use super::*; - use crate::utils::testonly::create_miniblock; - - async fn prepare_storage(storage: &mut StorageProcessor<'_>) { - storage - .protocol_versions_dal() - .save_protocol_version_with_tx(ProtocolVersion::default()) - .await; - for number in 0..5 { - let miniblock = create_miniblock(number); - storage - .blocks_dal() - .insert_miniblock(&miniblock) - .await - .unwrap(); - - let l1_batch = L1BatchHeader::new( - L1BatchNumber(number), - number.into(), - BaseSystemContractsHashes::default(), - ProtocolVersionId::latest(), - ); - storage - .blocks_dal() - .insert_mock_l1_batch(&l1_batch) - .await - .unwrap(); - #[allow(deprecated)] - storage - .blocks_dal() - .set_l1_batch_fee_address( - l1_batch.number, - Address::from_low_u64_be(u64::from(number) + 1), - ) - .await - .unwrap(); - storage - .blocks_dal() - .mark_miniblocks_as_executed_in_l1_batch(l1_batch.number) - .await - .unwrap(); - } - } - - async fn assert_migration(storage: &mut StorageProcessor<'_>) { - for number in 0..5 { - assert!(is_fee_address_migrated(storage, MiniblockNumber(number)) - .await - .unwrap()); - - let fee_address = storage - .blocks_dal() - .get_fee_address_for_miniblock(MiniblockNumber(number)) - .await - .unwrap() - .expect("no fee address"); - let expected_address = Address::from_low_u64_be(u64::from(number) + 1); - assert_eq!(fee_address, expected_address); - } - } - - #[test_casing(3, [1, 2, 3])] - #[tokio::test] - async fn migration_basics(chunk_size: u32) { - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - prepare_storage(&mut storage).await; - drop(storage); - - let (_stop_sender, stop_receiver) = watch::channel(false); - 
let result = migrate_miniblocks_inner( - pool.clone(), - MiniblockNumber(4), - chunk_size, - Duration::ZERO, - stop_receiver.clone(), - ) - .await - .unwrap(); - - assert_eq!(result.miniblocks_affected, 5); - - // Check that all blocks are migrated. - let mut storage = pool.access_storage().await.unwrap(); - assert_migration(&mut storage).await; - drop(storage); - - // Check that migration can run again w/o returning an error, hanging up etc. - let result = migrate_miniblocks_inner( - pool.clone(), - MiniblockNumber(4), - chunk_size, - Duration::ZERO, - stop_receiver, - ) - .await - .unwrap(); - - assert_eq!(result.miniblocks_affected, 0); - } - - #[test_casing(3, [1, 2, 3])] - #[tokio::test] - async fn stopping_and_resuming_migration(chunk_size: u32) { - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - prepare_storage(&mut storage).await; - - let (_stop_sender, stop_receiver) = watch::channel(true); // signal stop right away - let result = migrate_miniblocks_inner( - pool.clone(), - MiniblockNumber(4), - chunk_size, - Duration::from_secs(1_000), - stop_receiver, - ) - .await - .unwrap(); - - // Migration should stop after a single chunk. - assert_eq!(result.miniblocks_affected, u64::from(chunk_size)); - - // Check that migration resumes from the same point. 
- let (_stop_sender, stop_receiver) = watch::channel(false); - let result = migrate_miniblocks_inner( - pool.clone(), - MiniblockNumber(4), - chunk_size, - Duration::ZERO, - stop_receiver, - ) - .await - .unwrap(); - - assert_eq!(result.miniblocks_affected, 5 - u64::from(chunk_size)); - assert_migration(&mut storage).await; - } - - #[test_casing(3, [1, 2, 3])] - #[tokio::test] - async fn new_blocks_added_during_migration(chunk_size: u32) { - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - prepare_storage(&mut storage).await; - - let (_stop_sender, stop_receiver) = watch::channel(true); // signal stop right away - let result = migrate_miniblocks_inner( - pool.clone(), - MiniblockNumber(4), - chunk_size, - Duration::from_secs(1_000), - stop_receiver, - ) - .await - .unwrap(); - - // Migration should stop after a single chunk. - assert_eq!(result.miniblocks_affected, u64::from(chunk_size)); - - // Insert a new miniblock to the storage with a defined fee account address. - let mut miniblock = create_miniblock(5); - miniblock.fee_account_address = Address::repeat_byte(1); - storage - .blocks_dal() - .insert_miniblock(&miniblock) - .await - .unwrap(); - - // Resume the migration. - let (_stop_sender, stop_receiver) = watch::channel(false); - let result = migrate_miniblocks_inner( - pool.clone(), - MiniblockNumber(5), - chunk_size, - Duration::ZERO, - stop_receiver, - ) - .await - .unwrap(); - - // The new miniblock should not be affected. 
- assert_eq!(result.miniblocks_affected, 5 - u64::from(chunk_size)); - assert_migration(&mut storage).await; - } -} diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index f2686011003..df124335b4b 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -29,8 +29,7 @@ use crate::{ extractors, io::{ common::{l1_batch_params, load_pending_batch, poll_iters}, - fee_address_migration, MiniblockParams, MiniblockSealerHandle, PendingBatchData, - StateKeeperIO, + MiniblockParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO, }, mempool_actor::l2_tx_filter, metrics::KEEPER_METRICS, @@ -431,7 +430,7 @@ impl MempoolIO { .await .unwrap() .expect("empty storage not supported"); // FIXME (PLA-703): handle empty storage - fee_address_migration::migrate_pending_miniblocks(&mut storage).await; + drop(storage); Self { diff --git a/core/lib/zksync_core/src/state_keeper/io/mod.rs b/core/lib/zksync_core/src/state_keeper/io/mod.rs index 16cc15e03b0..d1366858116 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mod.rs @@ -21,7 +21,6 @@ use super::{ }; pub(crate) mod common; -pub(crate) mod fee_address_migration; pub(crate) mod mempool; pub(crate) mod seal_logic; #[cfg(test)] diff --git a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs index 8725370ed1d..b2423365500 100644 --- a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs @@ -10,7 +10,7 @@ use chrono::Utc; use itertools::Itertools; use multivm::{ interface::{FinishedL1Batch, L1BatchEnv}, - utils::get_max_gas_per_pubdata_byte, + utils::{get_batch_base_fee, get_max_gas_per_pubdata_byte}, }; use vm_utils::storage::wait_for_prev_l1_batch_params; use zksync_dal::StorageProcessor; @@ -136,7 +136,9 @@ impl 
UpdatesManager { let l1_batch = L1BatchHeader { number: l1_batch_env.number, + is_finished: true, timestamp: l1_batch_env.timestamp, + fee_account_address: l1_batch_env.fee_account, priority_ops_onchain_data: self.l1_batch.priority_ops_onchain_data.clone(), l1_tx_count: l1_tx_count as u16, l2_tx_count: l2_tx_count as u16, @@ -144,6 +146,9 @@ impl UpdatesManager { l2_to_l1_messages, bloom: Default::default(), used_contract_hashes: finished_batch.final_execution_state.used_contract_hashes, + base_fee_per_gas: get_batch_base_fee(l1_batch_env, self.protocol_version().into()), + l1_gas_price: self.l1_gas_price(), + l2_fair_gas_price: self.fair_l2_gas_price(), base_system_contracts_hashes: self.base_system_contract_hashes(), protocol_version: Some(self.protocol_version()), system_logs: finished_batch.final_execution_state.system_logs, @@ -347,7 +352,6 @@ impl MiniblockSealCommand { hash: self.miniblock.get_miniblock_hash(), l1_tx_count: l1_tx_count as u16, l2_tx_count: l2_tx_count as u16, - fee_account_address: self.fee_account_address, base_fee_per_gas: self.base_fee_per_gas, batch_fee_input: self.fee_input, base_system_contracts_hashes: self.base_system_contracts_hashes, diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index 0dcd0408493..7ba6a12f53f 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -242,7 +242,6 @@ async fn processing_storage_logs_when_sealing_miniblock() { miniblock_number: MiniblockNumber(3), miniblock, first_tx_index: 0, - fee_account_address: Address::repeat_byte(0x23), fee_input: BatchFeeInput::PubdataIndependent(PubdataIndependentBatchFeeModelInput { l1_gas_price: 100, fair_l2_gas_price: 100, @@ -323,7 +322,6 @@ async fn processing_events_when_sealing_miniblock() { miniblock_number, miniblock, first_tx_index: 0, - fee_account_address: Address::repeat_byte(0x23), fee_input: 
BatchFeeInput::PubdataIndependent(PubdataIndependentBatchFeeModelInput { l1_gas_price: 100, fair_l2_gas_price: 100, @@ -421,6 +419,7 @@ async fn test_miniblock_and_l1_batch_processing( .unwrap() .expect("No L1 batch #1"); assert_eq!(l1_batch_header.l2_tx_count, 1); + assert!(l1_batch_header.is_finished); } #[tokio::test] diff --git a/core/lib/zksync_core/src/state_keeper/keeper.rs b/core/lib/zksync_core/src/state_keeper/keeper.rs index 14829c5cf61..38179ada7d9 100644 --- a/core/lib/zksync_core/src/state_keeper/keeper.rs +++ b/core/lib/zksync_core/src/state_keeper/keeper.rs @@ -1,13 +1,11 @@ use std::{ convert::Infallible, - future::{self, Future}, time::{Duration, Instant}, }; use anyhow::Context as _; use multivm::interface::{Halt, L1BatchEnv, SystemEnv}; use tokio::sync::watch; -use zksync_dal::ConnectionPool; use zksync_types::{ block::MiniblockExecutionData, l2::TransactionType, protocol_version::ProtocolUpgradeTx, storage_writes_deduplicator::StorageWritesDeduplicator, Transaction, @@ -22,7 +20,7 @@ use super::{ types::ExecutionMetricsForCriteria, updates::UpdatesManager, }; -use crate::{gas_tracker::gas_count_from_writes, state_keeper::io::fee_address_migration}; +use crate::gas_tracker::gas_count_from_writes; /// Amount of time to block on waiting for some resource. The exact value is not really important, /// we only need it to not block on waiting indefinitely and be able to process cancellation requests. @@ -78,21 +76,6 @@ impl ZkSyncStateKeeper { } } - /// Temporary method to migrate fee addresses from L1 batches to miniblocks. 
- pub fn run_fee_address_migration( - &self, - pool: ConnectionPool, - ) -> impl Future> { - let last_miniblock = self.io.current_miniblock_number() - 1; - let stop_receiver = self.stop_receiver.clone(); - async move { - fee_address_migration::migrate_miniblocks(pool, last_miniblock, stop_receiver).await?; - future::pending::<()>().await; - // ^ Since this is run as a task, we don't want it to exit on success (this would shut down the node). - anyhow::Ok(()) - } - } - pub async fn run(mut self) -> anyhow::Result<()> { match self.run_inner().await { Ok(_) => unreachable!(), diff --git a/core/lib/zksync_core/src/state_keeper/updates/mod.rs b/core/lib/zksync_core/src/state_keeper/updates/mod.rs index faee5a5fbff..7718882af28 100644 --- a/core/lib/zksync_core/src/state_keeper/updates/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/updates/mod.rs @@ -26,7 +26,6 @@ pub mod miniblock_updates; #[derive(Debug, Clone, PartialEq)] pub struct UpdatesManager { batch_timestamp: u64, - fee_account_address: Address, batch_fee_input: BatchFeeInput, base_fee_per_gas: u64, base_system_contract_hashes: BaseSystemContractsHashes, @@ -44,7 +43,6 @@ impl UpdatesManager { ) -> Self { Self { batch_timestamp: l1_batch_env.timestamp, - fee_account_address: l1_batch_env.fee_account, batch_fee_input: l1_batch_env.fee_input, base_fee_per_gas: get_batch_base_fee(&l1_batch_env, protocol_version.into()), protocol_version, @@ -69,6 +67,14 @@ impl UpdatesManager { self.base_system_contract_hashes } + pub(crate) fn l1_gas_price(&self) -> u64 { + self.batch_fee_input.l1_gas_price() + } + + pub(crate) fn fair_l2_gas_price(&self) -> u64 { + self.batch_fee_input.fair_l2_gas_price() + } + pub(crate) fn seal_miniblock_command( &self, l1_batch_number: L1BatchNumber, @@ -81,7 +87,6 @@ impl UpdatesManager { miniblock_number, miniblock: self.miniblock.clone(), first_tx_index: self.l1_batch.executed_transactions.len(), - fee_account_address: self.fee_account_address, fee_input: self.batch_fee_input, 
base_fee_per_gas: self.base_fee_per_gas, base_system_contracts_hashes: self.base_system_contract_hashes, @@ -167,7 +172,6 @@ pub(crate) struct MiniblockSealCommand { pub miniblock_number: MiniblockNumber, pub miniblock: MiniblockUpdates, pub first_tx_index: usize, - pub fee_account_address: Address, pub fee_input: BatchFeeInput, pub base_fee_per_gas: u64, pub base_system_contracts_hashes: BaseSystemContractsHashes, diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs index f18f316a968..730eef0bc4f 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs @@ -108,7 +108,7 @@ impl L1BatchStagesMap { for (number, stage) in self.iter() { let local_details = storage .blocks_web3_dal() - .get_block_details(MiniblockNumber(number.0)) + .get_block_details(MiniblockNumber(number.0), Address::zero()) .await .unwrap() .unwrap_or_else(|| panic!("no details for block #{number}")); diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index c800e83703f..8057ef77b72 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -23,8 +23,7 @@ use crate::{ state_keeper::{ io::{ common::{l1_batch_params, load_pending_batch, poll_iters}, - fee_address_migration, MiniblockParams, MiniblockSealerHandle, PendingBatchData, - StateKeeperIO, + MiniblockParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO, }, metrics::KEEPER_METRICS, seal_criteria::IoSealCriteria, @@ -85,9 +84,6 @@ impl ExternalIO { .await .unwrap() .expect("empty storage not supported"); // FIXME (PLA-703): handle empty storage - // We must run the migration for pending miniblocks synchronously, since we use `fee_account_address` - // from a pending miniblock in `load_pending_batch()` implementation. 
- fee_address_migration::migrate_pending_miniblocks(&mut storage).await; drop(storage); tracing::info!( @@ -249,6 +245,19 @@ impl StateKeeperIO for ExternalIO { async fn load_pending_batch(&mut self) -> Option { let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); + // TODO (BFT-99): Do not assume that fee account is the same as in previous batch. + let fee_account = storage + .blocks_dal() + .get_l1_batch_header(self.current_l1_batch_number - 1) + .await + .unwrap() + .unwrap_or_else(|| { + panic!( + "No block header for batch {}", + self.current_l1_batch_number - 1 + ) + }) + .fee_account_address; let pending_miniblock_number = { let (_, last_miniblock_number_included_in_l1_batch) = storage .blocks_dal() @@ -263,7 +272,6 @@ impl StateKeeperIO for ExternalIO { .get_miniblock_header(pending_miniblock_number) .await .unwrap()?; - let fee_account = pending_miniblock_header.fee_account_address; if pending_miniblock_header.protocol_version.is_none() { // Fetch protocol version ID for pending miniblocks to know which VM to use to re-execute them. 
diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 2fc010f4a78..5500a6c3f49 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -28,7 +28,7 @@ use crate::{ const TEST_TIMEOUT: Duration = Duration::from_secs(10); const POLL_INTERVAL: Duration = Duration::from_millis(50); -pub(crate) const OPERATOR_ADDRESS: Address = Address::repeat_byte(1); +pub const OPERATOR_ADDRESS: Address = Address::repeat_byte(1); fn open_l1_batch(number: u32, timestamp: u64, first_miniblock_number: u32) -> SyncAction { SyncAction::OpenBatch { @@ -68,7 +68,7 @@ impl StateKeeperHandles { actions, sync_state.clone(), Box::::default(), - Address::repeat_byte(1), + OPERATOR_ADDRESS, u32::MAX, L2ChainId::default(), ) @@ -253,7 +253,7 @@ async fn external_io_with_multiple_miniblocks() { let sync_block = storage .sync_dal() - .sync_block(MiniblockNumber(number), true) + .sync_block(MiniblockNumber(number), OPERATOR_ADDRESS, true) .await .unwrap() .unwrap_or_else(|| panic!("Sync block #{} is not persisted", number)); diff --git a/core/lib/zksync_core/src/utils/mod.rs b/core/lib/zksync_core/src/utils/mod.rs index 9b3fa3da799..7d919d31f88 100644 --- a/core/lib/zksync_core/src/utils/mod.rs +++ b/core/lib/zksync_core/src/utils/mod.rs @@ -1,9 +1,8 @@ //! Miscellaneous utils used by multiple components. -use std::{future::Future, time::Duration}; +use std::time::Duration; use anyhow::Context as _; -use async_trait::async_trait; use tokio::sync::watch; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::L1BatchNumber; @@ -11,44 +10,6 @@ use zksync_types::L1BatchNumber; #[cfg(test)] pub(crate) mod testonly; -/// Fallible and async predicate for binary search. 
-#[async_trait] -pub(crate) trait BinarySearchPredicate: Send { - type Error; - - async fn eval(&mut self, argument: u32) -> Result; -} - -#[async_trait] -impl BinarySearchPredicate for F -where - F: Send + FnMut(u32) -> Fut, - Fut: Send + Future>, -{ - type Error = E; - - async fn eval(&mut self, argument: u32) -> Result { - self(argument).await - } -} - -/// Finds the greatest `u32` value for which `f` returns `true`. -pub(crate) async fn binary_search_with( - mut left: u32, - mut right: u32, - mut predicate: P, -) -> Result { - while left + 1 < right { - let middle = (left + right) / 2; - if predicate.eval(middle).await? { - left = middle; - } else { - right = middle; - } - } - Ok(left) -} - /// Repeatedly polls the DB until there is an L1 batch. We may not have such a batch initially /// if the DB is recovered from an application-level snapshot. /// @@ -133,15 +94,6 @@ mod tests { use super::*; use crate::genesis::{ensure_genesis_state, GenesisParams}; - #[tokio::test] - async fn test_binary_search() { - for divergence_point in [1, 50, 51, 100] { - let mut f = |x| async move { Ok::<_, ()>(x < divergence_point) }; - let result = binary_search_with(0, 100, &mut f).await; - assert_eq!(result, Ok(divergence_point - 1)); - } - } - #[tokio::test] async fn waiting_for_l1_batch_success() { let pool = ConnectionPool::test_pool().await; diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs index 3684e2af909..222e941c7ae 100644 --- a/core/lib/zksync_core/src/utils/testonly.rs +++ b/core/lib/zksync_core/src/utils/testonly.rs @@ -29,7 +29,6 @@ pub(crate) fn create_miniblock(number: u32) -> MiniblockHeader { l2_tx_count: 0, base_fee_per_gas: 100, batch_fee_input: BatchFeeInput::l1_pegged(100, 100), - fee_account_address: Address::zero(), gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(ProtocolVersionId::latest().into()), base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: 
Some(ProtocolVersionId::latest()), @@ -39,12 +38,15 @@ pub(crate) fn create_miniblock(number: u32) -> MiniblockHeader { /// Creates an L1 batch header with the specified number and deterministic contents. pub(crate) fn create_l1_batch(number: u32) -> L1BatchHeader { - L1BatchHeader::new( + let mut header = L1BatchHeader::new( L1BatchNumber(number), number.into(), + Address::default(), BaseSystemContractsHashes::default(), ProtocolVersionId::latest(), - ) + ); + header.is_finished = true; + header } /// Creates metadata for an L1 batch with the specified number. diff --git a/infrastructure/zk/src/format_sql.ts b/infrastructure/zk/src/format_sql.ts index 1e2bd2261c5..2465ec20ba2 100644 --- a/infrastructure/zk/src/format_sql.ts +++ b/infrastructure/zk/src/format_sql.ts @@ -34,7 +34,7 @@ function formatQuery(query: string) { return formattedQuery; } -function extractQueryFromRustString(query: string, isRaw: boolean): string { +function extractQueryFromRustString(query: string): string { query = query.trim(); if (query.endsWith(',')) { query = query.slice(0, query.length - 1); @@ -46,10 +46,9 @@ function extractQueryFromRustString(query: string, isRaw: boolean): string { query = query.slice(3, query.length - 2); } - // Get rid of all "\" characters, both from escapes and line breaks. 
- if (!isRaw) { - query = query.replace(/\\(.|\n)/g, '$1'); - } + //getting rid of all "\" characters, both from escapes and line breaks + query = query.replace(/\\/g, ''); + return query; } @@ -64,9 +63,9 @@ function addIndent(query: string, indent: number) { .join('\n'); } -function formatRustStringQuery(query: string, isRaw: boolean) { +function formatRustStringQuery(query: string) { const baseIndent = query.search(/\S/); - const rawQuery = extractQueryFromRustString(query, isRaw); + const rawQuery = extractQueryFromRustString(query); const formattedQuery = formatQuery(rawQuery); const reconstructedRustString = embedTextInsideRustString(formattedQuery); @@ -82,7 +81,7 @@ function formatOneLineQuery(line: string): string { const queryEnd = isRawString ? line.indexOf('"#') + 2 : line.slice(1).search(/(^|[^\\])"/) + 3; const suffix = line.slice(queryEnd); const query = line.slice(0, queryEnd); - let formattedQuery = formatRustStringQuery(query, isRawString); + let formattedQuery = formatRustStringQuery(query); formattedQuery = addIndent(formattedQuery, baseIndent); return prefix + '\n' + formattedQuery + '\n' + suffix; @@ -125,7 +124,7 @@ async function formatFile(filePath: string, check: boolean) { } if (isInsideQuery) { - const queryNotEmpty = builtQuery !== '' || line.trim().length > 1; + const queryNotEmpty = builtQuery || line.trim().length > 1; const rawStringQueryEnded = line.endsWith('"#,') || line.endsWith('"#'); const regularStringQueryEnded = (line.endsWith('",') || line.endsWith('"')) && queryNotEmpty; builtQuery += line + '\n'; @@ -136,7 +135,7 @@ async function formatFile(filePath: string, check: boolean) { ) { isInsideQuery = false; let endedWithComma = builtQuery.trimEnd().endsWith(','); - modifiedFile += formatRustStringQuery(builtQuery, isRawString).trimEnd(); + modifiedFile += formatRustStringQuery(builtQuery).trimEnd(); modifiedFile += endedWithComma ? ',' : ''; modifiedFile += '\n'; }