Skip to content

Commit

Permalink
fix(boojnet): various boojnet fixes (#462)
Browse files Browse the repository at this point in the history
## What ❔

- sanity checks for some system log values
- fixed a bug where real proofs could be sent before the corresponding batches were committed
- the commitment is now set only once the full tree is available
- fix eth_watcher index out of range for EOA-controlled upgrades

## Why ❔

bug fixes

## Checklist

<!-- Check your PR fulfills the following items. -->
<!-- For draft PRs check the boxes as you complete them. -->

- [x] PR title corresponds to the body of PR (we generate changelog
entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.

---------

Co-authored-by: “perekopskiy” <perekopskiy@dlit.dp.ua>
Co-authored-by: Lyova Potyomkin <lyova.potyomkin@gmail.com>
Co-authored-by: perekopskiy <53865202+perekopskiy@users.noreply.github.com>
  • Loading branch information
4 people committed Nov 13, 2023
1 parent 5f61d8d commit f13648c
Show file tree
Hide file tree
Showing 8 changed files with 185 additions and 75 deletions.
2 changes: 2 additions & 0 deletions core/lib/constants/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ pub mod crypto;
pub mod ethereum;
pub mod fees;
pub mod system_context;
pub mod system_logs;
pub mod trusted_slots;

pub use blocks::*;
Expand All @@ -12,4 +13,5 @@ pub use crypto::*;
pub use ethereum::*;
pub use fees::*;
pub use system_context::*;
pub use system_logs::*;
pub use trusted_slots::*;
5 changes: 5 additions & 0 deletions core/lib/constants/src/system_logs.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
//! Keys identifying system logs emitted at the end of an L1 batch.

/// The key of the system log with value of the L2->L1 logs tree root hash
pub const L2_TO_L1_LOGS_TREE_ROOT_KEY: u32 = 0;

// NOTE(review): key 1 is skipped here — presumably reserved for another system
// log defined elsewhere; confirm against the bootloader's log layout.
/// The key of the system log with value of the state diff hash
pub const STATE_DIFF_HASH_KEY: u32 = 2;
112 changes: 72 additions & 40 deletions core/lib/dal/sqlx-data.json
Original file line number Diff line number Diff line change
Expand Up @@ -298,6 +298,26 @@
},
"query": "\n WITH events_select AS (\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE miniblock_number > $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n )\n SELECT miniblocks.hash as \"block_hash?\",\n address as \"address!\", topic1 as \"topic1!\", topic2 as \"topic2!\", topic3 as \"topic3!\", topic4 as \"topic4!\", value as \"value!\",\n miniblock_number as \"miniblock_number!\", miniblocks.l1_batch_number as \"l1_batch_number?\", tx_hash as \"tx_hash!\",\n tx_index_in_block as \"tx_index_in_block!\", event_index_in_block as \"event_index_in_block!\", event_index_in_tx as \"event_index_in_tx!\"\n FROM events_select\n INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n "
},
"06d90ea65c1e06bd871f090a0fb0e8772ea5e923f1da5310bedd8dc90e0827f4": {
"describe": {
"columns": [
{
"name": "eth_commit_tx_id",
"ordinal": 0,
"type_info": "Int4"
}
],
"nullable": [
true
],
"parameters": {
"Left": [
"Int8"
]
}
},
"query": "SELECT eth_commit_tx_id FROM l1_batches WHERE number = $1"
},
"07310d96fc7e258154ad510684e33d196907ebd599e926d305e5ef9f26afa2fa": {
"describe": {
"columns": [
Expand All @@ -320,6 +340,30 @@
},
"query": "INSERT INTO eth_txs_history (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at, confirmed_at) VALUES ($1, 0, 0, $2, '\\x00', now(), now(), $3) RETURNING id"
},
"09768b376996b96add16a02d1a59231cb9b525cd5bd19d22a76149962d4c91c2": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Bytea",
"Bytea",
"Bytea",
"Bytea",
"Bytea",
"Bytea",
"Bool",
"Bytea",
"Int8",
"Bytea",
"Bytea",
"Bytea",
"Int8"
]
}
},
"query": "UPDATE l1_batches SET hash = $1, merkle_root_hash = $2, compressed_repeated_writes = $3, compressed_initial_writes = $4, l2_l1_compressed_messages = $5, l2_l1_merkle_root = $6, zkporter_is_available = $7, parent_hash = $8, rollup_last_leaf_index = $9, pass_through_data_hash = $10, meta_parameters_hash = $11, compressed_state_diffs = $12, updated_at = now() WHERE number = $13 AND hash IS NULL"
},
"0c212f47b9a0e719f947a419be8284837b1b01aa23994ba6401b420790b802b8": {
"describe": {
"columns": [],
Expand Down Expand Up @@ -1942,6 +1986,20 @@
},
"query": "SELECT recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash, recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash\n FROM protocol_versions\n WHERE id = $1\n "
},
"21c29846f4253081057b86cc1b7ce4ef3ae618c5561c876502dc7f4e773ee91e": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int8",
"Bytea",
"Bytea"
]
}
},
"query": "INSERT INTO commitments (l1_batch_number, events_queue_commitment, bootloader_initial_content_commitment) VALUES ($1, $2, $3) ON CONFLICT (l1_batch_number) DO NOTHING"
},
"22b57675a726d9cfeb82a60ba50c36cab1548d197ea56a7658d3f005df07c60b": {
"describe": {
"columns": [
Expand Down Expand Up @@ -5653,20 +5711,6 @@
},
"query": "\n UPDATE leaf_aggregation_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN leaf_aggregation_witness_jobs lawj ON prover_jobs.l1_batch_number = lawj.l1_batch_number\n WHERE lawj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 0\n GROUP BY prover_jobs.l1_batch_number, lawj.number_of_basic_circuits\n HAVING COUNT(*) = lawj.number_of_basic_circuits)\n RETURNING l1_batch_number;\n "
},
"68a9ba78f60674bc047e4af6eb2a379725da047f2e6c06bce96a33852565cc95": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int8",
"Bytea",
"Bytea"
]
}
},
"query": "INSERT INTO commitments (l1_batch_number, events_queue_commitment, bootloader_initial_content_commitment) VALUES ($1, $2, $3) ON CONFLICT (l1_batch_number) DO UPDATE SET events_queue_commitment = $2, bootloader_initial_content_commitment = $3"
},
"6939e766e122458b2ac618d19b2759c4a7298ef72b81e8c3957e0a5cf35c9552": {
"describe": {
"columns": [
Expand Down Expand Up @@ -9736,6 +9780,20 @@
},
"query": "SELECT bytecode, bytecode_hash FROM factory_deps WHERE bytecode_hash = ANY($1)"
},
"c0904ee4179531cfb9d458a17f753085dc2ed957b30a89119d7534112add3876": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int8",
"Bytea",
"Bytea"
]
}
},
"query": "UPDATE l1_batches SET commitment = $2, aux_data_hash = $3, updated_at = now() WHERE number = $1"
},
"c178e1574d2a16cb90bcc5d5333a4f8dd2a69e0c12b4e7e108a8dcc6000669a5": {
"describe": {
"columns": [
Expand Down Expand Up @@ -10731,32 +10789,6 @@
},
"query": "\n UPDATE witness_inputs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n AND protocol_version = ANY($4)\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs.*\n "
},
"e03756d19dfdf4cdffa81154e690dc7c36024dad5363e0c5440606a5a50eef53": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Bytea",
"Bytea",
"Bytea",
"Bytea",
"Bytea",
"Bytea",
"Bytea",
"Bool",
"Bytea",
"Int8",
"Bytea",
"Bytea",
"Bytea",
"Bytea",
"Int8"
]
}
},
"query": "UPDATE l1_batches SET hash = $1, merkle_root_hash = $2, commitment = $3, compressed_repeated_writes = $4, compressed_initial_writes = $5, l2_l1_compressed_messages = $6, l2_l1_merkle_root = $7, zkporter_is_available = $8, parent_hash = $9, rollup_last_leaf_index = $10, aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13, compressed_state_diffs = $14, updated_at = now() WHERE number = $15 AND hash IS NULL"
},
"e05a8c74653afc78c892ddfd08e60ab040d2b2f7c4b5ee110988eac2dd0dd90d": {
"describe": {
"columns": [
Expand Down
78 changes: 54 additions & 24 deletions core/lib/dal/src/blocks_dal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -587,24 +587,22 @@ impl BlocksDal<'_, '_> {

let update_result = sqlx::query!(
"UPDATE l1_batches \
SET hash = $1, merkle_root_hash = $2, commitment = $3, \
compressed_repeated_writes = $4, compressed_initial_writes = $5, \
l2_l1_compressed_messages = $6, l2_l1_merkle_root = $7, \
zkporter_is_available = $8, parent_hash = $9, rollup_last_leaf_index = $10, \
aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13, \
compressed_state_diffs = $14, updated_at = now() \
WHERE number = $15 AND hash IS NULL",
SET hash = $1, merkle_root_hash = $2, \
compressed_repeated_writes = $3, compressed_initial_writes = $4, \
l2_l1_compressed_messages = $5, l2_l1_merkle_root = $6, \
zkporter_is_available = $7, parent_hash = $8, rollup_last_leaf_index = $9, \
pass_through_data_hash = $10, meta_parameters_hash = $11, \
compressed_state_diffs = $12, updated_at = now() \
WHERE number = $13 AND hash IS NULL",
metadata.root_hash.as_bytes(),
metadata.merkle_root_hash.as_bytes(),
metadata.commitment.as_bytes(),
metadata.repeated_writes_compressed,
metadata.initial_writes_compressed,
metadata.l2_l1_messages_compressed,
metadata.l2_l1_merkle_root.as_bytes(),
metadata.block_meta_params.zkporter_is_available,
previous_root_hash.as_bytes(),
metadata.rollup_last_leaf_index as i64,
metadata.aux_data_hash.as_bytes(),
metadata.pass_through_data_hash.as_bytes(),
metadata.meta_parameters_hash.as_bytes(),
metadata.state_diffs_compressed,
Expand All @@ -616,21 +614,38 @@ impl BlocksDal<'_, '_> {
.execute(transaction.conn())
.await?;

sqlx::query!(
"INSERT INTO commitments (l1_batch_number, events_queue_commitment, bootloader_initial_content_commitment) \
VALUES ($1, $2, $3) \
ON CONFLICT (l1_batch_number) DO UPDATE SET events_queue_commitment = $2, bootloader_initial_content_commitment = $3",
number.0 as i64,
metadata.events_queue_commitment.map(|h| h.0.to_vec()),
metadata
.bootloader_initial_content_commitment
.map(|h| h.0.to_vec()),
)
.instrument("save_batch_commitments")
.with_arg("number", &number)
.report_latency()
.execute(transaction.conn())
.await?;
if metadata.events_queue_commitment.is_some() {
// Save `commitment`, `aux_data_hash`, `events_queue_commitment`, `bootloader_initial_content_commitment`.
sqlx::query!(
"INSERT INTO commitments (l1_batch_number, events_queue_commitment, bootloader_initial_content_commitment) \
VALUES ($1, $2, $3) \
ON CONFLICT (l1_batch_number) DO NOTHING",
number.0 as i64,
metadata.events_queue_commitment.map(|h| h.0.to_vec()),
metadata
.bootloader_initial_content_commitment
.map(|h| h.0.to_vec()),
)
.instrument("save_batch_commitments")
.with_arg("number", &number)
.report_latency()
.execute(transaction.conn())
.await?;

sqlx::query!(
"UPDATE l1_batches \
SET commitment = $2, aux_data_hash = $3, updated_at = now() \
WHERE number = $1",
number.0 as i64,
metadata.commitment.as_bytes(),
metadata.aux_data_hash.as_bytes(),
)
.instrument("save_batch_aux_commitment")
.with_arg("number", &number)
.report_latency()
.execute(transaction.conn())
.await?;
}

if update_result.rows_affected() == 0 {
tracing::debug!(
Expand Down Expand Up @@ -739,6 +754,21 @@ impl BlocksDal<'_, '_> {
Ok(L1BatchNumber(row.number as u32))
}

/// Looks up the `eth_commit_tx_id` recorded for the given L1 batch.
///
/// Returns `Ok(None)` both when no such batch exists and when the batch row
/// exists but its `eth_commit_tx_id` column is NULL (i.e. the batch has not
/// been committed yet — presumably the id references an `eth_txs` row; confirm
/// against the schema).
pub async fn get_eth_commit_tx_id(
    &mut self,
    l1_batch_number: L1BatchNumber,
) -> sqlx::Result<Option<u64>> {
    let maybe_row = sqlx::query!(
        "SELECT eth_commit_tx_id FROM l1_batches \
        WHERE number = $1",
        l1_batch_number.0 as i64
    )
    .fetch_optional(self.storage.conn())
    .await?;

    // Flatten the two layers of optionality (missing row / NULL column)
    // into a single `Option`, widening the Int4 id to `u64`.
    Ok(match maybe_row {
        Some(row) => row.eth_commit_tx_id.map(|id| id as u64),
        None => None,
    })
}

/// Returns the number of the last L1 batch for which an Ethereum prove tx was sent and confirmed.
pub async fn get_number_of_last_l1_batch_proven_on_eth(
&mut self,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,7 @@ impl<S: WriteStorage, H: HistoryMode> VmTracer<S, H> for PubdataTracer<S> {

// Apply the pubdata to the current memory
let mut memory_to_apply = vec![];

apply_pubdata_to_memory(&mut memory_to_apply, pubdata_input);
state.memory.populate_page(
BOOTLOADER_HEAP_PAGE as usize,
Expand Down
34 changes: 33 additions & 1 deletion core/lib/types/src/commitment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,14 @@
//! transactions, thus the calculations are done separately and asynchronously.

use serde::{Deserialize, Serialize};
use zksync_utils::u256_to_h256;

use std::{collections::HashMap, convert::TryFrom};

use zksync_mini_merkle_tree::MiniMerkleTree;
use zksync_system_constants::ZKPORTER_IS_AVAILABLE;
use zksync_system_constants::{
L2_TO_L1_LOGS_TREE_ROOT_KEY, STATE_DIFF_HASH_KEY, ZKPORTER_IS_AVAILABLE,
};

use crate::{
block::L1BatchHeader,
Expand Down Expand Up @@ -353,6 +356,22 @@ impl L1BatchAuxiliaryOutput {
events_state_queue_hash: H256,
protocol_version: ProtocolVersionId,
) -> Self {
let state_diff_hash_from_logs = system_logs.iter().find_map(|log| {
if log.0.key == u256_to_h256(STATE_DIFF_HASH_KEY.into()) {
Some(log.0.value)
} else {
None
}
});

let merke_tree_root_from_logs = system_logs.iter().find_map(|log| {
if log.0.key == u256_to_h256(L2_TO_L1_LOGS_TREE_ROOT_KEY.into()) {
Some(log.0.value)
} else {
None
}
});

let (
l2_l1_logs_compressed,
initial_writes_compressed,
Expand Down Expand Up @@ -403,6 +422,19 @@ impl L1BatchAuxiliaryOutput {
let l2_l1_logs_merkle_root =
MiniMerkleTree::new(merkle_tree_leaves, Some(min_tree_size)).merkle_root();

if !system_logs.is_empty() {
assert_eq!(
state_diffs_hash,
state_diff_hash_from_logs.unwrap(),
"State diff hash mismatch"
);
assert_eq!(
l2_l1_logs_merkle_root,
merke_tree_root_from_logs.unwrap(),
"L2 L1 logs tree root mismatch"
);
}

Self {
l2_l1_logs_compressed,
initial_writes_compressed,
Expand Down
20 changes: 10 additions & 10 deletions core/lib/types/src/protocol_version.rs
Original file line number Diff line number Diff line change
Expand Up @@ -274,18 +274,19 @@ impl TryFrom<Log> for ProtocolUpgrade {
ParamType::Uint(256), // version id
ParamType::Address, // allow list address
])],
&init_calldata[4..],
init_calldata
.get(4..)
.ok_or(crate::ethabi::Error::InvalidData)?,
)?;

let mut decoded = match decoded.remove(0) {
Token::Tuple(x) => x,
_ => unreachable!(),
let Token::Tuple(mut decoded) = decoded.remove(0) else {
unreachable!();
};

let mut transaction = match decoded.remove(0) {
Token::Tuple(x) => x,
_ => unreachable!(),
let Token::Tuple(mut transaction) = decoded.remove(0) else {
unreachable!()
};

let factory_deps = decoded.remove(0).into_array().unwrap();

let tx = {
Expand Down Expand Up @@ -399,9 +400,8 @@ impl TryFrom<Log> for ProtocolUpgrade {
let default_account_code_hash =
H256::from_slice(&decoded.remove(0).into_fixed_bytes().unwrap());
let verifier_address = decoded.remove(0).into_address().unwrap();
let mut verifier_params = match decoded.remove(0) {
Token::Tuple(tx) => tx,
_ => unreachable!(),
let Token::Tuple(mut verifier_params) = decoded.remove(0) else {
unreachable!()
};
let recursion_node_level_vk_hash =
H256::from_slice(&verifier_params.remove(0).into_fixed_bytes().unwrap());
Expand Down
Loading

0 comments on commit f13648c

Please sign in to comment.