Merge pull request #542 from matter-labs/sb-l1-l2-cutom-da-with-sl
Sync layer with custom DA
StanislavBreadless committed Jun 25, 2024
2 parents fcfbb59 + 42be7e5 commit 43f7232
Showing 46 changed files with 1,270 additions and 371 deletions.
11 changes: 11 additions & 0 deletions l1-contracts/contracts/common/interfaces/IL1Messenger.sol
@@ -0,0 +1,11 @@
// SPDX-License-Identifier: MIT

pragma solidity 0.8.24;
/**
* @author Matter Labs
* @custom:security-contact security@matterlabs.dev
* @notice The interface of the L1 Messenger contract, responsible for sending messages to L1.
*/
interface IL1Messenger {
function sendToL1(bytes memory _message) external returns (bytes32);
}
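
Editor's note: a minimal usage sketch for this new interface (not part of the commit; the MessageSender contract is illustrative, and the 0x8008 messenger address is the conventional zkSync Era predeploy):

// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

import {IL1Messenger} from "./IL1Messenger.sol";

/// Hypothetical example contract: publishes an ABI-encoded payload to L1
/// through the L1 Messenger and returns the message hash.
contract MessageSender {
    // Assumed predeploy address of the L1 Messenger system contract.
    IL1Messenger internal constant L1_MESSENGER = IL1Messenger(address(0x8008));

    function notifyL1(uint256 _value) external returns (bytes32) {
        // The returned hash identifies this message among the batch's L2->L1 logs.
        return L1_MESSENGER.sendToL1(abi.encode(msg.sender, _value));
    }
}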
@@ -28,7 +28,7 @@ contract ExecutorProvingTest is ExecutorFacet {
CommitBatchInfo calldata _newBatch,
bytes32 _expectedSystemContractUpgradeTxHash,
PubdataPricingMode
) external pure returns (LogProcessingOutput memory logOutput) {
) external view returns (LogProcessingOutput memory logOutput) {
return _processL2Logs(_newBatch, _expectedSystemContractUpgradeTxHash);
}

12 changes: 6 additions & 6 deletions l1-contracts/contracts/dev-contracts/test/TestExecutor.sol
@@ -5,10 +5,10 @@ import {ExecutorFacet} from "../../state-transition/chain-deps/facets/Executor.s
pragma solidity 0.8.24;

contract TestExecutor is ExecutorFacet {
/// @dev Since we want to test the blob functionality we want to mock the calls to the blobhash opcode.
function _getBlobVersionedHash(uint256 _index) internal view virtual override returns (bytes32 versionedHash) {
(bool success, bytes memory data) = s.blobVersionedHashRetriever.staticcall(abi.encode(_index));
require(success, "vc");
versionedHash = abi.decode(data, (bytes32));
}
// /// @dev Since we want to test the blob functionality we want to mock the calls to the blobhash opcode.
// function _getBlobVersionedHash(uint256 _index) internal view virtual override returns (bytes32 versionedHash) {
// (bool success, bytes memory data) = s.blobVersionedHashRetriever.staticcall(abi.encode(_index));
// require(success, "vc");
// versionedHash = abi.decode(data, (bytes32));
// }
}
@@ -152,6 +152,12 @@ struct ZkSyncHyperchainStorage {
uint128 baseTokenGasPriceMultiplierDenominator;
/// @dev The optional address of the contract that has to be used for transaction filtering/whitelisting
address transactionFilterer;
/// @dev The address of the l1DAValidator contract.
/// This contract is responsible for the verification of the correctness of the DA on L1.
address l1DAValidator;
/// @dev The address of the contract on L2 that is responsible for the data availability verification.
/// This contract sends `l2DAValidatorOutputHash` to L1 via an L2->L1 system log and it will be routed to the `l1DAValidator` contract.
address l2DAValidator;
/// @dev the Asset Id of the baseToken
bytes32 baseTokenAssetId;
/// @dev address of the synclayer, only set on L1 if settling on it
@@ -120,6 +120,28 @@ contract AdminFacet is ZkSyncHyperchainBase, IAdmin {
emit NewTransactionFilterer(oldTransactionFilterer, _transactionFilterer);
}

/// @notice Sets the DA validator pair with the given addresses.
/// @dev It does not check for these addresses to be non-zero, since when migrating to a new settlement
/// layer, we set them to zero.
function _setDAValidatorPair(address _l1DAValidator, address _l2DAValidator) internal {
address oldL1DAValidator = s.l1DAValidator;
address oldL2DAValidator = s.l2DAValidator;

s.l1DAValidator = _l1DAValidator;
s.l2DAValidator = _l2DAValidator;

emit NewL1DAValidator(oldL1DAValidator, _l1DAValidator);
emit NewL2DAValidator(oldL2DAValidator, _l2DAValidator);
}

/// @inheritdoc IAdmin
function setDAValidatorPair(address _l1DAValidator, address _l2DAValidator) external onlyAdmin {
require(_l1DAValidator != address(0), "AdminFacet: L1DAValidator address is zero");
require(_l2DAValidator != address(0), "AdminFacet: L2DAValidator address is zero");

_setDAValidatorPair(_l1DAValidator, _l2DAValidator);
}
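
As a usage sketch (editor's addition, not part of the diff), a chain admin would wire a validator pair through the hyperchain's diamond proxy; the import path and the helper contract below are assumptions for illustration:

pragma solidity 0.8.24;

import {IAdmin} from "../../chain-interfaces/IAdmin.sol";

/// Hypothetical helper (illustrative import path and addresses): wires a DA
/// validator pair on a hyperchain's diamond proxy. The caller must be the
/// chain admin, or the facet's onlyAdmin modifier reverts.
contract WireDAValidators {
    function wire(address _chainDiamond, address _l1Validator, address _l2Validator) external {
        // Passing address(0) for either argument reverts with the messages above;
        // only the internal _setDAValidatorPair may zero them out (during migration).
        IAdmin(_chainDiamond).setDAValidatorPair(_l1Validator, _l2Validator);
    }
}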

/*//////////////////////////////////////////////////////////////
UPGRADE EXECUTION
//////////////////////////////////////////////////////////////*/
@@ -307,6 +329,8 @@ contract AdminFacet is ZkSyncHyperchainBase, IAdmin {
s.l2SystemContractsUpgradeTxHash = _commitment.l2SystemContractsUpgradeTxHash;
s.l2SystemContractsUpgradeBatchNumber = _commitment.l2SystemContractsUpgradeBatchNumber;

_setDAValidatorPair(address(0), address(0));

emit MigrationComplete();
}

179 changes: 29 additions & 150 deletions l1-contracts/contracts/state-transition/chain-deps/facets/Executor.sol
@@ -6,14 +6,14 @@ pragma solidity 0.8.24;

import {ZkSyncHyperchainBase} from "./ZkSyncHyperchainBase.sol";
// import {IBridgehub} from "../../../bridgehub/IBridgehub.sol";
import {COMMIT_TIMESTAMP_NOT_OLDER, COMMIT_TIMESTAMP_APPROXIMATION_DELTA, EMPTY_STRING_KECCAK, L2_TO_L1_LOG_SERIALIZE_SIZE, MAX_L2_TO_L1_LOGS_COMMITMENT_BYTES, PACKED_L2_BLOCK_TIMESTAMP_MASK, PUBLIC_INPUT_SHIFT, POINT_EVALUATION_PRECOMPILE_ADDR} from "../../../common/Config.sol";
import {IExecutor, L2_LOG_ADDRESS_OFFSET, L2_LOG_KEY_OFFSET, L2_LOG_VALUE_OFFSET, SystemLogKey, LogProcessingOutput, PubdataSource, BLS_MODULUS, PUBDATA_COMMITMENT_SIZE, PUBDATA_COMMITMENT_CLAIMED_VALUE_OFFSET, PUBDATA_COMMITMENT_COMMITMENT_OFFSET, MAX_NUMBER_OF_BLOBS, TOTAL_BLOBS_IN_COMMITMENT, BLOB_SIZE_BYTES} from "../../chain-interfaces/IExecutor.sol";
import {COMMIT_TIMESTAMP_NOT_OLDER, COMMIT_TIMESTAMP_APPROXIMATION_DELTA, EMPTY_STRING_KECCAK, L2_TO_L1_LOG_SERIALIZE_SIZE, MAX_L2_TO_L1_LOGS_COMMITMENT_BYTES, PACKED_L2_BLOCK_TIMESTAMP_MASK, PUBLIC_INPUT_SHIFT} from "../../../common/Config.sol";
import {IExecutor, L2_LOG_ADDRESS_OFFSET, L2_LOG_KEY_OFFSET, L2_LOG_VALUE_OFFSET, SystemLogKey, LogProcessingOutput, MAX_NUMBER_OF_BLOBS, TOTAL_BLOBS_IN_COMMITMENT} from "../../chain-interfaces/IExecutor.sol";
import {PriorityQueue, PriorityOperation} from "../../libraries/PriorityQueue.sol";
import {UncheckedMath} from "../../../common/libraries/UncheckedMath.sol";
import {UnsafeBytes} from "../../../common/libraries/UnsafeBytes.sol";
import {L2_BOOTLOADER_ADDRESS, L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR, L2_SYSTEM_CONTEXT_SYSTEM_CONTRACT_ADDR, L2_PUBDATA_CHUNK_PUBLISHER_ADDR} from "../../../common/L2ContractAddresses.sol";
import {PubdataPricingMode} from "../ZkSyncHyperchainStorage.sol";
import {L2_BOOTLOADER_ADDRESS, L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR, L2_SYSTEM_CONTEXT_SYSTEM_CONTRACT_ADDR} from "../../../common/L2ContractAddresses.sol";
import {IStateTransitionManager} from "../../IStateTransitionManager.sol";
import {IL1DAValidator, L1DAValidatorOutput} from "../../chain-interfaces/IL1DAValidator.sol";

// While formally the following import is not used, it is needed to inherit documentation from it
import {IZkSyncHyperchainBase} from "../../chain-interfaces/IZkSyncHyperchainBase.sol";
@@ -35,46 +35,19 @@ contract ExecutorFacet is ZkSyncHyperchainBase, IExecutor {
StoredBatchInfo memory _previousBatch,
CommitBatchInfo calldata _newBatch,
bytes32 _expectedSystemContractUpgradeTxHash
) internal view returns (StoredBatchInfo memory) {
require(_newBatch.batchNumber == _previousBatch.batchNumber + 1, "f"); // only commit next batch

uint8 pubdataSource = uint8(bytes1(_newBatch.pubdataCommitments[0]));
PubdataPricingMode pricingMode = s.feeParams.pubdataPricingMode;
require(
pricingMode == PubdataPricingMode.Validium ||
pubdataSource == uint8(PubdataSource.Calldata) ||
pubdataSource == uint8(PubdataSource.Blob),
"us"
);
) internal returns (StoredBatchInfo memory) {
require(_newBatch.batchNumber == _previousBatch.batchNumber + 1, "f"); // only commit next batch

// Check that the batch contains all meta information for the L2 logs.
// Get the chained hash of priority transaction hashes.
LogProcessingOutput memory logOutput = _processL2Logs(_newBatch, _expectedSystemContractUpgradeTxHash);

bytes32[] memory blobCommitments = new bytes32[](MAX_NUMBER_OF_BLOBS);
if (pricingMode == PubdataPricingMode.Validium) {
// skipping data validation for validium, we just check that the data is empty
require(_newBatch.pubdataCommitments.length == 1, "EF: v0l");
for (uint8 i = uint8(SystemLogKey.BLOB_ONE_HASH_KEY); i <= uint8(SystemLogKey.BLOB_SIX_HASH_KEY); ++i) {
logOutput.blobHashes[i - uint8(SystemLogKey.BLOB_ONE_HASH_KEY)] = bytes32(0);
}
} else if (pubdataSource == uint8(PubdataSource.Blob)) {
// In this scenario, pubdataCommitments is a list of: opening point (16 bytes) || claimed value (32 bytes) || commitment (48 bytes) || proof (48 bytes) = 144 bytes
blobCommitments = _verifyBlobInformation(_newBatch.pubdataCommitments[1:], logOutput.blobHashes);
} else if (pubdataSource == uint8(PubdataSource.Calldata)) {
// In this scenario pubdataCommitments is actual pubdata consisting of l2 to l1 logs, l2 to l1 message, compressed smart contract bytecode, and compressed state diffs
require(_newBatch.pubdataCommitments.length <= BLOB_SIZE_BYTES, "cz");
require(
logOutput.pubdataHash ==
keccak256(_newBatch.pubdataCommitments[1:_newBatch.pubdataCommitments.length - 32]),
"wp"
);
blobCommitments[0] = bytes32(
_newBatch.pubdataCommitments[_newBatch.pubdataCommitments.length - 32:_newBatch
.pubdataCommitments
.length]
);
}
L1DAValidatorOutput memory daOutput = IL1DAValidator(s.l1DAValidator).checkDA(
s.chainId,
logOutput.l2DAValidatorOutputHash,
_newBatch.operatorDAInput,
TOTAL_BLOBS_IN_COMMITMENT
);
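
// Editor's note (an inferred sketch, not part of this hunk): judging from the call
// above and the fields consumed below, the IL1DAValidator interface has roughly this
// shape:
//
//     struct L1DAValidatorOutput {
//         bytes32 stateDiffHash;
//         bytes32[] blobsLinearHashes;
//         bytes32[] blobsOpeningCommitments;
//     }
//
//     interface IL1DAValidator {
//         function checkDA(
//             uint256 _chainId,
//             bytes32 _l2DAValidatorOutputHash,
//             bytes calldata _operatorDAInput,
//             uint256 _maxBlobsSupported
//         ) external returns (L1DAValidatorOutput memory output);
//     }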

require(_previousBatch.batchHash == logOutput.previousBatchHash, "l");
// Check that the priority operation hash in the L2 logs is as expected
@@ -88,9 +61,9 @@ contract ExecutorFacet is ZkSyncHyperchainBase, IExecutor {
// Create batch commitment for the proof verification
bytes32 commitment = _createBatchCommitment(
_newBatch,
logOutput.stateDiffHash,
blobCommitments,
logOutput.blobHashes
daOutput.stateDiffHash,
daOutput.blobsOpeningCommitments,
daOutput.blobsLinearHashes
);

return
@@ -140,12 +113,10 @@ contract ExecutorFacet is ZkSyncHyperchainBase, IExecutor {
function _processL2Logs(
CommitBatchInfo calldata _newBatch,
bytes32 _expectedSystemContractUpgradeTxHash
) internal pure returns (LogProcessingOutput memory logOutput) {
) internal view returns (LogProcessingOutput memory logOutput) {
// Copy L2 to L1 logs into memory.
bytes memory emittedL2Logs = _newBatch.systemLogs;

logOutput.blobHashes = new bytes32[](MAX_NUMBER_OF_BLOBS);

// Used as bitmap to set/check log processing happens exactly once.
// See SystemLogKey enum in Constants.sol for ordering.
uint256 processedLogs;
@@ -169,12 +140,6 @@
if (logKey == uint256(SystemLogKey.L2_TO_L1_LOGS_TREE_ROOT_KEY)) {
require(logSender == L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR, "lm");
logOutput.l2LogsTreeRoot = logValue;
} else if (logKey == uint256(SystemLogKey.TOTAL_L2_TO_L1_PUBDATA_KEY)) {
require(logSender == L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR, "ln");
logOutput.pubdataHash = logValue;
} else if (logKey == uint256(SystemLogKey.STATE_DIFF_HASH_KEY)) {
require(logSender == L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR, "lb");
logOutput.stateDiffHash = logValue;
} else if (logKey == uint256(SystemLogKey.PACKED_BATCH_AND_L2_BLOCK_TIMESTAMP_KEY)) {
require(logSender == L2_SYSTEM_CONTEXT_SYSTEM_CONTRACT_ADDR, "sc");
logOutput.packedBatchAndL2BlockTimestamp = uint256(logValue);
@@ -187,17 +152,12 @@
} else if (logKey == uint256(SystemLogKey.NUMBER_OF_LAYER_1_TXS_KEY)) {
require(logSender == L2_BOOTLOADER_ADDRESS, "bk");
logOutput.numberOfLayer1Txs = uint256(logValue);
} else if (
logKey >= uint256(SystemLogKey.BLOB_ONE_HASH_KEY) && logKey <= uint256(SystemLogKey.BLOB_SIX_HASH_KEY)
) {
require(logSender == L2_PUBDATA_CHUNK_PUBLISHER_ADDR, "pc");
uint8 blobNumber = uint8(logKey) - uint8(SystemLogKey.BLOB_ONE_HASH_KEY);

// While the fact that `blobNumber` is a valid blob number is implicitly checked by the fact
// that Solidity provides array overflow protection, we still double check it manually in case
// we accidentally put `unchecked` at the top of the loop and generally for better error messages.
require(blobNumber < MAX_NUMBER_OF_BLOBS, "b6");
logOutput.blobHashes[blobNumber] = logValue;
} else if (logKey == uint256(SystemLogKey.USED_L2_DA_VALIDATOR_ADDRESS_KEY)) {
require(logSender == L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR, "bk");
require(s.l2DAValidator == address(uint160(uint256(logValue))), "lo");
} else if (logKey == uint256(SystemLogKey.L2_DA_VALIDATOR_OUTPUT_HASH_KEY)) {
require(logSender == L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR, "lp2");
logOutput.l2DAValidatorOutputHash = logValue;
} else if (logKey == uint256(SystemLogKey.EXPECTED_SYSTEM_CONTRACT_UPGRADE_TX_HASH_KEY)) {
require(logSender == L2_BOOTLOADER_ADDRESS, "bu");
require(_expectedSystemContractUpgradeTxHash == logValue, "ut");
@@ -206,13 +166,16 @@
}
}

// FIXME: the old logs were temporarily kept for backwards compatibility, so this check cannot work right now.
//
// We only require 13 logs to be checked; the 14th is present only if we are expecting a protocol upgrade.
// Without the protocol upgrade we expect 13 logs: 2^13 - 1 = 8191
// With the protocol upgrade we expect 14 logs: 2^14 - 1 = 16383
if (_expectedSystemContractUpgradeTxHash == bytes32(0)) {
require(processedLogs == 8191, "b7");
// require(processedLogs == 127, "b7");
} else {
require(processedLogs == 16383, "b8");
// FIXME: restore this code to what it was before
require(_checkBit(processedLogs, uint8(SystemLogKey.EXPECTED_SYSTEM_CONTRACT_UPGRADE_TX_HASH_KEY)), "b8");
}
}
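
Editor's aside (illustrative, not from the commit): the bitmap arithmetic in the comments above can be sanity-checked as follows. With the 13 mandatory log keys numbered 0 through 12, setting every bit yields 2^13 - 1 = 8191; the optional upgrade log at key 13 raises it to 2^14 - 1 = 16383.

pragma solidity 0.8.24;

// Free-standing sketch mirroring _setBit/_checkBit below; not part of the commit.
function processedLogsExample() pure returns (bool ok) {
    uint256 bitmap;
    for (uint8 key = 0; key < 13; ++key) {
        bitmap |= (uint256(1) << key); // equivalent to bitmap = _setBit(bitmap, key)
    }
    ok = (bitmap == 8191) && ((bitmap | (uint256(1) << 13)) == 16383);
}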

@@ -567,8 +530,8 @@
) internal pure returns (bytes32[] memory blobAuxOutputWords) {
// These invariants should be checked by the caller of this function, but we double check
// just in case.
require(_blobCommitments.length == MAX_NUMBER_OF_BLOBS, "b10");
require(_blobHashes.length == MAX_NUMBER_OF_BLOBS, "b11");
require(_blobCommitments.length == TOTAL_BLOBS_IN_COMMITMENT, "b10");
require(_blobHashes.length == TOTAL_BLOBS_IN_COMMITMENT, "b11");

// for each blob we have:
// linear hash (hash of preimage from system logs) and
@@ -600,88 +563,4 @@
function _setBit(uint256 _bitMap, uint8 _index) internal pure returns (uint256) {
return _bitMap | (1 << _index);
}

/// @notice Calls the point evaluation precompile and verifies the output
/// Verify p(z) = y given commitment that corresponds to the polynomial p(x) and a KZG proof.
/// Also verify that the provided commitment matches the provided versioned_hash.
///
function _pointEvaluationPrecompile(
bytes32 _versionedHash,
bytes32 _openingPoint,
bytes calldata _openingValueCommitmentProof
) internal view {
bytes memory precompileInput = abi.encodePacked(_versionedHash, _openingPoint, _openingValueCommitmentProof);

(bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(precompileInput);

// We verify that the point evaluation precompile call was successful by checking that the last 32 bytes of the
// response are equal to BLS_MODULUS, as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
require(success, "failed to call point evaluation precompile");
(, uint256 result) = abi.decode(data, (uint256, uint256));
require(result == BLS_MODULUS, "precompile unexpected output");
}
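
Editor's note for context, based on EIP-4844 rather than this diff: the precompile consumes a fixed 192-byte input and returns 64 bytes, which is why the code above decodes two words and compares the second against BLS_MODULUS.

// input  = versioned_hash (32) || z (32) || y (32) || commitment (48) || proof (48) = 192 bytes
// output = FIELD_ELEMENTS_PER_BLOB (32) || BLS_MODULUS (32) = 64 bytes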

/// @dev Verifies that the blobs contain the correct data by calling the point evaluation precompile. For the precompile we need:
/// versioned hash || opening point || opening value || commitment || proof
/// the _pubdataCommitments will contain the last 4 values, the versioned hash is pulled from the BLOBHASH opcode
/// pubdataCommitments is a list of: opening point (16 bytes) || claimed value (32 bytes) || commitment (48 bytes) || proof (48 bytes) = 144 bytes
function _verifyBlobInformation(
bytes calldata _pubdataCommitments,
bytes32[] memory _blobHashes
) internal view returns (bytes32[] memory blobCommitments) {
uint256 versionedHashIndex = 0;

require(_pubdataCommitments.length > 0, "pl");
require(_pubdataCommitments.length <= PUBDATA_COMMITMENT_SIZE * MAX_NUMBER_OF_BLOBS, "bd");
require(_pubdataCommitments.length % PUBDATA_COMMITMENT_SIZE == 0, "bs");
blobCommitments = new bytes32[](MAX_NUMBER_OF_BLOBS);

// solhint-disable-next-line gas-length-in-loops
for (uint256 i = 0; i < _pubdataCommitments.length; i += PUBDATA_COMMITMENT_SIZE) {
bytes32 blobVersionedHash = _getBlobVersionedHash(versionedHashIndex);

require(blobVersionedHash != bytes32(0), "vh");

// First 16 bytes is the opening point. While we get the point as 16 bytes, the point evaluation precompile
// requires it to be 32 bytes. The blob commitment must use the opening point as 16 bytes though.
bytes32 openingPoint = bytes32(
uint256(uint128(bytes16(_pubdataCommitments[i:i + PUBDATA_COMMITMENT_CLAIMED_VALUE_OFFSET])))
);

_pointEvaluationPrecompile(
blobVersionedHash,
openingPoint,
_pubdataCommitments[i + PUBDATA_COMMITMENT_CLAIMED_VALUE_OFFSET:i + PUBDATA_COMMITMENT_SIZE]
);

// Take the hash of the versioned hash || opening point || claimed value
blobCommitments[versionedHashIndex] = keccak256(
abi.encodePacked(blobVersionedHash, _pubdataCommitments[i:i + PUBDATA_COMMITMENT_COMMITMENT_OFFSET])
);
++versionedHashIndex;
}

// This check is required because we want to ensure that there aren't any extra blobs trying to be published.
// Calling the BLOBHASH opcode with an index > # blobs - 1 yields bytes32(0)
bytes32 versionedHash = _getBlobVersionedHash(versionedHashIndex);
require(versionedHash == bytes32(0), "lh");

// We verify that for each set of blobHash/blobCommitment are either both empty
// or there are values for both.
for (uint256 i = 0; i < MAX_NUMBER_OF_BLOBS; ++i) {
require(
(_blobHashes[i] == bytes32(0) && blobCommitments[i] == bytes32(0)) ||
(_blobHashes[i] != bytes32(0) && blobCommitments[i] != bytes32(0)),
"bh"
);
}
}
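
For reference (editor's inference from the layout comment above; the authoritative values live in IExecutor.sol, which this diff does not show), the commitment offsets work out as:

// opening point (16) || claimed value (32) || commitment (48) || proof (48) = 144 bytes
// PUBDATA_COMMITMENT_CLAIMED_VALUE_OFFSET = 16   (skip the 16-byte opening point)
// PUBDATA_COMMITMENT_COMMITMENT_OFFSET    = 48   (16 + 32)
// PUBDATA_COMMITMENT_SIZE                 = 144  (48 + 48 + 48)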

function _getBlobVersionedHash(uint256 /** _index */) internal view virtual returns (bytes32 /** versionedHash*/) {
// FIXME: enable blobs
revert("Blobs not supported on this codebase yet");
// assembly {
// versionedHash := blobhash(_index)
// }
}
}