diff --git a/.travis.yml b/.travis.yml index 929095ae56..f1e6a121b4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -64,17 +64,17 @@ matrix: include: # We don't run tests, linters and quck check in fork branch, since they will be covered in PR. - name: Tests on macOS - if: 'tag IS NOT present AND (type = pull_request OR branch IN (master, staging, staging2, trying))' + if: 'tag IS NOT present AND (type = pull_request OR branch IN (master, staging, staging2, trying) OR branch =~ /^ckb2021/)' os: osx - name: Tests on Linux - if: 'tag IS NOT present AND (type = pull_request OR branch IN (master, staging, staging2, trying) OR repo != nervosnetwork/ckb)' + if: 'tag IS NOT present AND (type = pull_request OR branch IN (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR repo != nervosnetwork/ckb)' os: linux - name: PR Integration if: 'tag IS NOT present AND branch != develop AND branch !~ /^rc\// AND (type = pull_request OR repo != nervosnetwork/ckb)' os: linux script: make CKB_TEST_SEC_COEFFICIENT=5 CKB_TEST_ARGS="-c 4 --no-report" integration - name: Linters - if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR repo != nervosnetwork/ckb)' + if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR repo != nervosnetwork/ckb)' os: linux install: - cargo fmt --version || travis_retry rustup component add rustfmt @@ -99,7 +99,7 @@ matrix: - make clippy - mv rust-toolchain.bak rust-toolchain - name: Quick Check - if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR repo != nervosnetwork/ckb)' + if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR repo != nervosnetwork/ckb)' os: linux cache: false addons: { apt: { packages: [] } } @@ -119,7 +119,7 @@ matrix: script: - devtools/ci/check-cyclic-dependencies.py --dev - name: Security 
Audit & Licenses - if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR repo != nervosnetwork/ckb)' + if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR repo != nervosnetwork/ckb)' os: linux install: - cargo deny --version || travis_retry cargo install cargo-deny --locked @@ -129,7 +129,7 @@ matrix: - make check-licenses - name: WASM build - if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR repo != nervosnetwork/ckb)' + if: 'tag IS NOT present AND (type = pull_request OR branch in (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR repo != nervosnetwork/ckb)' os: linux script: - export PATH=/usr/lib/llvm-8/bin:$PATH @@ -141,11 +141,11 @@ matrix: os: linux script: make bench-test - name: Integration on macOS - if: 'tag IS NOT present AND type != pull_request AND (branch IN (master, staging, staging2, trying) OR branch =~ /^rc\// OR (branch = develop AND commit_message !~ /^Merge #\d+/))' + if: 'tag IS NOT present AND type != pull_request AND (branch IN (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR branch =~ /^rc\// OR (branch = develop AND commit_message !~ /^Merge #\d+/))' os: osx script: make CKB_TEST_ARGS="-c 1 --no-report" integration - name: Integration on Linux - if: 'tag IS NOT present AND type != pull_request AND (branch IN (master, staging, staging2, trying) OR branch =~ /^rc\// OR (branch = develop AND commit_message !~ /^Merge #\d+/))' + if: 'tag IS NOT present AND type != pull_request AND (branch IN (master, staging, staging2, trying) OR branch =~ /^ckb2021/ OR branch =~ /^rc\// OR (branch = develop AND commit_message !~ /^Merge #\d+/))' os: linux script: make CKB_TEST_ARGS="-c 1 --no-report" integration - name: Code Coverage diff --git a/Cargo.lock b/Cargo.lock index 82febeced5..35c09704a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -547,6 +547,7 @@ 
dependencies = [ name = "ckb-chain-spec" version = "0.43.0-pre" dependencies = [ + "ckb-constant", "ckb-crypto", "ckb-dao-utils", "ckb-error", diff --git a/azure-pipelines.yml b/azure-pipelines.yml index ad7e2124d8..4081bba105 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -19,7 +19,8 @@ jobs: eq(variables['Build.Reason'], 'PullRequest'), ne(variables['System.PullRequest.SourceBranch'], 'develop') ), - eq(variables['Build.SourceBranch'], 'refs/heads/master') + eq(variables['Build.SourceBranch'], 'refs/heads/master'), + startsWith(variables['Build.SourceBranch'], 'refs/heads/ckb2021') ) ) pool: @@ -41,7 +42,8 @@ jobs: ne(variables['Build.Reason'], 'PullRequest'), or( startsWith(variables['Build.SourceBranch'], 'refs/heads/rc/'), - in(variables['Build.SourceBranch'], 'refs/heads/master', 'refs/heads/develop', 'refs/heads/staging2', 'refs/heads/trying') + in(variables['Build.SourceBranch'], 'refs/heads/master', 'refs/heads/develop', 'refs/heads/staging2', 'refs/heads/trying'), + startsWith(variables['Build.SourceBranch'], 'refs/heads/ckb2021') ) ) pool: diff --git a/db-schema/src/lib.rs b/db-schema/src/lib.rs index bef2d51317..d1af69e8d2 100644 --- a/db-schema/src/lib.rs +++ b/db-schema/src/lib.rs @@ -3,7 +3,7 @@ /// Column families alias type pub type Col = &'static str; /// Total column number -pub const COLUMNS: u32 = 15; +pub const COLUMNS: u32 = 16; /// Column store chain index pub const COLUMN_INDEX: Col = "0"; /// Column store block's header @@ -36,6 +36,8 @@ pub const COLUMN_CELL_DATA: Col = "12"; pub const COLUMN_NUMBER_HASH: Col = "13"; /// Column store cell data hash pub const COLUMN_CELL_DATA_HASH: Col = "14"; +/// Column store block extension data +pub const COLUMN_BLOCK_EXTENSION: Col = "15"; /// META_TIP_HEADER_KEY tracks the latest known best block header pub const META_TIP_HEADER_KEY: &[u8] = b"TIP_HEADER"; diff --git a/freezer/src/freezer.rs b/freezer/src/freezer.rs index 0390e43418..6bec32b882 100644 --- a/freezer/src/freezer.rs +++ 
b/freezer/src/freezer.rs @@ -55,9 +55,12 @@ impl Freezer { .retrieve(freezer_number - 1) .map_err(internal_error)? .ok_or_else(|| internal_error("freezer inconsistent"))?; - let block = packed::BlockReader::from_slice(&raw_block) + let block = packed::BlockReader::from_compatible_slice(&raw_block) .map_err(internal_error)? .to_entity(); + if block.count_extra_fields() > 1 { + return Err(internal_error("block has more than one extra fields")); + } tip = Some(block.header().into_view()); } @@ -147,9 +150,12 @@ impl Freezer { .retrieve(item) .map_err(internal_error)? .expect("frozen number sync with files"); - let block = packed::BlockReader::from_slice(&raw_block) + let block = packed::BlockReader::from_compatible_slice(&raw_block) .map_err(internal_error)? .to_entity(); + if block.count_extra_fields() > 1 { + return Err(internal_error("block has more than one extra fields")); + } inner.tip = Some(block.header().into_view()); } Ok(()) diff --git a/rpc/src/module/experiment.rs b/rpc/src/module/experiment.rs index 7d037092e8..72c3809a85 100644 --- a/rpc/src/module/experiment.rs +++ b/rpc/src/module/experiment.rs @@ -13,7 +13,7 @@ use ckb_types::{ prelude::*, H256, }; -use ckb_verification::ScriptVerifier; +use ckb_verification::{ScriptVerifier, TxVerifyEnv}; use jsonrpc_core::Result; use jsonrpc_derive::rpc; use std::collections::HashSet; @@ -248,8 +248,15 @@ impl<'a> DryRunner<'a> { Ok(resolved) => { let consensus = snapshot.consensus(); let max_cycles = consensus.max_block_cycles; - match ScriptVerifier::new(&resolved, &snapshot.as_data_provider()) - .verify(max_cycles) + let tip_header = snapshot.tip_header(); + let tx_env = TxVerifyEnv::new_submit(&tip_header); + match ScriptVerifier::new( + &resolved, + consensus, + &snapshot.as_data_provider(), + &tx_env, + ) + .verify(max_cycles) { Ok(cycles) => Ok(DryRunResult { cycles: cycles.into(), diff --git a/script/src/lib.rs b/script/src/lib.rs index 363b68fa4e..dcd5078855 100644 --- a/script/src/lib.rs +++ 
b/script/src/lib.rs @@ -6,8 +6,10 @@ mod syscalls; mod type_id; mod types; mod verify; +mod verify_env; pub use crate::error::{ScriptError, TransactionScriptError}; pub use crate::ill_transaction_checker::IllTransactionChecker; pub use crate::types::{ScriptGroup, ScriptGroupType}; pub use crate::verify::TransactionScriptsVerifier; +pub use crate::verify_env::TxVerifyEnv; diff --git a/script/src/verify.rs b/script/src/verify.rs index 68c66d4842..20be98f2bf 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -7,8 +7,9 @@ use crate::{ }, type_id::TypeIdSystemScript, types::{ScriptGroup, ScriptGroupType}, + verify_env::TxVerifyEnv, }; -use ckb_chain_spec::consensus::TYPE_ID_CODE_HASH; +use ckb_chain_spec::consensus::{Consensus, TYPE_ID_CODE_HASH}; use ckb_error::Error; #[cfg(feature = "logging")] use ckb_logger::{debug, info}; @@ -75,19 +76,50 @@ impl LazyData { } } +#[derive(Debug, PartialEq, Eq, Clone)] +enum Binaries { + Unique((Byte32, LazyData)), + Duplicate((Byte32, LazyData)), + Multiple, +} + +impl Binaries { + fn new(data_hash: Byte32, data: LazyData) -> Self { + Self::Unique((data_hash, data)) + } + + fn merge(&mut self, data_hash: &Byte32) { + match self { + Self::Unique(ref old) | Self::Duplicate(ref old) => { + if old.0 != *data_hash { + *self = Self::Multiple; + } else { + *self = Self::Duplicate(old.to_owned()); + } + } + Self::Multiple => { + *self = Self::Multiple; + } + } + } +} + /// This struct leverages CKB VM to verify transaction inputs. 
/// /// FlatBufferBuilder owned `Vec` that grows as needed, in the /// future, we might refactor this to share buffer to achieve zero-copy pub struct TransactionScriptsVerifier<'a, DL> { data_loader: &'a DL, + consensus: &'a Consensus, + tx_env: &'a TxVerifyEnv, + debug_printer: Box, outputs: Vec, rtx: &'a ResolvedTransaction, binaries_by_data_hash: HashMap, - binaries_by_type_hash: HashMap, + binaries_by_type_hash: HashMap, lock_groups: HashMap, type_groups: HashMap, @@ -102,7 +134,9 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D /// * `data_loader` - used to load cell data. pub fn new( rtx: &'a ResolvedTransaction, + consensus: &'a Consensus, data_loader: &'a DL, + tx_env: &'a TxVerifyEnv, ) -> TransactionScriptsVerifier<'a, DL> { let tx_hash = rtx.transaction.hash(); let resolved_cell_deps = &rtx.resolved_cell_deps; @@ -129,18 +163,19 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D .collect(); let mut binaries_by_data_hash: HashMap = HashMap::default(); - let mut binaries_by_type_hash: HashMap = HashMap::default(); + let mut binaries_by_type_hash: HashMap = HashMap::default(); for cell_meta in resolved_cell_deps { let data_hash = data_loader .load_cell_data_hash(cell_meta) .expect("cell data hash"); let lazy = LazyData::from_cell_meta(&cell_meta); - binaries_by_data_hash.insert(data_hash, lazy.clone()); + binaries_by_data_hash.insert(data_hash.to_owned(), lazy.to_owned()); + if let Some(t) = &cell_meta.cell_output.type_().to_opt() { binaries_by_type_hash .entry(t.calc_script_hash()) - .and_modify(|e| e.1 = true) - .or_insert((lazy, false)); + .and_modify(|bin| bin.merge(&data_hash)) + .or_insert_with(|| Binaries::new(data_hash.to_owned(), lazy.to_owned())); } } @@ -172,6 +207,8 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D TransactionScriptsVerifier { data_loader, + consensus, + tx_env, binaries_by_data_hash, binaries_by_type_hash, outputs, @@ -307,12 
+344,23 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D } } ScriptHashType::Type => { - if let Some((lazy, multiple)) = self.binaries_by_type_hash.get(&script.code_hash()) - { - if *multiple { - Err(ScriptError::MultipleMatches) - } else { - Ok(lazy.access(self.data_loader)) + if let Some(ref bin) = self.binaries_by_type_hash.get(&script.code_hash()) { + match bin { + Binaries::Unique((_, ref lazy)) => Ok(lazy.access(self.data_loader)), + Binaries::Duplicate((_, ref lazy)) => { + let proposal_window = self.consensus.tx_proposal_window(); + let epoch_number = self.tx_env.epoch_number(proposal_window); + if self + .consensus + .hardfork_switch() + .is_allow_multiple_matches_on_identical_data_enabled(epoch_number) + { + Ok(lazy.access(self.data_loader)) + } else { + Err(ScriptError::MultipleMatches) + } + } + Binaries::Multiple => Err(ScriptError::MultipleMatches), } } else { Err(ScriptError::InvalidCodeHash) @@ -490,8 +538,8 @@ mod tests { use ckb_store::{data_loader_wrapper::DataLoaderWrapper, ChainDB}; use ckb_types::{ core::{ - capacity_bytes, cell::CellMetaBuilder, Capacity, Cycle, DepType, ScriptHashType, - TransactionBuilder, TransactionInfo, + capacity_bytes, cell::CellMetaBuilder, Capacity, Cycle, DepType, HeaderView, + ScriptHashType, TransactionBuilder, TransactionInfo, }, h256, packed::{ @@ -502,7 +550,9 @@ mod tests { }; use faster_hex::hex_encode; - use ckb_chain_spec::consensus::{TWO_IN_TWO_OUT_BYTES, TWO_IN_TWO_OUT_CYCLES}; + use ckb_chain_spec::consensus::{ + ConsensusBuilder, TWO_IN_TWO_OUT_BYTES, TWO_IN_TWO_OUT_CYCLES, + }; use ckb_error::assert_error_eq; use ckb_test_chain_utils::{ always_success_cell, ckb_testnet_consensus, secp256k1_blake160_sighash_cell, @@ -612,8 +662,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + 
TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert!(verifier.verify(600).is_ok()); } @@ -672,8 +727,13 @@ mod tests { }; let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert!(verifier.verify(100_000_000).is_ok()); @@ -753,8 +813,13 @@ mod tests { }; let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert!(verifier.verify(100_000_000).is_ok()); } @@ -845,8 +910,13 @@ mod tests { }; let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(100_000_000).unwrap_err(), @@ -910,7 +980,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = 
HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; + + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(100_000_000).unwrap_err(), @@ -962,7 +1038,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; + + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(100_000_000).unwrap_err(), @@ -1045,7 +1127,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; + + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert!(verifier.verify(100_000_000).is_ok()); } @@ -1119,8 +1207,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(100_000_000).unwrap_err(), @@ -1184,7 +1277,13 @@ mod tests { }; let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let 
header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; + + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); // Cycles can tell that both lock and type scripts are executed assert_eq!( @@ -1247,8 +1346,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); if let Err(err) = verifier.verify(TYPE_ID_CYCLES * 2) { panic!("expect verification ok, got: {:?}", err); @@ -1309,8 +1413,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); // two groups need exec, so cycles not TYPE_ID_CYCLES - 1 assert_error_eq!( @@ -1382,8 +1491,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert!(verifier.verify(1_001_000).is_ok()); } @@ -1441,8 +1555,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { 
+ let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert!(verifier.verify(1_001_000).is_ok()); } @@ -1515,8 +1634,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(1_001_000).unwrap_err(), @@ -1595,8 +1719,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(1_001_000).unwrap_err(), @@ -1664,8 +1793,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = { + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); assert_error_eq!( verifier.verify(TYPE_ID_CYCLES * 2).unwrap_err(), @@ -1821,8 +1955,13 @@ mod tests { let store = new_store(); let data_loader = DataLoaderWrapper::new(&store); + let consensus = ConsensusBuilder::default().build(); + let tx_env = 
{ + let header = HeaderView::new_advanced_builder().build(); + TxVerifyEnv::new_commit(&header) + }; - let verifier = TransactionScriptsVerifier::new(&rtx, &data_loader); + let verifier = TransactionScriptsVerifier::new(&rtx, &consensus, &data_loader, &tx_env); let cycle = verifier.verify(TWO_IN_TWO_OUT_CYCLES).unwrap(); assert!(cycle <= TWO_IN_TWO_OUT_CYCLES); diff --git a/script/src/verify_env.rs b/script/src/verify_env.rs new file mode 100644 index 0000000000..e840d5d7d8 --- /dev/null +++ b/script/src/verify_env.rs @@ -0,0 +1,120 @@ +//! Transaction verification environment. + +use ckb_chain_spec::consensus::ProposalWindow; +use ckb_types::{ + core::{BlockNumber, EpochNumber, EpochNumberWithFraction, HeaderView}, + packed::Byte32, +}; + +/// The phase that transactions are in. +#[derive(Debug, Clone, Copy)] +enum TxVerifyPhase { + /// The transaction has just been submitted. + /// + /// So the transaction will be: + /// - proposed after (or in) the `tip_number + 1` block. + /// - committed after (or in) `tip_number + 1 + proposal_window.closest()` block. + Submitted, + /// The transaction has already been proposed before several blocks. + /// + /// Assume that the inner block number is `N`. + /// So the transaction is proposed in the `tip_number - N` block. + /// Then it will be committed after (or in) the `tip_number - N + proposal_window.closest()` block. + Proposed(BlockNumber), + /// The transaction is commit. + /// + /// So the transaction will be committed in current block. + Committed, +} + +/// The environment that transactions are in. +#[derive(Debug, Clone)] +pub struct TxVerifyEnv { + // Please keep these fields to be private. + // So we can update this struct easier when we want to add more data. + phase: TxVerifyPhase, + // Current Tip Environment + number: BlockNumber, + epoch: EpochNumberWithFraction, + hash: Byte32, + parent_hash: Byte32, +} + +impl TxVerifyEnv { + /// The transaction has just been submitted. 
+ /// + /// The input is current tip header. + pub fn new_submit(header: &HeaderView) -> Self { + Self { + phase: TxVerifyPhase::Submitted, + number: header.number(), + epoch: header.epoch(), + hash: header.hash(), + parent_hash: header.parent_hash(), + } + } + + /// The transaction has already been proposed before several blocks. + /// + /// The input is current tip header and how many blocks have been passed since the transaction was proposed. + pub fn new_proposed(header: &HeaderView, n_blocks: BlockNumber) -> Self { + Self { + phase: TxVerifyPhase::Proposed(n_blocks), + number: header.number(), + epoch: header.epoch(), + hash: header.hash(), + parent_hash: header.parent_hash(), + } + } + + /// The transaction will be committed in current block. + /// + /// The input is current tip header. + pub fn new_commit(header: &HeaderView) -> Self { + Self { + phase: TxVerifyPhase::Committed, + number: header.number(), + epoch: header.epoch(), + hash: header.hash(), + parent_hash: header.parent_hash(), + } + } + + /// The block number of the earliest block which the transaction will be committed in. + pub fn block_number(&self, proposal_window: ProposalWindow) -> BlockNumber { + match self.phase { + TxVerifyPhase::Submitted => self.number + 1 + proposal_window.closest(), + TxVerifyPhase::Proposed(already_proposed) => { + self.number.saturating_sub(already_proposed) + proposal_window.closest() + } + TxVerifyPhase::Committed => self.number, + } + } + + /// The epoch number of the earliest epoch which the transaction will be committed in. 
+ pub fn epoch_number(&self, proposal_window: ProposalWindow) -> EpochNumber { + let n_blocks = match self.phase { + TxVerifyPhase::Submitted => 1 + proposal_window.closest(), + TxVerifyPhase::Proposed(already_proposed) => { + proposal_window.closest().saturating_sub(already_proposed) + } + TxVerifyPhase::Committed => 0, + }; + self.epoch.minimum_epoch_number_after_n_blocks(n_blocks) + } + + /// The parent block hash of the earliest block which the transaction will be committed in. + pub fn parent_hash(&self) -> Byte32 { + match self.phase { + TxVerifyPhase::Submitted => &self.hash, + TxVerifyPhase::Proposed(_) => &self.hash, + TxVerifyPhase::Committed => &self.parent_hash, + } + .to_owned() + } + + /// The earliest epoch which the transaction will be committed in. + pub fn epoch(&self) -> EpochNumberWithFraction { + self.epoch + } +} diff --git a/spec/Cargo.toml b/spec/Cargo.toml index 9e9f8a09ee..f6d8936fa8 100644 --- a/spec/Cargo.toml +++ b/spec/Cargo.toml @@ -11,6 +11,7 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] serde = { version = "1.0", features = ["derive"] } toml = "0.5" +ckb-constant = { path = "../util/constant", version = "= 0.43.0-pre" } ckb-types = { path = "../util/types", version = "= 0.43.0-pre" } ckb-pow = { path = "../pow", version = "= 0.43.0-pre" } ckb-resource = { path = "../resource", version = "= 0.43.0-pre" } diff --git a/spec/src/consensus.rs b/spec/src/consensus.rs index e1279e377d..2bf066b662 100644 --- a/spec/src/consensus.rs +++ b/spec/src/consensus.rs @@ -16,8 +16,9 @@ use ckb_types::{ bytes::Bytes, constants::{BLOCK_VERSION, TX_VERSION}, core::{ - BlockBuilder, BlockNumber, BlockView, Capacity, Cycle, EpochExt, EpochNumber, - EpochNumberWithFraction, HeaderView, Ratio, TransactionBuilder, TransactionView, Version, + hardfork::HardForkSwitch, BlockBuilder, BlockNumber, BlockView, Capacity, Cycle, EpochExt, + EpochNumber, EpochNumberWithFraction, HeaderView, Ratio, TransactionBuilder, + TransactionView, Version, + }, 
h160, h256, packed::{Byte32, CellInput, CellOutput, Script}, @@ -270,6 +271,7 @@ impl ConsensusBuilder { primary_epoch_reward_halving_interval: DEFAULT_PRIMARY_EPOCH_REWARD_HALVING_INTERVAL, permanent_difficulty_in_dummy: false, + hardfork_switch: HardForkSwitch::new_without_any_enabled(), }, } } @@ -390,6 +392,12 @@ impl ConsensusBuilder { self } + /// Sets median_time_block_count for the new Consensus. + pub fn median_time_block_count(mut self, median_time_block_count: usize) -> Self { + self.inner.median_time_block_count = median_time_block_count; + self + } + /// Sets tx_proposal_window for the new Consensus. pub fn tx_proposal_window(mut self, proposal_window: ProposalWindow) -> Self { self.inner.tx_proposal_window = proposal_window; @@ -445,6 +453,12 @@ impl ConsensusBuilder { self.inner.max_block_proposals_limit = max_block_proposals_limit; self } + + /// Sets a hard fork switch for the new Consensus. + pub fn hardfork_switch(mut self, hardfork_switch: HardForkSwitch) -> Self { + self.inner.hardfork_switch = hardfork_switch; + self + } } /// Struct Consensus defines various parameters that influence chain consensus @@ -519,6 +533,8 @@ pub struct Consensus { pub primary_epoch_reward_halving_interval: EpochNumber, /// Keep difficulty be permanent if the pow is dummy pub permanent_difficulty_in_dummy: bool, + /// A switch to select hard fork features base on the epoch number. + pub hardfork_switch: HardForkSwitch, } // genesis difficulty should not be zero @@ -909,6 +925,17 @@ impl Consensus { self.primary_epoch_reward(epoch.number() + 1) } } + + /// Returns the hardfork switch. + pub fn hardfork_switch(&self) -> &HardForkSwitch { + &self.hardfork_switch + } +} + +/// Trait for consensus provider. +pub trait ConsensusProvider { + /// Returns the `Consensus`. 
+ fn get_consensus(&self) -> &Consensus; } /// Corresponding epoch information of next block diff --git a/spec/src/hardfork.rs b/spec/src/hardfork.rs new file mode 100644 index 0000000000..e8021f0784 --- /dev/null +++ b/spec/src/hardfork.rs @@ -0,0 +1,84 @@ +//! Hard forks parameters. + +use ckb_constant::hardfork::{mainnet, testnet}; +use ckb_types::core::{ + hardfork::{HardForkSwitch, HardForkSwitchBuilder}, + EpochNumber, +}; +use serde::{Deserialize, Serialize}; + +/// Hard forks parameters for spec. +#[derive(Default, Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct HardForkConfig { + // TODO ckb2021 Update all rfc numbers and fix all links, after all proposals are merged. + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0221: Option, + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0222: Option, + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0223: Option, + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0224: Option, +} + +macro_rules! check_default { + ($config:ident, $feature:ident, $expected:expr) => { + match $config.$feature { + Some(input) if input != $expected => { + let errmsg = format!( + "The value for hard fork feature \"{}\" is incorrect, actual: {}, expected: {}. + Don't set it for mainnet or testnet, or set it as a correct value.", + stringify!($feature), + input, + $expected, + ); + Err(errmsg) + }, + _ => Ok($expected), + }? + }; +} + +impl HardForkConfig { + /// If all parameters which have been set are correct for mainnet, then + /// sets all `None` to default values, otherwise, return an `Err`. 
+ pub fn complete_mainnet(&self) -> Result { + let mut b = HardForkSwitch::new_builder(); + b = self.update_builder_via_edition(b, mainnet::CKB2021_START_EPOCH)?; + b.build() + } + + /// If all parameters which have been set are correct for testnet, then + /// sets all `None` to default values, otherwise, return an `Err`. + pub fn complete_testnet(&self) -> Result { + let mut b = HardForkSwitch::new_builder(); + b = self.update_builder_via_edition(b, testnet::CKB2021_START_EPOCH)?; + b.build() + } + + fn update_builder_via_edition( + &self, + builder: HardForkSwitchBuilder, + ckb2021: EpochNumber, + ) -> Result { + let builder = builder + .rfc_pr_0221(check_default!(self, rfc_pr_0221, ckb2021)) + .rfc_pr_0222(check_default!(self, rfc_pr_0222, ckb2021)) + .rfc_pr_0223(check_default!(self, rfc_pr_0223, ckb2021)) + .rfc_pr_0224(check_default!(self, rfc_pr_0224, ckb2021)); + Ok(builder) + } + + /// Converts to a hard fork switch. + /// + /// Enable features which are set to `None` at the user provided epoch. 
+ pub fn complete_with_default(&self, default: EpochNumber) -> Result { + HardForkSwitch::new_builder() + .rfc_pr_0221(self.rfc_pr_0221.unwrap_or(default)) + .rfc_pr_0222(self.rfc_pr_0222.unwrap_or(default)) + .rfc_pr_0223(self.rfc_pr_0223.unwrap_or(default)) + .rfc_pr_0224(self.rfc_pr_0224.unwrap_or(default)) + .build() + } +} diff --git a/spec/src/lib.rs b/spec/src/lib.rs index 5d2fca0e88..aad021eab3 100644 --- a/spec/src/lib.rs +++ b/spec/src/lib.rs @@ -25,8 +25,9 @@ use ckb_resource::{ use ckb_types::{ bytes::Bytes, core::{ - capacity_bytes, BlockBuilder, BlockNumber, BlockView, Capacity, Cycle, EpochNumber, - EpochNumberWithFraction, Ratio, ScriptHashType, TransactionBuilder, TransactionView, + capacity_bytes, hardfork::HardForkSwitch, BlockBuilder, BlockNumber, BlockView, Capacity, + Cycle, EpochNumber, EpochNumberWithFraction, Ratio, ScriptHashType, TransactionBuilder, + TransactionView, }, h256, packed, prelude::*, @@ -40,9 +41,11 @@ use std::fmt; use std::sync::Arc; pub use error::SpecError; +pub use hardfork::HardForkConfig; pub mod consensus; mod error; +mod hardfork; // Just a random secp256k1 secret key for dep group input cell's lock const SPECIAL_CELL_PRIVKEY: H256 = @@ -220,6 +223,11 @@ pub struct Params { /// See [`orphan_rate_target`](consensus/struct.Consensus.html#structfield.orphan_rate_target) #[serde(skip_serializing_if = "Option::is_none")] pub orphan_rate_target: Option<(u32, u32)>, + /// The parameters for hard fork features. + /// + /// See [`hardfork_switch`](consensus/struct.Consensus.html#structfield.hardfork_switch) + #[serde(skip_serializing_if = "Option::is_none")] + pub hardfork: Option, } impl Params { @@ -459,10 +467,25 @@ impl ChainSpec { Ok(()) } + /// Completes all parameters for hard fork features and creates a hard fork switch. + /// + /// Verify the parameters for mainnet and testnet, because all start epoch numbers + /// for mainnet and testnet are fixed. 
+ fn build_hardfork_switch(&self) -> Result> { + let config = self.params.hardfork.as_ref().cloned().unwrap_or_default(); + match self.name.as_str() { + "mainnet" => config.complete_mainnet(), + "testnet" => config.complete_testnet(), + _ => config.complete_with_default(0), + } + .map_err(Into::into) + } + /// Build consensus instance /// /// [Consensus](consensus/struct.Consensus.html) pub fn build_consensus(&self) -> Result> { + let hardfork_switch = self.build_hardfork_switch()?; let genesis_epoch_ext = build_genesis_epoch_ext( self.params.initial_primary_epoch_reward(), self.genesis.compact_target, @@ -492,6 +515,7 @@ impl ChainSpec { .permanent_difficulty_in_dummy(self.params.permanent_difficulty_in_dummy()) .max_block_proposals_limit(self.params.max_block_proposals_limit()) .orphan_rate_target(self.params.orphan_rate_target()) + .hardfork_switch(hardfork_switch) .build(); Ok(consensus) @@ -539,7 +563,7 @@ impl ChainSpec { .parent_hash(self.genesis.parent_hash.pack()) .timestamp(self.genesis.timestamp.pack()) .compact_target(self.genesis.compact_target.pack()) - .uncles_hash(self.genesis.uncles_hash.pack()) + .extra_hash(self.genesis.uncles_hash.pack()) .dao(dao) .nonce(u128::from_le_bytes(self.genesis.nonce.to_le_bytes()).pack()) .transaction(cellbase_transaction) diff --git a/store/src/cache.rs b/store/src/cache.rs index 3b12acd2d8..cc7c32381b 100644 --- a/store/src/cache.rs +++ b/store/src/cache.rs @@ -2,7 +2,7 @@ use ckb_app_config::StoreConfig; use ckb_types::{ bytes::Bytes, core::{HeaderView, TransactionView, UncleBlockVecView}, - packed::{Byte32, ProposalShortIdVec}, + packed::{self, Byte32, ProposalShortIdVec}, }; use ckb_util::Mutex; use lru::LruCache; @@ -21,6 +21,8 @@ pub struct StoreCache { pub block_tx_hashes: Mutex>>, /// TODO(doc): @quake pub block_uncles: Mutex>, + /// The cache of block extension sections. 
+ pub block_extensions: Mutex>>, /// TODO(doc): @quake pub cellbase: Mutex>, } @@ -41,6 +43,7 @@ impl StoreCache { block_proposals: Mutex::new(LruCache::new(config.block_proposals_cache_size)), block_tx_hashes: Mutex::new(LruCache::new(config.block_tx_hashes_cache_size)), block_uncles: Mutex::new(LruCache::new(config.block_uncles_cache_size)), + block_extensions: Mutex::new(LruCache::new(config.block_extensions_cache_size)), cellbase: Mutex::new(LruCache::new(config.cellbase_cache_size)), } } diff --git a/store/src/store.rs b/store/src/store.rs index 72ef1073ac..b0bbf37d62 100644 --- a/store/src/store.rs +++ b/store/src/store.rs @@ -2,10 +2,10 @@ use crate::cache::StoreCache; use crate::data_loader_wrapper::DataLoaderWrapper; use ckb_db::iter::{DBIter, Direction, IteratorMode}; use ckb_db_schema::{ - Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_EPOCH, COLUMN_BLOCK_EXT, COLUMN_BLOCK_HEADER, - COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, COLUMN_CELL, COLUMN_CELL_DATA, - COLUMN_CELL_DATA_HASH, COLUMN_EPOCH, COLUMN_INDEX, COLUMN_META, COLUMN_TRANSACTION_INFO, - COLUMN_UNCLES, META_CURRENT_EPOCH_KEY, META_TIP_HEADER_KEY, + Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_EPOCH, COLUMN_BLOCK_EXT, COLUMN_BLOCK_EXTENSION, + COLUMN_BLOCK_HEADER, COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, COLUMN_CELL, + COLUMN_CELL_DATA, COLUMN_CELL_DATA_HASH, COLUMN_EPOCH, COLUMN_INDEX, COLUMN_META, + COLUMN_TRANSACTION_INFO, COLUMN_UNCLES, META_CURRENT_EPOCH_KEY, META_TIP_HEADER_KEY, }; use ckb_freezer::Freezer; use ckb_types::{ @@ -48,8 +48,9 @@ pub trait ChainStore<'a>: Send + Sync + Sized { if let Some(freezer) = self.freezer() { if header.number() > 0 && header.number() < freezer.number() { let raw_block = freezer.retrieve(header.number()).expect("block frozen")?; - let raw_block = - packed::BlockReader::from_slice_should_be_ok(&raw_block).to_entity(); + let raw_block = packed::BlockReader::from_compatible_slice(&raw_block) + .expect("checked data") + .to_entity(); return 
Some(raw_block.into_view()); } } @@ -60,7 +61,14 @@ pub trait ChainStore<'a>: Send + Sync + Sized { let proposals = self .get_block_proposal_txs_ids(h) .expect("block proposal_ids must be stored"); - Some(BlockView::new_unchecked(header, uncles, body, proposals)) + let extension_opt = self.get_block_extension(h); + + let block = if let Some(extension) = extension_opt { + BlockView::new_unchecked_with_extension(header, uncles, body, proposals, extension) + } else { + BlockView::new_unchecked(header, uncles, body, proposals) + }; + Some(block) } /// Get header by block header hash @@ -127,7 +135,18 @@ pub trait ChainStore<'a>: Send + Sync + Sized { .to_entity() }) .expect("block proposal_ids must be stored"); - Some(BlockView::new_unchecked(header, uncles, body, proposals)) + + let extension_opt = self + .get(COLUMN_BLOCK_EXTENSION, hash.as_slice()) + .map(|slice| packed::BytesReader::from_slice_should_be_ok(&slice.as_ref()).to_entity()); + + let block = if let Some(extension) = extension_opt { + BlockView::new_unchecked_with_extension(header, uncles, body, proposals, extension) + } else { + BlockView::new_unchecked(header, uncles, body, proposals) + }; + + Some(block) } /// Get all transaction-hashes in block body by block header hash @@ -210,6 +229,24 @@ pub trait ChainStore<'a>: Send + Sync + Sized { } } + /// Get block extension by block header hash + fn get_block_extension(&'a self, hash: &packed::Byte32) -> Option { + if let Some(cache) = self.cache() { + if let Some(data) = cache.block_extensions.lock().get(hash) { + return data.clone(); + } + }; + + let ret = self + .get(COLUMN_BLOCK_EXTENSION, hash.as_slice()) + .map(|slice| packed::BytesReader::from_slice_should_be_ok(&slice.as_ref()).to_entity()); + + if let Some(cache) = self.cache() { + cache.block_extensions.lock().put(hash.clone(), ret.clone()); + } + ret + } + /// Get block ext by block header hash fn get_block_ext(&'a self, block_hash: &packed::Byte32) -> Option { self.get(COLUMN_BLOCK_EXT, 
block_hash.as_slice()) @@ -283,7 +320,8 @@ pub trait ChainStore<'a>: Send + Sync + Sized { let raw_block = freezer .retrieve(tx_info.block_number) .expect("block frozen")?; - let raw_block_reader = packed::BlockReader::from_slice_should_be_ok(&raw_block); + let raw_block_reader = + packed::BlockReader::from_compatible_slice(&raw_block).expect("checked data"); let tx_reader = raw_block_reader.transactions().get(tx_info.index)?; return Some((tx_reader.to_entity().into_view(), tx_info)); } diff --git a/store/src/transaction.rs b/store/src/transaction.rs index 13dbb8744b..c4d0c90d23 100644 --- a/store/src/transaction.rs +++ b/store/src/transaction.rs @@ -5,10 +5,11 @@ use ckb_db::{ DBVector, RocksDBTransaction, RocksDBTransactionSnapshot, }; use ckb_db_schema::{ - Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_EPOCH, COLUMN_BLOCK_EXT, COLUMN_BLOCK_HEADER, - COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, COLUMN_CELL, COLUMN_CELL_DATA, - COLUMN_CELL_DATA_HASH, COLUMN_EPOCH, COLUMN_INDEX, COLUMN_META, COLUMN_NUMBER_HASH, - COLUMN_TRANSACTION_INFO, COLUMN_UNCLES, META_CURRENT_EPOCH_KEY, META_TIP_HEADER_KEY, + Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_EPOCH, COLUMN_BLOCK_EXT, COLUMN_BLOCK_EXTENSION, + COLUMN_BLOCK_HEADER, COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, COLUMN_CELL, + COLUMN_CELL_DATA, COLUMN_CELL_DATA_HASH, COLUMN_EPOCH, COLUMN_INDEX, COLUMN_META, + COLUMN_NUMBER_HASH, COLUMN_TRANSACTION_INFO, COLUMN_UNCLES, META_CURRENT_EPOCH_KEY, + META_TIP_HEADER_KEY, }; use ckb_error::Error; use ckb_freezer::Freezer; @@ -126,6 +127,13 @@ impl StoreTransaction { let txs_len: packed::Uint32 = (block.transactions().len() as u32).pack(); self.insert_raw(COLUMN_BLOCK_HEADER, hash.as_slice(), header.as_slice())?; self.insert_raw(COLUMN_BLOCK_UNCLE, hash.as_slice(), uncles.as_slice())?; + if let Some(extension) = block.extension() { + self.insert_raw( + COLUMN_BLOCK_EXTENSION, + hash.as_slice(), + &extension.as_slice(), + )?; + } self.insert_raw( COLUMN_NUMBER_HASH, 
packed::NumberHash::new_builder() @@ -157,6 +165,7 @@ impl StoreTransaction { let txs_len = block.transactions().len(); self.delete(COLUMN_BLOCK_HEADER, hash.as_slice())?; self.delete(COLUMN_BLOCK_UNCLE, hash.as_slice())?; + self.delete(COLUMN_BLOCK_EXTENSION, hash.as_slice())?; self.delete(COLUMN_BLOCK_PROPOSAL_IDS, hash.as_slice())?; self.delete( COLUMN_NUMBER_HASH, diff --git a/store/src/write_batch.rs b/store/src/write_batch.rs index 18d69f95a7..db22694cfc 100644 --- a/store/src/write_batch.rs +++ b/store/src/write_batch.rs @@ -1,7 +1,7 @@ use ckb_db::RocksDBWriteBatch; use ckb_db_schema::{ - Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_HEADER, COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, - COLUMN_CELL, COLUMN_CELL_DATA, COLUMN_CELL_DATA_HASH, COLUMN_NUMBER_HASH, + Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_EXTENSION, COLUMN_BLOCK_HEADER, COLUMN_BLOCK_PROPOSAL_IDS, + COLUMN_BLOCK_UNCLE, COLUMN_CELL, COLUMN_CELL_DATA, COLUMN_CELL_DATA_HASH, COLUMN_NUMBER_HASH, }; use ckb_error::Error; use ckb_types::{core::BlockNumber, packed, prelude::*}; @@ -94,6 +94,7 @@ impl StoreWriteBatch { txs_len: u32, ) -> Result<(), Error> { self.inner.delete(COLUMN_BLOCK_UNCLE, hash.as_slice())?; + self.inner.delete(COLUMN_BLOCK_EXTENSION, hash.as_slice())?; self.inner .delete(COLUMN_BLOCK_PROPOSAL_IDS, hash.as_slice())?; self.inner.delete( diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 06fa5b5133..608576ca08 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -654,8 +654,52 @@ impl CKBProtocolHandler for Relayer { return; } - let msg = match packed::RelayMessage::from_slice(&data) { - Ok(msg) => msg.to_enum(), + let msg = match packed::RelayMessageReader::from_compatible_slice(&data) { + Ok(msg) => { + let item = msg.to_enum(); + if let packed::RelayMessageUnionReader::CompactBlock(ref reader) = item { + if reader.count_extra_fields() > 1 { + info_target!( + crate::LOG_TARGET_RELAY, + "Peer {} sends us a malformed message: \ + too many fields in 
CompactBlock", + peer_index + ); + nc.ban_peer( + peer_index, + BAD_MESSAGE_BAN_TIME, + String::from( + "send us a malformed message: \ + too many fields in CompactBlock", + ), + ); + return; + } else { + item + } + } else { + match packed::RelayMessageReader::from_slice(&data) { + Ok(msg) => msg.to_enum(), + _ => { + info_target!( + crate::LOG_TARGET_RELAY, + "Peer {} sends us a malformed message: \ + too many fields", + peer_index + ); + nc.ban_peer( + peer_index, + BAD_MESSAGE_BAN_TIME, + String::from( + "send us a malformed message \ + too many fields", + ), + ); + return; + } + } + } + } _ => { info_target!( crate::LOG_TARGET_RELAY, @@ -688,7 +732,7 @@ impl CKBProtocolHandler for Relayer { } let start_time = Instant::now(); - self.process(nc, peer_index, msg.as_reader()); + self.process(nc, peer_index, msg); debug_target!( crate::LOG_TARGET_RELAY, "process message={}, peer={}, cost={:?}", diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 1880fe19d5..1ea28c2640 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -677,8 +677,50 @@ impl CKBProtocolHandler for Synchronizer { peer_index: PeerIndex, data: Bytes, ) { - let msg = match packed::SyncMessage::from_slice(&data) { - Ok(msg) => msg.to_enum(), + let msg = match packed::SyncMessageReader::from_compatible_slice(&data) { + Ok(msg) => { + let item = msg.to_enum(); + if let packed::SyncMessageUnionReader::SendBlock(ref reader) = item { + if reader.count_extra_fields() > 1 { + info!( + "Peer {} sends us a malformed message: \ + too many fields in SendBlock", + peer_index + ); + nc.ban_peer( + peer_index, + BAD_MESSAGE_BAN_TIME, + String::from( + "send us a malformed message: \ + too many fields in SendBlock", + ), + ); + return; + } else { + item + } + } else { + match packed::SyncMessageReader::from_slice(&data) { + Ok(msg) => msg.to_enum(), + _ => { + info!( + "Peer {} sends us a malformed message: \ + too many fields", + peer_index + ); + nc.ban_peer( + 
peer_index, + BAD_MESSAGE_BAN_TIME, + String::from( + "send us a malformed message: \ + too many fields", + ), + ); + return; + } + } + } + } _ => { info!("Peer {} sends us a malformed message", peer_index); nc.ban_peer( @@ -702,7 +744,7 @@ impl CKBProtocolHandler for Synchronizer { } let start_time = Instant::now(); - self.process(nc.as_ref(), peer_index, msg.as_reader()); + self.process(nc.as_ref(), peer_index, msg); debug!( "process message={}, peer={}, cost={:?}", msg.item_name(), diff --git a/test/src/main.rs b/test/src/main.rs index 61fc184d2f..d2dc560bec 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -488,7 +488,14 @@ fn all_specs() -> Vec> { Box::new(CellBeingCellDepThenSpentInSameBlockTestSubmitBlock), Box::new(CellBeingCellDepAndSpentInSameBlockTestGetBlockTemplate), Box::new(CellBeingCellDepAndSpentInSameBlockTestGetBlockTemplateMultiple), - Box::new(DuplicateCellDeps), + // Test hard fork features + Box::new(CheckAbsoluteEpochSince), + Box::new(CheckRelativeEpochSince), + Box::new(CheckBlockExtension), + Box::new(DuplicateCellDepsForDataHashTypeLockScript), + Box::new(DuplicateCellDepsForDataHashTypeTypeScript), + Box::new(DuplicateCellDepsForTypeHashTypeLockScript), + Box::new(DuplicateCellDepsForTypeHashTypeTypeScript), ]; specs.shuffle(&mut thread_rng()); specs diff --git a/test/src/rpc.rs b/test/src/rpc.rs index ea90842c05..f5f8860d1c 100644 --- a/test/src/rpc.rs +++ b/test/src/rpc.rs @@ -85,7 +85,7 @@ impl RpcClient { pub fn get_tip_header(&self) -> HeaderView { self.inner .get_tip_header() - .expect("rpc call get_block_hash") + .expect("rpc call get_tip_header") } pub fn get_live_cell(&self, out_point: OutPoint, with_data: bool) -> CellWithStatus { diff --git a/test/src/specs/dao/dao_user.rs b/test/src/specs/dao/dao_user.rs index 9d60ca2b42..285e3cde2f 100644 --- a/test/src/specs/dao/dao_user.rs +++ b/test/src/specs/dao/dao_user.rs @@ -133,7 +133,7 @@ impl<'a> DAOUser<'a> { let prepare_utxo_headers = self.utxo_headers(&self.prepare_utxo); 
let inputs = prepare_utxo_headers.iter().map(|(txo, _)| { let minimal_unlock_point = self.minimal_unlock_point(&txo.out_point()); - let since = since_from_absolute_epoch_number(minimal_unlock_point.full_value()); + let since = since_from_absolute_epoch_number(minimal_unlock_point); CellInput::new(txo.out_point(), since) }); let output_capacity = deposit_utxo_headers diff --git a/test/src/specs/hardfork/mod.rs b/test/src/specs/hardfork/mod.rs new file mode 100644 index 0000000000..422849ec4a --- /dev/null +++ b/test/src/specs/hardfork/mod.rs @@ -0,0 +1,3 @@ +mod v2021; + +pub use v2021::*; diff --git a/test/src/specs/hardfork/v2021/cell_deps.rs b/test/src/specs/hardfork/v2021/cell_deps.rs new file mode 100644 index 0000000000..85d9daf114 --- /dev/null +++ b/test/src/specs/hardfork/v2021/cell_deps.rs @@ -0,0 +1,1085 @@ +use crate::{ + util::{ + cell::gen_spendable, + check::{assert_epoch_should_be, assert_epoch_should_less_than, is_transaction_committed}, + mining::{mine, mine_until_bool, mine_until_epoch}, + }, + utils::assert_send_transaction_fail, + Node, Spec, +}; +use ckb_jsonrpc_types as rpc; +use ckb_logger::{debug, info}; +use ckb_types::{ + core::{Capacity, DepType, ScriptHashType, TransactionBuilder, TransactionView}, + packed, + prelude::*, +}; +use std::fmt; + +const GENESIS_EPOCH_LENGTH: u64 = 10; +const CKB2021_START_EPOCH: u64 = 10; + +const TEST_CASES_COUNT: usize = (8 + 4) * 3; +const CELL_DEPS_COUNT: usize = 2 + 3 + 2; +const INITIAL_INPUTS_COUNT: usize = 2 + CELL_DEPS_COUNT + TEST_CASES_COUNT; + +pub struct DuplicateCellDepsForDataHashTypeLockScript; +pub struct DuplicateCellDepsForDataHashTypeTypeScript; +pub struct DuplicateCellDepsForTypeHashTypeLockScript; +pub struct DuplicateCellDepsForTypeHashTypeTypeScript; + +struct NewScript { + data: packed::Bytes, + cell_dep: packed::CellDep, + data_hash: packed::Byte32, + type_hash: packed::Byte32, +} + +#[derive(Debug, Clone, Copy)] +enum ExpectedResult { + ShouldBePassed, + DuplicateCellDeps, + 
MultipleMatchesLock, + MultipleMatchesType, +} + +const PASS: ExpectedResult = ExpectedResult::ShouldBePassed; +const DUP: ExpectedResult = ExpectedResult::DuplicateCellDeps; +const MML: ExpectedResult = ExpectedResult::MultipleMatchesLock; +const MMT: ExpectedResult = ExpectedResult::MultipleMatchesType; + +// For all: +// - code1 and code2 are cell deps with same data +// - dep_group1 and dep_group2 are cell deps which point to different code cell deps with same data +// - dep_group2 and dep_group2_copy are cell deps which point to same code cell deps +// For type hash type only: +// - code3 has same type with code1 (code2) but different data +// - dep_group3 has same type with dep_group1 (dep_group2, dep_group2_copy) but different data +struct CellDepSet { + code1: packed::CellDep, + code2: packed::CellDep, + dep_group1: packed::CellDep, + dep_group2: packed::CellDep, + dep_group2_copy: packed::CellDep, + code3: packed::CellDep, + dep_group3: packed::CellDep, +} + +struct DuplicateCellDepsTestRunner { + tag: &'static str, +} + +impl Spec for DuplicateCellDepsForDataHashTypeLockScript { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + let ckb2019_last_epoch = CKB2021_START_EPOCH - 1; + let runner = DuplicateCellDepsTestRunner::new("data-hash-type/lock-script"); + let mut original_inputs = gen_spendable(node, INITIAL_INPUTS_COUNT) + .into_iter() + .map(|input| packed::CellInput::new(input.out_point, 0)); + let script1 = NewScript::new_with_id(node, 1, &mut original_inputs, None); + let mut inputs = { + let txs = original_inputs.by_ref().take(TEST_CASES_COUNT).collect(); + runner.use_new_data_script_replace_lock_script(node, txs, &script1) + }; + let deps = runner.create_cell_dep_set(node, &mut original_inputs, &script1, None); + let tb = TransactionView::new_advanced_builder(); + { + info!("CKB v2019:"); + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + 
runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + } + assert_epoch_should_less_than(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + mine_until_epoch(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + { + info!("CKB v2019 (boundary):"); + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + } + mine(node, 1); + { + info!("CKB v2021:"); + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0222 = Some(CKB2021_START_EPOCH); + } + } +} + +impl Spec for DuplicateCellDepsForDataHashTypeTypeScript { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = 
GENESIS_EPOCH_LENGTH; + let ckb2019_last_epoch = CKB2021_START_EPOCH - 1; + let runner = DuplicateCellDepsTestRunner::new("data-hash-type/type-script"); + let mut original_inputs = gen_spendable(node, INITIAL_INPUTS_COUNT) + .into_iter() + .map(|input| packed::CellInput::new(input.out_point, 0)); + let script1 = NewScript::new_with_id(node, 1, &mut original_inputs, None); + let mut inputs = { + let txs = original_inputs.by_ref().take(TEST_CASES_COUNT).collect(); + runner.add_new_data_script_as_type_script(node, txs, &script1) + }; + let deps = runner.create_cell_dep_set(node, &mut original_inputs, &script1, None); + let tb = TransactionView::new_advanced_builder().cell_dep(node.always_success_cell_dep()); + { + info!("CKB v2019:"); + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + } + assert_epoch_should_less_than(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + mine_until_epoch(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + { + info!("CKB v2019 (boundary):"); + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + } + mine(node, 1); + { + info!("CKB v2021:"); + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + 
runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0222 = Some(CKB2021_START_EPOCH); + } + } +} + +impl Spec for DuplicateCellDepsForTypeHashTypeLockScript { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + let ckb2019_last_epoch = CKB2021_START_EPOCH - 1; + let runner = DuplicateCellDepsTestRunner::new("type-hash-type/lock-script"); + let mut original_inputs = gen_spendable(node, INITIAL_INPUTS_COUNT) + .into_iter() + .map(|input| packed::CellInput::new(input.out_point, 0)); + let script0 = NewScript::new_with_id(node, 0, &mut original_inputs, None); + let script1 = NewScript::new_with_id(node, 1, &mut original_inputs, Some(&script0)); + let mut inputs = { + let txs = original_inputs.by_ref().take(TEST_CASES_COUNT).collect(); + runner.use_new_data_script_replace_type_script(node, txs, &script1) + }; + let deps = runner.create_cell_dep_set(node, &mut original_inputs, &script1, Some(&script0)); + let tb = TransactionView::new_advanced_builder(); + { + info!("CKB v2019:"); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, MML); + + runner.test_duplicate_code_type(node, &deps, &mut 
inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + + // Type hash type only + runner.test_same_type_not_same_data_code_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_hybrid_type_v1(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_hybrid_type_v2(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MML); + } + assert_epoch_should_less_than(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + mine_until_epoch(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + { + info!("CKB v2019 (boundary):"); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, MML); + + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + // Type hash type only + runner.test_same_type_not_same_data_code_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_hybrid_type_v1(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_hybrid_type_v2(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MML); + } + assert_epoch_should_be(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + mine(node, 1); + { + info!("CKB v2021:"); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_dep_group_type(node, &deps, &mut 
inputs, &tb, PASS); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + + // Type hash type only + runner.test_same_type_not_same_data_code_type(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_hybrid_type_v1(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_hybrid_type_v2(node, &deps, &mut inputs, &tb, MML); + runner.test_same_type_not_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MML); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0222 = Some(CKB2021_START_EPOCH); + } + } +} + +impl Spec for DuplicateCellDepsForTypeHashTypeTypeScript { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + let ckb2019_last_epoch = CKB2021_START_EPOCH - 1; + let runner = DuplicateCellDepsTestRunner::new("type-hash-type/type-script"); + let mut original_inputs = gen_spendable(node, INITIAL_INPUTS_COUNT) + .into_iter() + .map(|input| packed::CellInput::new(input.out_point, 0)); + let script0 = NewScript::new_with_id(node, 0, &mut original_inputs, None); + let script1 = NewScript::new_with_id(node, 1, &mut original_inputs, Some(&script0)); + let mut inputs = { + let txs = original_inputs.by_ref().take(TEST_CASES_COUNT).collect(); + runner.add_new_type_script_as_type_script(node, txs, &script1) + }; + let deps = runner.create_cell_dep_set(node, &mut original_inputs, &script1, Some(&script0)); + let tb = 
TransactionView::new_advanced_builder().cell_dep(node.always_success_cell_dep()); + { + info!("CKB v2019:"); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, MMT); + + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + // Type hash type only + runner.test_same_type_not_same_data_code_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_hybrid_type_v1(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_hybrid_type_v2(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MMT); + } + assert_epoch_should_less_than(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + mine_until_epoch(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + { + info!("CKB v2019 (boundary):"); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, MMT); + + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + // Type hash type only + runner.test_same_type_not_same_data_code_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_hybrid_type_v1(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_hybrid_type_v2(node, 
&deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MMT); + } + assert_epoch_should_be(node, ckb2019_last_epoch, epoch_length - 4, epoch_length); + mine(node, 1); + { + info!("CKB v2021:"); + runner.test_same_data_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_hybrid_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_data_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_same_point_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + + runner.test_duplicate_code_type(node, &deps, &mut inputs, &tb, DUP); + runner.test_duplicate_dep_group_type(node, &deps, &mut inputs, &tb, DUP); + + runner.test_single_code_type(node, &deps, &mut inputs, &tb, PASS); + runner.test_single_dep_group_type(node, &deps, &mut inputs, &tb, PASS); + + // Type hash type only + runner.test_same_type_not_same_data_code_type(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_hybrid_type_v1(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_hybrid_type_v2(node, &deps, &mut inputs, &tb, MMT); + runner.test_same_type_not_same_data_dep_group_type(node, &deps, &mut inputs, &tb, MMT); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0222 = Some(CKB2021_START_EPOCH); + } + } +} + +impl NewScript { + fn new_with_id( + node: &Node, + id: u8, + inputs: &mut impl Iterator, + type_script_opt: Option<&Self>, + ) -> Self { + let original_data = node.always_success_raw_data(); + let data = packed::Bytes::new_builder() + .extend(original_data.as_ref().iter().map(|x| (*x).into())) + .push(id.into()) + .build(); + let tx = Self::deploy(node, &data, inputs, type_script_opt); + let cell_dep = packed::CellDep::new_builder() + 
.out_point(packed::OutPoint::new(tx.hash(), 0)) + .dep_type(DepType::Code.into()) + .build(); + let data_hash = packed::CellOutput::calc_data_hash(&data.raw_data()); + let type_hash = tx + .output(0) + .unwrap() + .type_() + .to_opt() + .unwrap() + .calc_script_hash(); + Self { + data, + cell_dep, + data_hash, + type_hash, + } + } + + fn deploy( + node: &Node, + data: &packed::Bytes, + inputs: &mut impl Iterator, + type_script_opt: Option<&Self>, + ) -> TransactionView { + let (type_script, tx_template) = if let Some(script) = type_script_opt { + ( + script.as_data_script(), + TransactionView::new_advanced_builder().cell_dep(script.cell_dep()), + ) + } else { + ( + node.always_success_script(), + TransactionView::new_advanced_builder(), + ) + }; + let cell_input = inputs.next().unwrap(); + let cell_output = packed::CellOutput::new_builder() + .type_(Some(type_script).pack()) + .build_exact_capacity(Capacity::bytes(data.len()).unwrap()) + .unwrap(); + let tx = tx_template + .cell_dep(node.always_success_cell_dep()) + .input(cell_input) + .output(cell_output) + .output_data(data.clone()) + .build(); + node.submit_transaction(&tx); + mine_until_bool(node, || is_transaction_committed(node, &tx)); + tx + } + + fn data(&self) -> packed::Bytes { + self.data.clone() + } + + fn cell_dep(&self) -> packed::CellDep { + self.cell_dep.clone() + } + + fn as_data_script(&self) -> packed::Script { + packed::Script::new_builder() + .code_hash(self.data_hash.clone()) + .hash_type(ScriptHashType::Data.into()) + .build() + } + + fn as_type_script(&self) -> packed::Script { + packed::Script::new_builder() + .code_hash(self.type_hash.clone()) + .hash_type(ScriptHashType::Type.into()) + .build() + } +} + +impl fmt::Display for ExpectedResult { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::ShouldBePassed => write!(f, "allowed"), + _ => write!(f, "not allowed"), + } + } +} + +impl ExpectedResult { + fn error_message(self) -> Option<&'static str> { + match 
self { + Self::ShouldBePassed => None, + Self::DuplicateCellDeps => Some( + "{\"code\":-302,\"message\":\"TransactionFailedToVerify: \ + Verification failed Transaction(DuplicateCellDeps(", + ), + Self::MultipleMatchesLock => Some( + "{\"code\":-302,\"message\":\"TransactionFailedToVerify: \ + Verification failed Script(TransactionScriptError \ + { source: Inputs[0].Lock, cause: MultipleMatches })", + ), + Self::MultipleMatchesType => Some( + "{\"code\":-302,\"message\":\"TransactionFailedToVerify: \ + Verification failed Script(TransactionScriptError \ + { source: Inputs[0].Type, cause: MultipleMatches })", + ), + } + } +} + +impl DuplicateCellDepsTestRunner { + fn new(tag: &'static str) -> Self { + Self { tag } + } + + fn submit_transaction_until_committed(&self, node: &Node, tx: &TransactionView) { + debug!( + "[{}] >>> >>> submit: submit transaction {:#x}.", + self.tag, + tx.hash() + ); + node.submit_transaction(tx); + mine_until_bool(node, || is_transaction_committed(node, tx)); + } +} + +// Convert Lock Script or Type Script +impl DuplicateCellDepsTestRunner { + fn create_initial_inputs( + &self, + node: &Node, + txs: Vec, + ) -> impl Iterator { + for tx in &txs { + node.rpc_client().send_transaction(tx.data().into()); + } + mine_until_bool(node, || { + txs.iter().all(|tx| is_transaction_committed(node, &tx)) + }); + txs.into_iter().map(|tx| { + let out_point = packed::OutPoint::new(tx.hash(), 0); + packed::CellInput::new(out_point, 0) + }) + } + + fn get_previous_output(node: &Node, cell_input: &packed::CellInput) -> rpc::CellOutput { + let previous_output = cell_input.previous_output(); + let previous_output_index: usize = previous_output.index().unpack(); + node.rpc_client() + .get_transaction(previous_output.tx_hash()) + .unwrap() + .transaction + .inner + .outputs[previous_output_index] + .clone() + } + + fn use_new_data_script_replace_lock_script( + &self, + node: &Node, + inputs: Vec, + new_script: &NewScript, + ) -> impl Iterator { + let txs = inputs 
+ .into_iter() + .map(|cell_input| { + let input_cell = Self::get_previous_output(node, &cell_input); + let cell_output = packed::CellOutput::new_builder() + .capacity((input_cell.capacity.value() - 1).pack()) + .lock(new_script.as_data_script()) + .build(); + TransactionView::new_advanced_builder() + .cell_dep(node.always_success_cell_dep()) + .cell_dep(new_script.cell_dep()) + .input(cell_input) + .output(cell_output) + .output_data(Default::default()) + .build() + }) + .collect::>(); + self.create_initial_inputs(node, txs) + } + + fn add_new_data_script_as_type_script( + &self, + node: &Node, + inputs: Vec, + new_script: &NewScript, + ) -> impl Iterator { + let txs = inputs + .into_iter() + .map(|cell_input| { + let input_cell = Self::get_previous_output(node, &cell_input); + let cell_output = packed::CellOutput::new_builder() + .capacity((input_cell.capacity.value() - 1).pack()) + .lock(node.always_success_script()) + .type_(Some(new_script.as_data_script()).pack()) + .build(); + TransactionView::new_advanced_builder() + .cell_dep(node.always_success_cell_dep()) + .cell_dep(new_script.cell_dep()) + .input(cell_input) + .output(cell_output) + .output_data(Default::default()) + .build() + }) + .collect::>(); + self.create_initial_inputs(node, txs) + } + + fn use_new_data_script_replace_type_script( + &self, + node: &Node, + inputs: Vec, + new_script: &NewScript, + ) -> impl Iterator { + let txs = inputs + .into_iter() + .map(|cell_input| { + let input_cell = Self::get_previous_output(node, &cell_input); + let cell_output = packed::CellOutput::new_builder() + .capacity((input_cell.capacity.value() - 1).pack()) + .lock(new_script.as_type_script()) + .build(); + TransactionView::new_advanced_builder() + .cell_dep(node.always_success_cell_dep()) + .cell_dep(new_script.cell_dep()) + .input(cell_input) + .output(cell_output) + .output_data(Default::default()) + .build() + }) + .collect::>(); + self.create_initial_inputs(node, txs) + } + + fn 
add_new_type_script_as_type_script( + &self, + node: &Node, + inputs: Vec, + new_script: &NewScript, + ) -> impl Iterator { + let txs = inputs + .into_iter() + .map(|cell_input| { + let input_cell = Self::get_previous_output(node, &cell_input); + let cell_output = packed::CellOutput::new_builder() + .capacity((input_cell.capacity.value() - 1).pack()) + .lock(node.always_success_script()) + .type_(Some(new_script.as_type_script()).pack()) + .build(); + TransactionView::new_advanced_builder() + .cell_dep(node.always_success_cell_dep()) + .cell_dep(new_script.cell_dep()) + .input(cell_input) + .output(cell_output) + .output_data(Default::default()) + .build() + }) + .collect::>(); + self.create_initial_inputs(node, txs) + } +} + +// Create All Cell Deps for Test +impl DuplicateCellDepsTestRunner { + fn create_cell_dep_set( + &self, + node: &Node, + inputs: &mut impl Iterator, + script: &NewScript, + type_script_opt: Option<&NewScript>, + ) -> CellDepSet { + let code_txs = { + let tx_template = { + let script_output = if let Some(type_script) = type_script_opt { + packed::CellOutput::new_builder() + .type_(Some(type_script.as_data_script()).pack()) + } else { + packed::CellOutput::new_builder() + } + .build_exact_capacity(Capacity::bytes(script.data().len()).unwrap()) + .unwrap(); + if let Some(type_script) = type_script_opt { + TransactionView::new_advanced_builder().cell_dep(type_script.cell_dep()) + } else { + TransactionView::new_advanced_builder() + } + .output(script_output) + .output_data(script.data()) + }; + self.create_transactions_as_code_type_cell_deps(node, inputs, &tx_template) + }; + + let dep_group_txs = { + let tx_template = TransactionView::new_advanced_builder(); + self.create_transactions_as_depgroup_type_cell_deps( + node, + inputs, + &tx_template, + &code_txs, + ) + }; + let incorrect_opt = type_script_opt.map(|type_script| { + self.create_transactions_as_incorrect_cell_deps(node, inputs, type_script) + }); + self.combine_cell_deps(code_txs, 
dep_group_txs, incorrect_opt) + } + + fn create_transactions_as_code_type_cell_deps( + &self, + node: &Node, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + ) -> (TransactionView, TransactionView) { + info!( + "[{}] >>> warm up: create 2 transactions as code-type cell deps.", + self.tag + ); + let tx_template = tx_template.clone().cell_dep(node.always_success_cell_dep()); + let dep1_tx = tx_template.clone().input(inputs.next().unwrap()).build(); + let dep2_tx = tx_template.input(inputs.next().unwrap()).build(); + self.submit_transaction_until_committed(node, &dep1_tx); + self.submit_transaction_until_committed(node, &dep2_tx); + (dep1_tx, dep2_tx) + } + + fn create_transactions_as_depgroup_type_cell_deps( + &self, + node: &Node, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + code_txs: &(TransactionView, TransactionView), + ) -> (TransactionView, TransactionView, TransactionView) { + info!( + "[{}] >>> warm up: create 3 transactions as depgroup-type cell deps.", + self.tag + ); + let (ref dep1_tx, ref dep2_tx) = code_txs; + let tx_template = tx_template.clone().cell_dep(node.always_success_cell_dep()); + let dep1_op = packed::OutPoint::new(dep1_tx.hash(), 0); + let dep2_op = packed::OutPoint::new(dep2_tx.hash(), 0); + let dep3_data = vec![dep1_op].pack().as_bytes().pack(); + let dep4_data = vec![dep2_op].pack().as_bytes().pack(); + let dep3_output = packed::CellOutput::new_builder() + .build_exact_capacity(Capacity::bytes(dep3_data.len()).unwrap()) + .unwrap(); + let dep4_output = packed::CellOutput::new_builder() + .build_exact_capacity(Capacity::bytes(dep4_data.len()).unwrap()) + .unwrap(); + let dep3_tx = tx_template + .clone() + .input(inputs.next().unwrap()) + .output(dep3_output) + .output_data(dep3_data) + .build(); + let dep4_tx = tx_template + .clone() + .input(inputs.next().unwrap()) + .output(dep4_output.clone()) + .output_data(dep4_data.clone()) + .build(); + let dep4b_tx = tx_template + 
.input(inputs.next().unwrap()) + .output(dep4_output) + .output_data(dep4_data) + .build(); + self.submit_transaction_until_committed(node, &dep3_tx); + self.submit_transaction_until_committed(node, &dep4_tx); + self.submit_transaction_until_committed(node, &dep4b_tx); + (dep3_tx, dep4_tx, dep4b_tx) + } + + fn create_transactions_as_incorrect_cell_deps( + &self, + node: &Node, + inputs: &mut impl Iterator, + type_script: &NewScript, + ) -> (TransactionView, TransactionView) { + info!( + "[{}] >>> warm up: create 2 transactions as incorrect cell deps.", + self.tag + ); + let original_data = node.always_success_raw_data(); + let dep5_data = packed::Bytes::new_builder() + .extend(original_data.as_ref().iter().map(|x| (*x).into())) + .build(); + let dep5_output = packed::CellOutput::new_builder() + .type_(Some(type_script.as_data_script()).pack()) + .build_exact_capacity(Capacity::bytes(dep5_data.len()).unwrap()) + .unwrap(); + let dep5_tx = TransactionView::new_advanced_builder() + .cell_dep(node.always_success_cell_dep()) + .cell_dep(type_script.cell_dep()) + .input(inputs.next().unwrap()) + .output(dep5_output) + .output_data(dep5_data) + .build(); + let dep5_op = packed::OutPoint::new(dep5_tx.hash(), 0); + let dep6_data = vec![dep5_op].pack().as_bytes().pack(); + let dep6_output = packed::CellOutput::new_builder() + .build_exact_capacity(Capacity::bytes(dep6_data.len()).unwrap()) + .unwrap(); + let dep6_tx = TransactionView::new_advanced_builder() + .cell_dep(node.always_success_cell_dep()) + .input(inputs.next().unwrap()) + .output(dep6_output) + .output_data(dep6_data) + .build(); + self.submit_transaction_until_committed(node, &dep5_tx); + self.submit_transaction_until_committed(node, &dep6_tx); + (dep5_tx, dep6_tx) + } + + fn combine_cell_deps( + &self, + code_txs: (TransactionView, TransactionView), + dep_group_txs: (TransactionView, TransactionView, TransactionView), + incorrect_opt: Option<(TransactionView, TransactionView)>, + ) -> CellDepSet { + 
info!("[{}] >>> warm up: create all cell deps for test.", self.tag); + let (dep1_tx, dep2_tx) = code_txs; + let dep1_op = packed::OutPoint::new(dep1_tx.hash(), 0); + let dep2_op = packed::OutPoint::new(dep2_tx.hash(), 0); + let code1 = packed::CellDep::new_builder() + .out_point(dep1_op) + .dep_type(DepType::Code.into()) + .build(); + let code2 = packed::CellDep::new_builder() + .out_point(dep2_op) + .dep_type(DepType::Code.into()) + .build(); + let (dep3_tx, dep4_tx, dep4b_tx) = dep_group_txs; + let dep_group1 = packed::CellDep::new_builder() + .out_point(packed::OutPoint::new(dep3_tx.hash(), 0)) + .dep_type(DepType::DepGroup.into()) + .build(); + let dep_group2 = packed::CellDep::new_builder() + .out_point(packed::OutPoint::new(dep4_tx.hash(), 0)) + .dep_type(DepType::DepGroup.into()) + .build(); + let dep_group2_copy = packed::CellDep::new_builder() + .out_point(packed::OutPoint::new(dep4b_tx.hash(), 0)) + .dep_type(DepType::DepGroup.into()) + .build(); + let (code3, dep_group3) = if let Some((dep5_tx, dep6_tx)) = incorrect_opt { + let dep3_op = packed::OutPoint::new(dep5_tx.hash(), 0); + let code3 = packed::CellDep::new_builder() + .out_point(dep3_op) + .dep_type(DepType::Code.into()) + .build(); + let dep_group3 = packed::CellDep::new_builder() + .out_point(packed::OutPoint::new(dep6_tx.hash(), 0)) + .dep_type(DepType::DepGroup.into()) + .build(); + (code3, dep_group3) + } else { + (Default::default(), Default::default()) + }; + CellDepSet { + code1, + code2, + dep_group1, + dep_group2, + dep_group2_copy, + code3, + dep_group3, + } + } +} + +// Implementation All Test Cases +impl DuplicateCellDepsTestRunner { + fn test_result( + &self, + node: &Node, + inputs: &mut impl Iterator, + tx_builder: TransactionBuilder, + expected: ExpectedResult, + ) { + let empty_output = packed::CellOutput::new_builder() + .build_exact_capacity(Capacity::shannons(0)) + .unwrap(); + let tx = tx_builder + .input(inputs.next().unwrap()) + .output(empty_output) + 
.output_data(Default::default()) + .build(); + if let Some(errmsg) = expected.error_message() { + assert_send_transaction_fail(node, &tx, &errmsg); + } else { + self.submit_transaction_until_committed(node, &tx); + } + } + + fn test_single_code_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: duplicate code-type cell deps is {}.", + self.tag, expected + ); + let tx = tx_template.clone().cell_dep(deps.code1.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_single_dep_group_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: duplicate code-type cell deps is {}.", + self.tag, expected + ); + let tx = tx_template.clone().cell_dep(deps.dep_group1.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_duplicate_code_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: duplicate code-type cell deps is {}.", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.code1.clone()) + .cell_dep(deps.code1.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_data_code_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two code-type cell deps have same data is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.code1.clone()) + .cell_dep(deps.code2.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_data_hybrid_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: 
ExpectedResult, + ) { + info!( + "[{}] >>> test: hybrid-type cell deps have same data is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.code1.clone()) + .cell_dep(deps.dep_group1.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_duplicate_dep_group_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: duplicate dep_group-type cell deps is {}.", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.dep_group1.clone()) + .cell_dep(deps.dep_group1.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_data_dep_group_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two dep_group-type cell deps have same data is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.dep_group1.clone()) + .cell_dep(deps.dep_group2.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_point_dep_group_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two dep_group-type cell deps have a same point is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.dep_group2.clone()) + .cell_dep(deps.dep_group2_copy.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_type_not_same_data_code_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two code-type cell deps have same type but not same data is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.code1.clone()) 
+ .cell_dep(deps.code3.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_type_not_same_data_hybrid_type_v1( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two hybrid-type cell deps have same type but not same data v1 is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.code1.clone()) + .cell_dep(deps.dep_group3.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_type_not_same_data_hybrid_type_v2( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two hybrid-type cell deps have same type but not same data v2 is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.code3.clone()) + .cell_dep(deps.dep_group1.clone()); + self.test_result(node, inputs, tx, expected); + } + + fn test_same_type_not_same_data_dep_group_type( + &self, + node: &Node, + deps: &CellDepSet, + inputs: &mut impl Iterator, + tx_template: &TransactionBuilder, + expected: ExpectedResult, + ) { + info!( + "[{}] >>> test: two dep_group-type cell deps have same type but not same data is {}", + self.tag, expected + ); + let tx = tx_template + .clone() + .cell_dep(deps.dep_group1.clone()) + .cell_dep(deps.dep_group3.clone()); + self.test_result(node, inputs, tx, expected); + } +} diff --git a/test/src/specs/hardfork/v2021/extension.rs b/test/src/specs/hardfork/v2021/extension.rs new file mode 100644 index 0000000000..3359b2f5da --- /dev/null +++ b/test/src/specs/hardfork/v2021/extension.rs @@ -0,0 +1,185 @@ +use crate::{ + node::waiting_for_sync, + util::{ + check::{assert_epoch_should_be, assert_submit_block_fail, assert_submit_block_ok}, + mining::{mine, mine_until_epoch, mine_until_out_bootstrap_period}, + }, + utils::wait_until, +}; +use 
crate::{Node, Spec}; +use ckb_logger::{info, trace}; +use ckb_types::prelude::*; + +const GENESIS_EPOCH_LENGTH: u64 = 10; + +const ERROR_UNKNOWN_FIELDS: &str = "Invalid: Block(UnknownFields("; +const ERROR_EMPTY_EXT: &str = "Invalid: Block(EmptyBlockExtension("; +const ERROR_MAX_LIMIT: &str = "Invalid: Block(ExceededMaximumBlockExtensionBytes("; + +pub struct CheckBlockExtension; + +impl Spec for CheckBlockExtension { + crate::setup!(num_nodes: 3); + + fn run(&self, nodes: &mut Vec) { + { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + + mine_until_out_bootstrap_period(node); + + assert_epoch_should_be(node, 1, 2, epoch_length); + { + info!("CKB v2019, empty extension field is failed"); + test_extension_via_size(node, Some(0), Err(ERROR_UNKNOWN_FIELDS)); + } + { + info!("CKB v2019, overlength extension field is failed"); + test_extension_via_size(node, Some(97), Err(ERROR_UNKNOWN_FIELDS)); + } + for size in &[1, 16, 32, 64, 96] { + info!("CKB v2019, {}-bytes extension field is failed", size); + test_extension_via_size(node, Some(*size), Err(ERROR_UNKNOWN_FIELDS)); + } + assert_epoch_should_be(node, 1, 2, epoch_length); + { + info!("CKB v2019, no extension field is passed"); + test_extension_via_size(node, None, Ok(())); + } + assert_epoch_should_be(node, 1, 3, epoch_length); + + mine_until_epoch(node, 1, epoch_length - 2, epoch_length); + { + info!("CKB v2019, empty extension field is failed (boundary)"); + test_extension_via_size(node, Some(0), Err(ERROR_UNKNOWN_FIELDS)); + } + { + info!("CKB v2019, overlength extension field is failed (boundary)"); + test_extension_via_size(node, Some(97), Err(ERROR_UNKNOWN_FIELDS)); + } + for size in &[1, 16, 32, 64, 96] { + info!( + "CKB v2019, {}-bytes extension field is failed (boundary)", + size + ); + test_extension_via_size(node, Some(*size), Err(ERROR_UNKNOWN_FIELDS)); + } + { + info!("CKB v2019, no extension field is passed (boundary)"); + test_extension_via_size(node, None, Ok(())); + } + 
assert_epoch_should_be(node, 1, epoch_length - 1, epoch_length); + + { + info!("CKB v2021, empty extension field is failed (boundary)"); + test_extension_via_size(node, Some(0), Err(ERROR_EMPTY_EXT)); + } + { + info!("CKB v2021, overlength extension field is failed (boundary)"); + test_extension_via_size(node, Some(97), Err(ERROR_MAX_LIMIT)); + } + assert_epoch_should_be(node, 1, epoch_length - 1, epoch_length); + for size in &[1, 16, 32, 64, 96] { + info!( + "CKB v2021, {}-bytes extension field is passed (boundary)", + size + ); + test_extension_via_size(node, Some(*size), Ok(())); + } + { + info!("CKB v2021, no extension field is passed (boundary)"); + test_extension_via_size(node, None, Ok(())); + } + assert_epoch_should_be(node, 2, 5, epoch_length); + + mine_until_epoch(node, 4, 0, epoch_length); + { + info!("CKB v2021, empty extension field is failed"); + test_extension_via_size(node, Some(0), Err(ERROR_EMPTY_EXT)); + } + { + info!("CKB v2021, overlength extension field is failed"); + test_extension_via_size(node, Some(97), Err(ERROR_MAX_LIMIT)); + } + assert_epoch_should_be(node, 4, 0, epoch_length); + for size in &[1, 16, 32, 64, 96] { + info!("CKB v2021, {}-bytes extension field is passed", size); + test_extension_via_size(node, Some(*size), Ok(())); + } + { + info!("CKB v2021, no extension field is passed"); + test_extension_via_size(node, None, Ok(())); + } + assert_epoch_should_be(node, 4, 6, epoch_length); + } + + { + info!("test sync blocks for two nodes"); + let node0 = &nodes[0]; + let node1 = &nodes[1]; + + let rpc_client0 = node0.rpc_client(); + let rpc_client1 = node1.rpc_client(); + + node1.connect(node0); + let ret = wait_until(30, || { + let number0 = rpc_client0.get_tip_block_number(); + let number1 = rpc_client1.get_tip_block_number(); + trace!("block number: node0: {}, node1: {}", number0, number1); + number0 == number1 + }); + assert!(ret, "node1 should get same tip header with node0"); + } + + { + info!("test reload data from store after 
restart the node"); + let node0 = &mut nodes[0]; + node0.stop(); + node0.start(); + } + + { + info!("test sync blocks for all nodes"); + let node0 = &nodes[0]; + let node1 = &nodes[1]; + let node2 = &nodes[2]; + + let rpc_client0 = node0.rpc_client(); + let rpc_client2 = node2.rpc_client(); + + node1.connect(node0); + node2.connect(node0); + let ret = wait_until(30, || { + let header0 = rpc_client0.get_tip_header(); + let header2 = rpc_client2.get_tip_header(); + header0 == header2 + }); + assert!(ret, "node2 should get same tip header with node0"); + + mine(node2, 5); + + info!("test sync blocks"); + waiting_for_sync(nodes); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0224 = Some(2); + } + } +} + +fn test_extension_via_size(node: &Node, size: Option, result: Result<(), &'static str>) { + let block = node + .new_block_builder(None, None, None) + .extension(size.map(|s| vec![0u8; s].pack())) + .build(); + if let Err(errmsg) = result { + assert_submit_block_fail(node, &block, errmsg); + } else { + assert_submit_block_ok(node, &block); + } +} diff --git a/test/src/specs/hardfork/v2021/mod.rs b/test/src/specs/hardfork/v2021/mod.rs new file mode 100644 index 0000000000..0b4de99a0b --- /dev/null +++ b/test/src/specs/hardfork/v2021/mod.rs @@ -0,0 +1,10 @@ +mod cell_deps; +mod extension; +mod since; + +pub use cell_deps::{ + DuplicateCellDepsForDataHashTypeLockScript, DuplicateCellDepsForDataHashTypeTypeScript, + DuplicateCellDepsForTypeHashTypeLockScript, DuplicateCellDepsForTypeHashTypeTypeScript, +}; +pub use extension::CheckBlockExtension; +pub use since::{CheckAbsoluteEpochSince, CheckRelativeEpochSince}; diff --git a/test/src/specs/hardfork/v2021/since.rs b/test/src/specs/hardfork/v2021/since.rs new file mode 100644 index 
0000000000..b42923d62f --- /dev/null +++ b/test/src/specs/hardfork/v2021/since.rs @@ -0,0 +1,205 @@ +use crate::util::{ + check::{self, assert_epoch_should_be}, + mining::{mine, mine_until_epoch, mine_until_out_bootstrap_period}, +}; +use crate::utils::{ + assert_send_transaction_fail, since_from_absolute_epoch_number, + since_from_relative_epoch_number, +}; +use crate::{Node, Spec}; + +use ckb_logger::info; +use ckb_types::core::{EpochNumberWithFraction, TransactionView}; + +const GENESIS_EPOCH_LENGTH: u64 = 10; + +const ERROR_IMMATURE: &str = "TransactionFailedToVerify: Verification failed Transaction(Immature("; +const ERROR_INVALID_SINCE: &str = + "TransactionFailedToVerify: Verification failed Transaction(InvalidSince("; + +pub struct CheckAbsoluteEpochSince; +pub struct CheckRelativeEpochSince; + +impl Spec for CheckAbsoluteEpochSince { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + + mine_until_out_bootstrap_period(node); + + assert_epoch_should_be(node, 1, 2, epoch_length); + { + info!("CKB v2019, since absolute epoch failed"); + let tx = create_tx_since_absolute_epoch(node, 1, 3); + assert_send_transaction_fail(node, &tx, ERROR_IMMATURE); + } + { + info!("CKB v2019, since absolute epoch failed"); + let tx = create_tx_since_absolute_epoch(node, 1, 2); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + mine_until_epoch(node, 1, epoch_length - 2, epoch_length); + { + info!("CKB v2019, since absolute epoch failed (boundary)"); + let tx = create_tx_since_absolute_epoch(node, 1, epoch_length - 1); + assert_send_transaction_fail(node, &tx, ERROR_IMMATURE); + } + { + info!("CKB v2019, since absolute epoch ok (boundary)"); + let tx = create_tx_since_absolute_epoch(node, 1, epoch_length - 2); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } 
+ mine(&node, 1); + { + info!("CKB v2019, since absolute epoch failed (boundary, malformed)"); + let tx = create_tx_since_absolute_epoch(node, 0, (epoch_length - 1) + epoch_length); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + mine(&node, 1); + assert_epoch_should_be(node, 2, 0, epoch_length); + { + info!("CKB v2021, since absolute epoch failed (boundary, malformed)"); + let tx = create_tx_since_absolute_epoch(node, 0, epoch_length * 2); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + { + info!("CKB v2021, since absolute epoch failed (boundary, malformed)"); + let tx = create_tx_since_absolute_epoch(node, 1, epoch_length); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + { + info!("CKB v2021, since absolute epoch ok (boundary)"); + let tx = create_tx_since_absolute_epoch(node, 2, 0); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + mine_until_epoch(node, 3, 0, epoch_length); + { + info!("CKB v2021, since absolute epoch failed (malformed)"); + let tx = create_tx_since_absolute_epoch(node, 0, epoch_length * 3); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + { + info!("CKB v2021, since absolute epoch failed (malformed)"); + let tx = create_tx_since_absolute_epoch(node, 1, epoch_length * 2); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + { + info!("CKB v2021, since absolute epoch failed (malformed)"); + let tx = create_tx_since_absolute_epoch(node, 2, epoch_length); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + { + info!("CKB v2021, since absolute epoch ok"); + let tx = create_tx_since_absolute_epoch(node, 3, 0); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + 
spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0223 = Some(2); + } + } +} + +impl Spec for CheckRelativeEpochSince { + fn run(&self, nodes: &mut Vec) { + let node = &nodes[0]; + let epoch_length = GENESIS_EPOCH_LENGTH; + + mine_until_out_bootstrap_period(node); + + assert_epoch_should_be(node, 1, 2, epoch_length); + mine_until_epoch(node, 1, epoch_length - 4, epoch_length); + { + info!("CKB v2019, since relative epoch failed"); + let tx = create_tx_since_relative_epoch(node, 1, 0); + mine(&node, epoch_length - 1); + assert_send_transaction_fail(node, &tx, ERROR_IMMATURE); + mine(&node, 1); + info!("CKB v2019, since relative epoch ok"); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + assert_epoch_should_be(node, 2, epoch_length - 4, epoch_length); + { + info!("CKB v2019, since relative epoch failed (malformed)"); + let tx = create_tx_since_relative_epoch(node, 0, epoch_length); + mine(&node, epoch_length - 1); + assert_send_transaction_fail(node, &tx, ERROR_IMMATURE); + mine(&node, 1); + info!("CKB v2019, since relative epoch ok (malformed)"); + let res = node.rpc_client().send_transaction_result(tx.data().into()); + assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + } + assert_epoch_should_be(node, 3, epoch_length - 4, epoch_length); + { + let tx1 = create_tx_since_relative_epoch(node, 0, epoch_length); + mine(&node, 1); + let tx2 = create_tx_since_relative_epoch(node, 0, epoch_length); + mine(&node, epoch_length - 2); + + info!("CKB v2019, since relative epoch failed (boundary, malformed)"); + assert_send_transaction_fail(node, &tx1, ERROR_IMMATURE); + mine(&node, 1); + info!("CKB v2019, since relative epoch ok (boundary, malformed)"); + let res = node.rpc_client().send_transaction_result(tx1.data().into()); + 
assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); + + info!("CKB v2021, since relative epoch failed (boundary, malformed)"); + assert_send_transaction_fail(node, &tx2, ERROR_IMMATURE); + + mine(&node, 1); + info!("CKB v2021, since relative epoch failed (boundary, malformed)"); + assert_send_transaction_fail(node, &tx2, ERROR_INVALID_SINCE); + + info!("CKB v2019, since relative epoch transaction will be committed (boundary, malformed)"); + assert_epoch_should_be(node, 4, epoch_length - 3, epoch_length); + assert!(check::is_transaction_pending(node, &tx1)); + mine(&node, 1); + assert!(check::is_transaction_proposed(node, &tx1)); + mine(&node, 1); + assert!(check::is_transaction_committed(node, &tx1)); + assert_epoch_should_be(node, 4, epoch_length - 1, epoch_length); + } + { + info!("CKB v2021, since relative epoch failed (malformed)"); + let tx = create_tx_since_relative_epoch(node, 0, epoch_length); + mine(&node, epoch_length - 1); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + mine(&node, 1); + info!("CKB v2021, since relative epoch failed (malformed)"); + assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.rfc_pr_0223 = Some(5); + } + } +} + +fn create_tx_since_absolute_epoch(node: &Node, number: u64, index: u64) -> TransactionView { + let epoch_length = GENESIS_EPOCH_LENGTH; + let epoch = EpochNumberWithFraction::new(number, index, epoch_length); + let since = since_from_absolute_epoch_number(epoch); + let cellbase = node.get_tip_block().transactions()[0].clone(); + node.new_transaction_with_since(cellbase.hash(), since) +} + +fn create_tx_since_relative_epoch(node: &Node, number: u64, index: u64) -> TransactionView { + let epoch_length = GENESIS_EPOCH_LENGTH; 
+ let epoch = EpochNumberWithFraction::new(number, index, epoch_length); + let since = since_from_relative_epoch_number(epoch); + let cellbase = node.get_tip_block().transactions()[0].clone(); + node.new_transaction_with_since(cellbase.hash(), since) +} diff --git a/test/src/specs/mod.rs b/test/src/specs/mod.rs index 6dde71565c..0f9a2da317 100644 --- a/test/src/specs/mod.rs +++ b/test/src/specs/mod.rs @@ -1,6 +1,7 @@ mod alert; mod consensus; mod dao; +mod hardfork; mod mining; mod p2p; mod relay; @@ -11,6 +12,7 @@ mod tx_pool; pub use alert::*; pub use consensus::*; pub use dao::*; +pub use hardfork::*; pub use mining::*; pub use p2p::*; pub use relay::*; diff --git a/test/src/specs/tx_pool/duplicate_cell_deps.rs b/test/src/specs/tx_pool/duplicate_cell_deps.rs deleted file mode 100644 index e0593b648d..0000000000 --- a/test/src/specs/tx_pool/duplicate_cell_deps.rs +++ /dev/null @@ -1,192 +0,0 @@ -use crate::{ - util::{cell::gen_spendable, check::is_transaction_committed, mining::mine_until_bool}, - utils::assert_send_transaction_fail, - Node, Spec, -}; -use ckb_logger::info; -use ckb_types::{ - core::{Capacity, DepType, TransactionBuilder}, - packed, - prelude::*, -}; - -pub struct DuplicateCellDeps; - -impl Spec for DuplicateCellDeps { - fn run(&self, nodes: &mut Vec) { - let node0 = &nodes[0]; - let always_success_bytes: packed::Bytes = node0.always_success_raw_data().pack(); - let always_success_output = packed::CellOutput::new_builder() - .build_exact_capacity(Capacity::bytes(always_success_bytes.len()).unwrap()) - .unwrap(); - let empty_output = packed::CellOutput::new_builder() - .build_exact_capacity(Capacity::shannons(0)) - .unwrap(); - - let mut initial_inputs = gen_spendable(node0, 2 + 3 + 6) - .into_iter() - .map(|input| packed::CellInput::new(input.out_point, 0)); - - info!("warm up: create 2 transactions as code-type cell deps."); - let dep1_tx = TransactionBuilder::default() - .cell_dep(node0.always_success_cell_dep()) - 
.input(initial_inputs.next().unwrap()) - .output(always_success_output.clone()) - .output_data(always_success_bytes.clone()) - .build(); - let dep2_tx = TransactionBuilder::default() - .cell_dep(node0.always_success_cell_dep()) - .input(initial_inputs.next().unwrap()) - .output(always_success_output) - .output_data(always_success_bytes) - .build(); - node0.submit_transaction(&dep1_tx); - node0.submit_transaction(&dep2_tx); - mine_until_bool(node0, || is_transaction_committed(node0, &dep1_tx)); - mine_until_bool(node0, || is_transaction_committed(node0, &dep2_tx)); - - info!("warm up: create 3 transactions as depgroup-type cell deps."); - let dep1_op = packed::OutPoint::new(dep1_tx.hash(), 0); - let dep2_op = packed::OutPoint::new(dep2_tx.hash(), 0); - let dep3_data = vec![dep1_op.clone()].pack().as_bytes().pack(); - let dep4_data = vec![dep2_op.clone()].pack().as_bytes().pack(); - let dep3_output = packed::CellOutput::new_builder() - .build_exact_capacity(Capacity::bytes(dep3_data.len()).unwrap()) - .unwrap(); - let dep4_output = packed::CellOutput::new_builder() - .build_exact_capacity(Capacity::bytes(dep4_data.len()).unwrap()) - .unwrap(); - let dep3_tx = TransactionBuilder::default() - .cell_dep(node0.always_success_cell_dep()) - .input(initial_inputs.next().unwrap()) - .output(dep3_output) - .output_data(dep3_data) - .build(); - let dep4_tx = TransactionBuilder::default() - .cell_dep(node0.always_success_cell_dep()) - .input(initial_inputs.next().unwrap()) - .output(dep4_output.clone()) - .output_data(dep4_data.clone()) - .build(); - let dep4b_tx = TransactionBuilder::default() - .cell_dep(node0.always_success_cell_dep()) - .input(initial_inputs.next().unwrap()) - .output(dep4_output) - .output_data(dep4_data) - .build(); - node0.submit_transaction(&dep3_tx); - node0.submit_transaction(&dep4_tx); - node0.submit_transaction(&dep4b_tx); - mine_until_bool(node0, || is_transaction_committed(node0, &dep3_tx)); - mine_until_bool(node0, || 
is_transaction_committed(node0, &dep4_tx)); - mine_until_bool(node0, || is_transaction_committed(node0, &dep4b_tx)); - - info!("warm up: create all cell deps for test."); - let dep1 = packed::CellDep::new_builder() - .out_point(dep1_op) - .dep_type(DepType::Code.into()) - .build(); - let dep2 = packed::CellDep::new_builder() - .out_point(dep2_op) - .dep_type(DepType::Code.into()) - .build(); - let dep3 = packed::CellDep::new_builder() - .out_point(packed::OutPoint::new(dep3_tx.hash(), 0)) - .dep_type(DepType::DepGroup.into()) - .build(); - let dep4 = packed::CellDep::new_builder() - .out_point(packed::OutPoint::new(dep4_tx.hash(), 0)) - .dep_type(DepType::DepGroup.into()) - .build(); - let dep4b = packed::CellDep::new_builder() - .out_point(packed::OutPoint::new(dep4b_tx.hash(), 0)) - .dep_type(DepType::DepGroup.into()) - .build(); - - { - info!("test: duplicate code-type cell deps is not allowed."); - let tx = TransactionBuilder::default() - .cell_dep(dep1.clone()) - .cell_dep(dep1.clone()) - .input(initial_inputs.next().unwrap()) - .output(empty_output.clone()) - .output_data(Default::default()) - .build(); - assert_send_transaction_fail( - node0, - &tx, - "{\"code\":-302,\"message\":\"TransactionFailedToVerify: \ - Verification failed Transaction(DuplicateCellDeps(", - ); - } - - { - info!("test: two code-type cell deps have same data is allowed"); - let tx = TransactionBuilder::default() - .cell_dep(dep1.clone()) - .cell_dep(dep2) - .input(initial_inputs.next().unwrap()) - .output(empty_output.clone()) - .output_data(Default::default()) - .build(); - node0.submit_transaction(&tx); - mine_until_bool(node0, || is_transaction_committed(node0, &tx)); - } - - { - info!("test: hybrid types cell deps have same data is allowed"); - let tx = TransactionBuilder::default() - .cell_dep(dep1) - .cell_dep(dep3.clone()) - .input(initial_inputs.next().unwrap()) - .output(empty_output.clone()) - .output_data(Default::default()) - .build(); - node0.submit_transaction(&tx); - 
mine_until_bool(node0, || is_transaction_committed(node0, &tx)); - } - - { - info!("test: duplicate depgroup-type cell deps is not allowed."); - let tx = TransactionBuilder::default() - .cell_dep(dep3.clone()) - .cell_dep(dep3.clone()) - .input(initial_inputs.next().unwrap()) - .output(empty_output.clone()) - .output_data(Default::default()) - .build(); - assert_send_transaction_fail( - node0, - &tx, - "{\"code\":-302,\"message\":\"TransactionFailedToVerify: \ - Verification failed Transaction(DuplicateCellDeps(", - ); - } - - { - info!("test: two depgroup-type cell deps have same data is allowed"); - let tx = TransactionBuilder::default() - .cell_dep(dep4.clone()) - .cell_dep(dep4b) - .input(initial_inputs.next().unwrap()) - .output(empty_output.clone()) - .output_data(Default::default()) - .build(); - node0.submit_transaction(&tx); - mine_until_bool(node0, || is_transaction_committed(node0, &tx)); - } - - { - info!("test: two depgroup-type cell deps point to same data is allowed"); - let tx = TransactionBuilder::default() - .cell_dep(dep3) - .cell_dep(dep4) - .input(initial_inputs.next().unwrap()) - .output(empty_output) - .output_data(Default::default()) - .build(); - node0.submit_transaction(&tx); - mine_until_bool(node0, || is_transaction_committed(node0, &tx)); - } - } -} diff --git a/test/src/specs/tx_pool/mod.rs b/test/src/specs/tx_pool/mod.rs index 938d8cebbb..29edb92d6c 100644 --- a/test/src/specs/tx_pool/mod.rs +++ b/test/src/specs/tx_pool/mod.rs @@ -4,7 +4,6 @@ mod dead_cell_deps; mod depend_tx_in_same_block; mod descendant; mod different_txs_with_same_input; -mod duplicate_cell_deps; mod limit; mod pool_reconcile; mod pool_resurrect; @@ -27,7 +26,6 @@ pub use dead_cell_deps::*; pub use depend_tx_in_same_block::*; pub use descendant::*; pub use different_txs_with_same_input::*; -pub use duplicate_cell_deps::*; pub use limit::*; pub use pool_reconcile::*; pub use pool_resurrect::*; diff --git a/test/src/specs/tx_pool/valid_since.rs 
b/test/src/specs/tx_pool/valid_since.rs index b8d0df1e17..886cd0c306 100644 --- a/test/src/specs/tx_pool/valid_since.rs +++ b/test/src/specs/tx_pool/valid_since.rs @@ -1,4 +1,7 @@ -use crate::util::mining::{mine, mine_until_out_bootstrap_period}; +use crate::util::{ + check, + mining::{mine, mine_until_out_bootstrap_period}, +}; use crate::utils::{ assert_send_transaction_fail, since_from_absolute_block_number, since_from_absolute_timestamp, since_from_relative_block_number, since_from_relative_timestamp, @@ -6,7 +9,7 @@ use crate::utils::{ use crate::{Node, Spec, DEFAULT_TX_PROPOSAL_WINDOW}; use ckb_logger::info; -use ckb_types::core::BlockNumber; +use ckb_types::core::{BlockNumber, TransactionView}; use std::thread::sleep; use std::time::Duration; @@ -32,7 +35,8 @@ impl Spec for ValidSince { impl ValidSince { pub fn test_since_relative_block_number(&self, node: &Node) { mine_until_out_bootstrap_period(node); - let relative: BlockNumber = 5; + let started_tip_number = node.get_tip_block_number(); + let relative: BlockNumber = 10; let since = since_from_relative_block_number(relative); let transaction = { let cellbase = node.get_tip_block().transactions()[0].clone(); @@ -40,7 +44,7 @@ impl ValidSince { }; // Failed to send transaction since SinceImmaturity - for _ in 1..relative { + for _ in 1..=(relative - 3) { assert_send_transaction_fail( node, &transaction, @@ -56,11 +60,13 @@ impl ValidSince { .is_ok(), "transaction is ok, tip is equal to relative since block number", ); + + Self::check_committing_process(node, &transaction, started_tip_number + relative); } pub fn test_since_absolute_block_number(&self, node: &Node) { mine_until_out_bootstrap_period(node); - let absolute: BlockNumber = node.rpc_client().get_tip_block_number() + 5; + let absolute: BlockNumber = node.rpc_client().get_tip_block_number() + 10; let since = since_from_absolute_block_number(absolute); let transaction = { let cellbase = node.get_tip_block().transactions()[0].clone(); @@ -69,7 +75,7 
@@ impl ValidSince { // Failed to send transaction since SinceImmaturity let tip_number = node.rpc_client().get_tip_block_number(); - for _ in tip_number + 1..absolute { + for _ in tip_number + 1..=(absolute - 3) { assert_send_transaction_fail( node, &transaction, @@ -85,6 +91,8 @@ impl ValidSince { .is_ok(), "transaction is ok, tip is equal to absolute since block number", ); + + Self::check_committing_process(node, &transaction, absolute); } pub fn test_since_relative_median_time(&self, node: &Node) { @@ -255,4 +263,28 @@ impl ValidSince { mine(&node, 1); node.assert_tx_pool_size(0, 0); } + + fn check_committing_process( + node: &Node, + transaction: &TransactionView, + committed_at: BlockNumber, + ) { + // Pending + node.assert_tx_pool_size(1, 0); + assert!(check::is_transaction_pending(node, transaction)); + // Gap + mine(&node, 1); + node.assert_tx_pool_size(1, 0); + assert!(check::is_transaction_pending(node, transaction)); + // Proposed + mine(&node, 1); + node.assert_tx_pool_size(0, 1); + assert!(check::is_transaction_proposed(node, transaction)); + // Committed + mine(&node, 1); + node.assert_tx_pool_size(0, 0); + assert!(check::is_transaction_committed(node, transaction)); + + assert_eq!(node.get_tip_block_number(), committed_at); + } } diff --git a/test/src/util/check.rs b/test/src/util/check.rs index a87f19ee7a..36e05e5c6b 100644 --- a/test/src/util/check.rs +++ b/test/src/util/check.rs @@ -1,6 +1,6 @@ use crate::Node; use ckb_jsonrpc_types::Status; -use ckb_types::core::TransactionView; +use ckb_types::core::{BlockView, EpochNumberWithFraction, HeaderView, TransactionView}; pub fn is_transaction_pending(node: &Node, transaction: &TransactionView) -> bool { node.rpc_client() @@ -28,3 +28,52 @@ pub fn is_transaction_unknown(node: &Node, transaction: &TransactionView) -> boo .get_transaction(transaction.hash()) .is_none() } + +pub fn assert_epoch_should_be(node: &Node, number: u64, index: u64, length: u64) { + let tip_header: HeaderView = 
node.rpc_client().get_tip_header().into(); + let tip_epoch = tip_header.epoch(); + let target_epoch = EpochNumberWithFraction::new(number, index, length); + assert_eq!( + tip_epoch, target_epoch, + "current tip epoch is {}, but expect epoch {}", + tip_epoch, target_epoch + ); +} + +pub fn assert_epoch_should_less_than(node: &Node, number: u64, index: u64, length: u64) { + let tip_header: HeaderView = node.rpc_client().get_tip_header().into(); + let tip_epoch = tip_header.epoch(); + let target_epoch = EpochNumberWithFraction::new(number, index, length); + assert!( + tip_epoch < target_epoch, + "current tip epoch is {}, but expect epoch less than {}", + tip_epoch, + target_epoch + ); +} + +pub fn assert_submit_block_fail(node: &Node, block: &BlockView, message: &str) { + let result = node + .rpc_client() + .submit_block("".to_owned(), block.data().into()); + assert!( + result.is_err(), + "expect error \"{}\" but got \"Ok(())\"", + message, + ); + let error = result.expect_err(&format!("block is invalid since {}", message)); + let error_string = error.to_string(); + assert!( + error_string.contains(message), + "expect error \"{}\" but got \"{}\"", + message, + error_string, + ); +} + +pub fn assert_submit_block_ok(node: &Node, block: &BlockView) { + let result = node + .rpc_client() + .submit_block("".to_owned(), block.data().into()); + assert!(result.is_ok(), "expect \"Ok(())\" but got \"{:?}\"", result,); +} diff --git a/test/src/util/mining.rs b/test/src/util/mining.rs index 6be7bfc9b0..8a16c5f1f7 100644 --- a/test/src/util/mining.rs +++ b/test/src/util/mining.rs @@ -1,7 +1,9 @@ use crate::util::chain::forward_main_blocks; use crate::Node; -use ckb_types::core::{BlockBuilder, BlockView}; -use ckb_types::packed; +use ckb_types::{ + core::{BlockBuilder, BlockView, EpochNumberWithFraction, HeaderView}, + packed, +}; pub fn out_bootstrap_period(nodes: &[Node]) { if let Some(node0) = nodes.first() { @@ -59,6 +61,21 @@ pub fn mine_until_out_bootstrap_period(node: &Node) 
{ mine_until_bool(node, predicate) } +pub fn mine_until_epoch(node: &Node, number: u64, index: u64, length: u64) { + let target_epoch = EpochNumberWithFraction::new(number, index, length); + mine_until_bool(node, || { + let tip_header: HeaderView = node.rpc_client().get_tip_header().into(); + let tip_epoch = tip_header.epoch(); + if tip_epoch > target_epoch { + panic!( + "expect mine until epoch {} but already be epoch {}", + target_epoch, tip_epoch + ); + } + tip_epoch == target_epoch + }); +} + pub fn mine(node: &Node, count: u64) { let with = |builder: BlockBuilder| builder.build(); mine_with(node, count, with) diff --git a/test/src/utils.rs b/test/src/utils.rs index 7c629984f9..91153dd8f4 100644 --- a/test/src/utils.rs +++ b/test/src/utils.rs @@ -4,7 +4,7 @@ use crate::util::mining::mine; use crate::{Node, TXOSet}; use ckb_network::bytes::Bytes; use ckb_types::{ - core::{BlockNumber, BlockView, EpochNumber, HeaderView, TransactionView}, + core::{BlockNumber, BlockView, EpochNumberWithFraction, HeaderView, TransactionView}, packed::{ BlockTransactions, Byte32, CompactBlock, GetBlocks, RelayMessage, RelayTransaction, RelayTransactionHashes, RelayTransactions, SendBlock, SendHeaders, SyncMessage, @@ -161,12 +161,12 @@ pub fn since_from_absolute_block_number(block_number: BlockNumber) -> u64 { FLAG_SINCE_BLOCK_NUMBER | block_number } -pub fn since_from_relative_epoch_number(epoch_number: EpochNumber) -> u64 { - FLAG_SINCE_RELATIVE | FLAG_SINCE_EPOCH_NUMBER | epoch_number +pub fn since_from_relative_epoch_number(epoch_number: EpochNumberWithFraction) -> u64 { + FLAG_SINCE_RELATIVE | FLAG_SINCE_EPOCH_NUMBER | epoch_number.full_value() } -pub fn since_from_absolute_epoch_number(epoch_number: EpochNumber) -> u64 { - FLAG_SINCE_EPOCH_NUMBER | epoch_number +pub fn since_from_absolute_epoch_number(epoch_number: EpochNumberWithFraction) -> u64 { + FLAG_SINCE_EPOCH_NUMBER | epoch_number.full_value() } pub fn since_from_relative_timestamp(timestamp: u64) -> u64 { diff --git 
a/test/template/specs/integration.toml b/test/template/specs/integration.toml index ba748b891c..cf1b5ff7b7 100644 --- a/test/template/specs/integration.toml +++ b/test/template/specs/integration.toml @@ -68,5 +68,11 @@ primary_epoch_reward_halving_interval = 8760 epoch_duration_target = 14400 genesis_epoch_length = 1000 +[params.hardfork] +rfc_pr_0221 = 9_223_372_036_854_775_807 +rfc_pr_0222 = 9_223_372_036_854_775_807 +rfc_pr_0223 = 9_223_372_036_854_775_807 +rfc_pr_0224 = 9_223_372_036_854_775_807 + [pow] func = "Dummy" diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 014bb45f1c..2aa4a726af 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -18,7 +18,7 @@ use ckb_types::{ }, packed::{Byte32, OutPoint, ProposalShortId}, }; -use ckb_verification::cache::CacheEntry; +use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use faketime::unix_time_as_millis; use lru::LruCache; use std::collections::HashSet; @@ -352,7 +352,9 @@ impl TxPool { self.check_rtx_from_pending_and_proposed(&rtx)?; let snapshot = self.snapshot(); let max_cycles = snapshot.consensus().max_block_cycles(); - let verified = verify_rtx(snapshot, &rtx, cache_entry, max_cycles)?; + let tip_header = snapshot.tip_header(); + let tx_env = TxVerifyEnv::new_proposed(tip_header, 0); + let verified = verify_rtx(snapshot, &rtx, &tx_env, cache_entry, max_cycles)?; let entry = TxEntry::new(rtx, verified.cycles, verified.fee, size); let tx_hash = entry.transaction().hash(); @@ -372,7 +374,9 @@ impl TxPool { self.check_rtx_from_proposed(&rtx)?; let snapshot = self.snapshot(); let max_cycles = snapshot.consensus().max_block_cycles(); - let verified = verify_rtx(snapshot, &rtx, cache_entry, max_cycles)?; + let tip_header = snapshot.tip_header(); + let tx_env = TxVerifyEnv::new_proposed(tip_header, 1); + let verified = verify_rtx(snapshot, &rtx, &tx_env, cache_entry, max_cycles)?; let entry = TxEntry::new(rtx, verified.cycles, verified.fee, size); let tx_hash = entry.transaction().hash(); diff 
--git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 32bfafb5b1..e881141032 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -24,14 +24,14 @@ use ckb_types::{ get_related_dep_out_points, OverlayCellChecker, ResolvedTransaction, TransactionsChecker, }, - BlockView, Capacity, Cycle, EpochExt, ScriptHashType, TransactionView, UncleBlockView, - Version, + BlockView, Capacity, Cycle, EpochExt, HeaderView, ScriptHashType, TransactionView, + UncleBlockView, Version, }, packed::{Byte32, CellbaseWitness, OutPoint, ProposalShortId, Script}, prelude::*, }; use ckb_util::LinkedHashSet; -use ckb_verification::cache::CacheEntry; +use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use faketime::unix_time_as_millis; use std::collections::HashSet; use std::collections::{HashMap, VecDeque}; @@ -49,12 +49,23 @@ pub enum PlugTarget { Proposed, } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum TxStatus { Fresh, Gap, Proposed, } +impl TxStatus { + fn with_env(self, header: &HeaderView) -> TxVerifyEnv { + match self { + TxStatus::Fresh => TxVerifyEnv::new_submit(header), + TxStatus::Gap => TxVerifyEnv::new_proposed(header, 0), + TxStatus::Proposed => TxVerifyEnv::new_proposed(header, 1), + } + } +} + impl TxPoolService { async fn get_block_template_cache( &self, @@ -245,6 +256,7 @@ impl TxPoolService { cellbase: BlockAssembler::transform_cellbase(&cellbase, None), work_id: work_id.into(), dao: dao.into(), + extension: None, }) } @@ -570,7 +582,9 @@ impl TxPoolService { let verify_cache = self.fetch_tx_verify_cache(&tx_hash).await; let max_cycles = max_cycles.unwrap_or(self.tx_pool_config.max_tx_verify_cycles); - let verified = verify_rtx(&snapshot, &rtx, verify_cache, max_cycles)?; + let tip_header = snapshot.tip_header(); + let tx_env = status.with_env(tip_header); + let verified = verify_rtx(&snapshot, &rtx, &tx_env, verify_cache, max_cycles)?; let entry = TxEntry::new(rtx, verified.cycles, fee, tx_size); @@ -641,8 +655,11 @@ impl TxPoolService { 
if let Ok((rtx, status)) = resolve_tx(tx_pool, tx_pool.snapshot(), tx) { if let Ok(fee) = check_tx_fee(tx_pool, tx_pool.snapshot(), &rtx, tx_size) { let verify_cache = fetched_cache.get(&tx_hash).cloned(); + let snapshot = tx_pool.snapshot(); + let tip_header = snapshot.tip_header(); + let tx_env = status.with_env(tip_header); if let Ok(verified) = - verify_rtx(tx_pool.snapshot(), &rtx, verify_cache, max_cycles) + verify_rtx(snapshot, &rtx, &tx_env, verify_cache, max_cycles) { let entry = TxEntry::new(rtx, verified.cycles, fee, tx_size); if let Err(e) = _submit_entry(tx_pool, status, entry, &self.callbacks) { diff --git a/tx-pool/src/util.rs b/tx-pool/src/util.rs index 84923a6ee5..99fabda09b 100644 --- a/tx-pool/src/util.rs +++ b/tx-pool/src/util.rs @@ -7,7 +7,7 @@ use ckb_store::ChainStore; use ckb_types::core::{cell::ResolvedTransaction, Capacity, Cycle, TransactionView}; use ckb_verification::{ cache::CacheEntry, ContextualTransactionVerifier, NonContextualTransactionVerifier, - TimeRelativeTransactionVerifier, + TimeRelativeTransactionVerifier, TxVerifyEnv, }; use tokio::task::block_in_place; @@ -74,35 +74,24 @@ pub(crate) fn non_contextual_verify( pub(crate) fn verify_rtx( snapshot: &Snapshot, rtx: &ResolvedTransaction, + tx_env: &TxVerifyEnv, cache_entry: Option, max_tx_verify_cycles: Cycle, ) -> Result { - let tip_header = snapshot.tip_header(); - let tip_number = tip_header.number(); - let epoch = tip_header.epoch(); let consensus = snapshot.consensus(); if let Some(cached) = cache_entry { - TimeRelativeTransactionVerifier::new( - &rtx, - snapshot, - tip_number + 1, - epoch, - tip_header.hash(), - consensus, - ) - .verify() - .map(|_| cached) - .map_err(Reject::Verification) + TimeRelativeTransactionVerifier::new(&rtx, snapshot, tx_env) + .verify() + .map(|_| cached) + .map_err(Reject::Verification) } else { block_in_place(|| { ContextualTransactionVerifier::new( &rtx, - tip_number + 1, - epoch, - tip_header.hash(), consensus, &snapshot.as_data_provider(), 
+ tx_env, ) .verify(max_tx_verify_cycles, false) .map_err(Reject::Verification) diff --git a/util/app-config/src/configs/store.rs b/util/app-config/src/configs/store.rs index fa3471562d..88cf1164be 100644 --- a/util/app-config/src/configs/store.rs +++ b/util/app-config/src/configs/store.rs @@ -13,6 +13,9 @@ pub struct Config { pub block_tx_hashes_cache_size: usize, /// The maximum number of blocks which uncles section is cached. pub block_uncles_cache_size: usize, + /// The maximum number of blocks which extension section is cached. + #[serde(default = "default_block_extensions_cache_size")] + pub block_extensions_cache_size: usize, /// The maximum number of blocks which cellbase transaction is cached. pub cellbase_cache_size: usize, /// whether enable freezer @@ -20,6 +23,10 @@ pub struct Config { pub freezer_enable: bool, } +const fn default_block_extensions_cache_size() -> usize { + 30 +} + fn default_freezer_enable() -> bool { false } @@ -32,6 +39,7 @@ impl Default for Config { block_proposals_cache_size: 30, block_tx_hashes_cache_size: 30, block_uncles_cache_size: 30, + block_extensions_cache_size: default_block_extensions_cache_size(), cellbase_cache_size: 30, freezer_enable: false, } diff --git a/util/constant/src/hardfork/mainnet.rs b/util/constant/src/hardfork/mainnet.rs new file mode 100644 index 0000000000..034af0f7a3 --- /dev/null +++ b/util/constant/src/hardfork/mainnet.rs @@ -0,0 +1,3 @@ +// TODO ckb2021 Update the epoch number for mainnet. +/// First epoch number for CKB v2021 +pub const CKB2021_START_EPOCH: u64 = u64::MAX; diff --git a/util/constant/src/hardfork/mod.rs b/util/constant/src/hardfork/mod.rs new file mode 100644 index 0000000000..7440163b8f --- /dev/null +++ b/util/constant/src/hardfork/mod.rs @@ -0,0 +1,4 @@ +/// Hardfork constant for mainnet. +pub mod mainnet; +/// Hardfork constant for testnet. 
+pub mod testnet; diff --git a/util/constant/src/hardfork/testnet.rs b/util/constant/src/hardfork/testnet.rs new file mode 100644 index 0000000000..23a7d3adf2 --- /dev/null +++ b/util/constant/src/hardfork/testnet.rs @@ -0,0 +1,3 @@ +// TODO ckb2021 Update the epoch number for testnet. +/// First epoch number for CKB v2021 +pub const CKB2021_START_EPOCH: u64 = u64::MAX; diff --git a/util/constant/src/lib.rs b/util/constant/src/lib.rs index 3403241463..85eb438468 100644 --- a/util/constant/src/lib.rs +++ b/util/constant/src/lib.rs @@ -1,5 +1,7 @@ //! Collect constants used across ckb components. +/// hardfork constant +pub mod hardfork; /// store constant pub mod store; /// sync constant diff --git a/util/jsonrpc-types/src/block_template.rs b/util/jsonrpc-types/src/block_template.rs index b53715d78e..53c962ef64 100644 --- a/util/jsonrpc-types/src/block_template.rs +++ b/util/jsonrpc-types/src/block_template.rs @@ -1,6 +1,6 @@ use crate::{ - BlockNumber, Byte32, Cycle, EpochNumberWithFraction, Header, ProposalShortId, Timestamp, - Transaction, Uint32, Uint64, Version, + BlockNumber, Byte32, Cycle, EpochNumberWithFraction, Header, JsonBytes, ProposalShortId, + Timestamp, Transaction, Uint32, Uint64, Version, }; use ckb_types::{packed, prelude::*, H256}; use serde::{Deserialize, Serialize}; @@ -84,6 +84,12 @@ pub struct BlockTemplate { /// /// See RFC [Deposit and Withdraw in Nervos DAO](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0023-dao-deposit-withdraw/0023-dao-deposit-withdraw.md#calculation). pub dao: Byte32, + /// The extension for the new block. + /// + /// This field is optional. It a reserved field, please leave it blank. + #[doc(hidden)] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub extension: Option, } impl From for packed::Block { @@ -100,6 +106,7 @@ impl From for packed::Block { proposals, cellbase, dao, + extension, .. 
} = block_template; let raw = packed::RawHeader::new_builder() @@ -116,25 +123,49 @@ impl From for packed::Block { .push(cellbase.into()) .extend(transactions.into_iter().map(|tx| tx.into())) .build(); - packed::Block::new_builder() - .header(header) - .uncles( - uncles - .into_iter() - .map(|u| u.into()) - .collect::>() - .pack(), - ) - .transactions(txs) - .proposals( - proposals - .into_iter() - .map(|p| p.into()) - .collect::>() - .pack(), - ) - .build() - .reset_header() + if let Some(extension) = extension { + let extension: packed::Bytes = extension.into(); + packed::BlockV1::new_builder() + .header(header) + .uncles( + uncles + .into_iter() + .map(|u| u.into()) + .collect::>() + .pack(), + ) + .transactions(txs) + .proposals( + proposals + .into_iter() + .map(|p| p.into()) + .collect::>() + .pack(), + ) + .extension(extension) + .build() + .as_v0() + } else { + packed::Block::new_builder() + .header(header) + .uncles( + uncles + .into_iter() + .map(|u| u.into()) + .collect::>() + .pack(), + ) + .transactions(txs) + .proposals( + proposals + .into_iter() + .map(|p| p.into()) + .collect::>() + .pack(), + ) + .build() + } + .reset_header() } } diff --git a/util/jsonrpc-types/src/blockchain.rs b/util/jsonrpc-types/src/blockchain.rs index d1ded04ae3..f04095c86f 100644 --- a/util/jsonrpc-types/src/blockchain.rs +++ b/util/jsonrpc-types/src/blockchain.rs @@ -637,6 +637,12 @@ pub struct Header { /// /// It is all zeros when `proposals` is empty, or the hash on all the bytes concatenated together. pub proposals_hash: H256, + // TODO ckb2021 Returns the extra hash as uncles hash directly since no extension now. + // The hash on `uncles` and extension in the block body. + // + // The uncles hash is all zeros when `uncles` is empty, or the hash on all the uncle header hashes concatenated together. + // The extension hash is the hash of the extension. + // The extra hash is the hash on uncles hash and extension hash concatenated together. 
/// The hash on `uncles` in the block body. /// /// It is all zeros when `uncles` is empty, or the hash on all the uncle header hashes concatenated together. @@ -696,7 +702,7 @@ impl From for Header { transactions_root: raw.transactions_root().unpack(), proposals_hash: raw.proposals_hash().unpack(), compact_target: raw.compact_target().unpack(), - uncles_hash: raw.uncles_hash().unpack(), + uncles_hash: raw.extra_hash().unpack(), dao: raw.dao().into(), nonce: input.nonce().unpack(), } @@ -743,7 +749,7 @@ impl From
for packed::Header { .transactions_root(transactions_root.pack()) .proposals_hash(proposals_hash.pack()) .compact_target(compact_target.pack()) - .uncles_hash(uncles_hash.pack()) + .extra_hash(uncles_hash.pack()) .dao(dao.into()) .build(); packed::Header::new_builder() @@ -843,6 +849,12 @@ pub struct Block { pub transactions: Vec, /// The proposal IDs in the block body. pub proposals: Vec, + /// The extension in the block body. + /// + /// This field is optional. It a reserved field, please leave it blank. + #[doc(hidden)] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub extension: Option, } /// The JSON view of a Block including header and body. @@ -856,6 +868,12 @@ pub struct BlockView { pub transactions: Vec, /// The proposal IDs in the block body. pub proposals: Vec, + /// The extension in the block body. + /// + /// This field is optional. It a reserved field, please leave it blank. + #[doc(hidden)] + #[serde(default, skip_serializing_if = "Option::is_none")] + pub extension: Option, } impl From for Block { @@ -865,6 +883,7 @@ impl From for Block { uncles: input.uncles().into_iter().map(Into::into).collect(), transactions: input.transactions().into_iter().map(Into::into).collect(), proposals: input.proposals().into_iter().map(Into::into).collect(), + extension: input.extension().map(Into::into), } } } @@ -905,6 +924,7 @@ impl From for BlockView { uncles, transactions, proposals: block.proposals().into_iter().map(Into::into).collect(), + extension: block.extension().map(Into::into), } } } @@ -916,13 +936,26 @@ impl From for packed::Block { uncles, transactions, proposals, + extension, } = json; - packed::Block::new_builder() - .header(header.into()) - .uncles(uncles.into_iter().map(Into::into).pack()) - .transactions(transactions.into_iter().map(Into::into).pack()) - .proposals(proposals.into_iter().map(Into::into).pack()) - .build() + if let Some(extension) = extension { + let extension: packed::Bytes = extension.into(); + 
packed::BlockV1::new_builder() + .header(header.into()) + .uncles(uncles.into_iter().map(Into::into).pack()) + .transactions(transactions.into_iter().map(Into::into).pack()) + .proposals(proposals.into_iter().map(Into::into).pack()) + .extension(extension) + .build() + .as_v0() + } else { + packed::Block::new_builder() + .header(header.into()) + .uncles(uncles.into_iter().map(Into::into).pack()) + .transactions(transactions.into_iter().map(Into::into).pack()) + .proposals(proposals.into_iter().map(Into::into).pack()) + .build() + } } } @@ -933,6 +966,7 @@ impl From for core::BlockView { uncles, transactions, proposals, + extension, } = input; let block = Block { header: header.inner, @@ -948,6 +982,7 @@ impl From for core::BlockView { .collect(), transactions: transactions.into_iter().map(|tx| tx.inner).collect(), proposals, + extension, }; let block: packed::Block = block.into(); block.into_view() diff --git a/util/snapshot/src/lib.rs b/util/snapshot/src/lib.rs index 22f6d12c11..19473a3b94 100644 --- a/util/snapshot/src/lib.rs +++ b/util/snapshot/src/lib.rs @@ -1,7 +1,7 @@ //! 
Rocksdb snapshot wrapper use arc_swap::{ArcSwap, Guard}; -use ckb_chain_spec::consensus::Consensus; +use ckb_chain_spec::consensus::{Consensus, ConsensusProvider}; use ckb_db::{ iter::{DBIter, IteratorMode}, DBPinnableSlice, @@ -210,3 +210,9 @@ impl HeaderProvider for Snapshot { self.store.get_block_header(hash) } } + +impl ConsensusProvider for Snapshot { + fn get_consensus(&self) -> &Consensus { + self.consensus() + } +} diff --git a/util/types/schemas/blockchain.mol b/util/types/schemas/blockchain.mol index 842872007a..3ab343d856 100644 --- a/util/types/schemas/blockchain.mol +++ b/util/types/schemas/blockchain.mol @@ -77,7 +77,7 @@ struct RawHeader { parent_hash: Byte32, transactions_root: Byte32, proposals_hash: Byte32, - uncles_hash: Byte32, + extra_hash: Byte32, dao: Byte32, } @@ -98,6 +98,14 @@ table Block { proposals: ProposalShortIdVec, } +table BlockV1 { + header: Header, + uncles: UncleBlockVec, + transactions: TransactionVec, + proposals: ProposalShortIdVec, + extension: Bytes, +} + table CellbaseWitness { lock: Script, message: Bytes, diff --git a/util/types/schemas/extensions.mol b/util/types/schemas/extensions.mol index 78c15227f6..dbaf7fb470 100644 --- a/util/types/schemas/extensions.mol +++ b/util/types/schemas/extensions.mol @@ -110,6 +110,15 @@ table CompactBlock { proposals: ProposalShortIdVec, } +table CompactBlockV1 { + header: Header, + short_ids: ProposalShortIdVec, + prefilled_transactions: IndexTransactionVec, + uncles: Byte32Vec, + proposals: ProposalShortIdVec, + extension: Bytes, +} + table RelayTransaction { cycles: Uint64, transaction: Transaction, diff --git a/util/types/src/core/advanced_builders.rs b/util/types/src/core/advanced_builders.rs index 16ffeeef7d..85b1a8eede 100644 --- a/util/types/src/core/advanced_builders.rs +++ b/util/types/src/core/advanced_builders.rs @@ -43,7 +43,7 @@ pub struct HeaderBuilder { pub(crate) transactions_root: packed::Byte32, pub(crate) proposals_hash: packed::Byte32, pub(crate) compact_target: 
packed::Uint32, - pub(crate) uncles_hash: packed::Byte32, + pub(crate) extra_hash: packed::Byte32, pub(crate) epoch: packed::Uint64, pub(crate) dao: packed::Byte32, // Nonce @@ -63,6 +63,7 @@ pub struct BlockBuilder { pub(crate) uncles: Vec, pub(crate) transactions: Vec, pub(crate) proposals: Vec, + pub(crate) extension: Option, } /* @@ -93,7 +94,7 @@ impl ::std::default::Default for HeaderBuilder { transactions_root: Default::default(), proposals_hash: Default::default(), compact_target: DIFF_TWO.pack(), - uncles_hash: Default::default(), + extra_hash: Default::default(), epoch: Default::default(), dao: Default::default(), nonce: Default::default(), @@ -254,7 +255,7 @@ impl HeaderBuilder { def_setter_simple!(transactions_root, Byte32); def_setter_simple!(proposals_hash, Byte32); def_setter_simple!(compact_target, Uint32); - def_setter_simple!(uncles_hash, Byte32); + def_setter_simple!(extra_hash, Byte32); def_setter_simple!(epoch, Uint64); def_setter_simple!(dao, Byte32); def_setter_simple!(nonce, Uint128); @@ -269,7 +270,7 @@ impl HeaderBuilder { transactions_root, proposals_hash, compact_target, - uncles_hash, + extra_hash, epoch, dao, nonce, @@ -286,7 +287,7 @@ impl HeaderBuilder { .transactions_root(transactions_root) .proposals_hash(proposals_hash) .compact_target(compact_target) - .uncles_hash(uncles_hash) + .extra_hash(extra_hash) .epoch(epoch) .dao(dao) .build(); @@ -304,7 +305,7 @@ impl BlockBuilder { def_setter_simple!(header, transactions_root, Byte32); def_setter_simple!(header, proposals_hash, Byte32); def_setter_simple!(header, compact_target, Uint32); - def_setter_simple!(header, uncles_hash, Byte32); + def_setter_simple!(header, extra_hash, Byte32); def_setter_simple!(header, epoch, Uint64); def_setter_simple!(header, dao, Byte32); def_setter_simple!(header, nonce, Uint128); @@ -330,12 +331,20 @@ impl BlockBuilder { self } + /// Set `extension`. 
+ #[doc(hidden)] + pub fn extension(mut self, extension: Option) -> Self { + self.extension = extension; + self + } + fn build_internal(self, reset_header: bool) -> core::BlockView { let Self { header, uncles, transactions, proposals, + extension, } = self; let (uncles, uncle_hashes) = { let len = uncles.len(); @@ -390,22 +399,37 @@ impl BlockBuilder { let witnesses_root = merkle_root(&tx_witness_hashes[..]); let transactions_root = merkle_root(&[raw_transactions_root, witnesses_root]); let proposals_hash = proposals.calc_proposals_hash(); - let uncles_hash = uncles.calc_uncles_hash(); + let extra_hash_view = core::ExtraHashView::new( + uncles.calc_uncles_hash(), + extension.as_ref().map(packed::Bytes::calc_raw_data_hash), + ); + let extra_hash = extra_hash_view.extra_hash(); header .transactions_root(transactions_root) .proposals_hash(proposals_hash) - .uncles_hash(uncles_hash) + .extra_hash(extra_hash) .build() } else { header.build() }; - let block = packed::Block::new_builder() - .header(data) - .uncles(uncles) - .transactions(transactions.pack()) - .proposals(proposals) - .build(); + let block = if let Some(extension) = extension { + packed::BlockV1::new_builder() + .header(data) + .uncles(uncles) + .transactions(transactions.pack()) + .proposals(proposals) + .extension(extension) + .build() + .as_v0() + } else { + packed::Block::new_builder() + .header(data) + .uncles(uncles) + .transactions(transactions.pack()) + .proposals(proposals) + .build() + }; core::BlockView { data: block, hash, @@ -462,7 +486,7 @@ impl packed::Header { .transactions_root(self.raw().transactions_root()) .proposals_hash(self.raw().proposals_hash()) .compact_target(self.raw().compact_target()) - .uncles_hash(self.raw().uncles_hash()) + .extra_hash(self.raw().extra_hash()) .epoch(self.raw().epoch()) .dao(self.raw().dao()) .nonce(self.nonce()) @@ -470,6 +494,11 @@ impl packed::Header { } impl packed::Block { + /// Creates an empty advanced builder. 
+ pub fn new_advanced_builder() -> BlockBuilder { + Default::default() + } + /// Creates an advanced builder base on current data. pub fn as_advanced_builder(&self) -> BlockBuilder { BlockBuilder::default() @@ -491,6 +520,11 @@ impl packed::Block { } impl core::TransactionView { + /// Creates an empty advanced builder. + pub fn new_advanced_builder() -> TransactionBuilder { + Default::default() + } + /// Creates an advanced builder base on current data. pub fn as_advanced_builder(&self) -> TransactionBuilder { self.data().as_advanced_builder() @@ -498,6 +532,11 @@ impl core::TransactionView { } impl core::HeaderView { + /// Creates an empty advanced builder. + pub fn new_advanced_builder() -> HeaderBuilder { + Default::default() + } + /// Creates an advanced builder base on current data. pub fn as_advanced_builder(&self) -> HeaderBuilder { self.data().as_advanced_builder() @@ -537,5 +576,6 @@ impl core::BlockView { .collect::>(), ) .proposals(data.proposals().into_iter().collect::>()) + .extension(data.extension()) } } diff --git a/util/types/src/core/extras.rs b/util/types/src/core/extras.rs index 867b1bb384..d7c2c920b7 100644 --- a/util/types/src/core/extras.rs +++ b/util/types/src/core/extras.rs @@ -428,6 +428,21 @@ impl EpochNumberWithFraction { self.0 } + /// Estimate the floor limit of epoch number after N blocks. + /// + /// Since we couldn't know the length of next epoch before reach the next epoch, + /// this function could only return `self.number()` or `self.number()+1`. 
+ pub fn minimum_epoch_number_after_n_blocks(self, n: BlockNumber) -> EpochNumber { + let number = self.number(); + let length = self.length(); + let index = self.index(); + if index + n >= length { + number + 1 + } else { + number + } + } + /// TODO(doc): @quake // One caveat here, is that if the user specifies a zero epoch length either // deliberately, or by accident, calling to_rational() after that might @@ -447,4 +462,12 @@ impl EpochNumberWithFraction { pub fn to_rational(self) -> RationalU256 { RationalU256::new(self.index().into(), self.length().into()) + U256::from(self.number()) } + + /// Check the data format. + /// + /// The epoch length should be greater than zero. + /// The epoch index should be less than the epoch length. + pub fn is_well_formed(self) -> bool { + self.length() > 0 && self.length() > self.index() + } } diff --git a/util/types/src/core/hardfork.rs b/util/types/src/core/hardfork.rs new file mode 100644 index 0000000000..cb4a0e98fd --- /dev/null +++ b/util/types/src/core/hardfork.rs @@ -0,0 +1,212 @@ +//! Hard forks related types. + +use crate::core::EpochNumber; + +// Defines all methods for a feature. +macro_rules! define_methods { + ($feature:ident, $name_getter:ident, + $name_if_enabled:ident, $name_disable:ident, $rfc_name:literal) => { + define_methods!( + $feature, + $name_getter, + $name_if_enabled, + $name_disable, + concat!( + "Return the first epoch number when the [", + $rfc_name, + "](struct.HardForkSwitchBuilder.html#structfield.", + stringify!($feature), + ") is enabled." + ), + concat!( + "An alias for the method [", + stringify!($feature), + "(&self)](#method.", + stringify!($feature), + ") to let the code to be more readable." + ), + concat!( + "If the [", + $rfc_name, + "](struct.HardForkSwitchBuilder.html#structfield.", + stringify!($feature), + ") is enabled at the provided epoch." 
+ ), + concat!( + "Set the first epoch number of the [", + $rfc_name, + "](struct.HardForkSwitchBuilder.html#structfield.", + stringify!($feature), + ")." + ), + concat!( + "Never enable the [", + $rfc_name, + "](struct.HardForkSwitchBuilder.html#structfield.", + stringify!($feature), + ")." + ) + ); + }; + ($feature:ident, $name_getter_alias:ident, + $name_if_enabled:ident, $name_disable:ident, + $comment_getter:expr,$comment_getter_alias:expr, $comment_if_enabled:expr, + $comment_setter:expr, $comment_disable:expr) => { + impl HardForkSwitch { + #[doc = $comment_getter] + #[inline] + pub fn $feature(&self) -> EpochNumber { + self.$feature + } + #[doc = $comment_getter_alias] + #[inline] + pub fn $name_getter_alias(&self) -> EpochNumber { + self.$feature + } + #[doc = $comment_if_enabled] + #[inline] + pub fn $name_if_enabled(&self, epoch_number: EpochNumber) -> bool { + epoch_number >= self.$feature + } + } + impl HardForkSwitchBuilder { + #[doc = $comment_setter] + #[inline] + pub fn $feature(mut self, epoch_number: EpochNumber) -> Self { + self.$feature = Some(epoch_number); + self + } + #[doc = $comment_disable] + #[inline] + pub fn $name_disable(mut self) -> Self { + self.$feature = Some(EpochNumber::MAX); + self + } + } + }; +} + +/// A switch to select hard fork features base on the epoch number. +/// +/// For safety, all fields are private and not allowed to update. +/// This structure can only be constructed by [`HardForkSwitchBuilder`]. +/// +/// [`HardForkSwitchBuilder`]: struct.HardForkSwitchBuilder.html +#[derive(Debug, Clone)] +pub struct HardForkSwitch { + rfc_pr_0221: EpochNumber, + rfc_pr_0222: EpochNumber, + rfc_pr_0223: EpochNumber, + rfc_pr_0224: EpochNumber, +} + +/// Builder for [`HardForkSwitch`]. +/// +/// [`HardForkSwitch`]: struct.HardForkSwitch.html +#[derive(Debug, Clone, Default)] +pub struct HardForkSwitchBuilder { + // TODO ckb2021 Update all rfc numbers and fix all links, after all proposals are merged. 
+ /// Use the input cell creation block timestamp as start time in the + /// "relative since timestamp". + /// + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0221: Option, + /// Allow script multiple matches on identical data for type hash-type scripts. + /// + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0222: Option, + /// In the "since epoch", the index should be less than length and + /// the length should be greater than zero. + /// + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0223: Option, + /// Reuse `uncles_hash` in the header as `extra_hash`. + /// + /// Ref: [CKB RFC xxxx](https://github.com/nervosnetwork/rfcs/tree/master/rfcs/xxxx-rfc-title) + pub rfc_pr_0224: Option, +} + +impl HardForkSwitch { + /// Creates a new builder to build an instance. + pub fn new_builder() -> HardForkSwitchBuilder { + Default::default() + } + + /// Creates a new builder based on the current instance. + pub fn as_builder(&self) -> HardForkSwitchBuilder { + Self::new_builder() + .rfc_pr_0221(self.rfc_pr_0221()) + .rfc_pr_0222(self.rfc_pr_0222()) + .rfc_pr_0223(self.rfc_pr_0223()) + .rfc_pr_0224(self.rfc_pr_0224()) + } + + /// Creates a new instance that all hard fork features are disabled forever. + pub fn new_without_any_enabled() -> Self { + // Use a builder to ensure all features are set manually. 
+ Self::new_builder() + .disable_rfc_pr_0221() + .disable_rfc_pr_0222() + .disable_rfc_pr_0223() + .disable_rfc_pr_0224() + .build() + .unwrap() + } +} + +define_methods!( + rfc_pr_0221, + block_ts_as_relative_since_start, + is_block_ts_as_relative_since_start_enabled, + disable_rfc_pr_0221, + "RFC PR 0221" +); +define_methods!( + rfc_pr_0222, + allow_multiple_matches_on_identical_data, + is_allow_multiple_matches_on_identical_data_enabled, + disable_rfc_pr_0222, + "RFC PR 0222" +); +define_methods!( + rfc_pr_0223, + check_length_in_epoch_since, + is_check_length_in_epoch_since_enabled, + disable_rfc_pr_0223, + "RFC PR 0223" +); +define_methods!( + rfc_pr_0224, + reuse_uncles_hash_as_extra_hash, + is_reuse_uncles_hash_as_extra_hash_enabled, + disable_rfc_pr_0224, + "RFC PR 0224" +); + +impl HardForkSwitchBuilder { + /// Build a new [`HardForkSwitch`]. + /// + /// Returns an error if failed at any check, for example, there maybe are some features depend + /// on others. + /// + /// [`HardForkSwitch`]: struct.HardForkSwitch.html + pub fn build(self) -> Result { + macro_rules! 
try_find { + ($feature:ident) => { + self.$feature.ok_or_else(|| { + concat!("The feature ", stringify!($feature), " isn't configured.").to_owned() + })?; + }; + } + let rfc_pr_0221 = try_find!(rfc_pr_0221); + let rfc_pr_0222 = try_find!(rfc_pr_0222); + let rfc_pr_0223 = try_find!(rfc_pr_0223); + let rfc_pr_0224 = try_find!(rfc_pr_0224); + Ok(HardForkSwitch { + rfc_pr_0221, + rfc_pr_0222, + rfc_pr_0223, + rfc_pr_0224, + }) + } +} diff --git a/util/types/src/core/mod.rs b/util/types/src/core/mod.rs index d0d9fc1f95..5eb1d142fe 100644 --- a/util/types/src/core/mod.rs +++ b/util/types/src/core/mod.rs @@ -13,6 +13,7 @@ pub mod cell; pub mod error; +pub mod hardfork; pub mod service; pub mod tx_pool; @@ -29,7 +30,9 @@ pub use extras::{BlockExt, EpochExt, EpochNumberWithFraction, TransactionInfo}; pub use fee_rate::FeeRate; pub use reward::{BlockEconomicState, BlockIssuance, BlockReward, MinerReward}; pub use transaction_meta::{TransactionMeta, TransactionMetaBuilder}; -pub use views::{BlockView, HeaderView, TransactionView, UncleBlockVecView, UncleBlockView}; +pub use views::{ + BlockView, ExtraHashView, HeaderView, TransactionView, UncleBlockVecView, UncleBlockView, +}; pub use ckb_occupied_capacity::{capacity_bytes, Capacity, Ratio, Result as CapacityResult}; pub use ckb_rational::RationalU256; diff --git a/util/types/src/core/views.rs b/util/types/src/core/views.rs index 1b6fa6fed8..00aede05ef 100644 --- a/util/types/src/core/views.rs +++ b/util/types/src/core/views.rs @@ -2,6 +2,7 @@ use std::collections::HashSet; +use ckb_hash::new_blake2b; use ckb_occupied_capacity::Result as CapacityResult; use crate::{ @@ -37,6 +38,17 @@ pub struct TransactionView { pub(crate) witness_hash: packed::Byte32, } +/// A readonly and immutable struct which includes extra hash and the decoupled +/// parts of it. +#[derive(Debug, Clone)] +pub struct ExtraHashView { + /// The uncles hash which is used to combine to the extra hash. 
+ pub(crate) uncles_hash: packed::Byte32, + /// The first item is the new filed hash, which is used to combine to the extra hash. + /// The second item is the extra hash. + pub(crate) extension_hash_and_extra_hash: Option<(packed::Byte32, packed::Byte32)>, +} + /// A readonly and immutable struct which includes [`Header`] and its hash. /// /// # Notice @@ -114,6 +126,24 @@ impl ::std::fmt::Display for TransactionView { } } +impl ::std::fmt::Display for ExtraHashView { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + if let Some((ref extension_hash, ref extra_hash)) = self.extension_hash_and_extra_hash { + write!( + f, + "uncles_hash: {}, extension_hash: {}, extra_hash: {}", + self.uncles_hash, extension_hash, extra_hash + ) + } else { + write!( + f, + "uncles_hash: {}, extension_hash: None, extra_hash: uncles_hash", + self.uncles_hash + ) + } + } +} + impl ::std::fmt::Display for HeaderView { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!( @@ -394,6 +424,44 @@ impl TransactionView { } } +impl ExtraHashView { + /// Creates `ExtraHashView` with `uncles_hash` and optional `extension_hash`. + pub fn new(uncles_hash: packed::Byte32, extension_hash_opt: Option) -> Self { + let extension_hash_and_extra_hash = extension_hash_opt.map(|extension_hash| { + let mut ret = [0u8; 32]; + let mut blake2b = new_blake2b(); + blake2b.update(&uncles_hash.as_slice()); + blake2b.update(&extension_hash.as_slice()); + blake2b.finalize(&mut ret); + (extension_hash, ret.pack()) + }); + Self { + uncles_hash, + extension_hash_and_extra_hash, + } + } + + /// Gets `uncles_hash`. + pub fn uncles_hash(&self) -> packed::Byte32 { + self.uncles_hash.clone() + } + + /// Gets `extension_hash`. + pub fn extension_hash(&self) -> Option { + self.extension_hash_and_extra_hash + .as_ref() + .map(|(ref extension_hash, _)| extension_hash.clone()) + } + + /// Gets `extra_hash`. 
+ pub fn extra_hash(&self) -> packed::Byte32 { + self.extension_hash_and_extra_hash + .as_ref() + .map(|(_, ref extra_hash)| extra_hash.clone()) + .unwrap_or_else(|| self.uncles_hash.clone()) + } +} + impl HeaderView { define_data_getter!(Header); define_cache_getter!(hash, Byte32); @@ -407,12 +475,8 @@ impl HeaderView { define_inner_getter!(header, packed, parent_hash, Byte32); define_inner_getter!(header, packed, transactions_root, Byte32); define_inner_getter!(header, packed, proposals_hash, Byte32); - define_inner_getter!(header, packed, uncles_hash, Byte32); - - /// Gets `raw.dao`. - pub fn dao(&self) -> packed::Byte32 { - self.data().raw().dao() - } + define_inner_getter!(header, packed, extra_hash, Byte32); + define_inner_getter!(header, packed, dao, Byte32); /// Gets `raw.difficulty`. pub fn difficulty(&self) -> U256 { @@ -449,12 +513,8 @@ impl UncleBlockView { define_inner_getter!(uncle, packed, parent_hash, Byte32); define_inner_getter!(uncle, packed, transactions_root, Byte32); define_inner_getter!(uncle, packed, proposals_hash, Byte32); - define_inner_getter!(uncle, packed, uncles_hash, Byte32); - - /// Gets `header.raw.dao`. - pub fn dao(&self) -> packed::Byte32 { - self.data().header().raw().dao() - } + define_inner_getter!(uncle, packed, extra_hash, Byte32); + define_inner_getter!(uncle, packed, dao, Byte32); /// Gets `header.raw.difficulty`. pub fn difficulty(&self) -> U256 { @@ -560,12 +620,8 @@ impl BlockView { define_inner_getter!(block, packed, parent_hash, Byte32); define_inner_getter!(block, packed, transactions_root, Byte32); define_inner_getter!(block, packed, proposals_hash, Byte32); - define_inner_getter!(block, packed, uncles_hash, Byte32); - - /// Gets `header.raw.dao`. - pub fn dao(&self) -> packed::Byte32 { - self.data().header().raw().dao() - } + define_inner_getter!(block, packed, extra_hash, Byte32); + define_inner_getter!(block, packed, dao, Byte32); /// Gets `header.nonce`. 
pub fn nonce(&self) -> u128 { @@ -593,6 +649,15 @@ impl BlockView { } } + /// Gets `extension`. + /// + /// # Panics + /// + /// Panics if the extension exists but not a valid [`Bytes`](../packed/struct.Bytes.html). + pub fn extension(&self) -> Option { + self.data.extension() + } + /// Converts into an uncle block. pub fn as_uncle(&self) -> UncleBlockView { UncleBlockView { @@ -668,6 +733,16 @@ impl BlockView { self.data().as_reader().calc_uncles_hash() } + /// Calculates the hash for extension. + pub fn calc_extension_hash(&self) -> Option { + self.data().as_reader().calc_extension_hash() + } + + /// Calculates the extra hash. + pub fn calc_extra_hash(&self) -> ExtraHashView { + self.data().as_reader().calc_extra_hash() + } + /// Calculates the hash for proposals. pub fn calc_proposals_hash(&self) -> packed::Byte32 { self.data().as_reader().calc_proposals_hash() @@ -751,6 +826,38 @@ impl BlockView { tx_witness_hashes, } } + + /// Creates a new `BlockView` with a extension. + /// + /// # Notice + /// + /// [`BlockView`] created by this method could have invalid hashes or + /// invalid merkle roots in the header. 
+ pub fn new_unchecked_with_extension( + header: HeaderView, + uncles: UncleBlockVecView, + body: Vec, + proposals: packed::ProposalShortIdVec, + extension: packed::Bytes, + ) -> Self { + let block = packed::BlockV1::new_builder() + .header(header.data()) + .transactions(body.iter().map(|tx| tx.data()).pack()) + .uncles(uncles.data()) + .proposals(proposals) + .extension(extension) + .build() + .as_v0(); + let tx_hashes = body.iter().map(|tx| tx.hash()).collect::>(); + let tx_witness_hashes = body.iter().map(|tx| tx.witness_hash()).collect::>(); + Self { + data: block, + hash: header.hash(), + uncle_hashes: uncles.hashes(), + tx_hashes, + tx_witness_hashes, + } + } } /* diff --git a/util/types/src/extension/calc_hash.rs b/util/types/src/extension/calc_hash.rs index 5aa536bc6a..c69f68f5fb 100644 --- a/util/types/src/extension/calc_hash.rs +++ b/util/types/src/extension/calc_hash.rs @@ -1,6 +1,6 @@ use ckb_hash::{blake2b_256, new_blake2b}; -use crate::{packed, prelude::*}; +use crate::{core, packed, prelude::*}; /* * Calculate simple hash for packed bytes wrappers. @@ -70,6 +70,16 @@ impl packed::CellOutput { } } +impl<'r> packed::BytesReader<'r> { + /// Calculates the hash for raw data in `Bytes`. + /// + /// Returns the empty hash if no data, otherwise, calculates the hash of the data and returns it. + pub fn calc_raw_data_hash(&self) -> packed::Byte32 { + blake2b_256(self.raw_data()).pack() + } +} +impl_calc_special_hash_for_entity!(Bytes, calc_raw_data_hash); + impl<'r> packed::ScriptReader<'r> { /// Calculates the hash for [self.as_slice()] as the script hash. /// @@ -236,6 +246,24 @@ impl<'r> packed::BlockReader<'r> { self.uncles().calc_uncles_hash() } + /// Calculates the hash for the extension. + /// + /// If there is an extension (unknown for now), calculate the hash of its data. 
+ pub fn calc_extension_hash(&self) -> Option { + self.extension() + .map(|extension| extension.calc_raw_data_hash()) + } + + /// Calculates the extra hash, which is a combination of the uncles hash and + /// the extension hash. + /// + /// - If there is no extension, extra hash is the same as the uncles hash. + /// - If there is a extension, then extra hash it the hash of the combination + /// of uncles hash and the extension hash. + pub fn calc_extra_hash(&self) -> core::ExtraHashView { + core::ExtraHashView::new(self.calc_uncles_hash(), self.calc_extension_hash()) + } + /// Calculates transaction hashes for all transactions in the block. pub fn calc_tx_hashes(&self) -> Vec { self.transactions() @@ -256,6 +284,8 @@ impl<'r> packed::BlockReader<'r> { impl_calc_special_hash_for_entity!(Block, calc_header_hash); impl_calc_special_hash_for_entity!(Block, calc_proposals_hash); impl_calc_special_hash_for_entity!(Block, calc_uncles_hash); +impl_calc_special_hash_for_entity!(Block, calc_extension_hash, Option); +impl_calc_special_hash_for_entity!(Block, calc_extra_hash, core::ExtraHashView); impl_calc_special_hash_for_entity!(Block, calc_tx_hashes, Vec); impl_calc_special_hash_for_entity!(Block, calc_tx_witness_hashes, Vec); @@ -329,7 +359,7 @@ mod tests { .proposals_hash( h256!("0xd1670e45af1deb9cc00951d71c09ce80932e7ddf9fb151d744436bd04ac4a562").pack(), ) - .uncles_hash( + .extra_hash( h256!("0x0000000000000000000000000000000000000000000000000000000000000000").pack(), ) .dao(h256!("0xb54bdd7f6be90000bb52f392d41cd70024f7ef29b437000000febffacf030000").pack()) @@ -359,7 +389,7 @@ mod tests { .proposals_hash( h256!("0x0000000000000000000000000000000000000000000000000000000000000000").pack(), ) - .uncles_hash( + .extra_hash( h256!("0x0000000000000000000000000000000000000000000000000000000000000000").pack(), ) .dao(h256!("0xb54bdd7f6be90000bb52f392d41cd70024f7ef29b437000000febffacf030000").pack()) @@ -384,6 +414,13 @@ mod tests { assert_eq!(uncles.calc_uncles_hash(), 
expect.pack()); } + #[test] + fn empty_extra_hash() { + let block = packed::Block::new_builder().build(); + let expect = h256!("0x0"); + assert_eq!(block.calc_extra_hash().extra_hash(), expect.pack()); + } + #[test] fn empty_script_hash() { let script = packed::Script::new_builder().build(); diff --git a/util/types/src/extension/shortcuts.rs b/util/types/src/extension/shortcuts.rs index f21d795edf..41d49017ff 100644 --- a/util/types/src/extension/shortcuts.rs +++ b/util/types/src/extension/shortcuts.rs @@ -1,6 +1,7 @@ use std::collections::HashSet; use crate::{ + bytes, core::{self, BlockNumber}, packed, prelude::*, @@ -192,17 +193,102 @@ impl packed::Block { let witnesses_root = merkle_root(tx_witness_hashes); let transactions_root = merkle_root(&[raw_transactions_root, witnesses_root]); let proposals_hash = self.as_reader().calc_proposals_hash(); - let uncles_hash = self.as_reader().calc_uncles_hash(); + let extra_hash = self.as_reader().calc_extra_hash().extra_hash(); let raw_header = self .header() .raw() .as_builder() .transactions_root(transactions_root) .proposals_hash(proposals_hash) - .uncles_hash(uncles_hash) + .extra_hash(extra_hash) .build(); let header = self.header().as_builder().raw(raw_header).build(); - self.as_builder().header(header).build() + if let Some(extension) = self.extension() { + packed::BlockV1::new_builder() + .header(header) + .uncles(self.uncles()) + .transactions(self.transactions()) + .proposals(self.proposals()) + .extension(extension) + .build() + .as_v0() + } else { + self.as_builder().header(header).build() + } + } + + /// Gets the i-th extra field if it exists; i started from 0. 
+ pub fn extra_field(&self, index: usize) -> Option { + let count = self.count_extra_fields(); + if count > index { + let slice = self.as_slice(); + let i = (1 + Self::FIELD_COUNT + index) * molecule::NUMBER_SIZE; + let start = molecule::unpack_number(&slice[i..]) as usize; + if count == index + 1 { + Some(self.as_bytes().slice(start..)) + } else { + let j = i + molecule::NUMBER_SIZE; + let end = molecule::unpack_number(&slice[j..]) as usize; + Some(self.as_bytes().slice(start..end)) + } + } else { + None + } + } + + /// Gets the extension field if it existed. + /// + /// # Panics + /// + /// Panics if the first extra field exists but not a valid [`Bytes`](struct.Bytes.html). + pub fn extension(&self) -> Option { + self.extra_field(0) + .map(|data| packed::Bytes::from_slice(&data).unwrap()) + } +} + +impl packed::BlockV1 { + /// Converts to a compatible [`Block`](struct.Block.html) with an extra field. + pub fn as_v0(&self) -> packed::Block { + packed::Block::new_unchecked(self.as_bytes()) + } +} + +impl<'r> packed::BlockReader<'r> { + /// Gets the i-th extra field if it exists; i started from 0. + pub fn extra_field(&self, index: usize) -> Option<&[u8]> { + let count = self.count_extra_fields(); + if count > index { + let slice = self.as_slice(); + let i = (1 + Self::FIELD_COUNT + index) * molecule::NUMBER_SIZE; + let start = molecule::unpack_number(&slice[i..]) as usize; + if count == index + 1 { + Some(&self.as_slice()[start..]) + } else { + let j = i + molecule::NUMBER_SIZE; + let end = molecule::unpack_number(&slice[j..]) as usize; + Some(&self.as_slice()[start..end]) + } + } else { + None + } + } + + /// Gets the extension field if it existed. + /// + /// # Panics + /// + /// Panics if the first extra field exists but not a valid [`BytesReader`](struct.BytesReader.html). 
+ pub fn extension(&self) -> Option { + self.extra_field(0) + .map(|data| packed::BytesReader::from_slice(&data).unwrap()) + } +} + +impl<'r> packed::BlockV1Reader<'r> { + /// Converts to a compatible [`BlockReader`](struct.BlockReader.html) with an extra field. + pub fn as_v0(&self) -> packed::BlockReader { + packed::BlockReader::new_unchecked(self.as_slice()) } } @@ -237,13 +323,25 @@ impl packed::CompactBlock { } } - packed::CompactBlock::new_builder() - .header(block.data().header()) - .short_ids(short_ids.pack()) - .prefilled_transactions(prefilled_transactions.pack()) - .uncles(block.uncle_hashes.clone()) - .proposals(block.data().proposals()) - .build() + if let Some(extension) = block.data().extension() { + packed::CompactBlockV1::new_builder() + .header(block.data().header()) + .short_ids(short_ids.pack()) + .prefilled_transactions(prefilled_transactions.pack()) + .uncles(block.uncle_hashes.clone()) + .proposals(block.data().proposals()) + .extension(extension) + .build() + .as_v0() + } else { + packed::CompactBlock::new_builder() + .header(block.data().header()) + .short_ids(short_ids.pack()) + .prefilled_transactions(prefilled_transactions.pack()) + .uncles(block.uncle_hashes.clone()) + .proposals(block.data().proposals()) + .build() + } } /// Takes proposal short ids for the transactions which are not prefilled. @@ -289,6 +387,20 @@ impl packed::CompactBlock { } } +impl packed::CompactBlockV1 { + /// Converts to a compatible [`CompactBlock`](struct.CompactBlock.html) with an extra field. + pub fn as_v0(&self) -> packed::CompactBlock { + packed::CompactBlock::new_unchecked(self.as_bytes()) + } +} + +impl<'r> packed::CompactBlockV1Reader<'r> { + /// Converts to a compatible [`CompactBlockReader`](struct.CompactBlockReader.html) with an extra field. 
+ pub fn as_v0(&self) -> packed::CompactBlockReader { + packed::CompactBlockReader::new_unchecked(self.as_slice()) + } +} + impl AsRef<[u8]> for packed::TransactionKey { #[inline] fn as_ref(&self) -> &[u8] { diff --git a/util/types/src/generated/blockchain.rs b/util/types/src/generated/blockchain.rs index d14876e9c8..507f73f3c5 100644 --- a/util/types/src/generated/blockchain.rs +++ b/util/types/src/generated/blockchain.rs @@ -6961,7 +6961,7 @@ impl ::core::fmt::Display for RawHeader { write!(f, ", {}: {}", "parent_hash", self.parent_hash())?; write!(f, ", {}: {}", "transactions_root", self.transactions_root())?; write!(f, ", {}: {}", "proposals_hash", self.proposals_hash())?; - write!(f, ", {}: {}", "uncles_hash", self.uncles_hash())?; + write!(f, ", {}: {}", "extra_hash", self.extra_hash())?; write!(f, ", {}: {}", "dao", self.dao())?; write!(f, " }}") } @@ -7008,7 +7008,7 @@ impl RawHeader { pub fn proposals_hash(&self) -> Byte32 { Byte32::new_unchecked(self.0.slice(96..128)) } - pub fn uncles_hash(&self) -> Byte32 { + pub fn extra_hash(&self) -> Byte32 { Byte32::new_unchecked(self.0.slice(128..160)) } pub fn dao(&self) -> Byte32 { @@ -7049,7 +7049,7 @@ impl molecule::prelude::Entity for RawHeader { .parent_hash(self.parent_hash()) .transactions_root(self.transactions_root()) .proposals_hash(self.proposals_hash()) - .uncles_hash(self.uncles_hash()) + .extra_hash(self.extra_hash()) .dao(self.dao()) } } @@ -7080,7 +7080,7 @@ impl<'r> ::core::fmt::Display for RawHeaderReader<'r> { write!(f, ", {}: {}", "parent_hash", self.parent_hash())?; write!(f, ", {}: {}", "transactions_root", self.transactions_root())?; write!(f, ", {}: {}", "proposals_hash", self.proposals_hash())?; - write!(f, ", {}: {}", "uncles_hash", self.uncles_hash())?; + write!(f, ", {}: {}", "extra_hash", self.extra_hash())?; write!(f, ", {}: {}", "dao", self.dao())?; write!(f, " }}") } @@ -7113,7 +7113,7 @@ impl<'r> RawHeaderReader<'r> { pub fn proposals_hash(&self) -> Byte32Reader<'r> { 
Byte32Reader::new_unchecked(&self.as_slice()[96..128]) } - pub fn uncles_hash(&self) -> Byte32Reader<'r> { + pub fn extra_hash(&self) -> Byte32Reader<'r> { Byte32Reader::new_unchecked(&self.as_slice()[128..160]) } pub fn dao(&self) -> Byte32Reader<'r> { @@ -7151,7 +7151,7 @@ pub struct RawHeaderBuilder { pub(crate) parent_hash: Byte32, pub(crate) transactions_root: Byte32, pub(crate) proposals_hash: Byte32, - pub(crate) uncles_hash: Byte32, + pub(crate) extra_hash: Byte32, pub(crate) dao: Byte32, } impl RawHeaderBuilder { @@ -7190,8 +7190,8 @@ impl RawHeaderBuilder { self.proposals_hash = v; self } - pub fn uncles_hash(mut self, v: Byte32) -> Self { - self.uncles_hash = v; + pub fn extra_hash(mut self, v: Byte32) -> Self { + self.extra_hash = v; self } pub fn dao(mut self, v: Byte32) -> Self { @@ -7214,7 +7214,7 @@ impl molecule::prelude::Builder for RawHeaderBuilder { writer.write_all(self.parent_hash.as_slice())?; writer.write_all(self.transactions_root.as_slice())?; writer.write_all(self.proposals_hash.as_slice())?; - writer.write_all(self.uncles_hash.as_slice())?; + writer.write_all(self.extra_hash.as_slice())?; writer.write_all(self.dao.as_slice())?; Ok(()) } @@ -7989,6 +7989,353 @@ impl molecule::prelude::Builder for BlockBuilder { } } #[derive(Clone)] +pub struct BlockV1(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for BlockV1 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for BlockV1 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for BlockV1 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{} {{ ", Self::NAME)?; + write!(f, "{}: {}", "header", self.header())?; + write!(f, ", {}: {}", "uncles", self.uncles())?; + write!(f, ", {}: 
{}", "transactions", self.transactions())?; + write!(f, ", {}: {}", "proposals", self.proposals())?; + write!(f, ", {}: {}", "extension", self.extension())?; + let extra_count = self.count_extra_fields(); + if extra_count != 0 { + write!(f, ", .. ({} fields)", extra_count)?; + } + write!(f, " }}") + } +} +impl ::core::default::Default for BlockV1 { + fn default() -> Self { + let v: Vec = vec![ + 248, 0, 0, 0, 24, 0, 0, 0, 232, 0, 0, 0, 236, 0, 0, 0, 240, 0, 0, 0, 244, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + BlockV1::new_unchecked(v.into()) + } +} +impl BlockV1 { + pub const FIELD_COUNT: usize = 5; + pub fn total_size(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn field_count(&self) -> usize { + if self.total_size() == molecule::NUMBER_SIZE { + 0 + } else { + (molecule::unpack_number(&self.as_slice()[molecule::NUMBER_SIZE..]) as usize / 4) - 1 + } + } + pub fn count_extra_fields(&self) -> usize { + self.field_count() - Self::FIELD_COUNT + } + pub fn has_extra_fields(&self) -> bool { + Self::FIELD_COUNT != self.field_count() + } + pub fn header(&self) -> Header { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[4..]) as usize; + let end = molecule::unpack_number(&slice[8..]) as usize; + Header::new_unchecked(self.0.slice(start..end)) + } + pub fn 
uncles(&self) -> UncleBlockVec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[8..]) as usize; + let end = molecule::unpack_number(&slice[12..]) as usize; + UncleBlockVec::new_unchecked(self.0.slice(start..end)) + } + pub fn transactions(&self) -> TransactionVec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[12..]) as usize; + let end = molecule::unpack_number(&slice[16..]) as usize; + TransactionVec::new_unchecked(self.0.slice(start..end)) + } + pub fn proposals(&self) -> ProposalShortIdVec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[16..]) as usize; + let end = molecule::unpack_number(&slice[20..]) as usize; + ProposalShortIdVec::new_unchecked(self.0.slice(start..end)) + } + pub fn extension(&self) -> Bytes { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[20..]) as usize; + if self.has_extra_fields() { + let end = molecule::unpack_number(&slice[24..]) as usize; + Bytes::new_unchecked(self.0.slice(start..end)) + } else { + Bytes::new_unchecked(self.0.slice(start..)) + } + } + pub fn as_reader<'r>(&'r self) -> BlockV1Reader<'r> { + BlockV1Reader::new_unchecked(self.as_slice()) + } +} +impl molecule::prelude::Entity for BlockV1 { + type Builder = BlockV1Builder; + const NAME: &'static str = "BlockV1"; + fn new_unchecked(data: molecule::bytes::Bytes) -> Self { + BlockV1(data) + } + fn as_bytes(&self) -> molecule::bytes::Bytes { + self.0.clone() + } + fn as_slice(&self) -> &[u8] { + &self.0[..] 
+ } + fn from_slice(slice: &[u8]) -> molecule::error::VerificationResult { + BlockV1Reader::from_slice(slice).map(|reader| reader.to_entity()) + } + fn from_compatible_slice(slice: &[u8]) -> molecule::error::VerificationResult { + BlockV1Reader::from_compatible_slice(slice).map(|reader| reader.to_entity()) + } + fn new_builder() -> Self::Builder { + ::core::default::Default::default() + } + fn as_builder(self) -> Self::Builder { + Self::new_builder() + .header(self.header()) + .uncles(self.uncles()) + .transactions(self.transactions()) + .proposals(self.proposals()) + .extension(self.extension()) + } +} +#[derive(Clone, Copy)] +pub struct BlockV1Reader<'r>(&'r [u8]); +impl<'r> ::core::fmt::LowerHex for BlockV1Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl<'r> ::core::fmt::Debug for BlockV1Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl<'r> ::core::fmt::Display for BlockV1Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{} {{ ", Self::NAME)?; + write!(f, "{}: {}", "header", self.header())?; + write!(f, ", {}: {}", "uncles", self.uncles())?; + write!(f, ", {}: {}", "transactions", self.transactions())?; + write!(f, ", {}: {}", "proposals", self.proposals())?; + write!(f, ", {}: {}", "extension", self.extension())?; + let extra_count = self.count_extra_fields(); + if extra_count != 0 { + write!(f, ", .. 
({} fields)", extra_count)?; + } + write!(f, " }}") + } +} +impl<'r> BlockV1Reader<'r> { + pub const FIELD_COUNT: usize = 5; + pub fn total_size(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn field_count(&self) -> usize { + if self.total_size() == molecule::NUMBER_SIZE { + 0 + } else { + (molecule::unpack_number(&self.as_slice()[molecule::NUMBER_SIZE..]) as usize / 4) - 1 + } + } + pub fn count_extra_fields(&self) -> usize { + self.field_count() - Self::FIELD_COUNT + } + pub fn has_extra_fields(&self) -> bool { + Self::FIELD_COUNT != self.field_count() + } + pub fn header(&self) -> HeaderReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[4..]) as usize; + let end = molecule::unpack_number(&slice[8..]) as usize; + HeaderReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn uncles(&self) -> UncleBlockVecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[8..]) as usize; + let end = molecule::unpack_number(&slice[12..]) as usize; + UncleBlockVecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn transactions(&self) -> TransactionVecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[12..]) as usize; + let end = molecule::unpack_number(&slice[16..]) as usize; + TransactionVecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn proposals(&self) -> ProposalShortIdVecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[16..]) as usize; + let end = molecule::unpack_number(&slice[20..]) as usize; + ProposalShortIdVecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn extension(&self) -> BytesReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[20..]) as usize; + if self.has_extra_fields() { + let end = molecule::unpack_number(&slice[24..]) as usize; + BytesReader::new_unchecked(&self.as_slice()[start..end]) + 
} else { + BytesReader::new_unchecked(&self.as_slice()[start..]) + } + } +} +impl<'r> molecule::prelude::Reader<'r> for BlockV1Reader<'r> { + type Entity = BlockV1; + const NAME: &'static str = "BlockV1Reader"; + fn to_entity(&self) -> Self::Entity { + Self::Entity::new_unchecked(self.as_slice().to_owned().into()) + } + fn new_unchecked(slice: &'r [u8]) -> Self { + BlockV1Reader(slice) + } + fn as_slice(&self) -> &'r [u8] { + self.0 + } + fn verify(slice: &[u8], compatible: bool) -> molecule::error::VerificationResult<()> { + use molecule::verification_error as ve; + let slice_len = slice.len(); + if slice_len < molecule::NUMBER_SIZE { + return ve!(Self, HeaderIsBroken, molecule::NUMBER_SIZE, slice_len); + } + let total_size = molecule::unpack_number(slice) as usize; + if slice_len != total_size { + return ve!(Self, TotalSizeNotMatch, total_size, slice_len); + } + if slice_len == molecule::NUMBER_SIZE && Self::FIELD_COUNT == 0 { + return Ok(()); + } + if slice_len < molecule::NUMBER_SIZE * 2 { + return ve!(Self, HeaderIsBroken, molecule::NUMBER_SIZE * 2, slice_len); + } + let offset_first = molecule::unpack_number(&slice[molecule::NUMBER_SIZE..]) as usize; + if offset_first % molecule::NUMBER_SIZE != 0 || offset_first < molecule::NUMBER_SIZE * 2 { + return ve!(Self, OffsetsNotMatch); + } + if slice_len < offset_first { + return ve!(Self, HeaderIsBroken, offset_first, slice_len); + } + let field_count = offset_first / molecule::NUMBER_SIZE - 1; + if field_count < Self::FIELD_COUNT { + return ve!(Self, FieldCountNotMatch, Self::FIELD_COUNT, field_count); + } else if !compatible && field_count > Self::FIELD_COUNT { + return ve!(Self, FieldCountNotMatch, Self::FIELD_COUNT, field_count); + }; + let mut offsets: Vec = slice[molecule::NUMBER_SIZE..offset_first] + .chunks_exact(molecule::NUMBER_SIZE) + .map(|x| molecule::unpack_number(x) as usize) + .collect(); + offsets.push(total_size); + if offsets.windows(2).any(|i| i[0] > i[1]) { + return ve!(Self, OffsetsNotMatch); + 
} + HeaderReader::verify(&slice[offsets[0]..offsets[1]], compatible)?; + UncleBlockVecReader::verify(&slice[offsets[1]..offsets[2]], compatible)?; + TransactionVecReader::verify(&slice[offsets[2]..offsets[3]], compatible)?; + ProposalShortIdVecReader::verify(&slice[offsets[3]..offsets[4]], compatible)?; + BytesReader::verify(&slice[offsets[4]..offsets[5]], compatible)?; + Ok(()) + } +} +#[derive(Debug, Default)] +pub struct BlockV1Builder { + pub(crate) header: Header, + pub(crate) uncles: UncleBlockVec, + pub(crate) transactions: TransactionVec, + pub(crate) proposals: ProposalShortIdVec, + pub(crate) extension: Bytes, +} +impl BlockV1Builder { + pub const FIELD_COUNT: usize = 5; + pub fn header(mut self, v: Header) -> Self { + self.header = v; + self + } + pub fn uncles(mut self, v: UncleBlockVec) -> Self { + self.uncles = v; + self + } + pub fn transactions(mut self, v: TransactionVec) -> Self { + self.transactions = v; + self + } + pub fn proposals(mut self, v: ProposalShortIdVec) -> Self { + self.proposals = v; + self + } + pub fn extension(mut self, v: Bytes) -> Self { + self.extension = v; + self + } +} +impl molecule::prelude::Builder for BlockV1Builder { + type Entity = BlockV1; + const NAME: &'static str = "BlockV1Builder"; + fn expected_length(&self) -> usize { + molecule::NUMBER_SIZE * (Self::FIELD_COUNT + 1) + + self.header.as_slice().len() + + self.uncles.as_slice().len() + + self.transactions.as_slice().len() + + self.proposals.as_slice().len() + + self.extension.as_slice().len() + } + fn write(&self, writer: &mut W) -> molecule::io::Result<()> { + let mut total_size = molecule::NUMBER_SIZE * (Self::FIELD_COUNT + 1); + let mut offsets = Vec::with_capacity(Self::FIELD_COUNT); + offsets.push(total_size); + total_size += self.header.as_slice().len(); + offsets.push(total_size); + total_size += self.uncles.as_slice().len(); + offsets.push(total_size); + total_size += self.transactions.as_slice().len(); + offsets.push(total_size); + total_size += 
self.proposals.as_slice().len(); + offsets.push(total_size); + total_size += self.extension.as_slice().len(); + writer.write_all(&molecule::pack_number(total_size as molecule::Number))?; + for offset in offsets.into_iter() { + writer.write_all(&molecule::pack_number(offset as molecule::Number))?; + } + writer.write_all(self.header.as_slice())?; + writer.write_all(self.uncles.as_slice())?; + writer.write_all(self.transactions.as_slice())?; + writer.write_all(self.proposals.as_slice())?; + writer.write_all(self.extension.as_slice())?; + Ok(()) + } + fn build(&self) -> Self::Entity { + let mut inner = Vec::with_capacity(self.expected_length()); + self.write(&mut inner) + .unwrap_or_else(|_| panic!("{} build should be ok", Self::NAME)); + BlockV1::new_unchecked(inner.into()) + } +} +#[derive(Clone)] pub struct CellbaseWitness(molecule::bytes::Bytes); impl ::core::fmt::LowerHex for CellbaseWitness { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { diff --git a/util/types/src/generated/extensions.rs b/util/types/src/generated/extensions.rs index 04d199aa87..2f75630e88 100644 --- a/util/types/src/generated/extensions.rs +++ b/util/types/src/generated/extensions.rs @@ -5567,6 +5567,388 @@ impl molecule::prelude::Builder for CompactBlockBuilder { } } #[derive(Clone)] +pub struct CompactBlockV1(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for CompactBlockV1 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for CompactBlockV1 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for CompactBlockV1 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{} {{ ", Self::NAME)?; + write!(f, "{}: {}", "header", self.header())?; + write!(f, ", {}: 
{}", "short_ids", self.short_ids())?; + write!( + f, + ", {}: {}", + "prefilled_transactions", + self.prefilled_transactions() + )?; + write!(f, ", {}: {}", "uncles", self.uncles())?; + write!(f, ", {}: {}", "proposals", self.proposals())?; + write!(f, ", {}: {}", "extension", self.extension())?; + let extra_count = self.count_extra_fields(); + if extra_count != 0 { + write!(f, ", .. ({} fields)", extra_count)?; + } + write!(f, " }}") + } +} +impl ::core::default::Default for CompactBlockV1 { + fn default() -> Self { + let v: Vec = vec![ + 0, 1, 0, 0, 28, 0, 0, 0, 236, 0, 0, 0, 240, 0, 0, 0, 244, 0, 0, 0, 248, 0, 0, 0, 252, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + CompactBlockV1::new_unchecked(v.into()) + } +} +impl CompactBlockV1 { + pub const FIELD_COUNT: usize = 6; + pub fn total_size(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn field_count(&self) -> usize { + if self.total_size() == molecule::NUMBER_SIZE { + 0 + } else { + (molecule::unpack_number(&self.as_slice()[molecule::NUMBER_SIZE..]) as usize / 4) - 1 + } + } + pub fn count_extra_fields(&self) -> usize { + self.field_count() - Self::FIELD_COUNT + } + pub fn has_extra_fields(&self) -> bool { + Self::FIELD_COUNT != self.field_count() + } + pub fn header(&self) -> Header { + let slice = self.as_slice(); + let 
start = molecule::unpack_number(&slice[4..]) as usize; + let end = molecule::unpack_number(&slice[8..]) as usize; + Header::new_unchecked(self.0.slice(start..end)) + } + pub fn short_ids(&self) -> ProposalShortIdVec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[8..]) as usize; + let end = molecule::unpack_number(&slice[12..]) as usize; + ProposalShortIdVec::new_unchecked(self.0.slice(start..end)) + } + pub fn prefilled_transactions(&self) -> IndexTransactionVec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[12..]) as usize; + let end = molecule::unpack_number(&slice[16..]) as usize; + IndexTransactionVec::new_unchecked(self.0.slice(start..end)) + } + pub fn uncles(&self) -> Byte32Vec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[16..]) as usize; + let end = molecule::unpack_number(&slice[20..]) as usize; + Byte32Vec::new_unchecked(self.0.slice(start..end)) + } + pub fn proposals(&self) -> ProposalShortIdVec { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[20..]) as usize; + let end = molecule::unpack_number(&slice[24..]) as usize; + ProposalShortIdVec::new_unchecked(self.0.slice(start..end)) + } + pub fn extension(&self) -> Bytes { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[24..]) as usize; + if self.has_extra_fields() { + let end = molecule::unpack_number(&slice[28..]) as usize; + Bytes::new_unchecked(self.0.slice(start..end)) + } else { + Bytes::new_unchecked(self.0.slice(start..)) + } + } + pub fn as_reader<'r>(&'r self) -> CompactBlockV1Reader<'r> { + CompactBlockV1Reader::new_unchecked(self.as_slice()) + } +} +impl molecule::prelude::Entity for CompactBlockV1 { + type Builder = CompactBlockV1Builder; + const NAME: &'static str = "CompactBlockV1"; + fn new_unchecked(data: molecule::bytes::Bytes) -> Self { + CompactBlockV1(data) + } + fn as_bytes(&self) -> molecule::bytes::Bytes { + self.0.clone() + } + fn 
as_slice(&self) -> &[u8] { + &self.0[..] + } + fn from_slice(slice: &[u8]) -> molecule::error::VerificationResult { + CompactBlockV1Reader::from_slice(slice).map(|reader| reader.to_entity()) + } + fn from_compatible_slice(slice: &[u8]) -> molecule::error::VerificationResult { + CompactBlockV1Reader::from_compatible_slice(slice).map(|reader| reader.to_entity()) + } + fn new_builder() -> Self::Builder { + ::core::default::Default::default() + } + fn as_builder(self) -> Self::Builder { + Self::new_builder() + .header(self.header()) + .short_ids(self.short_ids()) + .prefilled_transactions(self.prefilled_transactions()) + .uncles(self.uncles()) + .proposals(self.proposals()) + .extension(self.extension()) + } +} +#[derive(Clone, Copy)] +pub struct CompactBlockV1Reader<'r>(&'r [u8]); +impl<'r> ::core::fmt::LowerHex for CompactBlockV1Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl<'r> ::core::fmt::Debug for CompactBlockV1Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl<'r> ::core::fmt::Display for CompactBlockV1Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{} {{ ", Self::NAME)?; + write!(f, "{}: {}", "header", self.header())?; + write!(f, ", {}: {}", "short_ids", self.short_ids())?; + write!( + f, + ", {}: {}", + "prefilled_transactions", + self.prefilled_transactions() + )?; + write!(f, ", {}: {}", "uncles", self.uncles())?; + write!(f, ", {}: {}", "proposals", self.proposals())?; + write!(f, ", {}: {}", "extension", self.extension())?; + let extra_count = self.count_extra_fields(); + if extra_count != 0 { + write!(f, ", .. 
({} fields)", extra_count)?; + } + write!(f, " }}") + } +} +impl<'r> CompactBlockV1Reader<'r> { + pub const FIELD_COUNT: usize = 6; + pub fn total_size(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn field_count(&self) -> usize { + if self.total_size() == molecule::NUMBER_SIZE { + 0 + } else { + (molecule::unpack_number(&self.as_slice()[molecule::NUMBER_SIZE..]) as usize / 4) - 1 + } + } + pub fn count_extra_fields(&self) -> usize { + self.field_count() - Self::FIELD_COUNT + } + pub fn has_extra_fields(&self) -> bool { + Self::FIELD_COUNT != self.field_count() + } + pub fn header(&self) -> HeaderReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[4..]) as usize; + let end = molecule::unpack_number(&slice[8..]) as usize; + HeaderReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn short_ids(&self) -> ProposalShortIdVecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[8..]) as usize; + let end = molecule::unpack_number(&slice[12..]) as usize; + ProposalShortIdVecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn prefilled_transactions(&self) -> IndexTransactionVecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[12..]) as usize; + let end = molecule::unpack_number(&slice[16..]) as usize; + IndexTransactionVecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn uncles(&self) -> Byte32VecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[16..]) as usize; + let end = molecule::unpack_number(&slice[20..]) as usize; + Byte32VecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn proposals(&self) -> ProposalShortIdVecReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[20..]) as usize; + let end = molecule::unpack_number(&slice[24..]) as usize; + 
ProposalShortIdVecReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn extension(&self) -> BytesReader<'r> { + let slice = self.as_slice(); + let start = molecule::unpack_number(&slice[24..]) as usize; + if self.has_extra_fields() { + let end = molecule::unpack_number(&slice[28..]) as usize; + BytesReader::new_unchecked(&self.as_slice()[start..end]) + } else { + BytesReader::new_unchecked(&self.as_slice()[start..]) + } + } +} +impl<'r> molecule::prelude::Reader<'r> for CompactBlockV1Reader<'r> { + type Entity = CompactBlockV1; + const NAME: &'static str = "CompactBlockV1Reader"; + fn to_entity(&self) -> Self::Entity { + Self::Entity::new_unchecked(self.as_slice().to_owned().into()) + } + fn new_unchecked(slice: &'r [u8]) -> Self { + CompactBlockV1Reader(slice) + } + fn as_slice(&self) -> &'r [u8] { + self.0 + } + fn verify(slice: &[u8], compatible: bool) -> molecule::error::VerificationResult<()> { + use molecule::verification_error as ve; + let slice_len = slice.len(); + if slice_len < molecule::NUMBER_SIZE { + return ve!(Self, HeaderIsBroken, molecule::NUMBER_SIZE, slice_len); + } + let total_size = molecule::unpack_number(slice) as usize; + if slice_len != total_size { + return ve!(Self, TotalSizeNotMatch, total_size, slice_len); + } + if slice_len == molecule::NUMBER_SIZE && Self::FIELD_COUNT == 0 { + return Ok(()); + } + if slice_len < molecule::NUMBER_SIZE * 2 { + return ve!(Self, HeaderIsBroken, molecule::NUMBER_SIZE * 2, slice_len); + } + let offset_first = molecule::unpack_number(&slice[molecule::NUMBER_SIZE..]) as usize; + if offset_first % molecule::NUMBER_SIZE != 0 || offset_first < molecule::NUMBER_SIZE * 2 { + return ve!(Self, OffsetsNotMatch); + } + if slice_len < offset_first { + return ve!(Self, HeaderIsBroken, offset_first, slice_len); + } + let field_count = offset_first / molecule::NUMBER_SIZE - 1; + if field_count < Self::FIELD_COUNT { + return ve!(Self, FieldCountNotMatch, Self::FIELD_COUNT, field_count); + } else if !compatible && 
field_count > Self::FIELD_COUNT { + return ve!(Self, FieldCountNotMatch, Self::FIELD_COUNT, field_count); + }; + let mut offsets: Vec = slice[molecule::NUMBER_SIZE..offset_first] + .chunks_exact(molecule::NUMBER_SIZE) + .map(|x| molecule::unpack_number(x) as usize) + .collect(); + offsets.push(total_size); + if offsets.windows(2).any(|i| i[0] > i[1]) { + return ve!(Self, OffsetsNotMatch); + } + HeaderReader::verify(&slice[offsets[0]..offsets[1]], compatible)?; + ProposalShortIdVecReader::verify(&slice[offsets[1]..offsets[2]], compatible)?; + IndexTransactionVecReader::verify(&slice[offsets[2]..offsets[3]], compatible)?; + Byte32VecReader::verify(&slice[offsets[3]..offsets[4]], compatible)?; + ProposalShortIdVecReader::verify(&slice[offsets[4]..offsets[5]], compatible)?; + BytesReader::verify(&slice[offsets[5]..offsets[6]], compatible)?; + Ok(()) + } +} +#[derive(Debug, Default)] +pub struct CompactBlockV1Builder { + pub(crate) header: Header, + pub(crate) short_ids: ProposalShortIdVec, + pub(crate) prefilled_transactions: IndexTransactionVec, + pub(crate) uncles: Byte32Vec, + pub(crate) proposals: ProposalShortIdVec, + pub(crate) extension: Bytes, +} +impl CompactBlockV1Builder { + pub const FIELD_COUNT: usize = 6; + pub fn header(mut self, v: Header) -> Self { + self.header = v; + self + } + pub fn short_ids(mut self, v: ProposalShortIdVec) -> Self { + self.short_ids = v; + self + } + pub fn prefilled_transactions(mut self, v: IndexTransactionVec) -> Self { + self.prefilled_transactions = v; + self + } + pub fn uncles(mut self, v: Byte32Vec) -> Self { + self.uncles = v; + self + } + pub fn proposals(mut self, v: ProposalShortIdVec) -> Self { + self.proposals = v; + self + } + pub fn extension(mut self, v: Bytes) -> Self { + self.extension = v; + self + } +} +impl molecule::prelude::Builder for CompactBlockV1Builder { + type Entity = CompactBlockV1; + const NAME: &'static str = "CompactBlockV1Builder"; + fn expected_length(&self) -> usize { + molecule::NUMBER_SIZE 
* (Self::FIELD_COUNT + 1) + + self.header.as_slice().len() + + self.short_ids.as_slice().len() + + self.prefilled_transactions.as_slice().len() + + self.uncles.as_slice().len() + + self.proposals.as_slice().len() + + self.extension.as_slice().len() + } + fn write(&self, writer: &mut W) -> molecule::io::Result<()> { + let mut total_size = molecule::NUMBER_SIZE * (Self::FIELD_COUNT + 1); + let mut offsets = Vec::with_capacity(Self::FIELD_COUNT); + offsets.push(total_size); + total_size += self.header.as_slice().len(); + offsets.push(total_size); + total_size += self.short_ids.as_slice().len(); + offsets.push(total_size); + total_size += self.prefilled_transactions.as_slice().len(); + offsets.push(total_size); + total_size += self.uncles.as_slice().len(); + offsets.push(total_size); + total_size += self.proposals.as_slice().len(); + offsets.push(total_size); + total_size += self.extension.as_slice().len(); + writer.write_all(&molecule::pack_number(total_size as molecule::Number))?; + for offset in offsets.into_iter() { + writer.write_all(&molecule::pack_number(offset as molecule::Number))?; + } + writer.write_all(self.header.as_slice())?; + writer.write_all(self.short_ids.as_slice())?; + writer.write_all(self.prefilled_transactions.as_slice())?; + writer.write_all(self.uncles.as_slice())?; + writer.write_all(self.proposals.as_slice())?; + writer.write_all(self.extension.as_slice())?; + Ok(()) + } + fn build(&self) -> Self::Entity { + let mut inner = Vec::with_capacity(self.expected_length()); + self.write(&mut inner) + .unwrap_or_else(|_| panic!("{} build should be ok", Self::NAME)); + CompactBlockV1::new_unchecked(inner.into()) + } +} +#[derive(Clone)] pub struct RelayTransaction(molecule::bytes::Bytes); impl ::core::fmt::LowerHex for RelayTransaction { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { diff --git a/verification/contextual/src/contextual_block_verifier.rs b/verification/contextual/src/contextual_block_verifier.rs index 
76ea06548d..7259a7ca38 100644 --- a/verification/contextual/src/contextual_block_verifier.rs +++ b/verification/contextual/src/contextual_block_verifier.rs @@ -1,6 +1,6 @@ use crate::uncles_verifier::{UncleProvider, UnclesVerifier}; use ckb_async_runtime::Handle; -use ckb_chain_spec::consensus::Consensus; +use ckb_chain_spec::consensus::{Consensus, ConsensusProvider}; use ckb_dao::DaoCalculator; use ckb_error::Error; use ckb_logger::error_target; @@ -12,8 +12,7 @@ use ckb_types::{ core::error::OutPointError, core::{ cell::{HeaderChecker, ResolvedTransaction}, - BlockNumber, BlockReward, BlockView, Capacity, Cycle, EpochExt, EpochNumberWithFraction, - HeaderView, TransactionView, + BlockReward, BlockView, Capacity, Cycle, EpochExt, HeaderView, TransactionView, }, packed::{Byte32, CellOutput, Script}, prelude::*, @@ -23,7 +22,7 @@ use ckb_verification::{ BlockErrorKind, CellbaseError, CommitError, ContextualTransactionVerifier, TimeRelativeTransactionVerifier, UnknownParentError, }; -use ckb_verification::{BlockTransactionsError, EpochError}; +use ckb_verification::{BlockTransactionsError, EpochError, TxVerifyEnv}; use ckb_verification_traits::Switch; use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator}; use std::collections::{HashMap, HashSet}; @@ -72,6 +71,12 @@ impl<'a, CS: ChainStore<'a>> HeaderChecker for VerifyContext<'a, CS> { } } +impl<'a, CS: ChainStore<'a>> ConsensusProvider for VerifyContext<'a, CS> { + fn get_consensus(&self) -> &Consensus { + &self.consensus + } +} + pub struct UncleVerifierContext<'a, 'b, CS> { epoch: &'b EpochExt, context: &'a VerifyContext<'a, CS>, @@ -307,26 +312,19 @@ impl<'a, 'b, 'c, CS: ChainStore<'a>> DaoHeaderVerifier<'a, 'b, 'c, CS> { struct BlockTxsVerifier<'a, CS> { context: &'a VerifyContext<'a, CS>, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - parent_hash: Byte32, + header: HeaderView, resolved: &'a [ResolvedTransaction], } impl<'a, CS: 
ChainStore<'a>> BlockTxsVerifier<'a, CS> { - #[allow(clippy::too_many_arguments)] pub fn new( context: &'a VerifyContext<'a, CS>, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - parent_hash: Byte32, + header: HeaderView, resolved: &'a [ResolvedTransaction], ) -> Self { BlockTxsVerifier { context, - block_number, - epoch_number_with_fraction, - parent_hash, + header, resolved, } } @@ -375,32 +373,24 @@ impl<'a, CS: ChainStore<'a>> BlockTxsVerifier<'a, CS> { .enumerate() .map(|(index, tx)| { let tx_hash = tx.transaction.hash(); + let tx_env = TxVerifyEnv::new_commit(&self.header); if let Some(cache_entry) = fetched_cache.get(&tx_hash) { - TimeRelativeTransactionVerifier::new( - &tx, - self.context, - self.block_number, - self.epoch_number_with_fraction, - self.parent_hash.clone(), - self.context.consensus, - ) - .verify() - .map_err(|error| { - BlockTransactionsError { - index: index as u32, - error, - } - .into() - }) - .map(|_| (tx_hash, *cache_entry)) + TimeRelativeTransactionVerifier::new(&tx, self.context, &tx_env) + .verify() + .map_err(|error| { + BlockTransactionsError { + index: index as u32, + error, + } + .into() + }) + .map(|_| (tx_hash, *cache_entry)) } else { ContextualTransactionVerifier::new( &tx, - self.block_number, - self.epoch_number_with_fraction, - self.parent_hash.clone(), self.context.consensus, &self.context.store.as_data_provider(), + &tx_env, ) .verify( self.context.consensus.max_block_cycles(), @@ -506,6 +496,7 @@ impl<'a, CS: ChainStore<'a>> ContextualBlockVerifier<'a, CS> { ) -> Result<(Cycle, Vec), Error> { let timer = Timer::start(); let parent_hash = block.data().header().raw().parent_hash(); + let header = block.header(); let parent = self .context .store @@ -547,14 +538,11 @@ impl<'a, CS: ChainStore<'a>> ContextualBlockVerifier<'a, CS> { RewardVerifier::new(&self.context, resolved, &parent).verify()?; } - let ret = BlockTxsVerifier::new( - &self.context, - block.number(), - block.epoch(), - 
parent_hash, - resolved, - ) - .verify(txs_verify_cache, handle, switch.disable_script())?; + let ret = BlockTxsVerifier::new(&self.context, header, resolved).verify( + txs_verify_cache, + handle, + switch.disable_script(), + )?; metrics!(timing, "ckb.contextual_verified_block", timer.stop()); Ok(ret) } diff --git a/verification/contextual/src/tests/uncle_verifier.rs b/verification/contextual/src/tests/uncle_verifier.rs index 3f956d2a30..adc0a867c1 100644 --- a/verification/contextual/src/tests/uncle_verifier.rs +++ b/verification/contextual/src/tests/uncle_verifier.rs @@ -12,7 +12,7 @@ use ckb_types::{ BlockBuilder, BlockNumber, BlockView, EpochExt, HeaderView, TransactionBuilder, TransactionView, UncleBlockView, }, - packed::{Byte32, CellInput, ProposalShortId, Script, UncleBlockVec}, + packed::{CellInput, ProposalShortId, Script}, prelude::*, }; use ckb_verification::UnclesError; @@ -133,63 +133,6 @@ fn epoch(shared: &Shared, chain: &[BlockView], index: usize) -> EpochExt { .epoch() } -#[test] -fn test_invalid_uncle_hash_case1() { - let (shared, chain1, chain2) = prepare(); - let dummy_context = dummy_context(&shared); - - // header has uncle_count is 1 but uncles_hash is not Byte32::one() - // body has 1 uncles - let block = chain1 - .last() - .cloned() - .unwrap() - .as_advanced_builder() - .uncle(chain2.last().cloned().unwrap().as_uncle()) - .build_unchecked(); - - let epoch = epoch(&shared, &chain1, chain1.len() - 2); - let uncle_verifier_context = UncleVerifierContext::new(&dummy_context, &epoch); - let verifier = UnclesVerifier::new(uncle_verifier_context, &block); - - assert_error_eq!( - verifier.verify().unwrap_err(), - UnclesError::InvalidHash { - expected: Byte32::zero(), - actual: block.calc_uncles_hash(), - }, - ); -} - -#[test] -fn test_invalid_uncle_hash_case2() { - let (shared, chain1, chain2) = prepare(); - let dummy_context = dummy_context(&shared); - - // header has empty uncles, but the uncles hash is not matched - let uncles: UncleBlockVec = 
vec![chain2.last().cloned().unwrap().data().as_uncle()].pack(); - let uncles_hash = uncles.calc_uncles_hash(); - let block = chain1 - .last() - .cloned() - .unwrap() - .as_advanced_builder() - .uncles_hash(uncles_hash.clone()) - .build_unchecked(); - - let epoch = epoch(&shared, &chain1, chain1.len() - 2); - let uncle_verifier_context = UncleVerifierContext::new(&dummy_context, &epoch); - let verifier = UnclesVerifier::new(uncle_verifier_context, &block); - - assert_error_eq!( - verifier.verify().unwrap_err(), - UnclesError::InvalidHash { - expected: uncles_hash, - actual: Byte32::zero(), - }, - ); -} - // Uncle is ancestor block #[test] fn test_double_inclusion() { diff --git a/verification/contextual/src/uncles_verifier.rs b/verification/contextual/src/uncles_verifier.rs index 0333f03179..560e49e40a 100644 --- a/verification/contextual/src/uncles_verifier.rs +++ b/verification/contextual/src/uncles_verifier.rs @@ -36,7 +36,6 @@ where UnclesVerifier { provider, block } } - // - uncles_hash // - uncles_num // - depth // - uncle not in main chain @@ -44,16 +43,6 @@ where pub fn verify(&self) -> Result<(), Error> { let uncles_count = self.block.data().uncles().len() as u32; - // verify uncles_hash - let actual_uncles_hash = self.block.calc_uncles_hash(); - if actual_uncles_hash != self.block.uncles_hash() { - return Err(UnclesError::InvalidHash { - expected: self.block.uncles_hash(), - actual: actual_uncles_hash, - } - .into()); - } - // if self.block.uncles is empty, return if uncles_count == 0 { return Ok(()); diff --git a/verification/src/block_verifier.rs b/verification/src/block_verifier.rs index dbab3c36c4..590d739854 100644 --- a/verification/src/block_verifier.rs +++ b/verification/src/block_verifier.rs @@ -16,6 +16,7 @@ use std::collections::HashSet; /// Contains: /// - [`CellbaseVerifier`](./struct.CellbaseVerifier.html) /// - [`BlockBytesVerifier`](./struct.BlockBytesVerifier.html) +/// - [`BlockExtensionVerifier`](./struct.BlockExtensionVerifier.html) /// 
- [`BlockProposalsLimitVerifier`](./struct.BlockProposalsLimitVerifier.html) /// - [`DuplicateVerifier`](./struct.DuplicateVerifier.html) /// - [`MerkleRootVerifier`](./struct.MerkleRootVerifier.html) @@ -39,6 +40,7 @@ impl<'a> Verifier for BlockVerifier<'a> { let max_block_bytes = self.consensus.max_block_bytes(); BlockProposalsLimitVerifier::new(max_block_proposals_limit).verify(target)?; BlockBytesVerifier::new(max_block_bytes).verify(target)?; + BlockExtensionVerifier::new(self.consensus).verify(target)?; CellbaseVerifier::new().verify(target)?; DuplicateVerifier::new().verify(target)?; MerkleRootVerifier::new().verify(target) @@ -236,6 +238,59 @@ impl BlockBytesVerifier { } } +/// BlockExtensionVerifier. +/// +/// Check block extension. +#[derive(Clone)] +pub struct BlockExtensionVerifier<'a> { + consensus: &'a Consensus, +} + +impl<'a> BlockExtensionVerifier<'a> { + pub fn new(consensus: &'a Consensus) -> Self { + BlockExtensionVerifier { consensus } + } + + pub fn verify(&self, block: &BlockView) -> Result<(), Error> { + let epoch_number = block.epoch().number(); + let hardfork_switch = self.consensus.hardfork_switch(); + let extra_fields_count = block.data().count_extra_fields(); + let is_reuse_uncles_hash_as_extra_hash_enabled = + hardfork_switch.is_reuse_uncles_hash_as_extra_hash_enabled(epoch_number); + + if is_reuse_uncles_hash_as_extra_hash_enabled { + match extra_fields_count { + 0 => {} + 1 => { + let extension = if let Some(data) = block.extension() { + data + } else { + return Err(BlockErrorKind::UnknownFields.into()); + }; + if extension.is_empty() { + return Err(BlockErrorKind::EmptyBlockExtension.into()); + } + if extension.len() > 96 { + return Err(BlockErrorKind::ExceededMaximumBlockExtensionBytes.into()); + } + } + _ => { + return Err(BlockErrorKind::UnknownFields.into()); + } + } + } else if extra_fields_count > 0 { + return Err(BlockErrorKind::UnknownFields.into()); + } + + let actual_extra_hash = block.calc_extra_hash().extra_hash(); + if 
actual_extra_hash != block.extra_hash() { + return Err(BlockErrorKind::InvalidExtraHash.into()); + } + + Ok(()) + } +} + /// Context-independent verification checks for block transactions /// /// Basic checks that don't depend on any context diff --git a/verification/src/error.rs b/verification/src/error.rs index 72badedd05..915a0ce431 100644 --- a/verification/src/error.rs +++ b/verification/src/error.rs @@ -102,6 +102,18 @@ pub enum BlockErrorKind { /// Total bytes of block exceeds limit. ExceededMaximumBlockBytes, + + /// Empty block extension. + EmptyBlockExtension, + + /// Total bytes of block extension exceeds limit. + ExceededMaximumBlockExtensionBytes, + + /// The block has unknown field. + UnknownFields, + + /// The calculated extra-hash does not match with the one in the header. + InvalidExtraHash, } def_error_base_on_kind!( @@ -190,15 +202,6 @@ pub enum UnclesError { actual: u32, }, - /// The calculated uncle-hash does not match with the one in the header. - #[error("InvalidHash(expected: {expected}, actual: {actual})")] - InvalidHash { - /// The calculated uncle-hash - expected: Byte32, - /// The actual uncle-hash - actual: Byte32, - }, - /// There is an uncle whose number is greater than or equal to current block number. #[error("InvalidNumber")] InvalidNumber, diff --git a/verification/src/lib.rs b/verification/src/lib.rs index de3311821d..3edcaa1dcb 100644 --- a/verification/src/lib.rs +++ b/verification/src/lib.rs @@ -24,6 +24,7 @@ pub use crate::transaction_verifier::{ ContextualTransactionVerifier, NonContextualTransactionVerifier, ScriptVerifier, Since, SinceMetric, TimeRelativeTransactionVerifier, TransactionVerifier, }; +pub use ckb_script::TxVerifyEnv; /// Maximum amount of time that a block timestamp is allowed to exceed the /// current time before the block will be accepted. 
diff --git a/verification/src/tests/block_verifier.rs b/verification/src/tests/block_verifier.rs index 4320e3b6b5..44472321cd 100644 --- a/verification/src/tests/block_verifier.rs +++ b/verification/src/tests/block_verifier.rs @@ -1,14 +1,15 @@ use super::super::block_verifier::{ - BlockBytesVerifier, BlockProposalsLimitVerifier, CellbaseVerifier, DuplicateVerifier, - MerkleRootVerifier, + BlockBytesVerifier, BlockExtensionVerifier, BlockProposalsLimitVerifier, CellbaseVerifier, + DuplicateVerifier, MerkleRootVerifier, }; use crate::{BlockErrorKind, CellbaseError}; +use ckb_chain_spec::consensus::ConsensusBuilder; use ckb_error::assert_error_eq; use ckb_types::{ bytes::Bytes, core::{ - capacity_bytes, BlockBuilder, BlockNumber, Capacity, HeaderBuilder, TransactionBuilder, - TransactionView, + capacity_bytes, hardfork::HardForkSwitch, BlockBuilder, BlockNumber, Capacity, + EpochNumberWithFraction, HeaderBuilder, TransactionBuilder, TransactionView, }, h256, packed::{Byte32, CellInput, CellOutputBuilder, OutPoint, ProposalShortId, Script}, @@ -425,3 +426,128 @@ pub fn test_max_proposals_limit_verifier() { ); } } + +#[test] +fn test_block_extension_verifier() { + let fork_at = 10; + let epoch = EpochNumberWithFraction::new(fork_at, 0, 1); + + // normal block (no uncles) + let header = HeaderBuilder::default().epoch(epoch.pack()).build(); + let block = BlockBuilder::default().header(header).build(); + + // invalid extra hash (no extension) + let header1 = block + .header() + .as_advanced_builder() + .extra_hash(h256!("0x1").pack()) + .build(); + let block1 = BlockBuilder::default().header(header1).build_unchecked(); + + // empty extension + let block2 = block + .as_advanced_builder() + .extension(Some(Default::default())) + .build(); + // extension has only 1 byte + let block3 = block + .as_advanced_builder() + .extension(Some(vec![0u8].pack())) + .build(); + // extension has 96 bytes + let block4 = block + .as_advanced_builder() + .extension(Some(vec![0u8; 
96].pack())) + .build(); + // extension has 97 bytes + let block5 = block + .as_advanced_builder() + .extension(Some(vec![0u8; 97].pack())) + .build(); + + // normal block (with uncles) + let block6 = block + .as_advanced_builder() + .uncle(BlockBuilder::default().build().as_uncle()) + .build(); + + // invalid extra hash (has extension but use uncles hash) + let block7 = block6 + .as_advanced_builder() + .extension(Some(vec![0u8; 32].pack())) + .build_unchecked(); + + { + // Test CKB v2019 + let hardfork_switch = HardForkSwitch::new_without_any_enabled() + .as_builder() + .rfc_pr_0224(fork_at + 1) + .build() + .unwrap(); + let consensus = ConsensusBuilder::default() + .hardfork_switch(hardfork_switch) + .build(); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block); + assert!(result.is_ok(), "result = {:?}", result); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block1); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::InvalidExtraHash); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block2); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::UnknownFields); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block3); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::UnknownFields); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block4); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::UnknownFields); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block5); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::UnknownFields); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block6); + assert!(result.is_ok(), "result = {:?}", result); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block7); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::UnknownFields); + } + { + // Test CKB v2021 + let hardfork_switch = HardForkSwitch::new_without_any_enabled() + .as_builder() + .rfc_pr_0224(fork_at) + .build() + 
.unwrap(); + let consensus = ConsensusBuilder::default() + .hardfork_switch(hardfork_switch) + .build(); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block); + assert!(result.is_ok(), "result = {:?}", result); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block1); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::InvalidExtraHash); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block2); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::EmptyBlockExtension); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block3); + assert!(result.is_ok(), "result = {:?}", result); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block4); + assert!(result.is_ok(), "result = {:?}", result); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block5); + assert_error_eq!( + result.unwrap_err(), + BlockErrorKind::ExceededMaximumBlockExtensionBytes + ); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block6); + assert!(result.is_ok(), "result = {:?}", result); + + let result = BlockExtensionVerifier::new(&consensus).verify(&block7); + assert_error_eq!(result.unwrap_err(), BlockErrorKind::InvalidExtraHash); + } +} diff --git a/verification/src/tests/transaction_verifier.rs b/verification/src/tests/transaction_verifier.rs index 70382a494f..bda3972923 100644 --- a/verification/src/tests/transaction_verifier.rs +++ b/verification/src/tests/transaction_verifier.rs @@ -3,8 +3,8 @@ use super::super::transaction_verifier::{ Since, SinceVerifier, SizeVerifier, VersionVerifier, }; use crate::error::TransactionErrorSource; -use crate::TransactionError; -use ckb_chain_spec::{build_genesis_type_id_script, OUTPUT_INDEX_DAO}; +use crate::{TransactionError, TxVerifyEnv}; +use ckb_chain_spec::{build_genesis_type_id_script, consensus::ConsensusBuilder, OUTPUT_INDEX_DAO}; use ckb_error::{assert_error_eq, Error}; use ckb_test_chain_utils::{MockMedianTime, 
MOCK_MEDIAN_TIME_COUNT}; use ckb_traits::HeaderProvider; @@ -14,8 +14,9 @@ use ckb_types::{ core::{ capacity_bytes, cell::{CellMetaBuilder, ResolvedTransaction}, - BlockNumber, Capacity, EpochNumber, EpochNumberWithFraction, TransactionBuilder, - TransactionInfo, TransactionView, + hardfork::HardForkSwitch, + BlockNumber, Capacity, EpochNumber, EpochNumberWithFraction, HeaderView, + TransactionBuilder, TransactionInfo, TransactionView, }, h256, packed::{CellDep, CellInput, CellOutput, OutPoint}, @@ -392,15 +393,19 @@ fn verify_since<'a, DL: HeaderProvider>( epoch_number: EpochNumber, ) -> Result<(), Error> { let parent_hash = Arc::new(MockMedianTime::get_block_hash(block_number - 1)); - SinceVerifier::new( - rtx, - data_loader, - block_number, - EpochNumberWithFraction::new(epoch_number, 0, 10), - 11, - parent_hash.as_ref().to_owned(), - ) - .verify() + let consensus = ConsensusBuilder::default() + .median_time_block_count(11) + .build(); + let tx_env = { + let epoch = EpochNumberWithFraction::new(epoch_number, 0, 10); + let header = HeaderView::new_advanced_builder() + .number(block_number.pack()) + .epoch(epoch.pack()) + .parent_hash(parent_hash.as_ref().to_owned()) + .build(); + TxVerifyEnv::new_commit(&header) + }; + SinceVerifier::new(rtx, &consensus, data_loader, &tx_env).verify() } #[test] @@ -495,33 +500,100 @@ fn test_fraction_epoch_since_verify() { &tx, MockMedianTime::get_transaction_info(1, EpochNumberWithFraction::new(0, 0, 10), 1), ); + let consensus = ConsensusBuilder::default() + .median_time_block_count(MOCK_MEDIAN_TIME_COUNT) + .build(); let median_time_context = MockMedianTime::new(vec![0; 11]); let block_number = 1000; let parent_hash = Arc::new(MockMedianTime::get_block_hash(block_number - 1)); - let result = SinceVerifier::new( - &rtx, - &median_time_context, - block_number, - EpochNumberWithFraction::new(16, 1, 10), - MOCK_MEDIAN_TIME_COUNT, - parent_hash.as_ref().to_owned(), - ) - .verify(); + let tx_env = { + let epoch = 
EpochNumberWithFraction::new(16, 1, 10); + let header = HeaderView::new_advanced_builder() + .number(block_number.pack()) + .epoch(epoch.pack()) + .parent_hash(parent_hash.as_ref().to_owned()) + .build(); + TxVerifyEnv::new_commit(&header) + }; + let result = SinceVerifier::new(&rtx, &consensus, &median_time_context, &tx_env).verify(); assert_error_eq!(result.unwrap_err(), TransactionError::Immature { index: 0 }); - let result = SinceVerifier::new( - &rtx, - &median_time_context, - block_number, - EpochNumberWithFraction::new(16, 5, 10), - MOCK_MEDIAN_TIME_COUNT, - parent_hash.as_ref().to_owned(), - ) - .verify(); + let tx_env = { + let epoch = EpochNumberWithFraction::new(16, 5, 10); + let header = HeaderView::new_advanced_builder() + .number(block_number.pack()) + .epoch(epoch.pack()) + .parent_hash(parent_hash.as_ref().to_owned()) + .build(); + TxVerifyEnv::new_commit(&header) + }; + let result = SinceVerifier::new(&rtx, &consensus, &median_time_context, &tx_env).verify(); assert!(result.is_ok()); } +#[test] +fn test_fraction_epoch_since_verify_v2021() { + let fork_at = 16; + let transaction_info = + MockMedianTime::get_transaction_info(1, EpochNumberWithFraction::new(0, 0, 10), 1); + let tx1 = create_tx_with_lock(0x2000_0a00_0f00_000f); + let rtx1 = create_resolve_tx_with_transaction_info(&tx1, transaction_info.clone()); + let tx2 = create_tx_with_lock(0x2000_0a00_0500_0010); + let rtx2 = create_resolve_tx_with_transaction_info(&tx2, transaction_info); + let median_time_context = MockMedianTime::new(vec![0; 11]); + let tx_env = { + let block_number = 1000; + let epoch = EpochNumberWithFraction::new(fork_at, 5, 10); + let parent_hash = Arc::new(MockMedianTime::get_block_hash(block_number - 1)); + let header = HeaderView::new_advanced_builder() + .number(block_number.pack()) + .epoch(epoch.pack()) + .parent_hash(parent_hash.as_ref().to_owned()) + .build(); + TxVerifyEnv::new_commit(&header) + }; + + { + // Test CKB v2019 + let hardfork_switch = 
HardForkSwitch::new_without_any_enabled() + .as_builder() + .rfc_pr_0223(fork_at + 1) + .build() + .unwrap(); + let consensus = ConsensusBuilder::default() + .median_time_block_count(MOCK_MEDIAN_TIME_COUNT) + .hardfork_switch(hardfork_switch) + .build(); + let result = SinceVerifier::new(&rtx1, &consensus, &median_time_context, &tx_env).verify(); + assert!(result.is_ok(), "result = {:?}", result); + + let result = SinceVerifier::new(&rtx2, &consensus, &median_time_context, &tx_env).verify(); + assert!(result.is_ok(), "result = {:?}", result); + } + { + // Test CKB v2021 + let hardfork_switch = HardForkSwitch::new_without_any_enabled() + .as_builder() + .rfc_pr_0223(fork_at) + .build() + .unwrap(); + let consensus = ConsensusBuilder::default() + .median_time_block_count(MOCK_MEDIAN_TIME_COUNT) + .hardfork_switch(hardfork_switch) + .build(); + + let result = SinceVerifier::new(&rtx1, &consensus, &median_time_context, &tx_env).verify(); + assert_error_eq!( + result.unwrap_err(), + TransactionError::InvalidSince { index: 0 } + ); + + let result = SinceVerifier::new(&rtx2, &consensus, &median_time_context, &tx_env).verify(); + assert!(result.is_ok(), "result = {:?}", result); + } +} + #[test] pub fn test_absolute_block_number_lock() { // absolute lock until block number 0xa diff --git a/verification/src/transaction_verifier.rs b/verification/src/transaction_verifier.rs index 08f1795007..f8b1d558af 100644 --- a/verification/src/transaction_verifier.rs +++ b/verification/src/transaction_verifier.rs @@ -1,7 +1,7 @@ use crate::cache::CacheEntry; use crate::error::TransactionErrorSource; -use crate::TransactionError; -use ckb_chain_spec::consensus::Consensus; +use crate::{TransactionError, TxVerifyEnv}; +use ckb_chain_spec::consensus::{Consensus, ConsensusProvider}; use ckb_dao::DaoCalculator; use ckb_error::Error; use ckb_metrics::{metrics, Timer}; @@ -10,8 +10,7 @@ use ckb_traits::{CellDataProvider, EpochProvider, HeaderProvider}; use ckb_types::{ core::{ cell::{CellMeta, 
ResolvedTransaction}, - BlockNumber, Capacity, Cycle, EpochNumberWithFraction, ScriptHashType, TransactionView, - Version, + Capacity, Cycle, EpochNumberWithFraction, ScriptHashType, TransactionView, Version, }, packed::Byte32, prelude::*, @@ -30,30 +29,13 @@ pub struct TimeRelativeTransactionVerifier<'a, M> { pub(crate) since: SinceVerifier<'a, M>, } -impl<'a, DL: HeaderProvider> TimeRelativeTransactionVerifier<'a, DL> { +impl<'a, DL: HeaderProvider + ConsensusProvider> TimeRelativeTransactionVerifier<'a, DL> { /// Creates a new TimeRelativeTransactionVerifier - pub fn new( - rtx: &'a ResolvedTransaction, - data_loader: &'a DL, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - parent_hash: Byte32, - consensus: &'a Consensus, - ) -> Self { + pub fn new(rtx: &'a ResolvedTransaction, data_loader: &'a DL, tx_env: &'a TxVerifyEnv) -> Self { + let consensus = data_loader.get_consensus(); TimeRelativeTransactionVerifier { - maturity: MaturityVerifier::new( - &rtx, - epoch_number_with_fraction, - consensus.cellbase_maturity(), - ), - since: SinceVerifier::new( - rtx, - data_loader, - block_number, - epoch_number_with_fraction, - consensus.median_time_block_count(), - parent_hash, - ), + maturity: MaturityVerifier::new(&rtx, tx_env.epoch(), consensus.cellbase_maturity()), + since: SinceVerifier::new(rtx, consensus, data_loader, tx_env), } } @@ -126,31 +108,17 @@ where DL: CellDataProvider + HeaderProvider + EpochProvider, { /// Creates a new ContextualTransactionVerifier - #[allow(clippy::too_many_arguments)] pub fn new( rtx: &'a ResolvedTransaction, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - parent_hash: Byte32, consensus: &'a Consensus, data_loader: &'a DL, + tx_env: &'a TxVerifyEnv, ) -> Self { ContextualTransactionVerifier { - maturity: MaturityVerifier::new( - &rtx, - epoch_number_with_fraction, - consensus.cellbase_maturity(), - ), - script: ScriptVerifier::new(rtx, data_loader), + 
maturity: MaturityVerifier::new(&rtx, tx_env.epoch(), consensus.cellbase_maturity()), + script: ScriptVerifier::new(rtx, consensus, data_loader, tx_env), capacity: CapacityVerifier::new(rtx, consensus.dao_type_hash()), - since: SinceVerifier::new( - rtx, - data_loader, - block_number, - epoch_number_with_fraction, - consensus.median_time_block_count(), - parent_hash, - ), + since: SinceVerifier::new(rtx, consensus, data_loader, tx_env), fee_calculator: FeeCalculator::new(rtx, consensus, data_loader), } } @@ -186,25 +154,15 @@ pub struct TransactionVerifier<'a, DL> { impl<'a, DL: HeaderProvider + CellDataProvider + EpochProvider> TransactionVerifier<'a, DL> { /// Creates a new TransactionVerifier - #[allow(clippy::too_many_arguments)] pub fn new( rtx: &'a ResolvedTransaction, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - parent_hash: Byte32, consensus: &'a Consensus, data_loader: &'a DL, + tx_env: &'a TxVerifyEnv, ) -> Self { TransactionVerifier { non_contextual: NonContextualTransactionVerifier::new(&rtx.transaction, consensus), - contextual: ContextualTransactionVerifier::new( - rtx, - block_number, - epoch_number_with_fraction, - parent_hash, - consensus, - data_loader, - ), + contextual: ContextualTransactionVerifier::new(rtx, consensus, data_loader, tx_env), } } @@ -302,24 +260,38 @@ impl<'a> SizeVerifier<'a> { /// - [ckb-vm](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0003-ckb-vm/0003-ckb-vm.md) /// - [vm-cycle-limits](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0014-vm-cycle-limits/0014-vm-cycle-limits.md) pub struct ScriptVerifier<'a, DL> { - data_loader: &'a DL, resolved_transaction: &'a ResolvedTransaction, + consensus: &'a Consensus, + data_loader: &'a DL, + tx_env: &'a TxVerifyEnv, } impl<'a, DL: CellDataProvider + HeaderProvider> ScriptVerifier<'a, DL> { /// Creates a new ScriptVerifier - pub fn new(resolved_transaction: &'a ResolvedTransaction, data_loader: &'a DL) -> Self { + pub fn new( + 
resolved_transaction: &'a ResolvedTransaction, + consensus: &'a Consensus, + data_loader: &'a DL, + tx_env: &'a TxVerifyEnv, + ) -> Self { ScriptVerifier { - data_loader, resolved_transaction, + consensus, + data_loader, + tx_env, } } /// Perform script verification pub fn verify(&self, max_cycles: Cycle) -> Result { let timer = Timer::start(); - let cycle = TransactionScriptsVerifier::new(&self.resolved_transaction, self.data_loader) - .verify(max_cycles)?; + let cycle = TransactionScriptsVerifier::new( + &self.resolved_transaction, + self.consensus, + self.data_loader, + self.tx_env, + ) + .verify(max_cycles)?; metrics!(timing, "ckb.verified_script", timer.stop()); Ok(cycle) } @@ -588,31 +560,25 @@ impl Since { /// [tx-since-specification](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0017-tx-valid-since/0017-tx-valid-since.md#detailed-specification pub struct SinceVerifier<'a, DL> { rtx: &'a ResolvedTransaction, + consensus: &'a Consensus, data_loader: &'a DL, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - parent_hash: Byte32, - median_block_count: usize, + tx_env: &'a TxVerifyEnv, median_timestamps_cache: RefCell>, } impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { pub fn new( rtx: &'a ResolvedTransaction, + consensus: &'a Consensus, data_loader: &'a DL, - block_number: BlockNumber, - epoch_number_with_fraction: EpochNumberWithFraction, - median_block_count: usize, - parent_hash: Byte32, + tx_env: &'a TxVerifyEnv, ) -> Self { let median_timestamps_cache = RefCell::new(LruCache::new(rtx.resolved_inputs.len())); SinceVerifier { rtx, + consensus, data_loader, - block_number, - epoch_number_with_fraction, - parent_hash, - median_block_count, + tx_env, median_timestamps_cache, } } @@ -627,9 +593,10 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { return *median_time; } + let median_block_count = self.consensus.median_time_block_count(); let median_time = self .data_loader - .block_median_time(block_hash, 
self.median_block_count); + .block_median_time(block_hash, median_block_count); self.median_timestamps_cache .borrow_mut() .put(block_hash.clone(), median_time); @@ -640,17 +607,27 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { if since.is_absolute() { match since.extract_metric() { Some(SinceMetric::BlockNumber(block_number)) => { - if self.block_number < block_number { + let proposal_window = self.consensus.tx_proposal_window(); + if self.tx_env.block_number(proposal_window) < block_number { return Err((TransactionError::Immature { index }).into()); } } Some(SinceMetric::EpochNumberWithFraction(epoch_number_with_fraction)) => { - if self.epoch_number_with_fraction < epoch_number_with_fraction { + let proposal_window = self.consensus.tx_proposal_window(); + let epoch_number = self.tx_env.epoch_number(proposal_window); + let hardfork_switch = self.consensus.hardfork_switch(); + if hardfork_switch.is_check_length_in_epoch_since_enabled(epoch_number) + && !epoch_number_with_fraction.is_well_formed() + { + return Err((TransactionError::InvalidSince { index }).into()); + } + if self.tx_env.epoch() < epoch_number_with_fraction { return Err((TransactionError::Immature { index }).into()); } } Some(SinceMetric::Timestamp(timestamp)) => { - let tip_timestamp = self.block_median_time(&self.parent_hash); + let parent_hash = self.tx_env.parent_hash(); + let tip_timestamp = self.block_median_time(&parent_hash); if tip_timestamp < timestamp { return Err((TransactionError::Immature { index }).into()); } @@ -676,12 +653,22 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { }?; match since.extract_metric() { Some(SinceMetric::BlockNumber(block_number)) => { - if self.block_number < info.block_number + block_number { + let proposal_window = self.consensus.tx_proposal_window(); + if self.tx_env.block_number(proposal_window) < info.block_number + block_number + { return Err((TransactionError::Immature { index }).into()); } } 
Some(SinceMetric::EpochNumberWithFraction(epoch_number_with_fraction)) => { - let a = self.epoch_number_with_fraction.to_rational(); + let proposal_window = self.consensus.tx_proposal_window(); + let epoch_number = self.tx_env.epoch_number(proposal_window); + let hardfork_switch = self.consensus.hardfork_switch(); + if hardfork_switch.is_check_length_in_epoch_since_enabled(epoch_number) + && !epoch_number_with_fraction.is_well_formed() + { + return Err((TransactionError::InvalidSince { index }).into()); + } + let a = self.tx_env.epoch().to_rational(); let b = info.block_epoch.to_rational() + epoch_number_with_fraction.to_rational(); if a < b { @@ -693,9 +680,22 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { // parent of current block. // pass_median_time(input_cell's block) starts with cell_block_number - 1, // which is the parent of input_cell's block - let cell_median_timestamp = self.parent_median_time(&info.block_hash); - let current_median_time = self.block_median_time(&self.parent_hash); - if current_median_time < cell_median_timestamp + timestamp { + let proposal_window = self.consensus.tx_proposal_window(); + let parent_hash = self.tx_env.parent_hash(); + let epoch_number = self.tx_env.epoch_number(proposal_window); + let hardfork_switch = self.consensus.hardfork_switch(); + let base_timestamp = if hardfork_switch + .is_block_ts_as_relative_since_start_enabled(epoch_number) + { + self.data_loader + .get_header(&info.block_hash) + .expect("header exist") + .timestamp() + } else { + self.parent_median_time(&info.block_hash) + }; + let current_median_time = self.block_median_time(&parent_hash); + if current_median_time < base_timestamp + timestamp { return Err((TransactionError::Immature { index }).into()); } }