refactor(consensus): next-epoch #2591

Closed · wants to merge 1 commit
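At a glance, this refactor replaces the closure-based `Consensus::next_epoch_ext` with a `NextEpochProvider` trait and an explicit `NextEpoch` result. The signatures below are assembled from the `spec/src/consensus.rs` hunks further down purely as a reading aid; they add nothing beyond what the diff already shows.

```rust
// Before: the caller supplies the parent epoch plus two lookup closures and
// gets Some(EpochExt) only when `header` closes the current epoch.
pub fn next_epoch_ext<A, B>(
    &self,
    last_epoch: &EpochExt,
    header: &HeaderView,
    get_block_header: A,
    total_uncles_count: B,
) -> Option<EpochExt>
where
    A: Fn(&Byte32) -> Option<HeaderView>,
    B: Fn(&Byte32) -> Option<u64>;

// After: the caller supplies a NextEpochProvider; lookups may fail with Error,
// and the epoch-boundary case is modelled explicitly as NextEpoch::New vs Last.
pub fn next_epoch_ext<P: NextEpochProvider>(
    &self,
    provider: &P,
    header: &HeaderView,
) -> Result<NextEpoch, Error>;
```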
122 changes: 61 additions & 61 deletions Cargo.lock

Large diffs are not rendered by default.

18 changes: 4 additions & 14 deletions benches/benches/benchmarks/util.rs
@@ -177,15 +177,10 @@ pub fn gen_always_success_block(
txs_to_resolve.extend_from_slice(&transactions);
let dao = dao_data(shared, &p_block.header(), &txs_to_resolve);

let last_epoch = shared
.store()
.get_block_epoch_index(&p_block.hash())
.and_then(|index| shared.store().get_epoch_ext(&index))
.unwrap();
let epoch = shared
.store()
.next_epoch_ext(shared.consensus(), &last_epoch, &p_block.header())
.unwrap_or(last_epoch);
.next_epoch_ext(shared.consensus(), &p_block.header())
.unwrap_epoch();

let block = BlockBuilder::default()
.transaction(cellbase)
@@ -391,15 +386,10 @@ pub fn gen_secp_block(
txs_to_resolve.extend_from_slice(&transactions);
let dao = dao_data(shared, &p_block.header(), &txs_to_resolve);

let last_epoch = shared
.store()
.get_block_epoch_index(&p_block.hash())
.and_then(|index| shared.store().get_epoch_ext(&index))
.unwrap();
let epoch = shared
.store()
.next_epoch_ext(shared.consensus(), &last_epoch, &p_block.header())
.unwrap_or(last_epoch);
.next_epoch_ext(shared.consensus(), &p_block.header())
.unwrap_epoch();

let block = BlockBuilder::default()
.transaction(cellbase)
17 changes: 5 additions & 12 deletions chain/src/chain.rs
@@ -358,18 +358,11 @@ impl ChainService {

db_txn.insert_block(&block)?;

let parent_header_epoch = txn_snapshot
.get_block_epoch(&parent_header.hash())
.expect("parent epoch already store");

let next_epoch_ext = txn_snapshot.next_epoch_ext(
self.shared.consensus(),
&parent_header_epoch,
&parent_header,
);
let new_epoch = next_epoch_ext.is_some();
let next_epoch_ext =
txn_snapshot.next_epoch_ext(self.shared.consensus(), &parent_header)?;
let new_epoch = next_epoch_ext.is_new();

let epoch = next_epoch_ext.unwrap_or_else(|| parent_header_epoch.to_owned());
let epoch = next_epoch_ext.unwrap_epoch();

let ext = BlockExt {
received_at: unix_time_as_millis(),
@@ -708,7 +701,7 @@ impl ChainService {

let transactions = b.transactions();
let resolved = {
let txn_cell_provider = txn.cell_provider();
let txn_cell_provider = txn.provider();
let cell_provider = OverlayCellProvider::new(&block_cp, &txn_cell_provider);
transactions
.iter()
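The `ChainService` hunk above is the clearest illustration of the behavioural shift: a missing parent epoch now propagates as an error instead of panicking through `expect("parent epoch already store")`, and the epoch-boundary check moves from `Option::is_some` to `NextEpoch::is_new`. A condensed sketch of the new shape, names as in the hunk above and surrounding code elided:

```rust
// `?` bubbles up store-lookup failures instead of panicking.
let next_epoch_ext = txn_snapshot.next_epoch_ext(self.shared.consensus(), &parent_header)?;
let new_epoch = next_epoch_ext.is_new(); // was: next_epoch_ext.is_some()
let epoch = next_epoch_ext.unwrap_epoch(); // was: unwrap_or_else(|| parent_header_epoch.to_owned())
```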
4 changes: 2 additions & 2 deletions chain/src/tests/basic.rs
@@ -487,8 +487,8 @@ fn prepare_context_chain(
for _ in 1..final_number - 1 {
let epoch = shared
.snapshot()
.next_epoch_ext(shared.consensus(), &last_epoch, &parent)
.unwrap_or(last_epoch);
.next_epoch_ext(shared.consensus(), &parent)
.unwrap_epoch();

let transactions = vec![create_cellbase(&mock_store, shared.consensus(), &parent)];
let dao = dao_data(
2 changes: 1 addition & 1 deletion chain/src/tests/cell.rs
@@ -142,7 +142,7 @@ fn test_block_cells_update() {
db_txn.attach_block(&block).unwrap();

attach_block_cell(&db_txn, &block).unwrap();
let txn_cell_provider = db_txn.cell_provider();
let txn_cell_provider = db_txn.provider();

// ensure tx0-2 outputs is spent after attach_block_cell
for tx in block.transactions()[1..4].iter() {
2 changes: 1 addition & 1 deletion chain/src/tests/reward.rs
@@ -88,7 +88,7 @@ pub(crate) fn gen_block(
.unwrap();
let epoch = store
.0
.next_epoch_ext(shared.consensus(), &last_epoch, &parent_header)
.next_epoch_ext(shared.consensus(), &parent_header)
.unwrap_or(last_epoch);

let block = BlockBuilder::default()
4 changes: 2 additions & 2 deletions chain/src/tests/util.rs
@@ -307,7 +307,7 @@ impl<'a> MockChain<'a> {
.unwrap();
let epoch = store
.0
.next_epoch_ext(self.consensus, &last_epoch, &parent)
.next_epoch_ext(self.consensus, &parent)
.unwrap_or(last_epoch);

let new_block = BlockBuilder::default()
@@ -390,7 +390,7 @@ impl<'a> MockChain<'a> {
.unwrap();
let epoch = store
.0
.next_epoch_ext(self.consensus, &last_epoch, &parent)
.next_epoch_ext(self.consensus, &parent)
.unwrap_or(last_epoch);

let new_block = BlockBuilder::default()
2 changes: 1 addition & 1 deletion ckb-bin/src/subcommand/run.rs
@@ -56,7 +56,7 @@ pub fn run(mut args: RunArgs, version: Version, async_handle: Handle) -> Result<

setup_system_cell_cache(
shared.consensus().genesis_block(),
&shared.store().cell_provider(),
&shared.store().provider(),
);

rayon::ThreadPoolBuilder::new()
56 changes: 39 additions & 17 deletions spec/src/consensus.rs
@@ -8,6 +8,7 @@ use crate::{
OUTPUT_INDEX_SECP256K1_BLAKE160_SIGHASH_ALL,
};
use ckb_dao_utils::genesis_dao_data_with_satoshi_gift;
use ckb_error::Error;
use ckb_pow::{Pow, PowEngine};
use ckb_rational::RationalU256;
use ckb_resource::Resource;
@@ -737,21 +738,17 @@

/// The [dynamic-difficulty-adjustment-mechanism](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0020-ckb-consensus-protocol/0020-ckb-consensus-protocol.md#dynamic-difficulty-adjustment-mechanism)
/// implementation
pub fn next_epoch_ext<A, B>(
pub fn next_epoch_ext<P: NextEpochProvider>(
&self,
last_epoch: &EpochExt,
provider: &P,
header: &HeaderView,
get_block_header: A,
total_uncles_count: B,
) -> Option<EpochExt>
where
A: Fn(&Byte32) -> Option<HeaderView>,
B: Fn(&Byte32) -> Option<u64>,
{
) -> Result<NextEpoch, Error> {
let last_epoch = provider.get_epoch_by_block_hash(&header.hash())?;
let last_epoch_length = last_epoch.length();

let header_number = header.number();
if header_number != (last_epoch.start_number() + last_epoch_length - 1) {
return None;
return Ok(NextEpoch::Last(last_epoch));
}

if self.permanent_difficulty() {
@@ -762,22 +759,21 @@
.last_block_hash_in_previous_epoch(header.hash())
.start_number(header_number + 1)
.build();
return Some(dummy_epoch_ext);
return Ok(NextEpoch::Last(dummy_epoch_ext));
}

let last_block_header_in_previous_epoch = if last_epoch.is_genesis() {
self.genesis_block().header()
} else {
get_block_header(&last_epoch.last_block_hash_in_previous_epoch())?
provider.get_block_header(&last_epoch.last_block_hash_in_previous_epoch())?
};

// (1) Computing the Adjusted Hash Rate Estimation
let last_difficulty = &header.difficulty();
let last_hash = header.hash();
let start_total_uncles_count =
total_uncles_count(&last_block_header_in_previous_epoch.hash())
.expect("block_ext exist");
let last_total_uncles_count = total_uncles_count(&last_hash).expect("block_ext exist");
provider.total_uncles_count(&last_block_header_in_previous_epoch.hash())?;
let last_total_uncles_count = provider.total_uncles_count(&last_hash)?;
let last_uncles_count = last_total_uncles_count - start_total_uncles_count;
let last_epoch_duration = U256::from(cmp::max(
header
@@ -862,7 +858,9 @@
U256::one()
};

let primary_epoch_reward = self.primary_epoch_reward_of_next_epoch(last_epoch).as_u64();
let primary_epoch_reward = self
.primary_epoch_reward_of_next_epoch(&last_epoch)
.as_u64();
let block_reward = Capacity::shannons(primary_epoch_reward / next_epoch_length);
let remainder_reward = Capacity::shannons(primary_epoch_reward % next_epoch_length);

@@ -877,7 +875,7 @@
.compact_target(difficulty_to_compact(next_epoch_diff))
.build();

Some(epoch_ext)
Ok(NextEpoch::New(epoch_ext))
}

/// The network identify name, used for network identify protocol
@@ -916,6 +914,30 @@
}
}

pub trait NextEpochProvider {
fn get_epoch_by_block_hash(&self, hash: &Byte32) -> Result<EpochExt, Error>;
fn get_block_header(&self, hash: &Byte32) -> Result<HeaderView, Error>;
fn total_uncles_count(&self, hash: &Byte32) -> Result<u64, Error>;
}

pub enum NextEpoch {
New(EpochExt),
Last(EpochExt),
}

impl NextEpoch {
pub fn unwrap_epoch(self) -> EpochExt {
match self {
NextEpoch::New(new) => new,
NextEpoch::Last(last) => last,
}
}

pub fn is_new(&self) -> bool {
matches!(*self, NextEpoch::New(_))
}
}

impl From<Consensus> for ckb_jsonrpc_types::Consensus {
fn from(consensus: Consensus) -> Self {
Self {
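A practical consequence of the new trait is that `Consensus::next_epoch_ext` no longer needs a real store to be exercised. The sketch below is hypothetical and not part of this PR: a `HashMap`-backed provider such as a unit test might use, assuming `NextEpochProvider`, `Error`, and `InternalErrorKind` exactly as they are used elsewhere in this diff.

```rust
use std::collections::HashMap;

use ckb_chain_spec::consensus::NextEpochProvider;
use ckb_error::{Error, InternalErrorKind};
use ckb_types::{
    core::{EpochExt, HeaderView},
    packed::Byte32,
};

/// Hypothetical in-memory provider for tests; not part of this PR.
struct MemProvider {
    epochs: HashMap<Byte32, EpochExt>,
    headers: HashMap<Byte32, HeaderView>,
    uncle_counts: HashMap<Byte32, u64>,
}

impl NextEpochProvider for MemProvider {
    fn get_epoch_by_block_hash(&self, hash: &Byte32) -> Result<EpochExt, Error> {
        self.epochs
            .get(hash)
            .cloned()
            .ok_or_else(|| InternalErrorKind::Database.other("epoch not found").into())
    }

    fn get_block_header(&self, hash: &Byte32) -> Result<HeaderView, Error> {
        self.headers
            .get(hash)
            .cloned()
            .ok_or_else(|| InternalErrorKind::Database.other("header not found").into())
    }

    fn total_uncles_count(&self, hash: &Byte32) -> Result<u64, Error> {
        self.uncle_counts
            .get(hash)
            .copied()
            .ok_or_else(|| InternalErrorKind::Database.other("block ext not found").into())
    }
}
```

With such a provider populated, a test could call `consensus.next_epoch_ext(&provider, &header)?` and assert on `NextEpoch::New` versus `NextEpoch::Last` directly, without a `ChainDB`.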
51 changes: 38 additions & 13 deletions store/src/store.rs
@@ -1,12 +1,13 @@
use crate::cache::StoreCache;
use ckb_chain_spec::consensus::Consensus;
use ckb_chain_spec::consensus::{Consensus, NextEpoch, NextEpochProvider};
use ckb_db::iter::{DBIter, Direction, IteratorMode};
use ckb_db_schema::{
Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_EPOCH, COLUMN_BLOCK_EXT, COLUMN_BLOCK_HEADER,
COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, COLUMN_CELL, COLUMN_CELL_DATA, COLUMN_EPOCH,
COLUMN_INDEX, COLUMN_META, COLUMN_TRANSACTION_INFO, COLUMN_UNCLES, META_CURRENT_EPOCH_KEY,
META_TIP_HEADER_KEY,
};
use ckb_error::{Error, InternalErrorKind};
use ckb_freezer::Freezer;
use ckb_types::{
bytes::Bytes,
@@ -19,7 +20,7 @@ use ckb_types::{
prelude::*,
};

pub struct CellProviderWrapper<'a, S>(&'a S);
pub struct ProviderWrapper<'a, S>(&'a S);

/// TODO(doc): @quake
pub trait ChainStore<'a>: Send + Sync + Sized {
@@ -34,8 +35,8 @@ pub trait ChainStore<'a>: Send + Sync + Sized {
/// TODO(doc): @quake
fn get_iter(&self, col: Col, mode: IteratorMode) -> DBIter;
/// TODO(doc): @quake
fn cell_provider(&self) -> CellProviderWrapper<Self> {
CellProviderWrapper(self)
fn provider(&self) -> ProviderWrapper<Self> {
ProviderWrapper(self)
}

/// Get block by block header hash
@@ -422,15 +423,9 @@ pub trait ChainStore<'a>: Send + Sync + Sized {
fn next_epoch_ext(
&'a self,
consensus: &Consensus,
last_epoch: &EpochExt,
header: &HeaderView,
) -> Option<EpochExt> {
consensus.next_epoch_ext(
last_epoch,
header,
|hash| self.get_block_header(&hash),
|hash| self.get_block_ext(&hash).map(|ext| ext.total_uncles_count),
)
) -> Result<NextEpoch, Error> {
consensus.next_epoch_ext(&self.provider(), header)
}

/// TODO(doc): @quake
@@ -493,7 +488,37 @@ fn build_cell_meta_from_reader(out_point: OutPoint, reader: packed::CellEntryRea
}
}

impl<'a, S> CellProvider for CellProviderWrapper<'a, S>
impl<'a, S> NextEpochProvider for ProviderWrapper<'a, S>
where
S: ChainStore<'a>,
{
fn get_epoch_by_block_hash(&self, hash: &packed::Byte32) -> Result<EpochExt, Error> {
self.0.get_block_epoch(hash).ok_or_else(|| {
InternalErrorKind::Database
.other("get_block_epoch not found")
.into()
})
}
fn get_block_header(&self, hash: &packed::Byte32) -> Result<HeaderView, Error> {
self.0.get_block_header(hash).ok_or_else(|| {
InternalErrorKind::Database
.other("get_block_header not found")
.into()
})
}
fn total_uncles_count(&self, hash: &packed::Byte32) -> Result<u64, Error> {
self.0
.get_block_ext(hash)
.map(|ext| ext.total_uncles_count)
.ok_or_else(|| {
InternalErrorKind::Database
.other("get_block_ext not found")
.into()
})
}
}

impl<'a, S> CellProvider for ProviderWrapper<'a, S>
where
S: ChainStore<'a>,
{
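The rename from `cell_provider()` to `provider()` reflects that the same `ProviderWrapper` now fronts two traits: `CellProvider`, as before, and the new `NextEpochProvider`. A hypothetical sketch of a call site exercising both faces; the helper function and its setup are assumptions, not code from this PR:

```rust
use ckb_chain_spec::consensus::Consensus;
use ckb_error::Error;
use ckb_store::{ChainDB, ChainStore};
use ckb_types::{
    core::{cell::CellProvider, EpochExt, HeaderView},
    packed::OutPoint,
};

/// Hypothetical helper, not part of this PR.
fn epoch_for_next_block(
    store: &ChainDB,
    consensus: &Consensus,
    tip_header: &HeaderView,
    out_point: &OutPoint,
) -> Result<EpochExt, Error> {
    let provider = store.provider();

    // CellProvider face: the same cell lookup callers used before the rename.
    let _status = provider.cell(out_point, false);

    // NextEpochProvider face: feeds Consensus::next_epoch_ext, which now
    // returns Result<NextEpoch, Error> instead of Option<EpochExt>.
    Ok(consensus.next_epoch_ext(&provider, tip_header)?.unwrap_epoch())
}
```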
7 changes: 4 additions & 3 deletions tx-pool/src/process.rs
@@ -116,9 +116,10 @@ impl TxPoolService {
) -> (Vec<UncleBlockView>, EpochExt, u64) {
let consensus = snapshot.consensus();
let tip_header = snapshot.tip_header();
let last_epoch = snapshot.get_current_epoch_ext().expect("current epoch ext");
let next_epoch_ext = snapshot.next_epoch_ext(consensus, &last_epoch, tip_header);
let current_epoch = next_epoch_ext.unwrap_or(last_epoch);
let next_epoch_ext = snapshot
.next_epoch_ext(consensus, tip_header)
.expect("current epoch ext");
let current_epoch = next_epoch_ext.unwrap_epoch();
let candidate_number = tip_header.number() + 1;

let mut guard = block_assembler.candidate_uncles.lock().await;
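This hunk leans on a property of the new API worth spelling out: when `tip_header` is not the last block of its epoch, `next_epoch_ext` returns `NextEpoch::Last` carrying the tip's current epoch rather than an error, so `unwrap_epoch()` always yields the epoch the candidate block belongs to and the old `get_current_epoch_ext` + `unwrap_or` dance disappears. A small hypothetical helper making the two cases explicit (not part of this PR):

```rust
use ckb_chain_spec::consensus::NextEpoch;
use ckb_types::core::EpochNumber;

/// Hypothetical helper, not part of this PR: the epoch a candidate block built
/// on the tip would belong to, plus whether an epoch boundary was crossed.
fn candidate_epoch(next: NextEpoch) -> (EpochNumber, bool) {
    match next {
        // The tip closed its epoch: the candidate block opens the newly computed one.
        NextEpoch::New(epoch) => (epoch.number(), true),
        // The tip is mid-epoch: the candidate block stays in the tip's epoch.
        NextEpoch::Last(epoch) => (epoch.number(), false),
    }
}
```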
13 changes: 5 additions & 8 deletions util/dao/src/lib.rs
@@ -101,15 +101,12 @@ impl<'a, CS: ChainStore<'a>> DaoCalculator<'a, CS, DataLoaderWrapper<'a, CS>> {
// issuance for each block(which will only be issued on chain
// after the finalization delay), not the capacities generated
// in the cellbase of current block.
let parent_block_epoch = self
.store
.get_block_epoch_index(&parent.hash())
.and_then(|index| self.store.get_epoch_ext(&index))
.ok_or(DaoError::InvalidHeader)?;

let current_block_epoch = self
.store
.next_epoch_ext(&self.consensus, &parent_block_epoch, &parent)
.unwrap_or(parent_block_epoch);
.consensus
.next_epoch_ext(&self.store.provider(), &parent)
.map_err(|_| DaoError::InvalidHeader)?
.unwrap_epoch();
let current_block_number = parent.number() + 1;
let current_g2 = current_block_epoch.secondary_block_issuance(
current_block_number,
2 changes: 1 addition & 1 deletion util/snapshot/src/lib.rs
@@ -177,7 +177,7 @@ impl<'a> ChainStore<'a> for Snapshot {

impl CellProvider for Snapshot {
fn cell(&self, out_point: &OutPoint, with_data: bool) -> CellStatus {
self.store.cell_provider().cell(out_point, with_data)
self.store.provider().cell(out_point, with_data)
}
}
