blockstore: send duplicate proofs for chained merkle root conflicts
AshwinSekar committed Feb 26, 2024
1 parent e74d5cc commit 0527675
Showing 7 changed files with 186 additions and 1 deletion.
23 changes: 23 additions & 0 deletions core/src/window_service.rs
@@ -169,6 +169,11 @@ fn run_check_duplicate(
shred_slot,
&root_bank,
);
let chained_merkle_conflict_duplicate_proofs = cluster_nodes::check_feature_activation(
&feature_set::chained_merkle_conflict_duplicate_proofs::id(),
shred_slot,
&root_bank,
);
let (shred1, shred2) = match shred {
PossibleDuplicateShred::LastIndexConflict(shred, conflict)
| PossibleDuplicateShred::ErasureConflict(shred, conflict) => {
@@ -196,6 +201,24 @@ fn run_check_duplicate(
return Ok(());
}
}
PossibleDuplicateShred::ChainedMerkleRootConflict(shred, conflict) => {
if chained_merkle_conflict_duplicate_proofs {
// Although this proof could be stored immediately upon detection, we wait
// until here in order to check the feature flag, since storing it in
// blockstore can preclude the detection of other duplicate proofs in this slot
if blockstore.has_duplicate_shreds_in_slot(shred_slot) {
return Ok(());
}
blockstore.store_duplicate_slot(
shred_slot,
conflict.clone(),
shred.clone().into_payload(),
)?;
(shred, conflict)
} else {
return Ok(());
}
}
PossibleDuplicateShred::Exists(shred) => {
// Unlike the other cases we have to wait until here to decide to handle the duplicate and store
// in blockstore. This is because the duplicate could have been part of the same insert batch,
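The order of checks in the new ChainedMerkleRootConflict arm matters: feature gate first, then the per-slot dedupe, then storage. A minimal, self-contained sketch of that flow (stand-in types and names, not the validator's actual API):

use std::collections::HashSet;

// Stand-in for PossibleDuplicateShred::ChainedMerkleRootConflict(shred, conflict).
struct ChainedConflict {
    slot: u64,
    original_payload: Vec<u8>,
    conflict_payload: Vec<u8>,
}

#[derive(Default)]
struct ProofStore {
    slots_with_proofs: HashSet<u64>, // stand-in for blockstore's duplicate-slot column
}

impl ProofStore {
    // Returns the payload pair to forward as a duplicate proof, or None if dropped.
    fn handle(&mut self, dup: ChainedConflict, feature_active: bool) -> Option<(Vec<u8>, Vec<u8>)> {
        if !feature_active {
            return None; // feature flag not yet active for this slot's bank
        }
        if !self.slots_with_proofs.insert(dup.slot) {
            return None; // one proof per slot, mirroring has_duplicate_shreds_in_slot
        }
        Some((dup.original_payload, dup.conflict_payload))
    }
}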
130 changes: 130 additions & 0 deletions ledger/src/blockstore.rs
@@ -146,6 +147,7 @@ pub enum PossibleDuplicateShred {
LastIndexConflict(/* original */ Shred, /* conflict */ Vec<u8>), // The index of this shred conflicts with `slot_meta.last_index`
ErasureConflict(/* original */ Shred, /* conflict */ Vec<u8>), // The coding shred has a conflict in the erasure_meta
MerkleRootConflict(/* original */ Shred, /* conflict */ Vec<u8>), // Merkle root conflict in the same fec set
ChainedMerkleRootConflict(/* original */ Shred, /* conflict */ Vec<u8>), // Merkle root chaining conflict with previous fec set
}

impl PossibleDuplicateShred {
@@ -155,6 +156,7 @@ impl PossibleDuplicateShred {
Self::LastIndexConflict(shred, _) => shred.slot(),
Self::ErasureConflict(shred, _) => shred.slot(),
Self::MerkleRootConflict(shred, _) => shred.slot(),
Self::ChainedMerkleRootConflict(shred, _) => shred.slot(),
}
}
}
@@ -1283,6 +1285,18 @@ impl Blockstore {
return false;
}
}

// Check that the chaining between our current shred, the previous fec_set
// and the next fec_set is consistent
if !self.check_chained_merkle_root_consistency(
just_received_shreds,
&erasure_set,
merkle_root_metas,
&shred,
duplicate_shreds,
) {
return false;
}
}

let erasure_meta_entry = erasure_metas.entry(erasure_set).or_insert_with(|| {
@@ -1517,6 +1531,18 @@ impl Blockstore {
return Err(InsertDataShredError::InvalidShred);
}
}

// Check that the chaining between our current shred, the previous fec_set
// and the next fec_set is consistent
if !self.check_chained_merkle_root_consistency(
just_inserted_shreds,
&erasure_set,
merkle_root_metas,
&shred,
duplicate_shreds,
) {
return Err(InsertDataShredError::InvalidShred);
}
}

let newly_completed_data_sets = self.insert_data_shred(
@@ -1648,6 +1674,110 @@ impl Blockstore {
false
}

/// Returns true if there is no chaining conflict between
/// the `shred` and `merkle_root_meta` of the next or previous
/// FEC set, or if shreds from the next or previous set are
/// yet to be received.
///
/// Otherwise, returns false and adds a duplicate proof to
/// `duplicate_shreds`.
fn check_chained_merkle_root_consistency(
&self,
just_inserted_shreds: &HashMap<ShredId, Shred>,
erasure_set: &ErasureSetId,
merkle_root_metas: &HashMap<ErasureSetId, WorkingEntry<MerkleRootMeta>>,
shred: &Shred,
duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
) -> bool {
let (slot, fec_set_index) = erasure_set.store_key();

let next_erasure_set = ErasureSetId::new(slot, fec_set_index + 1);
if let Some(next_merkle_root_meta) =
merkle_root_metas.get(&next_erasure_set).map(AsRef::as_ref)
{
let next_shred_id = ShredId::new(
slot,
next_merkle_root_meta.first_received_shred_index(),
next_merkle_root_meta.first_received_shred_type(),
);
let next_shred =
Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, next_shred_id)
.expect("Shred indicated by merkle root meta must exist")
.into_owned();
let next_shred = Shred::new_from_serialized_shred(next_shred)
.expect("Shred indicated by merkle root meta should deserialize");

if !self.check_chaining(shred, &next_shred, duplicate_shreds) {
return false;
}
}

if fec_set_index == 0 {
// Although the first fec set chains to the last fec set of the parent block,
// if this chain is incorrect we do not know which block is the duplicate until
// votes are received. We instead delay this check until the block reaches
// duplicate confirmation.
return true;
}
let prev_erasure_set = ErasureSetId::new(slot, fec_set_index - 1);
if let Some(prev_merkle_root_meta) =
merkle_root_metas.get(&prev_erasure_set).map(AsRef::as_ref)
{
let prev_shred_id = ShredId::new(
slot,
prev_merkle_root_meta.first_received_shred_index(),
prev_merkle_root_meta.first_received_shred_type(),
);
let prev_shred =
Self::get_shred_from_just_inserted_or_db(self, just_inserted_shreds, prev_shred_id)
.expect("Shred indicated by merkle root meta must exist")
.into_owned();
let prev_shred = Shred::new_from_serialized_shred(prev_shred)
.expect("Shred indicated by merkle root meta should deserialize");
if !self.check_chaining(&prev_shred, shred, duplicate_shreds) {
return false;
}
}

true
}

/// Checks that `next_shred`'s chained merkle root equals `prev_shred`'s merkle root.
///
/// Returns true if there is no conflict; otherwise appends a proof to
/// `duplicate_shreds` (unless one already exists for this slot) and returns false.
fn check_chaining(
&self,
prev_shred: &Shred,
next_shred: &Shred,
duplicate_shreds: &mut Vec<PossibleDuplicateShred>,
) -> bool {
let merkle_root = prev_shred.merkle_root().ok();
let chained_merkle_root = next_shred.chained_merkle_root().ok();
if merkle_root == chained_merkle_root {
return true;
}
warn!(
"Received conflicting chained merkle roots for slot: {},
shred {:?} type {:?} has merkle root {:?}, however
next shred {:?} type {:?} chains to merkle root {:?}. Reporting as duplicate",
prev_shred.slot(),
prev_shred.erasure_set(),
prev_shred.shred_type(),
merkle_root,
next_shred.erasure_set(),
next_shred.shred_type(),
chained_merkle_root,
);

if !self.has_duplicate_shreds_in_slot(prev_shred.slot()) {
duplicate_shreds.push(PossibleDuplicateShred::ChainedMerkleRootConflict(
prev_shred.clone(),
next_shred.payload().clone(),
));
}
false
}

fn should_insert_data_shred(
&self,
shred: &Shred,
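As a compact model of the invariant enforced by check_chained_merkle_root_consistency and check_chaining above, here is a self-contained sketch (toy 32-byte hash instead of the ledger's Hash type):

type Hash32 = [u8; 32]; // toy stand-in for solana_sdk::hash::Hash

// prev_root: merkle root of FEC set N; next_chained: the chained merkle root
// carried by the first received shred of FEC set N + 1.
fn chains_correctly(prev_root: Option<Hash32>, next_chained: Option<Hash32>) -> bool {
    // Mirrors `merkle_root().ok() == chained_merkle_root().ok()` in check_chaining:
    // two unreadable roots (both None) compare equal and never report a conflict.
    prev_root == next_chained
}

fn main() {
    // A mismatch across consecutive FEC sets is exactly what gets reported as duplicate.
    assert!(chains_correctly(Some([1; 32]), Some([1; 32])));
    assert!(!chains_correctly(Some([1; 32]), Some([2; 32])));
    assert!(chains_correctly(None, None));
}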
5 changes: 5 additions & 0 deletions ledger/src/shred.rs
@@ -286,6 +286,10 @@ impl ShredId {
pub(crate) struct ErasureSetId(Slot, /*fec_set_index:*/ u32);

impl ErasureSetId {
pub(crate) fn new(slot: Slot, fec_set_index: u32) -> Self {
Self(slot, fec_set_index)
}

pub(crate) fn slot(&self) -> Slot {
self.0
}
@@ -342,6 +346,7 @@ impl Shred {
dispatch!(pub(crate) fn erasure_shard_index(&self) -> Result<usize, Error>);

dispatch!(pub fn into_payload(self) -> Vec<u8>);
dispatch!(pub fn chained_merkle_root(&self) -> Result<Hash, Error>);
dispatch!(pub fn merkle_root(&self) -> Result<Hash, Error>);
dispatch!(pub fn payload(&self) -> &Vec<u8>);
dispatch!(pub fn sanitize(&self) -> Result<(), Error>);
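The new dispatch! line presumably expands to a method that forwards the call to whichever variant the Shred enum holds; a rough sketch of the assumed expansion (the macro's actual output may differ):

impl Shred {
    pub fn chained_merkle_root(&self) -> Result<Hash, Error> {
        match self {
            Self::ShredCode(shred) => shred.chained_merkle_root(),
            Self::ShredData(shred) => shred.chained_merkle_root(),
        }
    }
}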
10 changes: 9 additions & 1 deletion ledger/src/shred/merkle.rs
@@ -181,6 +181,14 @@ impl ShredData {
Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true)?)
}

pub(super) fn chained_merkle_root(&self) -> Result<Hash, Error> {
let offset = self.chained_merkle_root_offset()?;
self.payload
.get(offset..offset + SIZE_OF_MERKLE_ROOT)
.map(Hash::new)
.ok_or(Error::InvalidPayloadSize(self.payload.len()))
}

fn set_chained_merkle_root(&mut self, chained_merkle_root: &Hash) -> Result<(), Error> {
let offset = self.chained_merkle_root_offset()?;
let Some(buffer) = self.payload.get_mut(offset..offset + SIZE_OF_MERKLE_ROOT) else {
@@ -328,7 +336,7 @@ impl ShredCode {
Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /*chained:*/ true)?)
}

-    fn chained_merkle_root(&self) -> Result<Hash, Error> {
+    pub(super) fn chained_merkle_root(&self) -> Result<Hash, Error> {
let offset = self.chained_merkle_root_offset()?;
self.payload
.get(offset..offset + SIZE_OF_MERKLE_ROOT)
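Both accessors read a fixed 32-byte hash out of the raw payload at a layout-dependent offset. A self-contained sketch of that slice-at-offset pattern (read_root_at is a hypothetical helper; the toy error carries the payload length, as Error::InvalidPayloadSize does):

const SIZE_OF_MERKLE_ROOT: usize = 32; // a merkle root is a 32-byte hash

fn read_root_at(payload: &[u8], offset: usize) -> Result<[u8; 32], usize> {
    payload
        .get(offset..offset + SIZE_OF_MERKLE_ROOT)
        .map(|bytes| <[u8; 32]>::try_from(bytes).expect("slice is exactly 32 bytes"))
        .ok_or(payload.len()) // payload too short for the chained-root field
}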
7 changes: 7 additions & 0 deletions ledger/src/shred/shred_code.rs
@@ -47,6 +47,13 @@ impl ShredCode {
}
}

pub(super) fn chained_merkle_root(&self) -> Result<Hash, Error> {
match self {
Self::Legacy(_) => Err(Error::InvalidShredType),
Self::Merkle(shred) => shred.chained_merkle_root(),
}
}

pub(super) fn merkle_root(&self) -> Result<Hash, Error> {
match self {
Self::Legacy(_) => Err(Error::InvalidShredType),
7 changes: 7 additions & 0 deletions ledger/src/shred/shred_data.rs
@@ -41,6 +41,13 @@ impl ShredData {
}
}

pub(super) fn chained_merkle_root(&self) -> Result<Hash, Error> {
match self {
Self::Legacy(_) => Err(Error::InvalidShredType),
Self::Merkle(shred) => shred.chained_merkle_root(),
}
}

pub(super) fn merkle_root(&self) -> Result<Hash, Error> {
match self {
Self::Legacy(_) => Err(Error::InvalidShredType),
5 changes: 5 additions & 0 deletions sdk/src/feature_set.rs
@@ -776,6 +776,10 @@ pub mod enable_gossip_duplicate_proof_ingestion {
solana_sdk::declare_id!("FNKCMBzYUdjhHyPdsKG2LSmdzH8TCHXn3ytj8RNBS4nG");
}

pub mod chained_merkle_conflict_duplicate_proofs {
solana_sdk::declare_id!("chaie9S2zVfuxJKNRGkyTDokLwWxx6kD2ZLsqQHaDD8");
}

pub mod enable_chained_merkle_shreds {
solana_sdk::declare_id!("7uZBkJXJ1HkuP6R3MJfZs7mLwymBcDbKdqbF51ZWLier");
}
@@ -975,6 +979,7 @@ lazy_static! {
(enable_gossip_duplicate_proof_ingestion::id(), "enable gossip duplicate proof ingestion #32963"),
(enable_chained_merkle_shreds::id(), "Enable chained Merkle shreds #34916"),
(remove_rounding_in_fee_calculation::id(), "Removing unwanted rounding in fee calculation #34982"),
(chained_merkle_conflict_duplicate_proofs::id(), "generate duplicate proofs for chained merkle root conflicts #"),
/*************** ADD NEW FEATURES HERE ***************/
]
.iter()
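A hedged sketch of how a declared feature id like the one above is typically consulted (stand-in types; real activation is tracked per epoch through the root bank, which this toy ignores):

use std::collections::HashMap;

type FeatureId = [u8; 32]; // stand-in for the declared pubkey

struct FeatureSet {
    activated_at: HashMap<FeatureId, u64>, // feature id -> slot at which it activated
}

impl FeatureSet {
    // Rough analogue of cluster_nodes::check_feature_activation in window_service.
    fn active_for_slot(&self, id: &FeatureId, shred_slot: u64) -> bool {
        self.activated_at
            .get(id)
            .map_or(false, |&activated| shred_slot >= activated)
    }
}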
