removes unused code from duplicate-shreds (#30329)

behzadnouri committed Feb 15, 2023
1 parent a9ad0f2 commit eede50c
Showing 3 changed files with 20 additions and 58 deletions.
12 changes: 8 additions & 4 deletions gossip/src/crds_gossip.rs
@@ -13,7 +13,7 @@ use {
         crds_gossip_pull::{CrdsFilter, CrdsGossipPull, ProcessPullStats},
         crds_gossip_push::CrdsGossipPush,
         crds_value::{CrdsData, CrdsValue},
-        duplicate_shred::{self, DuplicateShredIndex, LeaderScheduleFn, MAX_DUPLICATE_SHREDS},
+        duplicate_shred::{self, DuplicateShredIndex, MAX_DUPLICATE_SHREDS},
         legacy_contact_info::LegacyContactInfo as ContactInfo,
         ping_pong::PingCache,
     },
@@ -22,6 +22,7 @@ use {
     rayon::ThreadPool,
     solana_ledger::shred::Shred,
     solana_sdk::{
+        clock::Slot,
         hash::Hash,
         pubkey::Pubkey,
         signature::{Keypair, Signer},
@@ -88,15 +89,18 @@ impl CrdsGossip {
         self.push.new_push_messages(pubkey, &self.crds, now, stakes)
     }

-    pub(crate) fn push_duplicate_shred(
+    pub(crate) fn push_duplicate_shred<F>(
         &self,
         keypair: &Keypair,
         shred: &Shred,
         other_payload: &[u8],
-        leader_schedule: Option<impl LeaderScheduleFn>,
+        leader_schedule: Option<F>,
         // Maximum serialized size of each DuplicateShred chunk payload.
         max_payload_size: usize,
-    ) -> Result<(), duplicate_shred::Error> {
+    ) -> Result<(), duplicate_shred::Error>
+    where
+        F: FnOnce(Slot) -> Option<Pubkey>,
+    {
         let pubkey = keypair.pubkey();
         // Skip if there are already records of duplicate shreds for this slot.
         let shred_slot = shred.slot();
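For readers skimming the diff: after this change, push_duplicate_shred accepts any one-shot closure mapping a slot to its leader, rather than the LeaderScheduleFn trait alias. A minimal standalone sketch of the new call shape, with a hypothetical function and simplified types (u64 standing in for Slot, String for Pubkey), not the crate's actual API:

    fn push_duplicate_shred_like<F>(leader_schedule: Option<F>) -> Option<String>
    where
        F: FnOnce(u64) -> Option<String>,
    {
        // Consume the closure at most once to resolve the slot leader.
        leader_schedule.and_then(|f| f(42))
    }

    fn main() {
        // Any closure of the right shape satisfies the bound...
        let got = push_duplicate_shred_like(Some(|slot| Some(format!("leader-{slot}"))));
        assert_eq!(got.as_deref(), Some("leader-42"));
        // ...and None opts out of the leader lookup entirely.
        assert!(push_duplicate_shred_like(None::<fn(u64) -> Option<String>>).is_none());
    }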
64 changes: 11 additions & 53 deletions gossip/src/duplicate_shred.rs
@@ -24,10 +24,6 @@ const DUPLICATE_SHRED_HEADER_SIZE: usize = 63;
 pub(crate) type DuplicateShredIndex = u16;
 pub(crate) const MAX_DUPLICATE_SHREDS: DuplicateShredIndex = 512;

-/// Function returning leader at a given slot.
-pub trait LeaderScheduleFn: FnOnce(Slot) -> Option<Pubkey> {}
-impl<F> LeaderScheduleFn for F where F: FnOnce(Slot) -> Option<Pubkey> {}
-
 #[derive(Clone, Debug, PartialEq, Eq, AbiExample, Deserialize, Serialize)]
 pub struct DuplicateShred {
     pub(crate) from: Pubkey,
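The deleted trait is the "poor man's trait alias" idiom: an empty trait plus a blanket impl, so that every closure with the matching signature implements it automatically. A self-contained sketch of the idiom and of the direct bound that replaces it (illustrative names and simplified types, not the crate's):

    // The removed idiom: name a closure bound via an empty trait + blanket impl.
    trait LeaderScheduleLike: FnOnce(u64) -> Option<String> {}
    impl<F> LeaderScheduleLike for F where F: FnOnce(u64) -> Option<String> {}

    fn lookup_with_alias(f: impl LeaderScheduleLike) -> Option<String> {
        f(7)
    }

    // The replacement this commit adopts: spell the FnOnce bound out directly.
    fn lookup_with_bound<F: FnOnce(u64) -> Option<String>>(f: F) -> Option<String> {
        f(7)
    }

    fn main() {
        // Non-capturing closures are Copy, so the same closure can be passed twice.
        let schedule = |slot: u64| Some(format!("leader-{slot}"));
        assert_eq!(lookup_with_alias(schedule), lookup_with_bound(schedule));
    }

Since both spellings accept exactly the same closures, deleting the alias loses nothing once its only users are rewritten.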
@@ -93,11 +89,10 @@ pub enum Error {
 // Asserts that the two shreds can indicate duplicate proof for
 // the same triplet of (slot, shred-index, and shred-type_), and
 // that they have valid signatures from the slot leader.
-fn check_shreds(
-    leader_schedule: Option<impl LeaderScheduleFn>,
-    shred1: &Shred,
-    shred2: &Shred,
-) -> Result<(), Error> {
+fn check_shreds<F>(leader_schedule: Option<F>, shred1: &Shred, shred2: &Shred) -> Result<(), Error>
+where
+    F: FnOnce(Slot) -> Option<Pubkey>,
+{
     if shred1.slot() != shred2.slot() {
         Err(Error::SlotMismatch)
     } else if shred1.index() != shred2.index() {
@@ -120,54 +115,17 @@
     }
 }

-/// Splits a DuplicateSlotProof into DuplicateShred
-/// chunks with a size limit on each chunk.
-pub fn from_duplicate_slot_proof(
-    proof: &DuplicateSlotProof,
-    self_pubkey: Pubkey, // Pubkey of my node broadcasting crds value.
-    leader_schedule: Option<impl LeaderScheduleFn>,
-    wallclock: u64,
-    max_size: usize, // Maximum serialized size of each DuplicateShred.
-) -> Result<impl Iterator<Item = DuplicateShred>, Error> {
-    if proof.shred1 == proof.shred2 {
-        return Err(Error::InvalidDuplicateSlotProof);
-    }
-    let shred1 = Shred::new_from_serialized_shred(proof.shred1.clone())?;
-    let shred2 = Shred::new_from_serialized_shred(proof.shred2.clone())?;
-    check_shreds(leader_schedule, &shred1, &shred2)?;
-    let (slot, shred_index, shred_type) = (shred1.slot(), shred1.index(), shred1.shred_type());
-    let data = bincode::serialize(proof)?;
-    let chunk_size = if DUPLICATE_SHRED_HEADER_SIZE < max_size {
-        max_size - DUPLICATE_SHRED_HEADER_SIZE
-    } else {
-        return Err(Error::InvalidSizeLimit);
-    };
-    let chunks: Vec<_> = data.chunks(chunk_size).map(Vec::from).collect();
-    let num_chunks = u8::try_from(chunks.len())?;
-    let chunks = chunks
-        .into_iter()
-        .enumerate()
-        .map(move |(i, chunk)| DuplicateShred {
-            from: self_pubkey,
-            wallclock,
-            slot,
-            shred_index,
-            shred_type,
-            num_chunks,
-            chunk_index: i as u8,
-            chunk,
-        });
-    Ok(chunks)
-}
-
-pub(crate) fn from_shred(
+pub(crate) fn from_shred<F>(
     shred: Shred,
     self_pubkey: Pubkey, // Pubkey of my node broadcasting crds value.
     other_payload: Vec<u8>,
-    leader_schedule: Option<impl LeaderScheduleFn>,
+    leader_schedule: Option<F>,
     wallclock: u64,
     max_size: usize, // Maximum serialized size of each DuplicateShred.
-) -> Result<impl Iterator<Item = DuplicateShred>, Error> {
+) -> Result<impl Iterator<Item = DuplicateShred>, Error>
+where
+    F: FnOnce(Slot) -> Option<Pubkey>,
+{
     if shred.payload() == &other_payload {
         return Err(Error::InvalidDuplicateShreds);
     }
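The deleted from_duplicate_slot_proof also documents the chunking arithmetic that from_shred presumably still performs: each DuplicateShred carries a fixed-size header, so the payload budget per chunk is max_size minus that header, and a max_size at or below the header size is rejected. A standalone sketch of just that arithmetic (the constant and names are illustrative stand-ins, not the crate's API):

    // Illustrative stand-in for DUPLICATE_SHRED_HEADER_SIZE above.
    const HEADER_SIZE: usize = 63;

    fn split_into_chunks(data: &[u8], max_size: usize) -> Result<Vec<Vec<u8>>, String> {
        // The payload budget is whatever remains after the fixed header;
        // zero or less mirrors Error::InvalidSizeLimit in the deleted code.
        let chunk_size = max_size
            .checked_sub(HEADER_SIZE)
            .filter(|&n| n > 0)
            .ok_or_else(|| "max_size must exceed the header size".to_string())?;
        Ok(data.chunks(chunk_size).map(Vec::from).collect())
    }

    fn main() {
        let proof_bytes = vec![0u8; 1000];
        let chunks = split_into_chunks(&proof_bytes, 512).unwrap();
        // ceil(1000 / (512 - 63)) = ceil(1000 / 449) = 3 chunks.
        assert_eq!(chunks.len(), 3);
    }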
@@ -333,7 +291,7 @@ pub(crate) mod tests {
         );
     }

-    pub fn new_rand_shred<R: Rng>(
+    pub(crate) fn new_rand_shred<R: Rng>(
         rng: &mut R,
         next_shred_index: u32,
         shredder: &Shredder,
2 changes: 1 addition & 1 deletion gossip/src/duplicate_shred_listener.rs
@@ -85,7 +85,7 @@ mod tests {
     }

     impl FakeHandler {
-        pub fn new(count: Arc<AtomicU32>) -> Self {
+        fn new(count: Arc<AtomicU32>) -> Self {
             Self { count }
         }
     }
