returns Error when Shred::sanitize fails (solana-labs#24653)
Including the error in the output makes it possible to debug why Shred::sanitize fails.
behzadnouri authored and jeffwashington committed Jun 29, 2022
1 parent 0bc1eec commit 63a17d5
Showing 4 changed files with 158 additions and 104 deletions.
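
The diff for the fourth changed file, where Shred::sanitize itself is modified, did not load in this view. Judging from the call sites in the three diffs shown below, the change amounts to switching the return type from bool to a Result carrying the failure reason. A hedged sketch of that shape follows; the struct fields, error variants, and limits are illustrative assumptions, not solana-ledger's actual definitions.

```rust
// Hedged sketch of the signature change implied by the call sites below.
// Variant names, fields, and constants are illustrative assumptions,
// not solana-ledger's actual definitions.
#[derive(Debug)]
pub enum Error {
    InvalidPayloadSize(usize),
    InvalidShredIndex(u32),
}

pub struct Shred {
    payload: Vec<u8>,
    index: u32,
}

const MAX_PAYLOAD_SIZE: usize = 1228; // illustrative limit
const MAX_DATA_SHREDS_PER_SLOT: u32 = 32_768; // illustrative limit

impl Shred {
    // Before this commit: fn sanitize(&self) -> bool
    // After: each failing check reports *why* the shred is invalid.
    pub fn sanitize(&self) -> Result<(), Error> {
        if self.payload.len() > MAX_PAYLOAD_SIZE {
            return Err(Error::InvalidPayloadSize(self.payload.len()));
        }
        if self.index >= MAX_DATA_SHREDS_PER_SLOT {
            return Err(Error::InvalidShredIndex(self.index));
        }
        Ok(())
    }
}

fn main() {
    let shred = Shred { payload: vec![0u8; 2048], index: 7 };
    // Prints Err(InvalidPayloadSize(2048)) instead of a bare `false`.
    println!("{:?}", shred.sanitize());
}
```
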
core/src/window_service.rs (2 changes: 1 addition & 1 deletion)
@@ -193,7 +193,7 @@ pub(crate) fn should_retransmit_and_persist(
} else if shred.index() >= MAX_DATA_SHREDS_PER_SLOT as u32 {
inc_new_counter_warn!("streamer-recv_window-shred_index_overrun", 1);
false
-} else if !shred.sanitize() {
+} else if shred.sanitize().is_err() {
inc_new_counter_warn!("streamer-recv_window-invalid-shred", 1);
false
} else {
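
Call sites like the one above that only need a pass/fail answer can collapse the richer Result back to a boolean with .is_ok() or .is_err(). A minimal standalone sketch of that pattern, with an invented check in place of the real window-service logic:

```rust
// Minimal sketch: a Result-returning check still backs a boolean gate.
// `validate` and `CheckError` are illustrative, not the window_service code.
#[derive(Debug)]
enum CheckError {
    IndexOverrun(u32),
}

const MAX_INDEX: u32 = 32_768; // illustrative limit

fn validate(index: u32) -> Result<(), CheckError> {
    if index >= MAX_INDEX {
        return Err(CheckError::IndexOverrun(index));
    }
    Ok(())
}

fn should_retransmit(index: u32) -> bool {
    // `.is_ok()` collapses the richer Result back to the old bool semantics;
    // callers that need the reason can match on the Err instead.
    validate(index).is_ok()
}

fn main() {
    assert!(should_retransmit(10));
    assert!(!should_retransmit(40_000));
}
```
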
gossip/src/duplicate_shred.rs (6 changes: 3 additions & 3 deletions)
@@ -3,7 +3,7 @@ use {
itertools::Itertools,
solana_ledger::{
blockstore_meta::DuplicateSlotProof,
-shred::{Shred, ShredError, ShredType},
+shred::{self, Shred, ShredType},
},
solana_sdk::{
clock::Slot,
@@ -55,8 +55,8 @@ pub enum Error {
InvalidSignature,
#[error("invalid size limit")]
InvalidSizeLimit,
#[error("invalid shred")]
InvalidShred(#[from] ShredError),
#[error(transparent)]
InvalidShred(#[from] shred::Error),
#[error("number of chunks mismatch")]
NumChunksMismatch,
#[error("missing data chunk")]
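
The #[error(transparent)] plus #[from] pair above lets a shred::Error be converted into the gossip error type by the ? operator while its Display message is forwarded unchanged, so the underlying sanitize failure shows up verbatim in gossip-side errors. A small self-contained sketch of that thiserror pattern (the crate the diff already uses), with stand-in types rather than the real solana modules:

```rust
use thiserror::Error;

// Stand-in for the library error; not the real solana_ledger shred::Error.
mod shred {
    #[derive(Debug, thiserror::Error)]
    pub enum Error {
        #[error("invalid payload size: {0}")]
        InvalidPayloadSize(usize),
    }
}

#[derive(Debug, Error)]
enum DuplicateShredError {
    // `transparent` forwards Display and source() to the wrapped error;
    // `#[from]` generates From<shred::Error>, so `?` converts automatically.
    #[error(transparent)]
    InvalidShred(#[from] shred::Error),
}

fn check(payload_len: usize) -> Result<(), DuplicateShredError> {
    let sanitized: Result<(), shred::Error> =
        Err(shred::Error::InvalidPayloadSize(payload_len));
    sanitized?; // shred::Error becomes DuplicateShredError here
    Ok(())
}

fn main() {
    let err = check(2048).unwrap_err();
    // Prints the inner message unchanged: "invalid payload size: 2048"
    println!("{err}");
}
```
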
ledger/src/blockstore.rs (18 changes: 9 additions & 9 deletions)
@@ -12,8 +12,8 @@ use {
leader_schedule_cache::LeaderScheduleCache,
next_slots_iterator::NextSlotsIterator,
shred::{
-max_ticks_per_n_shreds, ErasureSetId, Result as ShredResult, Shred, ShredId, ShredType,
-Shredder, SHRED_PAYLOAD_SIZE,
+self, max_ticks_per_n_shreds, ErasureSetId, Shred, ShredId, ShredType, Shredder,
+SHRED_PAYLOAD_SIZE,
},
slot_stats::{ShredSource, SlotsStats},
},
@@ -1367,7 +1367,7 @@ impl Blockstore {
}

fn should_insert_coding_shred(shred: &Shred, last_root: &RwLock<u64>) -> bool {
-shred.is_code() && shred.sanitize() && shred.slot() > *last_root.read().unwrap()
+shred.is_code() && shred.sanitize().is_ok() && shred.slot() > *last_root.read().unwrap()
}

fn insert_coding_shred(
@@ -1381,7 +1381,7 @@

// Assert guaranteed by integrity checks on the shred that happen before
// `insert_coding_shred` is called
-assert!(shred.is_code() && shred.sanitize());
+assert!(shred.is_code() && shred.sanitize().is_ok());

// Commit step: commit all changes to the mutable structures at once, or none at all.
// We don't want only a subset of these changes going through.
@@ -1430,7 +1430,7 @@
} else {
false
};
-if !shred.sanitize() {
+if let Err(err) = shred.sanitize() {
let leader_pubkey = leader_schedule
.and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None));

@@ -1439,8 +1439,8 @@
(
"error",
format!(
"Leader {:?}, slot {}: received invalid shred",
leader_pubkey, slot,
"Leader {:?}, slot {}: received invalid shred: {:?}",
leader_pubkey, slot, err,
),
String
)
@@ -1637,7 +1637,7 @@ impl Blockstore {
&self,
slot: Slot,
start_index: u64,
-) -> ShredResult<Vec<Shred>> {
+) -> std::result::Result<Vec<Shred>, shred::Error> {
self.slot_data_iterator(slot, start_index)
.expect("blockstore couldn't fetch iterator")
.map(|data| Shred::new_from_serialized_shred(data.1.to_vec()))
@@ -1693,7 +1693,7 @@ impl Blockstore {
&self,
slot: Slot,
start_index: u64,
-) -> ShredResult<Vec<Shred>> {
+) -> std::result::Result<Vec<Shred>, shred::Error> {
self.slot_coding_iterator(slot, start_index)
.expect("blockstore couldn't fetch iterator")
.map(|code| Shred::new_from_serialized_shred(code.1.to_vec()))
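
Two idioms in the blockstore diff are worth spelling out: `if let Err(err) = shred.sanitize()` keeps the early-exit shape of the old boolean check while capturing the reason for the datapoint log, and the new Result return type falls out of collect(), which turns an iterator of Results into a single Result that stops at the first deserialization error. A standalone sketch with illustrative types (not the blockstore API):

```rust
// Illustrative stand-ins; not solana-ledger types.
#[derive(Debug)]
enum ShredError {
    InvalidPayloadSize(usize),
}

fn parse_shred(payload: &[u8]) -> Result<u64, ShredError> {
    if payload.len() < 8 {
        return Err(ShredError::InvalidPayloadSize(payload.len()));
    }
    Ok(u64::from_le_bytes(payload[..8].try_into().unwrap()))
}

fn main() {
    // Old style: `if !ok { ... }` could only say *that* a shred was bad.
    // New style: pattern-match the Err to report *why*.
    if let Err(err) = parse_shred(&[0u8; 3]) {
        eprintln!("received invalid shred: {err:?}");
    }

    // collect() over an iterator of Results yields Result<Vec<_>, _>:
    // Ok with all values, or the first error encountered.
    let payloads: Vec<Vec<u8>> = vec![vec![1u8; 8], vec![2u8; 8]];
    let shreds: Result<Vec<u64>, ShredError> =
        payloads.iter().map(|p| parse_shred(p)).collect();
    println!("{shreds:?}");
}
```
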
(The diff for the fourth changed file did not load and is not shown here.)
