diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs
index 1d5d4b5fe5413..41c00169e5412 100644
--- a/client/consensus/babe/src/lib.rs
+++ b/client/consensus/babe/src/lib.rs
@@ -543,7 +543,7 @@ fn aux_storage_cleanup<C: HeaderMetadata<Block> + HeaderBackend<Block>, Block: B
 	let stale_forks = match client.expand_forks(&notification.stale_heads) {
 		Ok(stale_forks) => stale_forks,
 		Err((stale_forks, e)) => {
-			warn!(target: "babe", "{:?}", e,);
+			warn!(target: LOG_TARGET, "{:?}", e);
 			stale_forks
 		},
 	};
@@ -1511,11 +1511,12 @@ where
 		if let Some(next_epoch_descriptor) = next_epoch_digest {
 			old_epoch_changes = Some((*epoch_changes).clone());
 
-			let viable_epoch = epoch_changes
+			let mut viable_epoch = epoch_changes
 				.viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot))
 				.ok_or_else(|| {
 					ConsensusError::ClientImport(Error::<Block>::FetchEpoch(parent_hash).into())
-				})?;
+				})?
+				.into_cloned();
 
 			let epoch_config = next_config_digest
 				.map(Into::into)
@@ -1528,6 +1529,48 @@ where
 				log::Level::Info
 			};
 
+			if viable_epoch.as_ref().end_slot() <= slot {
+				// some epochs must have been skipped as our current slot
+				// fits outside the current epoch. we will figure out
+				// which epoch it belongs to and we will re-use the same
+				// data for that epoch
+				let mut epoch_data = viable_epoch.as_mut();
+				let skipped_epochs =
+					*slot.saturating_sub(epoch_data.start_slot) / epoch_data.duration;
+
+				// NOTE: notice that we are only updating a local copy of the `Epoch`, this
+				// makes it so that when we insert the next epoch into `EpochChanges` below
+				// (after incrementing it), it will use the correct epoch index and start slot.
+				// we do not update the original epoch that will be re-used because there might
+				// be other forks (that we haven't imported) where the epoch isn't skipped, and
+				// to import those forks we want to keep the original epoch data. not updating
+				// the original epoch works because when we search the tree for which epoch to
+				// use for a given slot, we will search in-depth with the predicate
+				// `epoch.start_slot <= slot` which will still match correctly without updating
+				// `start_slot` to the correct value as below.
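// Illustration only, not part of this diff: a minimal, self-contained sketch of the
// skipped-epoch arithmetic performed right below. `skip_to` is a hypothetical helper;
// the real code updates the `Epoch` fields (`epoch_index`, `start_slot`, `duration`)
// in place.
fn skip_to(slot: u64, epoch_index: u64, start_slot: u64, duration: u64) -> (u64, u64) {
	let skipped_epochs = slot.saturating_sub(start_slot) / duration;
	(epoch_index + skipped_epochs, start_slot + skipped_epochs * duration)
}

fn main() {
	// with an epoch length of 6 slots, epoch 1 starts at slot 7; a block authored at
	// slot 19 skips epoch 2 entirely and lands in epoch 3, which starts at slot 19
	assert_eq!(skip_to(19, 1, 7, 6), (3, 19));
}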
+				let epoch_index = epoch_data.epoch_index.checked_add(skipped_epochs).expect(
+					"epoch number is u64; it should be strictly smaller than number of slots; \
+					slots relate in some way to wall clock time; \
+					if u64 is not enough we should crash for safety; qed.",
+				);
+
+				let start_slot = skipped_epochs
+					.checked_mul(epoch_data.duration)
+					.and_then(|skipped_slots| epoch_data.start_slot.checked_add(skipped_slots))
+					.expect(
+						"slot number is u64; it should relate in some way to wall clock time; \
+						if u64 is not enough we should crash for safety; qed.",
+					);
+
+				warn!(
+					target: LOG_TARGET,
+					"👶 Epoch(s) skipped: from {} to {}", epoch_data.epoch_index, epoch_index,
+				);
+
+				epoch_data.epoch_index = epoch_index;
+				epoch_data.start_slot = Slot::from(start_slot);
+			}
+
 			log!(
 				target: LOG_TARGET,
 				log_level,
diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs
index 84ac3d7341199..f74864a003e2a 100644
--- a/client/consensus/babe/src/tests.rs
+++ b/client/consensus/babe/src/tests.rs
@@ -28,6 +28,7 @@ use rand_chacha::{
 use sc_block_builder::{BlockBuilder, BlockBuilderProvider};
 use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer};
 use sc_consensus::{BoxBlockImport, BoxJustificationImport};
+use sc_consensus_epochs::{EpochIdentifier, EpochIdentifierPosition};
 use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging;
 use sc_network_test::{Block as TestBlock, *};
 use sp_application_crypto::key_types::BABE;
@@ -1109,3 +1110,263 @@ async fn obsolete_blocks_aux_data_cleanup() {
 	// Present C4, C5
 	assert!(aux_data_check(&fork3_hashes, true));
 }
+
+#[tokio::test]
+async fn allows_skipping_epochs() {
+	let mut net = BabeTestNet::new(1);
+
+	let peer = net.peer(0);
+	let data = peer.data.as_ref().expect("babe link set up during initialization");
+
+	let client = peer.client().as_client();
+	let mut block_import = data.block_import.lock().take().expect("import set up during init");
+
+	let mut proposer_factory = DummyFactory {
+		client: client.clone(),
+		config: data.link.config.clone(),
+		epoch_changes: data.link.epoch_changes.clone(),
+		mutator: Arc::new(|_, _| ()),
+	};
+
+	let epoch_changes = data.link.epoch_changes.clone();
+	let epoch_length = data.link.config.epoch_length;
+
+	// we create all of the blocks in epoch 0 as well as a block in epoch 1
+	let blocks = propose_and_import_blocks(
+		&client,
+		&mut proposer_factory,
+		&mut block_import,
+		client.chain_info().genesis_hash,
+		epoch_length as usize + 1,
+	)
+	.await;
+
+	// the first block in epoch 0 (#1) announces both epoch 0 and 1 (this is a
+	// special genesis epoch)
+	let epoch0 = epoch_changes
+		.shared_data()
+		.epoch(&EpochIdentifier {
+			position: EpochIdentifierPosition::Genesis0,
+			hash: blocks[0],
+			number: 1,
+		})
+		.unwrap()
+		.clone();
+
+	assert_eq!(epoch0.epoch_index, 0);
+	assert_eq!(epoch0.start_slot, Slot::from(1));
+
+	let epoch1 = epoch_changes
+		.shared_data()
+		.epoch(&EpochIdentifier {
+			position: EpochIdentifierPosition::Genesis1,
+			hash: blocks[0],
+			number: 1,
+		})
+		.unwrap()
+		.clone();
+
+	assert_eq!(epoch1.epoch_index, 1);
+	assert_eq!(epoch1.start_slot, Slot::from(epoch_length + 1));
+
+	// the first block in epoch 1 (#7) announces epoch 2. we will be skipping
+	// this epoch and therefore re-using its data for epoch 3
+	let epoch2 = epoch_changes
+		.shared_data()
+		.epoch(&EpochIdentifier {
+			position: EpochIdentifierPosition::Regular,
+			hash: blocks[epoch_length as usize],
+			number: epoch_length + 1,
+		})
+		.unwrap()
+		.clone();
+
+	assert_eq!(epoch2.epoch_index, 2);
+	assert_eq!(epoch2.start_slot, Slot::from(epoch_length * 2 + 1));
+
+	// we now author a block that belongs to epoch 3, thereby skipping epoch 2
+	let last_block = client.expect_header(*blocks.last().unwrap()).unwrap();
+	let block = propose_and_import_block(
+		&last_block,
+		Some((epoch_length * 3 + 1).into()),
+		&mut proposer_factory,
+		&mut block_import,
+	)
+	.await;
+
+	// and the first block in epoch 3 (#8) announces epoch 4
+	let epoch4 = epoch_changes
+		.shared_data()
+		.epoch(&EpochIdentifier {
+			position: EpochIdentifierPosition::Regular,
+			hash: block,
+			number: epoch_length + 2,
+		})
+		.unwrap()
+		.clone();
+
+	assert_eq!(epoch4.epoch_index, 4);
+	assert_eq!(epoch4.start_slot, Slot::from(epoch_length * 4 + 1));
+
+	// if we try to get the epoch data for a slot in epoch 3
+	let epoch3 = epoch_changes
+		.shared_data()
+		.epoch_data_for_child_of(
+			descendent_query(&*client),
+			&block,
+			epoch_length + 2,
+			(epoch_length * 3 + 2).into(),
+			|slot| Epoch::genesis(&data.link.config, slot),
+		)
+		.unwrap()
+		.unwrap();
+
+	// we get back the data for epoch 2
+	assert_eq!(epoch3, epoch2);
+
+	// but if we try to get the epoch data for a slot in epoch 4
+	let epoch4_ = epoch_changes
+		.shared_data()
+		.epoch_data_for_child_of(
+			descendent_query(&*client),
+			&block,
+			epoch_length + 2,
+			(epoch_length * 4 + 1).into(),
+			|slot| Epoch::genesis(&data.link.config, slot),
+		)
+		.unwrap()
+		.unwrap();
+
+	// we get epoch 4 as expected
+	assert_eq!(epoch4, epoch4_);
+}
+
+#[tokio::test]
+async fn allows_skipping_epochs_on_some_forks() {
+	let mut net = BabeTestNet::new(1);
+
+	let peer = net.peer(0);
+	let data = peer.data.as_ref().expect("babe link set up during initialization");
+
+	let client = peer.client().as_client();
+	let mut block_import = data.block_import.lock().take().expect("import set up during init");
+
+	let mut proposer_factory = DummyFactory {
+		client: client.clone(),
+		config: data.link.config.clone(),
+		epoch_changes: data.link.epoch_changes.clone(),
+		mutator: Arc::new(|_, _| ()),
+	};
+
+	let epoch_changes = data.link.epoch_changes.clone();
+	let epoch_length = data.link.config.epoch_length;
+
+	// we create all of the blocks in epoch 0 as well as two blocks in epoch 1
+	let blocks = propose_and_import_blocks(
+		&client,
+		&mut proposer_factory,
+		&mut block_import,
+		client.chain_info().genesis_hash,
+		epoch_length as usize + 1,
+	)
+	.await;
+
+	// we now author a block that belongs to epoch 2, built on top of the last
+	// authored block in epoch 1.
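// A hedged aside on the slot arithmetic both tests above rely on (illustration only; the
// epoch length of 6 follows from the comments above, where block #7 is the first block of
// epoch 1, and the first slot of the chain is 1): the helpers added to
// `primitives/consensus/babe` at the end of this diff reproduce the boundaries the tests
// assert.
use sp_consensus_babe::{epoch_index, epoch_start_slot};
use sp_consensus_slots::Slot;

fn main() {
	let genesis_slot = Slot::from(1u64);
	let epoch_length = 6u64;

	// slot `epoch_length * 3 + 1` (= 19) belongs to epoch 3, i.e. epoch 2 is skipped over
	assert_eq!(epoch_index(Slot::from(epoch_length * 3 + 1), genesis_slot, epoch_length), 3);

	// and epoch 3 starts exactly at that slot
	assert_eq!(epoch_start_slot(3, genesis_slot, epoch_length), Slot::from(19u64));
}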
+	let last_block = client.expect_header(*blocks.last().unwrap()).unwrap();
+
+	let epoch2_block = propose_and_import_block(
+		&last_block,
+		Some((epoch_length * 2 + 1).into()),
+		&mut proposer_factory,
+		&mut block_import,
+	)
+	.await;
+
+	// if we try to get the epoch data for a slot in epoch 2, we get the data that
+	// was previously announced when epoch 1 started
+	let epoch2 = epoch_changes
+		.shared_data()
+		.epoch_data_for_child_of(
+			descendent_query(&*client),
+			&epoch2_block,
+			epoch_length + 2,
+			(epoch_length * 2 + 2).into(),
+			|slot| Epoch::genesis(&data.link.config, slot),
+		)
+		.unwrap()
+		.unwrap();
+
+	// we now author a block that belongs to epoch 3, built on top of the last
+	// authored block in epoch 1. authoring this block means we're skipping epoch 2
+	// entirely on this fork
+	let epoch3_block = propose_and_import_block(
+		&last_block,
+		Some((epoch_length * 3 + 1).into()),
+		&mut proposer_factory,
+		&mut block_import,
+	)
+	.await;
+
+	// if we try to get the epoch data for a slot in epoch 3
+	let epoch3_ = epoch_changes
+		.shared_data()
+		.epoch_data_for_child_of(
+			descendent_query(&*client),
+			&epoch3_block,
+			epoch_length + 2,
+			(epoch_length * 3 + 2).into(),
+			|slot| Epoch::genesis(&data.link.config, slot),
+		)
+		.unwrap()
+		.unwrap();
+
+	// we get back the data for epoch 2
+	assert_eq!(epoch3_, epoch2);
+
+	// if we try to get the epoch data for a slot in epoch 4 in the fork
+	// where we skipped epoch 2, we should get the epoch data for epoch 4
+	// that was announced at the beginning of epoch 3
+	let epoch_data = epoch_changes
+		.shared_data()
+		.epoch_data_for_child_of(
+			descendent_query(&*client),
+			&epoch3_block,
+			epoch_length + 2,
+			(epoch_length * 4 + 1).into(),
+			|slot| Epoch::genesis(&data.link.config, slot),
+		)
+		.unwrap()
+		.unwrap();
+
+	assert!(epoch_data != epoch3_);
+
+	// if we try to get the epoch data for a slot in epoch 4 in the fork
+	// where we didn't skip epoch 2, we should get back the data for epoch 3,
+	// that was announced when epoch 2 started in that fork
+	let epoch_data = epoch_changes
+		.shared_data()
+		.epoch_data_for_child_of(
+			descendent_query(&*client),
+			&epoch2_block,
+			epoch_length + 2,
+			(epoch_length * 4 + 1).into(),
+			|slot| Epoch::genesis(&data.link.config, slot),
+		)
+		.unwrap()
+		.unwrap();
+
+	assert!(epoch_data != epoch3_);
+
+	let epoch3 = epoch_changes
+		.shared_data()
+		.epoch(&EpochIdentifier {
+			position: EpochIdentifierPosition::Regular,
+			hash: epoch2_block,
+			number: epoch_length + 2,
+		})
+		.unwrap()
+		.clone();
+
+	assert_eq!(epoch_data, epoch3);
+}
diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs
index 28ae4bfdc9dad..eb87d65b0549f 100644
--- a/frame/babe/src/lib.rs
+++ b/frame/babe/src/lib.rs
@@ -588,10 +588,19 @@ impl<T: Config> Pallet<T> {
 			return
 		}
 
-		// Update epoch index
-		let epoch_index = EpochIndex::<T>::get()
-			.checked_add(1)
-			.expect("epoch indices will never reach 2^64 before the death of the universe; qed");
+		// Update epoch index.
+		//
+		// NOTE: we figure out the epoch index from the slot, which may not
+		// necessarily be contiguous if the chain was offline for more than
+		// `T::EpochDuration` slots. When skipping from epoch N to e.g. N+4, we
+		// will be using the randomness and authorities for that epoch that had
+		// been previously announced for epoch N+1, and the randomness collected
+		// during the current epoch (N) will be used for epoch N+5.
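// Illustration only, not part of this diff: the NOTE above in numbers, with assumed values
// (a genesis slot of 100 as in the pallet test later in this diff, and an epoch duration of
// 10, which is not necessarily the mock runtime's configuration). Deriving the index from
// the slot jumps straight to the correct epoch after an offline gap, whereas the previous
// `checked_add(1)` approach could only ever advance the index by one per enacted change.
fn main() {
	let genesis_slot = 100u64;
	let epoch_duration = 10u64;

	// the chain resumes at a slot four epoch lengths past genesis
	let current_slot = genesis_slot + epoch_duration * 4;

	// new scheme: index derived from the slot
	let new_index = current_slot.saturating_sub(genesis_slot) / epoch_duration;
	assert_eq!(new_index, 4);

	// old scheme: the last enacted index (say, 1) was simply incremented by one,
	// so it would lag the wall clock by two epochs in this scenario
	let old_index = 1u64 + 1;
	assert_eq!(old_index, 2);
}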
+		let epoch_index = sp_consensus_babe::epoch_index(
+			CurrentSlot::<T>::get(),
+			GenesisSlot::<T>::get(),
+			T::EpochDuration::get(),
+		);
 
 		EpochIndex::<T>::put(epoch_index);
 		Authorities::<T>::put(authorities);
@@ -638,11 +647,16 @@ impl<T: Config> Pallet<T> {
 		}
 	}
 
-	/// Finds the start slot of the current epoch. only guaranteed to
-	/// give correct results after `initialize` of the first block
-	/// in the chain (as its result is based off of `GenesisSlot`).
+	/// Finds the start slot of the current epoch.
+	///
+	/// Only guaranteed to give correct results after `initialize` of the first
+	/// block in the chain (as its result is based off of `GenesisSlot`).
 	pub fn current_epoch_start() -> Slot {
-		Self::epoch_start(EpochIndex::<T>::get())
+		sp_consensus_babe::epoch_start_slot(
+			EpochIndex::<T>::get(),
+			GenesisSlot::<T>::get(),
+			T::EpochDuration::get(),
+		)
 	}
 
 	/// Produces information about the current epoch.
@@ -666,9 +680,15 @@ impl<T: Config> Pallet<T> {
 			if u64 is not enough we should crash for safety; qed.",
 		);
 
+		let start_slot = sp_consensus_babe::epoch_start_slot(
+			next_epoch_index,
+			GenesisSlot::<T>::get(),
+			T::EpochDuration::get(),
+		);
+
 		Epoch {
 			epoch_index: next_epoch_index,
-			start_slot: Self::epoch_start(next_epoch_index),
+			start_slot,
 			duration: T::EpochDuration::get(),
 			authorities: NextAuthorities::<T>::get().to_vec(),
 			randomness: NextRandomness::<T>::get(),
@@ -680,17 +700,6 @@ impl<T: Config> Pallet<T> {
 		}
 	}
 
-	fn epoch_start(epoch_index: u64) -> Slot {
-		// (epoch_index * epoch_duration) + genesis_slot
-
-		const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \
-			if u64 is not enough we should crash for safety; qed.";
-
-		let epoch_start = epoch_index.checked_mul(T::EpochDuration::get()).expect(PROOF);
-
-		epoch_start.checked_add(*GenesisSlot::<T>::get()).expect(PROOF).into()
-	}
-
 	fn deposit_consensus<U: Encode>(new: U) {
 		let log = DigestItem::Consensus(BABE_ENGINE_ID, new.encode());
 		<frame_system::Pallet<T>>::deposit_log(log)
diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs
index d4132e6378540..0b8a02547144b 100644
--- a/frame/babe/src/tests.rs
+++ b/frame/babe/src/tests.rs
@@ -948,3 +948,34 @@ fn generate_equivocation_report_blob() {
 		println!("equivocation_proof.encode(): {:?}", equivocation_proof.encode());
 	});
 }
+
+#[test]
+fn skipping_over_epochs_works() {
+	let mut ext = new_test_ext(3);
+
+	ext.execute_with(|| {
+		let epoch_duration: u64 = <Test as Config>::EpochDuration::get();
+
+		// this sets the genesis slot to 100;
+		let genesis_slot = 100;
+		go_to_block(1, genesis_slot);
+
+		// we will author all blocks from epoch #0 and arrive at a point where
+		// we are in epoch #1. we should already have the randomness ready that
+		// will be used in epoch #2
+		progress_to_block(epoch_duration + 1);
+		assert_eq!(EpochIndex::<Test>::get(), 1);
+
+		// genesis randomness is an array of zeros
+		let randomness_for_epoch_2 = NextRandomness::<Test>::get();
+		assert!(randomness_for_epoch_2 != [0; 32]);
+
+		// we will now create a block for a slot that is part of epoch #4.
+		// we should appropriately increment the epoch index as well as re-use
+		// the randomness from epoch #2 on epoch #4
+		go_to_block(System::block_number() + 1, genesis_slot + epoch_duration * 4);
+
+		assert_eq!(EpochIndex::<Test>::get(), 4);
+		assert_eq!(Randomness::<Test>::get(), randomness_for_epoch_2);
+	});
+}
diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs
index 621ab859b914f..cb44afcb8d4e4 100644
--- a/primitives/consensus/babe/src/lib.rs
+++ b/primitives/consensus/babe/src/lib.rs
@@ -360,6 +360,25 @@ pub struct Epoch {
 	pub config: BabeEpochConfiguration,
 }
 
+/// Returns the epoch index the given slot belongs to.
+pub fn epoch_index(slot: Slot, genesis_slot: Slot, epoch_duration: u64) -> u64 {
+	*slot.saturating_sub(genesis_slot) / epoch_duration
+}
+
+/// Returns the first slot at the given epoch index.
+pub fn epoch_start_slot(epoch_index: u64, genesis_slot: Slot, epoch_duration: u64) -> Slot {
+	// (epoch_index * epoch_duration) + genesis_slot
+
+	const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \
+		if u64 is not enough we should crash for safety; qed.";
+
+	epoch_index
+		.checked_mul(epoch_duration)
+		.and_then(|slot| slot.checked_add(*genesis_slot))
+		.expect(PROOF)
+		.into()
+}
+
 sp_api::decl_runtime_apis! {
 	/// API necessary for block authorship with BABE.
 	#[api_version(2)]
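// A short usage sketch of the two helpers added to `primitives/consensus/babe` above
// (illustration only; the genesis slot and epoch duration are assumed values):
// `epoch_start_slot` maps an index back to the first slot of that epoch, so the two
// functions round-trip, and slots before the genesis slot saturate to epoch 0.
use sp_consensus_babe::{epoch_index, epoch_start_slot};
use sp_consensus_slots::Slot;

fn main() {
	let genesis_slot = Slot::from(1_000u64);
	let epoch_duration = 2_400u64; // assumed for illustration

	let slot = Slot::from(1_000 + epoch_duration * 7 + 3);
	let index = epoch_index(slot, genesis_slot, epoch_duration);
	assert_eq!(index, 7);

	// the first slot of the reported epoch is never past the slot itself
	assert!(epoch_start_slot(index, genesis_slot, epoch_duration) <= slot);

	// slots before the genesis slot saturate to epoch 0
	assert_eq!(epoch_index(Slot::from(5u64), genesis_slot, epoch_duration), 0);
}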