From 80f7c78b296a48eda3d3e69d266396482679f35a Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Mon, 18 Oct 2021 20:47:34 +0400 Subject: [PATCH] fix: ensure that accumulated orphan chain data is committed before header validation (#3462) Description --- - commit orphan chain accumulated data before orphan header validation so that accumulated target difficulty is available for chain comparison - use hash for reorg chain rewind because the height value of the candidate block is not yet checked against the fork height. - improve test infra to allow blockchain tests to be easily constructed - add a failing test for the fix in the PR Motivation and Context --- Observed errors: ``` 2021-10-10 09:17:19.987800805 [c::cs::database] WARN Discarding orphan 0291ba64d777e46016ca7b055bdf6979c1fe11bf31b78a7c20d507cb69c3f293 because it has an invalid header: FatalStorageError("The requested chain_header_in_all_chains was not found via hash:9c2c5734a783d891b617905e27e861ac1595760d5c22335c8d31feb7dc38a2a5 in the database") ``` The above error occurs because the orphan accumulated data was set in the `DbTransaction` but was not actually committed to lmdb. This manifests as a validation error and was not picked up by other tests because the validator was mocked out. The solution in this PR is to write the transactions as needed. This means that in a rollback situation, the accumulated orphan chain data will remain. That _should_ be ok since that data can be overwritten/replaced on the next reorg evaluation if needed. This demonstrates the shortcomings of the current approach and the need for the calling code in `BlockchainDatabase` to have access to ACID DB transactions. I have played with the idea of abstracting and generifying atomic transactions but found the need for [GAT](https://blog.rust-lang.org/2021/08/03/GATs-stabilization-push.html) to get it to work. 
We no longer have or use any other BlockchainBackend impl other than LMDB so leaking LMDB `Read,WriteTransaction` may be acceptable. Passing the transaction in to every function in BlockchainBackend may be a deal breaker. How Has This Been Tested? --- - Issue reproduced in `handle_possible_reorg::it_links_many_orphan_branches_to_main_chain` - Manually (not necessarily tested, the base node runs for a day and is still at the tip) Consolidated some of the existing "blockchain builder" test infra that was duplicated and added a declarative macro for building chains `let specs = block_spec!(["A->GB"], ["B->A", difficulty: 100]);` --- .../horizon_state_synchronization.rs | 2 +- .../src/chain_storage/accumulated_data.rs | 2 +- .../src/chain_storage/block_add_result.rs | 13 +- .../src/chain_storage/blockchain_database.rs | 382 +++++++++++++----- .../core/src/chain_storage/db_transaction.rs | 12 +- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 117 +++--- base_layer/core/src/lib.rs | 1 + .../core/src/test_helpers/block_spec.rs | 238 +++++++++++ .../core/src/test_helpers/blockchain.rs | 39 +- base_layer/core/src/test_helpers/mod.rs | 65 +-- .../validation/block_validators/body_only.rs | 14 +- .../core/src/validation/chain_balance.rs | 2 +- base_layer/core/src/validation/mocks.rs | 4 +- base_layer/core/src/validation/test.rs | 6 +- base_layer/core/src/validation/traits.rs | 4 +- base_layer/core/tests/block_validation.rs | 10 +- 16 files changed, 642 insertions(+), 269 deletions(-) create mode 100644 base_layer/core/src/test_helpers/block_spec.rs diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs index 3511ade206..7e10d59745 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs +++ 
b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs @@ -604,10 +604,10 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { .sync_validators .final_horizon_state .validate( + &*self.db().clone().into_inner().db_read_access()?, header.height(), &pruned_utxo_sum, &pruned_kernel_sum, - &*self.db().clone().into_inner().db_read_access()?, ) .map_err(HorizonSyncError::FinalStateValidationFailed)?; diff --git a/base_layer/core/src/chain_storage/accumulated_data.rs b/base_layer/core/src/chain_storage/accumulated_data.rs index 5c050290d4..c7eef86bd0 100644 --- a/base_layer/core/src/chain_storage/accumulated_data.rs +++ b/base_layer/core/src/chain_storage/accumulated_data.rs @@ -273,7 +273,7 @@ impl BlockHeaderAccumulatedDataBuilder<'_> { .hash .ok_or_else(|| ChainStorageError::InvalidOperation("hash not provided".to_string()))?; - if hash == self.previous_accum.hash { + if hash == previous_accum.hash { return Err(ChainStorageError::InvalidOperation( "Hash was set to the same hash that is contained in previous accumulated data".to_string(), )); diff --git a/base_layer/core/src/chain_storage/block_add_result.rs b/base_layer/core/src/chain_storage/block_add_result.rs index 26b0107ed0..4c9a783f17 100644 --- a/base_layer/core/src/chain_storage/block_add_result.rs +++ b/base_layer/core/src/chain_storage/block_add_result.rs @@ -46,6 +46,14 @@ impl BlockAddResult { matches!(self, BlockAddResult::Ok(_)) } + pub fn is_chain_reorg(&self) -> bool { + matches!(self, BlockAddResult::ChainReorg { .. 
}) + } + + pub fn is_orphaned(&self) -> bool { + matches!(self, BlockAddResult::OrphanBlock) + } + pub fn assert_added(&self) -> ChainBlock { match self { BlockAddResult::ChainReorg { added, removed } => panic!( @@ -60,10 +68,7 @@ impl BlockAddResult { } pub fn assert_orphaned(&self) { - match self { - BlockAddResult::OrphanBlock => (), - _ => panic!("Result was not orphaned"), - } + assert!(self.is_orphaned(), "Result was not orphaned"); } pub fn assert_reorg(&self, num_added: usize, num_removed: usize) { diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 6053ccef95..34a4cce5da 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -849,7 +849,7 @@ where B: BlockchainBackend fn insert_block(&self, block: Arc) -> Result<(), ChainStorageError> { let mut db = self.db_write_access()?; let mut txn = DbTransaction::new(); - insert_block(&mut txn, block)?; + insert_best_block(&mut txn, block)?; db.write(txn) } @@ -1239,8 +1239,8 @@ fn add_block( ) } -// Adds a new block onto the chain tip. -fn insert_block(txn: &mut DbTransaction, block: Arc) -> Result<(), ChainStorageError> { +/// Adds a new block onto the chain tip and sets it to the best block. 
+fn insert_best_block(txn: &mut DbTransaction, block: Arc) -> Result<(), ChainStorageError> { let block_hash = block.accumulated_data().hash.clone(); debug!( target: LOG_TARGET, @@ -1432,6 +1432,8 @@ fn rewind_to_height( let metadata = db.fetch_chain_metadata()?; let expected_block_hash = metadata.best_block().clone(); let last_block_height = metadata.height_of_longest_chain(); + // We use the cmp::max value here because we'll only delete headers here and leave remaining headers to be deleted + // with the whole block let steps_back = last_header_height .checked_sub(cmp::max(last_block_height, height)) .ok_or_else(|| { @@ -1461,9 +1463,8 @@ fn rewind_to_height( }); // Delete blocks - let mut steps_back = last_block_height.saturating_sub(height); - // No blocks to remove + // No blocks to remove, no need to update the best block if steps_back == 0 { db.write(txn)?; return Ok(vec![]); @@ -1477,16 +1478,17 @@ fn rewind_to_height( last_block_height - steps_back ); - let prune_past_horizon = metadata.is_pruned_node() && steps_back > metadata.pruning_horizon(); + let effective_pruning_horizon = metadata.height_of_longest_chain() - metadata.pruned_height(); + let prune_past_horizon = metadata.is_pruned_node() && steps_back > effective_pruning_horizon; if prune_past_horizon { warn!( target: LOG_TARGET, - "WARNING, reorg past pruning horizon, rewinding back to 0" + "WARNING, reorg past pruning horizon (more than {} blocks back), rewinding back to 0", + effective_pruning_horizon ); - steps_back = metadata.pruning_horizon(); + steps_back = effective_pruning_horizon; height = 0; } - let chain_header = db.fetch_chain_header_by_height(height)?; for h in 0..steps_back { info!(target: LOG_TARGET, "Deleting block {}", last_block_height - h,); @@ -1519,6 +1521,7 @@ fn rewind_to_height( } } + let chain_header = db.fetch_chain_header_by_height(height)?; // Update metadata debug!( target: LOG_TARGET, @@ -1585,7 +1588,7 @@ fn handle_possible_reorg( } // Check the accumulated difficulty 
of the best fork chain compared to the main chain. - let fork_header = find_strongest_orphan_tip(new_tips, chain_strength_comparer)?.ok_or_else(|| { + let fork_header = find_strongest_orphan_tip(new_tips, chain_strength_comparer).ok_or_else(|| { // This should never happen because a block is always added to the orphan pool before // checking, but just in case warn!( @@ -1654,16 +1657,15 @@ fn handle_possible_reorg( // TODO: We already have the first link in this chain, can be optimized to exclude it let reorg_chain = get_orphan_link_main_chain(db, fork_header.hash())?; - let fork_height = reorg_chain + let fork_hash = reorg_chain .front() .expect("The new orphan block should be in the queue") - .block() - .header - .height - - 1; + .header() + .prev_hash + .clone(); let num_added_blocks = reorg_chain.len(); - let removed_blocks = reorganize_chain(db, block_validator, fork_height, &reorg_chain)?; + let removed_blocks = reorganize_chain(db, block_validator, fork_hash, &reorg_chain)?; let num_removed_blocks = removed_blocks.len(); // reorg is required when any blocks are removed or more than one are added @@ -1708,15 +1710,15 @@ fn handle_possible_reorg( fn reorganize_chain( backend: &mut T, block_validator: &dyn PostOrphanBodyValidation, - fork_height: u64, + fork_hash: HashOutput, chain: &VecDeque>, ) -> Result>, ChainStorageError> { - let removed_blocks = rewind_to_height(backend, fork_height)?; + let removed_blocks = rewind_to_hash(backend, fork_hash.clone())?; debug!( target: LOG_TARGET, - "Validate and add {} chain block(s) from height {}. Rewound blocks: [{}]", + "Validate and add {} chain block(s) from block {}. 
Rewound blocks: [{}]", chain.len(), - fork_height, + fork_hash.to_hex(), removed_blocks .iter() .map(|b| b.height().to_string()) @@ -1726,25 +1728,25 @@ fn reorganize_chain( for block in chain { let mut txn = DbTransaction::new(); - let block_hash_hex = block.accumulated_data().hash.to_hex(); - txn.delete_orphan(block.accumulated_data().hash.clone()); + let block_hash = block.hash().clone(); + txn.delete_orphan(block_hash.clone()); let chain_metadata = backend.fetch_chain_metadata()?; - if let Err(e) = block_validator.validate_body_for_valid_orphan(block, backend, &chain_metadata) { + if let Err(e) = block_validator.validate_body_for_valid_orphan(backend, block, &chain_metadata) { warn!( target: LOG_TARGET, "Orphan block {} ({}) failed validation during chain reorg: {:?}", block.header().height, - block_hash_hex, + block_hash.to_hex(), e ); - remove_orphan(backend, block.accumulated_data().hash.clone())?; + remove_orphan(backend, block_hash)?; info!(target: LOG_TARGET, "Restoring previous chain after failed reorg."); - restore_reorged_chain(backend, fork_height, removed_blocks)?; + restore_reorged_chain(backend, fork_hash, removed_blocks)?; return Err(e.into()); } - insert_block(&mut txn, block.clone())?; + insert_best_block(&mut txn, block.clone())?; // Failed to store the block - this should typically never happen unless there is a bug in the validator // (e.g. does not catch a double spend). In any case, we still need to restore the chain to a // good state before returning. @@ -1754,7 +1756,7 @@ fn reorganize_chain( "Failed to commit reorg chain: {:?}. 
Restoring last chain.", e ); - restore_reorged_chain(backend, fork_height, removed_blocks)?; + restore_reorged_chain(backend, fork_hash, removed_blocks)?; return Err(e); } } @@ -1773,10 +1775,10 @@ fn reorganize_chain( fn restore_reorged_chain( db: &mut T, - height: u64, + to_hash: HashOutput, previous_chain: Vec>, ) -> Result<(), ChainStorageError> { - let invalid_chain = rewind_to_height(db, height)?; + let invalid_chain = rewind_to_hash(db, to_hash)?; debug!( target: LOG_TARGET, "Removed {} blocks during chain restore: {:?}.", @@ -1790,7 +1792,7 @@ fn restore_reorged_chain( for block in previous_chain.into_iter().rev() { txn.delete_orphan(block.accumulated_data().hash.clone()); - insert_block(&mut txn, block)?; + insert_best_block(&mut txn, block)?; } db.write(txn)?; Ok(()) @@ -1810,10 +1812,11 @@ fn insert_orphan_and_find_new_tips( return Ok(vec![]); } - let mut txn = DbTransaction::new(); let parent = match db.fetch_orphan_chain_tip_by_hash(&block.header.prev_hash)? { Some(curr_parent) => { + let mut txn = DbTransaction::new(); txn.remove_orphan_chain_tip(block.header.prev_hash.clone()); + db.write(txn)?; info!( target: LOG_TARGET, "New orphan extends a chain in the current candidate tip set" @@ -1827,22 +1830,32 @@ fn insert_orphan_and_find_new_tips( Some(curr_parent) => { info!( target: LOG_TARGET, - "New orphan does not have a parent in the current tip set. Parent is {}", + "New orphan #{} ({}) does not have a parent in the current tip set. Parent is {}", + block.header.height, + hash.to_hex(), curr_parent.hash().to_hex() ); curr_parent }, None => { - info!( - target: LOG_TARGET, - "Orphan {} was not connected to any previous headers. Inserting as true orphan", - hash.to_hex() - ); - - if !db.contains(&DbKey::OrphanBlock(hash))? { + let hash_hex = hash.to_hex(); + if db.contains(&DbKey::OrphanBlock(hash))? 
{ + info!( + target: LOG_TARGET, + "Orphan #{} ({}) already found in orphan database", block.header.height, hash_hex + ); + } else { + info!( + target: LOG_TARGET, + "Orphan #{} ({}) was not connected to any previous headers. Inserting as true orphan", + block.header.height, + hash_hex + ); + + let mut txn = DbTransaction::new(); txn.insert_orphan(block); + db.write(txn)?; } - db.write(txn)?; return Ok(vec![]); }, }, @@ -1861,10 +1874,16 @@ fn insert_orphan_and_find_new_tips( let chain_header = chain_block.to_chain_header(); // Extend orphan chain tip. - txn.insert_chained_orphan(Arc::new(chain_block)); + let mut txn = DbTransaction::new(); + if !db.contains(&DbKey::OrphanBlock(chain_block.accumulated_data().hash.clone()))? { + txn.insert_orphan(chain_block.to_arc_block()); + } + txn.set_accumulated_data_for_orphan(chain_block.accumulated_data().clone()); + db.write(txn)?; - let tips = find_orphan_descendant_tips_of(&*db, &chain_header, validator, difficulty_calculator, &mut txn)?; + let tips = find_orphan_descendant_tips_of(db, chain_header, validator, difficulty_calculator)?; debug!(target: LOG_TARGET, "Found {} new orphan tips", tips.len()); + let mut txn = DbTransaction::new(); for new_tip in &tips { txn.insert_orphan_chain_tip(new_tip.hash().clone()); } @@ -1875,11 +1894,10 @@ fn insert_orphan_and_find_new_tips( // Find the tip set of any orphans that have hash as an ancestor fn find_orphan_descendant_tips_of( - db: &T, - prev_chain_header: &ChainHeader, + db: &mut T, + prev_chain_header: ChainHeader, validator: &dyn HeaderValidation, difficulty_calculator: &DifficultyCalculator, - txn: &mut DbTransaction, ) -> Result, ChainStorageError> { let children = db.fetch_orphan_children_of(prev_chain_header.hash().clone())?; if children.is_empty() { @@ -1889,11 +1907,19 @@ fn find_orphan_descendant_tips_of( prev_chain_header.height(), prev_chain_header.hash().to_hex() ); - return Ok(vec![prev_chain_header.clone()]); + return Ok(vec![prev_chain_header]); } let mut res 
= vec![]; for child in children { + debug!( + target: LOG_TARGET, + "Validating header #{} ({}), descendant of #{} ({})", + child.header.height, + child.hash().to_hex(), + prev_chain_header.height(), + prev_chain_header.hash().to_hex() + ); match validator.validate(db, &child.header, difficulty_calculator) { Ok(achieved_target) => { let child_hash = child.hash(); @@ -1911,10 +1937,10 @@ fn find_orphan_descendant_tips_of( })?; // Set/overwrite accumulated data for this orphan block - txn.set_accumulated_data_for_orphan(chain_header.clone()); - - let children = - find_orphan_descendant_tips_of(db, &chain_header, validator, difficulty_calculator, txn)?; + let mut txn = DbTransaction::new(); + txn.set_accumulated_data_for_orphan(chain_header.accumulated_data().clone()); + db.write(txn)?; + let children = find_orphan_descendant_tips_of(db, chain_header, validator, difficulty_calculator)?; res.extend(children); }, Err(e) => { @@ -1925,7 +1951,9 @@ fn find_orphan_descendant_tips_of( child.hash().to_hex(), e ); + let mut txn = DbTransaction::new(); txn.delete_orphan(child.hash()); + db.write(txn)?; }, }; } @@ -1939,7 +1967,7 @@ fn remove_orphan(db: &mut T, hash: HashOutput) -> Result<( db.write(txn) } -/// Gets all blocks ordered from the block that connects (via prev_hash) to the main chain, to the orphan tip. 
// TODO: this would probably perform better if it reused the db transaction #[allow(clippy::ptr_arg)] fn get_orphan_link_main_chain( @@ -1971,7 +1999,7 @@ fn get_orphan_link_main_chain( fn find_strongest_orphan_tip( orphan_chain_tips: Vec, chain_strength_comparer: &dyn ChainStrengthComparer, -) -> Result, ChainStorageError> { +) -> Option { let mut best_block_header: Option = None; for tip in orphan_chain_tips { best_block_header = match best_block_header { @@ -1983,7 +2011,7 @@ fn find_strongest_orphan_tip( }; } - Ok(best_block_header) + best_block_header } // Perform a comprehensive search to remove all the minimum height orphans to maintain the configured orphan pool @@ -2092,24 +2120,29 @@ fn convert_to_option_bounds>(bounds: T) -> (Option, Opt mod test { use super::*; use crate::{ + block_specs, consensus::{ chain_strength_comparer::strongest_chain, consensus_constants::PowAlgorithmConstants, ConsensusConstantsBuilder, ConsensusManager, }, - test_helpers::blockchain::{ - create_chained_blocks, - create_main_chain, - create_new_blockchain, - create_orphan_chain, - create_test_blockchain_db, - TempDatabase, + test_helpers::{ + blockchain::{ + create_chained_blocks, + create_main_chain, + create_new_blockchain, + create_orphan_chain, + create_test_blockchain_db, + TempDatabase, + }, + BlockSpecs, }, validation::{header_validator::HeaderValidator, mocks::MockValidator}, }; - use std::collections::HashMap; + use std::{collections::HashMap, sync}; use tari_common::configuration::Network; + use tari_test_utils::unpack_enum; #[test] fn lmdb_fetch_monero_seeds() { @@ -2208,7 +2241,7 @@ mod test { let db = create_new_blockchain(); let validator = MockValidator::new(true); let genesis_block = db.fetch_block(0).unwrap().try_into_chain_block().map(Arc::new).unwrap(); - let (_, chain) = create_chained_blocks(&[("A->GB", 1, 120)], genesis_block); + let (_, chain) = create_chained_blocks(&[("A->GB", 1u64, 120u64)], genesis_block); let block = chain.get("A").unwrap().clone(); 
let mut access = db.db_write_access().unwrap(); let chain = insert_orphan_and_find_new_tips( @@ -2296,6 +2329,118 @@ mod test { } } + mod handle_possible_reorg { + use super::*; + + #[test] + fn it_links_many_orphan_branches_to_main_chain() { + let test = TestHarness::setup(); + + let (_, main_chain) = + create_main_chain(&test.db, block_specs!(["1a->GB"], ["2a->1a"], ["3a->2a"], ["4a->3a"])); + let genesis = main_chain.get("GB").unwrap().clone(); + + let fork_root = main_chain.get("1a").unwrap().clone(); + let (_, orphan_chain_b) = create_chained_blocks( + block_specs!(["2b->GB"], ["3b->2b"], ["4b->3b"], ["5b->4b"], ["6b->5b"]), + fork_root, + ); + + // Add orphans out of height order + for name in ["5b", "3b", "4b", "6b"] { + let block = orphan_chain_b.get(name).unwrap().clone(); + let result = test.handle_possible_reorg(block.to_arc_block()).unwrap(); + assert!(result.is_orphaned()); + } + + // Add chain c orphans branching from chain b + let fork_root = orphan_chain_b.get("3b").unwrap().clone(); + let (_, orphan_chain_c) = + create_chained_blocks(block_specs!(["4c->GB"], ["5c->4c"], ["6c->5c"], ["7c->6c"]), fork_root); + + for name in ["7c", "5c", "6c", "4c"] { + let block = orphan_chain_c.get(name).unwrap().clone(); + let result = test.handle_possible_reorg(block.to_arc_block()).unwrap(); + assert!(result.is_orphaned()); + } + + let fork_root = orphan_chain_c.get("6c").unwrap().clone(); + let (_, orphan_chain_d) = create_chained_blocks(block_specs!(["7d->GB", difficulty: 10]), fork_root); + + let block = orphan_chain_d.get("7d").unwrap().clone(); + let result = test.handle_possible_reorg(block.to_arc_block()).unwrap(); + assert!(result.is_orphaned()); + + // Now, connect the chain and check that the c branch is the tip + let block = orphan_chain_b.get("2b").unwrap().clone(); + let result = test.handle_possible_reorg(block.to_arc_block()).unwrap(); + result.assert_reorg(6, 3); + + { + // Check 2b was added + let access = test.db_write_access(); + let block = 
orphan_chain_b.get("2b").unwrap().clone(); + assert!(access.contains(&DbKey::BlockHash(block.hash().clone())).unwrap()); + + // Check 7d is the tip + let block = orphan_chain_d.get("7d").unwrap().clone(); + let tip = access.fetch_tip_header().unwrap(); + assert_eq!(tip.hash(), block.hash()); + let metadata = access.fetch_chain_metadata().unwrap(); + assert_eq!(metadata.best_block(), block.hash()); + assert_eq!(metadata.height_of_longest_chain(), block.height()); + assert!(access.contains(&DbKey::BlockHash(block.hash().clone())).unwrap()); + + let mut all_blocks = main_chain + .into_iter() + .chain(orphan_chain_b) + .chain(orphan_chain_c) + .chain(orphan_chain_d) + .collect::>(); + all_blocks.insert("GB".to_string(), genesis); + // Check the chain heights + let expected_chain = ["GB", "1a", "2b", "3b", "4c", "5c", "6c", "7d"]; + for (height, name) in expected_chain.iter().enumerate() { + let expected_block = all_blocks.get(*name).unwrap(); + unpack_enum!( + DbValue::BlockHeader(found_block) = + access.fetch(&DbKey::BlockHeader(height as u64)).unwrap().unwrap() + ); + assert_eq!(*found_block, *expected_block.header()); + } + } + } + + #[test] + fn it_errors_if_reorging_to_an_invalid_height() { + let test = TestHarness::setup(); + let (_, main_chain) = + create_main_chain(&test.db, block_specs!(["1a->GB"], ["2a->1a"], ["3a->2a"], ["4a->3a"])); + + let fork_root = main_chain.get("1a").unwrap().clone(); + let (_, orphan_chain_b) = + create_chained_blocks(block_specs!(["2b->GB", height: 10, difficulty: 10]), fork_root); + + let block = orphan_chain_b.get("2b").unwrap().clone(); + let err = test.handle_possible_reorg(block.to_arc_block()).unwrap_err(); + unpack_enum!(ChainStorageError::InvalidOperation(_) = err); + } + + #[test] + fn it_allows_orphan_blocks_with_any_height() { + let test = TestHarness::setup(); + let (_, main_chain) = create_main_chain(&test.db, block_specs!(["1a->GB", difficulty: 2])); + + let fork_root = main_chain.get("GB").unwrap().clone(); + let (_, 
orphan_chain_b) = create_chained_blocks(block_specs!(["1b->GB", height: 10]), fork_root); + + let block = orphan_chain_b.get("1b").unwrap().clone(); + test.handle_possible_reorg(block.to_arc_block()) + .unwrap() + .assert_orphaned(); + } + } + #[test] fn test_handle_possible_reorg_case1() { // Normal chain @@ -2684,33 +2829,71 @@ mod test { assert_eq!(accum_difficulty, values); } - #[allow(clippy::type_complexity)] - fn test_case_handle_possible_reorg( - blocks: &[(&str, u64, u64)], - ) -> Result<(Vec, HashMap>), ChainStorageError> { - let db = create_new_blockchain(); - let genesis_block = db.fetch_block(0).unwrap().try_into_chain_block().map(Arc::new).unwrap(); - let (block_names, chain) = create_chained_blocks(blocks, genesis_block); - let mock_validator = Box::new(MockValidator::new(true)); - // A real validator is needed here to test target difficulties + struct TestHarness { + db: BlockchainDatabase, + chain_strength_comparer: Box, + difficulty_calculator: DifficultyCalculator, + post_orphan_body_validator: Box>, + header_validator: Box>, + } - let consensus = ConsensusManager::builder(Network::LocalNet) - .add_consensus_constants( - ConsensusConstantsBuilder::new(Network::LocalNet) - .clear_proof_of_work() - .add_proof_of_work(PowAlgorithm::Sha3, PowAlgorithmConstants { - max_target_time: 1200, - min_difficulty: 1.into(), - max_difficulty: 100.into(), - target_time: 120, - }) - .build(), + impl TestHarness { + pub fn setup() -> Self { + let consensus = create_consensus_rules(); + let header_validator = Box::new(HeaderValidator::new(consensus)); + let mock_validator = Box::new(MockValidator::new(true)); + Self::new(header_validator, mock_validator) + } + + pub fn new( + header_validator: Box>, + post_orphan_body_validator: Box>, + ) -> Self { + let db = create_new_blockchain(); + let consensus = create_consensus_rules(); + let difficulty_calculator = DifficultyCalculator::new(consensus, Default::default()); + let chain_strength_comparer = 
strongest_chain().by_sha3_difficulty().build(); + Self { + db, + chain_strength_comparer, + difficulty_calculator, + header_validator, + post_orphan_body_validator, + } + } + + pub fn db_write_access(&self) -> sync::RwLockWriteGuard<'_, TempDatabase> { + self.db.db_write_access().unwrap() + } + + pub fn handle_possible_reorg(&self, block: Arc) -> Result { + let mut access = self.db_write_access(); + handle_possible_reorg( + &mut *access, + &*self.post_orphan_body_validator, + &*self.header_validator, + &self.difficulty_calculator, + &*self.chain_strength_comparer, + block, ) - .build(); + } + } + + #[allow(clippy::type_complexity)] + fn test_case_handle_possible_reorg>( + blocks: T, + ) -> Result<(Vec, HashMap>), ChainStorageError> { + let test = TestHarness::setup(); + // let db = create_new_blockchain(); + let genesis_block = test + .db + .fetch_block(0) + .unwrap() + .try_into_chain_block() + .map(Arc::new) + .unwrap(); + let (block_names, chain) = create_chained_blocks(blocks.into(), genesis_block); - let difficulty_calculator = DifficultyCalculator::new(consensus.clone(), Default::default()); - let header_validator = Box::new(HeaderValidator::new(consensus)); - let chain_strength_comparer = strongest_chain().by_sha3_difficulty().build(); let mut results = vec![]; for name in block_names { let block = chain.get(&name.to_string()).unwrap(); @@ -2720,15 +2903,24 @@ mod test { block.hash().to_hex(), block.header().prev_hash.to_hex() ); - results.push(handle_possible_reorg( - &mut *db.db_write_access()?, - &*mock_validator, - &*header_validator, - &difficulty_calculator, - &*chain_strength_comparer, - block.to_arc_block(), - )?); + results.push(test.handle_possible_reorg(block.to_arc_block()).unwrap()); } Ok((results, chain)) } + + fn create_consensus_rules() -> ConsensusManager { + ConsensusManager::builder(Network::LocalNet) + .add_consensus_constants( + ConsensusConstantsBuilder::new(Network::LocalNet) + .clear_proof_of_work() + 
.add_proof_of_work(PowAlgorithm::Sha3, PowAlgorithmConstants { + max_target_time: 1200, + min_difficulty: 1.into(), + max_difficulty: 100.into(), + target_time: 120, + }) + .build(), + ) + .build() + } } diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index e004682da6..e3bd5fc645 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -21,7 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use crate::{ blocks::{Block, BlockHeader}, - chain_storage::{error::ChainStorageError, ChainBlock, ChainHeader, MmrTree}, + chain_storage::{error::ChainStorageError, BlockHeaderAccumulatedData, ChainBlock, ChainHeader, MmrTree}, transactions::transaction::{TransactionKernel, TransactionOutput}, }; use croaring::Bitmap; @@ -214,9 +214,9 @@ impl DbTransaction { /// Sets accumulated data for the orphan block, "upgrading" the orphan block to a chained orphan. /// Any existing accumulated data is overwritten. /// The transaction will rollback and write will return an error if the orphan block does not exist. - pub fn set_accumulated_data_for_orphan(&mut self, chain_header: ChainHeader) -> &mut Self { + pub fn set_accumulated_data_for_orphan(&mut self, accumulated_data: BlockHeaderAccumulatedData) -> &mut Self { self.operations - .push(WriteOperation::SetAccumulatedDataForOrphan(chain_header)); + .push(WriteOperation::SetAccumulatedDataForOrphan(accumulated_data)); self } @@ -318,7 +318,7 @@ pub enum WriteOperation { header_hash: HashOutput, kernel_sum: Commitment, }, - SetAccumulatedDataForOrphan(ChainHeader), + SetAccumulatedDataForOrphan(BlockHeaderAccumulatedData), SetBestBlock { height: u64, hash: HashOutput, @@ -415,8 +415,8 @@ impl fmt::Display for WriteOperation { horizon ), UpdateKernelSum { header_hash, .. 
} => write!(f, "Update kernel sum for block: {}", header_hash.to_hex()), - SetAccumulatedDataForOrphan(chain_header) => { - write!(f, "Set accumulated data for orphan {}", chain_header.hash().to_hex()) + SetAccumulatedDataForOrphan(accumulated_data) => { + write!(f, "Set accumulated data for orphan {}", accumulated_data) }, SetBestBlock { height, diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index f58eea66be..1bae3c9ab2 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -203,12 +203,12 @@ impl LMDBDatabase { _file_lock: Arc::new(file_lock), }; - db.build_indexes()?; + db.check_if_rebuild_required()?; Ok(db) } - fn build_indexes(&self) -> Result<(), ChainStorageError> { + fn check_if_rebuild_required(&self) -> Result<(), ChainStorageError> { let txn = self.read_transaction()?; if lmdb_len(&txn, &self.deleted_txo_mmr_position_to_height_index)? == 0 && lmdb_len(&txn, &self.inputs_db)? 
> 0 { @@ -299,20 +299,12 @@ impl LMDBDatabase { InsertMoneroSeedHeight(data, height) => { self.insert_monero_seed_height(&write_txn, data, *height)?; }, - SetAccumulatedDataForOrphan(chain_header) => { - self.set_accumulated_data_for_orphan( - &write_txn, - chain_header.hash(), - chain_header.accumulated_data(), - )?; + SetAccumulatedDataForOrphan(accumulated_data) => { + self.set_accumulated_data_for_orphan(&write_txn, accumulated_data)?; }, InsertChainOrphanBlock(chain_block) => { self.insert_orphan_block(&write_txn, chain_block.block())?; - self.set_accumulated_data_for_orphan( - &write_txn, - chain_block.hash(), - chain_block.accumulated_data(), - )?; + self.set_accumulated_data_for_orphan(&write_txn, chain_block.accumulated_data())?; }, UpdatePrunedHashSet { mmr_tree, @@ -651,24 +643,22 @@ impl LMDBDatabase { Ok(()) } - #[allow(clippy::ptr_arg)] fn set_accumulated_data_for_orphan( &self, txn: &WriteTransaction<'_>, - header_hash: &HashOutput, accumulated_data: &BlockHeaderAccumulatedData, ) -> Result<(), ChainStorageError> { - if !lmdb_exists(txn, &self.orphans_db, header_hash.as_slice())? { + if !lmdb_exists(txn, &self.orphans_db, accumulated_data.hash.as_slice())? { return Err(ChainStorageError::InvalidOperation(format!( "set_accumulated_data_for_orphan: orphan {} does not exist", - header_hash.to_hex() + accumulated_data.hash.to_hex() ))); } lmdb_insert( txn, &self.orphan_header_accumulated_data_db, - header_hash.as_slice(), + accumulated_data.hash.as_slice(), &accumulated_data, "orphan_header_accumulated_data_db", )?; @@ -704,10 +694,9 @@ impl LMDBDatabase { if let Some(ref last_header) = self.fetch_last_header_in_txn(txn)? { if last_header.height != header.height.saturating_sub(1) { return Err(ChainStorageError::InvalidOperation(format!( - "Attempted to insert a header out of order. Was expecting chain height to be {} but current last \ - header height is {}", - header.height - 1, - last_header.height + "Attempted to insert a header out of order. 
The last header height is {} but attempted to insert \ + a header with height {}", + last_header.height, header.height, ))); } @@ -953,47 +942,56 @@ impl LMDBDatabase { } fn delete_orphan(&self, txn: &WriteTransaction<'_>, hash: &HashOutput) -> Result<(), ChainStorageError> { - if let Some(orphan) = lmdb_get::<_, Block>(txn, &self.orphans_db, hash.as_slice())? { - let parent_hash = orphan.header.prev_hash; - lmdb_delete_key_value(txn, &self.orphan_parent_map_index, parent_hash.as_slice(), &hash)?; + let orphan = match lmdb_get::<_, Block>(txn, &self.orphans_db, hash.as_slice())? { + Some(orphan) => orphan, + None => { + // delete_orphan is idempotent + debug!( + target: LOG_TARGET, + "delete_orphan: request to delete orphan block {} that was not found.", + hash.to_hex() + ); + return Ok(()); + }, + }; - // Orphan is a tip hash - if lmdb_exists(txn, &self.orphan_chain_tips_db, hash.as_slice())? { - lmdb_delete(txn, &self.orphan_chain_tips_db, hash.as_slice(), "orphan_chain_tips_db")?; + let parent_hash = orphan.header.prev_hash; + lmdb_delete_key_value(txn, &self.orphan_parent_map_index, parent_hash.as_slice(), &hash)?; - // Parent becomes a tip hash - if lmdb_exists(txn, &self.orphans_db, parent_hash.as_slice())? { - lmdb_insert( - txn, - &self.orphan_chain_tips_db, - parent_hash.as_slice(), - &parent_hash, - "orphan_chain_tips_db", - )?; - } - } + // Orphan is a tip hash + if lmdb_exists(txn, &self.orphan_chain_tips_db, hash.as_slice())? { + lmdb_delete(txn, &self.orphan_chain_tips_db, hash.as_slice(), "orphan_chain_tips_db")?; - if lmdb_exists(txn, &self.orphan_header_accumulated_data_db, hash.as_slice())? { - lmdb_delete( + // Parent becomes a tip hash + if lmdb_exists(txn, &self.orphans_db, parent_hash.as_slice())? 
{ + lmdb_insert( txn, - &self.orphan_header_accumulated_data_db, - hash.as_slice(), - "orphan_header_accumulated_data_db", + &self.orphan_chain_tips_db, + parent_hash.as_slice(), + &parent_hash, + "orphan_chain_tips_db", )?; } + } - if lmdb_get::<_, BlockHeaderAccumulatedData>(txn, &self.orphan_header_accumulated_data_db, hash.as_slice())? - .is_some() - { - lmdb_delete( - txn, - &self.orphan_header_accumulated_data_db, - hash.as_slice(), - "orphan_header_accumulated_data_db", - )?; - } - lmdb_delete(txn, &self.orphans_db, hash.as_slice(), "orphans_db")?; + if lmdb_exists(txn, &self.orphan_header_accumulated_data_db, hash.as_slice())? { + lmdb_delete( + txn, + &self.orphan_header_accumulated_data_db, + hash.as_slice(), + "orphan_header_accumulated_data_db", + )?; + } + + if lmdb_exists(txn, &self.orphan_header_accumulated_data_db, hash.as_slice())? { + lmdb_delete( + txn, + &self.orphan_header_accumulated_data_db, + hash.as_slice(), + "orphan_header_accumulated_data_db", + )?; } + lmdb_delete(txn, &self.orphans_db, hash.as_slice(), "orphans_db")?; Ok(()) } @@ -1514,7 +1512,7 @@ impl BlockchainBackend for LMDBDatabase { } Err(ChainStorageError::ValueNotFound { - entity: "chain_header_in_all_chains", + entity: "chain header (in chain_header_in_all_chains)", field: "hash", value: hash.to_hex(), }) @@ -2015,14 +2013,15 @@ impl BlockchainBackend for LMDBDatabase { Ok(Some(chain_header)) } - fn fetch_orphan_children_of(&self, hash: HashOutput) -> Result, ChainStorageError> { + fn fetch_orphan_children_of(&self, parent_hash: HashOutput) -> Result, ChainStorageError> { trace!( target: LOG_TARGET, "Call to fetch_orphan_children_of({})", - hash.to_hex() + parent_hash.to_hex() ); let txn = self.read_transaction()?; - let orphan_hashes: Vec = lmdb_get_multiple(&txn, &self.orphan_parent_map_index, hash.as_slice())?; + let orphan_hashes: Vec = + lmdb_get_multiple(&txn, &self.orphan_parent_map_index, parent_hash.as_slice())?; let mut res = Vec::with_capacity(orphan_hashes.len()); 
for hash in orphan_hashes { res.push(lmdb_get(&txn, &self.orphans_db, hash.as_slice())?.ok_or_else(|| { diff --git a/base_layer/core/src/lib.rs b/base_layer/core/src/lib.rs index 81da0f0c85..8d6b4fc3b5 100644 --- a/base_layer/core/src/lib.rs +++ b/base_layer/core/src/lib.rs @@ -45,6 +45,7 @@ pub mod proof_of_work; pub mod validation; #[cfg(any(test, feature = "base_node"))] +#[macro_use] pub mod test_helpers; #[cfg(any(feature = "base_node", feature = "base_node_proto"))] diff --git a/base_layer/core/src/test_helpers/block_spec.rs b/base_layer/core/src/test_helpers/block_spec.rs new file mode 100644 index 0000000000..997e05ef29 --- /dev/null +++ b/base_layer/core/src/test_helpers/block_spec.rs @@ -0,0 +1,238 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use crate::{
+    proof_of_work::Difficulty,
+    transactions::{tari_amount::MicroTari, transaction::Transaction},
+};
+
+pub struct BlockSpecs {
+    pub specs: Vec<BlockSpec>,
+}
+
+impl BlockSpecs {
+    pub fn len(&self) -> usize {
+        self.specs.len()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    pub fn into_vec(self) -> Vec<BlockSpec> {
+        self.specs
+    }
+}
+
+impl From<Vec<BlockSpec>> for BlockSpecs {
+    fn from(specs: Vec<BlockSpec>) -> Self {
+        Self { specs }
+    }
+}
+
+impl<'a, const N: usize> From<&'a [(&'static str, u64, u64); N]> for BlockSpecs {
+    fn from(arr: &'a [(&'static str, u64, u64); N]) -> Self {
+        BlockSpecs::from(&arr[..])
+    }
+}
+
+impl<'a> From<&'a [(&'static str, u64, u64)]> for BlockSpecs {
+    fn from(arr: &'a [(&'static str, u64, u64)]) -> Self {
+        Self {
+            specs: arr
+                .iter()
+                .map(|(name, diff, time)| {
+                    BlockSpec::builder()
+                        .with_name(name)
+                        .with_block_time(*time)
+                        .with_difficulty((*diff).into())
+                        .finish()
+                })
+                .collect(),
+        }
+    }
+}
+
+impl IntoIterator for BlockSpecs {
+    type IntoIter = std::vec::IntoIter<BlockSpec>;
+    type Item = BlockSpec;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.specs.into_iter()
+    }
+}
+
+#[macro_export]
+macro_rules!
block_spec { + (@ { $spec:ident }) => {}; + + (@ { $spec: ident } height: $height:expr, $($tail:tt)*) => { + $spec = $spec.with_height($height); + $crate::block_spec!(@ { $spec } $($tail)*) + }; + (@ { $spec: ident } difficulty: $difficulty:expr, $($tail:tt)*) => { + $spec = $spec.with_difficulty($difficulty.into()); + $crate::block_spec!(@ { $spec } $($tail)*) + }; + (@ { $spec: ident } reward: $reward:expr, $($tail:tt)*) => { + $spec = $spec.with_reward($reward.into()); + $crate::block_spec!(@ { spec } $($tail)*) + }; + + (@ { $spec: ident } $k:ident: $v:expr $(,)?) => { $crate::block_spec!(@ { $spec } $k: $v,) }; + + ($name:expr, $($tail:tt)+) => {{ + let mut spec = $crate::block_spec!($name); + $crate::block_spec!(@ { spec } $($tail)+); + spec.finish() + }}; + ($name:expr $(,)?) => { + $crate::test_helpers::BlockSpec::builder().with_name($name).finish() + }; +} + +/// Usage: +/// ```ignore +/// block_specs!(["1a->GB"], ["2a->1a"], ["3a->2a", difficulty: 2], ["4a->3a", reward: 50000]); +/// ``` +#[macro_export] +macro_rules! block_specs { + (@ { $specs:ident }) => {}; + + (@ { $specs:ident } [$name:expr, $($k:ident: $v:expr),*], $($tail:tt)*) => { + $specs.push($crate::block_spec!($name, $($k: $v),*)); + block_specs!(@ { $specs } $($tail)*) + }; + + (@ { $specs:ident } [$name:expr $(,)?], $($tail:tt)*) => { block_specs!(@ { $specs } [$name,], $($tail)*) }; + + (@ { $specs:ident } [$name:expr $(,)?]$(,)?) => { block_specs!(@ { $specs } [$name,],) }; + + (@ { $specs:ident } [$name:expr, $($k:ident: $v:expr),* $(,)?] $(,)?) => { block_specs!(@ { $specs } [$name, $($k: $v),*],) }; + + // Entrypoints + ([$name:expr, $($k:ident: $v:expr),*], $($tail:tt)*) => { + #[allow(clippy::vec_init_then_push)] + { + let mut specs = Vec::new(); + $crate::block_specs!(@ { specs } [$name, $($k: $v),*], $($tail)*); + BlockSpecs::from(specs) + } + }; + ([$name:expr, $($k:ident: $v:expr),* $(,)?] 
$(,)*) => {{
+        $crate::block_specs!([$name, $($k: $v),*],)
+    }};
+
+    ([$name:expr], $($tail:tt)*) => {{ $crate::block_specs!([$name,], $($tail)*) }};
+
+    ([$name:expr]) => {{ $crate::block_specs!([$name,],) }};
+
+    () => { BlockSpecs::from(Vec::new()) };
+}
+
+#[derive(Debug, Clone)]
+pub struct BlockSpec {
+    pub name: &'static str,
+    pub prev_block: &'static str,
+    pub version: u16,
+    pub difficulty: Difficulty,
+    pub block_time: u64,
+    pub reward_override: Option<MicroTari>,
+    pub height_override: Option<u64>,
+    pub transactions: Vec<Transaction>,
+    pub skip_coinbase: bool,
+}
+
+impl BlockSpec {
+    pub fn new() -> Self {
+        Default::default()
+    }
+
+    pub fn builder() -> Self {
+        Default::default()
+    }
+
+    pub fn with_name(mut self, name: &'static str) -> Self {
+        let mut split = name.splitn(2, "->");
+        let name = split.next().unwrap_or("");
+        self.name = name;
+        if let Some(prev_block) = split.next() {
+            self.prev_block = prev_block;
+        }
+        self
+    }
+
+    pub fn with_prev_block(mut self, prev_block_name: &'static str) -> Self {
+        self.prev_block = prev_block_name;
+        self
+    }
+
+    pub fn with_height(mut self, height: u64) -> Self {
+        self.height_override = Some(height);
+        self
+    }
+
+    pub fn with_difficulty(mut self, difficulty: Difficulty) -> Self {
+        self.difficulty = difficulty;
+        self
+    }
+
+    pub fn with_block_time(mut self, block_time: u64) -> Self {
+        self.block_time = block_time;
+        self
+    }
+
+    pub fn with_reward(mut self, reward: MicroTari) -> Self {
+        self.reward_override = Some(reward);
+        self
+    }
+
+    pub fn skip_coinbase(mut self) -> Self {
+        self.skip_coinbase = true;
+        self
+    }
+
+    pub fn with_transactions(mut self, transactions: Vec<Transaction>) -> Self {
+        self.transactions = transactions;
+        self
+    }
+
+    pub fn finish(self) -> Self {
+        self
+    }
+}
+
+impl Default for BlockSpec {
+    fn default() -> Self {
+        Self {
+            name: "",
+            prev_block: "",
+            version: 0,
+            difficulty: 1.into(),
+            block_time: 120,
+            height_override: None,
+            reward_override: None,
+            transactions: vec![],
+            skip_coinbase: false,
+ } + } +} diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index c6e0589666..a98a318947 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -49,7 +49,7 @@ use crate::{ consensus::{chain_strength_comparer::ChainStrengthComparerBuilder, ConsensusConstantsBuilder, ConsensusManager}, crypto::tari_utilities::Hashable, proof_of_work::{AchievedTargetDifficulty, Difficulty, PowAlgorithm}, - test_helpers::BlockSpec, + test_helpers::{block_spec::BlockSpecs, BlockSpec}, transactions::{ transaction::{TransactionInput, TransactionKernel, UnblindedOutput}, CryptoFactories, @@ -381,32 +381,25 @@ impl BlockchainBackend for TempDatabase { } } -pub fn create_chained_blocks( - blocks: &[(&str, u64, u64)], +pub fn create_chained_blocks>( + blocks: T, genesis_block: Arc, ) -> (Vec, HashMap>) { let mut block_hashes = HashMap::new(); block_hashes.insert("GB".to_string(), genesis_block); let rules = ConsensusManager::builder(Network::LocalNet).build(); - + let blocks: BlockSpecs = blocks.into(); let mut block_names = Vec::with_capacity(blocks.len()); - for (name, difficulty, time) in blocks { - let split = name.split("->").collect::>(); - let to = split[0].to_string(); - let from = split[1].to_string(); - + for block_spec in blocks { let prev_block = block_hashes - .get(&from) - .unwrap_or_else(|| panic!("Could not find block {}", from)); - let block_spec = BlockSpec::new() - .with_difficulty((*difficulty).into()) - .with_block_time(*time) - .finish(); + .get(block_spec.prev_block) + .unwrap_or_else(|| panic!("Could not find block {}", block_spec.prev_block)); + let name = block_spec.name; + let difficulty = block_spec.difficulty; let (block, _) = create_block(&rules, prev_block.block(), block_spec); - let block = mine_block(block, prev_block.accumulated_data(), (*difficulty).into()); - - block_names.push(to.clone()); - block_hashes.insert(to, block); + let block = 
mine_block(block, prev_block.accumulated_data(), difficulty); + block_names.push(name.to_string()); + block_hashes.insert(name.to_string(), block); } (block_names, block_hashes) } @@ -425,9 +418,9 @@ fn mine_block(block: Block, prev_block_accum: &BlockHeaderAccumulatedData, diffi Arc::new(ChainBlock::try_construct(Arc::new(block), accum).unwrap()) } -pub fn create_main_chain( +pub fn create_main_chain>( db: &BlockchainDatabase, - blocks: &[(&str, u64, u64)], + blocks: T, ) -> (Vec, HashMap>) { let genesis_block = db.fetch_block(0).unwrap().try_into_chain_block().map(Arc::new).unwrap(); let (names, chain) = create_chained_blocks(blocks, genesis_block); @@ -439,9 +432,9 @@ pub fn create_main_chain( (names, chain) } -pub fn create_orphan_chain( +pub fn create_orphan_chain>( db: &BlockchainDatabase, - blocks: &[(&str, u64, u64)], + blocks: T, root_block: Arc, ) -> (Vec, HashMap>) { let (names, chain) = create_chained_blocks(blocks, root_block); diff --git a/base_layer/core/src/test_helpers/mod.rs b/base_layer/core/src/test_helpers/mod.rs index 691623b292..278d9791b8 100644 --- a/base_layer/core/src/test_helpers/mod.rs +++ b/base_layer/core/src/test_helpers/mod.rs @@ -23,6 +23,10 @@ //! Common test helper functions that are small and useful enough to be included in the main crate, rather than the //! integration test folder. 
+#[macro_use] +mod block_spec; +pub use block_spec::{BlockSpec, BlockSpecs}; + pub mod blockchain; use crate::{ @@ -32,7 +36,6 @@ use crate::{ crypto::tari_utilities::Hashable, proof_of_work::{sha3_difficulty, AchievedTargetDifficulty, Difficulty}, transactions::{ - tari_amount::MicroTari, transaction::{Transaction, UnblindedOutput}, CoinbaseBuilder, CryptoFactories, @@ -43,64 +46,6 @@ use std::{iter, path::Path, sync::Arc}; use tari_comms::PeerManager; use tari_storage::{lmdb_store::LMDBBuilder, LMDBWrapper}; -#[derive(Debug, Clone)] -pub struct BlockSpec { - version: u16, - difficulty: Difficulty, - block_time: u64, - reward_override: Option, - transactions: Vec, - skip_coinbase: bool, -} - -impl BlockSpec { - pub fn new() -> Self { - Default::default() - } - - pub fn with_difficulty(mut self, difficulty: Difficulty) -> Self { - self.difficulty = difficulty; - self - } - - pub fn with_block_time(mut self, block_time: u64) -> Self { - self.block_time = block_time; - self - } - - pub fn with_reward(mut self, reward: MicroTari) -> Self { - self.reward_override = Some(reward); - self - } - - pub fn skip_coinbase(mut self) -> Self { - self.skip_coinbase = true; - self - } - - pub fn with_transactions(mut self, transactions: Vec) -> Self { - self.transactions = transactions; - self - } - - pub fn finish(self) -> Self { - self - } -} - -impl Default for BlockSpec { - fn default() -> Self { - Self { - version: 0, - difficulty: 1.into(), - block_time: 120, - reward_override: None, - transactions: vec![], - skip_coinbase: false, - } - } -} - /// Create a partially constructed block using the provided set of transactions /// is chain_block, or rename it to `create_orphan_block` and drop the prev_block argument pub fn create_orphan_block(block_height: u64, transactions: Vec, consensus: &ConsensusManager) -> Block { @@ -111,7 +56,7 @@ pub fn create_orphan_block(block_height: u64, transactions: Vec, co pub fn create_block(rules: &ConsensusManager, prev_block: &Block, spec: 
BlockSpec) -> (Block, UnblindedOutput) { let mut header = BlockHeader::new(spec.version); - let block_height = prev_block.header.height + 1; + let block_height = spec.height_override.unwrap_or(prev_block.header.height + 1); header.height = block_height; header.prev_hash = prev_block.hash(); let reward = spec.reward_override.unwrap_or_else(|| { diff --git a/base_layer/core/src/validation/block_validators/body_only.rs b/base_layer/core/src/validation/block_validators/body_only.rs index 3c88d75640..2e722654f7 100644 --- a/base_layer/core/src/validation/block_validators/body_only.rs +++ b/base_layer/core/src/validation/block_validators/body_only.rs @@ -46,22 +46,22 @@ impl PostOrphanBodyValidation for BodyOnlyValidator { /// 1. Are the block header MMR roots valid? fn validate_body_for_valid_orphan( &self, - block: &ChainBlock, backend: &B, + block: &ChainBlock, metadata: &ChainMetadata, ) -> Result<(), ValidationError> { - if block.header().height != metadata.height_of_longest_chain() + 1 { - return Err(ValidationError::IncorrectNextTipHeight { - expected: metadata.height_of_longest_chain() + 1, - block_height: block.height(), - }); - } if block.header().prev_hash != *metadata.best_block() { return Err(ValidationError::IncorrectPreviousHash { expected: metadata.best_block().to_hex(), block_hash: block.hash().to_hex(), }); } + if block.height() != metadata.height_of_longest_chain() + 1 { + return Err(ValidationError::IncorrectNextTipHeight { + expected: metadata.height_of_longest_chain() + 1, + block_height: block.height(), + }); + } let block_id = format!("block #{} ({})", block.header().height, block.hash().to_hex()); helpers::check_inputs_are_utxos(backend, &block.block().body)?; diff --git a/base_layer/core/src/validation/chain_balance.rs b/base_layer/core/src/validation/chain_balance.rs index f6bf03e898..582c4402d9 100644 --- a/base_layer/core/src/validation/chain_balance.rs +++ b/base_layer/core/src/validation/chain_balance.rs @@ -55,10 +55,10 @@ impl 
ChainBalanceValidator { impl FinalHorizonStateValidation for ChainBalanceValidator { fn validate( &self, + backend: &B, height: u64, total_utxo_sum: &Commitment, total_kernel_sum: &Commitment, - backend: &B, ) -> Result<(), ValidationError> { let emission_h = self.get_emission_commitment_at(height); let total_offset = self.fetch_total_offset_commitment(height, backend)?; diff --git a/base_layer/core/src/validation/mocks.rs b/base_layer/core/src/validation/mocks.rs index 891e7125a0..d5d347133b 100644 --- a/base_layer/core/src/validation/mocks.rs +++ b/base_layer/core/src/validation/mocks.rs @@ -82,7 +82,7 @@ impl BlockSyncBodyValidation for MockValidator { } impl PostOrphanBodyValidation for MockValidator { - fn validate_body_for_valid_orphan(&self, _: &ChainBlock, _: &B, _: &ChainMetadata) -> Result<(), ValidationError> { + fn validate_body_for_valid_orphan(&self, _: &B, _: &ChainBlock, _: &ChainMetadata) -> Result<(), ValidationError> { if self.is_valid.load(Ordering::SeqCst) { Ok(()) } else { @@ -143,10 +143,10 @@ impl MempoolTransactionValidation for MockValidator { impl FinalHorizonStateValidation for MockValidator { fn validate( &self, + _backend: &B, _height: u64, _total_utxo_sum: &Commitment, _total_kernel_sum: &Commitment, - _backend: &B, ) -> Result<(), ValidationError> { if self.is_valid.load(Ordering::SeqCst) { Ok(()) diff --git a/base_layer/core/src/validation/test.rs b/base_layer/core/src/validation/test.rs index f60f28a265..886a878dcb 100644 --- a/base_layer/core/src/validation/test.rs +++ b/base_layer/core/src/validation/test.rs @@ -137,7 +137,7 @@ fn chain_balance_validation() { let validator = ChainBalanceValidator::new(consensus_manager.clone(), factories.clone()); // Validate the genesis state validator - .validate(0, &utxo_sum, &kernel_sum, &*db.db_read_access().unwrap()) + .validate(&*db.db_read_access().unwrap(), 0, &utxo_sum, &kernel_sum) .unwrap(); //---------------------------------- Add a new coinbase and header 
--------------------------------------------// @@ -187,7 +187,7 @@ fn chain_balance_validation() { utxo_sum = &coinbase.commitment + &utxo_sum; kernel_sum = &kernel.excess + &kernel_sum; validator - .validate(1, &utxo_sum, &kernel_sum, &*db.db_read_access().unwrap()) + .validate(&*db.db_read_access().unwrap(), 1, &utxo_sum, &kernel_sum) .unwrap(); //---------------------------------- Try to inflate --------------------------------------------// @@ -231,6 +231,6 @@ fn chain_balance_validation() { db.commit(txn).unwrap(); validator - .validate(2, &utxo_sum, &kernel_sum, &*db.db_read_access().unwrap()) + .validate(&*db.db_read_access().unwrap(), 2, &utxo_sum, &kernel_sum) .unwrap_err(); } diff --git a/base_layer/core/src/validation/traits.rs b/base_layer/core/src/validation/traits.rs index ad77c387e2..684e68600a 100644 --- a/base_layer/core/src/validation/traits.rs +++ b/base_layer/core/src/validation/traits.rs @@ -41,8 +41,8 @@ pub trait BlockSyncBodyValidation: Send + Sync { pub trait PostOrphanBodyValidation: Send + Sync { fn validate_body_for_valid_orphan( &self, - block: &ChainBlock, backend: &B, + block: &ChainBlock, metadata: &ChainMetadata, ) -> Result<(), ValidationError>; } @@ -67,9 +67,9 @@ pub trait HeaderValidation: Send + Sync { pub trait FinalHorizonStateValidation: Send + Sync { fn validate( &self, + backend: &B, height: u64, total_utxo_sum: &Commitment, total_kernel_sum: &Commitment, - backend: &B, ) -> Result<(), ValidationError>; } diff --git a/base_layer/core/tests/block_validation.rs b/base_layer/core/tests/block_validation.rs index 8b7303ed47..a05688f5ef 100644 --- a/base_layer/core/tests/block_validation.rs +++ b/base_layer/core/tests/block_validation.rs @@ -439,7 +439,7 @@ OutputFeatures::default()), let metadata = db.get_chain_metadata().unwrap(); // this block should be okay assert!(body_only_validator - .validate_body_for_valid_orphan(&chain_block, &*db.db_read_access().unwrap(), &metadata) + 
.validate_body_for_valid_orphan(&*db.db_read_access().unwrap(), &chain_block, &metadata) .is_ok()); // lets break the chain sequence @@ -464,7 +464,7 @@ OutputFeatures::default()), let chain_block = ChainBlock::try_construct(Arc::new(new_block), accumulated_data).unwrap(); let metadata = db.get_chain_metadata().unwrap(); assert!(body_only_validator - .validate_body_for_valid_orphan(&chain_block, &*db.db_read_access().unwrap(), &metadata) + .validate_body_for_valid_orphan(&*db.db_read_access().unwrap(), &chain_block, &metadata) .is_err()); // lets have unknown inputs; @@ -503,7 +503,7 @@ OutputFeatures::default()), let chain_block = ChainBlock::try_construct(Arc::new(new_block), accumulated_data).unwrap(); let metadata = db.get_chain_metadata().unwrap(); assert!(body_only_validator - .validate_body_for_valid_orphan(&chain_block, &*db.db_read_access().unwrap(), &metadata) + .validate_body_for_valid_orphan(&*db.db_read_access().unwrap(), &chain_block, &metadata) .is_err()); // lets check duplicate txos @@ -533,7 +533,7 @@ OutputFeatures::default()), let chain_block = ChainBlock::try_construct(Arc::new(new_block), accumulated_data).unwrap(); let metadata = db.get_chain_metadata().unwrap(); assert!(body_only_validator - .validate_body_for_valid_orphan(&chain_block, &*db.db_read_access().unwrap(), &metadata) + .validate_body_for_valid_orphan(&*db.db_read_access().unwrap(), &chain_block, &metadata) .is_err()); // check mmr roots @@ -560,7 +560,7 @@ OutputFeatures::default()), let chain_block = ChainBlock::try_construct(Arc::new(new_block), accumulated_data).unwrap(); let metadata = db.get_chain_metadata().unwrap(); assert!(body_only_validator - .validate_body_for_valid_orphan(&chain_block, &*db.db_read_access().unwrap(), &metadata) + .validate_body_for_valid_orphan(&*db.db_read_access().unwrap(), &chain_block, &metadata) .is_err()); }